"""Models for stream tracking via active SRS synchronisation.

Active streams are tracked by scraping the HTTP API of every known SRS
node (a Celery beat task runs the scrape every 5 seconds, and every SRS
callback hook triggers one as well), instead of only counting callback
hooks. This prevents the active stream counts from drifting, and the
SRS API additionally exposes codec, resolution and data-rate details
per stream.
"""
import json
import uuid

from django.conf import settings
from django.db import models
from django.db.models.signals import pre_delete
from django.urls import reverse
from django.utils.translation import gettext_lazy as _

from portier.common import handlers
from config import signals_shared


class Stream(models.Model):
    stream = models.UUIDField(unique=True, default=uuid.uuid4, help_text=_('stream_stream_help'))
    name = models.CharField(max_length=100, help_text=_('stream_name_help'))

    # The same stream UUID can be published multiple times to different origin
    # servers. This is a valid scheme to achieve failover on the origin layer.
    # Thus we need to keep track of whether a stream is published at least
    # once, and only send signals when we are going to / coming from 0
    # published streams.
    publish_counter = models.PositiveIntegerField(default=0)

    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        if self.publish_counter > 0:
            signals_shared.stream_active.send(sender=self.__class__,
                                              stream=str(self.stream),
                                              param=None)
        else:
            signals_shared.stream_inactive.send(sender=self.__class__,
                                                stream=str(self.stream),
                                                param=None)

    def get_absolute_url(self):
        return reverse('config:stream_detail', kwargs={'pk': self.pk})

    def class_name(self):
        return _('stream_class_name')

    def __str__(self):
        return self.name


pre_delete.connect(handlers.remove_obj_perms_connected_with_user, sender=Stream)
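

# A minimal sketch, not part of the module's public API: how an SRS
# publish/unpublish callback hook could drive publish_counter. The helper
# name and its call site are assumptions; Stream.save() above then emits
# stream_active while at least one origin still publishes the UUID, and
# stream_inactive once the counter returns to zero.
def _example_handle_publish_callback(stream_uuid, published):
    stream = Stream.objects.get(stream=stream_uuid)
    if published:
        stream.publish_counter += 1
    else:
        # Clamp at zero so a late or duplicate unpublish hook cannot push
        # the PositiveIntegerField below its valid range.
        stream.publish_counter = max(stream.publish_counter - 1, 0)
    stream.save()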


class SRSNode(models.Model):
    name = models.CharField(max_length=100, help_text=_('srsnode_name_help'))
    api_base = models.CharField(max_length=256, help_text=_('srsnode_api_base_help'))
    rtmp_base = models.CharField(max_length=256, help_text=_('srsnode_rtmp_base_help'))
    active = models.BooleanField(help_text=_('srsnode_active_help'))

    class Meta:
        verbose_name = _('srsnode_verbose_name')
        verbose_name_plural = _('srsnode_verbose_name_plural')

    def __str__(self):
        return self.name


class SRSStreamInstance(models.Model):
    node = models.ForeignKey(SRSNode, on_delete=models.CASCADE, help_text=_('srsstreaminstance_node_help'))
    stream = models.ForeignKey(Stream, on_delete=models.CASCADE, help_text=_('srsstreaminstance_stream_help'))
    last_update = models.DateTimeField(auto_now=True, help_text=_('srsstreaminstance_last_update_help'))
    statusdata = models.TextField(default="{}", help_text=_('srsstreaminstance_statusdata_help'))

    class Meta:
        verbose_name = _('srsstreaminstance_verbose_name')
        verbose_name_plural = _('srsstreaminstance_verbose_name_plural')

    def __str__(self):
        return f"{self.stream} on {self.node}"
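

# A minimal sketch, assuming the stock SRS HTTP API: how the periodic
# scrape (the Celery beat task mentioned in the module docstring, or the
# one triggered by an SRS callback hook) could refresh SRSStreamInstance
# rows from a node's /api/v1/streams endpoint. The helper and the exact
# response handling are illustrative assumptions, not part of this module.
def _example_scrape_srs_node(node):
    import requests  # local import to keep the sketch self-contained

    response = requests.get(f"{node.api_base}/api/v1/streams", timeout=5)
    response.raise_for_status()
    for entry in response.json().get('streams', []):
        # SRS reports the stream key as "name"; here that is the stream UUID.
        stream = Stream.objects.filter(stream=entry.get('name')).first()
        if stream is None:
            continue  # stream UUID unknown to portier, nothing to track
        SRSStreamInstance.objects.update_or_create(
            node=node, stream=stream,
            defaults={'statusdata': json.dumps(entry)},
        )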


class Restream(models.Model):
    FORMATS = (
        ('flv', 'flv (RTMP)'),
        ('mpegts', 'mpegts (SRT)'),
    )

    stream = models.ForeignKey(Stream, on_delete=models.CASCADE, help_text=_('restream_stream_help'))
    target = models.CharField(max_length=500, help_text=_('restream_target_help'))
    name = models.CharField(max_length=100, help_text=_('restream_name_help'))
    active = models.BooleanField(help_text=_('restream_activate_help'))
    format = models.CharField(max_length=6, choices=FORMATS, default='flv', help_text=_('restream_format_help'))

    class Meta:
        verbose_name = _('restream_verbose_name')
        verbose_name_plural = _('restream_verbose_name_plural')

    def class_name(self):
        return _('restream_class_name')

    def get_absolute_url(self):
        return reverse('config:restream_detail', kwargs={'pk': self.pk})

    def __str__(self):
        return f'{self.stream} to {self.name}'

    def get_concierge_configuration(self):
        stream_instances = SRSStreamInstance.objects.filter(stream=self.stream).all()

        if not stream_instances:
            return None

        # We use the actual SRS node of the first stream instance to make sure
        # that we use the same node for the restreaming. This avoids
        # unnecessary traffic between the SRS nodes. In theory a clustered SRS
        # setup could provide the same stream on all edges, but that would
        # mean east-west traffic between the nodes.
        # This assumption breaks when a second stream instance appears on
        # another SRS node, and only afterwards the first stream instance is
        # removed. In that case, the restreaming would not be retriggered, and
        # the edge would automatically start pulling the stream from the other
        # SRS node.
        # It's a tradeoff between stability and efficiency. We choose
        # stability.
        rtmp_base = stream_instances[0].node.rtmp_base

        return {
            'config_version': 1,
            'stream_source_url': f"{rtmp_base}/{settings.GLOBAL_STREAM_NAMESPACE}/{self.stream.stream}",
            'stream_target_url': self.target,
            'stream_target_transport': self.format,
        }
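

# A usage sketch with assumed example values; the source URL layout follows
# the f-string in get_concierge_configuration() above:
#
#   restream = Restream.objects.filter(active=True).first()
#   restream.get_concierge_configuration()
#   # -> {'config_version': 1,
#   #     'stream_source_url': 'rtmp://origin-1/<namespace>/<stream uuid>',
#   #     'stream_target_url': restream.target,
#   #     'stream_target_transport': 'flv'}
#   # or None while no SRSStreamInstance exists for the stream.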


pre_delete.connect(handlers.remove_obj_perms_connected_with_user, sender=Restream)