diff --git a/docusaurus.config.js b/docusaurus.config.js index 989e3213..f5a52730 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -45,7 +45,11 @@ const config = { //lastVersion: versions[0], versions: { current: { - label: `6.0 (Unstable) 🚧`, + label: `7.0 (Unstable) 🚧`, + path: 'v7', + }, + '6.0': { + label: '6.0 (Alpha) 🚀', path: 'v6', }, '5.0': { diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0.json b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0.json new file mode 100644 index 00000000..38ea900f --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0.json @@ -0,0 +1,26 @@ +{ + "sidebar.docsSidebar.category.Getting Started": { + "message": "起步" + }, + "sidebar.docsSidebar.category.Main Protocols": { + "message": "核心协议" + }, + "sidebar.docsSidebar.category.Main Features": { + "message": "核心功能" + }, + "sidebar.docsSidebar.category.OpenAPI": { + "message": "开放接口" + }, + "sidebar.docsSidebar.category.Clusters": { + "message": "集群" + }, + "sidebar.docsSidebar.category.DevOps": { + "message": "运维" + }, + "sidebar.docsSidebar.category.Advanced Guides": { + "message": "高级指引" + }, + "sidebar.docsSidebar.category.Others": { + "message": "其他" + } +} \ No newline at end of file diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/arm.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/arm.md new file mode 100644 index 00000000..d692f1eb --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/arm.md @@ -0,0 +1,245 @@ +--- +title: ARM and CrossBuild +sidebar_label: ARM和交叉编译 +hide_title: false +hide_table_of_contents: false +--- + +# SRS for linux-arm + +注意:一般情况下,直接在ARM下是可以编译SRS的,参考官网正常的编译方法就可以,不需要交叉编译。 + +!!! 注意,请先确认是否需要交叉编译,一般可以直接编译,除非极少数情况。 + +> 翁晓晶:看来很多人误解了交叉编译的意思了,异构平台编译才要,比如编译平台跟运行平台是异构的,才需要,都是同一个平台不需要。 + +> 翁晓晶:最常见的场景就是玩网络设备的,因为网络设备的u都很弱,编译很慢,所以才在pc上做交叉编译,因为pc的u是x86,网络设备的u基本上都是mips或者arm的低频率的,属于异构,x86的u编译速度明显快于它们,所以大家都在pc上做交叉编译,然后把结果拷贝进网络设备,这样编译速度快很多,当然你有时间也可以直接在网络设备上正常编译也是可以的,就是慢很多。 + +> 翁晓晶:我看到有个朋友提到arm的服务器比如鲲鹏,那就直接在arm的服务器上编译就好了,没必要再交叉编译了,arm服务器又不是网络设备,U编译个程序还是没问题的,不要走弯路了。 + +## Why run SRS on ARM? 
+ +ARM跑SRS主要原因: + +* ARM服务器越来越多了,可以直接编译和运行SRS,参考 [#1282](https://github.com/ossrs/srs/issues/1282#issue-386077124)。 +* ARM嵌入式设备上用SRS,会比较难,但可以交叉编译,参考 [#1547](https://github.com/ossrs/srs/issues/1547#issue-543780097)。 + +## RaspberryPi + +SRS可以直接在`RespberryPI`上编译和运行,不用交叉编译。参考 [#1282](https://github.com/ossrs/srs/issues/1282#issue-386077124)。 + + + +## ARM Server: armv7, armv8(aarch64) + +SRS可以直接在ARM Server上编译和运行,不用交叉编译。参考 [#1282](https://github.com/ossrs/srs/issues/1282#issue-386077124)。 + +``` +./configure && make +``` + +如果想编译出arm的二进制,在arm服务器上运行,比如在mac上编译出二进制后放在鲲鹏服务器上跑,也可以用arm docker编译,参考[aarch64](https://github.com/ossrs/dev-docker/tree/aarch64#usage)。 + +``` +docker run -it --rm -v `pwd`:/srs -w /srs ossrs/srs:aarch64 \ + bash -c "./configure && make" +``` + +对于龙芯和鲲鹏等armv8平台,可能无法识别出来CPU,可以指定为armv8,参考[#1282](https://github.com/ossrs/srs/issues/1282#issuecomment-568891854): + +```bash +./configure --extra-flags='-D__aarch64__' && make +``` + +直接运行SRS: + +``` +./objs/srs -c conf/console.conf +``` + +推流到这个docker: + +``` +ffmpeg -re -i doc/source.flv -c copy -f flv rtmp://127.0.0.1:1935/live/livestream +``` + +播放:[http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) + +![image](https://user-images.githubusercontent.com/2777660/72774670-7108c980-3c46-11ea-9e8b-d4fb3a475ea2.png) + + + +## Ubuntu Cross Build SRS: ARMv8(aarch64) + +!!! 注意,请先确认是否需要交叉编译,一般可以直接编译,除非极少数情况,参考[#1547](https://github.com/ossrs/srs/issues/1547#issue-543780097)。 + +启动容器Ubuntu20(xenial),主目录为SRS: + +``` +cd ~/git/srs/trunk +docker run --rm -it -v `pwd`:/srs -w /srs \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:ubuntu20 bash +``` + +> 推荐使用阿里云的容器,下载的速度比较快,也可以使用docker官方容器:`ossrs/srs:ubuntu20` + +安装toolchain(容器已经安装好了): + +``` +apt-get install -y gcc-aarch64-linux-gnu g++-aarch64-linux-gnu +``` + +交叉编译SRS: + +``` +./configure --cross-build --cross-prefix=aarch64-linux-gnu- +make +``` + +> 编译时,默认会重新交叉编译OpenSSL,而不会使用系统的SSL,如果需要强制使用系统的SSL,可以用`--use-sys-ssl`。 + +> 若编译时无法识别出aarch64,可以在configure时加编译选项`--extra-flags='-D__aarch64__'`,一般没有这个问题。 + +在ARMv8(aarch64)的docker上跑SRS:https://hub.docker.com/r/arm64v8/ubuntu + +``` +cd ~/git/srs/trunk && docker run --rm -it -v `pwd`:/srs -w /srs \ + -p 1935:1935 -p 1985:1985 -p 8080:8080 arm64v8/ubuntu \ + ./objs/srs -c conf/console.conf +``` + +推流到这个docker: + +``` +ffmpeg -re -i doc/source.flv -c copy -f flv rtmp://127.0.0.1:1935/live/livestream +``` + +播放:[http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) + +## Ubuntu Cross Build SRS: ARMv7 + +!!! 
注意,请先确认是否需要交叉编译,一般可以直接编译,除非极少数情况,参考[#1547](https://github.com/ossrs/srs/issues/1547#issue-543780097)。 + +启动容器Ubuntu20(xenial),主目录为SRS: + +``` +cd ~/git/srs/trunk +docker run --rm -it -v `pwd`:/srs -w /srs \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:ubuntu20 bash +``` + +> 推荐使用阿里云的容器,下载的速度比较快,也可以使用docker官方容器:`ossrs/srs:ubuntu20` + +安装toolchain(容器已经安装好了),例如[Acqua or RoadRunner board](https://www.acmesystems.it/arm9_toolchain) + +``` +apt-get install -y gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf +``` + +交叉编译SRS: + +``` +./configure --cross-build --cross-prefix=arm-linux-gnueabihf- +make +``` + +> 编译时,默认会重新交叉编译OpenSSL,而不会使用系统的SSL,如果需要强制使用系统的SSL,可以用`--use-sys-ssl`。 + +在ARMv7的docker上跑SRS:https://hub.docker.com/r/armv7/armhf-ubuntu + +``` +cd ~/git/srs/trunk && docker run --rm -it -v `pwd`:/srs -w /srs \ + -p 1935:1935 -p 1985:1985 -p 8080:8080 armv7/armhf-ubuntu \ + ./objs/srs -c conf/console.conf +``` + +推流到这个docker: + +``` +ffmpeg -re -i doc/source.flv -c copy -f flv rtmp://127.0.0.1:1935/live/livestream +``` + +播放:[http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) + +## Ubuntu Cross Build SRS: ARMv7(hisiv500) + +首先,找一台Ubuntu20的虚拟机,或者启动Docker: + +```bash +docker run --rm -it -v $(pwd):/srs -w /srs/trunk \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:ubuntu20 bash +``` + +宿主机是64位的,而编译工具是32位的,所以需要安装一个工具: + +```bash +apt-get -y install lib32z1-dev +``` + +然后,从[海思](https://www.hisilicon.com/)下载交叉编译工具,或者从网上找地方下载。解压后安装: + +```bash +chmod +x arm-hisiv500-linux.install +./arm-hisiv500-linux.install +source /etc/profile +``` + +验证环境, 执行`which arm-hisiv500-linux-g++`能成功找到编译器,就安装成功了: + +```bash +which arm-hisiv500-linux-g++ +# /opt/hisi-linux/x86-arm/arm-hisiv500-linux/target/bin/arm-hisiv500-linux-g++ +``` + +编译SRS,命令如下: + +```bash +./configure --cross-build --cross-prefix=arm-hisiv500-linux- +make +``` + +在海思的板子启动SRS就可以了: + +```bash +./objs/srs -c conf/console.conf +``` + +## Use Other Cross build tools + +!!! 注意,请先确认是否需要交叉编译,一般可以直接编译,除非极少数情况,参考[#1547](https://github.com/ossrs/srs/issues/1547#issue-543780097)。 + +SRS相关的参数如下: + +```bash +./configure -h + +Presets: + --cross-build Enable cross-build, please set bellow Toolchain also. Default: off + +Cross Build options: @see https://ossrs.net/lts/zh-cn/docs/v6/doc/arm#ubuntu-cross-build-srs + --cpu= Toolchain: Select the minimum required CPU. For example: --cpu=24kc + --arch= Toolchain: Select architecture. For example: --arch=aarch64 + --host= Toolchain: Build programs to run on HOST. For example: --host=aarch64-linux-gnu + --cross-prefix= Toolchain: Use PREFIX for tools. For example: --cross-prefix=aarch64-linux-gnu- + +Toolchain options: + --static=on|off Whether add '-static' to link options. Default: off + --cc= Toolchain: Use c compiler CC. Default: gcc + --cxx= Toolchain: Use c++ compiler CXX. Default: g++ + --ar= Toolchain: Use archive tool AR. Default: g++ + --ld= Toolchain: Use linker tool LD. Default: g++ + --randlib= Toolchain: Use randlib tool RANDLIB. Default: g++ + --extra-flags= Set EFLAGS as CFLAGS and CXXFLAGS. Also passed to ST as EXTRA_CFLAGS. 
+``` + +具体使用例子参考[这里](#ubuntu-cross-build-srs) + +**--extra-flags** + +之前在支持ARM时,新增过一个Flags的选项( https://github.com/ossrs/srs/issues/1282#issuecomment-568891854 ),会设置`CFLAGS and CXXFLAGS`,也会将这个设置传递到ST设置`EXTRA_CFLAGS`。同样,对于交叉编译,这个选项也是有效的。 + +Winlin 2014.2 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/arm) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/client-sdk.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/client-sdk.md new file mode 100644 index 00000000..8e9d2746 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/client-sdk.md @@ -0,0 +1,52 @@ +--- +title: Client SDK +sidebar_label: Client SDK +hide_title: false +hide_table_of_contents: false +--- + +# Client SDK + +整个直播的业务架构是: + +``` ++---------+ +-----------------+ +---------+ +| Encoder +-->---+ SRS/CDN Network +--->---+ Player | ++---------+ +-----------------+ +---------+ +``` + +## EXOPlayer + +[EXOPlayer](https://github.com/google/ExoPlayer)是基于Android新的播放器框架的新播放器,支持众多的协议,包括HTTP-FLV和HLS。虽然不支持RTMP,但是支持FLV,因此延迟可以比HLS低很多。 + +## IJKPlayer + +[ijkplayer](https://github.com/Bilibili/ijkplayer)是[B站](http://www.bilibili.com/)出的一个播放器,基于FFMPEG软件解码,可以在Android和iOS上用。 + +## FFmpeg + +[FFmpeg](https://ffmpeg.org) is a complete, cross-platform solution to record, convert and stream audio and video. + +## WebRTC + +[WebRTC](https://webrtc.org/) is Real-time communication for the web. + +## LIBRTMP + +[LIBRTMP](https://github.com/ossrs/librtmp)或者[SRS-LIBRTMP](https://github.com/ossrs/srs-librtmp),只是提供了Transport(RTMP)的功能,用于只需要做传输的场景,比如一些安防摄像头厂商,Transport之前是用RTSP/RTP做的,如果需要接入互联网,将流送到CDN给PC和移动端观看,直接使用H5或者Flash,不需要装插件时,可以用librtmp将已经编码的流MUX成FLV(RTMP传输实际上用的是FLV格式),然后通过librtmp发送出去。 + +## PC + +有些应用场景,还是会用PC端推流,当然是用[OBS](https://obsproject.com/)。 + +> 注意:OBS推流时,流名称的翻译有问题,**流名称**是要写在**流密钥**这里的,如下图所示。 + +![OBS](/img/doc-integration-client-sdk-001.png) + +![OBS](/img/doc-integration-client-sdk-002.png) + +Winlin 2017.4 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/client-sdk) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/cloud.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/cloud.md new file mode 100644 index 00000000..ccb23c6f --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/cloud.md @@ -0,0 +1,12 @@ +--- +title: Cloud +sidebar_label: 云服务 +hide_title: false +hide_table_of_contents: false +--- + +# Docker + +迁移到了[Cloud](/cloud) + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/cloud) diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/delivery-hds.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/delivery-hds.md new file mode 100644 index 00000000..2bc34341 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/delivery-hds.md @@ -0,0 +1,61 @@ +--- +title: HDS 分发 +sidebar_label: HDS 分发 +hide_title: false +hide_table_of_contents: false +--- + +# HDS 分发 + +HDS指Adobe的Http Dynamic Stream,和Apple的[HLS](./hls.md)类似。 + +HDS规范参考:http://www.adobe.com/devnet/hds.html + +## Build + +编译SRS时可以打开或者关闭HDS,详细参考:[Build](./install.md) + +``` +./configure --hds=on +``` + +## Player + +Adobe的HDS可以在Flash播放器中,使用[OSMF播放器](http://www.ossrs.net/players/osmf.html)打开。 + +输入地址:`http://ossrs.net:8081/live/livestream.f4m` + +## HDS Config + +conf/full.conf中hds.srs.com是HDS的配置实例: + +``` +vhost __defaultVhost__ { + hds { + # whether hds enabled + # default: off + enabled on; + # the hds fragment in seconds. 
+ # default: 10 + hds_fragment 10; + # the hds window in seconds, erase the segment when exceed the window. + # default: 60 + hds_window 60; + # the path to store the hds files. + # default: ./objs/nginx/html + hds_path ./objs/nginx/html; + } +} +``` + +配置项的意义和HLS类似,参考[HLS config](./hls.md#hls-config) + +## Why HDS + +为何SRS要在SRS2引入HDS?主要是SRS的HTTP服务器重写,以及文杰哥对于HDS很熟悉。另外,加入HDS不会对SRS现有结构有影响。 + +Winlin 2015.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/delivery-hds) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/delivery-hls.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/delivery-hls.md new file mode 100644 index 00000000..0d8e3449 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/delivery-hls.md @@ -0,0 +1,14 @@ +--- +title: HLS 分发 +sidebar_label: HLS 分发 +hide_title: false +hide_table_of_contents: false +--- + +# HLS 分发 + +迁移到了[HLS](./hls.md). + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/delivery-hls) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/delivery-http-flv.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/delivery-http-flv.md new file mode 100644 index 00000000..cf358762 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/delivery-http-flv.md @@ -0,0 +1,14 @@ +--- +title: HTTP-FLV 分发 +sidebar_label: HTTP-FLV 分发 +hide_title: false +hide_table_of_contents: false +--- + +# HTTP FLV 分发 + +迁移到了[HTTP-FLV](./flv.md). + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/delivery-http-flv) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/delivery-rtmp.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/delivery-rtmp.md new file mode 100644 index 00000000..51814721 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/delivery-rtmp.md @@ -0,0 +1,14 @@ +--- +title: RTMP 分发 +sidebar_label: RTMP 分发 +hide_title: false +hide_table_of_contents: false +--- + +# RTMP Delivery + +迁移到了[RTMP](./rtmp.md). + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/delivery-rtmp) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/drm.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/drm.md new file mode 100644 index 00000000..45432171 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/drm.md @@ -0,0 +1,136 @@ +--- +title: DRM防盗链 +sidebar_label: DRM防盗链 +hide_title: false +hide_table_of_contents: false +--- + +# DRM + +DRM重要的功能就是防盗链,只有允许的用户,才能访问服务器的流。有多种DRM的方式: +* referer防盗链:检查用户从哪个网站过来的。譬如不是从公司的页面过来的人都不让看。 +* token防盗链:用户在播放时,必须先申请token,SRS会回调http检查这个token合法性。 +* FMS token tranverse:边缘RTMP服务器收到每个连接,都去上行节点验证,即token穿越认证。 +* Access服务器:专门的access服务器负责DRM。譬如adobe的access服务器。 +* 推流认证:adobe的RTMP推流时,支持几种认证方式,这个也可以归于防盗链概念。 + + + + +## Referer Anti-suck + +SRS支持referer防盗链,adobe的flash在播放RTMP流时,会把页面的http url放在请求中, +as客户端代码不可以更改。当然如果用自己的客户端,不用flash播放流,就可以随意伪造了; +尽管如此,referer防盗链还是能防住相当一部分盗链。 + +配置Referer防盗链,在vhost中开启referer即可,可以指定publish和play的referer: + +```bash +# the vhost for anti-suck. +vhost refer.anti_suck.com { + # refer hotlink-denial. + refer { + # whether enable the refer hotlink-denial. + # default: off. + enabled on; + # the common refer for play and publish. + # if the page url of client not in the refer, access denied. + # if not specified this field, allow all. + # default: not specified. 
+ all github.com github.io; + # refer for publish clients specified. + # the common refer is not overrided by this. + # if not specified this field, allow all. + # default: not specified. + publish github.com github.io; + # refer for play clients specified. + # the common refer is not overrided by this. + # if not specified this field, allow all. + # default: not specified. + play github.com github.io; + } +} +``` + +> Remark: SRS1/2的Referer配置方法和SRS3不一致,SRS3兼容SRS1/2的配置方法。 + +支持Referer防盗链的协议包括: + +* RTMP:推流和拉流。 + +## Token Authentication + +token类似于referer,不过是放在URL中,在请求参数中,譬如: + +``` +rtmp://vhost/app/stream?token=xxxx +http://vhost/app/stream.flv?token=xxxx +http://vhost/app/stream.m3u8?token=xxxx +http://vhost/rtc/v1/whip/?app=live&stream=livestream&token=xxx +http://vhost/rtc/v1/whep/?app=live&stream=livestream&token=xxx +``` + +这样服务器在`on_publish`或`on_play`回调接口中, 就会把url带过去验证。参考:[HTTP callback](./http-callback.md) + +token比referer更强悍,可以指定超时时间,可以变更token之类。可惜就是需要服务器端做定制,做验证。 +SRS提供http回调来做验证,已经有人用这种方式做了,比较简单靠谱。 + +举个常用的token认证的例子: + +1. 用户在web页面登录,服务器可以生成一个token,譬如:`token=md5(time+id+私钥+有效期)=88195f8943e5c944066725df2b1706f8` +1. 服务器返回给用户一个地址,带token,譬如:`rtmp://192.168.1.10/live/livestream?time=1402307089&expire=3600&token=88195f8943e5c944066725df2b1706f8` +1. 配置srs的http回调,`on_publish http://127.0.0.1:8085/api/v1/streams;` ,参考:[HTTP callback](./http-callback.md#config-srs) +1. 用户推流时,srs会回调那个地址,解析请求的内容,里面的params就有那些认证信息。 +1. 按同样的算法验证,如果md5变了就返回错误,srs就会拒绝连接。如果返回0就会接受连接。 + +> Note: 这是验证推流的,也可以验证播放。 + +## TokenTraverse + +Token防盗链的穿越,指的是在origin-edge集群中,客户播放edge边缘服务器的流时, +边缘将认证的token发送给源站进行验证,即token穿越。 + +FMS的edge和FMS的origin使用私有协议,使用一个连接回源取数据,一个连接回源传输控制命令, +譬如token穿越就是在这个连接做的。参考:https://github.com/ossrs/srs/issues/104 + +token认证建议使用http方式,也就是说客户端连接到边缘时,边缘使用http回调方式验证token。 +像fms那种token穿越,是需要走RTMP协议,其他开源服务器一般都不支持这种方式(中国特色)。 + +SRS可以支持类似fms的token穿越,不过实现方式稍微有区别,不是采用fms edge的私有协议, +而是每次新开一个连接回源验证,验证通过后边缘才提供服务。也就是边缘先做一个完全的代理。 + +SRS这种方式的特点是: +* 在token认证上,能和fms源站对接,fms源站感觉不到什么区别。 +* 每次边缘都会新开连接去验证,开销会大一些;而且只限于connect事件验证,马上验证过后就会收到disconnect事件。 +* 会导致源站的短连接过多(连接验证token,断开),不过可以加一层fms edge解决,这样比所有都是fms edge要好。 + +对于源站短连接过多的问题,可以加一层fms边缘缓解,假设1000个客户端连接到边缘: +* srs => 客户fms 这种方案,会有1000个连接去回源验证,然后断开。 +* srs => cdn-fms => 客户fms 这种方案,会有1000个连接去cdn的fms去验证,只有1个连接去客户那边验证。 + +SRS的token穿越(traverse)的配置,参考`edge.token.traverse.conf`: + +```bash +listen 1935; +vhost __defaultVhost__ { + cluster { + mode remote; + origin 127.0.0.1:19350; + token_traverse on; + } +} +``` + +## Access服务器 + +SRS暂时不支持。 + +## 推流认证 + +SRS暂时不支持,是RTMP特殊的握手协议。 + +Winlin 2015.8 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/drm) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/dvr.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/dvr.md new file mode 100644 index 00000000..e1b4cc54 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/dvr.md @@ -0,0 +1,266 @@ +--- +title: DVR +sidebar_label: DVR +hide_title: false +hide_table_of_contents: false +--- + +# DVR + +SRS支持将RTMP流录制成FLV或MP4文件。下面的描述以FLV为例,但是对MP4也是一样的。 + +当 FFmpeg/OBS 将 RTMP 流发布到 SRS 时,SRS 将把流写入 FLV/MP4 文件。工作流程是: + +```text ++------------+ +-------+ +---------------+ ++ FFmpeg/OBS +---RTMP-->--+ SRS +---DVR-->--+ FLV/MP4 File + ++------------+ +-------+ +---------------+ +``` + +许多用户希望 DVR 提供更多功能,请考虑使用 [Oryx](./getting-started-oryx.md#dvr) 替代, 例如: + +* Oryx 支持 S3 云存储,将最终的 MP4 文件移动到 S3 云存储。 +* Oryx 支持全局过滤器,仅记录指定的流,而不是所有流。 +* Oryx 支持将多个发布会话合并到一个 MP4 文件中。 + +实际上,DVR 功能可能非常复杂,SRS 只支持基本的 DVR 功能,而 Oryx 
将继续改进 DVR 功能。 + +## Build + +DVR作为SRS3的核心功能,永远开启DVR。 + +参考:[Build](./install.md) + +## Config + +DVR的难点在于写入flv和文件命名,SRS的做法是随机生成文件名,用户可以使用http-callback方式,使用外部程序记录这个文件名,或者改成自己要的文件命名方式。 + +当然也可以修改SRS代码,这种做法不推荐,c操作文件名比较麻烦。还是用外部辅助系统做会很方便。 + +DVR的配置文件说明: + +```bash +vhost your_vhost { + # DVR RTMP stream to file, + # start to record to file when encoder publish, + # reap flv/mp4 according by specified dvr_plan. + dvr { + # whether enabled dvr features + # default: off + enabled on; + # the filter for dvr to apply to. + # all, dvr all streams of all apps. + # /, apply to specified stream of app. + # for example, to dvr the following two streams: + # live/stream1 live/stream2 + # default: all + dvr_apply all; + # the dvr plan. canbe: + # session reap flv/mp4 when session end(unpublish). + # segment reap flv/mp4 when flv duration exceed the specified dvr_duration. + # @remark The plan append is removed in SRS3+, for it's no use. + # default: session + dvr_plan session; + # the dvr output path, *.flv or *.mp4. + # we supports some variables to generate the filename. + # [vhost], the vhost of stream. + # [app], the app of stream. + # [stream], the stream name of stream. + # [2006], replace this const to current year. + # [01], replace this const to current month. + # [02], replace this const to current date. + # [15], replace this const to current hour. + # [04], replace this const to current minute. + # [05], replace this const to current second. + # [999], replace this const to current millisecond. + # [timestamp],replace this const to current UNIX timestamp in ms. + # @remark we use golang time format "2006-01-02 15:04:05.999" as "[2006]-[01]-[02]_[15].[04].[05]_[999]" + # for example, for url rtmp://ossrs.net/live/livestream and time 2015-01-03 10:57:30.776 + # 1. No variables, the rule of SRS1.0(auto add [stream].[timestamp].flv as filename): + # dvr_path ./objs/nginx/html; + # => + # dvr_path ./objs/nginx/html/live/livestream.1420254068776.flv; + # 2. Use stream and date as dir name, time as filename: + # dvr_path /data/[vhost]/[app]/[stream]/[2006]/[01]/[02]/[15].[04].[05].[999].flv; + # => + # dvr_path /data/ossrs.net/live/livestream/2015/01/03/10.57.30.776.flv; + # 3. Use stream and year/month as dir name, date and time as filename: + # dvr_path /data/[vhost]/[app]/[stream]/[2006]/[01]/[02]-[15].[04].[05].[999].flv; + # => + # dvr_path /data/ossrs.net/live/livestream/2015/01/03-10.57.30.776.flv; + # 4. Use vhost/app and year/month as dir name, stream/date/time as filename: + # dvr_path /data/[vhost]/[app]/[2006]/[01]/[stream]-[02]-[15].[04].[05].[999].flv; + # => + # dvr_path /data/ossrs.net/live/2015/01/livestream-03-10.57.30.776.flv; + # 5. DVR to mp4: + # dvr_path ./objs/nginx/html/[app]/[stream].[timestamp].mp4; + # => + # dvr_path ./objs/nginx/html/live/livestream.1420254068776.mp4; + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/dvr#custom-path + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/dvr#custom-path + # segment,session apply it. + # default: ./objs/nginx/html/[app]/[stream].[timestamp].flv + dvr_path ./objs/nginx/html/[app]/[stream].[timestamp].flv; + # the duration for dvr file, reap if exceed, in seconds. + # segment apply it. + # session,append ignore. + # default: 30 + dvr_duration 30; + # whether wait keyframe to reap segment, + # if off, reap segment when duration exceed the dvr_duration, + # if on, reap segment when duration exceed and got keyframe. + # segment apply it. + # session,append ignore. 
+ # default: on + dvr_wait_keyframe on; + # about the stream monotonically increasing: + # 1. video timestamp is monotonically increasing, + # 2. audio timestamp is monotonically increasing, + # 3. video and audio timestamp is interleaved monotonically increasing. + # it's specified by RTMP specification, @see 3. Byte Order, Alignment, and Time Format + # however, some encoder cannot provides this feature, please set this to off to ignore time jitter. + # the time jitter algorithm: + # 1. full, to ensure stream start at zero, and ensure stream monotonically increasing. + # 2. zero, only ensure stream start at zero, ignore timestamp jitter. + # 3. off, disable the time jitter algorithm, like atc. + # apply for all dvr plan. + # default: full + time_jitter full; + + # on_dvr, never config in here, should config in http_hooks. + # for the dvr http callback, @see http_hooks.on_dvr of vhost hooks.callback.srs.com + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/dvr#http-callback + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/dvr#http-callback + } +} +``` + +DVR的计划即决定什么时候关闭flv文件,打开新的flv文件,主要的录制计划包括: + +* session:按照session来关闭flv文件,即编码器停止推流时关闭flv,整个session录制为一个flv。 +* segment:按照时间分段录制,flv文件时长配置为dvr_duration和dvr_wait_keyframe。注意:若不按关键帧切flv(即dvr_wait_keyframe配置为off),所以会导致后面的flv启动时会花屏。 +* time_jitter: 时间戳抖动算法。full使用完全的时间戳矫正;zero只是保证从0开始;off不矫正时间戳。 +* dvr_path: 录制的路径,规则参考下一章。 + +参考`conf/dvr.segment.conf`和`conf/dvr.session.conf`配置实例。 + +## Apply + +DVR的apply决定了是否对某个流开启dvr,默认的all是对所有开启。 +这个功能是SRS实现nginx提供的control module的一个基础,而且更丰富。 +也就是可以支持用户调用[http raw api](./http-api.md)控制是否以及何时DVR。 +参考[351](https://github.com/ossrs/srs/issues/459#issuecomment-134983742) + +Apply可以对多个流进行录制,譬如对`live/stream1`和`live/stream2`录制,可以配置成: +``` +vhost xxx { + dvr { + dvr_apply live/stream1 live/stream2; + } +} +``` + +可以使用RAW API控制DVR,参考[319](https://github.com/ossrs/srs/issues/319)和[wiki](./http-api.md#raw-dvr). + +## Custom Path + +我们可以自定义DVR的路径和文件名,规则如下: + +* 按年月日以及流信息生成子目录。便于做软链,或者避免一个目录的文件太多(貌似超过几万linux会支持不了)。 +* 按日期和时间以及流信息生成文件名。便于搜索。 +* 提供日期和时间,以及流信息的变量,以中括号代表变量。 +* 保留目前的方式,按照时间戳生成文件名,保存在一个文件夹。若没有指定文件名(只指定了目录),则默认使用`[stream].[timestamp].flv`作为文件名,和目前保持一致。 + +关于日期和时间的变量,参考了GO的时间格式化字符串,譬如2006代表YYYY这种,比较方便: + +``` +2006-01-02 15:04:05.999 +``` + +DVR支持的变量包括: + +1. 年:[2006],将这个字符串替换为年份。 +1. 月:[01],将这个字符串替换成月份。 +1. 日:[02],将这个字符串替换成日期。 +1. 时:[15],将这个字符串替换成小时。 +1. 分:[04],将这个字符串替换成分。 +1. 秒:[05),将这个字符串替换成秒。 +1. 毫秒:[999],将这个字符串替换成毫秒。 +1. 时间戳:[timestamp],将这个字符串替换成UNIX时间戳,单位是毫秒。 +1. 流相关变量,参考转码:[vhost], [app], [stream] + +下面的例子说明了替换方式, url是`rtmp://ossrs.net/live/livestream`,time是`2015-01-03 10:57:30.776` + +1. 没有变量,SRS1.0方式(自动添加`[stream].[timestamp].flv`作为文件名): + * dvr_path ./objs/nginx/html; + * => + * dvr_path ./objs/nginx/html/live/livestream.1420254068776.flv; + +1. 按流和年月日分目录,时间作为文件名: + * dvr_path /data/[vhost]/[app]/[stream]/[2006]/[01]/[02]/[15].[04].[05].[999].flv; + * => + * dvr_path /data/ossrs.net/live/livestream/2015/01/03/10.57.30.776.flv; + +1. 按流和年月分目录,日和时间作为文件名: + * dvr_path /data/[vhost]/[app]/[stream]/[2006]/[01]/[02]-[15].[04].[05].[999].flv; + * => + * dvr_path /data/ossrs.net/live/livestream/2015/01/03-10.57.30.776.flv; + +1. 按vhost/app和年月分目录,流名称、日和时间作为文件名: + * dvr_path /data/[vhost]/[app]/[2006]/[01]/[stream]-[02]-[15].[04].[05].[999].flv; + * => + * dvr_path /data/ossrs.net/live/2015/01/livestream-03-10.57.30.776.flv; + +1. 
按app分目录,流和时间戳作为文件名(SRS1.0方式): + * dvr_path /data/[app]/[stream].[timestamp].flv; + * => + * dvr_path /data/live/livestream.1420254068776.flv; + +## Http Callback + +打开`http_hooks`的`on_dvr`配置: + +``` +vhost your_vhost { + dvr { + enabled on; + dvr_path ./objs/nginx/html/[app]/[stream]/[2006]/[01]/[02]/[15].[04].[05].[999].flv; + dvr_plan segment; + dvr_duration 30; + dvr_wait_keyframe on; + } + http_hooks { + enabled on; + on_dvr http://127.0.0.1:8085/api/v1/dvrs; + } +} +``` + +api-server的日志: + +``` +[2015-01-03 15:25:48][trace] post to dvrs, req={"action":"on_dvr","client_id":108,"ip":"127.0.0.1","vhost":"__defaultVhost__","app":"live","stream":"livestream","cwd":"/home/winlin/git/srs/trunk","file":"./objs/nginx/html/live/livestream/2015/1/3/15.25.18.442.flv"} +[2015-01-03 15:25:48][trace] srs on_dvr: client id=108, ip=127.0.0.1, vhost=__defaultVhost__, app=live, stream=livestream, cwd=/home/winlin/git/srs/trunk, file=./objs/nginx/html/live/livestream/2015/1/3/15.25.18.442.flv +127.0.0.1 - - [03/Jan/2015:15:25:48] "POST /api/v1/dvrs HTTP/1.1" 200 1 "" "SRS(Simple RTMP Server)2.0.88" +``` + +更多HTTP回调的信息,请参考 [HttpCallback](./http-callback.md) + +## Bug + +关于DVR的bug: + +* 文件名规则:[#179](https://github.com/ossrs/srs/issues/179) +* DVR时HTTP回调:[#274](https://github.com/ossrs/srs/issues/274) +* DVR支持MP4格式:[#738](https://github.com/ossrs/srs/issues/738) +* 如何录制成一个文件:[#776](https://github.com/ossrs/srs/pull/776) + +## Reload + +改变dvr配置后reload,会导致dvr重启,即关闭当前dvr文件后重新应用dvr配置。 + +Winlin 2015.1 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/dvr) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/edge.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/edge.md new file mode 100644 index 00000000..263f85b7 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/edge.md @@ -0,0 +1,211 @@ +--- +title: Edge Cluster +sidebar_label: Edge Cluster +hide_title: false +hide_table_of_contents: false +--- + +# Edge Server + +SRS的Edge主要解决几条流有大量播放请求的场景,比如一个流有上万人观看。SRS的Edge能对接所有的标准RTMP源站服务器。 + +![](/img/doc-main-concepts-edge-001.png) + +> 备注:Edge一般负载高,SRS支持的并发足够跑满千兆网带宽了。 + +> Remark: SRS Edge does not support Transcoding, DVR and HLS, which is supported by SRS Origin Server. + +Edge的主要应用场景: +* CDN/VDN大规模集群,客户众多流众多需要按需回源。 +* 小规模集群,但是流比较多,需要按需回源。 +* 骨干带宽低,边缘服务器强悍,可以使用多层edge,降低上层BGP带宽。 + +注意:edge可以从源站拉流,也可以将流转发给源站。也就是说,播放edge上的流时,edge会 +回源拉流;推流到edge上时,edge会直接将流转发给源站。 + +注意:若只需要中转流给源站,不必用forward,直接使用edge模式即可。可以直接支持推流 +和拉流的中转,简单快捷。Forward应用于目标服务器是多个,譬如将一路流主动送给多路服务 +器;edge虽然配置了多台服务器,但是只用了一台,有故障时才切换。 + +注意:优先使用edge,除非知道必须用forward,才使用forward。 + +## 概念 + +所谓边缘edge服务器,就是边缘直播缓存服务器,配置时指定为remote模式和origin(指定一 +个或多个源站IP),这个边缘edge服务器就是源站的缓存了。 + +当用户推流到边缘服务器时,边缘直接将流转发给源站。譬如源站在北京BGP机房,湖南有个 +电信ADSL用户要推流发布自己的直播流,要是直接推流到北京BGP可能效果不是很好,可以在 +湖南电信机房部署一个边缘,用户推流到湖南边缘,边缘转发给北京源站BGP。 + +当用户播放边缘服务器的流时,边缘服务器看有没有缓存,若缓存了就直接将流发给客户端。 +若没有缓存,则发起一路回源链接,从源站取数据源源不断放到自己的缓存队列。也就是说, +多个客户端连接到边缘时,只有一路回源。这种结构在CDN是最典型的部署结构。譬如北京源站, +在全国32个省每个省都部署了10台服务器,一共就有320台边缘,假设每个省1台边缘服务器都有 +2000用户观看,那么就有64万用户,每秒钟集群发送640Gbps数据;而回源链接只有320个, +实现了大规模分发。 + +边缘edge服务器,实际上是解决大并发问题产生的分布式集群结构。SRS的边缘可以指定多个源站, +在源站出现故障时会自动切换到下一个源站,不影响用户观看,具有最佳的容错性,用户完全不会觉察。 + +## Config + +edge属于vhost的配置,将某个vhost配置为edge后,该vhost会回源取流(播放时)或者将流转发 +给源站(发布时)。 + +```bash +vhost __defaultVhost__ { + # The config for cluster. + cluster { + # The cluster mode, local or remote. + # local: It's an origin server, serve streams itself. + # remote: It's an edge server, fetch or push stream to origin server. 
+ # default: local + mode remote; + + # For edge(mode remote), user must specifies the origin server + # format as: [:port] + # @remark user can specifies multiple origin for error backup, by space, + # for example, 192.168.1.100:1935 192.168.1.101:1935 192.168.1.102:1935 + origin 127.0.0.1:1935 localhost:1935; + + # For edge(mode remote), whether open the token traverse mode, + # if token traverse on, all connections of edge will forward to origin to check(auth), + # it's very important for the edge to do the token auth. + # the better way is use http callback to do the token auth by the edge, + # but if user prefer origin check(auth), the token_traverse if better solution. + # default: off + token_traverse off; + + # For edge(mode remote), the vhost to transform for edge, + # to fetch from the specified vhost at origin, + # if not specified, use the current vhost of edge in origin, the variable [vhost]. + # default: [vhost] + vhost same.edge.srs.com; + + # For edge(mode remote), when upnode(forward to, edge push to, edge pull from) is srs, + # it's strongly recommend to open the debug_srs_upnode, + # when connect to upnode, it will take the debug info, + # for example, the id, source id, pid. + # please see https://ossrs.net/lts/zh-cn/docs/v4/doc/log + # default: on + debug_srs_upnode on; + } +} +``` + +可配置`多个`源站,在故障时会切换到下一个源站。 + +## 集群配置 + +下面举例说明如何配置一个源站和集群。 + +源站配置,参考`origin.conf`: + +```bash +listen 19350; +pid objs/origin.pid; +srs_log_file ./objs/origin.log; +vhost __defaultVhost__ { +} +``` + +边缘配置,参考`edge.conf`: + +```bash +listen 1935; +pid objs/edge.pid; +srs_log_file ./objs/edge.log; +vhost __defaultVhost__ { + cluster { + mode remote; + origin 127.0.0.1:19350; + } +} +``` + +## HLS边缘 + +Edge指的是RTMP边缘,也就是说,配置为Edge后,流推送到源站(Origin)时,Edge不会切片生成HLS。 + +HLS切片配置在源站,只有源站会在推流上来就产生HLS切片。边缘只有在访问时才会回源(这个时候 +也会生成HLS,但单独访问边缘的HLS是不行的)。 + +也就是说,HLS的边缘需要使用WEB服务器缓存,譬如nginx反向代理,squid,或者traffic server等。 + +## 下行边缘结构设计 + +下行边缘指的是下行加速边缘,即客户端播放边缘服务器的流,边缘服务器从上层或源站取流。 + +SRS下行边缘是非常重要的功能,需要考虑以下因素: +* 以后支持多进程时结构变动最小。 +* 和目前所有功能的对接良好。 +* 支持平滑切换,源站和边缘两种角色。 + +权衡后,SRS下行边缘的结构设计如下: +* 客户端连接到SRS +* 开始播放SRS的流 +* 若流存在则直接播放。 +* 若流不存在,则从源站开始取流。 +* 其他其他流的功能,譬如转码/转发/采集等等。 + +核心原则是: +* 边缘服务器在没有流时,向源站拉取流。 +* 当流建立起来后,边缘完全变成源站服务器,对流的处理逻辑保持一致。 +* 支持回多个源站,错误时切换。这样可以支持上层服务器热备。 + +备注:RTMP多进程(计划中)的核心原则是用多进程作为完全镜像代理,连接到本地的服务器 +(源站或边缘),完全不考虑其他业务因素,透明代理。这样可以简单,而且利用多CPU能力。 +HTTP多进程是不考虑支持的,用NGINX是最好选择,SRS的HTTP服务器只是用在嵌入式设备中, +没有性能要求的场合。 + +## 上行边缘结构设计 + +上行边缘指的是上行推流加速,客户端推流到边缘服务器,边缘将流转发给源站服务器。 + +考虑到下行和上行可能同时发生在一台边缘服务器,所以上行边缘只能用最简单的代理方式, +完全将流代理到上层或源站服务器。也就是说,只有在下行边缘时,边缘服务器才会启用其他 +的功能,譬如HLS转发等等。 + +上行边缘主要流程是: +* 客户端连接到SRS +* 开始推流到SRS。 +* 开始转发到源站服务器。 + +## EdgeState + +边缘的状态图分析如下: + +![RTMP-HLS-latency](/img/doc-main-concepts-edge-002.jpg) + +注意:这种细节的文档很难保持不变,以代码为准。 + +## 边缘的难点 + +RTMP边缘对于SRS来讲问题不大,主要是混合了reload和HLS功能的边缘服务器,会是一个难点。 + +譬如,用户在访问边缘上的HLS流时,是使用nginx反向代理回源,还是使用RTMP回源后在边缘切片? 
+对于前者,需要部署srs作为RTMP边缘,nginx作为HLS边缘,管理两个服务器自然是比一个要费劲。 +若使用后者,即RTMP回源后边缘切片,能节省骨干带宽,只有一路回源,难点在于访问HLS时要发起 +RTMP回源连接。 + +正因为业务逻辑会是边缘服务器的难点,所以SRS对于上行边缘,采取直接代理方式,并没有采取 +边缘缓存方式。所谓边缘缓存方式,即推流到边缘时边缘也会当作源站直接缓存(作为源站), +然后转发给源站。边缘缓存方式看起来先进,这个边缘节点不必回源,实际上加大了集群的逻辑难度, +不如直接作为代理方式简单。 + +## Transform Vhost + +一般CDN都支持上行和下行边缘加速,上行和下行的域名是分开的,譬如上行使用`up.srs.com`,下行使用`down.srs.com`,这样可以使用不同的设备组,避免下行影响下行之类。 + +用户在推流到`up.srs.com`时,边缘使用edge模式,回源时也是用的`up.srs.com`,到源站还是`up.srs.com`,所以播放`down.srs.com`这个vhost的流时就播放不了用户推的那个流。因此需要edge在回源时transform vhost,也就是转换vhost。 + +解决方案:在最上层edge,可以配置回源的vhost,默认使用当前的vhost。譬如上行`up.srs.com`,可以指定回源`down.srs.com`;配置时指定`vhost down.srs.com;`就可以了。 + +具体配置参考上面的Config。 + +Winlin 2015.4 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/edge) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/exporter.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/exporter.md new file mode 100644 index 00000000..fb96aed4 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/exporter.md @@ -0,0 +1,183 @@ +--- +title: Prometheus Exporter +sidebar_label: Exporter +hide_title: false +hide_table_of_contents: false +--- + +# Prometheus Exporter + +SRS的可观测性是支撑业务的运营的能力,主要指监控(Prometheus Exporter)、分布式链路追踪(APM)、上下文日志(Cloud Logging)三个核心能力,以及基于这些能力的运营大盘、监控系统、问题排查、日志收集和分析等功能。 + +## Introduction + +可观测性,在云原生中有详细的定义,参考[OpenTelemetry.io](https://opentelemetry.io),从可观测性上看其实就是三个问题: + +* [监控指标(Metrics)](https://opentelemetry.io/docs/concepts/observability-primer/#reliability--metrics):就是我们一般所理解的监控告警。监控一般是将一些数据聚合,体现系统在不同层面的状态,当达到某个状态后告警。比如区域的水位值,达到一定水位后就需要自动或人工扩容,或者调整调度降低这个区域的负载。 +* [分布式跟踪(Tracing)](https://opentelemetry.io/docs/concepts/observability-primer/#distributed-traces):我们排查问题时,一般是按照会话或请求维度排查,在系统中会涉及多个服务器,比如播放一个流会经过API、调度、边缘、上游服务器、源站等,如何把这个全链路的信息给出来,就是分布式追踪(Tracing)。明显这是非常高效的解决问题的方法,问题迟迟得不到解决甚至不了了之,迟早会失去用户。 +* [日志(Logging)](https://opentelemetry.io/docs/concepts/observability-primer/#logs): 就是我们一般所理解的日志,也是一般研发所依赖的排查问题的几乎唯一的方法。其实日志是最低效的方法,因为日志没有上下文,无法在分布式系统中分离出某个会话的多个日志。日志只有具备追踪的能力,或者在关联到Traceing中,这样才能更高效。 + +![](/img/doc-2022-10-30-001.png) + +> Note: 上图请参考[Metrics, tracing, and logging](https://peter.bourgon.org/blog/2017/02/21/metrics-tracing-and-logging.html) + +针对上面的问题,SRS的运营能力分成几个独立的部分,首先是提供了Prometheus可以对接的Exporter,Prometheus可以直接从SRS拉取监控数据,而不依赖外部第三方服务,如下图所示: + +``` ++-----+ +-----------+ +---------+ +| SRS +--Exporter-->--| Promethus +-->--+ Grafana + ++-----+ (HTTP) +-----------+ +---------+ +``` + +> Note: Promethus是云原生的标准监控系统,在K8s中部署也可以使用这个能力,比如通过Pod发现和采集数据。 + +下面是关于Exporter的配置。 + +## Config + +Exporter的配置如下,推荐使用环境变量方式开启配置: + +```bash +# Prometheus exporter config. +# See https://prometheus.io/docs/instrumenting/exporters +exporter { + # Whether exporter is enabled. + # Overwrite by env SRS_EXPORTER_ENABLED + # Default: off + enabled off; + # The http api listen port for exporter metrics. + # Overwrite by env SRS_EXPORTER_LISTEN + # Default: 9972 + # See https://github.com/prometheus/prometheus/wiki/Default-port-allocations + listen 9972; + # The logging label to category the cluster servers. + # Overwrite by env SRS_EXPORTER_LABEL + label cn-beijing; + # The logging tag to category the cluster servers. 
+ # Overwrite by env SRS_EXPORTER_TAG + tag cn-edge; +} +``` + +下面是详细的使用说明。 + +## Usage for SRS Exporter + +首先,编译和启动SRS,要求`SRS 5.0.86+`: + +```bash +./configure && make +env SRS_ENV_ONLY=on SRS_EXPORTER_ENABLED=on SRS_LISTEN=1935 \ + ./objs/srs -e +``` + +> Note: 我们使用环境变量方式配置SRS,不依赖配置文件。当然使用`conf/prometheus.conf`启动也可以。 + +> Note: SRS启动成功后,可以打开[http://localhost:9972/metrics](http://localhost:9972/metrics)验证,能看到返回指标数据就是成功了。 + +接着,我们启动FFmpeg推流: + +```bash +docker run --rm -it registry.cn-hangzhou.aliyuncs.com/ossrs/srs:encoder ffmpeg -stream_loop -1 -re -i doc/source.flv \ + -c copy -f flv rtmp://host.docker.internal/live/livestream +``` + +然后,启动[node_exporter](https://github.com/prometheus/node_exporter),收集节点的数据,这样和SRS的服务器数据可以形成完整的监控数据: + +```bash +docker run --rm -p 9100:9100 prom/node-exporter +``` + +> Note: 用Docker启动node_exporter数据不准,需要特殊的权限而mac不支持。实际场景请使用二进制直接在主机上启动,可以从[这里](https://github.com/prometheus/node_exporter/releases)下载对应系统的二进制。 + +> Note: node_exporter启动后,可以打开[http://localhost:9100/metrics](http://localhost:9100/metrics)验证,能看到返回指标数据就是成功了。 + +最后,编写配置文件`prometheus.yml`,内容如下: + +```yml +scrape_configs: + - job_name: "node" + metrics_path: "/metrics" + scrape_interval: 5s + static_configs: + - targets: ["host.docker.internal:9100"] + - job_name: "srs" + metrics_path: "/metrics" + scrape_interval: 5s + static_configs: + - targets: ["host.docker.internal:9972"] +``` + +> Note: 默认`scrape_interval`是1m即一分钟,为了测试方便我们设置为`5s`。 + +启动Prometheus: + +```bash +docker run --rm -v $(pwd)/prometheus.yml:/etc/prometheus/prometheus.yml \ + -p 9090:9090 prom/prometheus +``` + +打开[Prometheus: Targets](http://localhost:9090/targets),可以看到抓取数据的状态。打开[Prometheus: Graph](http://localhost:9090/graph),输入一下语句,可以验证是否正常: + +```sql +rate(srs_receive_bytes_total[10s])*8 +``` + +这个语句是计算输入的带宽,也就是直播流的码率,如下图所示: + +![](/img/doc-2022-10-30-002.png) + +虽然Prometheus也能生成图,不过一般是使用Grafana对接Prometheus展示图表。 + +## Usage for Grafana + +首先启动Grafana: + +```bash +docker run --rm -it -p 3000:3000 \ + -e GF_SECURITY_ADMIN_USER=admin \ + -e GF_SECURITY_ADMIN_PASSWORD=12345678 \ + -e GF_USERS_DEFAULT_THEME=light \ + grafana/grafana +``` + +然后打开Grafana页面:[http://localhost:3000/](http://localhost:3000/) + +输入用户名`admin`,以及密码`12345678`就可以进入Grafana后台了。 + +执行命令[添加](https://grafana.com/docs/grafana/latest/developers/http_api/data_source/#create-a-data-source)Prometheus的DataSource: + +```bash +curl -s -H "Content-Type: application/json" \ + -XPOST http://admin:12345678@localhost:3000/api/datasources \ + -d '{ + "name": "prometheus", + "type": "prometheus", + "access": "proxy", "isDefault": true, + "url": "http://host.docker.internal:9090" +}' +``` + +执行命令[导入](https://grafana.com/docs/grafana/latest/developers/http_api/dashboard/#create--update-dashboard)HelloWorld图表: + +```bash +data=$(curl https://raw.githubusercontent.com/ossrs/srs-grafana/main/dashboards/helloworld-import.json 2>/dev/null) +curl -s -H "Content-Type: application/json" \ + -XPOST http://admin:12345678@localhost:3000/api/dashboards/db \ + --data-binary "{\"dashboard\":${data},\"overwrite\":true,\"inputs\":[],\"folderId\":0}" +``` + +> Note: 这里[srs-grafana](https://github.com/ossrs/srs-grafana/tree/main/dashboards)有更丰富的仪表盘,可以选择手动导入,或者修改上面的导入命令。 + +导入后就可以在[仪表盘中](http://localhost:3000/dashboards)看到了,如下图所示: + +![](/img/doc-2022-10-30-003.png) + +我们还提供了更加完整的仪表盘,可以在[srs-grafana](https://github.com/ossrs/srs-grafana/tree/main/dashboards)中看到,如下图所示: + +![](/img/doc-2022-10-30-004.png) + +欢迎一起来完善SRS仪表盘。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/exporter) 
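+
+## Alerting Example
+
+在 Prometheus 抓取到 SRS 指标之后,也可以基于这些指标配置告警规则。下面是一个最小化的示例写法(仅供参考,其中的规则文件名、阈值和持续时长都是假设的),利用前面查询用到的 `srs_receive_bytes_total` 指标,在输入码率长时间过低时触发告警:
+
+```yml
+# srs-rules.yml:假设的规则文件名,需要在 prometheus.yml 的 rule_files 中引用。
+groups:
+  - name: srs
+    rules:
+      # 输入码率持续低于 1kbps 超过 2 分钟时告警,阈值和时长仅为示例。
+      - alert: SrsNoIncomingStream
+        expr: rate(srs_receive_bytes_total[1m]) * 8 < 1000
+        for: 2m
+        labels:
+          severity: warning
+        annotations:
+          summary: "SRS 输入码率过低,可能没有推流"
+```
+
+将该文件通过 `rule_files` 加载到 Prometheus 后,即可在 Alertmanager 或 Grafana 中消费这些告警。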
+ diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/ffmpeg.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/ffmpeg.md new file mode 100644 index 00000000..e1914347 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/ffmpeg.md @@ -0,0 +1,388 @@ +--- +title: FFMPEG +sidebar_label: FFMPEG +hide_title: false +hide_table_of_contents: false +--- + +# Live Streaming Transcode + +SRS可以对推送到SRS的RTMP流进行转码,然后输出到RTMP服务器(也可以是SRS自己)。 + +## Use Scenario + +FFMPEG的重要应用场景包括: +* 推送一路高码率,转多路输出。譬如:游戏直播中,推送一路1080p流到SRS,SRS可以转码输出1080p/720p/576p多路,低码率可以给移动设备观看。这样节省了推流带宽(一般源站为BGP带宽,很贵),也减轻了客户端压力(譬如客户端边玩游戏边直播)。 +* 支持多屏输出。譬如:网页推流(主播)编码为vp6/mp3或speex,推流到SRS后无法支持HLS(要求h264+aac),可以转码成h264+aac后切片成HLS或者推送到其他服务器再分发。 +* 加水印。适用于需要对流进行加水印的情况,譬如打上自己的logo。SRS支持文字水印和图片水印,也可以支持视频作为水印,或者将两路流叠加(参考ffmpeg的用法)。 +* 截图:参考[使用Transcoder截图](./snapshot.md#transcoder) +* 其他滤镜:SRS支持所有ffmpeg的滤镜。 + +## Workflow + +SRS转码的主要流程包括: + +1. 编码器推送RTMP流到SRS的vhost。 +1. SRS的vhost若配置了转码,则进行转码。 +1. 转码后,按照配置,推送到SRS本身或者其他RTMP服务器。 + +## Transcode Config + +SRS可以对vhost的所有的流转码,或者对某些app的流转码,或者对某些流转码。 + +```bash +listen 1935; +vhost __defaultVhost__ { + # the streaming transcode configs. + transcode { + # whether the transcode enabled. + # if off, donot transcode. + # default: off. + enabled on; + # the ffmpeg + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + # the transcode engine for matched stream. + # all matched stream will transcoded to the following stream. + # the transcode set name(ie. hd) is optional and not used. + # we will build the parameters to fork ffmpeg: + # ffmpeg + # -i + # + # -vcodec -b:v -r -s x -profile:v -preset + # + # -acodec -b:a -ar -ac + # + # -f + # -y + engine example { + # whether the engine is enabled + # default: off. + enabled on; + # pre-file options, before "-i" + perfile { + re; + rtsp_transport tcp; + } + # input format, can be: + # off, do not specifies the format, ffmpeg will guess it. + # flv, for flv or RTMP stream. + # other format, for example, mp4/aac whatever. + # default: flv + iformat flv; + # ffmpeg filters, follows the main input. + vfilter { + # the logo input file. + i ./doc/ffmpeg-logo.png; + # the ffmpeg complex filter. + # for filters, @see: http://ffmpeg.org/ffmpeg-filters.html + filter_complex 'overlay=10:10'; + } + # video encoder name. can be: + # libx264: use h.264(libx264) video encoder. + # png: use png to snapshot thumbnail. + # copy: donot encoder the video stream, copy it. + # vn: disable video output. + vcodec libx264; + # video bitrate, in kbps + # @remark 0 to use source video bitrate. + # default: 0 + vbitrate 1500; + # video framerate. + # @remark 0 to use source video fps. + # default: 0 + vfps 25; + # video width, must be even numbers. + # @remark 0 to use source video width. + # default: 0 + vwidth 768; + # video height, must be even numbers. + # @remark 0 to use source video height. + # default: 0 + vheight 320; + # the max threads for ffmpeg to used. + # default: 1 + vthreads 12; + # x264 profile, @see x264 -help, can be: + # high,main,baseline + vprofile main; + # x264 preset, @see x264 -help, can be: + # ultrafast,superfast,veryfast,faster,fast + # medium,slow,slower,veryslow,placebo + vpreset medium; + # other x264 or ffmpeg video params + vparams { + # ffmpeg options, @see: http://ffmpeg.org/ffmpeg.html + t 100; + # 264 params, @see: http://ffmpeg.org/ffmpeg-codecs.html#libx264 + coder 1; + b_strategy 2; + bf 3; + refs 10; + } + # audio encoder name. can be: + # libfdk_aac: use aac(libfdk_aac) audio encoder. 
+ # copy: donot encoder the audio stream, copy it. + # an: disable audio output. + acodec libfdk_aac; + # audio bitrate, in kbps. [16, 72] for libfdk_aac. + # @remark 0 to use source audio bitrate. + # default: 0 + abitrate 70; + # audio sample rate. for flv/rtmp, it must be: + # 44100,22050,11025,5512 + # @remark 0 to use source audio sample rate. + # default: 0 + asample_rate 44100; + # audio channel, 1 for mono, 2 for stereo. + # @remark 0 to use source audio channels. + # default: 0 + achannels 2; + # other ffmpeg audio params + aparams { + # audio params, @see: http://ffmpeg.org/ffmpeg-codecs.html#Audio-Encoders + # @remark SRS supported aac profile for HLS is: aac_low, aac_he, aac_he_v2 + profile:a aac_low; + bsf:a aac_adtstoasc; + } + # output format, can be: + # off, do not specifies the format, ffmpeg will guess it. + # flv, for flv or RTMP stream. + # image2, for vcodec png to snapshot thumbnail. + # other format, for example, mp4/aac whatever. + # default: flv + oformat flv; + # output stream. variables: + # [vhost] the input stream vhost. + # [port] the intput stream port. + # [app] the input stream app. + # [stream] the input stream name. + # [engine] the tanscode engine name. + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +该配置对频道的所有流转码。譬如: +* 推送流:rtmp://dev:1935/live/livestream +* 观看原始流:rtmp://dev:1935/live/livestream +* 观看转码流:rtmp://dev:1935/live/livestream_ff + +输出配置使用了变量替换,主要的参数是: +* [vhost] 输入流的vhost。譬如:dev +* [port] 输入流的端口。譬如:1935 +* [app] 输入流的app。譬如:live +* [stream] 输入流名称。譬如:livestream +* [engine] 转码engine名称,engine后面就是名称。譬如:ff +注意:转码会使用自动检测,保证推送到自己的流不会被再次转码。但转码推送到SRS自己的流可以被切片成HLS。譬如,若开启了HLS,上面的live/livestream,和转码出来的流live/livestream_ff都能观看HLS。 + +对app或流转码时,只要在transcode后面加app和stream就可以。譬如: + +```bash +listen 1935; +vhost __defaultVhost__ { + # 对app为live的所有流转码 + transcode live{ + } +} +``` + +以及对指定的流转码: + +```bash +listen 1935; +vhost __defaultVhost__ { + # 对app为live并且流名称为livestream的流转码 + transcode live/livestream{ + } +} +``` + +## Transcode Rulers + +SRS的转码参数全是FFMPEG的参数,有些参数SRS做了自定义,见下表。 + +| SRS参数 | FFMPEG参数 | 实例 | 说明 | +| ------ | --------- | ---- | ----- | +| vcodec | vcodec | ffmpeg ... -vcodec libx264 ... | 指定视频编码器 | +| vbitrate | b:v | ffmpeg ... -b:v 500000 ... | 输出的视频码率 | +| vfps | r | ffmpeg ... -r 25 ... | 输出的视频帧率 | +| vwidth/vheight | s | ffmpeg ... -s 400x300 -aspect 400:300 ... | 输出的视频宽度x高度,以及宽高比 | +| vthreads | threads | ffmpeg ... -threads 8 ... | 编码线程数 | +| vprofile | profile:v | ffmpeg ... -profile:v high ... | 编码x264的profile | +| vpreset | preset | ffmpeg ... -preset medium ... | 编码x264的preset | +| acodec | acodec | ffmpeg ... -acodec libfdk_aac ... | 音频编码器 | +| abitrate | b:a | ffmpeg ... -b:a 70000 ... | 音频输出码率。libaacplus:16-72k。libfdk_aac没有限制。 | +| asample_rate | ar | ffmpeg ... -ar 44100 ... | 音频采样率 | +| achannels | ac | ffmpeg ... -ac 2 ... | 音频声道 | + +另外,还有四个是可以加其他ffmpeg参数: +* perfile: 添加在iformat之前的参数。譬如指定rtsp的transport为tcp。 +* vfilter:添加在vcodec之前的滤镜参数。 +* vparams:添加在vcodec之后,acodec之前的视频编码参数。 +* aparams:添加在acodec之后,-y之前的音频编码参数。 + +这些参数应用的顺序是: +```bash +ffmpeg -f flv -i {vfilter} -vcodec ... {vparams} -acodec ... 
{aparams} -f flv -y {output} +``` + +具体参数可以查看SRS的日志,譬如: +```bash +[2014-02-28 21:38:09.603][4][trace][start] start transcoder, +log: ./objs/logs/encoder-__defaultVhost__-live-livestream.log, +params: ./objs/ffmpeg/bin/ffmpeg -f flv -i +rtmp://127.0.0.1:1935/live?vhost=__defaultVhost__/livestream +-vcodec libx264 -b:v 500000 -r 25.00 -s 768x320 -aspect 768:320 +-threads 12 -profile:v main -preset medium -acodec libfdk_aac +-b:a 70000 -ar 44100 -ac 2 -f flv +-y rtmp://127.0.0.1:1935/live?vhost=__defaultVhost__/livestream_ff +``` + +## FFMPEG Log Path + +FFMPEG启动后,SRS会将stdout和stderr都定向到日志文件,譬如`./objs/logs/encoder-__defaultVhost__-live-livestream.log`,有时候日志会比较大。可以配置ffmpeg输出较少日志: + +```bash +listen 1935; +vhost __defaultVhost__ { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine ff { + enabled on; + vfilter { + # -v quiet + v quiet; + } + vcodec libx264; + vbitrate 500; + vfps 25; + vwidth 768; + vheight 320; + vthreads 12; + vprofile main; + vpreset medium; + vparams { + } + acodec libfdk_aac; + abitrate 70; + asample_rate 44100; + achannels 2; + aparams { + } + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +对ffmpeg添加`-v quiet`参数即可。 + +## Copy Without Transcode + +可以配置vcodec/acodec copy,实现不转码。譬如,视频为h264编码,但是音频是mp3/speex,需要转码音频为aac,然后切片为HLS输出。 + +```bash +listen 1935; +vhost __defaultVhost__ { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine ff { + enabled on; + vcodec copy; + acodec libfdk_aac; + abitrate 70; + asample_rate 44100; + achannels 2; + aparams { + } + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +或者拷贝视频和音频: +```bash +listen 1935; +vhost __defaultVhost__ { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine ff { + enabled on; + vcodec copy; + acodec copy; + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +## Drop Video or Audio + +可以禁用视频或者音频,只输出音频或视频。譬如,电台可以丢弃视频,对音频转码为aac后输出HLS。 + +可以配置vcodec为vn,acodec为an实现禁用。例如: + +```bash +listen 1935; +vhost __defaultVhost__ { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine vn { + enabled on; + vcodec vn; + acodec libfdk_aac; + abitrate 45; + asample_rate 44100; + achannels 2; + aparams { + } + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +该配置只输出纯音频,编码为aac。 + +## Other Transcode Config + +conf/full.conf中有很多FFMPEG转码配置的实例,也可以参考ffmpeg的命令行。 +* mirror.transcode.srs.com 将视频流上半截,翻转到下半截,看起来像个镜子。 +* drawtext.transcode.srs.com 加文字水印。 +* crop.transcode.srs.com 剪裁视频。 +* logo.transcode.srs.com 添加图片logo。 +* audio.transcode.srs.com 只对音频转码。 +* copy.transcode.srs.com 不转码只转封装,类似于SRS的Forward。 +* all.transcode.srs.com 转码参数的详细说明。 +* ffempty.transcode.srs.com 一个ffmpeg的mock,不转码只打印参数。 +* app.transcode.srs.com 对指定的app的流转码。 +* stream.transcode.srs.com 对指定的流转码。 +* vn.transcode.srs.com 只输出音频,禁止视频输出。 + +## FFMPEG Transcode the Stream by Flash encoder + +flash可以当作编码器推流,参考演示中的编码器或者视频会议。flash只支持speex/nellymoser/pcma/pcmu,但flash会有一个特性,没有声音时就没有音频包。FFMPEG会依赖于这些音频包,如果没有会认为没有音频。 + +所以FFMPEG用来转码flash推上来的RTMP流时,可能会有一个问题:ffmpeg认为没有音频。 + +另外,FFMPEG取flash的流的时间会很长,也可能是在等待这些音频包。 + +## FFMPEG + +FFMPEG相关链接: +* [ffmpeg.org](http://ffmpeg.org) +* [ffmpeg命令行](http://ffmpeg.org/ffmpeg.html) +* [ffmpeg滤镜](http://ffmpeg.org/ffmpeg-filters.html) +* [ffmpeg编解码参数](http://ffmpeg.org/ffmpeg-codecs.html) + +Winlin 2015.6 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/ffmpeg) + + diff --git 
a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/flv-vod-stream.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/flv-vod-stream.md new file mode 100644 index 00000000..63a0bd90 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/flv-vod-stream.md @@ -0,0 +1,47 @@ +--- +title: 点播FLV流 +sidebar_label: 点播FLV流 +hide_title: false +hide_table_of_contents: false +--- + +# 点播FLV流 + +## HTTP VOD + +推荐以下的方式: +* 点播建议用http分发,http服务器一大堆。 +SRS能将直播流录制为flv文件,并且提供了一些工具来支持flv点播流, +但是应该使用其他的HTTP服务器分发flv文件。 +* 总之,srs不支持点播,只支持直播。这是官方回答。 + +点播FLV流的主要流程是: + +* 服务器录制直播为FLV文件,或者上传FLV点播文件资源,到SRS的HTTP根目录:`objs/nginx/html` +* HTTP服务器必须要支持flv的start=offset,譬如nginx的flv模块,或者SRS的实验性HTTP服务器。 +* 使用`research/librtmp/objs/srs_flv_injecter`将FLV的时间和对于的offset(文件偏移量)写入FLV的metadata。 +* 播放器请求FLV文件,譬如:`http://192.168.1.170:8080/sample.flv` +* 用户点击进度条进行SEEK,譬如SEEK到300秒。 +* 播放器根据inject的时间和offset对应关系找出准确的关键帧的offset。譬如:300秒偏移是`6638860` +* 根据offset发起新请求:`http://192.168.1.170:8080/sample.flv?start=6638860` + +备注:SRS还不支持限速,会以最快的速度将文件发给客户端。 +备注:SRS还提供了查看FLV文件内容的工具`research/librtmp/objs/srs_flv_parser`,可以看到metadata和每个tag信息。 + +## SRS Embeded HTTP server + +SRS支持http-api,因此也能解析HTTP协议(目前是部分支持),所以也实现了一个简单的HTTP服务器。 + +SRS的HTTP服务器已经重写,稳定可以商用。 + +对于一些嵌入式设备,并发也不高时,可以考虑使用SRS的HTTP服务器分发HLS,这样比较简单。 + +## Config + +参考[HTTP Server](./http-server.md#config) + +Winlin 2015.1 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/flv-vod-stream) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/flv.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/flv.md new file mode 100644 index 00000000..33eccbac --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/flv.md @@ -0,0 +1,213 @@ +--- +title: HTTP-FLV +sidebar_label: HTTP-FLV +hide_title: false +hide_table_of_contents: false +--- + +# HTTP-FLV + +HTTP-FLV是一种直播流协议,有时候也简称FLV,是在HTTP连接上传输FLV格式的直播流。 + +和文件下载不同的是,直播流的长度是无限长,或者不确定长度,因此一般是基于HTTP Chunked协议实现。和HTTP-FLV类似的,还有HTTP-TS, +或者HTTP-MP3,TS主要应用于广播电视领域,MP3主要应用于音频领域。 + +和HLS不同的是,HLS本质上就是HTTP文件下载,而HTTP-FLV本质上是流传输。CDN对于HTTP文件下载的支持很完善,因此HLS的兼容性比HTTP-FLV +要好很多;同样HTTP-FLV的延迟比HLS要低很多,基本上可以做到3的5秒左右延迟,而HLS的延迟一般是8到10秒以上。 + +从协议实现上看,RTMP和HTTP-FLV几乎一样,RTMP是基于TCP协议,而HTTP-FLV基于HTTP也是TCP协议,因此两者的特点也非常类似。一般推流和 +流的生产使用RTMP,主要是因为流的生产设备都支持RTMP;而流的播放和消费端采用HTTP-FLV或这HLS,因为播放设备支持HTTP更完善。 + +HTTP-FLV的兼容性很好,除了iOS原生浏览器不支持,其他平台和浏览器都支持了,参考[MSE](https://caniuse.com/?search=mse)。 +若需要支持iOS浏览器,你可以考虑使用HLS或者使用WASM;注意一般iOS的Native应用,可以选择使用ijkplayer播放器。 + +## Usage + +SRS支持HTTP-FLV分发,可以用[docker](./getting-started.md)或者[从源码编译](./getting-started-build.md): + +```bash +docker run --rm -it -p 1935:1935 -p 8080:8080 registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 \ + ./objs/srs -c conf/http.flv.live.conf +``` + +使用 [FFmpeg(点击下载)](https://ffmpeg.org/download.html) 或 [OBS(点击下载)](https://obsproject.com/download) 推流: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +打开下面的页面播放流(若SRS不在本机,请将localhost更换成服务器IP): + +* HLS by SRS player: [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html) + +## Config + +HTTP-FLV相关的配置如下: + +```bash +http_server { + # whether http streaming service is enabled. 
+ # Overwrite by env SRS_HTTP_SERVER_ENABLED + # default: off + enabled on; + # the http streaming listen entry is <[ip:]port> + # for example, 192.168.1.100:8080 + # where the ip is optional, default to 0.0.0.0, that is 8080 equals to 0.0.0.0:8080 + # @remark, if use lower port, for instance 80, user must start srs by root. + # Overwrite by env SRS_HTTP_SERVER_LISTEN + # default: 8080 + listen 8080; + # whether enable crossdomain request. + # for both http static and stream server and apply on all vhosts. + # Overwrite by env SRS_HTTP_SERVER_CROSSDOMAIN + # default: on + crossdomain on; +} +vhost __defaultVhost__ { + # http flv/mp3/aac/ts stream vhost specified config + http_remux { + # whether enable the http live streaming service for vhost. + # Overwrite by env SRS_VHOST_HTTP_REMUX_ENABLED for all vhosts. + # default: off + enabled on; + # the fast cache for audio stream(mp3/aac), + # to cache more audio and send to client in a time to make android(weixin) happy. + # @remark the flv/ts stream ignore it + # @remark 0 to disable fast cache for http audio stream. + # Overwrite by env SRS_VHOST_HTTP_REMUX_FAST_CACHE for all vhosts. + # default: 0 + fast_cache 30; + # Whether drop packet if not match header. For example, there is has_audio and has video flag in FLV header, if + # this is set to on and has_audio is false, then SRS will drop audio packets when got audio packets. Generally + # it should work, but sometimes you might need SRS to keep packets even when FLV header is set to false. + # See https://github.com/ossrs/srs/issues/939#issuecomment-1348740526 + # TODO: Only support HTTP-FLV stream right now. + # Overwrite by env SRS_VHOST_HTTP_REMUX_DROP_IF_NOT_MATCH for all vhosts. + # Default: on + drop_if_not_match on; + # Whether stream has audio track, used as default value for stream metadata, for example, FLV header contains + # this flag. Sometimes you might want to force the metadata by disable guess_has_av. + # For HTTP-FLV, use this as default value for FLV header audio flag. See https://github.com/ossrs/srs/issues/939#issuecomment-1351385460 + # For HTTP-TS, use this as default value for PMT table. See https://github.com/ossrs/srs/issues/939#issuecomment-1365086204 + # Overwrite by env SRS_VHOST_HTTP_REMUX_HAS_AUDIO for all vhosts. + # Default: on + has_audio on; + # Whether stream has video track, used as default value for stream metadata, for example, FLV header contains + # this flag. Sometimes you might want to force the metadata by disable guess_has_av. + # For HTTP-FLV, use this as default value for FLV header video flag. See https://github.com/ossrs/srs/issues/939#issuecomment-1351385460 + # For HTTP-TS, use this as default value for PMT table. See https://github.com/ossrs/srs/issues/939#issuecomment-1365086204 + # Overwrite by env SRS_VHOST_HTTP_REMUX_HAS_VIDEO for all vhosts. + # Default: on + has_video on; + # Whether guessing stream about audio or video track, used to generate the flags in, such as FLV header. If + # guessing, depends on sequence header and frames in gop cache, so it might be incorrect especially your stream + # is not regular. If not guessing, use the configured default value has_audio and has_video. + # For HTTP-FLV, enable guessing for av header flag, because FLV can't change the header. See https://github.com/ossrs/srs/issues/939#issuecomment-1351385460 + # For HTTP-TS, ignore guessing because TS refresh the PMT when codec changed. 
See https://github.com/ossrs/srs/issues/939#issuecomment-1365086204 + # Overwrite by env SRS_VHOST_HTTP_REMUX_GUESS_HAS_AV for all vhosts. + # Default: on + guess_has_av on; + # the stream mount for rtmp to remux to live streaming. + # typical mount to [vhost]/[app]/[stream].flv + # the variables: + # [vhost] current vhost for http live stream. + # [app] current app for http live stream. + # [stream] current stream for http live stream. + # @remark the [vhost] is optional, used to mount at specified vhost. + # the extension: + # .flv mount http live flv stream, use default gop cache. + # .ts mount http live ts stream, use default gop cache. + # .mp3 mount http live mp3 stream, ignore video and audio mp3 codec required. + # .aac mount http live aac stream, ignore video and audio aac codec required. + # for example: + # mount to [vhost]/[app]/[stream].flv + # access by http://ossrs.net:8080/live/livestream.flv + # mount to /[app]/[stream].flv + # access by http://ossrs.net:8080/live/livestream.flv + # or by http://192.168.1.173:8080/live/livestream.flv + # mount to [vhost]/[app]/[stream].mp3 + # access by http://ossrs.net:8080/live/livestream.mp3 + # mount to [vhost]/[app]/[stream].aac + # access by http://ossrs.net:8080/live/livestream.aac + # mount to [vhost]/[app]/[stream].ts + # access by http://ossrs.net:8080/live/livestream.ts + # @remark the port of http is specified by http_server section. + # Overwrite by env SRS_VHOST_HTTP_REMUX_MOUNT for all vhosts. + # default: [vhost]/[app]/[stream].flv + mount [vhost]/[app]/[stream].flv; + } +} +``` + +> Note: 这些配置只是播放HTTP-FLV相关的配置,推流的配置请根据你的协议,比如参考[RTMP](./rtmp.md#config)或者[SRT](./srt.md#config)或者[WebRTC](./webrtc.md#config)的推流配置。 + +关键配置说明如下: + +* `has_audio` 是否有音频流,如果你的流没有音频,则需要配置这个为`off`,否则播放器可能会等待音频。 +* `has_video` 是否有视频流,如果你的流没有视频,则需要配置这个为`off`,否则播放器可能会等待视频。 + +## Cluster + +SRS支持HTTP-FLV集群分发,可以支持海量的观看客户端,参考[HTTP-FLV Cluster](./sample-http-flv-cluster.md)和[Edge](./edge.md) + +## Crossdomain + +SRS默认支持了HTTP CORS,请参考[HTTP CORS](./http-server.md#crossdomain) + +## Websocket FLV + +可以将HTTP-FLV转成WebSocket-FLV流,参考[videojs-flow](https://github.com/winlinvip/videojs-flow)。 + +关于HTTP转WebSocket参考[mse.go](https://github.com/winlinvip/videojs-flow/blob/master/demo/mse.go)。 + +## HTTP FLV VOD Stream + +关于HTTP flv 点播流,参考:[v4_CN_FlvVodStream](./flv-vod-stream.md) + +## HTTP and HTTPS Proxy + +SRS可以和HTTP/HTTPS代理一起工作得很好,比如[Nginx](https://github.com/ossrs/srs/issues/2881#nginx-proxy), +[HTTPX](https://github.com/ossrs/srs/issues/2881#httpx-proxy), [CaddyServer](https://github.com/ossrs/srs/issues/2881#caddy-proxy), +等等。详细配置请参考 [#2881](https://github.com/ossrs/srs/issues/2881)。 + +## HTTPS FLV Live Stream + +SRS支持将RTMP流转封装为HTTPS flv流,即在publish发布RTMP流时,在SRS的http模块中挂载一个对应的http地址(根据配置), +用户在访问这个https flv文件时,从rtmp流转封装为flv分发给用户。 + +具体请参考[HTTPS Server](./http-server.md#https-server),或者`conf/https.flv.live.conf`配置文件。 + +## HTTP TS Live Stream + +SRS支持将RTMP流转封装为HTTP ts流,即在publish发布RTMP流时,在SRS的http模块中挂载一个对应的http地址(根据配置), +用户在访问这个http ts文件时,从rtmp流转封装为ts分发给用户。 + +具体请参考`conf/http.ts.live.conf`配置文件。 + +## HTTP Mp3 Live Stream + +SRS支持将rtmp流中的视频丢弃,将音频流转封装为mp3格式,在SRS的http模块中挂载对应的http地址(根据配置), +用户在访问这个http mp3文件时,从rtmp转封装为mp3分发给用户。 + +具体请参考`conf/http.mp3.live.conf`配置文件。 + +## HTTP Aac Live Stream + +SRS支持将rtmp流中的视频丢弃,将音频流转封装为aac格式,在SRS的http模块中挂载对应的http地址(根据配置), +用户在访问这个http aac文件时,从rtmp转封装为aac分发给用户。 + +具体请参考`conf/http.aac.live.conf`配置文件。 + +## Why HTTP FLV + +为何要整个HTTP FLV出来呢?当下HTTP FLV流正大行其道。主要的优势在于: + +1. 互联网流媒体实时领域,还是RTMP。HTTP-FLV和RTMP的延迟一样,因此可以满足延迟的要求。 +1. 
穿墙:很多防火墙会墙掉RTMP,但是不会墙HTTP,因此HTTP FLV出现奇怪问题的概率很小。 +1. 调度:RTMP也有个302,可惜是播放器as中支持的,HTTP FLV流就支持302方便CDN纠正DNS的错误。 +1. 容错:SRS的HTTP FLV回源时可以回多个,和RTMP一样,可以支持多级热备。 +1. 通用:Flash可以播RTMP,也可以播HTTP FLV。自己做的APP,也都能支持。主流播放器也都支持http flv的播放。 +1. 简单:FLV是最简单的流媒体封装,HTTP是最广泛的协议,这两个到一起维护性很高,比RTMP简单多了。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/flv) + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/forward.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/forward.md new file mode 100644 index 00000000..3702fd45 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/forward.md @@ -0,0 +1,289 @@ +--- +title: Forward +sidebar_label: Forward +hide_title: false +hide_table_of_contents: false +--- + +# Forward For Small Cluster + +srs定位为直播服务器,其中一项重要的功能是forward,即将服务器的流转发到其他服务器。 + +备注:SRS的边缘RTMP参考[Edge](./edge.md),支持访问时回源,为大规模并发提供最佳解决方案。 + +注意:edge可以从源站拉流,也可以将流转发给源站。也就是说,播放edge上的流时,edge会回源拉流;推流到edge上时,edge会直接将流转发给源站。 + +注意:若只需要中转流给源站,不必用forward,直接使用edge模式即可。可以直接支持推流和拉流的中转,简单快捷。Forward应用于目标服务器是多个,譬如将一路流主动送给多路服务器;edge虽然配置了多台服务器,但是只用了一台,有故障时才切换。 + +注意:优先使用edge,除非知道必须用forward,才使用forward。 + +forward本身是用做热备,即用户推一路流上来,可以被SRS转发(或者转码后转发)到多个slave源站,CDN边缘可以回多个slave源,实现故障热备的功能,构建强容错系统。 + +转发的部署实例参考:[Usage: Forward](./sample-forward.md) + +## Keywords + +为了和edge方式区分,forward定义一次词汇如下: + +* master:主服务器,编码器推流到这个服务器,或者用ingest流到服务器。总之,master就是主服务器,负责转发流给其他服务器。 +* slave:从服务器,主服务器转发流到这个服务器。 + +如果结合edge集群方式,一般而言master和slave都是origin(源站服务器),edge边缘服务器可以从master或者slave回源取流。 + +实际上master和slave也可以是edge,但是不推荐,这种组合方式太多了,测试没有办法覆盖到。因此,强烈建议简化服务器的结构,只有origin(源站服务器)才配置转发,edge(边缘服务器)只做边缘。 + +## Config + +可以参考`full.conf`中的`same.vhost.forward.srs.com`的配置: + +``` +vhost __defaultVhost__ { + # forward stream to other servers. + forward { + # whether enable the forward. + # default: off + enabled on; + # forward all publish stream to the specified server. + # this used to split/forward the current stream for cluster active-standby, + # active-active for cdn to build high available fault tolerance system. + # format: {ip}:{port} {ip_N}:{port_N} + destination 127.0.0.1:1936 127.0.0.1:1937; + + # when client(encoder) publish to vhost/app/stream, call the hook in creating backend forwarder. + # the request in the POST data string is a object encode by json: + # { + # "action": "on_forward", + # "server_id": "vid-k21d7y2", + # "client_id": "9o7g1330", + # "ip": "127.0.0.1", + # "vhost": "__defaultVhost__", + # "app": "live", + # "tcUrl": "rtmp://127.0.0.1:1935/live", + # "stream": "livestream", + # "param": "" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # { + # "code": 0, + # "data": { + # "urls":[ + # "rtmp://127.0.0.1:19350/test/teststream" + # ] + # } + # } + # PS: you can transform params to backend service, such as: + # { "param": "?forward=rtmp://127.0.0.1:19351/test/livestream" } + # then backend return forward's url in response. + # if backend return empty urls, destanition is still disabled. 
+ # only support one api hook, format: + # backend http://xxx/api0 + backend http://127.0.0.1:8085/api/v1/forward; + } +} +``` + +## Dynamic Forward + +SRS支持动态Forward,从你的后端服务查询是否需要转发,以及转发的目标地址。 + +你必须自己实现一个后端服务器,也就是一个HTTP服务器,或者Web服务器。你的后端服务器接收SRS发起的HTTP请求,然后把需要转发的RTMP服务器地址返回 +给SRS,最后SRS就会将RTMP流转推给目标RTMP服务器。工作流如下: + +```text + +------+ +Client ---Push-RTMP-->--+ SRS +---HTTP-Request---> Your Backend Server + | | + + + +--<---Forward-Config----+ + | | + + +----Push-RTMP----> RTMP Server + +------+ +``` + +首先,配置`backend`,你的后端服务的地址: + +``` +vhost __defaultVhost__ { + forward { + enabled on; + backend http://127.0.0.1:8085/api/v1/forward; + } +} +``` + +当推流到SRS时,SRS会调用你的后端服务,SRS的请求Body如下: + +```json +{ + "action": "on_forward", + "server_id": "vid-k21d7y2", + "client_id": "9o7g1330", + "ip": "127.0.0.1", + "vhost": "__defaultVhost__", + "app": "live", + "tcUrl": "rtmp://127.0.0.1:1935/live", + "stream": "livestream", + "param": "" +} +``` + +如果你的后端服务返回了RTMP urls,SRS会开始转发到这个RTMP地址: + +```json +{ + "code": 0, + "data": { + "urls":[ + "rtmp://127.0.0.1:19350/test/teststream" + ] + } +} +``` + +> Note: 如果urls为空数组,SRS不会转发。 + +关于动态Forward的信息,请参考[#1342](https://github.com/ossrs/srs/issues/1342)。 + +## For Small Cluster + +forward也可以用作搭建小型集群。架构图如下: + +```bash + +-------------+ +---------------+ + +-->+ Slave(1935) +->--+ Player(3000) + + | +-------------+ +---------------+ + | +-------------+ +---------------+ + |-->+ Slave(1936) +->--+ Player(3000) + + publish forward | +-------------+ +---------------+ ++-----------+ +--------+ | 192.168.1.6 +| Encoder +-->-+ Master +-->-| ++-----------+ +--------+ | +-------------+ +---------------+ + 192.168.1.3 192.168.1.5 +-->+ Slave(1935) +->--+ Player(3000) + + | +-------------+ +---------------+ + | +-------------+ +---------------+ + +-->+ Slave(1936) +->--+ Player(3000) + + +-------------+ +---------------+ + 192.168.1.7 +``` + +下面是搭建小型集群的实例。 + +### Encoder + +编码器使用FFMPEG推流。编码参数如下: + +```bash +for((;;)); do\ + ./objs/ffmpeg/bin/ffmpeg -re -i doc/source.flv \ + -c copy -f flv rtmp://192.168.1.5:1935/live/livestream; \ +done +``` + +### SRS-Master Server + +SRS(192.168.1.5)的配置如下: + +```bash +listen 1935; +pid ./objs/srs.pid; +max_connections 10240; +vhost __defaultVhost__ { + forward { + enabled on; + destination 192.168.1.6:1935 192.168.1.6:1936 192.168.1.7:1935 192.168.1.7:1936; + } +} +``` + +源站的流地址播放地址是:`rtmp://192.168.1.5/live/livestream` + +将流forward到两个边缘节点上。 + +### SRS-Slave Server + +Slave节点启动多个SRS的进程,每个进程一个配置文件,侦听不同的端口。 + +以192.168.1.6的配置为例,需要侦听1935和1936端口。 + +配置文件`srs.1935.conf`配置如下: + +```bash +listen 1935; +pid ./objs/srs.1935.pid; +max_connections 10240; +vhost __defaultVhost__ { +} +``` + +配置文件`srs.1936.conf`配置如下: + +```bash +listen 1936; +pid ./objs/srs.1936.pid; +max_connections 10240; +vhost __defaultVhost__ { +} +``` + +启动两个SRS进程: + +```bash +nohup ./objs/srs -c srs.1935.conf >/dev/null 2>&1 & +nohup ./objs/srs -c srs.1936.conf >/dev/null 2>&1 & +``` + +播放器可以随机播放着两个流: +* `rtmp://192.168.1.6:1935/live/livestream` +* `rtmp://192.168.1.6:1936/live/livestream` + +另外一个Slave节点192.168.1.7的配置和192.168.1.6一样。 + +### Stream in Service + +此架构服务中的流为: + +| 流地址 | 服务器 | 端口 | 连接数 | +| ---- | ----- | ----- | ------- | +| rtmp://192.168.1.6:1935/live/livestream | 192.168.1.6 | 1935 | 3000 | +| rtmp://192.168.1.6:1936/live/livestream | 192.168.1.6 | 1936 | 3000 | +| rtmp://192.168.1.7:1935/live/livestream | 192.168.1.7 | 1935 | 3000 | +| rtmp://192.168.1.7:1936/live/livestream | 192.168.1.7 | 1936 | 3000 | + +这个架构每个节点可以支撑6000个并发,两个节点可以支撑1.2万并发。 +还可以加端口,可以支持更多并发。 
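+播放器随机选择上表中的地址,或者由一个简单的调度API返回地址,都可以把观看请求分摊到各个边缘端口上。下面是一个示意性的Go调度接口(非SRS自带,仅作思路参考),其中的地址列表、接口路径`/api/edge`和端口`3001`都是假设的,请按实际部署修改:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"log"
+	"math/rand"
+	"net/http"
+)
+
+// 上表中的四个边缘流地址,请按你的实际部署修改。
+var edgeURLs = []string{
+	"rtmp://192.168.1.6:1935/live/livestream",
+	"rtmp://192.168.1.6:1936/live/livestream",
+	"rtmp://192.168.1.7:1935/live/livestream",
+	"rtmp://192.168.1.7:1936/live/livestream",
+}
+
+func main() {
+	// GET /api/edge 随机返回一个边缘地址,播放器先请求这个接口,再播放返回的地址。
+	http.HandleFunc("/api/edge", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		json.NewEncoder(w).Encode(map[string]string{
+			"url": edgeURLs[rand.Intn(len(edgeURLs))],
+		})
+	})
+	log.Fatal(http.ListenAndServe(":3001", nil))
+}
+```
+
+> Note: 这只是负载均衡思路的示意,下文`Forward VS Edge`一节也提到了播放器随机访问多个端口、或者由API服务器做负载均衡的方式,实际策略请结合你的业务实现。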
+ +## Forward VS Edge + +Forward架构和CDN架构的最大区别在于,CDN属于大规模集群,边缘节点会有成千上万台,源站2台(做热备),还需要有中间层。CDN的客户很多,流也会有很多。所以假若源站将每个流都转发给边缘,会造成巨大的浪费(有很多流只有少数节点需要)。 + +可见,forward只适用于所有边缘节点都需要所有的流。CDN是某些边缘节点需要某些流。 + +forward的瓶颈在于流的数目,假设每个SRS只侦听一个端口: + +```bash +系统中流的数目 = 编码器的流数目 × 节点数目 × 端口数目 +``` + +考虑5个节点,每个节点起4个端口,即有20个SRS边缘。编码器出5路流,则有`20 * 5 = 100路流`。 + +同样的架构,对于CDN的边缘节点来讲,系统的流数为`用户访问边缘节点的流`,假设没有用户访问,系统中就没有流量。某个区域的用户访问某个节点上的流,系统中只有一路流,而不是forward广播式的多路流。 + +另外,forward需要播放器随机访问多个端口,实现负载均衡,或者播放器访问api服务器,api服务器实现负载均衡,对于CDN来讲也不合适(需要客户改播放器)。 + +总之,forward适用于小型规模的集群,不适用于CDN大规模集群应用。 + +## Other Use Scenarios + +forward还可以结合hls和transcoder功能使用,即在源站将流转码,然后forward到Slave节点,Slave节点支持rtmp同时切HLS。 + +因为用户推上来的流,或者编码器(譬如FMLE)可能不是h264+aac,需要先转码为h264+aac(可以只转码音频)后才能切片为hls。 + +需要结合vhost,先将流transcode送到另外一个vhost,这个vhost将流转发到Slave。这样可以只转发转码的流。 + +参考vhost,hls和transcoder相关wiki。 + +Winlin 2014.2 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/forward) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/gb28181.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/gb28181.md new file mode 100644 index 00000000..20400bd9 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/gb28181.md @@ -0,0 +1,452 @@ +--- +title: GB28181 +sidebar_label: GB28181 +hide_title: false +hide_table_of_contents: false +--- + +# GB28181 + +支持GB28181是正确的事情,可能也是困难的事情,因为困难所以有趣。 + +重要说明:SRS 5.0已经是beta或更稳定的版本,推荐使用SRS 5.0的GB,而不要使用4.0的GB,因为虽然4.0是稳定发布版本,但是4.0的GB是实验性的和不稳定的。 + +研发的详细过程请参考[#3176](https://github.com/ossrs/srs/issues/3176)。 + +## Usage + +首先,编译和启动SRS,请确认版本为`5.0.74+`: + +```bash +./configure --gb28181=on +make +./objs/srs -c conf/gb28181.conf +``` + +> Note: 如果你是公网服务器,一定要配置对Candidate,请参考[Candidate](#candidate)的说明。 + +然后,在摄像头配置中,选择AAC编码,然后在平台中配置SIP服务器为SRS,如下图所示: + +![](/img/doc-2022-10-08-001.png) + +![](/img/doc-2022-10-08-002.png) + +* 必须是`AAC`编码,在音频编码中,选择`AAC`,采样率`44100HZ`。 +* 必须是`GB-2016`标准,否则不支持`TCP`,在协议版本中选择`GB/T28181-2016`。 +* 必须是`TCP`协议,不支持`UDP`,在传输协议中选择`TCP`,并使用`GB-2016`标准。 + +摄像头注册后,SRS会自动邀请摄像头推流,可以打开下面的链接播放: + +* [http://localhost:8080/live/34020000001320000001.flv](http://localhost:8080/players/srs_player.html?stream=34020000001320000001.flv) +* [http://localhost:8080/live/34020000001320000001.m3u8](http://localhost:8080/players/srs_player.html?stream=34020000001320000001.m3u8) +* [webrtc://localhost/live/34020000001320000001](http://localhost:8080/players/rtc_player.html?stream=34020000001320000001) + +> Note: 请把流名称换成你的设备名称,然后点播放。 + +## Candidate + +如果服务器IP不是内网IP,比如部署在公网,则SRS无法获取自己的出口IP,需要配置: + +```bash +stream_caster { + enabled on; + caster gb28181; + listen 9000; + sip { + enabled on; + listen 5060; + candidate a.b.c.d; + } +} +``` + +> Note: 请将`stream_caster.sip.candidate`换成摄像头能访问到你的服务器的IP,不管是内网还是公网IP,摄像头能访问到就可以。 + +GB的Candidate定义和[WebRTC: Candidate](./webrtc.md#config-candidate)概念上一致,都是需要暴露一个客户端能访问的IP地址,在SDP中传递给客户端。比如: + +1. 在SRS配置中设置`stream_caster.sip.candidate`,SRS启动会读取这个配置,比如`192.168.1.100`。 +1. GB设备通过SIP注册到SRS,SRS发起INVITE消息,消息的Body就是SDP,SDP会指定这个IP地址,比如`IN IP4 192.168.1.100`。 +1. 
GB设备连接这个IP地址`tcp://192.168.1.100:9000`,并发起媒体请求。 + +> Note: 媒体的端口是配置在`stream_caster.listen`中的,目前只支持TCP端口。 + +这个`CANDIDATE`就是媒体服务器的IP,它和SIP的服务器地址可以是不同的,SIP服务器地址是在[Usage](#usage)中配置在客户端的。 + +> Note: 由于GB的SIP协议,在REGISTER时To字段并没有带服务器的地址,所以导致服务器无法从SIP中发现自己的地址,只能依靠服务器配置。 + +当然,如果网卡配置了客户端可以访问的地址,可以把`CANDIDATE`配置为`*`,让SRS自己发现。 + +## Latency + +与普遍认知相反,安全摄像头并非天生就是低延迟系统。它们的主要目的是长期存储。在平移-倾斜-缩放(PTZ)场景中,可能存在一定的延迟要求, +但这并不是我们通常所说的低延迟。PTZ摄像头的延迟通常在1秒左右,这被认为是可以接受的,而实时通信(RTC)中的低延迟通常在200ms左右。 + +延迟不仅与服务器有关,还涉及到整个链路。关于详细的延迟优化,请参考[Low Latency](./low-latency.md)。 +我使用默认配置测试了海康威视摄像头的延迟,如下所示: + +* 流类型:`子流` +* 视频类型:`复合流` +* 分辨率:`640x480` +* 码率类型:`可变码率` +* 图像质量:`中等` +* 视频帧率:`25`fps +* 码率限制:`1024`Kbps +* 视频编码:`H.264` +* 编码复杂度:`中等` +* I帧间隔:`50` +* SVC:`禁用` +* 平台接入协议:`GB/T28181-2016` +* 平台传输协议:`TCP`,即通过GB/TCP将流推送到SRS +* 观看:WebRTC(UDP) +* SRS版本:`v5.0.100` +* 摄像头和服务器:内网 +* 延迟:358ms + +## Features + +目前SRS支持的GB的功能清单: + +1. 摄像头通过SIP注册。srs-gb28181支持。**SRS 5.0 支持**。 +1. 自动邀请摄像头推流。srs-gb28181支持。**SRS 5.0 支持**。 +1. GB/2016转RTMP协议。srs-gb28181支持。**SRS 5.0 支持**。 +1. 基于TCP的SIP信令。srs-gb28181支持。**SRS 5.0 支持**。 +1. TCP单端口传输媒体。srs-gb28181支持。**SRS 5.0 支持**。 + +目前还没有支持的GB功能: + +1. 基于UDP的SIP信令。srs-gb28181支持。SRS 5.0 不支持。 +1. UDP单端口传输媒体。srs-gb28181支持。SRS 5.0 不支持。 +1. GB/2011转RTMP协议。srs-gb28181支持。SRS 5.0 不支持。 +1. UDP/TCP多端口传输媒体。srs-gb28181支持。SRS 5.0 不支持。 +1. HTTP API查询GB流。srs-gb28181支持。SRS 5.0 不支持。 +1. HTTP API云台摄像头。srs-gb28181支持。SRS 5.0 不支持。 +1. Web管理页面。srs-gb28181支持。SRS 5.0 不支持。 +1. GB下级服务器。srs-gb28181不支持。SRS 5.0 不支持。 +1. GB语音对讲。srs-gb28181不支持。SRS 5.0 不支持。 +1. GB回看。srs-gb28181不支持。SRS 5.0 不支持。 +1. GB加密传输。srs-gb28181不支持。SRS 5.0 不支持。 + +希望大家降低期望,GB的坑太难填了,希望不要期待SRS能做多好。 + +## Protocols + +GB相关的协议如下: + +* [RFC3261: SIP: Session Initiation Protocol](https://www.ietf.org/rfc/rfc3261.html) +* [RFC4566: SDP: Session Description Protocol](https://www.ietf.org/rfc/rfc4566.html) +* [RFC4571: RTP & RTCP over Connection-Oriented Transport](https://www.ietf.org/rfc/rfc4571.html) +* [GB28181-2016: 公共安全视频监控联网系统信息传输、交换、控制技术要求](https://openstd.samr.gov.cn/bzgk/gb/newGbInfo?hcno=469659DC56B9B8187671FF08748CEC89) +* [ISO13818-1-2000](https://ossrs.net/lts/zh-cn/assets/files/hls-mpeg-ts-iso13818-1-d21d1e9765012a327f03b43ce460079a.pdf): MPEGPS(Program Stream), PS媒体流规范。 + +## External SIP + +> Note: SRS 6.0.144+支持外部SIP,若需要使用这个功能,请升级到这个版本。 + +目前SRS内置的SIP服务器仅实现了简单的`Register`、`Invite`指令,而要实现GB/T-28181的全部功能,势必会引入复杂的上层业务逻辑。 +因此,我们开发了一个独立的外置的SIP服务器。而SRS,只需开放几个简单的API接口,这样既保证了其媒体转发服务器的单一属性, +又兼顾了与第三方SIP信令服务器对接的需求。 + +播放器请求SRS-SIP,SRS-SIP向SRS Server申请媒体端口,然后邀请GB28181 Device设备推流。设备推流到SRS后,播放器直接从SRS播放流。 +下面是几个组件的关系图,详细的交互时序图参考[srs-sip](https://github.com/ossrs/srs-sip#sequence) + +```text + +-------------API/Media--------------------+ + | | ++----------+ +------------+ +------+------+ +----------------+ +| Player +--API--+ SRS-SIP +---API--+ SRS Server +----Media----+ GB28181 Device + ++----------+ +-----+------+ +-------------+ +-------+--------+ + | | + +------------------SIP-------------------------+ +``` + +> Note: 暂时没有实现鉴权功能,敬请期待。 + +摄像头上面的配置方法同上,仅需将SIP服务器地址从SRS改成SRS-SIP。 + +首先启动SRS,请确认版本为`6.0.144+`,使用配置`conf/gb28181-without-sip.conf`,参考[Usage](#usage)。 + +```bash +./objs/srs -c conf/gb28181-without-sip.conf +``` + +然后启动SRS-SIP,参考[srs-sip](https://github.com/ossrs/srs-sip#usage)。 + +```bash +./bin/srs-sip -sip-port 5060 -media-addr 127.0.0.1:1985 -api-port 2020 -http-server-port 8888 +``` + +* `-sip-port`是SIP服务器的端口,默认是5060。GB摄像头和这个SIP服务器通信,完成设备注册等能力。 +* `-media-addr`是SRS的媒体服务器地址,SIP服务器返回这个地址给GB摄像头,GB摄像头推流到这个地址。 +* 
`-api-port`是SIP服务器的API端口,默认是2020。这个API是给Player和用户使用的,比如查询设备列表、要求摄像头推流等。 +* `-http-server-port`是SIP服务器的Web端口,默认是8888。这个HTTP服务器是提供网页的web服务器,用户通过网页访问摄像头。 + +启动GB28181设备,将SIP服务器地址改成SRS-SIP的地址,端口为5060。 + +现在,可以通过SRS-SIP内置的网页播放器测试 [http://localhost:8888](http://localhost:8888) + +## SIP Parser + +SRS本身也内嵌了一个简单的SIP服务器,支持部分SIP协议的解析;不过C++没有特别好的SIP库,这也是之前SIP处理不稳定的一个原因。 + +调研发现,SIP协议和HTTP协议结构非常一致,因此SRS采用[http-parser](https://github.com/ossrs/http-parser)解析SIP,这个库是nodejs维护的,之前好像是NGINX中扒出来的,所以稳定性还是非常高的。 + +当然用HTTP解析SIP,需要有些修改,主要是以下修改: + +* Method:需要新增几个方法,比如`REGISTER`、`INVITE`、`ACK`、`MESSAGE`和`BYE`,这是GB常用的几个消息。 +* RequestLine:解析path时需要修改,SIP是`sip:xxx`格式,会被认为是HTTP完整URL格式导致解析失败。 +* ResponseLine:生成Response时需要修改,主要是协议头,从`HTTP/1.1`改成`SIP.2.0`。 + +基本上改变非常小,所以协议稳定性是可以保障,可以算是解决了一个难题。 + +SIP和HTTP不同的是,在同一个TCP通道中,并不一定就是一个Request对应一个Response,比如INVITE之后,可能会有100和200两个响应,而SRS也不固定就是Server,也有可能是Client。而这些情况,http-parser可以设置为BOTH方式,这样可以解析出Request和Response: + +``` + SrsHttpParser* parser = new SrsHttpParser(); + SrsAutoFree(SrsHttpParser, parser); + + // We might get SIP request or response message. + if ((err = parser->initialize(HTTP_BOTH)) != srs_success) { + return srs_error_wrap(err, "init parser"); + } +``` + +> Note: 从HTTP消息来看,并没有规定只能一个Request对应一个Response,因此这个也不会带来额外问题。 + +在实际解析中,发现有时候发送的头有空格,比如: + +``` +Content-Length: 142\r\n +``` + +这实际上是符合规范的,但如果手动解析可能会有问题,而HTTP-Parser能正确处理这种情况。 + +## REGISTER + +GB的注册流程: + +1. 在设备设置好SIP服务器为SRS。 +1. 设备发送SIP格式的REGISTER消息。 +1. SRS回应200/OK,注册成功。 + +GB的心跳: + +1. 设备后面会不断发送MESSAGE作为心跳消息。 +1. SRS回应200/OK,心跳成功。 + +SRS若重启后,由于没有保存任何状态,所以收到的可能是设备的MESSAGE消息,而没有REGISTER消息,所以希望设备能重新注册。向各位同学以及SIP和GB的专家请教后,重新注册的可能方法包括: + +* 不回应MESSAGE消息,一般3次心跳超时(在设备配置上有设置)。验证发现,海康设备心跳周期默认60秒,所以大概在3分钟左右会重新注册。 +* 对MESSAGE回应403或者其他消息。验证发现,效果和不回应一样,设备并不会特别处理。 +* 给设备发送重启指令,参考`A.2.3 控制命令`,远程启动是`Boot`,尝试重启设备后会重新注册。这个还没验证。 +* 回应REGISTER消息时,将EXPIRE设置短一些,缩短注册的间隔,比如改成30秒。验证发现,尽管设置为30秒,但还是会在一个心跳时间才会重新注册,也就是60秒。 +* 在设备的配置上,将心跳周期改短一些,默认60秒,最小是5秒,这样超时会更快。验证发现,心跳改5秒,最短可以26秒左右重新注册。 +* 加一层SIP Proxy,让Proxy来保存相关的信息,将状态转移到Proxy。这个方案应该可行,不过SRS不太合适,引入额外组件会让开源很复杂,大家自己的实现中可以尝试。 +* 重启前发消息。这个方案在SRS Gracefully Quit时有效,但有时候会`kill -9`或者系统OOM,不会给程序机会清理,所以这个不能适应所有场景。不过在主动升级时,一般会用Gracefully Quit,这时可以有机会处理这个问题,大家可以尝试。 + +总之,是没有特别可靠的办法能让摄像头立刻重新注册,SRS必须在逻辑上处理这个问题:SRS启动或重启后,摄像头还在已经注册,甚至在传输流的状态。 + +> Note: 由于很多问题都是持续长时间运行,而系统的某一方重启了,导致状态不一致,引起各种问题。因此,在SRS重启或者启动时,若发现有摄像头是在注册或传输流的状态,那么应该尝试让摄像头重新走一次流程,比如重新注册和重新推流,这样让双方的状态一致,可靠性会更高。 + +> Note: 验证发现,重新注册,对正在传输的媒体流不影响。设备会探测端口可达性,如果TCP断开,或者UDP端口不可达,则会停止流传输。 + +## TCP or UDP + +在使用TCP或UDP协议上,我们选择先支持TCP协议,包括SIP信令和PS媒体。 + +根据SIP协议的规定,TCP是必须要支持的,也是RFC3261比RFC2543一个重要的更新,参考[RFC3261: Transport](https://www.ietf.org/rfc/rfc3261.html#section-18)。 + +至于媒体协议,GB由于使用了PS格式,其实PS一般是用于存储格式,而TS是网络传输格式,或者说TS考虑了更多的网络传输问题,而PS则更多假设像磁盘读写文件一样可靠,因此,PS基于TCP传输也会更加简单。 + +GB 2016中对于TCP的描述在`附录L`,即`基于TCP协议的视音频媒体传输`: + +> 实时视频点播、历史视频回放与下载的TCP媒体传输应支持基于RTP封装的视音频PS流,封装格式参照IETF RFC 4571。 + +在实际应用中,大部分也是使用TCP,而不是用UDP,特别在公网上UDP会有丢包,而GB没有设计重传或FEC。使用TCP的好处: + +* UDP由于无状态,在服务器重启时,设备感知不到服务重启,可能还能继续传输数据,导致两边状态不同步,长久持续这样可能会导致问题,比如设备会提示请求超过上限。 +* GB的信令和媒体分离,如果使用TCP则可以很好的同步状态,比如信令可用媒体不可用或断开,媒体可用信令不可用,这些最终都反应到连接的断开。具体请参考[Protocol Notes](#protocol-notes)。 +* 服务器重启后,可以使用缩短REGISTER的Expires,缩短心跳间隔,让设备重新注册,重新进入推流状态。服务器重启后,设备可以快速感知到媒体链路断开。 +* 传输过程中,若出现网络抖动导致链接断开,服务器和设备都可以很快感知到,进入异常处理流程。 + +因此,SRS先支持TCP,而不支持UDP。也就是先支持GB28181 2016,而不是支持GB28181 2011。 + +> Note: 需要显式开启GB28181-2016,并开启TCP协议才可以。 + +## Protocol Notes + +SIP协议上特别需要注意的地方: + +* Via的branch必须是`z9hG4bK`开头,参考[Via](https://www.ietf.org/rfc/rfc3261.html#section-8.1.1.7)的说明。 +* 
INVITE的200(OK)的ACK消息,ACK的Via的branch必须是新的,ACK并不是INVITE的transaction,参考[Via](https://www.ietf.org/rfc/rfc3261.html#section-8.1.1.7)和[Example](https://www.ietf.org/rfc/rfc3261.html#section-24.2)。 +* INVITE的Contact是自己的地址,而不是GB设备的,也就是Contact应该由From生成而不是To,参考[Contact](https://www.ietf.org/rfc/rfc3261.html#section-8.1.1.8)和[Example](https://www.ietf.org/rfc/rfc3261.html#section-24.2)。 +* INVITE的Subject,定义为`媒体流发送者ID:发送方媒体流序列号,媒体流接收者ID:接收方媒体流序列号`,参考[附录K](https://openstd.samr.gov.cn/bzgk/gb/newGbInfo?hcno=469659DC56B9B8187671FF08748CEC89)。对于`s=Play`实时观看的场景,接收方媒体流序列号(SSRC)其实没有定义;根据各位同学反馈,一般这个字段填0。 + +SDP协议上特别注意的地方: + +* y字段: 为十进制整数字符串, 表示SSRC值。格式如下:`dddddddddd`。其中, 第1位为历史或实时媒体流的标识位, 0为实时, 1为历史;第2位至第6位取20位SIP监控域ID之中的4到8位作为域标识, 例如`13010000002000000001`中取数字`10000`; 第7位至第10位作为域内媒体流标识, 是一个与当前域内产生的媒体流SSRC值后4位不重复的四位十进制整数。 + +> Note: SDP中的`y=`字段,是GB扩展的字段,在WebRTC中是用`a=ssrc:xxxx`表达的SSRC。 + +信令和媒体配合: + +* 信令注册、INVITE、TRYING、200、ACK后,媒体开始传输。参考 [gb-media-ps-normal.pcapng.zip](https://github.com/ossrs/srs/files/9630224/gb-media-ps-normal.pcapng.zip) +* 媒体正常传输过程中,信令重新注册,不影响媒体,继续正常传输。 参考 [gb-media-ps-sip-register-loop.pcapng.zip](https://github.com/ossrs/srs/files/9630227/gb-media-ps-sip-register-loop.pcapng.zip) +* 信令正常完成INVITE,媒体TCP端口若不打开,设备尝试连接一次后放弃。参考 [gb-media-disabled-sip-ok.pcapng.zip](https://github.com/ossrs/srs/files/9630216/gb-media-disabled-sip-ok.pcapng.zip) +* 媒体正常传输过程中,信令断开,一定时间后,媒体断开。参考 [gb-media-ps-sip-disconnect.pcapng.zip](https://github.com/ossrs/srs/files/9630218/gb-media-ps-sip-disconnect.pcapng.zip) +* 媒体正常传输过程中,TCP连接断开,客户端不会重试。参考 [gb-media-disconnect-sip-ok.pcapng.zip](https://github.com/ossrs/srs/files/9630220/gb-media-disconnect-sip-ok.pcapng.zip) + +媒体协议: + +* 解析媒体流时,可能会出现各种错误,此时会丢弃整个pack的数据,直到下一个pack到来(`00 00 01 ba`)。其中包括RTP解析失败,非法的PS头(非`00 00 01`开头),部分PES头(比如在前一个TCP包的尾部),甚至还有RFC4571的包解析失败(头两个字节代表的长度信息是0)。 +* SRS支持恢复模式,遇到解析摄像头的包失败会进入恢复模式,但有时候也会出现无法恢复的情况,因此会限制每次最大的恢复次数,如果连续多个包还不能恢复,那就断开媒体连接,进入信令重新INVITE的过程。若包长度异常,很大概率是无法恢复,则关闭恢复模式,直接进入重新INVITE流程。 +* 媒体使用MPEGPS流,其中length为16位,也就是PES最大长度为64KB。PS是对超过64KB的帧直接分包,分成多个PES,按照时间戳组合;另外,一个pack中只有一个Video,可能会有多个Audio,因此Audio每帧不超过64KB。示例如下所示: + +媒体PS组包,超过64KB的情况: + +``` +PS: New pack header clock=2454808848, rate=159953 +PS: New system header rate_bound=159953, video_bound=1, audio_bound=1 +PS: Got message Video, dts=2454808848, seq=22204, base=2454808848 payload=29B, 0, 0, 0, 0x1, 0x67, 0x4d, 0, 0x32 +PS: Got message Video, dts=0, seq=22204, base=2454808848 payload=8B, 0, 0, 0, 0x1, 0x68, 0xee, 0x3c, 0x80 +PS: Got message Video, dts=0, seq=22204, base=2454808848 payload=9B, 0, 0, 0, 0x1, 0x6, 0xe5, 0x1, 0x2b +PS: Got message Video, dts=0, seq=22250, base=2454808848 payload=65471B, 0, 0, 0, 0x1, 0x65, 0xb8, 0, 0 +PS: Got message Video, dts=0, seq=22252, base=2454808848 payload=2112B, 0x48, 0x4c, 0xf2, 0x94, 0xaa, 0xbc, 0xed, 0x3d +PS: Got message Audio, dts=2454812268, seq=22253, base=2454808848 payload=99B, 0xff, 0xf9, 0x50, 0x40, 0xc, 0x7f, 0xfc, 0x1 +PS: Got message Audio, dts=2454814338, seq=22254, base=2454808848 payload=96B, 0xff, 0xf9, 0x50, 0x40, 0xc, 0x1f, 0xfc, 0x1 + +PS: New pack header clock=2454812448, rate=159953 +PS: Got message Video, dts=2454812448, seq=22283, base=2454812448 payload=39457B, 0, 0, 0, 0x1, 0x61, 0xe0, 0x8, 0xbf +PS: Got message Audio, dts=2454816498, seq=22284, base=2454812448 payload=101B, 0xff, 0xf9, 0x50, 0x40, 0xc, 0xbf, 0xfc, 0x1 +PS: Got message Audio, dts=2454818568, seq=22285, base=2454812448 payload=107B, 0xff, 0xf9, 0x50, 0x40, 0xd, 0x7f, 0xfc, 0x1 +``` + +> Note: 这两有两个pack,每个pack只有一个Video帧(不算编码头),每个都有两个Audio包。 
+> Note: 第一个pack,前三个Video(Seq=22204),是编解码信息,一般在I帧前面都是编码头,SPS/PPS等信息。 +> Note: 第一个pack,后两个Video(Seq=22250/22252)实际上就是一个关键帧,第一个是`00 00 01`开头,第二个直接就是接续的视频数据;第一个超过64KB,所以分成了两个。 +> Note: 第一个pack,最后两个Audio消息,它们时间戳是不同的。 +> Note: 第二个pack,只有一个Video,没超过64KB,而且没有system header和PSM,所以一般不是关键帧(具体以NALU解析为准)。 +> Note: 第二个pack,后面两个是Audio消息,时间戳也不同。 +> Note: 两个pack的Video间隔,是`2454812448-2454808848=3600`,也就是40ms,也就是视频FPS=25。而Audio之间的间隔,是`2454810198-2454808128=2070`,也就是23ms。 + +## Wireshark + +Wireshark默认就能解析GB的SIP的包,5060端口认为是SIP的默认端口。而GB媒体则需要操作下,这小节总结下如何用Wireshark解析媒体包。 + +SRS使用TCP传输媒体,所以格式是[RFC4571: RTP & RTCP over Connection-Oriented Transport](https://www.ietf.org/rfc/rfc4571.html),就是前两个字节是长度,后面是RTP包。 + +> Note: Wireshark支持[RFC4571](https://github.com/wireshark/wireshark/commit/7eee48ad5588bc2debec0e564b3526c97a0eb125#diff-ef0e5a499517cb594820f7dfd9200ee5c3cf5bd32259e066464a40aa6eebfb1cR3601),它的Dissecotr是`rtp.rfc4571`。 + +有两种方法,一种直接打开包后,输入过滤`tcp.port==9000`,然后右键包,选择`Decode as > RTP`,就可以看到解析成了RFC4517,如下图所示: + +![](/img/doc-2022-10-08-003.png) + +还有一种方法,直接加载SRS的[research/wireshark/gb28181.lua](https://github.com/ossrs/srs/blob/develop/trunk/research/wireshark/gb28181.lua)插件,将TCP/9000数据解析为RFC4571格式,执行如下命令: + +```bash +cd ~/git/srs/trunk/research/wireshark +mkdir -p ~/.local/lib/wireshark/plugins +ln -sf $(pwd)/gb28181.lua ~/.local/lib/wireshark/plugins/gb28181.lua +``` + +> Note: Wireshark的插件目录,不同平台会不同,请百度下在哪里,直接把插件拷贝进去也可以。 + +解析成功后,直接过滤`rtp`包,可以看到GB的媒体数据,如下图所示: + +![](/img/doc-2022-10-08-004.png) + +> Note: 注意RTP的Payload就是[MPEG-PS](https://en.wikipedia.org/wiki/MPEG_program_stream),开头是`00 00 01 BA`的标识符,不过Wireshark不支持PS流解析。 + +工具准备好了,分析起来也会更方便。 + +## Lazy Sweep + +GB存在和[Source清理](https://github.com/ossrs/srs/issues/413#issuecomment-1227972901)一样的问题。在GB中,存在SIP连接协程,媒体连接协程,会话协程等多个协程,这些协程之间会互相引用对象,而它们的生命周期是不一致的。 + +比如:SIP连接,需要持有会话对象的指针,当设备连接到SRS时,需要更新会话协程的SIP连接对象,这样会话需要发送信令消息,就可以走最新的SIP连接发送。 + +比如:媒体连接,收到媒体PS pack时,需要通知会话协程处理,转成RTMP流。媒体连接断开时,需要通知会话协程,会话协程会发送BYE和重新INVITE,通知设备重新推流。 + +比如:会话对象,有自己的生命周期,简单设计就是和Source一样永远不清理,这样它生命周期就会比SIP和媒体协程活得更长,这样它们引用会话对象时就是安全的,但这样就会有内存不释放的问题。同样,SIP连接一定需要清理,所以会话对象就可能会持有野指针问题。 + +Source清理的问题,本质上是多个协程之间生命周期不同步,所以如果释放Source后可能有些协程活得比Source更久,就可能出现野指针引用。详细请查看[#413](https://github.com/ossrs/srs/issues/413)的描述。 + +SRS 6.0引入了Smart Pointer,解决了Source清理的问题,具体参考[SmartPtr for GB](https://github.com/ossrs/srs/commit/6834ec208d67fa47c21536d1f1041bb6d60c1834)的修改。 + +## Benchmark + +GB缺乏工具链,基本上是空白的,而没有工具,就只能借助真实的摄像头测试,这基本上就是原始时代: + +* 摄像头只能覆盖基本的正常流程,以及一些能操作的异常流程,无法覆盖SRS设计的状态机,会导致某些状态下异常。 +* 摄像头的断开和重推流周期太慢,估计得15秒左右,而工具比如utest或benchmark,可以做到1秒就能重推,这样效率才能提高。 +* 摄像头无法实现utest和回归测试,这次测试完是好的,但未来不知道什么时候就改坏了,这样出现问题后排查的效率就非常低。 + +和WebRTC一样,SRS也会完善GB的工具链,参考[srs-bench](https://github.com/ossrs/srs-bench/tree/feature/rtc),我们会基于Go的各种库实现GB的自动测试,也可以用作模拟摄像头。使用到的库包括: + +* [ghettovoice/gosip](https://github.com/ghettovoice/gosip):SIP协议栈,这个库WebRTC段维伟也有贡献,模拟GB的SIP。 +* [gomedia/mpeg2](https://github.com/yapingcat/gomedia/mpeg2):打包PS流,包括Pack头、System头、PSM包、音视频PES包等。 +* [pion/rtp](https://github.com/pion/rtp):打包RTP头。由于GB的SDP不标准,所以没用pion的SDP解析,直接字符串查找SSRC即可。 +* [pion/h264reader](https://github.com/pion/webrtc): 读取h264格式的视频文件,在压测工具中,使用FFmpeg将FLV转成h264格式的视频文件,方便测试时分开测试音视频。 +* [go-oryx-lib/aac](https://github.com/ossrs/go-oryx-lib): 读取AAC格式的音频文件,在压测工具中,使用FFmpeg将FLV转成ogg/aac等音频文件格式,方便测试时分开测试音视频。 + +Go的库一致性比C++的高,当然风格也有差别,调试很方便,用作Benchmark工具是足够了。 + +使用方法,下载代码后编译,执行`--help`可以看到参数和实例,注意依赖Go编译环境请先安装Go: + +```bash +git clone -b feature/rtc https://gitee.com/ossrs/srs-bench.git +cd srs-bench +make && ./objs/srs_bench -sfu gb28181 
--help +``` + +模拟一个摄像头推流: + +```bash +./objs/srs_bench -sfu gb28181 -pr tcp://127.0.0.1:5060 -user 3402000000 -random 10 \ + -server 34020000002000000001 -domain 3402000000 -sa avatar.aac \ + -sv avatar.h264 -fps 25 +``` + +> Note: SRS使用user字段作为设备标识,转成RTMP也作为流名称,压测工具支持随机10位数字的user,通过`random`指定,这样可以每次模拟不同的设备。如果希望模拟一台固定的设备,不指定`random`,而指定完整的user即可。 + +> Note: 需要先启动本机SRS。压测工具自带了测试样本`avatar.h264`和`avatar.aac`,如果需要其他的测试样本,可以用FFmpeg生成。 + +同样,SRS的回归测试,也会执行GB的回归测试,每次提交都会检查是否GB正常,也可以手动执行回归测试: + +```bash +cd srs-bench +go test ./gb28181 -mod=vendor -v +``` + +> Note: 测试前需要先启动SRS服务器,参考前面压测的说明。 + +Go最厉害的是这些控制机制,覆盖得非常全面,比如: + +1. 启动三个协程,必须等三个协程结束后才能退出,并判断错误结果,决定测试是否成功。 +1. 推流和播放协程,必须等待主协程初始化完毕才能启动。 +1. 播放协程,必须等待推流建联后才能建联。 +1. 所有协程,都不能超过测试用例的超时时间,比如5秒。 +1. 若有协程异常,应该立刻结束,比如播放异常,推流就算正常也应该结束。 +1. 若所有正常,也应该在一定包数之后结束,比如收发100个包,这样尽快可以跑完测试,比如100ms,而不是每个必须5秒(会导致整体测试时间太长)。 + +这些全都是控制机制,Go用了`select+chan`、`WaitGroup`、`Context`三个基础组件就全部支持了,不得不佩服Go这个设计还是非常非常牛逼的。 + +## Commits + +和GB相关的修改: + +* [HTTP: Support HTTP header in creating order. v5.0.68](https://github.com/ossrs/srs/commit/4b7d9587f) GB的SIP头没有明确要求有序,不过倒是提过尽量优先,比如Via一般是放第一个。SIP使用HTTP协议栈解析,所以SIP的头有序,就改了HTTP支持头有序,按照添加的顺序,而不是默认的字母顺序。 +* [Kernel: Support lazy sweeping simple GC. v5.0.69](https://github.com/ossrs/srs/commit/927dd473e) 支持简单的延迟清理的GC,解决多个协程之间依赖对象的问题,未来[Source清理](https://github.com/ossrs/srs/issues/413)也可以使用这个机制。在GB上这个问题非常严重,因为有多个协程和多个对象互相依赖,清理时特别容易出现问题。此外,WebRTC over TCP也有两个协程的交互,也可以采用这个机制。 +* [GB28181: Refine HTTP parser to support SIP. v5.0.70](https://github.com/ossrs/srs/commit/1e6143e2e) 使用http parser解析SIP协议栈,本质上SIP和HTTP在协议格式上基本一致,在RFC中也说明了这一点。详细参考[SIP Parser](#sip-parser)中的说明。 +* [RTC: Refine SDP to support GB28181 SSRC spec. v5.0.71](https://github.com/ossrs/srs/commit/4ad4dd097) 使用RTC的SDP对象,解析和编码GB的SDP,主要是支持SSRC的格式`y=ssrc`的方式,以及一些不同的需要定义的字段。 +* [ST: Support set context id while thread running. v5.0.72](https://github.com/ossrs/srs/commit/dc20d5ddb) 支持协程运行过程中改变ContextID,SIP和媒体线程运行后,从包中解析才知道对应的Session,然后将自己的ContextID设置为Session的,这样可以将日志全部打印到一个ContextID上,排查时可以查询这个ID即可。 +* [HTTP: Skip body and left message by upgrade. v5.0.73](https://github.com/ossrs/srs/commit/cfbbe3044) 解决HTTP Parser调用的问题,支持SIP这样可能多个Request或Response消息的情况,只解析头并且不解析剩下的数据,避免解析失败。 +* [GB28181: Support GB28181-2016 protocol. 
v5.0.74](https://github.com/ossrs/srs/pull/3201) GB的主要逻辑,大约3.5K行左右,其他是测试代码大约4K行,以及srs-bench依赖的Go的第三方库。 + +## Thanks + +特别感谢夏立新等同学,两年前让SRS支持了GB功能,经过这两年的积累,我们形成了GB的开源社区,了解了GB的应用场景,以及主要的发展方向。 + +经过这两年对GB的理解,我们也终于有信心把GB合并到SRS 5.0,除了夏立新和陈海博,其中有非常多的同学的贡献,很抱歉无法一一表达。 + +在合并GB进SRS 5.0过程中,对于其中的难点和疑点,也有很多同学给与了帮助,包括王冰洋、陈海博、沈巍、周小军、夏立新、杜金房、姚文佳、潘林林等等同学。 + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/getting-started-build.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/getting-started-build.md new file mode 100644 index 00000000..98bf3ac8 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/getting-started-build.md @@ -0,0 +1,223 @@ +--- +title: Build +sidebar_label: 源码编译 +hide_title: false +hide_table_of_contents: false +--- + +# Build + +SRS可以从源码编译和启动,但推荐更简单的[Docker](./getting-started.md)方式启动。 + +## Live Streaming + +直播是SRS的典型场景,支持推直播流后多种观看方式。 + +下载源码,推荐用[Ubuntu20](./install.md): + +``` +git clone -b develop https://gitee.com/ossrs/srs.git +``` + +编译,注意需要切换到`srs/trunk`目录: + +``` +cd srs/trunk +./configure +make +``` + +启动服务器: + +``` +./objs/srs -c conf/srs.conf +``` + +检查SRS是否成功启动,可以打开 [http://localhost:8080/](http://localhost:8080/) ,或者执行命令: + +``` +# 查看SRS的状态 +./etc/init.d/srs status + +# 或者看SRS的日志 +tail -n 30 -f ./objs/srs.log +``` + +例如,下面的命令显示SRS正在运行: + +``` +MB0:trunk $ ./etc/init.d/srs status +SRS(pid 90408) is running. [ OK ] + +MB0:trunk $ tail -n 30 -f ./objs/srs.log +[2021-08-13 10:30:36.634][Trace][90408][12c97232] Hybrid cpu=0.00%,0MB, cid=1,1, timer=61,0,0, clock=0,22,25,0,0,0,0,1,0 +``` + +使用 [FFmpeg(点击下载)](https://ffmpeg.org/download.html) 或 [OBS(点击下载)](https://obsproject.com/download) 推流: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +> Note: 实例文件`./doc/source.flv`在SRS的源代码目录中有。 + +打开下面的页面播放流(若SRS不在本机,请将localhost更换成服务器IP): + +* RTMP (by [VLC](https://www.videolan.org/)): `rtmp://localhost/live/livestream` +* H5(HTTP-FLV): [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) +* H5(HLS): [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.m3u8&port=8080&schema=http) + +## WebRTC + +SRS支持WebRTC,可以做会议或视频聊天。 + +下载源码,推荐用[Ubuntu20](./install.md): + +``` +git clone -b develop https://gitee.com/ossrs/srs.git +``` + +编译,注意需要切换到`srs/trunk`目录: + +``` +cd srs/trunk +./configure +make +``` + +启动服务器: + +``` +CANDIDATE="192.168.1.10" +./objs/srs -c conf/srs.conf +``` + +> Note: 请将IP换成你的SRS的IP地址。 + +> Note: 请将CANDIDATE设置为服务器的外网地址,详细请阅读[WebRTC: CANDIDATE](./webrtc.md#config-candidate)。 + +检查SRS是否成功启动,可以打开 [http://localhost:8080/](http://localhost:8080/) ,或者执行命令: + +``` +# 查看SRS的状态 +./etc/init.d/srs status + +# 或者看SRS的日志 +tail -n 30 -f ./objs/srs.log +``` + +例如,下面的命令显示SRS正在运行: + +``` +MB0:trunk $ ./etc/init.d/srs status +SRS(pid 90408) is running. 
[ OK ] + +MB0:trunk $ tail -n 30 -f ./objs/srs.log +[2021-08-13 10:30:36.634][Trace][90408][12c97232] Hybrid cpu=0.00%,0MB, cid=1,1, timer=61,0,0, clock=0,22,25,0,0,0,0,1,0 +``` + +本机推拉流(即浏览器和SRS都在本机),使用WebRTC推流到SRS:[WebRTC: Publish](http://localhost:8080/players/rtc_publisher.html?autostart=true&stream=livestream&port=8080&schema=http) + +> Note: 非本机推拉流,也就是不能用localhost访问SRS时,浏览器限制必须HTTPS才能推拉流,请参考[WebRTC using HTTPS](./getting-started-build.md#webrtc-using-https),再次强调这是浏览器限制。 + +打开页面观看WebRTC流:[WebRTC: Play](http://localhost:8080/players/rtc_player.html?autostart=true&stream=livestream&schema=http) + +> Note: 可以打开不同的页面,推拉不同的流,就可以实现视频聊天了。 + +## WebRTC for Live Streaming + +SRS支持直播转WebRTC,推直播流,使用WebRTC观看。 + +下载源码,推荐用[Ubuntu20](./install.md): + +``` +git clone -b develop https://gitee.com/ossrs/srs.git +``` + +编译,注意需要切换到`srs/trunk`目录: + +``` +cd srs/trunk +./configure +make +``` + +启动服务器: + +``` +CANDIDATE="192.168.1.10" +./objs/srs -c conf/rtmp2rtc.conf +``` + +> Note: 请将IP换成你的SRS的IP地址。 + +> Note: 请将CANDIDATE设置为服务器的外网地址,详细请阅读[WebRTC: CANDIDATE](./webrtc.md#config-candidate)。 + +> Note: 注意如果RTMP转WebRTC流播放,必须使用配置文件[`rtmp2rtc.conf`](https://github.com/ossrs/srs/issues/2728#rtmp2rtc-cn-guide) + +使用 [FFmpeg(点击下载)](https://ffmpeg.org/download.html) 或 [OBS(点击下载)](https://obsproject.com/download) 推流: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +> Note: 实例文件`./doc/source.flv`在SRS的源代码目录中有。 + +打开下面的页面播放流(若SRS不在本机,请将localhost更换成服务器IP): + +* WebRTC: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html?autostart=true) +* H5(HTTP-FLV): [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) +* H5(HLS): [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.m3u8&port=8080&schema=http) + +## WebRTC using HTTPS + +若需要在非本机使用WebRTC,比如SRS运行在远程服务器,在笔记本或者手机上使用WebRTC,则需要开启HTTPS API。 + +下载源码,推荐用[Ubuntu20](./install.md): + +``` +git clone -b develop https://gitee.com/ossrs/srs.git +``` + +编译,注意需要切换到`srs/trunk`目录: + +``` +cd srs/trunk +./configure +make +``` + +启动服务器: + +``` +CANDIDATE="192.168.1.10" +./objs/srs -c conf/https.rtc.conf +``` + +> Note: 请将IP换成你的SRS的IP地址。 + +> Note: 请将CANDIDATE设置为服务器的外网地址,详细请阅读[WebRTC: CANDIDATE](./webrtc.md#config-candidate)。 + +> Remark: 请使用你的证书文件,代替上面配置中的key和cert,请参考 +> **[HTTPS API](./http-api.md#https-api)** +> 以及 **[HTTPS Callback](./http-callback.md#https-callback)** +> 以及 **[HTTPS Live Streaming](./flv.md#https-flv-live-stream)**, +> 当然了HTTPS的反向代理也能和SRS工作很好,比如Nginx代理到SRS。 + +使用WebRTC推流到SRS:[WebRTC: Publish](https://192.168.3.82:8088/players/rtc_publisher.html?autostart=true&stream=livestream&api=1990&schema=https) + +打开页面观看WebRTC流:[WebRTC: Play](https://192.168.3.82:8088/players/rtc_player.html?autostart=true&stream=livestream&api=1990&schema=https) + +> 注意:自签名证书,在空白处输入`thisisunsafe`(注意没空格)。 + +> Note: 可以打开不同的页面,推拉不同的流,就可以实现视频聊天了。 + +## Cross Build + +注意一般都可以直接编译,即使是ARM系统也可以直接编译,只有嵌入式板子才需要交叉编译。 + +若需要交叉编译,请参考[ARM和交叉编译](./arm.md)。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/getting-started-build) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/getting-started-k8s.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/getting-started-k8s.md new file mode 100644 index 00000000..b5a96b53 --- /dev/null +++ 
b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/getting-started-k8s.md @@ -0,0 +1,21 @@ +--- +title: K8s +sidebar_label: K8s +hide_title: false +hide_table_of_contents: false +--- + +# K8s + +推荐使用HELM方式部署SRS,参考[srs-helm](https://github.com/ossrs/srs-helm)。当然,SRS也支持K8s方式直接部署, +参考[SRS K8s](./k8s.md)。 + +其实HELM是基于K8s的,HELM最终部署的也是K8s的pod,而且可以使用kubectl管理。不过,HELM提供了更加方便的应用管理和安装方式, +因此,未来SRS主要支持的是HELM方式。 + +和Docker方式相比,HELM和K8s主要是中大规模的部署。如果你的业务规模并不大,那么推荐直接使用Docker或者Oryx方式。 +一般而言,如果你的流没有超过一千路,请不要使用HELM或K8s的方式。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/getting-started-k8s) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/getting-started-oryx.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/getting-started-oryx.md new file mode 100644 index 00000000..4d753052 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/getting-started-oryx.md @@ -0,0 +1,404 @@ +--- +title: Oryx +sidebar_label: Oryx +hide_title: false +hide_table_of_contents: false +--- + +# Oryx + +Oryx(SRS Stack)是一个基于Go、Reactjs、SRS、FFmpeg、WebRTC等的轻量级、开源的视频云解决方案。 + +## Introduction + +Oryx作为开源的开箱即用的音视频方案,是完全基于场景构建的。 常见的比如推拉流场景,支持各种不同协议的推拉流, +并且支持嵌入到WordPress等网站。 + +比如录制场景,支持合并多次推流,支持设置过滤器,只录制特定的流。比如转发和虚拟直播,可以将文件和其他流,转发到不同的平台, +或者转发到Oryx自身。比如AI自动字幕,可以使用OpenAI的能力,自动识别字幕并将字幕嵌入到视频流中。比如一键自动HTTPS, +可以非常方便的开启HTTPS能力。 + +未来还会有更多丰富的场景。 + +## FAQ + +若使用Oryx时遇到问题,请先阅读[FAQ](../../../faq-oryx)。 + +## Usage + +请根据你的平台,选择安装方式。 + +> Remark: 请选择Ubuntu 20系统,其他系统可能会碰到一些奇怪的问题。 + +### Docker + +推荐使用Docker运行Oryx: + +```bash +docker run --restart always -d -it --name oryx -v $HOME/data:/data \ + -p 80:2022 -p 443:2443 -p 1935:1935 -p 8000:8000/udp -p 10080:10080/udp \ + registry.cn-hangzhou.aliyuncs.com/ossrs/oryx:5 +``` + +请打开页面[http://localhost](http://localhost)开始使用Oryx。 + +关于使用说明,请参考 [Oryx Docker](https://github.com/ossrs/oryx#usage)。 + +### HELM + +推荐使用HELM安装和运行Oryx: + +```bash +helm repo add srs http://helm.ossrs.io/stable +helm install srs srs/oryx --set persistence.path=$HOME/data \ + --set service.http=80 --set service.https=443 --set service.rtmp=1935 \ + --set service.rtc=8000 --set service.srt=10080 +``` + +请打开页面[http://localhost](http://localhost)开始使用Oryx。 + +### BT + +Oryx提供了宝塔插件,使用方法参考[宝塔插件](../../../blog/BT-aaPanel)。 + +### Script + +对于 Ubuntu 20+,您可以下载 [linux-oryx-zh.tar.gz](https://github.com/ossrs/oryx/releases/latest/download/linux-oryx-zh.tar.gz) +并安装它。 + +### AWS Lightsail + +Oryx支持AWS Lightsail,这是AWS提供的一种虚拟专用服务器(VPS)服务。请参阅[如何一键建立视频流媒体服务](../../../blog/Oryx-Tutorial)。 + +### TencentCloud LightHouse + +在国内做流媒体或RTC业务,可以在腾讯云轻量服务器上购买Oryx,参考[Oryx:起步、购买和入门](../../../blog/Oryx-Tutorial)。 + +## Changelog + +关于Oryx的更新日志,参考[CHANGELOG](https://github.com/ossrs/oryx/blob/main/DEVELOPER.md#changelog) + +某个具体版本支持的具体功能,可以在版本发布中查看CHANGELOG,参考[Releases](https://github.com/ossrs/oryx/releases) + +## Features + +关于Oryx的功能以及与SRS的比较,详细的功能清单参考[Features](https://github.com/ossrs/oryx?tab=readme-ov-file#features)。 + +### Compare to SRS + +在比较Oryx和SRS时,两者都提供相似级别的媒体流功能。然而,Oryx为终端用户提供了更强大且功能丰富的体验, +无需编写任何代码。用户可以直接使用Oryx满足您的媒体服务需求。 + +| 比较 | Oryx | SRS | 说明 | +|------------|-------------------|----------|---------------------------------------| +| 许可证 | MIT | MIT | SRS是MIT,而Oryx是MIT。 | +| 直播流 | Yes | Yes | 两者都支持RTMP,HLS和HTTP-FLV协议。 | +| WebRTC | Yes | Yes | 两者都支持WebRTC。 | +| Auto HTTPS | Yes | No | Oryx自动申请和更新HTTPS证书. 
| +| 控制台 | 增强 | HTTP API | Oryx提供了更强大的控制台。 | +| 身份验证 | Yes | HTTP回调 | Oryx具有内置身份验证,而SRS使用回调。 | +| DVR | 增强 | 基于文件 | Oryx支持将DVR存储到文件和云存储。 | +| 转发 | 增强 | 基本 | Oryx可以通过各种协议转发到多个平台。 | +| 虚拟直播 | Yes | No | Oryx提供了先进的虚拟直播功能。 | +| WordPress | Yes | No | Oryx提供了WordPress插件和操作指南。 | +| 转码 | Yes | No | Oryx提供了直播转码的能力。 | +| AI字幕 | Yes | No | 自动识别直播语音并转为字幕,叠加到视频 | +| 直播间 | Yes | No | 直播间的鉴权和业务功能 | +| 视频翻译 | Yes | No | 支持点播视频多语言翻译 | + +### Streaming and Authentication + +Oryx支持基于SRS回调的带身份验证的增强流媒体。Oryx生成并将流令牌保存到Redis中,并在用户通过RTMP、 +SRT或WHIP/WebRTC发布流时验证流令牌。 + +Oryx还代理并保护SRS的所有HTTP API,因此只有经过身份验证的用户才能访问HTTP API和控制台。 + +### DVR + +Oryx 支持 DVR 或录制功能,将实时流转换为文件,然后保存到本地磁盘或云存储中。 +我们还支持将多个重新发布会话合并到一个 DVR 文件中,并支持设置过滤器来录制指定的流。 + +详细信息请参阅[服务器端录制和 AWS S3 集成的指南](../../../blog/Record-Live-Streaming)。 + +### Automatic HTTPS + +Oryx 支持自动 HTTPS,只需点击一下,您就可以为您的 Oryx 启用 HTTPS。Oryx 将自动从 [Let's Encrypt](https://letsencrypt.org/) +请求和更新 HTTPS 证书。自动 HTTPS 允许 WHIP 或通过网页发布,同时支持 WebRTC,并访问用户的麦克风。 + +详细信息请参阅[如何通过一键开启HTTPS](../../../blog/Oryx-HTTPS)。 + +### Virtual Live Events + +您可以使用预先录制的视频来模拟现场活动。您只需1个视频文件就可以进行7x24小时的直播。您还可以将流拉到您的直播间,使直播更强大。 +您甚至可以将您的IP摄像头流拉到您的直播间。 + +请参阅[虚拟直播](../../../blog/Virtual-Live-Events)和[摄像头直播](../../../blog/Stream-IP-Camera-Events)。 + +### Restream + +使用Oryx,您可以将流媒体重新发送到多个平台。Oryx会自动选择一个流进行转发,因此您可以发布多个流作为容错或备份流, +当某个流中断时,Oryx会切换到另一个流。 + +请参阅[多平台转播](../../../blog/Multi-Platform-Streaming)以获取详细信息。 + +### AI Transcription + +Oryx支持由OpenAI提供支持的AI转录功能,将实时语音转换为文本并叠加到视频流中作为新的实时流。借助此功能,您可以吸引更多观众, +特别是对于有听力障碍的人或非母语者。 + +请参阅[AI自动字幕](../../../blog/live-streams-transcription)以获取详细信息。 + +### Transcode + +Oryx支持对实时流进行转码,以降低比特率、节省带宽和成本,或过滤实时流内容以使其更优。 + +详细信息请参阅[直播转码](../../../blog/Live-Transcoding)。 + +## AI Products + +我们正在Oryx中实现各种AI的工具和产品,这里是最新的状态,我们会持续更新这个文档。 + +1. 直播AI字幕,对接的OpenAI的Whisper实现的语音转文字,然后将文字字幕叠加到直播中,实现直播的自动字幕。 + * 状态:已经完成,可以在Oryx中使用。参考[AI自动字幕](../../../blog/live-streams-transcription)。 +1. 直播间AI助手,在直播间放一个AI当助手,可以让助手帮自己回答问题,或者完成直播中的一些任务。 + * 状态: Beta版本,可在Oryx中使用。参考 [基于浏览器的语音GPT的AI助手](../../../blog/browser-voice-driven-gpt)。 +1. 点播视频翻译,可以将英文视频翻译成中文了学习英文,或者制作多语言的视频,在教育和电商中用的比较多。 + * 状态: Beta版本,可在Oryx中使用。参考 [视频多语言翻译和配音](../../../blog/dubbing-translating)。 +1. 
流OCR:从直播中的图像提取文本,实现实时文本识别和翻译,适用于各种应用。 + * 状态: Beta版本,可在Oryx中使用。参考 [基于AI的视频流的OCR和对象识别](../../../blog/ocr-video-streams)。 + +如果你对我们的AI产品感兴趣,可以关注我们的公众号后,在[公众号](/contact#discussion)菜单选择`AI产品`,可以加入微信群,和我们一起讨论。 + +## HTTP API + +你可以打开`系统配置 > OpenAPI`来获取Bearer鉴权并尝试HTTP API。 + +你可以点击网页上的按钮请求HTTP API,也可以使用curl或js代码请求HTTP API。请按照网页上的说明操作,例如, +使用curl请求HTTP API: + +```bash +curl http://localhost/terraform/v1/mgmt/versions +``` + +或使用Bearer鉴权: + +```bash +curl http://localhost/terraform/v1/hooks/srs/secret/query \ + -X POST -H 'Authorization: Bearer xxxxxx' \ + -H 'Content-Type: application/json' --data '{}' +``` + +> Note: 你可以打开`系统配置 > OpenAPI`来获取Bearer鉴权并尝试HTTP API。 + +> Note:网页可能使用JWT鉴权,但您也可以使用Bearer鉴权请求HTTP API。 + +除了页面列出的示例API之外,所有后台页面上的操作,你都可以通过HTTP API实现。要知道每个API的请求和响应,请打开谷歌Chrome, +导航至`视图 > 开发人员 > 开发者工具`,点击`网络`选项卡,然后检查相关的API交互。 + +Oryx还代理了[SRS HTTP API](./http-api.md),前缀为`/api/v1/`,例如: + +```bash +curl http://localhost/api/v1/versions +``` + +或使用Bearer鉴权: + +```bash +curl http://localhost/api/v1/vhosts/ \ + -X GET -H 'Authorization: Bearer xxxxxx' \ + -H 'Content-Type: application/json' +``` + +> Note: 你可以打开`系统配置 > OpenAPI`来获取Bearer鉴权并尝试HTTP API。 + +请阅读[SRS HTTP API](./http-api.md)了解API的详细信息。 + +## HTTP Callback + +HTTP回调是指在Docker容器中运行的Oryx,向target URL发起HTTP请求。例如,以下过程说明了当OBS发送RTMP流时, +Oryx会给你的服务器发起一个请求,你可以通过target URL来配置你的服务器地址。 + +```bash + +-----------------------+ + + + ++-------+ + +-----------+ + +--------------+ ++ OBS +--RTMP->--+-----+ Oryx +-----+----HTTP--->-----+ Your Server + ++-------+ + +-----------+ + (Target URL) +--------------+ + + + + + Docker + + +-----------------------+ +``` + +所有请求的格式是json: + +* `Content-Type: application-json` + +所有响应都应该遵守: + +* 成功:`Status: 200 OK` and `"code": 0` +* 其他代表失败或错误。 + +关于如何实现回调的处理,请参考[HTTP Callback](../docs/v6/doc/http-callback#go-example) + +### HTTP Callback: Connectivity Check + +有时,您可能需要验证网络是否可访问并确定要使用的适当目标URL。通过在Docker容器内使用curl命令,您可以模拟此请求并确认 +target URL是否可以通过curl或Oryx访问。 + +首先,在Oryx的容器中安装curl: + +```bash +docker exec -it oryx apt-get update -y +docker exec -it oryx apt-get install -y curl +``` + +然后,用curl模拟Oryx发起一个HTTP请求: + +```bash +docker exec -it oryx curl http://your-target-URL +``` + +你可以使用任何合法的target URL来测试,包括: + +* 内网IP:`http://192.168.1.10/check` +* 公网IP:`http://159.133.96.20/check` +* HTTP地址,使用域名: `http://your-domain.com/check` +* HTTPS地址,使用域名:`https://your-domain.com/check` + +请记住,您应在Oryx Docker中测试与target URL的连通性,并避免从其他服务器运行curl命令。 + +### HTTP Callback: on_publish + +For HTTP callback `on_publish` event: + +```json +Request: +{ + "request_id": "3ab26a09-59b0-42f7-98e3-a281c7d0712b", + "action": "on_unpublish", + "opaque": "mytoken", + "vhost": "__defaultVhost__", + "app": "live", + "stream": "livestream", + "param": "?secret=8f7605d657c74d69b6b48f532c469bc9" +} + +Response: +{ + "code": 0 +} +``` + +* Allow publishing if response success. +* Reject publishing if response error. + +### HTTP Callback: on_unpublish + +For HTTP callback `on_unpublish` event: + +```json +Request: +{ + "request_id": "9ea987fa-1563-4c28-8c6c-a0e9edd4f536", + "action": "on_unpublish", + "opaque": "mytoken", + "vhost": "__defaultVhost__", + "app": "live", + "stream": "livestream" +} + +Response: +{ + "code": 0 +} +``` + +* Ignore any response error. 
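+下面是一个处理`on_publish`/`on_unpublish`回调的最小Go示意(仅供参考):字段名取自上面的请求JSON,其中监听端口`8085`、路径`/callback`以及“推流URL必须携带`?secret=`参数”的校验逻辑都是假设的示例,具体鉴权规则请换成你自己的实现。
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"log"
+	"net/http"
+	"strings"
+)
+
+// 回调请求中本示例关心的字段,字段名与上面的示例JSON一致。
+type callbackEvent struct {
+	Action string `json:"action"`
+	App    string `json:"app"`
+	Stream string `json:"stream"`
+	Param  string `json:"param"`
+}
+
+func main() {
+	http.HandleFunc("/callback", func(w http.ResponseWriter, r *http.Request) {
+		var ev callbackEvent
+		if err := json.NewDecoder(r.Body).Decode(&ev); err != nil {
+			http.Error(w, `{"code":1}`, http.StatusBadRequest)
+			return
+		}
+		log.Printf("callback action=%s app=%s stream=%s param=%s", ev.Action, ev.App, ev.Stream, ev.Param)
+
+		// 仅作演示的校验:假设推流URL必须携带 ?secret= 参数,否则拒绝推流。
+		if ev.Action == "on_publish" && !strings.Contains(ev.Param, "secret=") {
+			http.Error(w, `{"code":1}`, http.StatusForbidden) // 返回错误即拒绝
+			return
+		}
+
+		// 返回 200 且 code 为 0 表示成功;对于 on_unpublish,响应错误会被忽略。
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{"code":0}`))
+	})
+
+	log.Fatal(http.ListenAndServe(":8085", nil))
+}
+```
+
+> Note: 按照上面的约定,回调应答`Status: 200 OK`且`"code": 0`表示成功,其他均视为失败;对`on_publish`来说,失败就意味着拒绝本次推流。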
+ +### HTTP Callback: on_record_begin + +For HTTP callback `on_record_begin` event: + +```json +Request: +{ + "request_id": "80ad1ddf-1731-450c-83ec-735ea79dd6a3", + "action": "on_record_begin", + "opaque": "mytoken", + "vhost": "__defaultVhost__", + "app": "live", + "stream": "livestream", + "uuid": "824b96f9-8d51-4046-ba1e-a9aec7d57c95" +} + +Response: +{ +"code": 0 +} +``` + +* Ignore any response error. + +### HTTP Callback: on_record_end + +For HTTP callback `on_record_end` event: + +```json +Request: +{ + "request_id": "d13a0e60-e2fe-42cd-a8d8-f04c7e71b5f5", + "action": "on_record_end", + "opaque": "mytoken", + "vhost": "__defaultVhost__", + "app": "live", + "stream": "livestream", + "uuid": "824b96f9-8d51-4046-ba1e-a9aec7d57c95", + "artifact_code": 0, + "artifact_path": "/data/record/824b96f9-8d51-4046-ba1e-a9aec7d57c95/index.mp4", + "artifact_url": "http://localhost/terraform/v1/hooks/record/hls/824b96f9-8d51-4046-ba1e-a9aec7d57c95/index.mp4" +} + +Response: +{ + "code": 0 +} +``` + +* The `uuid` is the UUID of record task. +* The `artifact_code` indicates the error code. If no error, it's 0. +* The `artifact_path` is the path of artifact mp4 in the container. +* The `artifact_url` is the URL path to access the artifact mp4. +* Ignore any response error. + +### HTTP Callback: on_ocr + +For HTTP callback `on_ocr` event: + +```json +Request: +{ + "request_id": "d13a0e60-e2fe-42cd-a8d8-f04c7e71b5f5", + "action": "on_ocr", + "opaque": "mytoken", + "vhost": "__defaultVhost__", + "app": "live", + "stream": "livestream", + "uuid": "824b96f9-8d51-4046-ba1e-a9aec7d57c95", + "prompt": "What is in the image?", + "result": "The image shows a scene featuring a character from a film, likely set in a military or high-tech environment." +} + +Response: +{ + "code": 0 +} +``` + +* The `uuid` is the UUID of OCR task. +* The `prompt` the AI model used for OCR. +* The `result` is the OCR result. +* Ignore any response error. 
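+下面是一个处理`on_record_end`回调的最小Go示意(仅供参考):收到录制完成事件后,通过`artifact_url`把MP4另存到本地目录。其中监听端口`8086`、路径`/record`、保存目录`./artifacts`,以及“`artifact_url`可以直接下载”这一点,都是示例假设,请按你的环境调整:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"io"
+	"log"
+	"net/http"
+	"os"
+	"path/filepath"
+)
+
+// on_record_end 事件中本示例关心的字段,字段名与上面的示例JSON一致。
+type recordEndEvent struct {
+	Action       string `json:"action"`
+	UUID         string `json:"uuid"`
+	ArtifactCode int    `json:"artifact_code"`
+	ArtifactURL  string `json:"artifact_url"`
+}
+
+// saveArtifact 下载录制产物,另存到 ./artifacts 目录(目录名仅为示例)。
+func saveArtifact(ev recordEndEvent) error {
+	resp, err := http.Get(ev.ArtifactURL)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	out, err := os.Create(filepath.Join("artifacts", ev.UUID+".mp4"))
+	if err != nil {
+		return err
+	}
+	defer out.Close()
+
+	_, err = io.Copy(out, resp.Body)
+	return err
+}
+
+func main() {
+	_ = os.MkdirAll("artifacts", 0o755)
+
+	http.HandleFunc("/record", func(w http.ResponseWriter, r *http.Request) {
+		var ev recordEndEvent
+		if err := json.NewDecoder(r.Body).Decode(&ev); err == nil &&
+			ev.Action == "on_record_end" && ev.ArtifactCode == 0 {
+			// 下载可能比较耗时,放到后台处理,避免阻塞回调请求。
+			go func(ev recordEndEvent) {
+				if err := saveArtifact(ev); err != nil {
+					log.Println("save artifact:", err)
+				}
+			}(ev)
+		}
+
+		// 录制类事件的响应错误会被忽略,这里始终返回成功。
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{"code":0}`))
+	})
+
+	log.Fatal(http.ListenAndServe(":8086", nil))
+}
+```
+
+> Note: 由于这类事件会忽略响应错误,耗时的处理(下载、转存、通知)建议放在后台任务里完成,回调本身尽快返回`{"code": 0}`即可。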
+ +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/getting-started-oryx) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/getting-started.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/getting-started.md new file mode 100644 index 00000000..1b1bc09f --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/getting-started.md @@ -0,0 +1,189 @@ +--- +title: Docker +sidebar_label: Docker镜像 +hide_title: false +hide_table_of_contents: false +--- + +# Docker + +推荐使用Docker启动SRS,这是最简单也是最方便的方式。 + +## Live Streaming + +直播是SRS的典型场景,支持推直播流后多种观看方式。 + +先用Docker启动SRS: + +```bash +docker run --rm -it -p 1935:1935 -p 1985:1985 -p 8080:8080 \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 +``` + +> Note: 可用镜像在 [这里](https://cr.console.aliyun.com/repository/cn-hangzhou/ossrs/srs/images) 和每个 [Release](https://github.com/ossrs/srs/releases?q=v4&expanded=true) 都会给出来链接。 + +使用FFmpeg的Docker推流到本机: + +```bash +docker run --rm -it registry.cn-hangzhou.aliyuncs.com/ossrs/srs:encoder \ + ffmpeg -stream_loop -1 -re -i doc/source.flv -c copy \ + -f flv rtmp://host.docker.internal/live/livestream +``` + +或者使用 [FFmpeg(点击下载)](https://ffmpeg.org/download.html) 或 [OBS(点击下载)](https://obsproject.com/download) 推流: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +> Note: 实例文件`./doc/source.flv`在SRS的源代码目录中有。 + +打开下面的页面播放流(若SRS不在本机,请将localhost更换成服务器IP): + +* RTMP (by [VLC](https://www.videolan.org/)): `rtmp://localhost/live/livestream` +* H5(HTTP-FLV): [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) +* H5(HLS): [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.m3u8&port=8080&schema=http) + +## WebRTC + +SRS支持WebRTC,可以做会议或视频聊天。 + +先使用Docker启动SRS: + +```bash +CANDIDATE="192.168.1.10" +docker run --rm -it -p 1935:1935 -p 1985:1985 -p 8080:8080 -p 1990:1990 -p 8088:8088 \ + --env CANDIDATE=$CANDIDATE -p 8000:8000/udp \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 +``` + +> Note: 请将IP换成你的SRS的IP地址。 + +> Note: 请将CANDIDATE设置为服务器的外网地址,详细请阅读[WebRTC: CANDIDATE](./webrtc.md#config-candidate)。 + +本机推拉流(即浏览器和SRS都在本机),使用WebRTC推流到SRS:[WebRTC: Publish](http://localhost:8080/players/rtc_publisher.html?autostart=true&stream=livestream&port=8080&schema=http) + +> Note: 非本机推拉流,也就是不能用localhost访问SRS时,浏览器限制必须HTTPS才能推拉流,请参考[WebRTC using HTTPS](./getting-started.md#webrtc-using-https),再次强调这是浏览器限制。 + +打开页面观看WebRTC流:[WebRTC: Play](http://localhost:8080/players/rtc_player.html?autostart=true&stream=livestream&schema=http) + +> Note: 可以打开不同的页面,推拉不同的流,就可以实现视频聊天了。 + +## WebRTC for Live Streaming + +SRS支持直播转WebRTC,推直播流,使用WebRTC观看。 + +先用Docker启动SRS: + +```bash +CANDIDATE="192.168.1.10" +docker run --rm -it -p 1935:1935 -p 1985:1985 -p 8080:8080 \ + --env CANDIDATE=$CANDIDATE -p 8000:8000/udp \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 ./objs/srs -c conf/rtmp2rtc.conf +``` + +> Note: 请将IP换成你的SRS的IP地址。 + +> Note: 请将CANDIDATE设置为服务器的外网地址,详细请阅读[WebRTC: CANDIDATE](./webrtc.md#config-candidate)。 + +> Note: 注意如果RTMP转WebRTC流播放,必须使用配置文件[`rtmp2rtc.conf`](https://github.com/ossrs/srs/issues/2728#rtmp2rtc-cn-guide) + +使用FFmpeg的Docker推流到本机: + +```bash +docker run --rm -it registry.cn-hangzhou.aliyuncs.com/ossrs/srs:encoder ffmpeg -stream_loop -1 -re -i doc/source.flv \ + -c copy -f flv rtmp://host.docker.internal/live/livestream +``` + +或者使用 
[FFmpeg(点击下载)](https://ffmpeg.org/download.html) 或 [OBS(点击下载)](https://obsproject.com/download) 推流: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +> Note: 实例文件`./doc/source.flv`在SRS的源代码目录中有。 + +打开下面的页面播放流(若SRS不在本机,请将localhost更换成服务器IP): + +* WebRTC: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html?autostart=true) +* H5(HTTP-FLV): [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) +* H5(HLS): [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.m3u8&port=8080&schema=http) + +## WebRTC using HTTPS + +推流时, 若需要在非本机使用WebRTC,比如SRS运行在远程服务器,在笔记本或者手机上使用WebRTC,则需要开启HTTPS API。 + +> Note:如果只是播放WebRTC流,并不需要HTTPS,只有推流时(且非localhost)才需要HTTPS,这是浏览器的安全策略要求。 + +先用Docker启动SRS: + +```bash +CANDIDATE="192.168.1.10" +docker run --rm -it -p 1935:1935 -p 1985:1985 -p 8080:8080 -p 1990:1990 -p 8088:8088 \ + --env CANDIDATE=$CANDIDATE -p 8000:8000/udp \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 ./objs/srs -c conf/https.docker.conf +``` + +> Note: 请将IP换成你的SRS的IP地址。 + +> Note: 请将CANDIDATE设置为服务器的外网地址,详细请阅读[WebRTC: CANDIDATE](./webrtc.md#config-candidate)。 + +> Remark: 请使用你的证书文件,代替上面配置中的key和cert,请参考 +> **[HTTPS API](./http-api.md#https-api)** +> 以及 **[HTTPS Callback](./http-callback.md#https-callback)** +> 以及 **[HTTPS Live Streaming](./flv.md#https-flv-live-stream)**, +> 当然了HTTPS的反向代理也能和SRS工作很好,比如Nginx代理到SRS。 + +使用WebRTC推流到SRS:[WebRTC: Publish](https://192.168.3.82:8088/players/rtc_publisher.html?autostart=true&stream=livestream&api=1990&schema=https) + +打开页面观看WebRTC流:[WebRTC: Play](https://192.168.3.82:8088/players/rtc_player.html?autostart=true&stream=livestream&api=1990&schema=https) + +> 注意:自签名证书,在空白处输入`thisisunsafe`(注意没空格)。 + +> Note: 可以打开不同的页面,推拉不同的流,就可以实现视频聊天了。 + +## SRT for Live Streaming + +SRS支持SRT推直播流,使用SRT或其他协议观看。 + +先用Docker启动SRS: + +```bash +docker run --rm -it -p 1935:1935 -p 1985:1985 -p 8080:8080 -p 10080:10080/udp \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 ./objs/srs -c conf/srt.conf +``` + +使用 [FFmpeg(点击下载)](https://ffmpeg.org/download.html) 或 [OBS(点击下载)](https://obsproject.com/download) 推流: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -pes_payload_size 0 -f mpegts \ + 'srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=publish' +``` + +使用 [ffplay(点击下载)](https://ffmpeg.org/download.html) 或 [OBS(点击下载)](https://obsproject.com/download) 播放: + +```bash +ffplay 'srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=request' +``` + +## Multiple Streams + +你可以推拉多路流到SRS,不需要特殊的设置,按照前面的步骤运行SRS后,改变推拉流的URL就可以。比如: + +* `rtmp://ip/live/livesteam` +* `rtmp://ip/live/livesteamN` +* `rtmp://ip/liveN/livestreamN` +* `rtmp://ip/whatever/doesnotmatter` +* `srt://ip:10080?streamid=#!::r=anyM/streamN,m=publish` +* `http://ip:1985/rtc/v1/whip/?app=anyM&stream=streamN` +* `http://ip:1985/rtc/v1/whep/?app=anyM&stream=streamN` +* `http://ip:8080/anyM/streamN.flv` +* `http://ip:8080/anyM/streamN.m3u8` +* `https://ip:8080/anyM/streamN.flv` +* `https://ip:8080/anyM/streamN.m3u8` + +> Note: 详细请参考[RTMP URL](./rtmp-url-vhost.md)。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/getting-started) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/git.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/git.md new file mode 100644 index 00000000..2ad29416 --- /dev/null +++ 
b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/git.md @@ -0,0 +1,82 @@ +--- +title: Git +sidebar_label: Git +hide_title: false +hide_table_of_contents: false +--- + +# GIT + +如何选择SRS的稳定版本?如何更新代码? + +## Fast Checkout + +很多人希望缩减仓库大小,很多doc和3rdparty中的第三方代码压缩包。我在这里集中说明一次,为何需要把doc收集那么全?这个不言自明,srs中23%的代码都是注释,注释会说明是哪个文档的哪一页,难道还需要再下载一次这个文档吗?为何需要把依赖的第三方代码放进来,譬如ffmpeg和nignx还有openssl?再去下载这些相关的程序会比较麻烦,而且对于新手来说,下载正确的版本和编译都是比较复杂的问题。为了好用,大一点的仓库我觉得而是可以接受的。 + +为何不做这些改变?这些是次要复杂度,仓库多大对于代码质量没有任何影响。而且更重要的是,国内很多git镜像站点,SRS是同步更新的,阿里云提供服务的oschina,我git clone一次是40秒左右。这个问题就变成一个小问题了。 + +如何使用国内镜像站点clone,然后把服务器设置为github上?这样和直接从github上clone一模一样了。执行下面的命令就可以了: + +``` +git clone -b develop https://gitee.com/ossrs/srs.git && +cd srs && git remote set-url origin https://github.com/ossrs/srs.git +``` + +其他国内镜像参考:https://github.com/ossrs/srs/tree/develop#mirrors + +## Checkout Branch + +有些功能只有SRS2.0有,SRS1.0没有,请注意看wiki是v1还是v2的。 + +如果是1.0的功能,更新代码后要改变到1.0分支: + +``` +git pull && git checkout 1.0release +``` + +如果是2.0的功能,更新代码后要改变到2.0分支: + +``` +git pull && git checkout 2.0release +``` + +如果是3.0的功能,更新代码后要改变到3.0分支: + +``` +git pull && git checkout 2.0release +``` + +如果是4.0的功能,更新代码后要改变到4.0分支: + +``` +git pull && git checkout 4.0release +``` + +如果是5.0的功能,更新代码后要改变到5.0分支(没有单独的5.0release分支就是develop): + +``` +git pull && git checkout develop +``` + +## SRS Branches + +release分支会比develop稳定,不过只有正式发布的版本才比较稳定,发布中的版本也会有release分支。 + +* 3.0release,稳定发布版本。 +* 4.0release,稳定发布版本。 +* develop(5.0),开发版本,没有稳定性保障。 + +所谓稳定性,开源项目和商业产品的定义是不同的。开源产品没有明确的稳定性定义,也没有SLA定义,肯定是会碰到问题,就需要开发者自己能解决。 +毕竟代码都有了,如果解决不了问题,那还是建议用商业的云服务吧。 + +SRS的稳定性保障,主要依靠几个方法: + +* 一旦进入Release阶段,变更会考虑稳定性,不会新增功能,基本上只改善代码和解决bug。 +* 不断完善UTest和RegressionTest,防止引入问题,提前发现问题。 +* 依靠社区的反馈,一般Release分支会提交一些Commit但不一定会打版本,如果过段时间没有稳定性问题反馈才会发版本。 + +Winlin 2014.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/git) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/gperf.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/gperf.md new file mode 100644 index 00000000..a1ca2434 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/gperf.md @@ -0,0 +1,180 @@ +--- +title: GPERF +sidebar_label: GPERF +hide_title: false +hide_table_of_contents: false +--- + +# GPERF内存和性能分析 + +本文只描述了GPERF工具的用法,关于完整的性能分析方法,参考 [SRS性能(CPU)、内存优化工具用法](https://www.jianshu.com/p/6d4a89359352) 或 [CSDN](https://winlin.blog.csdn.net/article/details/53503869) + +valgrind一个很好用的内存和CPU分析工具,SRS已经支持。 + +[gperf/gperftools](https://github.com/gperftools/gperftools) 是google用作内存和CPU分析的工具,基于tcmalloc(也是google内存分配库,替换glibc的malloc和free)。好消息是gperf可以用作srs的内存和性能分析。 + +gperf主要有三个应用: +* gmc: gperf memory check, 内存检查(泄漏,错误等),参考:[heap_checker](https://gperftools.github.io/gperftools/heap_checker.html) +* gmp: gperf memory profile, 内存性能分析(哪个函数内存分配多),参考:[heapprofile](https://gperftools.github.io/gperftools/heapprofile.html) +* gcp: gperf cpu profile, CPU性能分析(函数消耗CPU多),参考:[cpuprofile](https://gperftools.github.io/gperftools/cpuprofile.html) + +## gmc内存检查 + +> SRS有例子说明如何集成和调用gmc,参考:[research/gperftools/heap-checker](https://github.com/ossrs/srs/tree/4.0release/trunk/research/gperftools/heap-checker) + +> 本文只描述了GPERF工具的用法,关于完整的性能分析方法,参考 [SRS性能(CPU)、内存优化工具用法](https://www.jianshu.com/p/6d4a89359352) 或 [CSDN](https://winlin.blog.csdn.net/article/details/53503869) + +使用gmc时,需要将tcmalloc编译进去(或者动态链接),具体参考官方文档。除此之外,必须设置环境变量,gmc才被开启。 + +SRS开启gmc的方法是: +* 配置时加上gmc:`./configure --with-gperf --with-gmc` +* 编译srs:`make` +* 启动时指定环境变量:`env PPROF_PATH=./objs/pprof HEAPCHECK=normal 
./objs/srs -c conf/console.conf` +* 停止srs,打印gmc结果:`CTRL+C` 或者发送SIGINT信号给SRS + +备注:make编译SRS成功后,会打印出这些操作命令。 + +注意:必须导出pprof环境变量`PPROF_PATH`,否则函数的地址和符合对应不上 + +若能打印下面的信息,说明gmc成功启动: + +```bash +[winlin@dev6 srs]$ env PPROF_PATH=./objs/pprof HEAPCHECK=normal ./objs/srs -c conf/console.conf +WARNING: Perftools heap leak checker is active -- Performance may suffer +``` + +gmc的结果: + +```bash +Leak check _main_ detected leaks of 184 bytes in 4 objects +The 4 largest leaks: +Using local file ./objs/srs. +Leak of 56 bytes in 1 objects allocated from: + @ 46fae8 _st_stack_new + @ 46f6b1 st_thread_create + @ 46ea65 st_init + @ 433f41 SrsServer::initialize + @ 46e4ca main + @ 3855a1ec5d __libc_start_main +``` + +有的时候gmc显示符号有问题,无法显示函数,那么就直接运行pprof,gmc会有提示,譬如: +``` +pprof ./objs/srs "/tmp/srs.11469._main_-end.heap" --inuse_objects --lines --heapcheck --edgefraction=1e-10 --nodefraction=1e-10 --gv +``` + +需要改动两个地方: + +1. pprof改成`./objs/pprof`。 +1. 去掉--gv,直接进入命令行,然后输入top就可以看到。 + +结果如下: + +``` +[winlin@centos6 srs]$ ./objs/pprof ./objs/srs "/tmp/srs.11469._main_-end.heap" --inuse_objects --lines --heapcheck --edgefraction=1e-10 --nodefraction=1e-10 +Using local file ./objs/srs. +Using local file /tmp/srs.11469._main_-end.heap. +Welcome to pprof! For help, type 'help'. +(pprof) top +Total: 9 objects + 3 33.3% 33.3% 3 33.3% _st_netfd_new /home/winlin/srs/objs/st-1.9/io.c:136 + 3 33.3% 66.7% 3 33.3% _st_stack_new /home/winlin/srs/objs/st-1.9/stk.c:78 + 2 22.2% 88.9% 2 22.2% st_cond_new /home/winlin/srs/objs/st-1.9/sync.c:158 + 1 11.1% 100.0% 1 11.1% SrsPithyPrint::create_ingester /home/winlin/srs/src/app/srs_app_pithy_print.cpp:139 + 0 0.0% 100.0% 4 44.4% SrsAsyncCallWorker::start /home/winlin/srs/src/app/srs_app_async_call.cpp:70 + 0 0.0% 100.0% 4 44.4% SrsConnection::cycle /home/winlin/srs/src/app/srs_app_conn.cpp:88 + 0 0.0% 100.0% 2 22.2% SrsDvr::initialize /home/winlin/srs/src/app/srs_app_dvr.cpp:980 + 0 0.0% 100.0% 2 22.2% SrsDvrPlan::initialize /home/winlin/srs/src/app/srs_app_dvr.cpp:570 + 0 0.0% 100.0% 2 22.2% SrsHls::initialize /home/winlin/srs/src/app/srs_app_hls.cpp:1214 + 0 0.0% 100.0% 2 22.2% SrsHlsMuxer::initialize /home/winlin/srs/src/app/srs_app_hls.cpp:370 +``` + +## GMP内存性能 + +> SRS有例子说明如何集成和调用gmc,参考:[research/gperftools/heap-profiler](https://github.com/ossrs/srs/tree/4.0release/trunk/research/gperftools/heap-profiler) + +> 本文只描述了GPERF工具的用法,关于完整的性能分析方法,参考 [SRS性能(CPU)、内存优化工具用法](https://www.jianshu.com/p/6d4a89359352) 或 [CSDN](https://winlin.blog.csdn.net/article/details/53503869) + +使用gmc时,需要将tcmalloc编译进去(或者动态链接),具体参考官方文档。 + +SRS开启gmp的方法是: +* 配置时加上gmc:`./configure --with-gperf --with-gmp` +* 编译srs:`make` +* 正常启动srs就开始内存性能分析:`rm -f gperf.srs.gmp*; ./objs/srs -c conf/console.conf` +* 停止srs,生成gmp分析文件:`CTRL+C` 或者发送SIGINT信号给SRS +* 分析gmp文件:`./objs/pprof --text objs/srs gperf.srs.gmp*` + +备注:make编译SRS成功后,会打印出这些操作命令。 + +若能打印下面的信息,则表示成功启动gmp: + +```bash +[winlin@dev6 srs]$ ./objs/srs -c conf/console.conf +Starting tracking the heap +``` + +内存性能分析的结果如下: + +```bash +[winlin@dev6 srs]$ ./objs/pprof --text objs/srs gperf.srs.gmp* +Using local file objs/srs. +Using local file gperf.srs.gmp.0001.heap. 
+Total: 0.1 MB + 0.0 31.5% 31.5% 0.0 49.1% SrsConfDirective::parse_conf + 0.0 28.4% 59.9% 0.0 28.4% std::basic_string::_Rep::_S_create + 0.0 27.4% 87.3% 0.0 27.4% _st_epoll_init + 0.0 11.7% 99.0% 0.0 11.7% __gnu_cxx::new_allocator::allocate + 0.0 0.4% 99.5% 0.0 27.9% st_init +``` + +## GCP-CPU性能分析 + +> SRS有例子说明如何集成和调用gmc,参考:[research/gperftools/cpu-profiler](https://github.com/ossrs/srs/tree/4.0release/trunk/research/gperftools/cpu-profiler) + +> 本文只描述了GPERF工具的用法,关于完整的性能分析方法,参考 [SRS性能(CPU)、内存优化工具用法](https://www.jianshu.com/p/6d4a89359352) 或 [CSDN](https://winlin.blog.csdn.net/article/details/53503869) + +使用gcp时,需要将tcmalloc编译进去(或者动态链接),具体参考官方文档。 + +SRS开启gcp的方法是: +* 配置时加上gmc:`./configure --with-gperf --with-gcp` +* 编译srs:`make` +* 正常启动srs就开始内存性能分析:`rm -f gperf.srs.gcp*; ./objs/srs -c conf/console.conf` +* 停止srs,生成gmc分析文件:`CTRL+C` 或者发送SIGINT信号给SRS +* 分析gcp文件:`./objs/pprof --text objs/srs gperf.srs.gcp*` + +备注:make编译SRS成功后,会打印出这些操作命令。 + +性能分析的结果如下: + +```bash +[winlin@dev6 srs]$ ./objs/pprof --text objs/srs gperf.srs.gcp* +Using local file objs/srs. +Using local file gperf.srs.gcp. +Removing _L_unlock_16 from all stack traces. +Total: 20 samples + 8 40.0% 40.0% 8 40.0% 0x00007fff0ea35917 + 4 20.0% 60.0% 4 20.0% __epoll_wait_nocancel + 2 10.0% 70.0% 2 10.0% __read_nocancel + 1 5.0% 95.0% 1 5.0% memset + 1 5.0% 100.0% 1 5.0% tc_delete + 0 0.0% 100.0% 5 25.0% 0x00007f9fad927c4f + 0 0.0% 100.0% 2 10.0% SrsBuffer::ensure_buffer_bytes + 0 0.0% 100.0% 5 25.0% SrsClient::do_cycle + 0 0.0% 100.0% 5 25.0% SrsClient::fmle_publish + 0 0.0% 100.0% 1 5.0% SrsClient::process_publish_message +``` + +## 同时使用 + +可以同时开启: +* gmc和gmp:不支持同时开启。它们使用同一个框架,无法一起运行;参考文档的说明。 +* gmc和gcp:支持同时开启。检测内存泄漏和测试CPU性能瓶颈。 +* gmp和gcp:支持同时开启。检测内存瓶颈和CPU性能瓶颈。 + +备注:SRS的configure脚本会检查是否可以同时开启。 + +Winlin 2014.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/gperf) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/gprof.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/gprof.md new file mode 100644 index 00000000..49c6dafd --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/gprof.md @@ -0,0 +1,14 @@ +--- +title: GPROF +sidebar_label: GPROF +hide_title: false +hide_table_of_contents: false +--- + +# GProf + +最新更新,参考[SRS性能(CPU)、内存优化工具用法](https://www.jianshu.com/p/6d4a89359352) + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/gprof) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/hevc.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/hevc.md new file mode 100644 index 00000000..6bfabf21 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/hevc.md @@ -0,0 +1,329 @@ +--- +title: HEVC +sidebar_label: HEVC +hide_title: false +hide_table_of_contents: false +--- + +# HEVC + +HEVC,也就是H.265,是H.264的下一代编码,和AV1属于统一代的编解码器,H.265大概比H.264能节约一半的带宽,或者同等带宽下能提升 +一倍的清晰度和画质。 + +当然,H.265的问题在于支持的客户端还不够广泛,几乎所有的设备都支持H.264,包括很差性能的手机或者盒子,对于H.264的支持都是有专门 +的芯片,然而H.265虽然经过了差不多十年的发展,支持的设备还不够多。 在特定场景下,比如设备端明确支持H.265时,可以选择H.265, +否则还是选择H.264。 + +此外,传输协议对于H.265的支持也在逐步完善,但是还并非所有的协议都支持。MEPG-TS是最早支持H.265的,当然SRT和HLS是基于TS的协议, +所以也支持了;RTMP和HTTP-FLV,直到2023.03,终于[Enhanced RTMP](https://github.com/veovera/enhanced-rtmp)项目建立, +开始支持了HEVC和AV1;而WebRTC目前只有Safari支持了,据说Chrome还在开发中。 + +SRS 6.0正式支持了H.265的能力,若需要使用H.265功能,请切换到SRS 6.0版本。研发的详细过程请参考[#465](https://github.com/ossrs/srs/issues/465)。 + +## Overview + +SRS 支持 H.265(或 HEVC)的架构: + +```text +FFmpeg --RTMP(h.265)---> SRS ----RTMP/FLV/TS/HLS/WebRTC(h.265)--> 
Chrome/Safari +``` + +对于直播流: + +* [Chrome 105+](https://caniuse.com/?search=HEVC) 默认支持 HEVC,参见[这篇文章](https://zhuanlan.zhihu.com/p/541082191)。 + * 你可以通过 H5 视频直接播放 mp4,或者通过 MSE 播放 HTTP-FLV/HTTP-TS/HLS 等。 + * 请使用 [mpegts.js](https://github.com/xqq/mpegts.js) 播放带有 HEVC 的 HTTP-TS。 + * mpegts.js 计划支持带有 HEVC 的 HTTP-FLV,参见 [mpegts.js#64](https://github.com/xqq/mpegts.js/issues/64) +* [OBS 29+](https://github.com/obsproject/obs-studio/releases/tag/29.1.3) 支持 HEVC RTMP。 +* FFmpeg 或 ffplay 支持 libx265 + * FFmpeg 6支持HEVC RTMP,参考[637c761b](https://github.com/FFmpeg/FFmpeg/commit/637c761be1bf9c3e1f0f347c5c3a390d7c32b282)。 + * FFmpeg 4或5,需要一些补丁来支持 RTMP/FLV 上的 HEVC,参见下面的**[FFmpeg 工具](#ffmpeg-tools)**。 +* SRS 也支持 HEVC。 + * SRS 6.0已经支持了HEVC。 + * 原始的 HEVC 支持是由 [runner365](https://github.com/runner365) 在 [srs-gb28181/feature/h265](https://github.com/ossrs/srs-gb28181/commits/feature/h265) 中提供的。 + +> Note:要检查您的 Chrome 是否支持 HEVC,请打开 `chrome://gpu` 并搜索 `hevc`。 + +对于 WebRTC: + +* Chrome 目前(2022.11)不支持 HEVC,但支持 AV1,请参见 [#2324](https://github.com/ossrs/srs/pull/2324) +* Safari 支持 HEVC,如果用户启用它,请参见[本节](#safari-webrtc) +* SRS 也只支持 AV1,因为 Chrome 尚未支持 HEVC。 + +## Usage + +请确保您的SRS版本为`6.0.4+`,并使用h265构建: + +```bash +docker run --rm -it -p 1935:1935 -p 8080:8080 registry.cn-hangzhou.aliyuncs.com/ossrs/srs:6 \ + ./objs/srs -c conf/hevc.flv.conf +``` + +> Note:除了环境变量,您还可以使用`conf/hevc.flv.conf`或`conf/hevc.ts.conf`配置文件。 +> Note:建议使用`conf/hevc.ts.conf`,因为TS对于HEVC更好。 + +构建并修补FFmpeg,请参见[FFmpeg工具](#ffmpeg-tools): + +```bash +# 对于macOS +docker run --rm -it registry.cn-hangzhou.aliyuncs.com/ossrs/srs:encoder \ + ffmpeg -stream_loop -1 -re -i doc/source.flv \ + -acodec copy -vcodec libx265 -f flv rtmp://host.docker.internal/live/livestream + +# 对于linux +docker run --net=host --rm -it registry.cn-hangzhou.aliyuncs.com/ossrs/srs:encoder \ + ffmpeg -stream_loop -1 -re -i doc/source.flv \ + -acodec copy -vcodec libx265 -f flv rtmp://127.0.0.1/live/livestream +``` + +> Note:请将IP`host.docker.internal`更改为您的SRS的IP。 + +通过以下方式播放HEVC直播流: + +* HTTP-FLV(通过H5):[http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true) +* HLS(通过VLC或fflay):`http://localhost:8080/live/livestream.m3u8` + +> Note:请通过`SRS_VHOST_DASH_ENABLED=on`启用MPEG-DASH,然后使用VLC/ffplay播放流`http://localhost:8080/live/livestream.mpd` + +> Note:请通过`SRS_VHOST_HTTP_REMUX_MOUNT=[vhost]/[app]/[stream].ts`启用HTTP-TS,然后使用H5/VLC/ffplay播放流`http://localhost:8080/live/livestream.ts` + +> Note:如果要将直播流转换为MP4文件,请通过`SRS_VHOST_DVR_ENABLED=on SRS_VHOST_DVR_DVR_PATH=./objs/nginx/html/[app]/[stream].[timestamp].mp4`启用DVR MP4。 + +> Note:关于HEVC可用协议和工具的详细信息,请参见[SRS中的HEVC状态](#status-of-hevc-in-srs)。 + +> Note:H5播放器使用[mpegts.js](https://github.com/xqq/mpegts.js)。 + +## Status of HEVC in SRS + +The status of protocols and HEVC: + +* [x] PUSH HEVC over RTMP by FFmpeg. [v6.0.2](https://github.com/ossrs/srs/commit/178e40a5fc3cf0856ace914ae61696a73007f5bf) +* [x] PUSH HEVC over SRT by FFmpeg. [v6.0.20](https://github.com/ossrs/srs/pull/3366) +* [x] PUSH HEVC over RTMP by OBS. [#3464](https://github.com/ossrs/srs/issues/3464) https://github.com/obsproject/obs-studio/pull/8522 +* [x] PUSH HEVC over SRT by OBS. [v6.0.20](https://github.com/ossrs/srs/pull/3366) +* [x] PUSH HEVC over GB28181. [v6.0.25](https://github.com/ossrs/srs/pull/3408) +* [x] PULL HEVC over RTMP by FFmpeg, with [patch](#ffmpeg-tools) for FFmpeg. 
[v6.0.2](https://github.com/ossrs/srs/commit/178e40a5fc3cf0856ace914ae61696a73007f5bf) +* [x] PULL HEVC over HTTP-FLV by FFmpeg, with [patch](#ffmpeg-tools) for FFmpeg. [v6.0.2](https://github.com/ossrs/srs/commit/178e40a5fc3cf0856ace914ae61696a73007f5bf) +* [x] PULL HEVC over HTTP-TS by FFmpeg [v6.0.4](https://github.com/ossrs/srs/commit/70d5618979e5c8dc41b7cd87c78db7ca2b8a10e8) +* [x] PULL HEVC over HLS by FFmpeg [v6.0.11](https://github.com/ossrs/srs/commit/fff8d9863c3fba769b01782428257edf40f80a12) +* [x] PULL HEVC over MPEG-DASH by FFmpeg [v6.0.14](https://github.com/ossrs/srs/commit/edba2c25f13c0fa915bd8e8093a4005df6077858) +* [x] PULL HEVC over SRT by FFmpeg. [v6.0.20](https://github.com/ossrs/srs/pull/3366) +* [x] PUSH HEVC over WebRTC by Safari. [v6.0.34](https://github.com/ossrs/srs/pull/3441) +* [x] PULL HEVC over WebRTC by Safari. [v6.0.34](https://github.com/ossrs/srs/pull/3441) +* [ ] PUSH HEVC over WebRTC by Chrome/Firefox +* [ ] PULL HEVC over WebRTC by Chrome/Firefox +* [x] Play HEVC over HTTP-TS by [mpegts.js](https://github.com/xqq/mpegts.js), by Chrome 105+ MSE, **NO WASM**. [v6.0.1](https://github.com/ossrs/srs/commit/7e02d972ea74faad9f4f96ae881d5ece0b89f33b) +* [x] Play pure video(no audio) HEVC over HTTP-TS by [mpegts.js](https://github.com/xqq/mpegts.js). [v6.0.9](https://github.com/ossrs/srs/commit/d5bf0ba2da30698e18700b210d2b12eed5b21d29) +* [x] Play HEVC over HTTP-FLV by [mpegts.js](https://github.com/xqq/mpegts.js), by Chrome 105+ MSE, **NO WASM**. [v6.0.1](https://github.com/ossrs/srs/commit/7e02d972ea74faad9f4f96ae881d5ece0b89f33b) +* [ ] Play HEVC over HLS by [hls.js](https://github.com/video-dev/hls.js) +* [ ] Play HEVC over MPEG-DASH by [dash.js](https://github.com/Dash-Industry-Forum/dash.js) +* [x] Play HEVC over HTTP-TS by ffplay, by offical release. [v6.0.4](https://github.com/ossrs/srs/commit/70d5618979e5c8dc41b7cd87c78db7ca2b8a10e8) +* [x] PULL HEVC over RTMP by ffplay, with [patch](#ffmpeg-tools) for FFmpeg. [v6.0.2](https://github.com/ossrs/srs/commit/178e40a5fc3cf0856ace914ae61696a73007f5bf) +* [x] Play HEVC over HTTP-FLV by ffplay, with [patch](#ffmpeg-tools) for FFmpeg. [v6.0.2](https://github.com/ossrs/srs/commit/178e40a5fc3cf0856ace914ae61696a73007f5bf) +* [x] Play pure video(no audio) HEVC by ffplay. +* [x] Play HEVC over HLS by ffplay. [v6.0.11](https://github.com/ossrs/srs/commit/fff8d9863c3fba769b01782428257edf40f80a12) +* [x] Play HEVC over MPEG-DASH by ffplay. [v6.0.14](https://github.com/ossrs/srs/commit/edba2c25f13c0fa915bd8e8093a4005df6077858) +* [x] Play HEVC over SRT by ffplay. [v6.0.20](https://github.com/ossrs/srs/pull/3366) +* [x] Play HEVC over HTTP-TS by VLC, by official release. [v6.0.4](https://github.com/ossrs/srs/commit/70d5618979e5c8dc41b7cd87c78db7ca2b8a10e8) +* [x] Play HEVC over SRT by VLC, by official. [v6.0.20](https://github.com/ossrs/srs/pull/3366) +* [x] Play pure video(no audio) HEVC by VLC. +* [ ] Play HEVC over RTMP by VLC. +* [ ] Play HEVC over HTTP-FLV by VLC. +* [x] Play HEVC over HLS by VLC. [v6.0.11](https://github.com/ossrs/srs/commit/fff8d9863c3fba769b01782428257edf40f80a12) +* [x] Play HEVC over MPEG-DASH by VLC. [v6.0.14](https://github.com/ossrs/srs/commit/edba2c25f13c0fa915bd8e8093a4005df6077858) +* [x] DVR HEVC to MP4/FLV file. [v6.0.14](https://github.com/ossrs/srs/commit/edba2c25f13c0fa915bd8e8093a4005df6077858) +* [x] HTTP API contains HEVC metadata. +* [ ] HTTP Callback takes HEVC metadata. +* [ ] Prometheus Exporter supports HEVC metadata. +* [ ] Improve coverage for HEVC. 
+* [x] Add regression/blackbox tests for HEVC. +* [ ] Supports benchmark for HEVC by [srs-bench](https://github.com/ossrs/srs-bench). +* [x] Support patched FFmpeg for SRS dockers: [CentOS7](https://github.com/ossrs/dev-docker/commit/0691d016adfe521f77350728d15cead8086d527d), [Ubuntu20](https://github.com/ossrs/dev-docker/commit/0e36323d15544ffe2901d10cfd255d9ef08fb250) and [Encoder](https://github.com/ossrs/dev-docker/commit/782bb31039653f562e0765a0c057d9f9babd1d1f). +* [x] Update [WordPress plugin SrsPlayer](https://github.com/ossrs/WordPress-Plugin-SrsPlayer) for HEVC. +* [ ] Update [srs-cloud](https://github.com/ossrs/srs-cloud) for HEVC. +* [ ] Edge server supports publish HEVC stream to origin. +* [ ] Edge server supprots play HEVC stream from origin. +* [ ] [HEVC: Error empty SPS/PPS when coverting RTMP to HEVC.](https://github.com/ossrs/srs/issues/3407) + +> Note: We're merging HEVC support to SRS 6.0, the original supports for HEVC is [srs-gb28181/feature/h265](https://github.com/ossrs/srs-gb28181/commits/feature/h265) by [runner365](https://github.com/runner365) + +## FFmpeg Tools + +镜像 `ossrs/srs:encoder` 或 `ossrs/srs:6` 中的 FFmpeg 是使用 libx265 构建的,并且支持 RTMP 上的 HEVC。因此,您可以直接使用: + +```bash +docker run --rm -it --net host \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:encoder \ + ffmpeg -re -i doc/source.flv -acodec copy -vcodec libx265 \ + -f flv rtmp://localhost/live/livestream +``` + +如果您想从代码构建,请阅读以下说明。在构建 FFmpeg 之前,我们必须先构建 [libx264](https://www.videolan.org/developers/x264.html): + +```bash +git clone https://code.videolan.org/videolan/x264.git ~/git/x264 +cd ~/git/x264 +./configure --prefix=$(pwd)/build --disable-asm --disable-cli --disable-shared --enable-static +make -j10 +make install +``` + +然后是编译 [libx265](https://www.videolan.org/developers/x265.html): + +```bash +git clone https://bitbucket.org/multicoreware/x265_git.git ~/git/x265_git +cd ~/git/x265_git/build/linux +cmake -DCMAKE_INSTALL_PREFIX=$(pwd)/build -DENABLE_SHARED=OFF ../../source +make -j10 +make install +``` + +请注意,FFmpeg 6.0 在以下提交之前不支持 RTMP 上的 HEVC [637c761b](https://github.com/FFmpeg/FFmpeg/commit/637c761be1bf9c3e1f0f347c5c3a390d7c32b282): + +``` +commit 637c761be1bf9c3e1f0f347c5c3a390d7c32b282 +Author: Steven Liu +Date: Mon Aug 28 09:59:24 2023 +0800 + + avformat/rtmpproto: support enhanced rtmp + + add option named rtmp_enhanced_codec, + it would support hvc1,av01,vp09 now, + the fourcc is using Array of strings. + + Signed-off-by: Steven Liu +``` + +因此,如果您使用的是 FFmpeg 6,您可以通过以下命令直接构建 FFmpeg,无需任何补丁: + +```bash +git clone -b master https://github.com/FFmpeg/FFmpeg.git ~/git/FFmpeg +cd ~/git/FFmpeg +env PKG_CONFIG_PATH=~/git/x264/build/lib/pkgconfig:~/git/x265_git/build/linux/build/lib/pkgconfig \ +./configure \ + --prefix=$(pwd)/build \ + --enable-gpl --enable-nonfree --enable-pthreads --extra-libs=-lpthread \ + --disable-asm --disable-x86asm --disable-inline-asm \ + --enable-decoder=aac --enable-decoder=aac_fixed --enable-decoder=aac_latm --enable-encoder=aac \ + --enable-libx264 --enable-libx265 \ + --pkg-config-flags='--static' +make -j10 +``` + +推送HEVC over RTMP 到 SRS: + +```bash +./ffmpeg -stream_loop -1 -re -i ~/srs/doc/source.flv -acodec copy -vcodec libx265 \ + -f flv rtmp://localhost/live/livestream +``` + +通过 ffplay 播放 HEVC over RTMP: + +```bash +./ffplay rtmp://localhost/live/livestream +``` + +它就像魔术一样奏效! 
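+
+推流成功后,除了用ffplay播放,还可以通过SRS的HTTP API确认服务器收到的确实是HEVC流。下面是一个简单的验证示意,假设HTTP API监听在默认的1985端口,编码字段的具体取值以实际版本的返回为准:
+
+```bash
+# 查询当前的流信息,过滤出编码字段,视频编码预期显示为HEVC(不同版本的字符串可能略有差异)
+curl -s http://localhost:1985/api/v1/streams | grep -oE '"codec": *"[^"]*"'
+```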
+ +如果您想在 FFmpeg 4.1 或 5.1 中使用 HEVC over RTM,请阅读以下说明。请下载 FFmepg 并切换到 5.1: + +> Note: The [specfication](https://github.com/ksvc/FFmpeg/wiki) and [usage](https://github.com/ksvc/FFmpeg/wiki/hevcpush) +to support HEVC over RTMP or FLV. There is a [patch for FFmpeg 4.1/5.1/6.0](https://github.com/runner365/ffmpeg_rtmp_h265) +from [runner365](https://github.com/runner365) for FFmpeg to support HEVC over RTMP or FLV. There is also a +[patch](https://github.com/VCDP/CDN/blob/master/FFmpeg_patches/0001-Add-SVT-HEVC-FLV-support-on-FFmpeg.patch) +from Intel for this feature. + +```bash +git clone -b n5.1.2 https://github.com/FFmpeg/FFmpeg.git ~/git/FFmpeg +``` + +Then, patch for [HEVC over RTMP/FLV](https://github.com/runner365/ffmpeg_rtmp_h265): + +```bash +git clone -b 5.1 https://github.com/runner365/ffmpeg_rtmp_h265.git ~/git/ffmpeg_rtmp_h265 +cp ~/git/ffmpeg_rtmp_h265/flv.h ~/git/FFmpeg/libavformat/ +cp ~/git/ffmpeg_rtmp_h265/flv*.c ~/git/FFmpeg/libavformat/ +``` + +最后,请参考之前的操作方法编译FFmpeg即可。 + +## MSE for HEVC + +[MSE](https://caniuse.com/?search=mse) is a base technology for [mpegts.js](https://github.com/xqq/mpegts.js), [hls.js](https://github.com/video-dev/hls.js/) and [dash.js](https://github.com/Dash-Industry-Forum/dash.js). + +Now [Chrome 105+](https://caniuse.com/?search=HEVC) supports HEVC by default, see [this post](https://zhuanlan.zhihu.com/p/541082191), which means, MSE(Chrome 105+) is available for HEVC. + +You can verify this feature, by generating a HEVC mp4 file: + +```bash +ffmpeg -i ~/git/srs/trunk/doc/source.flv -acodec copy \ + -vcodec libx265 -y source.hevc.mp4 +``` + +> Note: Please make sure your FFmpeg is 5.0 and libx265 is enabled. + +Open `source.hevc.mp4` in Chrome 105+ directly, it should works. + +You can also move the file to SRS webserver: + +```bash +mkdir -p ~/git/srs/trunk/objs/nginx/html/vod/ +mv source.hevc.mp4 ~/git/srs/trunk/objs/nginx/html/vod +``` + +Then open by [srs-player](http://localhost:8080/players/srs_player.html?app=vod&stream=source.hevc.mp4&autostart=true) + +## Safari WebRTC + +Safari supports WebRTC, if you enable it by: + +* English version: `Develop > Experimental Features > WebRTC H265 codec` +* Chinese version: `Development > Experimental Features > WebRTC H265 codec` + +Then open the url in safari, to publish or play WebRTC stream: + +* Play [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream&codec=hevc](http://localhost:8080/players/whep.html?autostart=true&codec=hevc) +* Publish [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream&codec=hevc](http://localhost:8080/players/whip.html?autostart=true&codec=hevc) + +Please follow other section to publish HEVC stream. + +## Thanks for Contributors + +There is a list of commits and contributors about HEVC in SRS: + +* [H265: For #1747, Support HEVC/H.265 in SRT/RTMP/HLS.](https://github.com/ossrs/srs-gb28181/commit/3ca11071b45495e82d2d6958e5d0f7eab05e71e5) +* [H265: For #1747, Fix build fail bug for H.265](https://github.com/ossrs/srs-gb28181/commit/e355f3c37228f3602c88fed68e8fe5e6ba1153ea) +* [H265: For #1747, GB28181 support h.265 (#2037)](https://github.com/ossrs/srs-gb28181/commit/b846217bc7f94034b33bdf918dc3a49fb17947e0) +* [H265: fix some important bugs (#2156)](https://github.com/ossrs/srs-gb28181/commit/26218965dd083d13173af6eb31fcdf9868b753c6) +* [H265: Deliver the right hevc nalu and dump the wrong nalu. (#2447)](https://github.com/ossrs/srs-gb28181/commit/a13b9b54938a14796abb9011e7a8ee779439a452) +* [H265: Fix multi nal hevc frame demux fail. 
#2494](https://github.com/ossrs/srs-gb28181/commit/6c5e6090d7c82eb37530e109c230cabaedf948e1) +* [H265: Fix build error #2657 #2664](https://github.com/ossrs/srs-gb28181/commit/eac99e19fba6063279b9e47272523014f5e3334a) +* [H265: Update mpegts demux in srt. #2678](https://github.com/ossrs/srs-gb28181/commit/391c1426fc484c990e4324a4ae2f0de900074578) +* [H265: Fix the stat issue for h265. (#1949)](https://github.com/ossrs/srs-gb28181/commit/b4486e3b51281b4c227b2cc4f58d2b06db599ce0) +* [H265: Add h265 codec written support for MP4 format. (#2697)](https://github.com/ossrs/srs-gb28181/commit/3175d7e26730a04b27724e55dc95ef86c1f2886e) +* [H265: Add h265 for SRT.](https://github.com/runner365/srs/commit/0fa86e4f23847e8a46e3d0e91e0acd2c27047e11) + +We will merge some of these commits to SRS 6.0, but not all commits. + +* [PULL HEVC over WebRTC by Safari. v6.0.34](https://github.com/ossrs/srs/pull/3441) +* [GB: Support H.265 for GB28181. v6.0.25 (#3408)](https://github.com/ossrs/srs/pull/3408) +* [H265: Support HEVC over SRT. v6.0.20 (#465) (#3366)](https://github.com/ossrs/srs/pull/3366) +* [H265: Support DVR HEVC stream to MP4. v6.0.14](https://github.com/ossrs/srs/pull/3360) +* HLS: Support HEVC over HLS. v6.0.11 +* [HEVC: The codec information is incorrect. v6.0.5](https://github.com/ossrs/srs/issues/3271) +* FFmpeg support libx265 and HEVC over RTMP/FLV: [CentOS7](https://github.com/ossrs/dev-docker/commit/0691d016adfe521f77350728d15cead8086d527d), [Ubuntu20](https://github.com/ossrs/dev-docker/commit/0e36323d15544ffe2901d10cfd255d9ef08fb250) and [Encoder](https://github.com/ossrs/dev-docker/commit/782bb31039653f562e0765a0c057d9f9babd1d1f). +* [H265: Support HEVC over HTTP-TS. v6.0.4](https://github.com/ossrs/srs/commit/70d5618979e5c8dc41b7cd87c78db7ca2b8a10e8) +* [H265: Support parse multiple NALUs in a frame. v6.0.3](https://github.com/ossrs/srs/commit/f316e9a0de3a892d25f2d8e7efd28ee9334f5bd6) +* [H265: Support HEVC over RTMP or HTTP-FLV. v6.0.2](https://github.com/ossrs/srs/commit/178e40a5fc3cf0856ace914ae61696a73007f5bf) +* [H265: Update mpegts.js to play HEVC over HTTP-TS/FLV. v6.0.1](https://github.com/ossrs/srs/commit/7e02d972ea74faad9f4f96ae881d5ece0b89f33b) + +## Known Issues + +1. HEVC over Safari WebRTC, only support WebRTC to WebRTC, doesn't support converting to RTMP. +2. Chrome/Firefox does not support HEVC, no any plan as I know. +3. Almost all browsers supports MSE, except iOS. HEVC over MSE requires hardware decoder. +4. Apart from mpegts.js, other H5 players such as hls.js/dash.js doesn't support HEVC. 
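+
+针对上面第4点,由于hls.js暂不支持HEVC,若需要在浏览器之外验证HEVC的HLS流,可以用ffplay(或VLC)直接播放。下面是一个简单的示意,假设已按前文配置开启了HLS:
+
+```bash
+# 用ffplay播放HEVC的HLS流,VLC也可以直接打开同一个地址
+ffplay http://localhost:8080/live/livestream.m3u8
+```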
+ +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/hevc) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/hls.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/hls.md new file mode 100644 index 00000000..e0355d2c --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/hls.md @@ -0,0 +1,506 @@ +--- +title: HLS +sidebar_label: HLS +hide_title: false +hide_table_of_contents: false +--- + +# HLS + +HLS是适配性和兼容性最好的流媒体协议,没有之一。这个世界上几乎所有的设备都能支持HLS协议,包括PC、Android、iOS、OTT、SmartTV等等。 +各种各样的浏览器对HLS的支持也很好,包括Chrome、Safari、Firefox、Edge等等,包括移动端的浏览器。 + +如果你的用户群体是多种多样的,特别是设备性能还不太好,那么HLS是最好的选择。如果你希望兼容更多的设备,那么HLS是最好的选择。 +如果你希望在任何一个CDN都能分发你的直播流,在全球范围内分发你的直播流,那么HLS是最好的选择。 + +当然了,HLS并不是没有毛病,它的问题就是延迟比较高,一般在30秒左右。虽然经过优化可以到8秒左右,但是不同播放器的行为可能不一致。 +对比起其他流媒体协议,优化后的延迟也很高。因此如果你特别在意直播的延迟,那么请使用[RTMP](./rtmp.md)或者 +[HTTP-FLV](./flv.md)协议。 + +HLS主要的应用场景包括: +* 跨平台:PC主要的直播方案是HLS,可用hls.js库播放HLS。所以实际上如果选一种协议能跨PC/Android/IOS,那就是HLS。 +* iOS上苛刻的稳定性要求:iOS上最稳定的当然是HLS,稳定性不差于RTMP和HTTP-FLV的稳定性。 +* 友好的CDN分发方式:HLS分发的基础是HTTP,所以CDN的接入和分发会比RTMP更加完善。HLS能在各种CDN之间切换。 +* 简单问题少:HLS作为流媒体协议非常简单,apple支持得也很完善。Android对HLS的支持也会越来越完善。 + +HLS协议是SRS的核心协议,将会持续维护和更新,不断完善对HLS协议的支持。SRS将RTMP、SRT或WebRTC流,转换成HLS流。 +特别是WebRTC,SRS实现了音频转码的能力。 + +## Usage + +SRS内置HLS的支持,可以用[docker](./getting-started.md)或者[从源码编译](./getting-started-build.md): + +```bash +docker run --rm -it -p 1935:1935 -p 8080:8080 registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 \ + ./objs/srs -c conf/hls.conf +``` + +使用 [FFmpeg(点击下载)](https://ffmpeg.org/download.html) 或 [OBS(点击下载)](https://obsproject.com/download) 推流: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +打开下面的页面播放流(若SRS不在本机,请将localhost更换成服务器IP): + +* HLS by SRS player: [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?stream=livestream.m3u8) + +> Note: 请等待大约10秒左右,再播放流,否则会播放失败,因为生成第一个切片需要一些时间。 + +## Config + +HLS相关的配置如下: + +```bash +vhost __defaultVhost__ { + hls { + # whether the hls is enabled. + # if off, do not write hls(ts and m3u8) when publish. + # Overwrite by env SRS_VHOST_HLS_ENABLED for all vhosts. + # default: off + enabled on; + + # the hls fragment in seconds, the duration of a piece of ts. + # Overwrite by env SRS_VHOST_HLS_HLS_FRAGMENT for all vhosts. + # default: 10 + hls_fragment 10; + # the hls m3u8 target duration ratio, + # EXT-X-TARGETDURATION = hls_td_ratio * hls_fragment // init + # EXT-X-TARGETDURATION = max(ts_duration, EXT-X-TARGETDURATION) // for each ts + # Overwrite by env SRS_VHOST_HLS_HLS_TD_RATIO for all vhosts. + # default: 1.0 + hls_td_ratio 1.0; + # the audio overflow ratio. + # for pure audio, the duration to reap the segment. + # for example, the hls_fragment is 10s, hls_aof_ratio is 1.2, + # the segment will reap to 12s for pure audio. + # Overwrite by env SRS_VHOST_HLS_HLS_AOF_RATIO for all vhosts. + # default: 1.2 + hls_aof_ratio 1.2; + # the hls window in seconds, the number of ts in m3u8. + # Overwrite by env SRS_VHOST_HLS_HLS_WINDOW for all vhosts. + # default: 60 + hls_window 60; + # the error strategy. can be: + # ignore, disable the hls. + # disconnect, require encoder republish. + # continue, ignore failed try to continue output hls. + # Overwrite by env SRS_VHOST_HLS_HLS_ON_ERROR for all vhosts. + # default: continue + hls_on_error continue; + # the hls output path. 
+ # the m3u8 file is configured by hls_path/hls_m3u8_file, the default is: + # ./objs/nginx/html/[app]/[stream].m3u8 + # the ts file is configured by hls_path/hls_ts_file, the default is: + # ./objs/nginx/html/[app]/[stream]-[seq].ts + # @remark the hls_path is compatible with srs v1 config. + # Overwrite by env SRS_VHOST_HLS_HLS_PATH for all vhosts. + # default: ./objs/nginx/html + hls_path ./objs/nginx/html; + # the hls m3u8 file name. + # we supports some variables to generate the filename. + # [vhost], the vhost of stream. + # [app], the app of stream. + # [stream], the stream name of stream. + # Overwrite by env SRS_VHOST_HLS_HLS_M3U8_FILE for all vhosts. + # default: [app]/[stream].m3u8 + hls_m3u8_file [app]/[stream].m3u8; + # the hls ts file name. + # we supports some variables to generate the filename. + # [vhost], the vhost of stream. + # [app], the app of stream. + # [stream], the stream name of stream. + # [2006], replace this const to current year. + # [01], replace this const to current month. + # [02], replace this const to current date. + # [15], replace this const to current hour. + # [04], replace this const to current minute. + # [05], replace this const to current second. + # [999], replace this const to current millisecond. + # [timestamp],replace this const to current UNIX timestamp in ms. + # [seq], the sequence number of ts. + # [duration], replace this const to current ts duration. + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/dvr#custom-path + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/delivery-hls#hls-config + # Overwrite by env SRS_VHOST_HLS_HLS_TS_FILE for all vhosts. + # default: [app]/[stream]-[seq].ts + hls_ts_file [app]/[stream]-[seq].ts; + # the hls entry prefix, which is base url of ts url. + # for example, the prefix is: + # http://your-server/ + # then, the ts path in m3u8 will be like: + # http://your-server/live/livestream-0.ts + # http://your-server/live/livestream-1.ts + # ... + # Overwrite by env SRS_VHOST_HLS_HLS_ENTRY_PREFIX for all vhosts. + # optional, default to empty string. + hls_entry_prefix http://your-server; + # the default audio codec of hls. + # when codec changed, write the PAT/PMT table, but maybe ok util next ts. + # so user can set the default codec for mp3. + # the available audio codec: + # aac, mp3, an + # Overwrite by env SRS_VHOST_HLS_HLS_ACODEC for all vhosts. + # default: aac + hls_acodec aac; + # the default video codec of hls. + # when codec changed, write the PAT/PMT table, but maybe ok util next ts. + # so user can set the default codec for pure audio(without video) to vn. + # the available video codec: + # h264, vn + # Overwrite by env SRS_VHOST_HLS_HLS_VCODEC for all vhosts. + # default: h264 + hls_vcodec h264; + # whether cleanup the old expired ts files. + # Overwrite by env SRS_VHOST_HLS_HLS_CLEANUP for all vhosts. + # default: on + hls_cleanup on; + # If there is no incoming packets, dispose HLS in this timeout in seconds, + # which removes all HLS files including m3u8 and ts files. + # @remark 0 to disable dispose for publisher. + # @remark apply for publisher timeout only, while "etc/init.d/srs stop" always dispose hls. + # Overwrite by env SRS_VHOST_HLS_HLS_DISPOSE for all vhosts. + # default: 120 + hls_dispose 120; + # whether wait keyframe to reap segment, + # if off, reap segment when duration exceed the fragment, + # if on, reap segment when duration exceed and got keyframe. + # Overwrite by env SRS_VHOST_HLS_HLS_WAIT_KEYFRAME for all vhosts. 
+ # default: on + hls_wait_keyframe on; + # whether use floor for the hls_ts_file path generation. + # if on, use floor(timestamp/hls_fragment) as the variable [timestamp], + # and use enhanced algorithm to calc deviation for segment. + # @remark when floor on, recommend the hls_segment>=2*gop. + # Overwrite by env SRS_VHOST_HLS_HLS_TS_FLOOR for all vhosts. + # default: off + hls_ts_floor off; + # the max size to notify hls, + # to read max bytes from ts of specified cdn network, + # @remark only used when on_hls_notify is config. + # Overwrite by env SRS_VHOST_HLS_HLS_NB_NOTIFY for all vhosts. + # default: 64 + hls_nb_notify 64; + + # Whether enable hls_ctx for HLS streaming, for which we create a "fake" connection for HTTP API and callback. + # For each HLS streaming session, we use a child m3u8 with a session identified by query "hls_ctx", it simply + # work as the session id. + # Once the HLS streaming session is created, we will cleanup it when timeout in 2*hls_window seconds. So it + # takes a long time period to identify the timeout. + # Now we got a HLS stremaing session, just like RTMP/WebRTC/HTTP-FLV streaming, we're able to stat the session + # as a "fake" connection, do HTTP callback when start playing the HLS streaming. You're able to do querying and + # authentication. + # Note that it will make NGINX edge cache always missed, so never enable HLS streaming if use NGINX edges. + # Overwrite by env SRS_VHOST_HLS_HLS_CTX for all vhosts. + # Default: on + hls_ctx on; + # For HLS pseudo streaming, whether enable the session for each TS segment. + # If enabled, SRS HTTP API will show the statistics about HLS streaming bandwidth, both m3u8 and ts file. Please + # note that it also consumes resource, because each ts file should be served by SRS, all NGINX cache will be + # missed because we add session id to each ts file. + # Note that it will make NGINX edge cache always missed, so never enable HLS streaming if use NGINX edges. + # Overwrite by env SRS_VHOST_HLS_HLS_TS_CTX for all vhosts. + # Default: on + hls_ts_ctx on; + + # whether using AES encryption. + # Overwrite by env SRS_VHOST_HLS_HLS_KEYS for all vhosts. + # default: off + hls_keys on; + # the number of clear ts which one key can encrypt. + # Overwrite by env SRS_VHOST_HLS_HLS_FRAGMENTS_PER_KEY for all vhosts. + # default: 5 + hls_fragments_per_key 5; + # the hls key file name. + # we supports some variables to generate the filename. + # [vhost], the vhost of stream. + # [app], the app of stream. + # [stream], the stream name of stream. + # [seq], the sequence number of key corresponding to the ts. + # Overwrite by env SRS_VHOST_HLS_HLS_KEY_FILE for all vhosts. + hls_key_file [app]/[stream]-[seq].key; + # the key output path. + # the key file is configed by hls_path/hls_key_file, the default is: + # ./objs/nginx/html/[app]/[stream]-[seq].key + # Overwrite by env SRS_VHOST_HLS_HLS_KEY_FILE_PATH for all vhosts. + hls_key_file_path ./objs/nginx/html; + # the key root URL, use this can support https. + # @remark It's optional. + # Overwrite by env SRS_VHOST_HLS_HLS_KEY_URL for all vhosts. + hls_key_url https://localhost:8080; + + # Special control controls. + ########################################### + # Whether calculate the DTS of audio frame directly. + # If on, guess the specific DTS by AAC samples, please read https://github.com/ossrs/srs/issues/547#issuecomment-294350544 + # If off, directly turn the FLV timestamp to DTS, which might cause corrupt audio stream. 
+ # @remark Recommend to set to off, unless your audio stream sample-rate and timestamp is not correct. + # Overwrite by env SRS_VHOST_HLS_HLS_DTS_DIRECTLY for all vhosts. + # Default: on + hls_dts_directly on; + + # on_hls, never config in here, should config in http_hooks. + # for the hls http callback, @see http_hooks.on_hls of vhost hooks.callback.srs.com + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/delivery-hls#http-callback + # @see https://ossrs.io/lts/en-us/docs/v4/doc/delivery-hls#http-callback + + # on_hls_notify, never config in here, should config in http_hooks. + # we support the variables to generate the notify url: + # [app], replace with the app. + # [stream], replace with the stream. + # [param], replace with the param. + # [ts_url], replace with the ts url. + # for the hls http callback, @see http_hooks.on_hls_notify of vhost hooks.callback.srs.com + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/delivery-hls#on-hls-notify + # @see https://ossrs.io/lts/en-us/docs/v4/doc/delivery-hls#on-hls-notify + } +} +``` + +> Note: 这些配置只是播放HLS相关的配置,推流的配置请根据你的协议,比如参考[RTMP](./rtmp.md#config)或者[SRT](./srt.md#config)或者[WebRTC](./webrtc.md#config)的推流配置。 + +主要配置项如下: +* enabled:是否开启HLS,on/off,默认off。 +* hls_fragment:秒,指定ts切片的最小长度。实际上ts文件的长度请参考[HLS TS Duration](#hls-ts-duration)的详细说明。 +* hls_td_ratio:正常切片时长倍数。实际上ts文件的长度请参考[HLS TS Duration](#hls-ts-duration)的详细说明。 +* hls_wait_keyframe: 是否按top切片,即等待到关键帧后开始切片。实际上ts文件的长度请参考[HLS TS Duration](#hls-ts-duration)的详细说明。 +* hls_aof_ratio: 纯音频切片时长倍数。纯音频时,当ts时长超过配置的ls_fragment乘以这个系数时就切割文件。实际上ts文件的长度请参考[HLS TS Duration](#hls-ts-duration)的详细说明。 +* hls_window:秒,指定HLS窗口大小,即m3u8中ts文件的时长之和,决定了m3u8中ts文件数量,详细参考[HLS TS Files](#hls-ts-files)。 +* hls_path:HLS的m3u8和ts文件保存的路径。m3u8和ts文件都保存在这个目录中。 +* hls_m3u8_file: HLS的m3u8文件名,包含可替换的`[vhost]`,`[app]`和`[stream]`变量。 +* hls_ts_file: HLS的ts文件名,包含可替换的一系列变量,参考[dvr variables](./dvr.md#custom-path),另外,`[seq]`是ts的seqence number。 +* hls_entry_prefix: TS的base url。可选默认为空字符串;非空时加在ts前面作为base url。 +* hls_acodec: 默认的音频编码。当流的编码改变时,会更新PMT/PAT信息;默认是aac,因此默认的PMT/PAT信息是aac;如果流是mp3,那么可以配置这个参数为mp3,避免PMT/PAT改变。 +* hls_vcodec: 默认的视频编码。当流的编码改变时,会更新PMT/PAT信息;默认是h264。如果是纯音频HLS,可以配置为vn,可以减少SRS检测纯音频的时间,直接进入纯音频模式。 +* hls_cleanup: 是否删除过期的ts切片,不在hls_window中就是过期。可以关闭清除ts切片,实现时移和存储,使用自己的切片管理系统。 +* hls_dispose: 在没有流时,HLS清理的过期时间(秒),系统重启或者超过这个时间时,清理HLS的所有文件,包括m3u8和ts。若配置为0,则不清理。 +* hls_nb_notify: 从notify服务器读取数据的长度。 +* on_hls: 当切片生成时,回调这个url,使用POST回调。用来和自己的系统集成,譬如实现切片移动等。 +* on_hls_notify: 当切片生成时,回调这个url,使用GET回调。用来和系统集成,可以使用`[ts_url]`变量,实现预分发(即下载一次ts片)。 + +## HLS TS Duration + +HLS的TS切片时长如何决定的?由配置和流的特征决定的。 + +若有视频,切片时长为`max(hls_fragment*hls_td_ratio, gop_size*N)`,即`hls_fragment`和`gop_size`中的最大值。 +而`gop_size`则是由编码器决定的,比如OBS可以设置GOP大小单位是秒,而FFmpeg则是帧数量结合帧率可以换算成秒。 + +举个例子,若流的帧率是25,GOP是50帧,那么`gop_size`就是2秒: + +* 若`hls_fragment`是10秒,那么最终的TS切片时长就是10秒。 +* 若`hls_fragment`是5秒,那么最终的TS切片时长就是6秒,此时有3个GOP。 +* 若`hls_fragment`是5秒,`hls_td_ratio`是2,那么最终的TS切片时长就是10秒。 + +若配置了`hls_wait_keyframe off`,则不再参考GOP大小,无论GOP多大,假设GOP是10秒: + +* 若`hls_fragment`是10秒,那么最终的TS切片时长就是10秒。 +* 若`hls_fragment`是5秒,那么最终的TS切片时长就是5秒。 +* 若`hls_fragment`是3秒,`hls_td_ratio`是2,那么最终的TS切片时长就是6秒。 + +> Note: 由此可见,关闭`hls_wait_keyframe`后,可以减少切片大小,从而减少延迟,但是由于以非关键帧开头,有些播放器开始播放时可能会有花屏。 + +若无视频,即纯音频HLS,则切片无法根据GOP大小决定,则是根据`hls_fragment*hls_aof_ratio`决定的: + +* 若`hls_fragment`是10秒,`hls_aof_ratio`是1.2,那么最终的TS切片时长就是12秒。 +* 若`hls_fragment`是5秒,`hls_aof_ratio`是1,那么最终的TS切片时长就是5秒。 + +注意若切片时长异常,超过了一定的大小,一般是3倍切片的最大长度,则会直接丢弃。 + +## HLS TS Files + +m3u8中的TS文件数量,由TS的时长和`hls_window`决定的。 当TS的总时长,超过`hls_window`后,丢弃第一个m3u8中的第一个切片, 
+直到ts的总时长在这个配置项范围之内。 + +即SRS保证下面的公式: + +```bash +hls_window >= sum(m3u8中每个ts的时长) +``` + +举个例子,若`hls_window`是60秒,`hls_fragment`是10秒,若TS实际切片时长是10秒,那么m3u8中的ts文件数量是6个。 +当然TS实际切片时长,可能比`hls_fragment`要大,具体参考[HLS TS Duration](#hls-ts-duration)。 + +## HTTP Callback + +可以配置`on_hls`实现回调,应该在`http_hooks`中配置,而不是在hls中配置。 + +备注:HLS热备可以基于这个回调实现,参考[#351](https://github.com/ossrs/srs/issues/351). + +备注:HLS热备必须保证两个服务器的切片完全一样,因为负载均衡器或者边缘可能从两个服务器取切片,必须完全一样。因此在切片上保证两个服务器切片完全一致,是一个非常非常复杂的流媒体问题;但是通过业务系统和回调,通过选择两个服务器的切片的方式,可以做到非常简单可靠的HLS热备系统。 + +## HLS Authentication + +SRS支持HLS客户端播放和在线人数的统计,默认会开启`hls_ctx`和`hls_ts_ctx`,这样HLS和其他协议一样,可以通过回调实现鉴权播放和数据统计。 +比如在播放HLS时,通过`on_play`回调返回错误,实现拒绝客户端播放的功能。 + +```bash +vhost __defaultVhost__ { + hls { + enabled on; + hls_ctx on; + hls_ts_ctx on; + } +} +``` + +但这个功能会导致HLS在CDN的缓存失效,因为每个播放都会有不同的ctx_id,类似会话ID的功能。因此,在[HLS Cluster](./nginx-for-hls.md) +中必须关闭这两个选项。 + +## HLS Dispose + +若停止推流,HLS由于切片文件依然存在,客户端依然还可以播放,不过播放的是之前的内容。 + +有时候直播中,临时需要停止推流后,更换编码参数或者推流设备,然后重新推流。因此SRS不能在停止推流时就删除HLS的文件。 + +SRS默认是会在`hls_dispose`配置的时间后,再清理HLS的切片文件。这个时间默认是120秒,即2分钟后,清理HLS的切片文件。 + +```bash +vhost __defaultVhost__ { + hls { + enabled on; + hls_dispose 120; + } +} +``` + +若需要更快清理,则可以缩短这个清理时间,但这个配置不能配置太短,建议不要小于`hls_window`,否则可能会在重新推流时, +出现过早清理的情况,导致播放器无法访问到HLS流。 + +## HLS in RAM + +若需要提高HLS的并发数量,可以试用内存直接分发HLS,不写入磁盘。 + +可以挂载内存为磁盘目录,然后将HLS切片写入内存盘: + +```bash +mkdir -p /ramdisk && +mount -o size=7G -t tmpfs none /ramdisk +``` + +> Note: 取消挂载内存盘,可以使用命令`unmount /randisk`即可。 + +> Note: 若流路数不多,需要磁盘空间不大,可以将HLS切片写入`/tmp`目录,`/tmp`默认就是内存盘。 + +然后配置`hls_path`,或者软链接目录即可。 + +## HLS Delivery Cluster + +部署HLS的分发集群,边缘分发集群,实现自建CDN分发HLS,解决海量的观看问题,请参考[Nginx for HLS](./nginx-for-hls.md)。 + +## HLS Low Latency + +如何降低HLS延迟?关键减少切片数量,减少m3u8中的TS文件数量。SRS的默认配置是10秒一个切片,60秒一个m3u8,这样延迟是30秒左右。 +因为有些播放器是从中间位置开始请求切片,也就是第3个切片开始请求,因此会有3个切片的延迟。 + +我们可以调整下面三个配置,可以将延迟降低到6到8秒左右: + +* 减少GOP大小,比如设置OBS的GOP为1秒,或者FFmpeg的GOP为FPS的帧数。 +* 减少编码器的延迟,比如设置OBS为`配置(Profile)`为`baseline`,选择`微调(Tune)`为`zerolatency`。 +* 减少`hls_fragment`,比如设置为2秒,或者1秒。 +* 减少`hls_window`,比如配置为10秒,或者5秒。 +* 使用低延迟播放器,比如hls.js或者ijkplayer或ffplay,不要使用VLC等很高延迟的播放器。 + +参考配置文件`conf/hls.realtime.conf`: + +```bash +vhost __defaultVhost__ { + hls { + enabled on; + hls_fragment 2; + hls_window 10; + } +} +``` + +> Note: 若无法调整编码器的OGP大小,则可以考虑配置`hls_wait_keyframe off`,不参考GOP,但可能会有花屏,请测试你的设备的支持情况。 + +当然,也不能减少得非常少,容易造成播放器缓冲不足,或者播放器网络不佳时跳片,可能会有播放失败。 +延迟越低,卡顿概率越高,HLS的延迟并不能做到5秒之内,特别是考虑CDN和播放器的适配情况。 + +尽管调整后HLS的延迟会降低,但是也不会低于5秒,而且LLHLS协议也不能再降低延迟,因为LLHLS只是尝试解决了开始播放时的GOP的影响, +在上面的配置中,我们通过编码器的配置同样降低了GOP的影响,而网络抖动和播放器策略都是HLS延迟偏高的原因,而且无法解决。 +如果需要5秒之内的延迟,建议使用[HTTP-FLV](./flv.md)或者[SRT](./srt.md)或者[WebRTC](./webrtc.md)等协议。 + +## ON HLS Notify + +可以配置`on_hls_notify`实现CDN预分发,应该在`http_hooks`中配置,而不是在hls中配置。 + +## HLS Audio Corrupt + +HLS可能会有爆音的问题,这是因为AAC的采样率导致在FLV(tbn=1000)和TS(tbn=90000)之间变换时,引入了微小的误差导致的。SRS3使用采样个数来计算精确的时间戳,详细参考[HLS爆音](https://github.com/ossrs/srs/issues/547#issuecomment-294350544)。 + +> 注意:如果需要解决HLS爆音问题,需要手动禁用`hls_dts_directly`(设为off)。 + +燃鹅,SRS3修正后,发现有些音频流本身的时间戳是有问题,导致从AAC采样个数计算出来的时间戳不对,所以提供了配置项`hls_dts_directly`强制使用原始时间戳,参考[HLS强制使用原始时间戳](https://github.com/ossrs/srs/issues/547#issuecomment-563942711)。 + +## HLS Audio Only + +SRS支持分发HLS纯音频流,当RTMP流没有视频,且音频为aac(可以使用转码转为aac,参考[Usage: Transcode2HLS](./sample-transcode-to-hls.md)),SRS只切片音频。 + +若RTMP流中已经有视频和音频,需要支持纯音频HLS流,可以用转码将视频去掉,参考:[转码: 禁用流](./ffmpeg.md#%E7%A6%81%E7%94%A8)。然后分发音频流。 + +分发纯音频流不需要特殊配置,和HLS分发一样。 + +## HLS and Forward + +Forward的流和普通流不做区分,若forward的流所在的VHOST配置了HLS,一样会应用HLS配置进行切片。 + 
+因此,可以对原始流进行Transcode之后,保证流符合h.264/aac的规范,然后forward到多个配置了HLS的VHOST进行切片。支持多个源站的热备。 + +## HLS and Transcode + +HLS要求RTMP流的编码为h.264+aac/mp3,否则会自动禁用HLS,会出现RTMP流能看HLS流不能看(或者看到的HLS是之前的流)。 + +Transcode将RTMP流转码后,可以让SRS接入任何编码的RTMP流,然后转换成HLS要求的h.264/aac/mp3编码方式。 + +配置Transcode时,若需要控制ts长度,需要[配置ffmpeg编码的gop](http://ffmpeg.org/ffmpeg-codecs.html#Options-7),譬如: +```bash +vhost hls.transcode.vhost.com { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine hls { + enabled on; + vfilter { + } + vcodec libx264; + vbitrate 500; + vfps 20; + vwidth 768; + vheight 320; + vthreads 2; + vprofile baseline; + vpreset superfast; + vparams { + g 100; + } + acodec libaacplus; + abitrate 45; + asample_rate 44100; + achannels 2; + aparams { + } + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` +该FFMPEG转码参数,指定gop时长为100/20=5秒,fps帧率(vfps=20),gop帧数(g=100)。 + +## HLS Multiple Bitrate + +SRS目前不支持HLS自适应码流,因为一般需要将一个码流转码为多个码流,而且需要GOP对齐,可以使用FFmpeg实现, +参考[How to generate multiple resolutions HLS using FFmpeg for live streaming](https://stackoverflow.com/a/71985380/17679565)。 + +## Apple Examples + +Apple的HLS的示例文件: + +https://developer.apple.com/library/ios/technotes/tn2288/_index.html + +## HLS Encryption + +SRS3支持切片加密,具体使用方法参考[#1093](https://github.com/ossrs/srs/issues/1093#issuecomment-415971022)。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/hls) + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/http-api.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/http-api.md new file mode 100644 index 00000000..9bfed730 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/http-api.md @@ -0,0 +1,563 @@ +--- +title: HTTP API +sidebar_label: HTTP API +hide_title: false +hide_table_of_contents: false +--- + +# HTTP API + +SRS提供HTTP接口,供外部程序管理服务器,并支持跨域(js可以直接控制和获取服务器的各种信息)。 + +通过HTTP API,可以使用[srs-console](http://ossrs.net/console/)连接到你的服务器。 + +工作流如下: + +```text ++-------------------------+ +-------+ ++ Chrome/Your Application +--HTTP-API-->--+ SRS + ++-------------------------+ +-------+ +``` + +您可以使用 Chrome 或您的应用程序,请求 SRS 的 HTTP API 以获取 SRS 的状态。 + +## Goals + +SRS的HTTP接口遵循最简单原则,主要包括: +* 只提供json数据格式接口,要求请求和响应的数据全都是json。 +* [srs-console](https://github.com/ossrs/srs-console)可访问SRS的API,提供管理后台。 +* 发生错误时,支持HTTP错误码,或者json中的code错误码。 + +## Build + +SRS自动打开HTTPApi选项,参考:[configure选项](./install.md) + +```bash +./configure && make +``` + +## Config + +配置文件需要开启http-api: + +```bash +listen 1935; +# system statistics section. +# the main cycle will retrieve the system stat, +# for example, the cpu/mem/network/disk-io data, +# the http api, for instance, /api/v1/summaries will show these data. +# @remark the heartbeat depends on the network, +# for example, the eth0 maybe the device which index is 0. +stats { + # the index of device ip. + # we may retrieve more than one network device. + # default: 0 + network 0; + # the device name to stat the disk iops. + # ignore the device of /proc/diskstats if not configed. + disk sda sdb xvda xvdb; +} +# api of srs. +# the http api config, export for external program to manage srs. +# user can access http api of srs in browser directly, for instance, to access by: +# curl http://192.168.1.170:1985/api/v1/reload +# which will reload srs, like cmd killall -1 srs, but the js can also invoke the http api, +# where the cli can only be used in shell/terminate. +http_api { + # whether http api is enabled. 
+ # default: off + enabled on; + # the http api listen entry is <[ip:]port> + # for example, 192.168.1.100:1985 + # where the ip is optional, default to 0.0.0.0, that is 1985 equals to 0.0.0.0:1985 + # default: 1985 + listen 1985; + # whether enable crossdomain request. + # default: on + crossdomain on; + # the HTTP RAW API is more powerful api to change srs state and reload. + raw_api { + # whether enable the HTTP RAW API. + # Overwrite by env SRS_HTTP_API_RAW_API_ENABLED + # default: off + enabled off; + # whether enable rpc reload. + # Overwrite by env SRS_HTTP_API_RAW_API_ALLOW_RELOAD + # default: off + allow_reload off; + # whether enable rpc query. + # Always off by https://github.com/ossrs/srs/issues/2653 + #allow_query off; + # whether enable rpc update. + # Always off by https://github.com/ossrs/srs/issues/2653 + #allow_update off; + } + # the auth is authentication for http api + auth { + # whether enable the HTTP AUTH. + # Overwrite by env SRS_HTTP_API_AUTH_ENABLED + # default: off + enabled on; + # The username of Basic authentication: + # Overwrite by env SRS_HTTP_API_AUTH_USERNAME + username admin; + # The password of Basic authentication: + # Overwrite by env SRS_HTTP_API_AUTH_PASSWORD + password admin; + } + # For https_api or HTTPS API. + https { + # Whether enable HTTPS API. + # default: off + enabled on; + # The listen endpoint for HTTPS API. + # default: 1986 + listen 1986; + # The SSL private key file, generated by: + # openssl genrsa -out server.key 2048 + # default: ./conf/server.key + key ./conf/server.key; + # The SSL public cert file, generated by: + # openssl req -new -x509 -key server.key -out server.crt -days 3650 -subj "/C=CN/ST=Beijing/L=Beijing/O=Me/OU=Me/CN=ossrs.net" + # default: ./conf/server.crt + cert ./conf/server.crt; + } +} +vhost __defaultVhost__ { +} +``` + +其中,`http_api`开启了HTTP API,`stats`配置了SRS后台统计的信息,包括: + +* network: 这个配置了heartbeat使用的网卡ip,即SRS主动汇报的网卡信息。 +* disk: 这个配置了需要统计的磁盘的IOPS,可以通过`cat /proc/diskstats`命令获得名称,譬如阿里云的磁盘名称叫xvda. 
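+
+如果不确定`disk`应该配置哪些设备名,可以先查看系统的磁盘统计信息。下面是一个简单的示意,`/proc/diskstats`的第3列就是设备名,把实际使用的磁盘名填入`stats.disk`即可:
+
+```bash
+# 列出所有磁盘及分区的设备名,例如 sda、vda、xvda 等
+cat /proc/diskstats | awk '{print $3}' | sort -u
+```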
+ +## Start + +启动服务器:`./objs/srs -c http-api.conf` + +访问api,浏览器打开地址: + +* [http://127.0.0.1:1985/api/v1](http://127.0.0.1:1985/api/v1) +* [https://127.0.0.1:1986/api/v1](https://127.0.0.1:1986/api/v1) + +> 注意:请将`192.168.1.170`或者下面提到的任何服务器IP,换成您的服务器的IP。 + +## Performance + +机器:虚拟机CentOS6-64位,4CPU,T430笔记本,VirtualBox + +10%CPU,10000次请求,27秒,平均370次请求/秒,30毫秒一个请求 + +```bash +top - 09:59:49 up 3 days, 50 min, 4 users, load average: 0.00, 0.00, 0.00 +Tasks: 140 total, 1 running, 139 sleeping, 0 stopped, 0 zombie +Cpu(s): 11.6%us, 20.0%sy, 0.0%ni, 66.7%id, 0.0%wa, 0.0%hi, 1.8%si, 0.0%st +Mem: 2055440k total, 990148k used, 1065292k free, 228544k buffers +Swap: 2064376k total, 0k used, 2064376k free, 486620k cached + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND +29696 winlin 20 0 15872 1592 1360 S 9.3 0.1 0:14.21 ./objs/srs -c console.conf +``` + +```bash +[winlin@dev6 srs]$ time for((i=0;i<10000;i++)); do curl http://127.0.0.1:1985/api >/dev/null 2>&1; done + +real 0m27.375s +user 0m8.223s +sys 0m16.289s +``` + +## Access Api + +直接在浏览器中就可以访问,或者用curl发起http请求。 + +SRS提供了api的面包屑,可以从根目录开始导航,不需要任何记忆。一般的字段包括: +* code表示错误码,按照linux惯例,0表示成功。 +* urls表示是面包屑导航,该api下面的子api(链接)。 +* data表示最后一级提供服务的api,返回的数据。 + +另外,提供服务的api按照HTTP RESTful规则是复数,譬如versions/authors,表示资源。HTTP的各种方法表示操作,譬如GET查询,PUT更新,DELETE删除。参考:[Redmine HTTP Rest api](http://www.redmine.org/projects/redmine/wiki/Rest_api) + +根目录: + +```bash +# curl http://192.168.1.170:1985/ + "urls": { + "api": "the api root" + } +``` + +继续: + +```bash +# curl http://192.168.1.170:1985/api/v1/versions + "major": 0, + "minor": 9, + "revision": 43, + "version": "0.9.43" +``` + +或者: + +```bash +# curl http://192.168.1.170:1985/api/v1/authors + "primary_authors": "xxx", + "contributors_link": "https://github.com/ossrs/srs/blob/master/AUTHORS.txt", + "contributors": "xxx" +``` + +SRS的API属于“自解释型,HTTP RESTful API” + +## Error Code + +SRS可能返回HTTP错误,即Status不等于200;或者在HTTP Status为200时,响应的json的code不为0. + +譬如,返回HTTP错误: + +``` +winlin:~ winlin$ curl -v http://127.0.0.1:1985 && echo "" +< HTTP/1.1 404 Not Found +< Connection: Keep-Alive +< Content-Length: 9 +< Content-Type: text/plain; charset=utf-8 +< Server: SRS/2.0.184 +< +Not Found +``` + +譬如,HTTP200时内容中code不等于0: + +``` +winlin:~ winlin$ curl -v http://127.0.0.1:1985/api/v1/tests/errors && echo "" +< HTTP/1.1 200 OK +< Connection: Keep-Alive +< Content-Length: 12 +< Content-Type: application/json +< Server: SRS/2.0.184 +< +{"code":100} +``` + +用户应该处理这两种错误。 + +## Crossdomain + +SRS HTTP API支持跨域,js可以直接调用srs的http api。 + +SRS支持两种跨域方式: + +* OPTIONS: jquery可以直接跨域请求API,浏览器会发送一个OPTIONS跨域请求,SRS允许跨域后,浏览器再次发起API请求。 +* JSONP: jquery/angularjs可以发起JSONP跨域请求,服务器会将响应作为js文件,内容是调用一个函数,函数名由QueryString中的callback指定。 +* JSONP-DELETE: JSONP只能GET,因此DELETE方法是由QueryString的method指定的。 + +JSONP实例,例如: + +``` +GET http://localhost:1985/api/v1/vhosts/?callback=JSON_CALLBACK +JSON_CALLBACK({"code":0,"server":13449}) +GET http://localhost:1985/api/v1/vhosts/100?callback=JSON_CALLBACK&method=DELETE +JSON_CALLBACK({"code":0}) +``` + +## HTTPS API + +SRS内置支持HTTPS API,只需要开启配置`https`: + +``` +http_api { + enabled on; + listen 1985; + https { + # Whether enable HTTPS API. + # default: off + enabled on; + # The listen endpoint for HTTPS API. 
+ # default: 1990 + listen 1990; + # The SSL private key file, generated by: + # openssl genrsa -out server.key 2048 + # default: ./conf/server.key + key ./conf/server.key; + # The SSL public cert file, generated by: + # openssl req -new -x509 -key server.key -out server.crt -days 3650 -subj "/C=CN/ST=Beijing/L=Beijing/O=Me/OU=Me/CN=ossrs.net" + # default: ./conf/server.crt + cert ./conf/server.crt; + } +} +``` + +> Remark: 请使用你的证书文件,代替上面配置中的key和cert。 + +> Note: 若需要开启HTTPS直播流,请参考[HTTPS FLV Live Stream](./flv.md#https-flv-live-stream) + +## HTTP and HTTPS Proxy + +SRS可以和HTTP/HTTPS代理一起工作得很好,比如[Nginx](./http-server.md#nginx-proxy), +[HTTPX](./http-server.md#httpx-proxy), [CaddyServer](./http-server.md#caddy-proxy), +等等。 + +## Server ID + +SRS返回的api中都会带有`server`的信息,即Server的ID,用来标识服务器。客户端在获取信息时,必须检查ServerID是否改变,改变时就是服务器重启,之前所有的数据都应该作废了。 + +## API Navigation + +SRS提供了API的导航,即所有支持的API及描述。 + +地址是:`http://192.168.1.170:1985/api/v1`,主要包含的子api有: + +| API | Example | Description | +| --- | -------- | --------- | +| server | 4481 | 服务器标识 | +| versions | /api/v1/versions | 获取服务器版本信息 | +| summaries | /api/v1/summaries | 获取服务器的摘要信息 | +| rusages | /api/v1/rusages | 获取服务器资源使用信息 | +| self_proc_stats | /api/v1/self_proc_stats | 获取服务器进程信息 | +| system_proc_stats | /api/v1/system_proc_stats | 获取服务器所有进程情况 | +| meminfos | /api/v1/meminfos | 获取服务器内存使用情况 | +| authors | /api/v1/authors | 获取作者、版权和License信息 | +| features | /api/v1/features | 获取系统支持的功能列表 | +| requests | /api/v1/requests | 获取请求的信息,即当前发起的请求的详细信息 | +| vhosts | /api/v1/vhosts | 获取服务器上的vhosts信息 | +| streams | /api/v1/streams | 获取服务器的streams信息 | +| clients | /api/v1/clients | 获取服务器的clients信息,默认获取前10个 | +| configs | /api/v1/configs | CUID配置,RAW API | +| publish | /rtc/v1/publish/ | WebRTC推流的API | +| play | /rtc/v1/play/ | WebRTC播放流的API | + +## WebRTC Publish + +使用WebRTC推流到SRS时,需要先调用API交换SDK,SRS支持[WHIP](https://datatracker.ietf.org/doc/draft-ietf-wish-whip/)协议。例如: + +```text +POST /rtc/v1/whip/?app=live&stream=livestream + +Body in SDP, the Content-type is application/sdp: + +v=0 +...... +a=ssrc:2064016335 label:c8243ce9-ace5-4d17-9184-41a2543101b5 +``` + +服务器响应对应的SDP如下: + +```text +v=0 +...... +a=candidate:1 1 udp 2130706431 172.18.0.4 8000 typ host generation 0 +``` + +> Note: 按照WHIP的要求,响应的HTTP Status是201,而不是200。 + +具体调用和使用请参考[srs.sdk.js](https://github.com/ossrs/srs/blob/develop/trunk/research/players/js/srs.sdk.js)和[srs-unity: Publisher](https://github.com/ossrs/srs-unity#usage-publisher)。 + +## WebRTC Play + +拉流或播放时,需要调用另外的API,请求格式和publish一样,SRS支持[WHEP](https://datatracker.ietf.org/doc/draft-murillo-whep/)协议。例如: + +```text +POST /rtc/v1/whep/?app=live&stream=livestream + +Body in SDP, the Content-type is application/sdp: + +v=0 +...... +a=ssrc:2064016335 label:c8243ce9-ace5-4d17-9184-41a2543101b5 +``` + +> Note: 虽然WHIP是推流的协议,但是实际上也可以支持播放流,差异只在SDP中;当然有专门为播放涉及的[WHEP](https://datatracker.ietf.org/doc/draft-murillo-whep/),目前还没有RFC草案。 + +服务器响应对应的SDP如下: + +``` +v=0 +...... 
+a=candidate:1 1 udp 2130706431 172.18.0.4 8000 typ host generation 0 +``` + +> Note: 按照WHIP的要求,响应的HTTP Status是201,而不是200。 + +具体调用和使用请参考[srs.sdk.js](https://github.com/ossrs/srs/blob/develop/trunk/research/players/js/srs.sdk.js)和[srs-unity: Player](https://github.com/ossrs/srs-unity#usage-player)。 + +## Summaries + +SRS提供系统的摘要信息接口,譬如当前的内存、CPU、网络、负载使用率。 + +地址为:`http://192.168.1.170:1985/api/v1/summaries` + +## Vhosts + +SRS提供获取所有vhost的接口。 + +地址为:`http://192.168.1.170:1985/api/v1/vhosts` + +还可以继续处理某个vhost的信息,譬如`http://192.168.1.170:1985/api/v1/vhosts/3756` + +响应内容: + +* vhost中的server为srs的id,用来标识是否服务器重启了。 + +## Streams + +SRS提供获取所有stream的接口。 + +地址为:`http://192.168.1.170:1985/api/v1/streams` + +参数: + +* `?start=N`: 开始的索引,默认0。 +* `?count=N`: 返回的总数目,默认为10。 + +还可以继续处理某个stream的信息,譬如`http://192.168.1.170:1985/api/v1/streams/3756` + +响应内容: + +* stream中的server为srs的id,用来标识是否服务器重启了。 +* vhost为stream所属的vhost的id。 + +## Clients + +SRS提供查询客户端信息的接口。 + +地址为:`http://192.168.1.170:1985/api/v1/clients` + +参数: + +* `?start=N`: 开始的索引,默认0。 +* `?count=N`: 返回的总数目,默认为10。 + +还可以继续处理某个client的信息,譬如`http://192.168.1.170:1985/api/v1/clients/3756` + +## Kickoff Client + +可以踢掉连接的用户,SRS提供了HTTP RESTful接口: + +``` +DELETE /api/v1/clients/{id} +``` + +可以先查询到需要踢掉的Client的ID: + +``` +GET /api/v1/clients +``` + +若需要踢掉推流的Client,可以从streams接口中查询推流client的id: + +``` +GET /api/v1/streams +or GET /api/v1/streams/6745 +``` + +流信息中的`stream.publish.cid`就是推流的客户端id: + +``` +1. GET http://192.168.1.170:1985/api/v1/streams/6745 +2. Response stream.publish.cid: +stream: { + publish: { + active: true, + cid: 107 + } +} +3. DELETE http://192.168.1.170:1985/api/v1/clients/107 +``` + +备注:HTTP请求可以使用[HTTP REST Tool](http://ossrs.net/srs.release/http-rest/index.html) + +备注:HTTP请求还可以使用Linux的工具`curl`,常见的请求如下: + +``` +curl -v -X GET http://192.168.1.170:1985/api/v1/clients/426 && echo "" +curl -v -X DELETE http://192.168.1.170:1985/api/v1/clients/426 && echo "" +``` + +## Persistence Config + +保存日志文件的功能,已经在4.0禁用了。 + +## HTTP RAW API + +SRS支持RAW API,一般的服务器只能提供读(Read)形式的API,譬如获取系统状态之类,但是SRS提供写(Write)形式的API,譬如Reload和修改系统配置等所有改写系统的行为。 + +注意: 必须在`http_api`配置中,开启`http_api.raw_api.enabled`才能允许HTTP RAW API,否则会返回错误代码是1061。 + +``` +http_api { + enabled on; + listen 1985; + raw_api { + enabled on; + allow_reload on; + } +} +``` + +SRS支持的HTTP RAW API包括: + +* `Raw`: 查看HTTP RAW API的配置。 +* `Reload`: 支持reload配置。 + +### Raw + +| Key | DESC | +| ---- | ---- | +| feature | 查询服务器端HTTP RAW API的配置 | +| url | `/api/v1/raw?rpc=raw` | +| curl | `curl http://127.0.0.1:1985/api/v1/raw?rpc=raw` | +| config | 不需要 | +| params | 无参数 | + +### RAW Reload + +| Key | DESC | +| ---- | ---- | +| feature | 可以重新加载配置,和`killall -1 srs`的效果是一样的 | +| url | `/api/v1/raw?rpc=reload` | +| curl | `curl http://127.0.0.1:1985/api/v1/raw?rpc=reload` | +| config | `allow_reload on;`| +| params | 无参数 | + +### Other RAW APIs + +其他RAW API已经在4.0中删除了。 + +## Authentication + +SRS在`5.0.152+`或者`6.0.40+`版本开始支持HTTP API鉴权,可以通过配置`http_api.auth`开启。 + +```bash +# conf/http.api.auth.conf +http_api { + enabled on; + listen 1985; + auth { + enabled on; + username admin; + password admin; + } +} +``` + +或者,通过环境变量设置用户名和密码: + +```bash +env SRS_HTTP_API_ENABLED=on SRS_HTTP_SERVER_ENABLED=on \ + SRS_HTTP_API_AUTH_ENABLED=on SRS_HTTP_API_AUTH_USERNAME=admin SRS_HTTP_API_AUTH_PASSWORD=admin \ + ./objs/srs -e +``` + +可以访问下面的地址来验证: +- 提示输入用户名和密码:http://localhost:1985/api/v1/versions +- 带用户名和密码的URL:http://admin:admin@localhost:1985/api/v1/versions + +要清除用户名和密码,可以通过用户名访问HTTP API: +- http://admin@localhost:1985/api/v1/versions + +> 
注意:只针对HTTP API开启了鉴权,不包括HTTP服务器和WebRTC HTTP API。 + +Winlin 2015.8 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/http-api) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/http-callback.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/http-callback.md new file mode 100644 index 00000000..5345499e --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/http-callback.md @@ -0,0 +1,419 @@ +--- +title: HTTP回调 +sidebar_label: HTTP回调 +hide_title: false +hide_table_of_contents: false +--- + +# HTTP Callback + +服务器端定制的实现方式,就是HTTP回调。譬如当客户端连接到SRS时,回调指定的http地址,这样可以实现验证功能。工作流如下: + +```text ++--------+ +--------+ +-----------------------+ +| FFmpeg |-->--+ SRS |--HTTP-Callback-->--+ Your Business Server | ++--------+ +--------+ +-----------------------+ +``` + +当FFmpeg/OBS向SRS发布或播放流时,SRS将调用您的业务服务器以通知该事件。 + +## Usage + +首先,运行SRS,在配置中启用HTTP回调: + +```bash +./objs/srs -c conf/http.hooks.callback.conf +``` + +启动演示HTTP回调服务器,这是您的业务服务器: + +```bash +go run research/api-server/server.go +``` + +将流发布到SRS,参数为: + +```bash +ffmpeg -re -i doc/source.flv -c copy -f flv rtmp://localhost/live/livestream?k=v +``` + +您的业务服务器将收到HTTP事件: + +```text +Got action=on_publish, client_id=3y1tcaw2, ip=127.0.0.1, vhost=__defaultVhost__, stream=livestream, param=?k=v +``` + +请注意,`k=v`可用于身份验证,关于Token认证,即基于http回调的认证,参考:[Token Authentication](./drm.md#token-authentication) + +## Compile + +SRS总是开启HttpCallback。 + +参考:[Build](./install.md) + +## Config SRS + +这里有一个示例 [conf/http.hooks.callback.conf](https://github.com/ossrs/srs/blob/develop/trunk/conf/http.hooks.callback.conf) +可用,演示了常见回调事件的配置,可直接使用。 + +http hooks的配置如下: + +```bash +vhost your_vhost { + http_hooks { + # whether the http hooks enable. + # default off. 
+ enabled on; + # when client(encoder) publish to vhost/app/stream, call the hook, + # the request in the POST data string is a object encode by json: + # { + # "action": "on_publish", + # "client_id": "9308h583", + # "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live", + # "stream": "livestream", "param":"?token=xxx&salt=yyy", "server_id": "vid-werty", + # "stream_url": "video.test.com/live/livestream", "stream_id": "vid-124q9y3" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # 0 + # support multiple api hooks, format: + # on_publish http://xxx/api0 http://xxx/api1 http://xxx/apiN + # @remark For SRS4, the HTTPS url is supported, for example: + # on_publish https://xxx/api0 https://xxx/api1 https://xxx/apiN + on_publish http://127.0.0.1:8085/api/v1/streams http://localhost:8085/api/v1/streams; + # when client(encoder) stop publish to vhost/app/stream, call the hook, + # the request in the POST data string is a object encode by json: + # { + # "action": "on_unpublish", + # "client_id": "9308h583", + # "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live", + # "stream": "livestream", "param":"?token=xxx&salt=yyy", "server_id": "vid-werty", + # "stream_url": "video.test.com/live/livestream", "stream_id": "vid-124q9y3" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # 0 + # support multiple api hooks, format: + # on_unpublish http://xxx/api0 http://xxx/api1 http://xxx/apiN + # @remark For SRS4, the HTTPS url is supported, for example: + # on_unpublish https://xxx/api0 https://xxx/api1 https://xxx/apiN + on_unpublish http://127.0.0.1:8085/api/v1/streams http://localhost:8085/api/v1/streams; + # when client start to play vhost/app/stream, call the hook, + # the request in the POST data string is a object encode by json: + # { + # "action": "on_play", + # "client_id": "9308h583", + # "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live", + # "stream": "livestream", "param":"?token=xxx&salt=yyy", + # "pageUrl": "http://www.test.com/live.html", "server_id": "vid-werty", + # "stream_url": "video.test.com/live/livestream", "stream_id": "vid-124q9y3" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # 0 + # support multiple api hooks, format: + # on_play http://xxx/api0 http://xxx/api1 http://xxx/apiN + # @remark For SRS4, the HTTPS url is supported, for example: + # on_play https://xxx/api0 https://xxx/api1 https://xxx/apiN + on_play http://127.0.0.1:8085/api/v1/sessions http://localhost:8085/api/v1/sessions; + # when client stop to play vhost/app/stream, call the hook, + # the request in the POST data string is a object encode by json: + # { + # "action": "on_stop", + # "client_id": "9308h583", + # "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live", + # "stream": "livestream", "param":"?token=xxx&salt=yyy", "server_id": "vid-werty", + # "stream_url": "video.test.com/live/livestream", "stream_id": "vid-124q9y3" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # 0 + # support multiple api hooks, format: + # on_stop http://xxx/api0 http://xxx/api1 http://xxx/apiN + # @remark For SRS4, the HTTPS url is supported, for example: + # on_stop https://xxx/api0 
https://xxx/api1 https://xxx/apiN + on_stop http://127.0.0.1:8085/api/v1/sessions http://localhost:8085/api/v1/sessions; + # when srs reap a dvr file, call the hook, + # the request in the POST data string is a object encode by json: + # { + # "action": "on_dvr", + # "client_id": "9308h583", + # "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live", + # "stream": "livestream", "param":"?token=xxx&salt=yyy", + # "cwd": "/usr/local/srs", + # "file": "./objs/nginx/html/live/livestream.1420254068776.flv", "server_id": "vid-werty", + # "stream_url": "video.test.com/live/livestream", "stream_id": "vid-124q9y3" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # 0 + on_dvr http://127.0.0.1:8085/api/v1/dvrs http://localhost:8085/api/v1/dvrs; + # when srs reap a ts file of hls, call the hook, + # the request in the POST data string is a object encode by json: + # { + # "action": "on_hls", + # "client_id": "9308h583", + # "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live", + # "stream": "livestream", "param":"?token=xxx&salt=yyy", + # "duration": 9.36, // in seconds + # "cwd": "/usr/local/srs", + # "file": "./objs/nginx/html/live/livestream/2015-04-23/01/476584165.ts", + # "url": "live/livestream/2015-04-23/01/476584165.ts", + # "m3u8": "./objs/nginx/html/live/livestream/live.m3u8", + # "m3u8_url": "live/livestream/live.m3u8", + # "seq_no": 100, "server_id": "vid-werty", + # "stream_url": "video.test.com/live/livestream", "stream_id": "vid-124q9y3" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # 0 + on_hls http://127.0.0.1:8085/api/v1/hls http://localhost:8085/api/v1/hls; + # when srs reap a ts file of hls, call this hook, + # used to push file to cdn network, by get the ts file from cdn network. + # so we use HTTP GET and use the variable following: + # [server_id], replace with the server_id + # [app], replace with the app. + # [stream], replace with the stream. + # [param], replace with the param. + # [ts_url], replace with the ts url. + # ignore any return data of server. + # @remark random select a url to report, not report all. + on_hls_notify http://127.0.0.1:8085/api/v1/hls/[server_id]/[app]/[stream]/[ts_url][param]; + } +} +``` + +重点参数说明: + +* `stream_url`: 流的URL,无扩展名信息,例如:`/live/livestream`. +* `stream_id`: 流的ID,可以通过API查询流的详细信息。 + +> Note: 推流的回调是`on_publish`和`on_unpublish`,播放的回调是`on_play`和`on_stop`。 + +> Note: SRS 4之前,还有`on_connect`和`on_close`,这是RTMP定义的事件,只有RTMP流才有,而且和推流和播放的事件是重叠的,所以不推荐使用。 + +> Note: 可以参考conf/full.conf配置文件中的hooks.callback.vhost.com实例。 + +## Protocol + +HTTP回调的格式如下,以`on_publish`为例: + +```text +POST /api/v1/streams HTTP/1.1 +Content-Type: application-json + +Body: +{ + "server_id": "vid-0xk989d", + "action": "on_publish", + "client_id": "341w361a", + "ip": "127.0.0.1", + "vhost": "__defaultVhost__", + "app": "live", + "tcUrl": "rtmp://127.0.0.1:1935/live?vhost=__defaultVhost__", + "stream": "livestream", + "param": "", + "stream_url": "video.test.com/live/livestream", + "stream_id": "vid-124q9y3" +} +``` + +> Note: 也可以用wireshark或tcpdump抓包验证。 + +## Heartbeat + +SRS将向HTTP回调服务器发送心跳信号。这允许你监控SRS服务器的健康状况。启用此功能方法: + +```bash +# heartbeat to api server +# @remark, the ip report to server, is retrieve from system stat, +# which need the config item stats.network. +heartbeat { + # whether heartbeat is enabled. 
+ # Overwrite by env SRS_HEARTBEAT_ENABLED + # default: off + enabled off; + # the interval seconds for heartbeat, + # recommend 0.3,0.6,0.9,1.2,1.5,1.8,2.1,2.4,2.7,3,...,6,9,12,.... + # Overwrite by env SRS_HEARTBEAT_INTERVAL + # default: 9.9 + interval 9.3; + # when startup, srs will heartbeat to this api. + # @remark: must be a restful http api url, where SRS will POST with following data: + # { + # "device_id": "my-srs-device", + # "ip": "192.168.1.100" + # } + # Overwrite by env SRS_HEARTBEAT_URL + # default: http://127.0.0.1:8085/api/v1/servers + url http://127.0.0.1:8085/api/v1/servers; + # the id of device. + # Overwrite by env SRS_HEARTBEAT_DEVICE_ID + device_id "my-srs-device"; + # whether report with summaries + # if on, put /api/v1/summaries to the request data: + # { + # "summaries": summaries object. + # } + # @remark: optional config. + # Overwrite by env SRS_HEARTBEAT_SUMMARIES + # default: off + summaries off; +} +``` + +通过启用`summaries`,您可以获取SRS服务器状态,例如`self.pid`和`self.srs_uptime`,因此您可以使用它来判断SRS是否重新启动。 + +> Note: 关于`summaries`的字段,请参阅 [HTTP API: summaries](./http-api.md#summaries) 了解详细信息。 + +## Go Example + +使用Go处理SRS的回调,以`on_publish`为例: + +```go +http.HandleFunc("/api/v1/streams", func(w http.ResponseWriter, r *http.Request) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + fmt.Println(string(b)) + + res, err := json.Marshal(struct { + Code int `json:"code"` + Message string `json:"msg"` + }{ + 0, "OK", + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + w.Write(res) +}) + +_ = http.ListenAndServe(":8085", nil) +``` + +## Nodejs Koa Example + +使用Nodejs/Koa处理SRS的回调,以`on_publish`为例: + +```js +const Router = require('koa-router'); +const router = new Router(); + +router.all('/api/v1/streams', async (ctx) => { + console.log(ctx.request.body); + + ctx.body = {code: 0, msg: 'OK'}; +}); +``` + +## PHP Example + +使用PHP处理SRS的回调,以`on_publish`为例: + +```php +$body = json_decode(file_get_contents('php://input')); +printf($body); + +echo json_encode(array("code"=>0, "msg"=>"OK")); +``` + +## HTTP callback events + +SRS的回调事件包括: + +* `on_publish`: 当客户端发布流时,譬如flash/FMLE方式推流到服务器 +* `on_unpublish`: 当客户端停止发布流时 +* `on_play`: 当客户端开始播放流时 +* `on_stop`: 当客户端停止播放时 +* `on_dvr`: 当DVR录制关闭一个flv文件时 +* `on_hls`: 当HLS关闭一个TS文件时 + +对于事件`on_publish`和`on_play`: +* 返回值:SRS要求HTTP服务器返回HTTP200并且response内容为整数错误码(0表示成功),其他错误码会断开客户端连接。 + +其中, +* 事件:发生该事件时,即回调指定的HTTP地址。 +* HTTP地址:可以支持多个,以空格分隔,SRS会依次回调这些接口。 +* 数据:SRS将数据POST到HTTP接口。 + +## SRS HTTP Callback Server + +SRS自带了一个默认的处理HTTP Callback的服务器,启动时需要指定端口,譬如8085端口。 + +启动方法: + +```bash +cd research/api-server && go run server.go 8085 +``` + +启动日志如下: + +```bash +#2023/01/18 22:57:40.835254 server.go:572: api server listen at port:8085, static_dir:/Users/panda/srs/trunk/static-dir +#2023/01/18 22:57:40.835600 server.go:836: start listen on::8085 +``` + +> Remark: For SRS4, the HTTP/HTTPS url is supported, see [#1657](https://github.com/ossrs/srs/issues/1657#issuecomment-720889906). 
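
The demo server above accepts every request. To make the token check (`k=v` in the publish URL) concrete, below is a minimal Go sketch of an `on_publish` authentication handler that follows the request/response protocol described above. The route `/api/v1/streams`, the query key `k`, and the token value are assumptions carried over from the earlier examples; adapt them to your own `http_hooks` configuration.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"net/url"
	"strings"
)

// Only the fields needed for authentication are decoded; the full
// on_publish JSON body is documented in the config comments above.
type srsEvent struct {
	Action string `json:"action"`
	IP     string `json:"ip"`
	Vhost  string `json:"vhost"`
	App    string `json:"app"`
	Stream string `json:"stream"`
	Param  string `json:"param"` // e.g. "?k=v" from rtmp://host/live/livestream?k=v
}

func respond(w http.ResponseWriter, code int) {
	// SRS treats HTTP 200 with code 0 as success; a non-zero code rejects the client.
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(map[string]int{"code": code})
}

func main() {
	// The expected token value is an assumption for this sketch.
	const secret = "my-secret"

	http.HandleFunc("/api/v1/streams", func(w http.ResponseWriter, r *http.Request) {
		var ev srsEvent
		if err := json.NewDecoder(r.Body).Decode(&ev); err != nil {
			respond(w, 1) // malformed request, reject
			return
		}

		// Only authenticate publishing; let on_unpublish pass through.
		if ev.Action != "on_publish" {
			respond(w, 0)
			return
		}

		// Param looks like "?k=v"; parse it as a query string and check the token.
		q, err := url.ParseQuery(strings.TrimPrefix(ev.Param, "?"))
		if err != nil || q.Get("k") != secret {
			log.Printf("reject %s/%s from %s, param=%q", ev.App, ev.Stream, ev.IP, ev.Param)
			respond(w, 1) // non-zero code: SRS closes the publisher connection
			return
		}

		respond(w, 0)
	})

	log.Fatal(http.ListenAndServe(":8085", nil))
}
```

With this sketch, publishing `rtmp://host/live/livestream?k=my-secret` would be accepted, while a missing or wrong token makes SRS drop the publisher, matching the response rules described in the Response section below.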
+ +## HTTPS Callback + +SRS4支持HTTPS回调,只需要简单的将回调地址,从`http://`改成`https://`即可,比如: + +``` +vhost your_vhost { + http_hooks { + enabled on; + on_publish https://127.0.0.1:8085/api/v1/streams; + on_unpublish https://127.0.0.1:8085/api/v1/streams; + on_play https://127.0.0.1:8085/api/v1/sessions; + on_stop https://127.0.0.1:8085/api/v1/sessions; + on_dvr https://127.0.0.1:8085/api/v1/dvrs; + on_hls https://127.0.0.1:8085/api/v1/hls; + on_hls_notify https://127.0.0.1:8085/api/v1/hls/[app]/[stream]/[ts_url][param]; + } +} +``` + +## Response + +如果回调成功,必须响应正确的格式的数值,否则就会返回错误给客户端导致推流或播放失败。也可以参考[HTTP API](./http-api.md#error-code)。格式是: + +* HTTP/200, 必须返回200否则认为是错误。 +* 并且,响应的内容必须是int值0,或者是JSON对象带字段code值为0。 + +比如: + +``` +HTTP/1.1 200 OK +Content-Length: 1 +0 +``` + +或者: + +``` +HTTP/1.1 200 OK +Content-Length: 11 +{"code": 0} +``` + +可以查看实例回调的响应: + +``` +cd srs/trunk/research/api-server && go run server.go 8085 +``` + +你会明白什么是正确的响应格式。 + +## Snapshot + +HttpCallback也可以用来截图,参考[snapshot](./snapshot.md#httpcallback) + +Winlin 2015.1 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/http-callback) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/http-server.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/http-server.md new file mode 100644 index 00000000..28dd12a1 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/http-server.md @@ -0,0 +1,351 @@ +--- +title: HTTP Server +sidebar_label: HTTP Server +hide_title: false +hide_table_of_contents: false +--- + +# HTTP Server + +SRS内嵌了一个web服务器,支持api和简单的文件分发。 + +部署和使用SRS的内嵌http服务器,参考:[Usage: HTTP](./sample-http.md) + +SRS的内置HTTP服务器已经参考GO的HTTP模块重写,满足商用要求,可以当作web服务器使用。参考:[#277](https://github.com/ossrs/srs/issues/277) + +> Note: SRS只支持源站HTTP分发,边缘还是需要用Web服务器比如NGINX、SQUID或ATS等。 + +SRS也可以很好地与HTTP反向代理服务器一起使用,例如[NGINX](#nginx-proxy)和[Caddy](#caddy-proxy)。 + +## Use Scenario + +它的定位很简单:智能手机上的摄像头。 + +Nginx/Apache/lighthttpd等众多HTTP server大佬就是专业的单反,老长老长镜头了。 +难道有了单反智能手机上就不能有摄像头?不会吧!而且必须有。所以不是要和nginx拼个你死我活, +定位不一样,就像fms内嵌apache一样(不过fms嵌得很烂),真的有必要而且方便。 + +为何srs不内嵌一个nginx呢?智能手机上能内嵌一个单反长镜头么?我去,那是怪物吧。 +nginx14万行代码,巨大无比,srs才2万行,如何能内嵌呢?最核心的原因是:srs需要提供http的api, +方便外部管理和调用;这点往往都毫无异议,但是提到srs要内嵌web服务器,就都炸开锅啦。 +OK,其实就是http的api稍微扩展点,支持读文件后发送给客户端。 + +srs会一如既往的保持最简单,http的代码不会有多少行,功能不会有几个,就支持简单的文件分发就足够了。可以: +* 只需要部署一个服务器就可以分发RTMP和HLS。 +* SRS对于HLS/HDS/DASH的支持会更完善。 +* SRS可以支持点播,动态转封装等。 +* SRS依然可以用nginx作为反向代理,或者禁用这个选项,使用nginx分发。 + +实际上,RTMP协议本身比HTTP复杂很多,所以st来做http分发,没有任何不可以的地方,更何况只是做部分。所以,淡定~ + +## Config + +需要配置全局的HTTP端口和根目录的路径。 + +```bash +# embeded http server in srs. +# the http streaming config, for HLS/HDS/DASH/HTTPProgressive +# global config for http streaming, user must config the http section for each vhost. +# the embed http server used to substitute nginx in ./objs/nginx, +# for example, srs runing in arm, can provides RTMP and HTTP service, only with srs installed. +# user can access the http server pages, generally: +# curl http://192.168.1.170:80/srs.html +# which will show srs version and welcome to srs. +# @remeark, the http embeded stream need to config the vhost, for instance, the __defaultVhost__ +# need to open the feature http of vhost. +http_server { + # whether http streaming service is enabled. 
+ # Overwrite by env SRS_HTTP_SERVER_ENABLED + # default: off + enabled on; + # the http streaming listen entry is <[ip:]port> + # for example, 192.168.1.100:8080 + # where the ip is optional, default to 0.0.0.0, that is 8080 equals to 0.0.0.0:8080 + # @remark, if use lower port, for instance 80, user must start srs by root. + # Overwrite by env SRS_HTTP_SERVER_LISTEN + # default: 8080 + listen 8080; + # the default dir for http root. + # Overwrite by env SRS_HTTP_SERVER_DIR + # default: ./objs/nginx/html + dir ./objs/nginx/html; + # whether enable crossdomain request. + # for both http static and stream server and apply on all vhosts. + # Overwrite by env SRS_HTTP_SERVER_CROSSDOMAIN + # default: on + crossdomain on; + # For https_server or HTTPS Streaming. + https { + # Whether enable HTTPS Streaming. + # Overwrite by env SRS_HTTP_SERVER_HTTPS_ENABLED + # default: off + enabled on; + # The listen endpoint for HTTPS Streaming. + # Overwrite by env SRS_HTTP_SERVER_HTTPS_LISTEN + # default: 8088 + listen 8088; + # The SSL private key file, generated by: + # openssl genrsa -out server.key 2048 + # Overwrite by env SRS_HTTP_SERVER_HTTPS_KEY + # default: ./conf/server.key + key ./conf/server.key; + # The SSL public cert file, generated by: + # openssl req -new -x509 -key server.key -out server.crt -days 3650 -subj "/C=CN/ST=Beijing/L=Beijing/O=Me/OU=Me/CN=ossrs.net" + # Overwrite by env SRS_HTTP_SERVER_HTTPS_CERT + # default: ./conf/server.crt + cert ./conf/server.crt; + } +} +``` + +## HTTP Vhost + +同时,vhost上可以指定http配置(虚拟目录和vhost): + +```bash +vhost your_vhost { + # http static vhost specified config + http_static { + # whether enabled the http static service for vhost. + # default: off + enabled on; + # the url to mount to, + # typical mount to [vhost]/ + # the variables: + # [vhost] current vhost for http server. + # @remark the [vhost] is optional, used to mount at specified vhost. + # @remark the http of __defaultVhost__ will override the http_stream section. + # for example: + # mount to [vhost]/ + # access by http://ossrs.net:8080/xxx.html + # mount to [vhost]/hls + # access by http://ossrs.net:8080/hls/xxx.html + # mount to / + # access by http://ossrs.net:8080/xxx.html + # or by http://192.168.1.173:8080/xxx.html + # mount to /hls + # access by http://ossrs.net:8080/hls/xxx.html + # or by http://192.168.1.173:8080/hls/xxx.html + # default: [vhost]/ + mount [vhost]/hls; + # main dir of vhost, + # to delivery HTTP stream of this vhost. + # default: ./objs/nginx/html + dir ./objs/nginx/html/hls; + } +} +``` + +注意:SRS1中的`http_stream`在SRS2改名为`http_server`,全局的server配置,即静态HTTP服务器,可用来分发dvr的HLS/FLV/HDS/MPEG-DASH等。 + +注意:SRS1中vhost的`http`在SRS2改名为`http_static`,和全局的`http_server`类似用来分发静态的文件。而SRS2新增的功能`http_remux`,用来动态转封装,将RTMP流转封装为 HTTP Live FLV/Mp3/Aac/Hls/Hds/MPEG-DASH流。 + +## HTTPS Server + +SRS支持HTTPS,在配置中开启即可,默认使用子签名证书,若需要使用CA颁发的证书,请替换相关的文件。相关配置如下: + +```bash +http_server { + https { + # Whether enable HTTPS Streaming. + # Overwrite by env SRS_HTTP_SERVER_HTTPS_ENABLED + # default: off + enabled on; + # The listen endpoint for HTTPS Streaming. 
+ # Overwrite by env SRS_HTTP_SERVER_HTTPS_LISTEN + # default: 8088 + listen 8088; + # The SSL private key file, generated by: + # openssl genrsa -out server.key 2048 + # Overwrite by env SRS_HTTP_SERVER_HTTPS_KEY + # default: ./conf/server.key + key ./conf/server.key; + # The SSL public cert file, generated by: + # openssl req -new -x509 -key server.key -out server.crt -days 3650 -subj "/C=CN/ST=Beijing/L=Beijing/O=Me/OU=Me/CN=ossrs.net" + # Overwrite by env SRS_HTTP_SERVER_HTTPS_CERT + # default: ./conf/server.crt + cert ./conf/server.crt; + } +} +``` + +## Crossdomain + +SRS默认开启了CORS跨域的支持,相关配置如下: + +```bash +http_server { + # whether enable crossdomain request. + # for both http static and stream server and apply on all vhosts. + # Overwrite by env SRS_HTTP_SERVER_CROSSDOMAIN + # default: on + crossdomain on; +} +``` + +## MIME + +支持少量的MIME,见下表。 + +| 文件扩展名 | Content-Type | +| ------------- | ----------- | +| .ts | Content-Type: video/MP2T;charset=utf-8 | +| .m3u8 | Content-Type: application/x-mpegURL;charset=utf-8 | +| .json | Content-Type: application/json;charset=utf-8 | +| .css | Content-Type: text/css;charset=utf-8 | +| .swf | Content-Type: application/x-shockwave-flash;charset=utf-8 | +| .js | Content-Type: text/javascript;charset=utf-8 | +| .xml | Content-Type: text/xml;charset=utf-8 | +| 其他 | Content-Type: text/html;charset=utf-8 | + +## Method + +支持的Method包括: +* GET: 下载文件。 + +## Paths + +HTTP/HTTPS API: + +* `/api/` SRS HTTP API +* `/rtc/` SRS WebRTC API + +HTTP/HTTPS Stream: + +* `/{app}/{stream}` HTTP Stream mounted by publisher. + +以下是一些与SRS一起使用的反向代理。 + +> Note: 通常,代理可以基于路径将API和Stream一起路由。 + +## Nginx Proxy + +以下是作为文件[nginx.conf](https://github.com/ossrs/srs/blob/develop/trunk/conf/nginx.proxy.conf)的NGINX配置: + +``` +worker_processes 1; +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + + server { + listen 80; + listen 443 ssl http2; + server_name _; + ssl_certificate /usr/local/srs/conf/server.crt; + ssl_certificate_key /usr/local/srs/conf/server.key; + + # For SRS homepage, console and players + # http://r.ossrs.net/console/ + # http://r.ossrs.net/players/ + location ~ ^/(console|players)/ { + proxy_pass http://127.0.0.1:8080/$request_uri; + } + # For SRS streaming, for example: + # http://r.ossrs.net/live/livestream.flv + # http://r.ossrs.net/live/livestream.m3u8 + location ~ ^/.+/.*\.(flv|m3u8|ts|aac|mp3)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + } + # For SRS backend API for console. + # For SRS WebRTC publish/play API. 
+ location ~ ^/(api|rtc)/ { + proxy_pass http://127.0.0.1:1985$request_uri; + } + } +} +``` + +## Caddy Proxy + +使用自动HTTPS的[CaddyServer](https://caddyserver.com/docs/getting-started)配置,请使用配置文件`Caddyfile`。 + +对于HTTP服务器,请注意设置默认端口: + +``` +:80 +reverse_proxy /* 127.0.0.1:8080 +reverse_proxy /api/* 127.0.0.1:1985 +reverse_proxy /rtc/* 127.0.0.1:1985 +``` + +对于HTTPS服务器,请启用一个域名: + +``` +example.com { + reverse_proxy /* 127.0.0.1:8080 + reverse_proxy /api/* 127.0.0.1:1985 + reverse_proxy /rtc/* 127.0.0.1:1985 +} +``` + +启动CaddyServer: + +``` +caddy start -config Caddyfile +``` + +## Nodejs KOA Proxy + +nodejs koa 代理也非常适用于 SRS,请使用基于[node-http-proxy](https://github.com/nodejitsu/node-http-proxy)的[koa-proxies](https://www.npmjs.com/package/koa-proxies),这里有一个示例: + +```js +const Koa = require('koa'); +const proxy = require('koa-proxies'); +const BodyParser = require('koa-bodyparser'); +const Router = require('koa-router'); + +const app = new Koa(); +app.use(proxy('/api/', {target: 'http://127.0.0.1:1985/'})); +app.use(proxy('/rtc/', {target: 'http://127.0.0.1:1985/'})); +app.use(proxy('/*/*.(flv|m3u8|ts|aac|mp3)', {target: 'http://127.0.0.1:8080/'})); +app.use(proxy('/console/', {target: 'http://127.0.0.1:8080/'})); +app.use(proxy('/players/', {target: 'http://127.0.0.1:8080/'})); + +// Start body-parser after proxies, see https://github.com/vagusX/koa-proxies/issues/55 +app.use(BodyParser()); + +// APIs that depends on body-parser +const router = new Router(); +router.all('/', async (ctx) => { + ctx.body = 'Hello World'; +}); +app.use(router.routes()); + +app.listen(3000, () => { + console.log(`Server start on http://localhost:3000`); +}); +``` + +将其保存为 `index.js`,然后运行: + +``` +npm init -y +npm install koa koa-proxies koa-proxies koa-bodyparser koa-router +node . +``` + +## HTTPX Proxy + +好吧,[httpx-static](https://github.com/ossrs/go-oryx/tree/develop/httpx-static#usage) 是用 Go 编写的一个简单的 HTTP/HTTPS 代理: + +``` +go get github.com/ossrs/go-oryx/httpx-static +cd $GOPATH/bin +./httpx-static -http=80 -https=443 \ + -skey /usr/local/srs/etc/server.key -scert /usr/local/srs/etc/server.crt \ + -proxy=http://127.0.0.1:1985/api/v1/ \ + -proxy=http://127.0.0.1:1985/rtc/v1/ \ + -proxy=http://127.0.0.1:8080/ +``` + +> Please make sure the path `/` is the last one. + +Winlin 2015.1 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/http-server) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/ide.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/ide.md new file mode 100644 index 00000000..c6331e6a --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/ide.md @@ -0,0 +1,99 @@ +--- +title: IDE +sidebar_label: IDE +hide_title: false +hide_table_of_contents: false +--- + +# IDE + +关于SRS的IDE(集成开发环境)。 + +## CLion + +SRS只支持JetBrains的[CLion](http://www.jetbrains.com/clion/),它是基于cmake编译的。 + +IDE操作步骤: + +1. 先下载和安装[CLion](http://www.jetbrains.com/clion/) +1. 运行CLion,打开SRS的目录,打开文件 `trunk/ide/srs_clion/CMakeLists.txt` +2. 选择右上角`Load CMake project`,就开始编译SRS的依赖,对应的是 `./configure` 命令,参考: ![](/img/doc-integration-ide-001.png) ![](/img/doc-integration-ide-003.png) +3. 点右上角编译、启动或调试SRS,就可以启动调试,对应的是 `make && ./objs/srs -c conf/clion.conf` ,参考: ![](/img/doc-integration-ide-004.png) + +若执行失败,也可以右键`CMakeLists.txt`,选择`Reload CMake project`重试,参考: ![](/img/doc-integration-ide-002.png) + +在IDE调试SRS,对新同学是非常友好的,各种信息扑面而来,有种信心大增的错觉: + +![](/img/doc-integration-ide-005.png) + +Clion的主要亮点: + +1. windows下linux程序的IDE。别纠缠vs是不是王中之王,用vs打开srs代码错误就一坨一坨的,没法正常使用。 +2. 
可以忽略编译,当作编辑器使用。windows下的linux代码无法编译过,mingw有时也不好使,但是Clion可以当作编辑器使用。 +3. 支持基本功能:函数跳转,类跳转,头文件跳转,hpp和cpp直接跳转,智能提示,没用的宏定义的提示。 +4. 支持FindUsage:函数或者类在什么地方使用了,这个功能对于代码阅读和修改很有用。 +5. 支持Refactor:Rename,Extract,Move,ChangeSignature,PullMemberUp/Down众多减少苦力的功能。 +6. 还有个牛逼的东西,选中后按CTRL+F,自动高亮。这个是非常非常常用的功能,比notepad++好。upp就是没有这个愁死我了。 +7. InspectCode,代码检查,分析代码潜在的问题,譬如我检查srs有:一千个拼写问题,没有用到的代码2百行,类型检查1百个,声明问题2个。 + +术业有专攻,JetBrains的IDE做得非常之用心。 + +## Windows + +下面介绍一种在Windows系统上图形化调试Linux的代码的方法。采用的是SSH远程调试的方式。 所以,需要准备2台主机,一台本地机器Windows,一台远端机器Linux(可以是虚拟机)。 + +Linux主机安装以下几个必需的软件。注意:本文的方式不需要安装gdb-server。 + +```bash +yum install perl-core cmake gcc gcc-c++ gdb -y +``` + +接下来,就全部是Clion的图形化配置了。 + +**1. 新建SSH连接** + +> 路径:File-->Settings-->Tools-->SSH Configurations + +点击`+`新建一个SSH连接,输入IP、端口、用户名、密码之后,点击`Test Connection`测试配置是否成功,如下图所示: + +![image.png](/img/doc-integration-ide-006.png) + +**2. 配置工具链** + +> 路径:File-->Settings-->Build,Execution,Deployment-->Toolchains + +点击`+`新建`Remote Host`,Credentials项选择刚才创建的SSH连接。 + +![image.png](/img/doc-integration-ide-007.png) + +`Clion`会自动检测CMake、gcc、g++、gdb的信息。上图中因为升级了cmake和gcc的版本,导致自动检测失败,所以手动指定其路径。 + +**3. 配置远程部署** + +> 路径:File-->Settings-->Build,Execution,Deployment-->Deployment + +点击`+`新建`SFTP`,切换到`Connection`选项卡,选择刚才新建的SSH连接。 + +![image.png](/img/doc-integration-ide-008.png) + +再切换到`Mappings`选项卡,配置本地路径和远程路径,用于代码同步 + +![image.png](/img/doc-integration-ide-009.png) + +**4. 代码上传** + +![image.png](/img/doc-integration-ide-010.png) + +> **注意**:代码上传到远端机之后,需要`chmod`授权,否则可能会遇到编译失败。 + +**5. 配置工作目录和启动参数** + +![image.png](/img/doc-integration-ide-011.png) + +最后,就是愉快的Clion之旅了。 + +Winlin 2015.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/ide) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/ingest.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/ingest.md new file mode 100644 index 00000000..cf43c10c --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/ingest.md @@ -0,0 +1,107 @@ +--- +title: Ingest +sidebar_label: Ingest +hide_title: false +hide_table_of_contents: false +--- + +# 采集 + +采集(Ingest)指的是将文件(flv,mp4,mkv,avi,rmvb等等),流(RTMP,RTMPT,RTMPS,RTSP,HTTP,HLS等等),设备等的数据,转封装为RTMP流(若编码不是h264/aac则需要转码),推送到SRS。 + +采集基本上就是使用FFMPEG作为编码器,或者转封装器,将外部流主动抓取到SRS。 + +采集的部署实例参考:[Ingest](./sample-ingest.md) + +## 应用场景 + +采集的主要应用场景包括: +* 虚拟直播:将文件编码为直播流。可以指定多个文件后,SRS会循环播放。 +* RTSP摄像头对接:以前安防摄像头都支持访问RTSP地址,RTSP无法在互联网播放。可以将RTSP采集后,以RTMP推送到SRS,后面的东西就不用讲了。 +* 直接采集设备:SRS采集功能可以作为编码器采集设备上的未压缩图像数据,譬如video4linux和alsa设备,编码为h264/aac后输出RTMP到SRS。 +* 将HTTP流采集为RTMP:有些老的设备,能输出HTTP的ts或FLV流,可以采集后转封装为RTMP,支持HLS输出。 + +总之,采集的应用场景主要是“SRS拉流”;能拉任意的流,只要ffmpeg支持;不是h264/aac都没有关系,ffmpeg能转码。 + +SRS默认是支持“推流”,即等待编码器推流上来,可以是专门的编码设备,FMLE,ffmpeg,xsplit,flash等等。 + +如此,SRS的接入方式可以是“推流到SRS”和“SRS主动拉流”,基本上作为源站的功能就完善了。 + +## 编译 + +Ingest需要在编译时打开:`--with-ingest`。参考:[Build](./install.md) + +Ingest默认使用自带的ffmpeg,也可以不编译ffmpeg,使用自己的编转码工具。禁用默认的ffmpeg在编译时指定`--without-ffmpeg`即可。参考:[Build](./install.md) + +## 配置 + +Ingest的配置如下: + +```bash +vhost your_vhost { + # ingest file/stream/device then push to SRS over RTMP. + # the name/id used to identify the ingest, must be unique in global. + # ingest id is used in reload or http api management. + ingest livestream { + # whether enabled ingest features + # default: off + enabled on; + # input file/stream/device + # @remark only support one input. + input { + # the type of input. + # can be file/stream/device, that is, + # file: ingest file specifies by url. + # stream: ingest stream specifeis by url. 
+ # device: not support yet. + # default: file + type file; + # the url of file/stream. + url ./doc/source.flv; + } + # the ffmpeg + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + # the transcode engine, @see all.transcode.srs.com + # @remark, the output is specified following. + engine { + # @see enabled of transcode engine. + # if disabled or vcodec/acodec not specified, use copy. + # default: off. + enabled off; + # output stream. variables: + # [vhost] current vhost which start the ingest. + # [port] system RTMP stream port. + output rtmp://127.0.0.1:[port]/live?vhost=[vhost]/livestream; + } + } +} +``` + +ingest指令后面是ingest的id,全局需要唯一,用来标识这个ingest。在reload/http-api管理时才知道操作的是哪个。譬如,reload时用来检测哪些ingest更新了,需要通知那些已经存在的ingest,停止已经不存在的ingest。 + +其中,`type`指定了输入的几种类型: +* file: 输入为文件,url指定了文件的路径。srs会给ffmpeg传递-re参数。 +* stream: 输入为流,url指定了流地址。 +* device: 暂时不支持。 + +`engine`指定了转码引擎参数: +* enabled: 指定是否转码,若off或者vcodec/acodec没有指定,则不转码,使用ffmpeg-copy。 +* output:输出路径。有两个变量可以使用:port为系统侦听的RTMP端口,vhost为配置了ingest的vhost。 +* 其他参考转码的配置:[FFMPEG](./ffmpeg.md) + +注意:engine默认为copy,当: +* engine的enabled为off,没有开启转码engine,则使用copy。 +* engine的vcodec/acodec没有指定,则使用copy。 + +## 采集多个文件 + +实现方法: +* 可以把输入文件变成文件列表。自己写工具实现采集列表。 + +参考:https://github.com/ossrs/srs/issues/55 + +Winlin 2014.4 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/ingest) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/install.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/install.md new file mode 100644 index 00000000..b05c134c --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/install.md @@ -0,0 +1,68 @@ +--- +title: 编译以及安装 +sidebar_label: 编译以及安装 +hide_title: false +hide_table_of_contents: false +--- + +# Install + +本文说明了如何编译和打包SRS,另外,可以直接下载release的binary,提供了几个常见系统的安装包,安装程序会安装系统服务,直接以系统服务启动即可。参考:[Github: release](http://ossrs.net/srs.release/releases/)或者[国内镜像: release](http://www.ossrs.net/srs.release/releases/) + +## OS + +* 推荐使用Ubuntu20. +* 若需要开发和编译SRS,建议用[srs-docker](https://github.com/ossrs/dev-docker/tree/dev). +* 建议直接使用[srs-docker](https://github.com/ossrs/dev-docker)运行SRS. + +## IPTABLES and SELINUX + +有时候启动没有问题,但是就是看不了,原因是防火墙和selinux开着。 + +可以用下面的方法关掉防火墙: + +```bash +# disable the firewall +sudo /etc/init.d/iptables stop +sudo /sbin/chkconfig iptables off +``` + +selinux也需要disable,运行命令`getenforce`,若不是Disabled,执行下面的步骤: + +1. 编辑配置文件:`sudo vi /etc/sysconfig/selinux` +1. 把SELINUX的值改为disabled:`SELINUX=disabled` +1. 
重启系统:`sudo init 6` + +## Build and Run SRS + +确定用什么编译选项后(参考下面的说明),编译SRS其实很简单。只需要RTMP和HLS: + +``` +./configure && make +``` + +指定配置文件,即可启动SRS: + +```bash +./objs/srs -c conf/srs.conf +``` + +推RTMP流和观看,参考[Usage: RTMP](./rtmp.md) + +更多使用方法,参考[Usage](https://github.com/ossrs/srs/tree/3.0release#usage) + +服务管理,参考[Service](./service.md) + +Docker启动SRS,参考[srs-docker](https://github.com/ossrs/dev-docker#usage) + +## ARM + +一般的ARM都可以直接编译,使用和上面的方法是一样的。 + +某些编译非常慢,或者没有编译器的嵌入式平台,才需要交叉编译,请参考[这里](./arm.md) + +Winlin 2014.11 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/install) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/introduction.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/introduction.md new file mode 100644 index 00000000..97c89199 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/introduction.md @@ -0,0 +1,147 @@ +--- +title: Introduction +sidebar_label: 介绍 +hide_title: false +hide_table_of_contents: false +--- + +# Introduction + +> 注意:SRS6属于开发版,不稳定。 + +SRS是一个开源的([MIT协议](../../../license))简单高效的实时视频服务器,支持RTMP、WebRTC、HLS、HTTP-FLV、SRT、MPEG-DASH和GB28181等协议。 +SRS媒体服务器和[FFmpeg](https://ffmpeg.org)、[OBS](https://obsproject.com)、[VLC](https://www.videolan.org)、 +[WebRTC](https://webrtc.org)等客户端配合使用,提供[流的接收和分发](./getting-started.md)的能力,是一个典型的发布 +(推流)和订阅(播放)服务器模型。 SRS支持互联网广泛应用的音视频协议转换,比如可以将[RTMP](./rtmp.md)或[SRT](./srt.md), +转成[HLS](./hls.md)或[HTTP-FLV](./flv.md)或[WebRTC](./webrtc.md)等协议。 + +SRS主要用于直播和WebRTC领域。在直播领域,SRS支持RTMP、HLS、SRT、MPEG-DASH和HTTP-FLV等典型协议。在WebRTC领域,SRS支持WebRTC、 +WHIP和WHEP等协议。SRS可以为直播和WebRTC实现协议转换。作为媒体服务器,SRS通常与FFmpeg、OBS和WebRTC等其他开源项目一起工作。 +Oryx作为一个开箱即用的媒体解决方案,整合了众多开源项目和工具,更多详细信息,请参考Oryx的 +[介绍](./getting-started-oryx.md#introduction)。 + +SRS提供了[HTTP API](./http-api.md)开放接口,可以查询系统的状态和流状态。同时还支持[HTTP Callback](./http-callback.md) +支持回调能力,主动通知你的系统,并可以实现流的鉴权能力和业务定制(比如动态DVR)。SRS也支持官方的[Prometheus Exporter](./exporter.md) +对接到云原生的监控系统,具备强大的可观测性。SRS支持会话级别[可追踪日志](./log.md),极大降低了系统维护成本。 + +若你是新接触音视频和流媒体的朋友,或者新接触SRS的朋友,推荐阅读[快速起步](./getting-started.md)和[学习路径](/guide)。请花时间 +阅读后续的文档,阅读和熟悉文档是社区的基本要求。如果你遇到问题,请先在[FAQ](../../../faq)中快速查找,然后在[Issues](https://github.com/ossrs/srs/issues) +和[Discussions](https://github.com/ossrs/srs/discussions)中查找,几乎所有问题都可以在这里找到答案。 + +SRS使用ANSI C++ (98)开发,只使用了基本的C++能力,可以在Linux、Windows、macOS等多个平台运行,推荐使用Ubuntu 20+系统开发和调试, +我们提供的镜像[ossrs/srs](https://hub.docker.com/r/ossrs/srs)也是基于Ubuntu 20 (focal)构建的。 + +> Note: 为了解决复杂的流媒体处理中的长连接和复杂的状态机问题,SRS使用了[ST(State Threads)](https://github.com/ossrs/state-threads) +协程技术(类似[Goroutine](https://go.dev/doc/effective_go#goroutines)),并在不断增强和维护ST的能力,支持了Linux、Windows、macOS +多个平台,X86_64、ARMv7、AARCH64、M1、RISCV、LOONGARCH和MIPS等多种CPU架构。 + +## Features + +功能一般是大家比较关注的点,丰富程度也是选择项目的重要原因, +详细的功能列表可以看 [Features](https://github.com/ossrs/srs/blob/develop/trunk/doc/Features.md#features) 。 +我们列出了主要的功能的版本,以及相关的Issue和PR链接。 + +此外,在 [里程碑](/product) 的详细描述中,也会介绍这个大的版本,所支持的功能。 + +> Note: 如果希望看每个里程碑的Issues,则可以在 [Milestones](https://github.com/ossrs/srs/milestones) 中查看。 + +特别注意的是,尽管不多,但SRS还是会将某些功能设置为 [Deprecated](https://github.com/ossrs/srs/blob/develop/trunk/doc/Features.md#features) , +可以在页面中搜索`Deprecated`或者`Removed`。我们也会详细解释为何要移除这个功能。 + +如果你想知道我们正在做的功能,可以在 [微信公众号](/contact#discussion) 中,点菜单的最新版本,比如`SRS 5.0`或者`SRS 6.0`。 +新的功能完成后,我们也会发布文章到微信公众号,请关注。 + +## Who's using SRS? 
+ +SRS的用户遍布全球,欢迎大家在[SRS应用案例](https://github.com/ossrs/srs/discussions/3771)中展示自己的SRS应用。 + +## Governance + +欢迎大家参与SRS的开发和维护,推荐从[Contribute](https://github.com/ossrs/srs/contribute)解决Issue和 +[提交PR](/how-to-file-pr)开始, 所有贡献过的朋友都会在[Contributors](https://github.com/ossrs/srs#authors) +中展示。 + +SRS是一个非商业化的开源社区,活跃的开发者都有自己的工作,会花自己的业余时间推动SRS的发展。 + +由于SRS整个体系是非常高效的,因此我们可以花很少的时间让SRS不断进步,交付功能丰富且稳定性很高的高质量产品,基于SRS定制也很容易。 + +我们是全球的开源社区,国内和海外都有开发者社区,我们欢迎开发者加入我们: + +* 巨大的成就感:你的代码可以影响全球的用户,改变音视频行业,并且随着SRS在各行各业的广泛应用,也改变了各行各业。 +* 扎实的技术进步:在这里可以和全球顶尖的音视频开发者交流,掌握高质量软件开发的能力,互相提升技术能力。 + +SRS目前使用了以下的技术和规则,保证项目的高质量和高效率: + +* 长时间的架构和方案探讨,对于大的功能和方案,需要得到长时间探讨,比如 [HEVC/H.265](https://github.com/ossrs/srs/issues/465) 的支持,我们讨论了7年。 +* 仔细认真的CodeReview,每个PullRequest至少2个TOC和Developer通过,并且Actions全部通过,才能合并。 +* 完善的单元测试(500多个)、覆盖率(60%左右)、黑盒测试等,保持一年开发一年测试的充足测试时间。 +* 全流水线,每个PullRequest会有流水线,每次发布由流水线自动完成。 + +欢迎加入我们,具体请访问 [Contribute](https://github.com/ossrs/srs/contribute) 按要求提交PullRequest。 + +## Milestone + +SRS大概是两年发布一个大版本,一年时间开发,一年时间提升稳定性,详细请参考[Milestone](/product)。 + +如果你想在线上使用SRS,推荐使用稳定版本。如果你想用新功能,就用开发版本。 + +SRS的分支规则,是按版本的分支,比如: + +* [develop](https://github.com/ossrs/srs/tree/develop) SRS 6.0,开发分支,不稳定,但是新功能最多。 +* [5.0release](https://github.com/ossrs/srs/tree/5.0release#releases) SRS 5.0,目前已经稳定,具体要看分支的状态。 +* [4.0release](https://github.com/ossrs/srs/tree/4.0release#releases) SRS 4.0,目前是稳定分支,而且会越来越稳定。 + +具体分支是否稳定,要看Releases的标记,比如 [SRS 4.0](https://github.com/ossrs/srs/tree/4.0release#releases) : + +* 2022-06-11, Release v4.0-r0,这个是稳定的发布版本。 +* 2021-12-01, Release v4.0-b0,这个是相对比较稳定的beta版本,也就是公测版本。 +* 2021-11-15, Release v4.0.198,这个版本就是不稳定的开发版。 + +> Note: 除了beta版本,还有alpha版本,比如`v5.0-a0`,是比beta更不稳定的内测版本。 + +> Note:每个alpha、beta、release版本,都会对应具体的版本号,比如`v5.0-a0`,对应的就是`v5.0.98`。 + +对于SRS来说,一般达到beta版本,就可以在线上使用了。 + +## Strategy + +SRS不做客户端,因为无论是FFmpeg,还是OBS,还是VLC,还是WebRTC,都是非常成熟和庞大的开源社区,我们和这些社区合作,使用这些社区的产品。 + +除了SRS服务器,我们还在做Oryx,还有WordPress插件等等,主要的目标还是根据不同行业,做出更简单的应用方式,包括: + +* [Oryx](https://github.com/ossrs/oryx) Oryx(SRS Stack),是一个开箱即用的单机的视频云,里面有FFmpeg和SRS等,主要是方便不会命令行的用户,直接通过腾讯云镜像或者宝塔,鼠标操作,就可以把音视频的应用搭起来。 +* [WordPress-Plugin-SrsPlayer](https://github.com/ossrs/WordPress-Plugin-SrsPlayer) 出版领域,比如个人博客、网站传媒等,方便用户可以使用音视频的能力。 +* [srs-unity](https://github.com/ossrs/srs-unity) 游戏领域,对接Unity的WebRTC SDK,使用音视频的能力。 + +SRS还会在工具链上不断完善,开发者可能不用SRS,但可能用过SB压测工具: + +* [srs-bench](https://github.com/ossrs/srs-bench) 音视频压测工具,包括RTMP/FLV/WebRTC/GB28181等,未来还会完善。 +* [state-threads](https://github.com/ossrs/state-threads) C的协程库,可以认为是C版本的Go,很小巧但很强大的服务器库,我们也会不断完善它。 +* [tea](https://github.com/ossrs/tea) 这是eBPF方向的探索,网络的弱网模拟,以及LB负载均衡。 + +通过不断完善音视频的工具链、解决方案、场景化的能力,让各行各业都可以应用音视频的能力。 + +## Sponsors + +SRS致力于构建一个非盈利性的开源项目和社区,我们对赞助SRS朋友提供专门的社区支持,请看[Sponsor](/contact#donation)。 + +音视频开发者,几乎必然碰到问题,估计大家比较习惯云厂商的贴身服务,来到开源社区就非常不习惯。 + +其实遇到问题不要慌张,大部分问题都是已经有的,可以在 [FAQ](../../../faq) 中找到答案,或者在文档 [Docs](./getting-started.md) 中找到答案。 + +也可以在 [支持](/contact) 中加微信群,和其他开发者交流,不过请遵守社区规范,否则也得不到支持的。 + +作为开发者,我们必须学会看文档,调查问题,然后再在社区中交流。 + +值得澄清的是,中国开发者的素质也越来越高了,深度开发者我们建议加付费星球,参考 [Support](/contact#donation) 。 + +SRS没有商业化的计划,我们目前正在努力建设全球的活跃的开发者社区,开源的价值会越来越大,社区彼此的支持也会越来越多。 + +## About Oryx + +Oryx是一个基于Go、Reactjs、SRS、FFmpeg、WebRTC等的轻量级、开源的视频云解决方案。 +详细请参考[Oryx](./getting-started-oryx.md)。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/introduction) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/k8s.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/k8s.md new file mode 100644 index 00000000..9397013f --- /dev/null +++ 
b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/k8s.md @@ -0,0 +1,2112 @@ +--- +title: K8s集群 +sidebar_label: K8s集群 +hide_title: false +hide_table_of_contents: false +--- + +# K8S + +> 流媒体服务和流媒体服务器的关键差异是什么?高效的运维能力是其中极其关键的差异之一,云计算+Docker+K8S让开源项目也能拥有这种能力,让每个人都能具备互联网流媒体服务能力,正如:旧时王谢堂前燕,飞入寻常百姓家! + +为何要用[k8s](https://docs.kubernetes.io/zh/docs/concepts/overview/what-is-kubernetes)部署SRS集群? + +* Simple(简单有效): 这玩意儿真的非常简单、高效便捷、直击服务部署和维护的痛点。羽扇纶巾,谈笑间强撸灰飞湮灭,不信一起来看[QuickStart](./k8s.md#quick-start). +* Declarative deployment(声明式部署):只需要根据业务量声明需要多少个SRS,自动配置和更新SLB,不用启动服务和看门狗,也不用机器故障时一顿操作猛如虎的迁移和更新。 +* Expand easily(扩容很容易): K8S可以自动扩容底层基础设施,例如可通过[ESS](https://essnew.console.aliyun.com/)自动,而业务集群(如SRS Edge)通过修改Pod数量(或根据策略)实现扩容。 +* Rolling Update(滚动式更新): K8S可以在不中断服务的前提下,实现服务的更新、回滚和灰度发布,这是提供稳定可靠高效服务的大杀器,总不能每次更新就被用户投诉吧?总不能每次都半夜三更提心吊胆吧? + +本文介绍了,在不同的业务场景下,如何使用[ACK(AlibabaCloud Container Service for Kubernetes)](https://www.alibabacloud.com/product/kubernetes)构建SRS集群。 + +1. [Deploy to Cloud Platforms](./k8s.md#deploy-to-cloud-platforms): 直接部署GitHub项目到云平台K8s。 +2. [Quick Start](./k8s.md#quick-start): 快速入门,在ACK中部署单SRS源站服务。 +3. [SRS Shares Volume with Nginx](./k8s.md#srs-shares-volume-with-nginx): SRS能分发简单的HTTP,也能和Nginx配合工作提供更强大的HTTP能力,比如:SRS分发RTMP/HTTP-FLV等流协议,Nginx分发HLS。 +4. [SRS Edge Cluster for High Concurrency Streaming](./k8s.md#srs-edge-cluster-for-high-concurrency-streaming): SRS边缘集群,支持高并发流媒体播放,减轻源站压力,分离源站关键业务,在SLB下自动扩容和更新。 +5. [SRS Origin Cluster for a Large Number of Streams](./k8s.md#srs-origin-cluster-for-a-large-number-of-streams): SRS源站集群,支持大规模的推流,流的自动发现,以及流的灾备。 +6. [SRS Cluster Update, Rollback, Gray Release with Zero Downtime](./k8s.md#srs-cluster-update-rollback-gray-release-with-zero-downtime): 如何在不中断服务的前提下,实现SRS集群的更新、回滚和灰度发布。 + 1. [SRS Cluster Rolling Update](./k8s.md#srs-cluster-rolling-update): 在平滑退出基础上的滚动更新,集群更新的基础机制。 + 2. [SRS Cluster Rolling Back](./k8s.md#srs-cluster-rolling-back): 在平滑退出基础上的发布回滚,发布遇到问题首先考虑回滚。 + 3. [SRS Cluster Canary Release](./k8s.md#srs-cluster-canary-release): 金丝雀升级,可精确控制的流量控制和回滚。 +7. [Useful Tips](./k8s.md#useful-tips): 补充的实用话题和场景 + 1. [Create K8S Cluster in ACK](./k8s.md#create-k8s-cluster-in-ack): 在阿里云ACK创建你的K8S集群。 + 2. [Publish Demo Streams to SRS](./k8s.md#publish-demo-streams-to-srs): 推送SRS的演示流,可直接推源站,也可以推边缘集群。 + 3. [Cleanup For DVR/HLS Temporary Files](./k8s.md#cleanup-for-dvrhls-temporary-files): 定期,比如每天凌晨1点,清理临时文件。 + 4. [Use One SLB and EIP for All Streaming Service](./k8s.md#use-one-slb-and-eip-for-all-streaming-service): 使用一个SLB(EIP)对外提供RTMP、HTTP-FLV、HLS等服务。 + 5. [Build SRS Origin Cluster as Deployment](./k8s.md#build-srs-origin-cluster-as-deployment): 除了以StatefulSet有状态应用方式部署Origin Cluster,我们还可以选择Deployment无状态应用方式。 + 6. [Managing Compute Resources for Containers](./k8s.md#managing-compute-resources-for-containers): 资源的申请和限制,以及如何调度和限制如何生效。 + 7. 
[Auto Reload by Inotify](./k8s.md#auto-reload-by-inotify): SRS侦听ConfigMap的变更,并支持自动reload。 + +## Deploy to Cloud Platforms + +SRS提供了一系列的模版项目,可以快速部署到云平台K8s: + +* [通用K8s](https://github.com/ossrs/srs-k8s-template) +* [TKE(腾讯云K8s)](https://github.com/ossrs/srs-tke-template) +* [ACK(阿里云K8s)](https://github.com/ossrs/srs-ack-template) +* [EKS(亚马逊AWS K8s)](https://github.com/ossrs/srs-eks-template) +* [AKS(微软Azure K8s)](https://github.com/ossrs/srs-aks-template) + +## Quick Start + +假设你有一个k8s集群(如果没有可以从[Create K8S Cluster in ACK](./k8s.md#create-k8s-cluster-in-ack)轻松创建),执行下面的命令应该是成功的: + +```bash +kubectl cluster-info +``` + +基于K8S,我们可以快速构建一个流媒体服务,尽管只有一个SRS源站。 + +在这个场景下,对比K8S和传统部署方式的差异: + +| 对比项 | ECS | K8S | 说明 | +| --- | --- | --- | --- | +| 资源 | 手动 | 自动 | 部署时,传统方式需要手动购买相关资源,
K8S自动购买需要的资源比如ECS、SLB和EIP等 | +| 部署 | 安装包 | 镜像 | Docker镜像可回滚,开发和生产环境一致,可Cache,
高效率和高密度,高可移植性,资源隔离可预测程序性能 | +| 看门狗 | 手动 | 自动 | SRS异常退出由看门狗重新拉起,非K8S需要手动安装,
K8S自动管理和拉起服务 | +| 迁移 | 手动 | 自动 | ECS更换时,非K8S需要手动申请,修改SLB,安装服务,
K8S自动迁移服务,更新SLB配置监听和保活等 | + +实现该场景的架构图如下所示: + +![SRS: Single Origin Server](/img/doc-advanced-guides-k8s-001.png) + +**Step 1:** 创建一个无状态应用[k8s deployment](https://v1-14.docs.kubernetes.io/docs/concepts/workloads/controllers/deployment),运行SRS源站服务器: + +```bash +cat < Note: 如果是自动创建SLB和EIP,那么HLS和RTMP/HTTP-FLV的IP是不一样的,你可以选择手动指定SLB,这两个服务可以用同一个SLB,参考[Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip)。 + +**Step 3:** 大功告成。查询服务的EIP地址,你就可以推拉流了。 + +执行命令`kubectl get svc/srs-origin-service`,可以查看服务的ExternalIP,也就是公网IP: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP +srs-origin-service LoadBalancer 172.21.12.131 28.170.32.118 +``` + +例子中的IP是`28.170.32.118`,就可以推流到这个公网IP地址,也可以从这个地址播放: + +* Publish RTMP to `rtmp://28.170.32.118/live/livestream` or [Publish Demo Streams to SRS](./k8s.md#ack-srs-publish-demo-stream-to-origin). +* Play RTMP from [rtmp://28.170.32.118/live/livestream](http://ossrs.net/players/srs_player.html?app=live&stream=livestream&server=28.170.32.118&port=1935&autostart=true&vhost=28.170.32.118) +* Play HTTP-FLV from [http://28.170.32.118:8080/live/livestream.flv](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.flv&server=28.170.32.118&port=8080&autostart=true&vhost=28.170.32.118&schema=http) +* Play HLS from [http://28.170.32.118:8080/live/livestream.m3u8](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.m3u8&server=28.170.32.118&port=8080&autostart=true&vhost=28.170.32.118&schema=http) + +![ACK: SRS Done](/img/doc-advanced-guides-k8s-002.png) + +## SRS Shares Volume with Nginx + +本章描述了基于K8S,SRS如何和Nginx配合提供更丰富的HTTP服务。 + +我们可以用SRS分发RTMP和HTTP-FLV等流媒体,并生成HLS切片到共享Volume,然后Nginx读取Volume并分发HLS。当然SRS也可以直接分发HLS切片,之所以用Nginx,这个场景可以用在: + +* 已经有Nginx和Web服务,SRS无法使用80端口,可以选择共享Volume方式给Nginx,当然也可以配置Nginx代理特定的URL。 +* SRS不支持HTTPS。Nginx可以支持HTTPS,配置Nginx支持证书后,可以将SRS生成的HLS,通过HTTPS分发。 +* SRS不支持HLS的鉴权。Nginx或其他Web框架,可以在用户访问HLS文件时,实现鉴权的逻辑。 +* SRS只支持HTTP/1.1部分协议。Nginx有更完善的HTTP功能,比如HTTP/2,完整的HTTP协议支持。 + +在这个场景下,对比K8S和传统部署方式的差异: + +| 对比项 | ECS | K8S | 说明 | +| --- | --- | --- | --- | +| 资源 | 手动 | 自动 | 部署时,传统方式需要手动购买相关资源,
K8S自动购买需要的资源比如ECS、SLB和EIP等 | +| 部署 | 安装包 | 镜像 | Docker镜像可回滚,开发和生产环境一致,可Cache,
高效率和高密度,高可移植性,资源隔离可预测程序性能 | +| 看门狗 | 手动 | 自动 | SRS异常退出由看门狗重新拉起,非K8S需要手动安装,
K8S自动管理和拉起服务 | +| 迁移 | 手动 | 自动 | ECS更换时,非K8S需要手动申请,修改SLB,安装服务,
K8S自动迁移服务,更新SLB配置监听和保活等 | + +实现该场景的架构图如下所示: + +![ACK: SRS Shares Volume with Nginx](/img/doc-advanced-guides-k8s-003.png) + +**Step 1:** 创建一个无状态应用[k8s deployment](https://v1-14.docs.kubernetes.io/docs/concepts/workloads/controllers/deployment),运行SRS和Nginx,HLS写入共享[Volume](https://v1-14.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir): + +```bash +cat < + if [[ ! -f /tmp/html/index.html ]]; then + cp -R ./objs/nginx/html/* /tmp/html + fi && + sleep infinity +EOF +``` + +> Note: Nginx的默认目录是`/usr/share/nginx/html`,若不是请改成你自己的目录。 + +> Note: SRS和Nginx挂载了[emptyDir Volume](https://v1-14.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir)共享HLS文件,默认是空目录,会随着Pod的销毁而清空。 + +> Note: 由于共享目录是空目录,我们启动了一个`srs-cp-files`的container,拷贝SRS默认的文件,参考[#1603](https://github.com/ossrs/srs/issues/1603). + +**Step 2:** 创建一个服务[k8s service](https://v1-14.docs.kubernetes.io/docs/concepts/services-networking/service),使用SLB对外提供流媒体服务: + +```bash +cat < Note: 我们通过Service暴露端口,对外提供服务,其中RTMP(1935)/FLV(8080)/API(1985)由SRS提供服务,HLS(80)由Nginx提供服务。 + +> Note: 这里我们选择ACK自动创建SLB和EIP,也可以手动指定SLB,参考[Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip)。 + +**Step 3:** 大功告成。你可以推拉流了,其中HLS流可以从SRS(8080)播放,也可以从Nginx(80)播放: + +* Publish RTMP to `rtmp://28.170.32.118/live/livestream` or [Publish Demo Streams to SRS](./k8s.md#ack-srs-publish-demo-stream-to-origin). +* Play RTMP from [rtmp://28.170.32.118/live/livestream](http://ossrs.net/players/srs_player.html?app=live&stream=livestream&server=28.170.32.118&port=1935&autostart=true&vhost=28.170.32.118) +* Play HTTP-FLV from [http://28.170.32.118:8080/live/livestream.flv](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.flv&server=28.170.32.118&port=8080&autostart=true&vhost=28.170.32.118&schema=http) +* Play HLS from [http://28.170.32.118:8080/live/livestream.m3u8](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.m3u8&server=28.170.32.118&port=8080&autostart=true&vhost=28.170.32.118&schema=http) +* Play HLS from [http://28.170.32.118/live/livestream.m3u8](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.m3u8&server=28.170.32.118&port=80&autostart=true&vhost=28.170.32.118&schema=http) + +> Note: 请将上面的EIP换成你自己的,可用命令`kubectl get svc/srs-origin-service`查看你的EIP。 + +## SRS Edge Cluster for High Concurrency Streaming + +本章描述了基于K8S,如何构建Edge Cluster实现高并发流媒体播放。 + +Edge Cluster实现了合并回源,对于某一路流,不管有多少客户端播放,Edge Server都只会从Origin Server取一路流,这样可以通过扩展Edge Cluster来增加支持的播放能力,也就是CDN网络具备的重要能力:高并发。 + +> Note: Edge Cluster根据客户端播放的协议不同,可以分为[RTMP Edge Cluster](./sample-rtmp-cluster.md)或[HTTP-FLV Edge Cluster](./sample-http-flv-cluster.md),详细请参考相关Wiki。 + +对于自建源站,没有那么多播放量,为何不建议使用[SRS单源站](./k8s.md#quick-start)直接提供服务,而要用Edge Cluster呢?主要场景分析如下: + +* 防止Origin过载,即使推流非常少而且播放的流也不多,比如自建源站后使用CDN回源,在多家CDN回源时,也可能一个CDN一条流会有多个回源连接。使用Edge能保护Origin不因为回源造成Origin问题,最多就是某些Edge被回源打挂。 +* 可以使用多个Edge Cluster(只需要再加srs-edge-service就可以),对外用不同的SLB暴露,可以针对每个SLB限流,防止CDN之间互相干扰。这样能保证某些CDN是可用的,而不是Origin挂了后所有CDN都不可用。 +* 分离Origin关键业务,将下行流媒体分发业务交给Edge Cluster,Origin可以做切片、DVR、鉴权等关键业务,避免业务之间互相干扰。 + +在这个场景下,对比K8S和传统部署方式的差异: + +| 对比项 | ECS | K8S | 说明 | +| --- | --- | --- | --- | +| 资源 | 手动 | 自动 | 部署时,传统方式需要手动购买相关资源,
K8S自动购买需要的资源比如ECS、SLB和EIP等 | +| 部署 | 安装包 | 镜像 | Docker镜像可回滚,开发和生产环境一致,可Cache,
高效率和高密度,高可移植性,资源隔离可预测程序性能 | +| 看门狗 | 手动 | 自动 | SRS异常退出由看门狗重新拉起,非K8S需要手动安装,
K8S自动管理和拉起服务 | +| 迁移 | 手动 | 自动 | ECS更换时,非K8S需要手动申请,修改SLB,安装服务,
K8S自动迁移服务,更新SLB配置监听和保活等 | +| 配置 | 文件 | Volume | ECS需要手动管理配置;K8S配置在ConfigMap,
通过Volume挂载为配置文件,扩容时不用变更 | +| 扩容 | 手动 | 自动 | 需要新开进程时,ECS需要申请部署和配置,
K8S只需要修改Replicas数目即可(也可自动扩容) | +| 发现 | 手动 | 自动 | Origin变更IP时,ECS需要手动修改配置,
K8S自动通知边缘和自动发现 | +| SLB | 手动 | 自动 | 新增Edge时,ECS需要手动更新SLB配置,
K8S自动更新SLB配置 | + +实现该场景的架构图如下所示: + +![ACK: SRS Edge Cluster for High Concurrency Streaming](/img/doc-advanced-guides-k8s-004.png) + +**Step 1:** 创建SRS和Nginx源站应用和服务。 + +* `srs-origin-deploy`: 创建一个无状态应用[k8s deployment](https://v1-14.docs.kubernetes.io/docs/concepts/workloads/controllers/deployment),运行SRS Origin Server和Nginx,HLS写入共享[Volume](https://v1-14.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir): +* `srs-origin-service`: 创建一个服务[k8s service](https://v1-14.docs.kubernetes.io/docs/concepts/services-networking/service),基于ClusterIP提供Origin服务,供内部Edge Server调用。 +* `srs-http-service`: 创建一个服务[k8s service](https://v1-14.docs.kubernetes.io/docs/concepts/services-networking/service),基于SLB提供HTTP服务,Nginx对外提供HLS服务。 + +```bash +cat < + if [[ ! -f /tmp/html/index.html ]]; then + cp -R ./objs/nginx/html/* /tmp/html + fi && + sleep infinity + +--- + +apiVersion: v1 +kind: Service +metadata: + name: srs-origin-service +spec: + type: ClusterIP + selector: + app: srs-origin + ports: + - name: srs-origin-service-1935-1935 + port: 1935 + protocol: TCP + targetPort: 1935 + +--- + +apiVersion: v1 +kind: Service +metadata: + name: srs-http-service +spec: + type: LoadBalancer + selector: + app: srs-origin + ports: + - name: srs-http-service-80-80 + port: 80 + protocol: TCP + targetPort: 80 + - name: srs-http-service-1985-1985 + port: 1985 + protocol: TCP + targetPort: 1985 +EOF +``` + +> Note: Origin Server在集群内部提供流媒体源站服务,他的服务类型为ClusterIP,内部域名为`srs-origin-service`,Edge Server会通过该域名连接到Origin Server。 + +> Note: SRS和Nginx挂载了[emptyDir Volume](https://v1-14.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir)共享HLS文件,默认是空目录,会随着Pod的销毁而清空。 + +> Note: 由于共享目录是空目录,我们启动了一个`srs-cp-files`的container,拷贝SRS默认的文件,参考[#1603](https://github.com/ossrs/srs/issues/1603). + +> Note: 服务`srs-http-service`暴露的是Nginx(80)端口,对外提供HLS服务;以及SRS(1985)端口,对外提供API服务。 + +> Note: 这里我们选择ACK自动创建SLB和EIP,也可以手动指定SLB,参考[Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip)。 + +**Step 2:** 创建SRS边缘配置、应用和服务。 + +* `srs-edge-config`: 创建一个配置[k8s ConfigMap](https://v1-14.docs.kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#understanding-configmaps-and-pods),存储了SRS Edge Server使用的配置文件。 +* `srs-edge-deploy`: 创建一个无状态应用[k8s deployment](https://v1-14.docs.kubernetes.io/docs/concepts/workloads/controllers/deployment),运行多个SRS Edge Server。 +* `srs-edge-service`: 创建一个服务[k8s service](https://v1-14.docs.kubernetes.io/docs/concepts/services-networking/service)基于SLB对外提供流媒体服务。 + +```bash +cat < Note: 我们将Edge Server的配置存储在[ConfigMap](https://v1-14.docs.kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/)中,名称为`srs-edge-config`,然后将[ConfigMap挂载](https://v1-14.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap)为配置文件`/usr/local/srs/conf/srs.conf`,其中`srs.conf`是在ConfigMap的配置项名称。 + +> Note: Edge Server读取配置文件,通过Service注册的内部域名`srs-origin-service`,连接到Origin Server。 + +> Note: 服务`srs-edge-service`暴露的是SRS的1935端口,对外提供RTMP服务;以及SRS的8080端口,对外提供HTTP-FLV服务。 + +> Note: 这里我们选择ACK自动创建SLB和EIP,也可以手动指定SLB,参考[Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip)。 + +**Step 3:** 大功告成。你可以推拉流了,其中HLS流可以从Nginx(80)播放,RTMP和HTTP-FLV从SRS播放: + +* Publish RTMP to `rtmp://28.170.32.118/live/livestream` or [Publish Demo Streams to SRS](./k8s.md#ack-srs-publish-demo-stream-to-edge). 
+* Play RTMP from [rtmp://28.170.32.118/live/livestream](http://ossrs.net/players/srs_player.html?app=live&stream=livestream&server=28.170.32.118&port=1935&autostart=true&vhost=28.170.32.118) +* Play HTTP-FLV from [http://28.170.32.118:8080/live/livestream.flv](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.flv&server=28.170.32.118&port=8080&autostart=true&vhost=28.170.32.118&schema=http) +* Play HLS from [http://28.170.32.118/live/livestream.m3u8](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.m3u8&server=28.170.32.118&port=80&autostart=true&vhost=28.170.32.118&schema=http) + +> Note: 请将上面的EIP换成你自己的,可用命令`kubectl get svc/srs-http-service`或`kubectl get svc/srs-edge-service`查看你的EIP。 + +> Note: 如果是自动创建SLB和EIP,那么HLS和RTMP/HTTP-FLV的IP是不一样的,你可以选择手动指定SLB,这两个服务可以用同一个SLB,参考[Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip)。 + +## SRS Origin Cluster for a Large Number of Streams + +本章描述了基于K8S,如何构建Origin Cluster支持超多推流场景。 + +[Origin Cluster](./origin-cluster.md)通过配置其他源站的信息,在本源站没有流时查询到流的位置,通过RTMP 302定向到指定源站,具体原理可以参考[#464](https://github.com/ossrs/srs/issues/464)。主要应用场景如下: + +* 源站灾备:即使流比较少,也可以用两个源站,这样可以将流分散到不同的源站,避免源站出现问题时影响所有的流。 +* 海量推流:单源站可以支持1000到3000路流,高码率的流支持的路数更少,有DVR和HLS时支持的路更少,源站集群有多个源站同时接收推流,可以支持10k~100k推流,参考[规格](https://github.com/ossrs/srs/issues/464#issuecomment-586550917)。 +* 复杂源站业务:源站除了支持推流和拉流,还有重要的功能是DVR、转码、转HLS,DVR和HLS涉及磁盘,转码涉及CPU,都是容易发生瓶颈的资源依赖,源站集群扩展能力更强。 + +在这个场景下,对比K8S和传统部署方式的差异: + +| 对比项 | ECS | K8S | 说明 | +| --- | --- | --- | --- | +| 资源 | 手动 | 自动 | 部署时,传统方式需要手动购买相关资源,
K8S自动购买需要的资源比如ECS、SLB和EIP等 | +| 部署 | 安装包 | 镜像 | Docker镜像可回滚,开发和生产环境一致,可Cache,
高效率和高密度,高可移植性,资源隔离可预测程序性能 | +| 看门狗 | 手动 | 自动 | SRS异常退出由看门狗重新拉起,非K8S需要手动安装,
K8S自动管理和拉起服务 | +| 迁移 | 手动 | 自动 | ECS更换时,非K8S需要手动申请,修改SLB,安装服务,
K8S自动迁移服务,更新SLB配置监听和保活等 | +| 配置 | 文件 | Volume | ECS需要手动管理配置;K8S配置在ConfigMap,
通过Volume挂载为配置文件,扩容时源站手动更新自动推送,
边缘扩容自动更新 | +| 扩容 | 手动 | 自动 | 需要新开进程时,ECS需要申请部署和配置,
K8S只需要修改Replicas数目即可(也可自动扩容) | +| 发现 | 手动 | 自动 | Origin变更IP时,ECS需要手动修改配置,
K8S在迁移源站Pod时会保持,或自动更新 | +| SLB | 手动 | 自动 | 新增Origin时,ECS需要手动安装和更新配置,
K8S自动安装,手动更新但自动推送配置 | +| 存储 | 手动 | 自动 | 扩容存储时,ECS需要手动安装和更新,
K8S会自动更新,不影响业务 | + +实现该场景的架构图如下所示: + +![ACK: SRS Origin Cluster for a Large Number of Streams](/img/doc-advanced-guides-k8s-005.png) + +**Step 1:** 由于SRS和Nginx不在一个Pod可能也不在一个Node,需要创建依赖的PV(Persistent Volume)持久化卷,可[购买NAS](./k8s.md#ack-create-cluster-pv-nas)例如: + +* 驱动类型(PV driver):`alicloud/nas` +* 挂载点(PV server),可在控制台创建、查看和复制:`1abb5492f7-ubq80.cn-beijing.nas.aliyuncs.com` +* NFS版本(PV vers):`3` + +在NAS基础上可以创建PV,以及PVC: + +* `pv-nas`,从NAS存储创建的PV,支持多写和多读,Pod不使用存储后会回收,也就是删除这些数据。 +* `pvc-nas`,SRS和Nginx源站使用的PVC,具有读写权限。读取SRS的静态文件和HLS并分发。 + +```bash +cat < Note: 请将上面的挂载点(PV server)替换成你的。 + +> Note: SRS和Nginx使用`pvc-nas`描述自己的存储需求,K8S会绑定和分配存储`pv-nas`。 + +**Step 2:** 创建SRS源站集群和Nginx源站应用和服务。 + +* `srs-origin-config`: 创建一个配置[k8s ConfigMap](https://v1-14.docs.kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#understanding-configmaps-and-pods),存储了SRS Origin Server使用的配置文件。 +* `socs`: 创建一个Headless服务[k8s service](https://v1-14.docs.kubernetes.io/docs/concepts/services-networking/service/#headless-services),基于Headless Service提供Origin服务,每个Origin都有自己的服务地址,例如`srs-origin-0.socs`,供内部Edge Server调用。 +* `srs-origin`: 创建一个有状态应用[k8s StatefulSet](https://v1-14.docs.kubernetes.io/docs/concepts/workloads/controllers/statefulset/),运行SRS Origin Cluster,HLS写入共享存储PV。 +* `srs-api-service`: 创建一个服务[k8s service](https://v1-14.docs.kubernetes.io/docs/concepts/services-networking/service),基于SLB提供HTTP服务,SRS第一个源站提供API服务,标签为`statefulset.kubernetes.io/pod-name: srs-origin-0`。 + +```bash +cat < Note: 配置存储在ConfigMap中`srs-origin-config`,会被以Volume方式挂载成配置文件`/usr/local/srs/conf/srs.conf`。 + +> Remark: 源站集群配置,需要配置各个源站的服务地址也就是域名。假设SRS源站有状态服务`srs-origin`配置的Replicas为2,则会生成两个源站`srs-origin-0.socs`和`srs-origin-1.socs`,若新增了源站比如Replicas为3,则需要在配置中加上`srs-origin-2.socs`。 + +> Note: Origin Server在集群内部提供流媒体源站服务,以有状态服务方式提供名字为`socs`,每个源站会自动分配内部域名,内部域名为`srs-origin-0.socs`和`srs-origin-1.socs`,Edge Server会配置这些域名连接到Origin Server。 + +> Note: 源站对外提供API服务`srs-api-service`,我们选择第一个源站对外提供API服务,实际上源站集群需要改进这点,参考[#1607](https://github.com/ossrs/srs/issues/1607#issuecomment-586549464)。 + +> Note: 这里我们选择ACK自动创建SLB和EIP,也可以手动指定SLB,参考[Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip)。 + +* `nginx-origin-deploy`: 创建一个无状态应用[k8s deployment](https://v1-14.docs.kubernetes.io/docs/concepts/workloads/controllers/deployment),运行Nginx,将SRS静态文件写入PV,从共享存储PV读取HLS和静态文件。 +* `srs-http-service`: 创建一个服务[k8s service](https://v1-14.docs.kubernetes.io/docs/concepts/services-networking/service),基于SLB提供HTTP服务,Nginx对外提供HLS服务。 + +```bash +cat < + if [[ ! -f /tmp/html/index.html ]]; then + cp -R ./objs/nginx/html/* /tmp/html + fi && + sleep infinity + +--- + +apiVersion: v1 +kind: Service +metadata: + name: srs-http-service +spec: + type: LoadBalancer + selector: + app: nginx-origin + ports: + - name: nginx-origin-service-80-80 + port: 80 + protocol: TCP + targetPort: 80 +EOF +``` + +> Note: 由于共享目录是空目录,我们启动了一个`srs-cp-files`的container,拷贝SRS默认的文件,参考[#1603](https://github.com/ossrs/srs/issues/1603). 
+ +> Note: Nginx通过Shared Volume(PV)读取SRS Origin生成的切片,对外提供HLS服务。 + +> Note: 这里我们选择ACK自动创建SLB和EIP,也可以手动指定SLB,参考[Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip)。 + +**Step 3:** 创建SRS边缘配置、应用和服务。 + +* `srs-edge-config`: 创建一个配置[k8s ConfigMap](https://v1-14.docs.kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#understanding-configmaps-and-pods),存储了SRS Edge Server使用的配置文件。 +* `srs-edge-deploy`: 创建一个无状态应用[k8s deployment](https://v1-14.docs.kubernetes.io/docs/concepts/workloads/controllers/deployment),运行多个SRS Edge Server。 +* `srs-edge-service`: 创建一个服务[k8s service](https://v1-14.docs.kubernetes.io/docs/concepts/services-networking/service)基于SLB对外提供流媒体服务。 + +```bash +cat < Remark: 假设SRS源站有状态服务`srs-origin`配置的Replicas为2,则会生成两个源站`srs-origin-0.socs`和`srs-origin-1.socs`,若新增了源站比如Replicas为3,则需要在配置中加上`srs-origin-2.socs`。 + +> Note: Edge Server的配置中,通过源站在Headless Service注册的内部域名`srs-origin-0.socs`等等,连接到Origin Server。 + +> Note: 这里我们选择ACK自动创建SLB和EIP,也可以手动指定SLB,参考[Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip)。 + +**Step 4:** 大功告成。你可以推拉流了,其中HLS流可以从Nginx(80)播放,RTMP和HTTP-FLV从SRS播放: + +* Publish RTMP to `rtmp://28.170.32.118/live/livestream` or [Publish Demo Streams to SRS](./k8s.md#ack-srs-publish-demo-stream-to-edge). +* Play RTMP from [rtmp://28.170.32.118/live/livestream](http://ossrs.net/players/srs_player.html?app=live&stream=livestream&server=28.170.32.118&port=1935&autostart=true&vhost=28.170.32.118) +* Play HTTP-FLV from [http://28.170.32.118:8080/live/livestream.flv](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.flv&server=28.170.32.118&port=8080&autostart=true&vhost=28.170.32.118&schema=http) +* Play HLS from [http://28.170.32.118/live/livestream.m3u8](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.m3u8&server=28.170.32.118&port=80&autostart=true&vhost=28.170.32.118&schema=http) + +> Note: 请将上面的EIP换成你自己的,可用命令`kubectl get svc/srs-http-service`或`kubectl get svc/srs-edge-service`查看你的EIP。 + +> Note: 如果是自动创建SLB和EIP,那么HLS和RTMP/HTTP-FLV的IP是不一样的,你可以选择手动指定SLB,这两个服务可以用同一个SLB,参考[Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip)。 + +这里我们选择的是有状态集群方式,也可以选择以无状态应用(Deployment)方式部署源站,参考[Build SRS Origin Cluster as Deployment](./k8s.md#build-srs-origin-cluster-as-deployment)。 + +## SRS Cluster Update, Rollback, Gray Release with Zero Downtime + +服务的更新、回滚和灰度,是个简单的问题,如果加上一个条件"不中断服务的前提下",那么就是一个难题,如果再加上"大规模",那么就是K8S要解决的核心问题之一。 +坏消息是这个难搞的问题还真是流媒体服务的核心的、关键的、不可忽视的关键能力之一,好消息是K8S和云计算让这个难题稍微好一点点了。 + +我们在什么场景下会遇到更新、回滚和灰度的问题: + +* SRS需要升级新版本,如何知道升级后对现有业务没有影响?如果选择业务量小升级,那一般常态会是半夜三更、凌晨三四点,还要不要头发了呢? +* 改进了新的功能或优化,根据业务定制了新的东西(完全直接使用SRS也得有自己的业务服务器),如何只在一部分机器发布,看看效果有没有达到预期? +* 更新新版本后,如果发现有问题,影响了用户服务,如何在最短时间内回滚到之前的版本?问题出现时首先是要确认问题后(若由升级引起则)回滚,而不是很费时间的找Bug。 + +在这个场景下,对比K8S和传统部署方式的差异: + +| 对比项 | ECS | K8S | 说明 | +| --- | --- | --- | --- | +| 部署 | 安装包 | 镜像 | Docker镜像可回滚,开发和生产环境一致,可Cache,
高效率和高密度,高可移植性,资源隔离可预测程序性能 | +| 看门狗 | 手动 | 自动 | SRS异常退出由看门狗重新拉起,非K8S需要手动安装,
K8S自动管理和拉起服务 | +| 更新 | 手动 | 自动 | 传统方式用脚本下载和更新二进制,人工分批更新,
K8S自动Rolling Update,自动下载镜像和分批更新 | +| 灰度 | 手动 | 自动 | 传统方式手动操作SLB决定切量比例,K8S通过Replicas控制比例,自动切量 | +| 回滚 | 手动 | 自动 | 传统方式手动回滚,K8S有版本管理和回滚机制 | + +> Note: 平滑更新的关键是平滑退出,重点是边缘集群的更新,对于源站集群我们可以选择直接重启,因为一般会有边缘集群作为代理,源站断开后边缘会重试,不影响用户,参考[#1579](https://github.com/ossrs/srs/issues/1579#issuecomment-587233844) + +我们重点关注边缘集群的平滑退出,SRS边缘属于长连接无状态服务。和Nginx一样,SRS使用[SIGQUIT](./service.md#gracefully-upgrade)作为信号, +同时配置[force_grace_quit](https://github.com/ossrs/srs/issues/1579#issuecomment-587475077)认为SIGTERM也是平滑退出,收到SIGQUIT信号后,会等待[grace_start_wait](https://github.com/ossrs/srs/issues/1595#issuecomment-587516567)指定的时间,然后关闭Listeners新的连接不会分配到这个服务器, +然后开始清理并等待现有连接退出,所有连接退出后还会等待[grace_final_wait](https://github.com/ossrs/srs/issues/1579#issuecomment-587414898)指定的时间,才会退出。 + +以之前部署的SRS源站和边缘集群为例,参考[SRS Origin Cluster for a Large Number of Streams](./k8s.md#srs-origin-cluster-for-a-large-number-of-streams),SRS边缘的Pod的配置,需要指定平滑退出的参数,例如: + +```bash +cat < Remark: 一定要开启`force_grace_quit`,不开启(默认)将使用暴力更新,直接断开现有的连接,参考[#1579](https://github.com/ossrs/srs/issues/1579#issuecomment-587475077) + +> Note: 在K8S中开始删除Pod时,会快速从Service删除Pod,所以我们将`grace_start_wait`和`grace_final_wait`设置时间短一些,只需要几百毫秒就足够了。 + +SRS边缘的配置,也需要在`lifecycle.preStop`事件时启动平滑退出,并设置`terminationGracePeriodSeconds`等待时间,例如: + +```bash +cat < Note: `kubectl apply`增加了一个参数`--record`,后面回滚会用到。 + +> Note: `terminationGracePeriodSeconds`等待退出时间我们设置2分钟,线上服务可以设置更长,比如12小时。 + +> Remark: 为了更好体现平滑更新的逻辑,我们设置`Replicas=2`可以更容易演示。 + +> Remark: 我们使用SRS4演示,例如`v4.0.5`,实际上SRS3也可以的比如`v3.0-b1`等。 + +我们停掉了之前`srs-demo-deploy`推的两个DEMO流,采用手动推流到Edge,方便演示升级时有长连接需要服务的情况: + +``` +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://28.170.32.118/live/livestream +``` + +> Note: 请将上面的EIP换成你自己的,可用命令`kubectl get svc/srs-edge-service`查看你的EIP。 + +咱们可以看到目前启动了2个Edge,可以看下它的版本,是通过Pod(`z9gbm`)推流: + +``` +kubectl get po|grep edge +srs-edge-deploy-58d9999b7c-pnr2f 1/1 Running 0 16s +srs-edge-deploy-58d9999b7c-z9gbm 1/1 Running 0 16s + +kubectl exec srs-edge-deploy-58d9999b7c-pnr2f -- ./objs/srs -v +4.0.5 +kubectl exec srs-edge-deploy-58d9999b7c-pnr2f -- yum install -y net-tools +kubectl exec srs-edge-deploy-58d9999b7c-pnr2f -- netstat -anp|grep 1935 +tcp 0 0 0.0.0.0:1935 0.0.0.0:* LISTEN 1/./objs/srs + +kubectl exec srs-edge-deploy-58d9999b7c-z9gbm -- ./objs/srs -v +4.0.5 +kubectl exec srs-edge-deploy-58d9999b7c-z9gbm -- yum install -y net-tools +kubectl exec srs-edge-deploy-58d9999b7c-z9gbm -- netstat -anp|grep 1935 +tcp 0 0 0.0.0.0:1935 0.0.0.0:* LISTEN 1/./objs/srs +tcp 0 0 172.20.0.62:46482 172.20.0.41:1935 ESTABLISHED 1/./objs/srs +tcp 0 0 172.20.0.62:1935 172.20.0.1:12066 ESTABLISHED 1/./objs/srs +``` + +> Note: 我们只推流一个流,会有两个连接,一个是客户端到Edge的连接,一个是Edge回源到Origin的连接。 + +下面我们会分几个部分,看发布中遇到的问题: + +1. [SRS Cluster Rolling Update](./k8s.md#srs-cluster-rolling-update): 在平滑退出基础上的滚动更新,集群更新的基础机制。 +1. [SRS Cluster Rolling Back](./k8s.md#srs-cluster-rolling-back): 在平滑退出基础上的发布回滚,发布遇到问题首先考虑回滚。 +1. 
[SRS Cluster Canary Release](./k8s.md#srs-cluster-canary-release): 金丝雀升级,可精确控制的流量控制和回滚。 + +### SRS Cluster Rolling Update + +K8S的更新是[Rolling Update](https://v1-14.docs.kubernetes.io/docs/tutorials/kubernetes-basics/update/update-intro/),也就是修改和更新Pods时,会分批次执行。 +比如,上面的例子中SRS边缘的版本是`v4.0.5`,若我们现在需要更新到`4.0.6`,镜像已经打好了`ossrs/srs:v4.0.6`,那么我们可以用命令更新: + +```bash +kubectl set image deploy/srs-edge-deploy srs=ossrs/srs:v4.0.6 --record +``` + +可以看这两个Pod的日志,没有连接的Pod很快就退出了,而有连接的Pod经过了一定的时间才退出(若客户端连接主动断开会更快退出): + +```bash +kubectl exec srs-edge-deploy-58d9999b7c-pnr2f -- tail -f objs/srs.log +[2020-02-19 11:07:20.818][Trace][1][937] sig=3, user start gracefully quit +[2020-02-19 11:07:20.960][Trace][1][937] force gracefully quit, signo=15 +[2020-02-19 11:07:21.772][Trace][1][932] cleanup for quit signal fast=0, grace=1 +[2020-02-19 11:07:21.772][Warn][1][932][11] main cycle terminated, system quit normally. +command terminated with exit code 137 + +kubectl exec srs-edge-deploy-58d9999b7c-z9gbm -- tail -f objs/srs.log +[2020-02-19 11:07:23.095][Trace][1][1009] sig=3, user start gracefully quit +[2020-02-19 11:07:23.316][Trace][1][1009] force gracefully quit, signo=15 +[2020-02-19 11:07:23.784][Trace][1][1004] cleanup for quit signal fast=0, grace=1 +[2020-02-19 11:07:23.784][Warn][1][1004][11] main cycle terminated, system quit normally. +[2020-02-19 11:07:24.784][Trace][1][1004] wait for 1 conns to quit +[2020-02-19 11:07:26.968][Trace][1][1010] <- CPB time=120041497, okbps=0,0,0, ikbps=252,277,0, mr=0/350, p1stpt=20000, pnt=5000 +[2020-02-19 11:08:26.791][Trace][1][1004] wait for 1 conns to quit +[2020-02-19 11:08:52.602][Trace][1][1010] edge change from 200 to state 0 (init). +[2020-02-19 11:08:52.792][Trace][1][1004] wait for 0 conns to quit +command terminated with exit code 137 + +kubectl get po |grep edge +NAME READY STATUS RESTARTS AGE +srs-edge-deploy-58d9999b7c-z9gbm 0/1 Terminating 0 3m52s +srs-edge-deploy-76fcbfb848-z5rmn 1/1 Running 0 104s +srs-edge-deploy-76fcbfb848-zt4wv 1/1 Running 0 106s +``` + +> Remark: 注意我们现在是有一个Pod有客户端在推流的。同样,我们指定了参数`--record`,会在后面回滚时用得着。 + +若Rolling Update期间,我们需要暂停更新,可以用`kubectl rollout`暂停和恢复: + +```bash +kubectl rollout pause deploy/srs-edge-deploy +kubectl rollout resume deploy/srs-edge-deploy +``` + +> Remark: 注意并不是滚动过程中停止,而是暂停的下一次Rollout,参考[理解rollout pause和resume](https://blog.csdn.net/waltonwang/article/details/77461697)。 + +### SRS Cluster Rolling Back + +每次发布K8S都会记录一个Revision,若我们传递了`--record`参数(正如前面我们做的),则会记录更详细的CHANGE-CAUSE,比如: + +```bash +kubectl rollout history deploy/srs-edge-deploy +REVISION CHANGE-CAUSE +1 kubectl apply --record=true --filename=- +2 kubectl set image deploy/srs-edge-deploy srs=ossrs/srs:v4.0.6 --record=true +``` + +> Note: 默认ACK只保留10个Revision,可以通过设置`revisionHistoryLimit`增加可回滚的版本。 + +若出现异常,可以回滚到之前的版本,例如: + +```bash +kubectl rollout undo deploy/srs-edge-deploy --to-revision=1 +``` + +实际上回滚的过程也是Rolling Update的过程,只是不用指定修改什么配置,而是指定的哪个历史版本的配置。回滚后,新增了一个版本3,和1是一样的: + +``` +REVISION CHANGE-CAUSE +1 kubectl apply --record=true --filename=- +2 kubectl set image deploy/srs-edge-deploy srs=ossrs/srs:v4.0.6 --record=true +3 kubectl apply --record=true --filename=- +``` + +> Note: 可以在阿里云控制台来选择回滚到哪个版本。 + +### SRS Cluster Canary Release + +Canary是金丝雀发布,指试探性的发布一些版本,没有问题就继续扩大比例。由于涉及到具体的发布比例,所以我们要在Rolling Update基础上, +能控制新老Pods的数目,这就需要使用SLB了,参考[Kubernetes集群中使用阿里云 SLB 实现四层金丝雀发布](https://help.aliyun.com/document_detail/86751.html)。 + +> Note: 关于金丝雀发布,最初发布的版本就好比金丝雀,在以前煤矿中会把金丝雀先送下去,如果缺氧雀儿就挂了。 + +以上面的Edge集群为例,假设目前版本是`v4.0.5`,有三个Edge Pod在运行,通过SLB对外提供服务: + +![ACK: 
SRS Cluster Canary Release Starting Point](/img/doc-advanced-guides-k8s-006.png) + +```bash +cat < Remark: 注意Pod的labels有两个,一个是`run: srs-edge-r5`是这个应用所使用的,另外一个是`app: srs-edge`是Service用的,新老的SRS都有这个标签这样Service就可以都转发了。 + +执行命令后,可以看到三个Pod在运行: + +```bash +kubectl get po +NAME READY STATUS RESTARTS AGE +srs-edge-r5-deploy-6c84cdc77b-q2j97 1/1 Running 0 3m15s +srs-edge-r5-deploy-6c84cdc77b-s6pzh 1/1 Running 0 3m15s +srs-edge-r5-deploy-6c84cdc77b-wjdtl 1/1 Running 0 3m15s +``` + +如果我们要升级到`v4.0.6`,但是只想先升级一台,这台就是金丝雀了。我们可以创建另外一个Deployment,他们的name不一样,但使用同样的Service: + +![ACK: SRS Cluster Canary Release with One New Pod](/img/doc-advanced-guides-k8s-007.png) + +```bash +cat < Remark: 注意Pod的labels有两个,一个是`run: srs-edge-r6`是这个应用所使用的,另外一个是`app: srs-edge`是Service用的,和之前的老版本是一样的,这样Service就可以都转发了。 + +执行命令后,可以看到四个Pod在运行,三个老的,一个新的,这样就灰度了25%的流量到了新版本: + +```bash +kubectl get po +NAME READY STATUS RESTARTS AGE +srs-edge-r5-deploy-6c84cdc77b-q2j97 1/1 Running 0 3m30s +srs-edge-r5-deploy-6c84cdc77b-s6pzh 1/1 Running 0 3m30s +srs-edge-r5-deploy-6c84cdc77b-wjdtl 1/1 Running 0 3m30s +srs-edge-r6-deploy-598f4698d-kkfnb 1/1 Running 0 6s + +while true; do ffmpeg -f flv -i rtmp://r.ossrs.net/live/livestream 2>&1|grep server_version; sleep 1; done + server_version : 4.0.5 + server_version : 4.0.5 + server_version : 4.0.5 + server_version : 4.0.5 + server_version : 4.0.5 + server_version : 4.0.5 + server_version : 4.0.6 # 这是新版本 + server_version : 4.0.5 + server_version : 4.0.5 + server_version : 4.0.6 # 这是新版本 +``` + +那么接下来,只需要调整新老的Deployment的Replicas,就能调整流量的比例了,比如我们增加新版本比重,只流一台老的: + +![ACK: SRS Cluster Canary Release with More New Pods](/img/doc-advanced-guides-k8s-008.png) + +```bash +kubectl scale --replicas=3 deploy/srs-edge-r6-deploy +kubectl scale --replicas=1 deploy/srs-edge-r5-deploy +``` + +可以看到经过Gracefully Quit平滑升级和退出,最终变成了我们声明的那个样子,对业务不影响: + +```bash +kubectl get po +NAME READY STATUS RESTARTS AGE +nginx-origin-deploy-85f4695685-gn2df 3/3 Running 0 5h31m +srs-edge-r5-deploy-6c84cdc77b-s6pzh 1/1 Running 0 25m +srs-edge-r6-deploy-f6b59c6c6-ddgxw 1/1 Running 0 2m59s +srs-edge-r6-deploy-f6b59c6c6-gvnd8 1/1 Running 0 2m54s +srs-edge-r6-deploy-f6b59c6c6-j46b5 1/1 Running 0 2m58s + +while true; do ffmpeg -f flv -i rtmp://r.ossrs.net/live/livestream 2>&1|grep server_version; sleep 1; done + server_version : 4.0.6 + server_version : 4.0.6 + server_version : 4.0.6 + server_version : 4.0.6 + server_version : 4.0.6 + server_version : 4.0.6 + server_version : 4.0.5 # 这是老版本 + server_version : 4.0.6 + server_version : 4.0.6 + server_version : 4.0.6 + server_version : 4.0.6 + server_version : 4.0.6 + server_version : 4.0.5 # 这是老版本 + server_version : 4.0.6 + server_version : 4.0.6 +``` + +最终我们只要把老的Replicas设为0,然后就可以删除老的应用`srs-edge-r5-deploy`了,系统全部变成新的版本了,如下图所示: + +![ACK: SRS Cluster Canary Release with All New Pods](/img/doc-advanced-guides-k8s-009.png) + +亲,爽吗?干净利落,谈笑间,强撸灰飞湮灭啦。 + +## Useful Tips + +本章补充了一些比较实用的话题,以及前面章节用到的一些工具和场景。 + +1. [Create K8S Cluster in ACK](./k8s.md#create-k8s-cluster-in-ack): 在阿里云ACK创建你的K8S集群,我们基于ACK构建流媒体服务。 +1. [Publish Demo Streams to SRS](./k8s.md#publish-demo-streams-to-srs): 推送SRS的演示流,可直接推源站,也可以推边缘集群。 +1. [Cleanup For DVR/HLS Temporary Files](./k8s.md#cleanup-for-dvrhls-temporary-files): 定期清理临时文件,比如每天凌晨1点,删除3天前的临时文件。 +1. [Use One SLB and EIP for All Streaming Service](./k8s.md#use-one-slb-and-eip-for-all-streaming-service): 使用一个SLB(EIP)对外提供RTMP、HTTP-FLV、HLS等服务。 +1. 
[Build SRS Origin Cluster as Deployment](./k8s.md#build-srs-origin-cluster-as-deployment): 除了以StatefulSet有状态应用方式部署Origin Cluster,我们还可以选择Deployment无状态应用方式。 +1. [Managing Compute Resources for Containers](./k8s.md#managing-compute-resources-for-containers): 资源的申请和限制,以及如何调度和限制如何生效。 +1. [Auto Reload by Inotify](./k8s.md#auto-reload-by-inotify): SRS侦听ConfigMap的变更,并支持自动reload。 + +### Create K8S Cluster in ACK + +**Step 1:** [可选] 创建k8s集群用的专有网络[VPC](https://vpc.console.aliyun.com/vpc/cn-zhangjiakou/vpcs/new)和交换机。 + +* 专有网络,名称:`srs-k8s-vpc`,会在这个VPC创建网络资源。 +* 交换机,名称:`srs-k8s-node`,创建的Node(ECS)会在这个交换机的网段中。 + +![ACK: Create Cluster VPC and Switch](/img/doc-advanced-guides-k8s-010.png) + +**Step 2:** [可选] 创建管理机器的密钥对[KeyPair](https://ecs-cn-zhangjiakou.console.aliyun.com/#/keyPair/region/cn-zhangjiakou/create?createType=default)。 + +* 密钥对名称:`srs-k8s-key`,可以设置ssh配置免密码登陆。 + +![ACK: Create Security Pair](/img/doc-advanced-guides-k8s-011.png) + + + +**Step 3:** [可选] [购买NAS](./k8s.md#ack-create-cluster-pv-nas),创建源站集群使用的PV(Persistent Volume)持久化卷,可在[NAS](https://nasnext.console.aliyun.com/cn-zhangjiakou/filesystem)控制台`创建文件系统`。 + +* 文件系统类型:可选择`通用型`,或者要求更快的速度可选择`极速型`。 +* 区域:请选择`华北3(张家口)`,千万注意别选错了,要和ACK集群在同一VPC中。 +* 协议类型:选择`NFS`。 +* VPC网络:请选择`srs-k8s-vpc`。 +* 交换机:请选择`srs-k8s-node`。 + +![ACK: Create Cluster PV NAS Storage](/img/doc-advanced-guides-k8s-012.png) + +**Step 4:** 进入[ACK](https://cs.console.aliyun.com/#/k8s/cluster/create/managed?template=managed-default)控制台,新建K8S托管集群。 + +* 集群名称:`srs` +* 地域:`华北3(张家口)` + +![ACK: Create Cluster in Zone](/img/doc-advanced-guides-k8s-013.png) + +选择专有网络和Node(ECS)的交换机,也可以点新建创建。 + +![ACK: Apply Cluster VPC and Switch](/img/doc-advanced-guides-k8s-014.png) + +选择Worker实例的类型和规格、创建的台数(默认3台)、镜像、密钥对。把相关组件都选择上,尤其是ApiServer公网访问。 + +![ACK: Create Cluster Components](/img/doc-advanced-guides-k8s-015.png) + +> Remark: Worker推荐3台及以上,至少4CPU+8GB内存的ECS配置,太低的配置可能会造成负载太高。 + +点`创建集群`,就可以成功创建K8S集群了。 + +**Step 5:** 使用kubectl管理集群,进入[ACK](https://cs.console.aliyun.com/#/k8s/cluster),点击集群查看基本信息。 + +![ACK: Setup Tool kubectl](/img/doc-advanced-guides-k8s-016.png) + +配置好kubectl后,执行下面的命令应该是成功的: + +```bash +kubectl cluster-info +``` + +接下来,就可以创建SRS集群了,参考[QuickStart](./k8s.md#quick-start). + +### Publish Demo Streams to SRS + + + +为了演示用,若存在源站服务`srs-origin-service`,也可以创建一个无状态应用[k8s deployment](https://v1-14.docs.kubernetes.io/docs/concepts/workloads/controllers/deployment),推流到SRS源站: + +```bash +cat < + while true; do + ffmpeg -re -i ./doc/source.flv \ + -c copy -f flv rtmp://srs-origin-service/live/livestream; + sleep 3; + done +EOF +``` + +> Note: 可以创建多个应用推多个流,记得将推流域名改成你的嗯源站服务的名称。 + + + +集群中若已经有Edge(边缘)服务`srs-edge-service`,也可以创建无状态应用[k8s deployment](https://v1-14.docs.kubernetes.io/docs/concepts/workloads/controllers/deployment),推流到SRS边缘: + +```bash +cat < + while true; do + ffmpeg -re -i ./doc/source.flv -c copy \ + -f flv rtmp://srs-edge-service/live/livestream && continue; + sleep 3; + done + - name: avatar + image: ossrs/srs:encoder + imagePullPolicy: IfNotPresent + command: ["/bin/sh"] + args: + - "-c" + - > + while true; do + ffmpeg -re -i ./doc/source.flv -c copy \ + -f flv rtmp://srs-edge-service/live/avatar && continue; + sleep 3; + done +EOF +``` + +> Note: 若存在源站服务,可以直接推源站,参考[Publish Demo Streams to SRS](./k8s.md#ack-srs-publish-demo-stream-to-origin)。 + +### Cleanup For DVR/HLS Temporary Files + +可以开启一个[K8S CronJob](https://v1-14.docs.kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/)定期清理存储的临时文件: + +1. 若开启了HLS,推流结束后最后几个切片还会继续存在,当然也可以开启`hls_dispose`清理。 +1. 
SRS重启或Crash后,可能有临时文件不会被清理,会不断累积。 +1. 清理时注意不要删除有用的文件,比如DVR正式文件,或console等静态文件。 + +建议将清理的目标设置为: + +1. 只清理3天前的,3天之内的文件可以暂时不用管。 +1. 扩展名为`*.ts*`或`*.m3u8*`或`*.flv.tmp`或`*.mp4.tmp`文件。 +1. 清理空目录,空目录一般不影响正常功能,清理不会出现问题。 +1. 每天凌晨1点运行清理任务,时间是按K8S集群时间,一般国内是北京时间。 + +```bash +cat < + find /tmp/html -mtime +3 -name *.ts* -print -delete && + find /tmp/html -mtime +3 -name *.m3u8* -print -delete && + find /tmp/html -mtime +3 -name *.flv.tmp -print -delete && + find /tmp/html -mtime +3 -name *.mp4.tmp -print -delete && + find /tmp/html -type d -empty -print -delete && + echo "Done" + restartPolicy: Never +EOF +``` + +> Remark: K8S的Cron表达式没有秒,格式是`分 时 日 月 星期`,比如`15 1 * * *`表示每天`01:15`执行。另外,一般 + +> Remark: 注意find命令的`-nmin`和`-mtime`时间限制,是和`-name`绑定在一起的,所以每种文件用一个命令删除。 + +目前ACK还不支持Job完成后自动清理,[ttlSecondsAfterFinished](https://v1-14.docs.kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#clean-up-finished-jobs-automatically),完成后的Pod还会留在系统。 +一个可选的办法,是启动一个Deployment后,用脚本循环执行,执行一次后等待1天: + +```bash +cat < + while true; do + find /tmp/html -mtime +3 -name *.ts* -print -delete && + find /tmp/html -mtime +3 -name *.m3u8* -print -delete && + find /tmp/html -mtime +3 -name *.flv.tmp -print -delete && + find /tmp/html -mtime +3 -name *.mp4.tmp -print -delete && + find /tmp/html -type d -empty -print -delete && + echo "[`date`] Cleanup done"; + sleep 86400; + done +EOF +``` + +> Note: 一天就是86400秒,注意避开业务高峰期执行。 + + + +### Use One SLB and EIP for All Streaming Service + +在例子中,我们默认配置的是自动创建SLB和EIP,这会导致对外提供的EIP是不一样的,比如RTMP/HTTP-FLV是一个EIP,HLS是另外一个IP。 + +为了使用一个EIP对外提供服务,我们必须在创建Service时指定SLB,这需要一个已经存在的内网SLB(绑定了EIP),你可以从[Aliyun](https://package-buy.aliyun.com/?planId=1018110001137801)组合购买,比如: + +* [SLB](https://www.aliyun.com/product/slb) ID: `lb-2zetmjpao868s9yzvr5ld` +* [EIP](https://www.aliyun.com/product/eip): `28.170.32.118` + +然后可以在创建服务时,通过指定Service的`metadata.annotations`,指定你自己购买的内网SLB: + +``` +metadata: + annotations: + service.beta.kubernetes.io/alicloud-loadbalancer-address-type: intranet + service.beta.kubernetes.io/alicloud-loadbalancer-force-override-listeners: "true" + service.beta.kubernetes.io/alicloud-loadbalancer-id: lb-2zetmjpao868s9yzvr5ld +``` + +> Remark: 如果购买的是内网SLB,需要在单独买EIP,将EIP绑定到SLB对外提供服务。 + +> Remark: 也可以简单点,购买SLB时就带外网IP了,就可以直接对外提供服务,这时候去掉上面`alicloud-loadbalancer-address-type: intranet`这个条,不是内网了。 + +> Remark: 如果是专有版K8S,而不是托管版K8S,需要安装CCM(Cloud Controller Manager)才能使用SLB,否则会发现指定了SLB的ID无法使用,Service无ExternalIP等。 + +例如,我们以[Quick Start](./k8s.md#quick-start)为例,可以修改Service如下: + +```bash +cat < Note:购买时注意选择SLB的VPC,要和K8S集群在一个VPC下面。 + +> Note:关于SLB的更多配置,比如保活、计费、证书等等,可以参考[SLB for K8S Service](https://help.aliyun.com/document_detail/86531.html#title-0ok-mot-kuj)。 + +### Build SRS Origin Cluster as Deployment + +在源站集群部署中,可以选择StatefulSet(有状态应用)方式部署,参考:[SRS Origin Cluster for a Large Number of Streams](./k8s.md#srs-origin-cluster-for-a-large-number-of-streams)。 +当然也可以选择Deployment(无状态应用)方式部署,这两种方式的差异参考[#464](https://github.com/ossrs/srs/issues/464#issuecomment-586550787)。 + +| 对比项 | 无状态源站集群 | 有状态源站集群 | +| --- | ----- | ---- | +| 部署 | 容易,源站只需要创建一个StatefulSet和Service | 复杂,需要几个源站就需要创建几个应用 | +| 规模 | `<30`节点,需要将节点写入源站和边缘配置 | `<10`节点,需要将节点写入源站和边缘配置 | +| 更新 | 简单,直接修改镜像更新Pod | 复杂,需要再创建源站同等数量的应用,几个源站就几个应用 | +| 灰度 | 不支持,更新时断流有重推 | 支持灰度,可手动灰度指定的机器,
更新时断流有重推 | + +* 新增源站时,都需要修改源站和边缘的配置,修改ConfigMap。 +* 灰度时,可以手动更改某些源站的镜像版本,出现问题手动回滚,不适合较多机器的情况。 +* 更新和回滚时,都会造成源站重启,由于有边缘作为代理,所以用户不会中断,但边缘会有重试,用户可能会有感知。 + +> Note: 关于Rolling Update,参考[SRS Cluster Update, Rollback, Gray Release with Zero Downtime](./k8s.md#srs-cluster-update-rollback-gray-release-with-zero-downtime)。 + +我们以部署三个源站为例,全部以无状态应用(Deployment)方式部署: + +| 源站 | Deployment | Service | 域名 | +| --- | --- | --- | --- | +| Origin Server 0 | srs-origin-0-deploy | srs-origin-0-socs | srs-origin-0-socs | +| Origin Server 1 | srs-origin-1-deploy | srs-origin-1-socs | srs-origin-1-socs | +| Origin Server 2 | srs-origin-2-deploy | srs-origin-2-socs | srs-origin-2-socs | + +* 配置`srs-origin-config`,三个源站的配置都是一样的,都是Service的地址例如`srs-origin-0-socs`。新增源站时需要更新。 +* 配置`srs-edge-config`,边缘集群的配置项也是一样的,都是Service的地址例如`srs-origin-0-socs`。新增源站时需要更新。 + +```bash +cat < Remark: 按照上面的例子,建立第二个源站,服务地址为`srs-origin-1-socs`,Deployment应用为`srs-origin-1-deploy`,Service服务为`srs-origin-1-socs`。 + +> Remark: 按照上面的例子,建立第三个源站,服务地址为`srs-origin-2-socs`,Deployment应用为`srs-origin-2-deploy`,Service服务为`srs-origin-2-socs`。 + +* `srs-api-service`: 创建一个服务[k8s service](https://v1-14.docs.kubernetes.io/docs/concepts/services-networking/service),基于SLB提供HTTP服务,SRS第一个源站提供API服务,标签为`srs-origin-0`。 + +```bash +cat < Note: 源站对外提供API服务`srs-api-service`,我们选择第一个源站对外提供API服务,实际上源站集群需要改进这点,参考[#1607](https://github.com/ossrs/srs/issues/1607#issuecomment-586549464)。 + +> Note: 这里我们选择ACK自动创建SLB和EIP,也可以手动指定SLB,参考[Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip)。 + +### Managing Compute Resources for Containers + +计算资源有CPU、内存、磁盘、网络等,K8S内置的资源是指[CPU和内存](https://v1-14.docs.kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-types), +K8S也支持声明和消费[扩展资源](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#extended-resources)。 + +可以指定Pod对于资源的Requests和Limits,比如: + +```bash +spec.containers[].resources.limits.cpu +spec.containers[].resources.limits.memory +spec.containers[].resources.requests.cpu +spec.containers[].resources.requests.memory +``` + +> Note: CPU单位是m(millicores或millicpu)千分之一核心的意思,0.1或100m就是10%的CPU。 + +> Note: Memory单位是字节,可以是`Ei, Pi, Ti, Gi, Mi, Ki`,比如100Mi意思就是100MB内存。 + +调度时,会根据Requests请求的资源大小,分配到合适的Pod。而Limits,对于CPU和内存的策略是不同的: + +* CPU是可以被压缩的资源,可能允许(也可能不允许)超过容器的Limits,这个会传递到容器的[cpu-quota](https://docs.docker.com/engine/reference/run/#/cpu-share-constraint%23cpu-quota-constraint),会根据CPU已经容器的状态动态调整。 +* Memory是不可以被压缩的资源,如果内存被耗光就OOM了,会杀掉容器重启或迁移走一些容器,[K8S Resource QoS](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/resource-qos.md)会根据Requests和Limits的定义,优先保障Guranteed,然后是Burstable,最低优先级是Best-Effort。 + +对于SRS源站,我们可以指定更大的内存和CPU: + +```bash +cat <off success. +[2020-03-12 14:34:18.115][Trace][1][348] reload http stream on=>off success. +[2020-03-12 14:34:18.115][Trace][1][348] vhost __defaultVhost__ maybe modified, reload its detail. +[2020-03-12 14:34:18.115][Trace][1][348] vhost __defaultVhost__ reload hls success. +[2020-03-12 14:34:18.115][Trace][1][348] vhost __defaultVhost__ http_remux reload success +[2020-03-12 14:34:18.115][Trace][1][348] vhost __defaultVhost__ reload http_remux success. +[2020-03-12 14:34:18.115][Trace][1][348] ingest nothing changed for vhost=__defaultVhost__ +[2020-03-12 14:34:18.115][Trace][1][348] reload config success. 
+``` + +> Note: 从ConfigMap的修改,到Pod的配置文件生效,一共花了118秒钟,时间比较久。 + +Winlin 2020.02 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/k8s) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/learning-path.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/learning-path.md new file mode 100644 index 00000000..0bdaab05 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/learning-path.md @@ -0,0 +1,69 @@ +--- +title: SRS 学习路径 +sidebar_label: SRS 学习路径 +hide_title: false +hide_table_of_contents: false +--- + +# Learning Path + +新同学专用的学习路径,请一定按照文档操作。 + +## 快速预览 + +先过第一个门槛:看到直播和 WebRTC 长什么样子,能跑出来下图的效果,需要 5 ~ 15 分钟左右。 + +![](/img/doc-learning-path-001.png) + +> Note: 这个看似很容易,甚至直接在 SRS 官网中就能点开两个页面,但是一定要自己用 SRS 搭建出来才算,而不是直接打开线上的演示网页。 + +具体怎么做呢?请参考 [Getting Started](./getting-started.md)。 + + +接触一个新的东西,首先就要有直观的体验和感觉,这个门槛虽然看起来很简单,但是它涉及到了音视频的几乎全链路的东西: +- FFmpeg,强大的音视频客户端,推拉流和编解码,以及各种处理的能力。 +- Chrome(或浏览器),H5 是最便捷的客户端,非常方便演示和学习,SRS 功能基本上都有 H5 的演示。 +- 音视频协议:RTMP,HTTP-FLV,HLS 和 WebRTC,这些操作步骤中,已经涉及到了这些协议,也是实际应用中典型的用法。 +- SRS 服务器,自己部署音视频云,或者提供音视频的云服务,SRS 本质上就是视频云的一种服务器。 + +> Note: 上面的拼图还缺少移动端,其实移动端只是一种端,而并没有新的协议,也可以下载 SRS 直播客户端,体验上面的推流和播放,也可以输入你的服务器的流地址播放。 + +## 深入场景 + +第二个门槛:了解音视频应用的各个典型场景,大约五个核心场景,总共需要 3~7 天左右。 + +典型的音视频业务场景,包括但不限于: +- 全平台直播,小荷才露尖尖角。只需要上图的 Encoders(FFmpeg/OBS)推送 RTMP 到 SRS;一台 SRS Origin(不需要 Cluster),转封装成 HTTP-FLV 流、转封装成 HLS;Players 根据平台的播放器可以选 HTTP-FLV 或 HLS 流播放。 +- WebRTC 通话业务,一对一通话,多人通话,会议室等。WebRTC 是 SRS4 引入的关键和核心的能力,从 1 到 3 秒延迟,到 100 到 300 毫秒延迟,绝对不是数字的变化,而是本质的变化。 +- 监控和广电上云,各行业风起云涌。除了使用 FFmpeg 主动拉取流到 SRS,还可以广电行业 SRT 协议推流,或监控行业 GB28181 协议推流,SRS 转换成互联网的协议观看。 +- 直播低延迟和互动,聚变近在咫尺。RTMP 转 WebRTC 播放降低播放延迟,还能做直播连麦,或者使用 WebRTC 推流,未来还会支持 WebTransport 直播等等。 +- 大规模业务,带你装逼带你飞。如果业务快速上涨,可以通过 Edge Cluster 支持海量 Players,或者 Origin Cluster 支持海量 Encoders,当然可以直接平滑迁移到视频云。未来还会支持 RTC 的级联和集群。 + +每个场景都可以自己搭建出来典型的应用。 + +## 了解细节 + +第三个门槛:了解每个纵向的技术点,应用场景,代码和问题排查,大约 3 ~ 6 月左右。 + +- [视频专栏](./introduction.md#effective-srs),包括环境搭建,代码分析,还有零声学院专业老师的讲解。 +- [解决方案](./introduction.md#solution-guides),大家在各个不同场景中,应用 SRS 的分享和探索。 +- [部署方案](./introduction.md#deployment-guides),如何部署实现不同的具体功能,这些功能可以组合起来使用。 +- [集群和扩展](./introduction.md#cluster-guides),当业务量上升,如何扩展单机到集群,如何服务不同区域的用户。 +- [集成和定制](./introduction.md#integration-guides),如何和现有系统对接,如何验证用户,安全和防盗链机制等。 +- [深度分析](./introduction.md#develop-guide),协程原理,代码分析,高性能服务器框架,性能优化等。 + +如果能踏踏实实的了解完 SRS,音视频真不难。 + +如果总想着三分钟 XXX,那可不是很难么? + +作者:winlinvip + +链接:https://www.jianshu.com/p/2662df9fe078 + +来源:简书 + +著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/learning-path) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/log-rotate.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/log-rotate.md new file mode 100644 index 00000000..2dda2c06 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/log-rotate.md @@ -0,0 +1,72 @@ +--- +title: 日志切割 +sidebar_label: 日志切割 +hide_title: false +hide_table_of_contents: false +--- + +# LogRotate + +Log Rotate就是日志切割,服务器日志越来越大,如何压缩日志,或者丢弃古老的日志?SRS将日志管理交给外部系统,提供了接口可以切割日志。 + +1. 首先,将日志文件挪走,譬如:```mv objs/srs.log /tmp/srs.`date +%s`.log``` +1. 然后,发送信号给SRS,SRS重新打开日志文件,譬如 `killall -s SIGUSR1`,SRS会关闭之前的fd,重新打开日志文件并写入。 +1. 对挪动后的日志文件处理,可以压缩存储,传输,或者删除。 + +## Use logrotate + +推荐使用程序[logrotate](https://www.jianshu.com/p/ec7f1626a3d3)管理日志文件,支持压缩和删除过期的文件。 + +1. 安装logrotate: + +``` +sudo yum install -y logrotate +``` + +1. 
配置logrotate管理SRS的日志文件: + +``` +cat << END > /etc/logrotate.d/srs +/usr/local/srs/objs/srs.log { + daily + dateext + compress + rotate 7 + size 1024M + sharedscripts + postrotate + kill -USR1 `cat /usr/local/srs/objs/srs.pid` + endscript +} +END +``` + +> 备注:可以手动执行命令触发日志切割 `logrotate -f /etc/logrotate.d/srs` + +## CopyTruncate + +logrotate还有一种方式是[copytruncate](https://unix.stackexchange.com/questions/475524/how-copytruncate-actually-works), +**墙裂不推荐这种方式**因为会丢日志,但是它适用于不支持SIGUSR1信号的SRS2。 + +> 当然SRS3也是可以用这种方式,如果能接受丢日志的话;但是强烈建议不要用这种方式,仅仅作为SRS2的workaround方案。 + +配置如下,感谢[wnpllrzodiac](https://github.com/wnpllrzodiac)提交的[PR#1561](https://github.com/ossrs/srs/pull/1561#issuecomment-571408173): + +``` +cat << END > /etc/logrotate.d/srs +/usr/local/srs/objs/srs.log { + daily + dateext + compress + rotate 7 + size 1024M + copytruncate +} +END +``` + +Winlin 2016.12 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/log-rotate) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/log.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/log.md new file mode 100644 index 00000000..a79672d0 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/log.md @@ -0,0 +1,515 @@ +--- +title: 日志 +sidebar_label: 日志 +hide_title: false +hide_table_of_contents: false +--- + +# SRS Log System + +SRS支持打印到console和file,支持设置level,支持连接级别的日志,支持可追溯日志。 + +## LogTank + +LogTank即log的容器,日志打印到什么地方。 + +SRS提供两种打印日志的方式,通过配置`srs_log_tank`: +* console:打印日志到控制台。当配置文件没有加载时,也打印到控制台。 +* file: 默认,打印日志到文件。必须指定配置`srs_log_file`,日志文件。日志文件默认为:`./objs/srs.log` + +配置文件中的说明: + +```bash +# the log tank, console or file. +# if console, print log to console. +# if file, write log to file. requires srs_log_file if log to file. +# default: file. +srs_log_tank file; +``` + +## LogLevel + +LogLevel就是log的级别,什么级别的日志才会打印出来。 + +SRS支持设置日志级别,通过设置`srs_log_level`: +* verbose: 非常详细的日志,性能会很低,日志会非常多。SRS默认是编译时禁用这些日志,提高性能。 +* info:较为详细的日志,性能也受影响。SRS默认编译时禁用这些日志。 +* trace: 重要的日志,比较少,SRS默认使用这个级别。 +* warn: 警告日志,SRS在控制台以黄色显示。若SRS运行较稳定,可以只打开这个日志。建议使用trace级别。 +* error: 错误日志,SRS在控制台以红色显示。 + +配置文件中的说明: + +```bash +# the log level, for all log tanks. +# can be: verbose, info, trace, warn, error +# default: trace +srs_log_level trace; +``` + +注意事项: +* 设置了低级别的日志,自动会打印高级别的。譬如设置为trace,那么trace/warn/error日志都会打印出来。 +* 默认verbose和info是编译时禁用的,若需要打开这两个日志,需要修改`srs_kernel_log.hpp`,将对应的禁用编译宏打开。 +* 推荐使用trace级别,重要的日志不多,对于排错很方便。如果有错误,建议用gdb调试,不要依赖日志。只有在不得已时才用日志排错。 + +## 工具的日志 + +Transcode/Ingest等都是用到了外部工具,譬如FFMPEG,所以我们统称ffmpeg日志。 + +SRS可以配置ffmpeg的日志路径,若配置为`/dev/null`则禁用ffmpeg日志,在某些嵌入式系统磁盘较小时比较有用,需要减少日志时有用: +```bash +# the logs dir. +# if enabled ffmpeg, each stracoding stream will create a log file. +# "/dev/null" to disable the log. +# default: ./objs +ff_log_dir ./objs; +``` + +## 日志格式 + +SRS的日志可以定位到某个连接,可以在混杂了成千上万个连接的日志中找到某一个连接的日志,譬如要看某个客户端的日志。这个功能和SRS的日志格式设计相关,譬如: + +```bash +[2014-04-04 11:21:29.183][trace][2837][104][11] rtmp get peer ip success. ip=192.168.1.179 +``` + +日志格式如下: +* [2014-04-04 11:21:29.183] 日志的日期,毫秒数因为SRS的时间cache,分辨率定义在SRS_TIME_RESOLUTION_MS,即500毫秒更新一次时间。防止gettimeofday函数调用造成性能问题。 +* [trace] 日志的级别,参考上面对日志级别的定义。打印到控制台的日志,trace是白色,warn是黄色,error是红色。一般只有trace日志说明没有发现异常。 +* [2837] 进程pid(SrsPid)。进程之间的session id可能有重复。 +* [104] 会话ID,即SessionId(SrsId)。进程存活期间,保证id唯一,从0开始计算。这个就是找到某个连接的日志的关键。 +* [11] errno,系统错误码。这个在error时才有效,其他时候这个值没有意义。 +* rtmp get peer ip success. 日志的文本。若有错误,一般会打印出错误码,譬如:identify client failed. 
ret=211(Timer expired) 说明是超时。 + +下面是一些常用的日志分析方法。 + +### Tracable Log + +某个客户端如果出现问题,譬如投诉说卡,播放断开,如何排查问题?SRS提供基于连接的日志,可以根据连接的id查询这个客户端在服务器的日志(参考下面基于连接的日志)。 + +如果服务器是多层结构呢?譬如CDN集群,有时候就需要查询连接的回源连接,以及回源连接在上层服务器的日志。这个时候快速知道客户端或者边缘在上层服务器的ID就及其重要了。 + +客户端或者边缘能拿到自己在上层服务器的ID,就是可追溯日志。我举个例子: + +播放流:rtmp://dev:1935/live/livestream +![客户端显示id](/img/doc-guides-log-001.png) +能看到SrsIp,即服务器ip为192.168.1.107,对于DNS解析而言,这个很重要,知道是哪个边缘节点。SrsPid为9131,SrsId为117,所以去这个服务器上grep关键字"\[9131\]\[117\]"就可以。 +```bash +[winlin@dev6 srs]$ grep -ina "\[12665\]\[114\]" objs/edge.log +1307:[2014-05-27 19:21:27.276][trace][12665][114] serve client, peer ip=192.168.1.113 +1308:[2014-05-27 19:21:27.284][trace][12665][114] complex handshake with client success +1309:[2014-05-27 19:21:27.284][trace][12665][114] rtmp connect app success. tcUrl=rtmp://dev:1935/live, pageUrl=http://ossrs.net/players/srs_player.html?vhost=dev&stream=livestream&server=dev&port=1935, swfUrl=http://ossrs.net/players/srs_player/release/srs_player.swf?_version=1.21, schema=rtmp, vhost=__defaultVhost__, port=1935, app=live +1310:[2014-05-27 19:21:27.486][trace][12665][114] set ack window size to 2500000 +1311:[2014-05-27 19:21:27.486][trace][12665][114] identify ignore messages except AMF0/AMF3 command message. type=0x5 +1312:[2014-05-27 19:21:27.501][trace][12665][114] ignored. set buffer length to 800 +1313:[2014-05-27 19:21:27.501][trace][12665][114] identify ignore messages except AMF0/AMF3 command message. type=0x4 +1314:[2014-05-27 19:21:27.518][trace][12665][114] identity client type=play, stream_name=livestream, duration=-1.00 +1315:[2014-05-27 19:21:27.518][trace][12665][114] identify client success. type=Play, stream_name=livestream, duration=-1.00 +1316:[2014-05-27 19:21:27.518][trace][12665][114] set output chunk size to 4096 +1317:[2014-05-27 19:21:27.518][trace][12665][114] source url=__defaultVhost__/live/livestream, ip=192.168.1.113, cache=1, is_edge=1, id=-1 +1318:[2014-05-27 19:21:27.518][trace][12665][114] dispatch cached gop success. count=0, duration=0 +1319:[2014-05-27 19:21:27.518][trace][12665][114] create consumer, queue_size=30.00, tba=0, tbv=0 +1322:[2014-05-27 19:21:27.518][trace][12665][114] ignored. set buffer length to 800 +1333:[2014-05-27 19:21:27.718][trace][12665][114] update source_id=115 +1334:[2014-05-27 19:21:27.922][trace][12665][114] -> PLA time=301, msgs=12, okbps=1072,0,0, ikbps=48,0,0 +``` + +会发现回源连接的id为115(`source_id=115`),所以查找这个链接: +``` +[winlin@dev6 srs]$ grep -ina "\[12665\]\[115\]" objs/edge.log +1320:[2014-05-27 19:21:27.518][trace][12665][115] edge connected, can_publish=1, url=rtmp://dev:1935/live/livestream, server=127.0.0.1:19350 +1321:[2014-05-27 19:21:27.518][trace][12665][115] connect to server success. server=127.0.0.1, ip=127.0.0.1, port=19350 +1323:[2014-05-27 19:21:27.519][trace][12665][115] complex handshake with server success. +1324:[2014-05-27 19:21:27.561][trace][12665][115] set ack window size to 2500000 +1325:[2014-05-27 19:21:27.602][trace][12665][115] drop unknown message, type=6 +1326:[2014-05-27 19:21:27.602][trace][12665][115] connected, version=0.9.119, ip=127.0.0.1, pid=12633, id=141 +1327:[2014-05-27 19:21:27.602][trace][12665][115] set output chunk size to 60000 +1328:[2014-05-27 19:21:27.602][trace][12665][115] edge change from 100 to state 101 (ingest connected). +1329:[2014-05-27 19:21:27.603][trace][12665][115] set input chunk size to 60000 +1330:[2014-05-27 19:21:27.603][trace][12665][115] dispatch metadata success. 
+1331:[2014-05-27 19:21:27.603][trace][12665][115] update video sequence header success. size=46 +1332:[2014-05-27 19:21:27.603][trace][12665][115] update audio sequence header success. size=4 +1335:[2014-05-27 19:21:37.653][trace][12665][115] <- EIG time=10163, okbps=0,0,0, ikbps=234,254,231 +``` + +发现回源链接在服务器上的标识为:`connected, version=0.9.119, ip=127.0.0.1, pid=12633, id=141`,去上层服务器查找日志: +``` +[winlin@dev6 srs]$ grep -ina "\[12633\]\[141\]" objs/srs.log +783:[2014-05-27 19:21:27.518][trace][12633][141] serve client, peer ip=127.0.0.1 +784:[2014-05-27 19:21:27.519][trace][12633][141] complex handshake with client success +785:[2014-05-27 19:21:27.561][trace][12633][141] rtmp connect app success. tcUrl=rtmp://dev:1935/live, pageUrl=, swfUrl=, schema=rtmp, vhost=__defaultVhost__, port=1935, app=live +786:[2014-05-27 19:21:27.561][trace][12633][141] set ack window size to 2500000 +787:[2014-05-27 19:21:27.561][trace][12633][141] identify ignore messages except AMF0/AMF3 command message. type=0x5 +788:[2014-05-27 19:21:27.602][trace][12633][141] identity client type=play, stream_name=livestream, duration=-1.00 +789:[2014-05-27 19:21:27.602][trace][12633][141] identify client success. type=Play, stream_name=livestream, duration=-1.00 +790:[2014-05-27 19:21:27.602][trace][12633][141] set output chunk size to 60000 +791:[2014-05-27 19:21:27.602][trace][12633][141] source url=__defaultVhost__/live/livestream, ip=127.0.0.1, cache=1, is_edge=0, id=131 +792:[2014-05-27 19:21:27.602][trace][12633][141] dispatch cached gop success. count=241, duration=3638 +793:[2014-05-27 19:21:27.602][trace][12633][141] create consumer, queue_size=30.00, tba=44100, tbv=1000 +794:[2014-05-27 19:21:27.602][trace][12633][141] ignored. set buffer length to 65564526 +795:[2014-05-27 19:21:27.604][trace][12633][141] set input chunk size to 60000 +798:[2014-05-27 19:21:32.420][trace][12633][141] -> PLA time=4809, msgs=14, okbps=307,0,0, ikbps=5,0,0 +848:[2014-05-27 19:22:54.414][trace][12633][141] -> PLA time=86703, msgs=12, okbps=262,262,0, ikbps=0,0,0 +867:[2014-05-27 19:22:57.225][trace][12633][141] update source_id=149 +``` + +同样发现这个源头是149(`source_id=149`),即编码器推流上来的id。 +``` +[winlin@dev6 srs]$ grep -ina "\[12633\]\[149\]" objs/srs.log +857:[2014-05-27 19:22:56.919][trace][12633][149] serve client, peer ip=127.0.0.1 +858:[2014-05-27 19:22:56.921][trace][12633][149] complex handshake with client success +859:[2014-05-27 19:22:56.960][trace][12633][149] rtmp connect app success. tcUrl=rtmp://127.0.0.1:19350/live?vhost=__defaultVhost__, pageUrl=, swfUrl=, schema=rtmp, vhost=__defaultVhost__, port=19350, app=live +860:[2014-05-27 19:22:57.040][trace][12633][149] identify client success. type=publish(FMLEPublish), stream_name=livestream, duration=-1.00 +861:[2014-05-27 19:22:57.040][trace][12633][149] set output chunk size to 60000 +862:[2014-05-27 19:22:57.040][trace][12633][149] source url=__defaultVhost__/live/livestream, ip=127.0.0.1, cache=1, is_edge=0, id=-1 +863:[2014-05-27 19:22:57.123][trace][12633][149] set input chunk size to 60000 +864:[2014-05-27 19:22:57.210][trace][12633][149] dispatch metadata success. +865:[2014-05-27 19:22:57.210][trace][12633][149] update video sequence header success. size=46 +866:[2014-05-27 19:22:57.210][trace][12633][149] update audio sequence header success. size=4 +870:[2014-05-27 19:23:04.970][trace][12633][149] <- CPB time=8117, okbps=4,0,0, ikbps=320,0,0 +``` + +Encoder => Origin => Edge => Player,整个分发集群的日志都直接找到!O了,快速直接! 
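
下面是一个辅助小脚本的示意(不是SRS自带的工具,日志路径、进程号和会话ID都是假设的示例值),把上面手动grep会话日志的步骤串起来,先看该会话的全部日志,再单独过滤warn和error:

```bash
#!/bin/bash
# 用法示意:./trace.sh objs/srs.log 12633 141
# 参数依次为:日志文件、进程号(SrsPid)、会话ID(SrsId),都是示例值
LOG=$1 && PID=$2 && SID=$3
# 先过滤出该会话([pid][id])的全部日志
grep -a "\[${PID}\]\[${SID}\]" ${LOG}
# 再只看该会话的warn和error日志,便于快速定位问题
grep -a "\[${PID}\]\[${SID}\]" ${LOG} | grep -aE "\[warn\]|\[error\]"
```
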
+ +### 可倒追溯日志 + +可追溯日志,上一节所描述的,可以从播放器追溯到边缘服务器,从边缘追溯到上层,上层到上上层,一直到源站。 + +可倒追溯日志,指的是反过来从源站知道下层的回源ID,下层知道边缘的回源ID。边缘上自然有每个连接的日志。 + +譬如开启一个源站一个边缘,查询源站的日志,关键字是`edge-srs`: + +``` +[winlin@dev6 srs]$ grep -ina "edge-srs" objs/srs.origin.log +30:[2014-08-06 09:41:31.649][trace][21433][107] edge-srs ip=192.168.1.159, version=0.9.189, pid=21435, id=108 +``` + +可以知道这个播放连接107是一个SRS回源链接,它在服务器192.168.1.159上面,进程是21435,回源链接id是108。在边缘服务器上查询这个连接的日志: + +``` +[winlin@dev6 srs]$ grep --color -ina "\[108\]" objs/srs.log +29:[2014-08-06 10:09:34.579][trace][22314][108] edge pull connected, can_publish=1, url=rtmp://dev:1935/live/livestream, server=127.0.0.1:1936 +30:[2014-08-06 10:09:34.591][trace][22314][108] complex handshake success. +31:[2014-08-06 10:09:34.671][trace][22314][108] connected, version=0.9.190, ip=127.0.0.1, pid=22288, id=107 +32:[2014-08-06 10:09:34.672][trace][22314][108] out chunk size to 60000 +33:[2014-08-06 10:09:34.672][trace][22314][108] ignore the disabled transcode: +34:[2014-08-06 10:09:34.672][trace][22314][108] edge change from 100 to state 101 (pull). +35:[2014-08-06 10:09:34.672][trace][22314][108] input chunk size to 60000 +36:[2014-08-06 10:09:34.672][trace][22314][108] got metadata, width=768, height=320, vcodec=7, acodec=10 +37:[2014-08-06 10:09:34.672][trace][22314][108] 46B video sh, codec(7, profile=100, level=32, 0x0, 0kbps, 0fps, 0s) +38:[2014-08-06 10:09:34.672][trace][22314][108] 4B audio sh, codec(10, profile=1, 2channels, 0kbps, 44100HZ), flv(16bits, 2channels, 44100HZ) +39:[2014-08-06 10:09:34.779][trace][22314][107] update source_id=108[108] +46:[2014-08-06 10:09:36.853][trace][22314][110] source url=__defaultVhost__/live/livestream, ip=192.168.1.179, cache=1, is_edge=1, source_id=108[108] +50:[2014-08-06 10:09:44.949][trace][22314][108] <- EIG time=10293, okbps=3,0,0, ikbps=441,0,0 +53:[2014-08-06 10:09:47.805][warn][22314][108][4] origin disconnected, retry. 
ret=1007 +``` + +可以查询这个source下面的链接,关键字是`source_id=108`。以此类推,查回源链接的信息时,可以看到所有连接到该回源链接的客户端id(grep时先过滤进程号,然后过滤id): + +``` +39:[2014-08-06 10:09:34.779][trace][22314][107] update source_id=108[108] +46:[2014-08-06 10:09:36.853][trace][22314][110] source url=__defaultVhost__/live/livestream, ip=192.168.1.179, cache=1, is_edge=1, source_id=108[108] +``` + +可以看到有两个连接,一个是107,一个是110。连接107是播放后才回源,110是已经在回源了然后播放的。 + +### 可任意追溯 + +以为支持可追溯以及可倒追溯日志,所以我们在任意节点开始都可以找到整个分发链路。 + +开启一个边缘一个源站,源站ingest推流,两个客户端连接到边缘播放,边缘回源站取流。 + +假设我知道流名称,或者不知道流名称,反正任意信息,譬如我知道播放的链接会打一个"type=Play"的标记出来,就从这一点开始。假设从源站开始: + +``` +[winlin@dev6 srs]$ grep -ina "type=Play" objs/srs.origin.log +31:[2014-08-06 10:09:34.671][trace][22288][107] client identified, type=Play, stream_name=livestream, duration=-1.00 +``` + +发现有个107的链接播放了源站信息,查看它的日志: + +``` +[winlin@dev6 srs]$ grep -ina "\[107\]" objs/srs.origin.log +27:[2014-08-06 10:09:34.589][trace][22288][107] RTMP client ip=127.0.0.1 +28:[2014-08-06 10:09:34.591][trace][22288][107] complex handshake success +29:[2014-08-06 10:09:34.631][trace][22288][107] connect app, tcUrl=rtmp://dev:1935/live, pageUrl=http://www.ossrs.net/players/srs_player.html?vhost=dev&stream=livestream&server=dev&port=1935, swfUrl=http://www.ossrs.net/players/srs_player/release/srs_player.swf?_version=1.23, schema=rtmp, vhost=__defaultVhost__, port=1935, app=live, args=(obj) +30:[2014-08-06 10:09:34.631][trace][22288][107] edge-srs ip=192.168.1.159, version=0.9.190, pid=22314, id=108 +31:[2014-08-06 10:09:34.671][trace][22288][107] client identified, type=Play, stream_name=livestream, duration=-1.00 +32:[2014-08-06 10:09:34.671][trace][22288][107] out chunk size to 60000 +33:[2014-08-06 10:09:34.671][trace][22288][107] source url=__defaultVhost__/live/livestream, ip=127.0.0.1, cache=1, is_edge=0, source_id=105[105] +34:[2014-08-06 10:09:34.672][trace][22288][107] dispatch cached gop success. count=307, duration=4515 +35:[2014-08-06 10:09:34.672][trace][22288][107] create consumer, queue_size=30.00, tba=44100, tbv=25 +36:[2014-08-06 10:09:34.672][trace][22288][107] ignored. set buffer length to 1000 +37:[2014-08-06 10:09:34.673][trace][22288][107] input chunk size to 60000 +40:[2014-08-06 10:09:44.748][trace][22288][107] -> PLA time=10007, msgs=0, okbps=464,0,0, ikbps=3,0,0 +41:[2014-08-06 10:09:47.805][warn][22288][107][104] client disconnect peer. ret=1004 +``` + +可以看到源id是105,关键字是`source_id=105`,查这个源: + +``` +[winlin@dev6 srs]$ grep --color -ina "\[105\]" objs/srs.origin.log +16:[2014-08-06 10:09:30.331][trace][22288][105] RTMP client ip=127.0.0.1 +17:[2014-08-06 10:09:30.331][trace][22288][105] srand initialized the random. +18:[2014-08-06 10:09:30.332][trace][22288][105] simple handshake success. 
+19:[2014-08-06 10:09:30.373][trace][22288][105] connect app, tcUrl=rtmp://127.0.0.1:1936/live?vhost=__defaultVhost__, pageUrl=, swfUrl=, schema=rtmp, vhost=__defaultVhost__, port=1936, app=live, args=null +21:[2014-08-06 10:09:30.417][trace][22288][105] client identified, type=publish(FMLEPublish), stream_name=livestream, duration=-1.00 +22:[2014-08-06 10:09:30.417][trace][22288][105] out chunk size to 60000 +23:[2014-08-06 10:09:30.418][trace][22288][105] source url=__defaultVhost__/live/livestream, ip=127.0.0.1, cache=1, is_edge=0, source_id=-1[-1] +24:[2014-08-06 10:09:30.466][trace][22288][105] got metadata, width=768, height=320, vcodec=7, acodec=10 +25:[2014-08-06 10:09:30.466][trace][22288][105] 46B video sh, codec(7, profile=100, level=32, 0x0, 0kbps, 0fps, 0s) +26:[2014-08-06 10:09:30.466][trace][22288][105] 4B audio sh, codec(10, profile=1, 2channels, 0kbps, 44100HZ), flv(16bits, 2channels, 44100HZ) +33:[2014-08-06 10:09:34.671][trace][22288][107] source url=__defaultVhost__/live/livestream, ip=127.0.0.1, cache=1, is_edge=0, source_id=105[105] +38:[2014-08-06 10:09:40.732][trace][22288][105] <- CPB time=10100, okbps=3,0,0, ikbps=332,0,0 +``` + +可见这个就是ingest的连接,即编码器推流连接。已经查到了源头。 + +同时可以看到107这个其实是srs的回源链接,关键字是`edge-srs`: + +``` +30:[2014-08-06 10:09:34.631][trace][22288][107] edge-srs ip=192.168.1.159, version=0.9.190, pid=22314, id=108 +``` + +可以去边缘服务器上查它的信息,id是108: + +``` +[winlin@dev6 srs]$ grep --color -ina "\[108\]" objs/srs.log +29:[2014-08-06 10:09:34.579][trace][22314][108] edge pull connected, can_publish=1, url=rtmp://dev:1935/live/livestream, server=127.0.0.1:1936 +30:[2014-08-06 10:09:34.591][trace][22314][108] complex handshake success. +31:[2014-08-06 10:09:34.671][trace][22314][108] connected, version=0.9.190, ip=127.0.0.1, pid=22288, id=107 +32:[2014-08-06 10:09:34.672][trace][22314][108] out chunk size to 60000 +33:[2014-08-06 10:09:34.672][trace][22314][108] ignore the disabled transcode: +34:[2014-08-06 10:09:34.672][trace][22314][108] edge change from 100 to state 101 (pull). +35:[2014-08-06 10:09:34.672][trace][22314][108] input chunk size to 60000 +36:[2014-08-06 10:09:34.672][trace][22314][108] got metadata, width=768, height=320, vcodec=7, acodec=10 +37:[2014-08-06 10:09:34.672][trace][22314][108] 46B video sh, codec(7, profile=100, level=32, 0x0, 0kbps, 0fps, 0s) +38:[2014-08-06 10:09:34.672][trace][22314][108] 4B audio sh, codec(10, profile=1, 2channels, 0kbps, 44100HZ), flv(16bits, 2channels, 44100HZ) +39:[2014-08-06 10:09:34.779][trace][22314][107] update source_id=108[108] +46:[2014-08-06 10:09:36.853][trace][22314][110] source url=__defaultVhost__/live/livestream, ip=192.168.1.179, cache=1, is_edge=1, source_id=108[108] +50:[2014-08-06 10:09:44.949][trace][22314][108] <- EIG time=10293, okbps=3,0,0, ikbps=441,0,0 +53:[2014-08-06 10:09:47.805][warn][22314][108][4] origin disconnected, retry. ret=1007 +``` + +这个边缘服务器上这个回源链接有两个客户端连接上,107和110,关键字是`source_id=108`: + +``` +[winlin@dev6 srs]$ grep --color -ina "\[107\]" objs/srs.log +18:[2014-08-06 10:09:34.281][trace][22314][107] RTMP client ip=192.168.1.179 +19:[2014-08-06 10:09:34.282][trace][22314][107] srand initialized the random. 
+20:[2014-08-06 10:09:34.291][trace][22314][107] complex handshake success +21:[2014-08-06 10:09:34.291][trace][22314][107] connect app, tcUrl=rtmp://dev:1935/live, pageUrl=http://www.ossrs.net/players/srs_player.html?vhost=dev&stream=livestream&server=dev&port=1935, swfUrl=http://www.ossrs.net/players/srs_player/release/srs_player.swf?_version=1.23, schema=rtmp, vhost=__defaultVhost__, port=1935, app=live, args=null +22:[2014-08-06 10:09:34.532][trace][22314][107] ignored. set buffer length to 800 +23:[2014-08-06 10:09:34.568][trace][22314][107] client identified, type=Play, stream_name=livestream, duration=-1.00 +24:[2014-08-06 10:09:34.568][trace][22314][107] out chunk size to 60000 +25:[2014-08-06 10:09:34.568][trace][22314][107] source url=__defaultVhost__/live/livestream, ip=192.168.1.179, cache=1, is_edge=1, source_id=-1[-1] +26:[2014-08-06 10:09:34.579][trace][22314][107] dispatch cached gop success. count=0, duration=0 +27:[2014-08-06 10:09:34.579][trace][22314][107] create consumer, queue_size=30.00, tba=0, tbv=0 +28:[2014-08-06 10:09:34.579][trace][22314][107] ignored. set buffer length to 800 +39:[2014-08-06 10:09:34.779][trace][22314][107] update source_id=108[108] +54:[2014-08-06 10:09:47.805][trace][22314][107] cleanup when unpublish +55:[2014-08-06 10:09:47.805][trace][22314][107] edge change from 101 to state 0 (init). +56:[2014-08-06 10:09:47.805][warn][22314][107][9] client disconnect peer. ret=1004 +``` + +107是触发回源的连接。查看110这个链接: + +``` +[winlin@dev6 srs]$ grep --color -ina "\[110\]" objs/srs.log +40:[2014-08-06 10:09:36.609][trace][22314][110] RTMP client ip=192.168.1.179 +41:[2014-08-06 10:09:36.613][trace][22314][110] complex handshake success +42:[2014-08-06 10:09:36.613][trace][22314][110] connect app, tcUrl=rtmp://dev:1935/live, pageUrl=http://www.ossrs.net/players/srs_player.html?vhost=dev&stream=livestream&server=dev&port=1935, swfUrl=http://www.ossrs.net/players/srs_player/release/srs_player.swf?_version=1.23, schema=rtmp, vhost=__defaultVhost__, port=1935, app=live, args=null +43:[2014-08-06 10:09:36.835][trace][22314][110] ignored. set buffer length to 800 +44:[2014-08-06 10:09:36.853][trace][22314][110] client identified, type=Play, stream_name=livestream, duration=-1.00 +45:[2014-08-06 10:09:36.853][trace][22314][110] out chunk size to 60000 +46:[2014-08-06 10:09:36.853][trace][22314][110] source url=__defaultVhost__/live/livestream, ip=192.168.1.179, cache=1, is_edge=1, source_id=108[108] +47:[2014-08-06 10:09:36.853][trace][22314][110] dispatch cached gop success. count=95, duration=1573 +48:[2014-08-06 10:09:36.853][trace][22314][110] create consumer, queue_size=30.00, tba=44100, tbv=25 +49:[2014-08-06 10:09:36.853][trace][22314][110] ignored. set buffer length to 800 +51:[2014-08-06 10:09:45.919][trace][22314][110] -> PLA time=8759, msgs=21, okbps=461,0,0, ikbps=3,0,0 +52:[2014-08-06 10:09:46.247][warn][22314][110][104] client disconnect peer. 
ret=1004 +``` + +可见110也是个flash播放连接。 + +### 系统信息 + +日志中有版本和配置信息,以及使用的pid文件,侦听的端口,启动前几条日志就是: + +```bash +[winlin@dev6 srs]$ ./objs/srs -c console.conf +[winlin@dev6 srs]$ cat objs/srs.log +[2014-04-04 11:39:24.176][trace][0][0] config parsed EOF +[2014-04-04 11:39:24.176][trace][0][0] log file is ./objs/srs.log +[2014-04-04 11:39:24.177][trace][0][0] srs 0.9.46 +[2014-04-04 11:39:24.177][trace][0][0] uname: Linux dev6 2.6.32-71.el6.x86_64 +#1 SMP Fri May 20 03:51:51 BST 2011 x86_64 x86_64 x86_64 GNU/Linux +[2014-04-04 11:39:24.177][trace][0][0] build: 2014-04-03 18:38:23, little-endian +[2014-04-04 11:39:24.177][trace][0][0] configure: --dev --with-hls --with-nginx +--with-ssl --with-ffmpeg --with-http-callback --with-http-server --with-http-api +--with-librtmp --with-bwtc --with-research --with-utest --without-gperf --without-gmc +--without-gmp --without-gcp --without-gprof --without-arm-ubuntu12 --jobs=1 +--prefix=/usr/local/srs +[2014-04-04 11:39:24.177][trace][0][0] write pid=4021 to ./objs/srs.pid success! +[2014-04-04 11:39:24.177][trace][100][16] server started, listen at port=1935, type=0, fd=6 +[2014-04-04 11:39:24.177][trace][100][16] server started, listen at port=1985, type=1, fd=7 +[2014-04-04 11:39:24.177][trace][100][16] server started, listen at port=8080, type=2, fd=8 +[2014-04-04 11:39:24.177][trace][101][16] listen cycle start, port=1935, type=0, fd=6 +[2014-04-04 11:39:24.177][trace][102][11] listen cycle start, port=1985, type=1, fd=7 +[2014-04-04 11:39:24.177][trace][103][11] listen cycle start, port=8080, type=2, fd=8 +[2014-04-04 11:39:26.799][trace][0][11] get a signal, signo=2 +[2014-04-04 11:39:26.799][trace][0][11] user terminate program +``` + +主要信息包括: +* 日志文件:[2014-04-04 11:39:24.176][trace][0][0] log file is ./objs/srs.log +* 系统版本:[2014-04-04 11:39:24.177][trace][0][0] srs 0.9.46 +* 编译系统信息:[2014-04-04 11:39:24.177][trace][0][0] uname: Linux dev6 2.6.32-71.el6.x86_64 +#1 SMP Fri May 20 03:51:51 BST 2011 x86_64 x86_64 x86_64 GNU/Linux +* 编译日期:[2014-04-04 11:39:24.177][trace][0][0] build: 2014-04-03 18:38:23, little-endian +* 编译参数:[2014-04-04 11:39:24.177][trace][0][0] configure: --dev --with-hls --with-nginx +--with-ssl --with-ffmpeg --with-http-callback --with-http-server --with-http-api --with-librtmp +--with-bwtc --with-research --with-utest --without-gperf --without-gmc --without-gmp +--without-gcp --without-gprof --without-arm-ubuntu12 --jobs=1 --prefix=/usr/local/srs +* PID文件:[2014-04-04 11:39:24.177][trace][0][0] write pid=4021 to ./objs/srs.pid success! +* 侦听端口1935(RTMP):[2014-04-04 11:39:24.177][trace][100][16] server started, listen at port=1935, type=0, fd=6 +* 侦听1985(HTTP接口):[2014-04-04 11:39:24.177][trace][100][16] server started, listen at port=1985, type=1, fd=7 +* 侦听8080(HTTP服务):[2014-04-04 11:39:24.177][trace][100][16] server started, listen at port=8080, type=2, fd=8 +* 侦听循环开始,准备接受连接:[2014-04-04 11:39:24.177][trace][101][16] listen cycle start, port=1935, type=0, fd=6 + +### 基于连接的日志 + +提供基于连接(会话, Sesssion, SrsId)的日志,对于排错至关重要。当然对于系统分析需要支持良好的api。 + +举例来说,服务器运行了1年,支持了1千万次访问。要知道这1千万个用户多少个用户点了暂停按钮,多少概率会跳过片头,多少用户观看了10分钟以上,都访问了些什么节目,这个属于大数据分析,需要集群提供api查询,集群能提供这个数据的前提是服务器能提供api查询。 + +用户投诉卡,或者观看不了,推流不成功,或者通过数据分析发现某个流的用户观看延迟很大。这种具体的问题,就需要分析某个连接的日志。 + +假设需要知道推流的编码器的日志,流是`rtmp://192.168.1.107:1935/live/livestream`,那么先需要观察日志,一般推流的日志如下: + +```bash +[2014-04-04 11:56:06.074][trace][104][11] rtmp get peer ip success. ip=192.168.1.179, +send_to=30000000us, recv_to=30000000us +[2014-04-04 11:56:06.080][trace][104][11] srand initialized the random. 
+[2014-04-04 11:56:06.082][trace][104][11] simple handshake with client success. +[2014-04-04 11:56:06.083][trace][104][11] rtmp connect app success. +tcUrl=rtmp://192.168.1.107:1935/live, pageUrl=, swfUrl=rtmp://192.168.1.107:1935/live, +schema=rtmp, vhost=__defaultVhost__, port=1935, app=live +[2014-04-04 11:56:06.288][trace][104][11] set ack window size to 2500000 +[2014-04-04 11:56:06.288][trace][104][11] identify ignore messages except AMF0/AMF3 +command message. type=0x5 +[2014-04-04 11:56:06.288][trace][104][11] identify client success. +type=publish(FMLEPublish), stream_name=livestream +``` + +查找标识id: +* 可以grep关键字`identify client success`,然后grep关键字`type=publish`,然后grep关键字`livestream`。 +* 如果熟悉的话,也可以直接grep关键字`identify client success. type=publish`,然后grep关键字`livestream`。 +* 也可以分步实现,先grep关键字`identify client success. type=publish`,把所有推流的连接找出来。然后观察后再加条件。 + +结果如下: + +```bash +[winlin@dev6 srs]$ cat objs/srs.log|grep -ina "identify client success. type=publish" +20:[2014-04-04 11:56:06.288][trace][104][11] identify client success. type=publish, stream_name=livestream +43:[2014-04-04 11:56:18.138][trace][105][11] identify client success. type=publish, stream_name=winlin +65:[2014-04-04 11:56:29.531][trace][106][11] identify client success. type=publish, stream_name=livestream +86:[2014-04-04 11:56:35.966][trace][107][11] identify client success. type=publish, stream_name=livestream +``` + +可见有几次推流,还有其他的流。可是根据时间过滤,或者根据流名称: + +```bash +[winlin@dev6 srs]$ cat objs/srs.log|grep -ina "identify client success. type=publish"|grep -a "livestream" +20:[2014-04-04 11:56:06.288][trace][104][11] identify client success. type=publish, stream_name=livestream +65:[2014-04-04 11:56:29.531][trace][106][11] identify client success. type=publish, stream_name=livestream +86:[2014-04-04 11:56:35.966][trace][107][11] identify client success. type=publish, stream_name=livestream +``` + +找到了三个推流连接,还可以继续筛选。假设我们看第一个,那么标识是`104`,可以grep关键字`\[104\]\[`,譬如: +```bash +[winlin@dev6 srs]$ cat objs/srs.log |grep -ina "\[104\]\[" +14:[2014-04-04 11:56:06.074][trace][104][11] rtmp get peer ip success. ip=192.168.1.179, +send_to=30000000us, recv_to=30000000us +15:[2014-04-04 11:56:06.080][trace][104][11] srand initialized the random. +16:[2014-04-04 11:56:06.082][trace][104][11] simple handshake with client success. +17:[2014-04-04 11:56:06.083][trace][104][11] rtmp connect app success. +tcUrl=rtmp://192.168.1.107:1935/live, pageUrl=, swfUrl=rtmp://192.168.1.107:1935/live, +schema=rtmp, vhost=__defaultVhost__, port=1935, app=live +18:[2014-04-04 11:56:06.288][trace][104][11] set ack window size to 2500000 +19:[2014-04-04 11:56:06.288][trace][104][11] identify ignore messages except AMF0/AMF3 +command message. type=0x5 +20:[2014-04-04 11:56:06.288][trace][104][11] identify client success. +type=publish(FMLEPublish), stream_name=livestream +21:[2014-04-04 11:56:06.288][trace][104][11] set output chunk size to 60000 +22:[2014-04-04 11:56:06.288][trace][104][11] set chunk_size=60000 success +23:[2014-04-04 11:56:07.397][trace][104][11] <- time=225273, obytes=4168, ibytes=7607, okbps=32, ikbps=59 +24:[2014-04-04 11:56:07.398][trace][104][11] dispatch metadata success. +25:[2014-04-04 11:56:07.398][trace][104][11] process onMetaData message success. +26:[2014-04-04 11:56:07.398][trace][104][11] update video sequence header success. 
size=67 +27:[2014-04-04 11:56:08.704][trace][104][11] <- time=226471, obytes=4168, ibytes=36842, okbps=13, ikbps=116 +28:[2014-04-04 11:56:09.901][trace][104][11] <- time=227671, obytes=4168, ibytes=67166, okbps=9, ikbps=152 +29:[2014-04-04 11:56:11.102][trace][104][11] <- time=228869, obytes=4168, ibytes=97481, okbps=6, ikbps=155 +30:[2014-04-04 11:56:11.219][trace][104][11] clear cache/metadata/sequence-headers when unpublish. +31:[2014-04-04 11:56:11.219][trace][104][11] control message(unpublish) accept, retry stream service. +32:[2014-04-04 11:56:11.219][trace][104][11] ignore AMF0/AMF3 command message. +33:[2014-04-04 11:56:11.419][trace][104][11] drop the AMF0/AMF3 command message, command_name=deleteStream +34:[2014-04-04 11:56:11.420][trace][104][11] ignore AMF0/AMF3 command message. +35:[2014-04-04 11:56:12.620][error][104][104] recv client message failed. ret=207(Connection reset by peer) +36:[2014-04-04 11:56:12.620][error][104][104] identify client failed. ret=207(Connection reset by peer) +37:[2014-04-04 11:56:12.620][warn][104][104] client disconnect peer. ret=204 +[winlin@dev6 srs]$ +``` + +这个连接的日志就都出来了,重点关注warn和error日志。可以看到这个是客户端关闭了连接:`36:[2014-04-04 11:56:12.620][error][104][104] identify client failed. ret=207(Connection reset by peer)`。 + +## 守护进程 + +为何默认启动srs时只有一条日志呢?原因是守护进程方式启动时,日志会打印到文件。 + +一个相关的配置是守护进程方式启动,这样就不要nohup启动了(实际上是程序实现了nohup): + +```bash +# whether start as deamon +# default: on +daemon on; +``` + +若希望不以daemon启动,且日志打印到console,可以使用配置`conf/console.conf`: + +```bash +# no-daemon and write log to console config for srs. +# @see full.conf for detail config. + +listen 1935; +daemon off; +srs_log_tank console; +vhost __defaultVhost__ { +} +``` + +启动方式: + +```bash +./objs/srs -c conf/console.conf +``` + +系统默认方式是daemon+log2file,具体参考`full.conf`的说明。 + +注意:[init.d脚本启动](./service.md)会将console日志也打印到文件,若没有指定文件,默认文件为`./objs/srs.log`。脚本启动尽量保证日志不丢失。 + +注意:一般以daemon后台启动,并将日志写到文件(默认),srs会提示配置解析成功,日志写到文件。 + +```bash +[winlin@dev6 srs]$ ./objs/srs -c conf/srs.conf +[2014-04-14 12:12:57.775][trace][0][0] config parse complete +[2014-04-14 12:12:57.775][trace][0][0] write log to file ./objs/srs.log +[2014-04-14 12:12:57.775][trace][0][0] you can: tailf ./objs/srs.log +[2014-04-14 12:12:57.775][trace][0][0] @see https://ossrs.net/lts/zh-cn/docs/v4/doc/log +``` + +Winlin 2014.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/log) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/low-latency.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/low-latency.md new file mode 100644 index 00000000..cc790696 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/low-latency.md @@ -0,0 +1,244 @@ +--- +title: 低延时直播 +sidebar_label: 低延时直播 +hide_title: false +hide_table_of_contents: false +--- + +# Low Latency Live Stream + +直播应用中,RTMP和HLS基本上可以覆盖所有客户端观看(参考:[DeliveryHLS](./hls.md)),HLS主要是延时比较大,RTMP主要优势在于延时低。 + +低延迟的部署实例参考:[Usage: Realtime](./sample-realtime.md) + +## Use Scenario + +低延时应用场景包括: +* 互动式直播:譬如2013年大行其道的美女主播,游戏直播等等各种主播,流媒体分发给用户观看。用户可以文字聊天和主播互动。 +* 视频会议:SRS的DEMO就有视频会议应用,我们要是有同事出差在外地,就用这个视频会议开内部会议。其实会议1秒延时无所谓,因为人家讲完话后,其他人需要思考,思考的延时也会在1秒左右。当然如果用视频会议吵架就不行。 +* 其他:监控,直播也有些地方需要对延迟有要求,互联网上RTMP协议的延迟基本上能够满足要求。 + +## Latency + +RTMP的特点如下: +* Adobe支持得很好:RTMP实际上是现在编码器输出的工业标准协议,基本上所有的编码器(摄像头之类)都支持RTMP输出。原因在于PC市场巨大,PC主要是Windows,Windows的浏览器基本上都支持flash,Flash又支持RTMP支持得灰常好。 +* 
适合长时间播放:因为RTMP支持的很完善,所以能做到flash播放RTMP流长时间不断流,当时测试是100万秒,即10天多可以连续播放。对于商用流媒体应用,客户端的稳定性当然也是必须的,否则最终用户看不了还怎么玩?我就知道有个教育客户,最初使用播放器播放http流,需要播放不同的文件,结果就总出问题,如果换成服务器端将不同的文件转换成RTMP流,客户端就可以一直播放;该客户走RTMP方案后,经过CDN分发,没听说客户端出问题了。 +* 延迟较低:比起YY的那种UDP私有协议,RTMP算延迟大的(延迟在1-3秒),比起HTTP流的延时(一般在10秒以上)RTMP算低延时。一般的直播应用,只要不是电话类对话的那种要求,RTMP延迟是可以接受的。在一般的视频会议(参考SRS的视频会议延时)应用中,RTMP延时也能接受,原因是别人在说话的时候我们一般在听,实际上1秒延时没有关系,我们也要思考(话说有些人的CPU处理速度还没有这么快)。 +* 有累积延迟:技术一定要知道弱点,RTMP有个弱点就是累积误差,原因是RTMP基于TCP不会丢包。所以当网络状态差时,服务器会将包缓存起来,导致累积的延迟;待网络状况好了,就一起发给客户端。这个的对策就是,当客户端的缓冲区很大,就断开重连。当然SRS也提供配置。 + +## HLS LowLatency + +HLS的延迟会比RTMP要大,一般是5秒以上延迟,如果不特别配置可能在15秒以上延迟。 + +如果想降低HLS延迟,请参考[HLS LowLatency](./hls.md#hls-low-latency)。 + +## Benchmark + +如何测量延时,是个很难的问题,不过有个行之有效的方法,就是用手机的秒表,可以比较精确的对比延时。参考:[RTMP延时测量](http://blog.csdn.net/win_lin/article/details/12615591) + +经过测量发现,在网络状况良好时: +* RTMP延时可以做到0.8秒左右(SRS也可以)。 +* 多级边缘节点不会影响延迟(和SRS同源的某CDN的边缘服务器可以做到) +* Nginx-Rtmp延迟有点大,估计是缓存的处理,多进程通信导致? +* GOP是个硬指标,不过SRS可以关闭GOP的cache来避免这个影响,参考后面的配置方法。 +* 服务器性能太低,也会导致延迟变大,服务器来不及发送数据。 +* 客户端的缓冲区长度也影响延迟。譬如flash客户端的NetStream.bufferTime设置为10秒,那么延迟至少10秒以上。 + +## Min-Latency + +当开启最低延迟配置后,SRS会禁用mr(merged-read),并且在consumer队列中使用超时等待,大约每收到1-2个视频包就发送给客户端,达到最低延迟目标。 + +测试vp6纯视频流能达到0.1秒延迟,参考[#257](https://github.com/ossrs/srs/issues/257#issuecomment-66773208)。配置文件: + +``` +vhost mrw.srs.com { + # whether enable min delay mode for vhost. + # for min latence mode: + # 1. disable the publish.mr for vhost. + # 2. use timeout for cond wait for consumer queue. + # @see https://github.com/ossrs/srs/issues/257 + # default: off + min_latency off; +} +``` + +部署低延时的实例,参考:[wiki]([EN](./sample-realtime.md), [CN](./sample-realtime.md)). + +## Merged-Read + +RTMP的Read效率非常低,需要先读一个字节,判断是哪个chunk,然后读取header,接着读取payload。因此上行支持的流的路数大约只有下行的1/3,譬如SRS1.0支持下行2700上行只有1000,SRS2.0支持下行10000上行只有4500。 + +为了提高性能,SRS对于上行的read使用merged-read,即SRS在读写时一次读取N毫秒的数据,这个可以配置: + +``` +# the MR(merged-read) setting for publisher. +vhost mrw.srs.com { + # the config for FMLE/Flash publisher, which push RTMP to SRS. + publish { + # about MR, read https://github.com/ossrs/srs/issues/241 + # when enabled the mr, SRS will read as large as possible. + # default: off + mr off; + # the latency in ms for MR(merged-read), + # the performance+ when latency+, and memory+, + # memory(buffer) = latency * kbps / 8 + # for example, latency=500ms, kbps=3000kbps, each publish connection will consume + # memory = 500 * 3000 / 8 = 187500B = 183KB + # when there are 2500 publisher, the total memory of SRS atleast: + # 183KB * 2500 = 446MB + # the value recomment is [300, 2000] + # default: 350 + mr_latency 350; + } +} +``` + +也就是说,当开启merged-read之后,服务器的接收缓冲区至少会有latency毫秒的数据,延迟也就会有这么多毫秒。 + +若需要低延迟配置,关闭merged-read,服务器每次收到1个包就会解析。 + +## Merged-Write + +SRS永远使用Merged-Write,即一次发送N毫秒的包给客户端。这个算法可以将RTMP下行的效率提升5倍左右,SRS1.0每次writev一个packet支持2700客户端,SRS2.0一次writev多个packet支持10000客户端。 + +用户可以配置merged-write一次写入的包的数目,建议不做修改: + +``` +# the MW(merged-write) settings for player. +vhost mrw.srs.com { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # set the MW(merged-write) latency in ms. + # SRS always set mw on, so we just set the latency value. + # the latency of stream >= mw_latency + mr_latency + # the value recomment is [300, 1800] + # default: 350 + mw_latency 350; + } +} +``` + +若需要极低延迟(损失较多性能),可以设置为100毫秒,SRS大约一次发送几个包。 + +## GOP-Cache + +什么是`GOP`?就是视频流中两个`I帧`的时间距离,如果问什么是I帧就去百度。 + +GOP有什么影响?Flash(解码器)只有拿到GOP才能开始解码播放。也就是说,服务器一般先给一个I帧给Flash。可惜问题来了,假设GOP是10秒,也就是每隔10秒才有关键帧,如果用户在第5秒时开始播放,会怎么样? 
+ +第一种方案:等待下一个I帧,也就是说,再等5秒才开始给客户端数据。这样延迟就很低了,总是实时的流。问题是:等待的这5秒,会黑屏,现象就是播放器卡在那里,什么也没有,有些用户可能以为死掉了,就会刷新页面。总之,某些客户会认为等待关键帧是个不可饶恕的错误,延时有什么关系?我就希望能快速启动和播放视频,最好打开就能放! + +第二种方案:马上开始放,放什么呢?你肯定知道了,放前一个I帧。也就是说,服务器需要总是cache一个gop,这样客户端上来就从前一个I帧开始播放,就可以快速启动了。问题是:延迟自然就大了。 + +有没有好的方案?有!至少有两种: +* 编码器调低GOP,譬如0.5秒一个GOP,这样延迟也很低,也不用等待。坏处是编码器压缩率会降低,图像质量没有那么好。 +* 服务器提供配置,可以选择前面两个方案之一:SRS就这么做,有个gop_cache配置项,on就会马上播放,off就低延迟。 + +SRS的配置项: + +```bash +# the listen ports, split by space. +listen 1935; +vhost __defaultVhost__ { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # whether cache the last gop. + # if on, cache the last gop and dispatch to client, + # to enabled fast startup for client, client play immediately. + # if off, send the latest media data to client, + # client need to wait for the next Iframe to decode and show the video. + # set to off if requires min delay; + # set to on if requires client fast startup. + # default: on + gop_cache off; + } +} +``` + +备注:参考conf/full.conf的min.delay.com配置。 + +## Max Queue Length + +除了GOP-Cache,还有一个有关系,就是累积延迟。SRS可以配置直播队列的长度,服务器会将数据放在直播队列中,如果超过这个长度就清空到最后一个I帧: + +```bash + # the max live queue length in seconds. + # if the messages in the queue exceed the max length, + # drop the old whole gop. + # default: 30 + queue_length 10; +``` + +当然这个不能配置太小,譬如GOP是1秒,queue_length是1秒,这样会导致有1秒数据就清空,会导致跳跃。 + +有更好的方法?有的。延迟基本上就等于客户端的缓冲区长度,因为延迟大多由于网络带宽低,服务器缓存后一起发给客户端,现象就是客户端的缓冲区变大了,譬如NetStream.BufferLength=5秒,那么说明缓冲区中至少有5秒数据。 + +处理累积延迟的最好方法,是客户端检测到缓冲区有很多数据了,如果可以的话,就重连服务器。当然如果网络一直不好,那就没有办法了。 + +## Low Latency config + +考虑GOP-Cache和累积延迟,推荐的低延时配置如下(参考min.delay.com): +```bash +# the listen ports, split by space. +listen 1935; +vhost __defaultVhost__ { + tcp_nodelay on; + min_latency on; + + play { + gop_cache off; + queue_length 10; + mw_latency 100; + } + + publish { + mr off; + } +} +``` + +当然,服务器的性能也要考虑,不可以让一个SRS进程跑太高带宽,一般CPU在80%以下不会影响延迟,连接数参考[性能](./performance.md)。 + +## Benchmark Data + +SRS: 0.9.55 + +编码器:FMLE, video(h264, profile=baseline, level=3.1, keyframe-frequency=5seconds), fps=15, input=640x480, output(500kbps, 640x480), 无音频输出(FMLE的音频切片HLS有问题) + +网络:推流为PC在北京公司内网,观看为PC北京公司内网,服务器为阿里云青岛节点。 + +服务器配置: + +```bash +listen 1935; +vhost __defaultVhost__ { + enabled on; + play { + gop_cache off; + } + hls { + enabled on; + hls_path ./objs/nginx/html; + hls_fragment 5; + hls_window 20; + } +} +``` + +结论:RTMP延迟2秒,HLS延迟24秒。 + +参考:![RTMP-HLS-latency](/img/doc-main-concepts-low-latency-001.png) + +## Edge Benchmark Data + +SRS集群不会增加延迟。这个是Edge模式比ingest要高级的地方,ingest需要启动进程,延迟会大。ingest主要适配多种协议,也可以主动从源站采集流,但Edge是专业的边缘模式。 + +参考:![Edge-latency](/img/doc-main-concepts-low-latency-002.png) + +Winlin 2015.8 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/low-latency) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/nginx-exec.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/nginx-exec.md new file mode 100644 index 00000000..5a3dd74a --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/nginx-exec.md @@ -0,0 +1,53 @@ +--- +title: Nginx RTMP EXEC +sidebar_label: Nginx RTMP EXEC +hide_title: false +hide_table_of_contents: false +--- + +# Exec + +## NGINX RTMP EXEC + +NGINX-RTMP支持的EXEC方式,参考[nginx exec](https://github.com/arut/nginx-rtmp-module/wiki/Directives#exec),SRS只支持常用的几种。下面是exec的支持情况: + +1. exec/exec_publish: 当发布流时调用,支持。 +1. exec_pull: 不支持。 +1. exec_play: 不支持。 +1. 
exec_record_done: 不支持。 + +> Note: 可以使用[HTTP Callback](./http-callback.md),回调你的业务服务器,再启动FFmpeg处理对应的流。这是更灵活,也是更合适的方案。 + +## Config + +SRS EXEC的配置参考`conf/exec.conf`,如下: + +``` +vhost __defaultVhost__ { + # the exec used to fork process when got some event. + exec { + # whether enable the exec. + # default: off. + enabled off; + # when publish stream, exec the process with variables: + # [vhost] the input stream vhost. + # [port] the intput stream port. + # [app] the input stream app. + # [stream] the input stream name. + # [engine] the tanscode engine name. + # other variables for exec only: + # [url] the rtmp url which trigger the publish. + # [tcUrl] the client request tcUrl. + # [swfUrl] the client request swfUrl. + # [pageUrl] the client request pageUrl. + # @remark empty to ignore this exec. + publish ./objs/ffmpeg/bin/ffmpeg -f flv -i [url] -c copy -y ./[stream].flv; + } +} +``` + +Winlin 2015.08 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/nginx-exec) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/nginx-for-hls.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/nginx-for-hls.md new file mode 100644 index 00000000..e48d85a4 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/nginx-for-hls.md @@ -0,0 +1,270 @@ +--- +title: HLS Cluster +sidebar_label: HLS Cluster +hide_title: false +hide_table_of_contents: false +--- + +# NGINX for HLS + +边缘集群(Edge Cluster)就是为了解决很多人观看的问题,可以支持非常多的人观看直播流。注意: + +* SRS Edge只支持直播流协议,比如RTMP或HTTP-FLV等,参考[RTMP Edge Cluster](./sample-rtmp-cluster.md)。 +* SRS Edge不支持HLS或DASH等切片的直播流,本质上它们不是流,就是文件分发。 +* SRS Edge不支持WebRTC的流分发,这不是Edge设计的目标,WebRTC有自己的集群方式,参考[#2091](https://github.com/ossrs/srs/issues/2091)。 + +本文描述的就是HLS或DASH等切片的边缘集群,基于NGINX实现,所以也叫NGINX Edge Cluster。 + +## Oryx + +NGINX边缘集群,可以和Oryx一起工作,可以实现HLS的分发,详细请参考[Oryx HLS CDN](https://github.com/ossrs/oryx/tree/main/scripts/nginx-hls-cdn)。 + +## NGINX Edge Cluster + +NGINX边缘集群,本质上就是带有缓存的反向代理,也就是NGNIX Proxy with Cache。 + +```text ++------------+ +------------+ +------------+ +------------+ ++ FFmpeg/OBS +--RTMP-->-+ SRS Origin +--HLS-->--+ NGINX +--HLS-->--+ Visitors + ++------------+ +------------+ + Servers + +------------+ + +------------+ +``` + +只需要配置NGINX的缓存策略就可以,不需要额外插件,NGINX本身就支持: + +```bash +http { + # For Proxy Cache. + proxy_cache_path /tmp/nginx-cache levels=1:2 keys_zone=srs_cache:8m max_size=1000m inactive=600m; + proxy_temp_path /tmp/nginx-cache/tmp; + + server { + listen 8081; + # For Proxy Cache. + proxy_cache_valid 404 10s; + proxy_cache_lock on; + proxy_cache_lock_age 300s; + proxy_cache_lock_timeout 300s; + proxy_cache_min_uses 1; + + location ~ /.+/.*\.(m3u8)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + # For Proxy Cache. + proxy_cache srs_cache; + proxy_cache_key $scheme$proxy_host$uri$args; + proxy_cache_valid 200 302 10s; + } + location ~ /.+/.*\.(ts)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + # For Proxy Cache. 
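            # 说明:下面ts的缓存key不带$args(而m3u8的key带$args),
            # 这样同一个ts分片即使带不同参数请求,也能命中同一份缓存。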
+ proxy_cache srs_cache; + proxy_cache_key $scheme$proxy_host$uri; + proxy_cache_valid 200 302 60m; + } + } +} +``` + +> Note: 可以配置缓存的目录`proxy_cache_path`和`proxy_temp_path`,改成能访问的目录就可以。 + +> Note: 一般不要修改`location`配置,除非你知道代表什么含义,要改也先跑起来了再改。 + +一定不能只配置成纯Proxy,这样会把负载透传到SRS,系统支持的客户端数目,还是SRS支持的数目。 + +开启Cache后,无论NGINX多少负载,SRS都只有一个流。这样我们可以扩展多个NGINX,实现支持非常多的观看并发了。 + +比如1Mbps的HLS流,1000个客户端播放NGINX,那么NGINX的带宽就是1Gbps,而SRS只有1Mbps。 + +如果我们扩展10个NGINX,每个NGINX是10Gbps带宽,那么整个系统的带宽是100Gbps,能支持10万并发,SRS的带宽消耗只有10Mbps。 + +如何验证系统正常工作呢?这就要用到Benchmark了。 + +## Benchmark + +如何压测这个系统呢?可以用[srs-bench](https://github.com/ossrs/srs-bench#usage),使用起来非常方便,可以用docker直接启动: + +```bash +docker run --rm -it --network=host --name sb ossrs/srs:sb \ + ./objs/sb_hls_load -c 500 \ + -r http://your_server_public_ipv4/live/livestream.m3u8 +``` + +而且也可以压测RTMP和HTTP-FLV: + +```bash +docker run --rm -it --network=host --name sb ossrs/srs:sb \ + ./objs/sb_http_load -c 500 \ + -r http://your_server_public_ipv4/live/livestream.flv +``` + +> Note: 每个SB模拟的客户端并发在500到1000个,具体以CPU不要超过80%为准,可以启动多个进程压测。 + +那就让我们动手搞个HLS集群出来吧。 + +## Example + +下面我们用docker来构建一个HLS的分发集群。 + +首先,启动SRS源站: + +```bash +./objs/srs -c conf/hls.origin.conf +``` + +然后,启动NGINX源站: + +```bash +nginx -c $(pwd)/conf/hls.edge.conf +``` + +最后,推流到源站: + +```bash +ffmpeg -re -i doc/source.flv -c copy \ + -f flv rtmp://127.0.0.1/live/livestream +``` + +播放HLS: + +* SRS源站:http://127.0.0.1:8080/live/livestream.m3u8 +* NGINX边缘:http://127.0.0.1:8081/live/livestream.m3u8 + +启动压测,从NGINX取HLS: + +```bash +docker run --rm -it --network=host --name sb ossrs/srs:sb \ + ./objs/sb_hls_load -c 500 \ + -r http://192.168.0.14:8081/live/livestream.m3u8 +``` + +可是看到SRS的压力并不大,CPU消耗都在NGINX上。 + +NGINX边缘集群成功解决了HLS的分发问题,如果同时需要做低延迟直播,分发HTTP-FLV,怎么做呢?如果要支持HTTPS HLS,或者HTTPS-FLV呢? 
+ +NGINX完全没问题,下面就看如何配合SRS Edge Server,实现HTTP-FLV和HLS通过NGINX分发。 + +## Work with SRS Edge Server + +NGINX边缘集群,也可以和SRS Edge Server一起工作,可以实现HLS和HTTP-FLV的分发。 + +```text ++------------+ +------------+ +| SRS Origin +--RTMP-->--+ SRS Edge + ++-----+------+ +----+-------+ + | | +------------+ + | +---HTTP-FLV->--+ NGINX + +-----------+ + | + Edge +--HLS/FLV-->--+ Visitors + + +-------HLS--->-------------------------+ Servers + +-----------+ + +------------+ +``` + +实现起来很简单,只需要在NGINX的服务器上,部署一个SRS,并让NGINX工作在反向代理模式就可以。 + +```bash +# For SRS streaming, for example: +# http://r.ossrs.net/live/livestream.flv +location ~ /.+/.*\.(flv)$ { + proxy_pass http://127.0.0.1:8080$request_uri; +} +``` + +这样HLS由NGINX管理缓存和回源,而FLV则由SRS Edge缓存和回源。 + +这个架构虽好,实际上NGINX可以直接作为HLS源站,这样可以更高性能,是否可以呢?完全没问题,我们看如何完全用NGINX分发HLS。 + +## NGINX Origin Server + +由于HLS就是普通的文件,因此也可以直接使用NGINX作为HLS源站。 + +在超高并发的NGINX Edge集群中,也可以形成机房级别的小集群,从某个NGINX中集中回源,这样可以支持更高的并发。 + +使用NGINX分发HLS文件,其实很简单,只需要设置root就可以了: + +```bash + # For HLS delivery + location ~ /.+/.*\.(m3u8)$ { + root /usr/local/srs/objs/nginx/html; + add_header Cache-Control "public, max-age=10"; + } + location ~ /.+/.*\.(ts)$ { + root /usr/local/srs/objs/nginx/html; + add_header Cache-Control "public, max-age=86400"; + } +``` + +> Note: 这里我们设置了m3u8的缓存时间是10秒,需要根据切片的大小调整。 + +> Note: 由于目前SRS支持HLS variant,实现HLS的播放统计,因此没有NGINX这么高效,参考 [#2995](https://github.com/ossrs/srs/issues/2995) + +> Note: SRS应该要设置`Cache-Control`,因为切片的服务才能动态设置正确的缓存时间,减少延迟,参考 [#2991](https://github.com/ossrs/srs/issues/2991) + +## Debugging + +如何判断缓存有没有生效呢?可以在NGINX日志中,加入一个字段`upstream_cache_status`,分析NGINX日志来判断缓存是否生效: + +```bash +log_format main '$upstream_cache_status $remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; +access_log /var/log/nginx/access.log main; +``` + +第一个字段就是缓存状态,可以用下面的命令分析,比如只看TS文件的缓存情况: + +```bash +cat /var/log/nginx/access.log | grep '.ts HTTP' \ + | awk '{print $1}' | sort | uniq -c | sort -r +``` + +可以看到哪些是HIT缓存了,就不会从SRS下载文件,而直接从NGINX获取文件了。 + +也可以直接在响应头加入这个字段,这样可以在浏览器中看每个请求,是否HIT了: + +```bash +add_header X-Cache-Status $upstream_cache_status; +``` + +> Note: 关于缓存生效时间,参考字段[proxy_cache_valid](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_valid)的定义,实际上若源站指定了`Cache-Control`会覆盖这个配置。 + +## aaPanel Configuration + +若使用宝塔,那么可以新增一个站点,然后在站点的配置中写入如下配置: + +```bash + # For Proxy Cache. + proxy_cache_path /tmp/nginx-cache levels=1:2 keys_zone=srs_cache:8m max_size=1000m inactive=600m; + proxy_temp_path /tmp/nginx-cache/tmp; + + server { + listen 80; + server_name your.domain.com; + + # For Proxy Cache. + proxy_cache_valid 404 10s; + proxy_cache_lock on; + proxy_cache_lock_age 300s; + proxy_cache_lock_timeout 300s; + proxy_cache_min_uses 1; + + location ~ /.+/.*\.(m3u8)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + # For Proxy Cache. + proxy_cache srs_cache; + proxy_cache_key $scheme$proxy_host$uri$args; + proxy_cache_valid 200 302 10s; + } + location ~ /.+/.*\.(ts)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + # For Proxy Cache. 
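+            # 补充说明:这里假设SRS的HTTP服务与NGINX部署在同一台机器、侦听8080端口,
+            # 若SRS在其他机器或端口,请相应修改上面proxy_pass的地址。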
+ proxy_cache srs_cache; + proxy_cache_key $scheme$proxy_host$uri; + proxy_cache_valid 200 302 60m; + } + } +``` + +> 注意:一般宝塔新增站点侦听的是80端口,域名server_name是自己填的域名,其他配置同宝塔配置。或者在宝塔的这个站点配置中,加入上面的cache和location的配置也可以。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/nginx-for-hls) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/origin-cluster.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/origin-cluster.md new file mode 100644 index 00000000..dce14794 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/origin-cluster.md @@ -0,0 +1,68 @@ +--- +title: Origin Cluster +sidebar_label: Origin Cluster +hide_title: false +hide_table_of_contents: false +--- + +# OriginCluster + +## Design + +关于源站集群的设计参考[Issue#464](https://github.com/ossrs/srs/issues/464#issuecomment-306082751)。 +源站集群主要解决大量推流的情况,比如需要推1万路流。 + +![](/img/doc-advanced-guides-origin-cluster-001.png) + +> Remark: 源站集群只支持RTMP协议,如果需要HTTP-FLV,可以加一个Edge将RTMP转成HTTP-FLV。 + +## Config + +源站集群的配置如下: + +``` +vhost __defaultVhost__ { + # The config for cluster. + cluster { + # The cluster mode, local or remote. + # local: It's an origin server, serve streams itself. + # remote: It's an edge server, fetch or push stream to origin server. + # default: local + mode local; + + # For origin(mode local) cluster, turn on the cluster. + # @remark Origin cluster only supports RTMP, use Edge to transmux RTMP to FLV. + # default: off + # TODO: FIXME: Support reload. + origin_cluster on; + + # For origin (mode local) cluster, the co-worker's HTTP APIs. + # This origin will connect to co-workers and communicate with them. + # please read: https://ossrs.net/lts/zh-cn/docs/v4/doc/origin-cluster + # TODO: FIXME: Support reload. + coworkers 127.0.0.1:9091 127.0.0.1:9092; + } +} +``` + +其中: + +* mode: 集群的模式,对于源站集群,值应该是local。 +* origin_cluster: 是否开启源站集群。 +* coworkers: 源站集群中的其他源站的HTTP API地址。 + +> Remark: 如果流不在本源站,会通过HTTP API查询其他源站是否有流。如果流其他源站,则返回RTMP302重定向请求到该源站。如果所有源站都没有流则返回错误。 + +> Remark: 特别注意的是,如果流还没有开始推,那么服务器会返回失败,这点和源站没有在源站集群的行为不同。当源站独立工作时,会等待流推上来;当源站在源站集群中时,因为流可能不会推到本源站,所以等待流推上来没有意义。 + +## Usage + +源站集群的用法参考[#464](https://github.com/ossrs/srs/issues/464#issuecomment-366169487)。 + +推荐在源站集群前面挂一系列的Edge服务器,参考[这里](https://github.com/ossrs/srs/issues/464#issuecomment-366169962),Edge服务器可以转换协议,支持RTMP和HTTP-FLV,同时支持源站故障时自动切换,不中断客户端。 + +2018.02 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/origin-cluster) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/perf.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/perf.md new file mode 100644 index 00000000..fd63b9e6 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/perf.md @@ -0,0 +1,14 @@ +--- +title: Perf Analysis +sidebar_label: Perf Analysis +hide_title: false +hide_table_of_contents: false +--- + +# Perf + +最新更新,参考[SRS性能(CPU)、内存优化工具用法](https://www.jianshu.com/p/6d4a89359352) + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/perf) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/performance.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/performance.md new file mode 100644 index 00000000..d2293fcf --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/performance.md @@ -0,0 +1,745 @@ +--- +title: Performance +sidebar_label: Performance +hide_title: false +hide_table_of_contents: false +--- + +# Performance + +SRS提供了一系列工具来定位性能瓶颈和内存泄漏,这些在`./configure && 
make`后的`summary`中是有给出来用法的,不过不是很方便,所以特地把用法写到这个文章中。 + +文中所有的工具,对于其他的linux程序也是有用的。 + +> Note: 所有工具用起来都会导致SRS性能低下,所以除非是排查问题,否则不要开启这些选项。 + +## RTC + +RTC是UDP的协议,先设置`网卡队列缓冲区`,下面命令是UDP分析常用的: + +```bash +# 查看UDP缓冲区长度,默认只有200KB左右。 +sysctl net.core.rmem_max +sysctl net.core.rmem_default +sysctl net.core.wmem_max +sysctl net.core.wmem_default + +# 修改缓冲区长度为16MB +sysctl net.core.rmem_max=16777216 +sysctl net.core.rmem_default=16777216 +sysctl net.core.wmem_max=16777216 +sysctl net.core.wmem_default=16777216 +``` + +> Note: 对于Docker,在宿主机上设置后容器就自然生效了,注意需要先设置参数然后再启动容器(或者启动容器中的SRS进程),也就是Docker容器启动进程时读取的是宿主机的这个内核配置。 + +> Note:如果希望在Docker中设置这些参数,只能以`--network=host`方式启动,也就是复用宿主机的网络。注意如果不需要在Docker中设置是不依赖这种方式的。 + +也可以修改系统文件`/etc/sysctl.conf`,重启也会生效: + +```bash +# vi /etc/sysctl.conf +# For RTC +net.core.rmem_max=16777216 +net.core.rmem_default=16777216 +net.core.wmem_max=16777216 +net.core.wmem_default=16777216 +``` + +查看接收和发送的丢包信息: + +```bash +# 查看丢包 +netstat -suna +# 查看30秒的丢包差 +netstat -suna && sleep 30 && netstat -suna +``` + +实例说明: + +* `224911319 packets received`,这是接收到的总包数。 +* `65731106 receive buffer errors`,接收的丢包,来不及处理就丢了。 +* `123534411 packets sent`,这是发送的总包数。 +* `0 send buffer errors`,这是发送的丢包。 + +> Note: SRS的日志会打出UDP接收丢包和发送丢包,例如`loss=(r:49,s:0)`,意思是每秒有49个包来不及收,发送没有丢包。 + +> Note:注意Docker虽然读取了宿主机的内核网络参数,但是`netstat -su`获取的数据是和宿主机是不同的,也就是容器的丢包得在容器中执行命令获取。 + +查看接收和发送的长度: +```bash +netstat -lpun +``` + +实例说明: + +* `Recv-Q 427008`,程序的接收队列中的包数。Established: The count of bytes not copied by the user program connected to this socket. +* `Send-Q 0`,程序的发送队列中的包数目。Established: The count of bytes not acknowledged by the remote host. + +下面是netstat的一些参数: + +* `--udp|-u` 筛选UDP协议。 +* `--numeric|-n` 显示数字IP或端口,而不是别名,比如http的数字是80. +* `--statistics|-s` 显示网卡的统计信息。 +* `--all|-a` 显示所有侦听和非侦听的。 +* `--listening|-l` 只显示侦听的socket。 +* `--program|-p` 显示程序名称,谁在用这个FD。 + +## PERF + +PERF是Linux性能分析工具。 + +可以实时看到当前的SRS热点函数: + +``` +perf top -p $(pidof srs) +``` + +或者记录一定时间的数据: + +``` +perf record -p $(pidof srs) + +# 需要按CTRL+C取消record,然后执行下面的 + +perf report +``` + +记录堆栈,显示调用图: + +``` +perf record -a --call-graph fp -p $(pidof srs) +perf report --call-graph --stdio +``` + +> Note: 也可以打印到文件`perf report --call-graph --stdio >t.txt`。 + +> Remark: 由于ST的堆栈是不正常的,perf开启`-g`后记录的堆栈都是错乱的,所以perf只能看SRS的热点,不能看堆栈信息;如果需要看堆栈,请使用`GPERF: GCP`,参考下面的章节。 + +## ASAN(Google Address Sanitizer) + +SRS5+内置和默认支持[ASAN](https://github.com/google/sanitizers/wiki/AddressSanitizer),检测内存泄露、野指针和越界等问题。 + +若你的系统不支持ASAN,可以编译时关闭,相关选项如下: + +```bash +./configure -h |grep asan + --sanitizer=on|off Whether build SRS with address sanitizer(asan). Default: on + --sanitizer-static=on|off Whether build SRS with static libasan(asan). Default: off + --sanitizer-log=on|off Whether hijack the log for libasan(asan). Default: off +``` + +ASAN检查内存问题很准确,推荐开启。 + +## GPROF + +GPROF是个GNU的CPU性能分析工具。参考[SRS GPROF](./gprof.md),以及[GNU GPROF](http://www.cs.utah.edu/dept/old/texinfo/as/gprof.html)。 + +Usage: +``` +# Build SRS with GPROF +./configure --gprof=on && make + +# Start SRS with GPROF +./objs/srs -c conf/console.conf + +# Or CTRL+C to stop GPROF +killall -2 srs + +# To analysis result. 
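+# 注:SRS正常退出后,gprof的性能数据会写到当前目录下的gmon.out文件。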
+gprof -b ./objs/srs gmon.out +``` + +## GPERF + +GPERF是[google tcmalloc](https://github.com/gperftools/gperftools)提供的cpu和内存工具,参考[GPERF](./gperf.md)。 + +### GPERF: GCP + +GCP是CPU性能分析工具,就是一般讲的性能瓶颈,看哪个函数调用占用过多的CPU。参考[GCP](https://gperftools.github.io/gperftools/cpuprofile.html)。 + +Usage: + +``` +# Build SRS with GCP +./configure --gperf=on --gcp=on && make + +# Start SRS with GCP +./objs/srs -c conf/console.conf + +# Or CTRL+C to stop GCP +killall -2 srs + +# To analysis cpu profile +./objs/pprof --text objs/srs gperf.srs.gcp* +``` + +> Note: 用法可以参考[cpu-profiler](https://github.com/ossrs/srs/tree/4.0release/trunk/research/gperftools/cpu-profiler)。 + +图形化展示,在CentOS上安装dot: + +```bash +yum install -y graphviz +``` + +然后生成svg图片,可以用Chrome打开: + +```bash +./objs/pprof --svg ./objs/srs gperf.srs.gcp >t.svg +``` + +### GPERF: GMD + +GMD是GPERF提供的内存Defense工具,检测内存越界和野指针。一般在越界写入时,可能不会立刻导致破坏,而是在切换到其他线程使用被破坏的对象时才会发现破坏了,所以这种内存问题很难排查;GMD能在越界和野指针使用时直接core dump,定位在那个出问题的地方。参考[GMD](http://blog.csdn.net/win_lin/article/details/50461709)。 + +Usage: +``` +# Build SRS with GMD. +./configure --gperf=on --gmd=on && make + +# Start SRS with GMD. +env TCMALLOC_PAGE_FENCE=1 ./objs/srs -c conf/console.conf +``` + +> Note: 用法可以参考[heap-defense](https://github.com/ossrs/srs/tree/4.0release/trunk/research/gperftools/heap-defense)。 + +> Note: 注意GMD需要链接`libtcmalloc_debug.a`,并且开启环境变量`TCMALLOC_PAGE_FENCE`。 + +### GPERF: GMC + +GMC是内存泄漏检测工具,参考[GMC](https://gperftools.github.io/gperftools/heap_checker.html)。 + +Usage: + +``` +# Build SRS with GMC +./configure --gperf=on --gmc=on && make + +# Start SRS with GMC +env PPROF_PATH=./objs/pprof HEAPCHECK=normal ./objs/srs -c conf/console.conf 2>gmc.log + +# Or CTRL+C to stop gmc +killall -2 srs + +# To analysis memory leak +cat gmc.log +``` + +> Note: 用法可以参考[heap-checker](https://github.com/ossrs/srs/tree/4.0release/trunk/research/gperftools/heap-checker)。 + +### GPERF: GMP + +GMP是内存性能分析工具,譬如检测是否有频繁的申请和释放堆内存导致的性能问题。参考[GMP](https://gperftools.github.io/gperftools/heapprofile.html)。 + +Usage: +``` +# Build SRS with GMP +./configure --gperf=on --gmp=on && make + +# Start SRS with GMP +./objs/srs -c conf/console.conf + +# Or CTRL+C to stop gmp +killall -2 srs + +# To analysis memory profile +./objs/pprof --text objs/srs gperf.srs.gmp* +``` + +> Note: 用法可以参考[heap-profiler](https://github.com/ossrs/srs/tree/4.0release/trunk/research/gperftools/heap-profiler)。 + +## VALGRIND + +VALGRIND是大名鼎鼎的C分析工具,SRS3之后支持了。SRS3之前,因为使用了ST,需要给ST打PATCH才能用。 + +``` +valgrind --leak-check=full ./objs/srs -c conf/console.conf +``` + +> Remark: SRS3之前的版本,可以手动给ST打PATCH支持VALGRIND,参考[state-threads](https://github.com/ossrs/state-threads#usage),详细的信息可以参考[ST#2](https://github.com/ossrs/state-threads/issues/2)。 + +## Syscall + +系统调用的性能排查,参考[strace -c -p PID](https://man7.org/linux/man-pages/man1/strace.1.html) + +## OSX + +在OSX/Darwin/Mac系统,可以用Instruments,在xcode中选择Open Develop Tools,就可以看到Instruments,也可以直接找这个程序,参考[Profiling c++ on mac os x](https://stackoverflow.com/questions/11445619/profiling-c-on-mac-os-x) + +``` +instruments -l 30000 -t Time\ Profiler -p 72030 +``` + +> Remark: 也可以在Active Monitor中选择进程,然后选择Sample采样。 + +## Multiple Process and Softirq + +多核时,一般网卡软中断(内核网络传输)在CPU0上,可以把SRS调度到其他CPU: + +```bash +taskset -p 0xfe $(pidof srs) +``` + +或者,指定SRS运行在CPU1上: + +```bash +taskset -pc 1 $(pidof srs) +``` + +调整后,可以运行`top`,然后按数字`1`,可以看到每个CPU的负载: + +```bash +top # 进入界面后按数字1 +#%Cpu0 : 1.8 us, 1.1 sy, 0.0 ni, 90.8 id, 0.0 wa, 0.0 hi, 6.2 si, 0.0 st +#%Cpu1 : 67.6 us, 17.6 sy, 0.0 ni, 14.9 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st 
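+# 上面示例中,SRS绑定到CPU1后负载(us/sy)集中在CPU1,而网卡软中断(si)仍留在CPU0,互不争抢。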
+``` + +或者使用`mpstat -P ALL` + +```bash +mpstat -P ALL +#01:23:14 PM CPU %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle +#01:23:14 PM all 33.33 0.00 8.61 0.04 0.00 3.00 0.00 0.00 0.00 55.02 +#01:23:14 PM 0 2.46 0.00 1.32 0.06 0.00 6.27 0.00 0.00 0.00 89.88 +#01:23:14 PM 1 61.65 0.00 15.29 0.02 0.00 0.00 0.00 0.00 0.00 23.03 +``` + +> Note: 可以使用命令`cat /proc/softirqs`,查看所有CPU的具体软中断类型,参考[Introduction to deferred interrupts (Softirq, Tasklets and Workqueues)](https://0xax.gitbooks.io/linux-insides/content/Interrupts/linux-interrupts-9.html)。 + +> Note: 如果将SRS强制绑定在CPU0上,则会导致较高的`softirq`,这可能是进程和系统的软中断都在CPU0上,可以看到si也比分开的要高很多。 + +如果是多CPU,比如4CPU,则网卡中断可能会绑定到多个CPU,可以通过下面的命令,查看网卡中断的绑定情况: + +```bash +# grep virtio /proc/interrupts | grep -e in -e out + 29: 64580032 0 0 0 PCI-MSI-edge virtio0-input.0 + 30: 1 49 0 0 PCI-MSI-edge virtio0-output.0 + 31: 48663403 0 11845792 0 PCI-MSI-edge virtio0-input.1 + 32: 1 0 0 52 PCI-MSI-edge virtio0-output.1 + +# cat /proc/irq/29/smp_affinity +1 # 意思是virtio0的接收,绑定到CPU0 +# cat /proc/irq/30/smp_affinity +2 # 意思是virtio0的发送,绑定到CPU1 +# cat /proc/irq/31/smp_affinity +4 # 意思是virtio1的接收,绑定到CPU2 +# cat /proc/irq/32/smp_affinity +8 # 意思是virtio1的发送,绑定到CPU3 +``` + +我们可以强制将网卡软中断绑定到CPU0,参考[Linux: scaling softirq among many CPU cores](http://natsys-lab.blogspot.com/2012/09/linux-scaling-softirq-among-many-cpu.html)和[SMP IRQ affinity](https://www.kernel.org/doc/Documentation/IRQ-affinity.txt): + +```bash +for irq in $(grep virtio /proc/interrupts | grep -e in -e out | cut -d: -f1); do + echo 1 > /proc/irq/$irq/smp_affinity +done +``` + +> Note:如果要绑定到`CPU 0-1`,执行`echo 3 > /proc/irq/$irq/smp_affinity` + +然后将SRS所有线程,绑定到CPU0之外的CPU: + +```bash +taskset -a -p 0xfe $(cat objs/srs.pid) +``` + +可以看到,软中断默认分配方式占用较多CPU,将软中断集中在CPU0,降低20%左右CPU。 + +如果要获取极高的性能,那么可以在SRS的启动脚本中,在启动SRS之前,执行绑核和绑软中断的命令。 + +## Process Priority + +可以设置SRS为更高的优先级,可以获取更多的CPU时间: + +```bash +renice -n -15 -p $(pidof srs) +``` + +> Note: nice的值从`-20`到`19`,默认是`0`,一般ECS的优先的进程是`-10`,所以这里设置为`-15`。 + +可以从ps中,看到进程的nice,也就是`NI`字段: + +```bash +top -n1 -p $(pidof srs) +# PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND +# 1505 root 5 -15 519920 421556 4376 S 66.7 5.3 4:41.12 srs +``` + +## Performance Banchmark + +对比SRS和高性能nginx-rtmp的Performance,SRS为单进程,nginx-rtmp支持多进程,为了对比nginx-rtmp也只开启一个进程。 + +提供详细的性能测试的过程,可以为其他性能测试做参考,譬如测试nginx-rtmp的多进程,和srs的forward对比之类。 + +最新的性能测试数据,请参考[performance](https://github.com/ossrs/srs/tree/develop#performance)。 + +### Hardware + +本次对比所用到的硬件环境,使用虚拟机,客户端和服务器都运行于一台机器,避开网络瓶颈。 + +* 硬件: 笔记本上的虚拟机 +* 系统: CentOS 6.0 x86_64 Linux 2.6.32-71.el6.x86_64 +* CPU: 3 Intel(R) Core(TM) i7-3520M CPU @ 2.90GHz +* 内存: 2007MB + +### OS + +超过1024的连接数测试需要打开linux的限制。且必须以root登录和执行。 + +* 设置连接数:`ulimit -HSn 10240` +* 查看连接数: + +```bash +[root@dev6 ~]# ulimit -n +10240 +``` + +* 重启srs:`sudo /etc/init.d/srs restart` + +* 注意:启动服务器前必须确保连接数限制打开。 + +### NGINX-RTMP + +NGINX-RTMP使用的版本信息,以及编译参数。 + +* NGINX: nginx-1.5.7.tar.gz +* NGINX-RTMP: nginx-rtmp-module-1.0.4.tar.gz +* 下载页面,包含编译脚本:[下载nginx-rtmp](http://download.csdn.net/download/winlinvip/6795467) +* 编译参数: + +```bash +./configure --prefix=`pwd`/../_release \ +--add-module=`pwd`/../nginx-rtmp-module-1.0.4 \ +--with-http_ssl_module && make && make install +``` + +* 配置nginx:`_release/conf/nginx.conf` + +```bash +user root; +worker_processes 1; +events { + worker_connections 10240; +} +rtmp{ + server{ + listen 19350; + application live{ + live on; + } + } +} +``` + +* 确保连接数没有限制: + +```bash +[root@dev6 nginx-rtmp]# ulimit -n +10240 +``` + +* 启动命令:``./_release/sbin/nginx`` +* 
确保nginx启动成功: + +```bash +[root@dev6 nginx-rtmp]# netstat -anp|grep 19350 +tcp 0 0 0.0.0.0:19350 0.0.0.0:* LISTEN 6486/nginx +``` + +### SRS + +SRS接受RTMP流,并转发给nginx-rtmp做为对比。 + +SRS的版本和编译参数。 + +* SRS: [SRS 0.9](https://github.com/ossrs/srs/releases/tag/0.9) +* 编译参数:``./configure && make`` +* 配置SRS:`conf/srs.conf` + +```bash +listen 1935; +max_connections 10240; +vhost __defaultVhost__ { + forward 127.0.0.1:19350; +} +``` + +* 确保连接数没有限制: + +```bash +[root@dev6 trunk]# ulimit -n +10240 +``` + +* 启动命令:``nohup ./objs/srs -c conf/srs.conf >/dev/null 2>&1 &`` +* 确保srs启动成功: + +```bash +[root@dev6 trunk]# netstat -anp|grep "1935 " +tcp 0 0 0.0.0.0:1935 0.0.0.0:* LISTEN 6583/srs +``` + +### Publish and Play + +使用ffmpeg推送SRS的实例流到SRS,SRS转发给nginx-rtmp,可以通过vlc/srs-players观看。 + +推送RTMP流到服务器和观看。 + +* 启动FFMPEG循环推流: + +```bash +for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg \ + -re -i doc/source.flv \ + -acodec copy -vcodec copy \ + -f flv rtmp://127.0.0.1:1935/live/livestream; \ + sleep 1; +done +``` + +* 查看服务器的地址:`192.168.2.101` + +```bash +[root@dev6 nginx-rtmp]# ifconfig eth0 +eth0 Link encap:Ethernet HWaddr 08:00:27:8A:EC:94 + inet addr:192.168.2.101 Bcast:192.168.2.255 Mask:255.255.255.0 +``` + +* SRS的流地址:`rtmp://192.168.2.101:1935/live/livestream` +* nginx-rtmp的流地址:`rtmp://192.168.2.101:19350/live/livestream` + +### Client + +使用linux工具模拟RTMP客户端访问,参考:[srs-bench](https://github.com/ossrs/srs-bench) + +sb_rtmp_load为RTMP流负载测试工具,单个进程可以模拟1000至3000个客户端。为了避免过高负载,一个进程模拟800个客户端。 + +* 编译:`./configure && make` +* 启动参数:`./objs/sb_rtmp_load -c 800 -r ` + +### Record Data + +测试前,记录SRS和nginx-rtmp的各项资源使用指标,用作对比。 + +* top命令: + +```bash +srs_pid=$(pidof srs); \ +nginx_pid=`ps aux|grep nginx|grep worker|awk '{print $2}'`; \ +load_pids=`ps aux|grep objs|grep sb_rtmp_load|awk '{ORS=",";print $2}'`; \ +top -p $load_pids$srs_pid,$nginx_pid +``` + +* 查看连接数命令: + +```bash +srs_connections=`netstat -anp|grep srs|grep ESTABLISHED|wc -l`; \ +nginx_connections=`netstat -anp|grep nginx|grep ESTABLISHED|wc -l`; \ +echo "srs_connections: $srs_connections"; \ +echo "nginx_connections: $nginx_connections"; +``` + +* 查看服务器消耗带宽,其中,单位是bytes,需要乘以8换算成网络用的bits,设置dstat为30秒钟统计一次,数据更准: + +```bash +[root@dev6 nginx-rtmp]# dstat -N lo 30 +----total-cpu-usage---- -dsk/total- -net/lo- ---paging-- ---system-- +usr sys idl wai hiq siq| read writ| recv send| in out | int csw + 0 0 96 0 0 3| 0 0 |1860B 58k| 0 0 |2996 465 + 0 1 96 0 0 3| 0 0 |1800B 56k| 0 0 |2989 463 + 0 0 97 0 0 2| 0 0 |1500B 46k| 0 0 |2979 461 +``` + +* 数据见下表: + +| Server | CPU | 内存 | 连接数 | E带宽 | A带宽 | sb | C延迟 | +| ------ | -------- | ---- | ---- | ------- | ------ | ------- | -------- | +| SRS | 1.0% | 3MB | 3 | 不适用 | 不适用 | 不适用 | 0.8秒 | +| nginx-rtmp | 0.7% | 8MB | 2 | 不适用 | 不适用 | 不适用 | 0.8秒 | + +期望带宽(E带宽):譬如测试码率为200kbps时,若模拟1000个并发,应该是1000*200kbps=200Mbps带宽。 + +实际带宽(A带宽):指服务器实际的吞吐率,服务器性能下降时(譬如性能瓶颈),可能达不到期望的带宽,会导致客户端拿不到足够的数据,也就是卡顿的现象。 + +客户端延迟(C延迟):粗略计算即为客户端的缓冲区长度,假设服务器端的缓冲区可以忽略不计。一般RTMP直播播放器的缓冲区设置为0.8秒,由于网络原因,或者服务器性能问题,数据未能及时发送到客户端,就会造成客户端卡(缓冲区空),网络好时将队列中的数据全部给客户端(缓冲区变大)。 + +srs-bench(srs-bench/sb):指模拟500客户端的srs-bench的平均CPU。一般模拟1000个客户端没有问题,若模拟1000个,则CPU简单除以2。 + +其中,“不适用”是指还未开始测试带宽,所以未记录数据。 + +其中,srs的三个连接是: +* FFMPEG推流连接。 +* Forward给nginx RTMP流的一个连接。 + +其中,nginx-rtmp的两个连接是: +* SRS forward RTMP的一个连接。 + +### Benchmark SRS + +开始启动srs-bench模拟客户端并发测试SRS的性能。 + +* 启动500客户端: + +```bash +./objs/sb_rtmp_load -c 500 -r rtmp://127.0.0.1:1935/live/livestream >/dev/null & +``` + +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | 内存 | 连接数 | E带宽 | A带宽 | sb | C延迟 | +| ------ | -------- | ---- | ---- | ------- 
| ------ | ------- | -------- | +| SRS | 9.0% | 8MB | 503 | 100Mbps | 112Mbps | 12.6% | 0.8秒 | + +* 再启动一个模拟500个连接的srs-bench,共1000个连接。 +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | 内存 | 连接数 | E带宽 | A带宽 | sb | C延迟 | +| ------ | -------- | ---- | ---- | ------- | ------ | ------- | -------- | +| SRS | 23.6% | 13MB | 1003 | 200Mbps | 239Mbps | 16.6% | 0.8秒 | + +* 再启动一个模拟500个连接的srs-bench,共1500个连接。 +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | 内存 | 连接数 | E带宽 | A带宽 | sb | C延迟 | +| ------ | -------- | ---- | ---- | ------- | ------ | ------- | -------- | +| SRS | 38.6% | 20MB | 1503 | 300Mbps | 360Mbps | 17% | 0.8秒 | + +* 再启动一个模拟500个连接的srs-bench,共2000个连接。 +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | 内存 | 连接数 | E带宽 | A带宽 | sb | C延迟 | +| ------ | -------- | ---- | ---- | ------- | ------ | ------- | -------- | +| SRS | 65.2% | 34MB | 2003 | 400Mbps | 480Mbps | 22% | 0.8秒 | + +* 再启动一个模拟500个连接的srs-bench,共2500个连接。 +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | 内存 | 连接数 | E带宽 | A带宽 | sb | C延迟 | +| ------ | -------- | ---- | ---- | ------- | ------ | ------- | -------- | +| SRS | 72.9% | 38MB | 2503 | 500Mbps | 613Mbps | 24% | 0.8秒 | + +由于虚拟机能力的限制,只能测试到2500并发。 + +### Benchmark NginxRTMP + +开始启动srs-bench模拟客户端并发测试SRS的性能。 + +* 启动500客户端: + +```bash +./objs/sb_rtmp_load -c 500 -r rtmp://127.0.0.1:19350/live/livestream >/dev/null & +``` +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | 内存 | 连接数 | E带宽 | A带宽 | sb | C延迟 | +| ------ | -------- | ---- | ---- | ------- | ------ | ------- | -------- | +| nginx-rtmp | 8.3% | 13MB | 502 | 100Mbps | 120Mbps | 16.3% | 0.8秒 | + +* 再启动一个模拟500个连接的srs-bench,共1000个连接。 +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | 内存 | 连接数 | E带宽 | A带宽 | sb | C延迟 | +| ------ | -------- | ---- | ---- | ------- | ------ | ------- | -------- | +| nginx-rtmp | 27.3% | 19MB | 1002 | 200Mbps | 240Mbps | 30% | 0.8秒 | + +* 再启动一个模拟500个连接的srs-bench,共1500个连接。 +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | 内存 | 连接数 | E带宽 | A带宽 | sb | C延迟 | +| ------ | -------- | ---- | ---- | ------- | ------ | ------- | -------- | +| nginx-rtmp | 42.3% | 25MB | 1502 | 300Mbps | 400Mbps | 31% | 0.8秒 | + +* 再启动一个模拟500个连接的srs-bench,共2000个连接。 +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | 内存 | 连接数 | E带宽 | A带宽 | sb | C延迟 | +| ------ | -------- | ---- | ---- | ------- | ------ | ------- | -------- | +| nginx-rtmp | 48.9% | 31MB | 2002 | 400Mbps | 520Mbps | 33% | 0.8秒 | + +* 再启动一个模拟500个连接的srs-bench,共2500个连接。 +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | 内存 | 连接数 | E带宽 | A带宽 | sb | C延迟 | +| ------ | -------- | ---- | ---- | ------- | ------ | ------- | -------- | +| nginx-rtmp | 74.2% | 37MB | 2502 | 500Mbps | 580Mbps | 35% | 0.8秒 | + +由于虚拟机能力的限制,只能测试到2500并发。 + +### Performance Compare + +CentOS6 x86_64虚拟机,SRS和nginx-rtmp的数据对比如下: + +| Server | CPU | 内存 | 连接数 | E带宽 | A带宽 | sb | C延迟 | +| ------ | -------- | ---- | ---- | ------- | ------ | ------- | -------- | +| nginx-rtmp | 8.3% | 13MB | 502 | 100Mbps | 120Mbps | 16.3% | 0.8秒 | +| SRS | 9.0% | 8MB | 503 | 100Mbps | 112Mbps | 12.6% | 0.8秒 | +| nginx-rtmp | 27.3% | 19MB | 1002 | 200Mbps | 240Mbps | 30% | 0.8秒 | +| SRS | 23.6% | 13MB | 1003 | 200Mbps | 239Mbps | 16.6% | 0.8秒 | +| nginx-rtmp | 42.3% | 25MB | 1502 | 300Mbps | 400Mbps | 31% | 0.8秒 | +| SRS | 38.6% | 20MB | 1503 | 300Mbps | 360Mbps | 17% | 0.8秒 | +| nginx-rtmp | 48.9% | 31MB | 2002 | 400Mbps | 520Mbps | 33% | 0.8秒 | +| SRS | 65.2% | 34MB | 2003 | 400Mbps | 480Mbps | 22% | 0.8秒 | +| nginx-rtmp | 74.2% | 37MB | 2502 | 500Mbps | 580Mbps | 35% | 0.8秒 | +| SRS | 72.9% | 38MB | 2503 | 500Mbps | 613Mbps | 24% | 0.8秒 | + +### Performance Banchmark 4k + 
+今天做了性能优化,默认演示流(即采集doc/source.flv文件为流)达到4k以上并发没有问题。 + +``` +[winlin@dev6 srs]$ ./objs/srs -v +0.9.130 +``` + +``` +top - 19:52:35 up 1 day, 11:11, 8 users, load average: 1.20, 1.05, 0.92 +Tasks: 171 total, 4 running, 167 sleeping, 0 stopped, 0 zombie +Cpu0 : 26.0%us, 23.0%sy, 0.0%ni, 34.0%id, 0.3%wa, 0.0%hi, 16.7%si, 0.0%st +Cpu1 : 26.4%us, 20.4%sy, 0.0%ni, 34.1%id, 0.7%wa, 0.0%hi, 18.4%si, 0.0%st +Cpu2 : 22.5%us, 15.4%sy, 0.0%ni, 45.3%id, 1.0%wa, 0.0%hi, 15.8%si, 0.0%st +Mem: 2055440k total, 1972196k used, 83244k free, 136836k buffers +Swap: 2064376k total, 3184k used, 2061192k free, 926124k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND +17034 root 20 0 415m 151m 2040 R 94.4 7.6 14:29.33 ./objs/srs -c console.conf + 1063 winlin 20 0 131m 68m 1336 S 17.9 3.4 54:05.77 ./objs/sb_rtmp_load -c 800 -r rtmp://127.0.0.1:1935/live/livestream + 1011 winlin 20 0 132m 68m 1336 R 17.6 3.4 54:45.53 ./objs/sb_rtmp_load -c 800 -r rtmp://127.0.0.1:1935/live/livestream +18736 winlin 20 0 113m 48m 1336 S 17.6 2.4 1:37.96 ./objs/sb_rtmp_load -c 800 -r rtmp://127.0.0.1:1935/live/livestream + 1051 winlin 20 0 131m 68m 1336 S 16.9 3.4 53:25.04 ./objs/sb_rtmp_load -c 800 -r rtmp://127.0.0.1:1935/live/livestream +18739 winlin 20 0 104m 39m 1336 R 15.6 2.0 1:25.71 ./objs/sb_rtmp_load -c 800 -r rtmp://127.0.0.1:1935/live/livestream +``` + +``` +[winlin@dev6 ~]$ dstat -N lo 30 +----total-cpu-usage---- -dsk/total- ---net/lo-- ---paging-- ---system-- +usr sys idl wai hiq siq| read writ| recv send| in out | int csw + 3 2 92 0 0 3| 11k 27k| 0 0 | 1B 26B|3085 443 + 32 17 33 0 0 17| 273B 60k| 69M 69M| 0 0 |4878 6652 + 34 18 32 0 0 16| 0 38k| 89M 89M| 0 0 |4591 6102 + 35 19 30 0 0 17| 137B 41k| 91M 91M| 0 0 |4682 6064 + 33 17 33 0 0 17| 0 31k| 55M 55M| 0 0 |4920 7785 + 33 18 31 0 0 17|2867B 34k| 90M 90M| 0 0 |4742 6530 + 32 18 33 0 0 17| 0 31k| 66M 66M| 0 0 |4922 7666 + 33 17 32 0 0 17| 137B 39k| 65M 65M| 0 0 |4841 7299 + 35 18 30 0 0 17| 0 28k| 100M 100M| 0 0 |4754 6752 + 32 17 33 0 0 18| 0 41k| 44M 44M| 0 0 |5130 8251 + 34 18 32 0 0 16| 0 30k| 104M 104M| 0 0 |4456 5718 +``` + +![SRS监控4k并发](/img/doc-advanced-guides-performance-001.png) + +不过我是在虚拟机测试,物理机的实际情况还有待数据观察。 + +### Performance Banchmark 6k + +SRS2.0.15(注意是SRS2.0,而不是SRS1.0)支持6k客户端,522kbps的流可以跑到近4Gbps带宽,单进程。参考:https://github.com/ossrs/srs/issues/194 + +### Performance Banchmark 7.5k + +SRS2.0.30支持7.5k客户端,参考:https://github.com/ossrs/srs/issues/217 + +Winlin 2014.2 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/performance) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/raspberrypi.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/raspberrypi.md new file mode 100644 index 00000000..3811a77b --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/raspberrypi.md @@ -0,0 +1,236 @@ +--- +title: RaspBerryPi +sidebar_label: RaspBerryPi +hide_title: false +hide_table_of_contents: false +--- + +# Performance benchmark for SRS on RaspberryPi + +SRS支持arm,在树莓派上成功运行,本文记录了树莓派的性能指标。 + +## Install SRS + +树莓派下安装和运行SRS,有以下方式: +* 编译源站和运行:SRS在arm/raspberrypi下的编译,参考[Build: RaspberryPi](./arm.md#raspberrypi) +* 直接下载binary文件,然后安装运行,下载RespberryPi的安装包:[Github站点](http://ossrs.net/srs.release/releases/) 或者 [国内镜像站点](http://ossrs.net/srs/releases/)。安装方法见页面。 + +查看SRS是否启动:`/etc/init.d/srs status` + +## RaspberryPi + +本次测试的硬件环境如下: +* [RaspberryPi](http://item.jd.com/1014155.html):B型 +* SoC BroadcomBCM2835(CPU,GPU,DSP,SDRAM,USB) +* CPU ARM1176JZF-S(ARM11) 700MHz +* GPU Broadcom VideoCore IV, 
OpenGL ES 2.0, 1080p 30 h.264/MPEG-4 AVC decoder +* RAM 512MByte +* USB 2 x USB2.0 +* VideoOutput Composite RCA(PAL&NTSC), HDMI(rev 1.3&1.4), raw LCD Panels via DSI 14 HDMI resolution from 40x350 to 1920x1200 plus various PAL and NTSC standards +* AudioOutput 3.5mm, HDMI +* Storage SD/MMC/SDIO socket +* Network 10/100 ethernet +* Device 8xGPIO, UART, I2C, SPI bus, +3.3V, +5V, ground(nagetive) +* Power 700mA(3.5W) 5V +* Size 85.60 x 53.98 mm(3.370 x 2.125 in) +* OS Debian GNU/linux, Fedora, Arch Linux ARM, RISC OS, XBMC + +另外,直播不会用到SD卡,所以可以忽略不计,用的是class2,4GB的卡。 + +软件环境如下: +* RaspberryPi提供的img:2014-01-07-wheezy-raspbian.img +* uname: Linux raspberrypi 3.10.25+ #622 PREEMPT Fri Jan 3 18:41:00 GMT 2014 armv6l GNU/Linux +* cpu: arm61 +* 服务器: srs 0.9.38 +* 服务器类型: raspberry pi +* 客户端:[srs-bench](https://github.com/ossrs/srs-bench) +* 客户端类型: 虚拟机,CentOS6 +* 观看客户端: PC win7, flash +* 网络: 百兆交换机(pi只支持百兆) + +流信息: +* 码率:200kbps +* 分辨率:768x320 +* 音频:30kbps + +环境搭建参考:[SRS: arm](./arm.md#raspberrypi) + +## OS settings + +超过1024的连接数测试需要打开linux的限制。且必须以root登录和执行。 + +* 设置连接数:`ulimit -HSn 10240` +* 查看连接数: + +```bash +[root@dev6 ~]# ulimit -n +10240 +``` + +* 重启srs:`sudo /etc/init.d/srs restart` + +* 注意:启动服务器前必须确保连接数限制打开。 + +## Publish and Play + +可以使用centos虚拟机推流到srs,或者用FMLE推流到raspberry-pi的SRS。假设raspberry-pi服务器的ip是`192.168.1.105`,请换成你自己的服务器ip。 + +推送RTMP流到服务器和观看。 + +* 启动FFMPEG循环推流: + +```bash +for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg \ + -re -i doc/source.flv \ + -acodec copy -vcodec copy \ + -f flv rtmp://192.168.1.105:1935/live/livestream; \ + sleep 1; +done +``` + +* 查看服务器的地址:`192.168.1.105` + +```bash +[root@dev6 nginx-rtmp]# ifconfig eth0 +eth0 Link encap:Ethernet HWaddr 08:00:27:8A:EC:94 + inet addr:192.168.1.105 Bcast:192.168.2.255 Mask:255.255.255.0 +``` + +* SRS的流地址:`rtmp://192.168.1.105:1935/live/livestream` +* 通过srs-players播放SRS流:[播放SRS的流](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) + +## Client + +使用linux工具模拟RTMP客户端访问,参考:[srs-bench](https://github.com/ossrs/srs-bench) + +sb_rtmp_load为RTMP流负载测试工具,单个进程可以模拟1000至3000个客户端。为了避免过高负载,一个进程模拟800个客户端。 + +* 编译:`./configure && make` +* 启动参数:`./objs/sb_rtmp_load -c 800 -r ` + +## Record Data + +测试前,记录SRS的各项资源使用指标,用作对比。 + +* 查看服务器端srs消耗的CPU: + +```bash +pid=`ps aux|grep srs|grep objs|awk '{print $2}'` && top -p $pid +``` + +* 查看客户端srs-bench消耗的CPU: + +```bash +pid=`ps aux|grep load|grep rtmp|awk '{print $2}'` && top -p $pid +``` + +* 查看客户端连接数命令: + +```bash +for((;;)); do \ + srs_connections=`sudo netstat -anp|grep 1935|grep ESTABLISHED|wc -l`; \ + echo "srs_connections: $srs_connections"; \ + sleep 5; \ +done +``` + +* 查看客户端消耗带宽(不影响服务器CPU),其中,单位是bytes,需要乘以8换算成网络用的bits,设置dstat为30秒钟统计一次,数据更准: + +```bash +[winlin@dev6 ~]$ dstat 30 +----total-cpu-usage---- -dsk/total- -net/lo- ---paging-- ---system-- +usr sys idl wai hiq siq| read writ| recv send| in out | int csw + 0 0 96 0 0 3| 0 0 |1860B 58k| 0 0 |2996 465 + 0 1 96 0 0 3| 0 0 |1800B 56k| 0 0 |2989 463 + 0 0 97 0 0 2| 0 0 |1500B 46k| 0 0 |2979 461 +``` + +* 数据见下表: + +| Server | CPU | Mem | Conn | E带宽 | A带宽 | sb | 延迟 | +| ------ | --- | ---- | ---- | ---- | ---- | ---- | ----- | +| SRS | 1.0% | 3MB | 3 | 不适用 | 不适用 | 不适用 | 0.8秒 | + +期望带宽(E带宽):譬如测试码率为200kbps时,若模拟1000个并发,应该是1000*200kbps=200Mbps带宽。 + +实际带宽(A带宽):指服务器实际的吞吐率,服务器性能下降时(譬如性能瓶颈),可能达不到期望的带宽,会导致客户端拿不到足够的数据,也就是卡顿的现象。 + +客户端延迟(延迟):粗略计算即为客户端的缓冲区长度,假设服务器端的缓冲区可以忽略不计。一般RTMP直播播放器的缓冲区设置为0.8秒,由于网络原因,或者服务器性能问题,数据未能及时发送到客户端,就会造成客户端卡(缓冲区空),网络好时将队列中的数据全部给客户端(缓冲区变大)。 + 
+srs-bench(srs-bench/sb):指模拟500客户端的srs-bench的平均CPU。一般模拟1000个客户端没有问题,若模拟1000个,则CPU简单除以2。 + +其中,“不适用”是指还未开始测试带宽,所以未记录数据。 + +## Benchmark SRS 0.9.38 + +本章测试SRS使用Epoll机制的性能。 + +开始启动srs-bench模拟客户端并发测试SRS的性能。 + +树莓派一般10个以内的连接比较常用,所以我们先测试10个链接的情况。加上推流链接实际上11个。 + +* 启动10客户端: + +```bash +./objs/sb_rtmp_load -c 10 -r rtmp://192.168.1.105:1935/live/livestream >/dev/null & +``` + +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | Mem | Conn | E带宽 | A带宽 | sb | 延迟 | +| ------ | --- | ---- | ---- | ---- | ---- | ---- | ----- | +| SRS | 17% | 1.4MB | 11 | 2.53Mbps | 2.6Mbps | 1.3% | 1.7秒 | + +* 再启动一个模拟10个连接的srs-bench,共20个连接。 +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | Mem | Conn | E带宽 | A带宽 | sb | 延迟 | +| ------ | --- | ---- | ---- | ---- | ---- | ---- | ----- | +| SRS | 23% | 2MB | 21 | 4.83Mbps | 5.5Mbps | 2.3% | 1.5秒 | + +* 再启动一个模拟10个连接的srs-bench,共30个连接。 +* 客户端开始播放30秒以上,并记录数据: + +| Server | CPU | Mem | Conn | E带宽 | A带宽 | sb | 延迟 | +| ------ | --- | ---- | ---- | ---- | ---- | ---- | ----- | +| SRS | 50% | 4MB | 31 | 7.1Mbps | 8Mbps | 4% | 2秒 | + +SRS使用epoll时,RaspberryPi B型,230Kbps视频性能测试如下表: + +| Server | CPU | Mem | Conn | E带宽 | A带宽 | sb | 延迟 | +| ------ | --- | ---- | ---- | ---- | ---- | ---- | ----- | +| SRS | 17% | 1.4MB | 11 | 2.53Mbps | 2.6Mbps | 1.3% | 1.7秒 | +| SRS | 23% | 2MB | 21 | 4.83Mbps | 5.5Mbps | 2.3% | 1.5秒 | +| SRS | 50% | 4MB | 31 | 7.1Mbps | 8Mbps | 4% | 2秒 | + +可见,RaspberryPi B型,SD卡class4,能支持的并发,SRS使用EPOLL时,码率为230kbps时,大约为xxxx个,网络带宽占用xxxxMbps。 + +## Benchmark SRS 0.9.72 + +一次性能测试记录: +* 硬件:raspberry-pi,B型,700MHZCPU,500MB内存,百兆有线网络 +* 编码器:SRS自己采集,视频码率516kbps,音频码率63kbps,数据码率580kbps。时长220秒。avatar宣传片。 +* 服务器:SRS 0.9.72。服务器至少有一个连接:采集程序推流到SRS。 +* 客户端:flash播放器,RTMP协议,srs-bench(RTMP负载测试工具) + +数据如下: + +| Server | CPU | Mem | Conn | E带宽 | A带宽 | sb | 延迟 | +| ------ | --- | ---- | ---- | ---- | ---- | ---- | ----- | +| SRS | 5% | 2MB | 2 | 1Mbps | 1.2Mbps | 0% | 1.5秒 | +| SRS | 20% | 2MB | 12 | 6.9Mbps | 6.6Mbps | 2.8% | 2秒 | +| SRS | 36% | 2.4MB | 22 | 12.7Mbps | 12.9Mbps | 2.3% | 2.5秒 | +| SRS | 47% | 3.1MB | 32 | 18.5Mbps | 18.5Mbps | 5% | 2.0秒 | +| SRS | 62% | 3.4MB | 42 | 24.3Mbps | 25.7Mbps | 9.3% | 3.4秒 | +| SRS | 85% | 3.7MB | 52 | 30.2Mbps | 30.7Mbps | 13.6% | 3.5秒 | + +## cubieboard benchmark + +cubieboard是armv7 CPU,双核,性能比树莓派强很多。初步测试SRS支持300个客户端,占用一个CPU80%,可惜没有多进程;要是有多进程,能支持600个客户端,比较实用了。 + +Winlin 2014.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/raspberrypi) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/reload.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/reload.md new file mode 100644 index 00000000..9f08fc64 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/reload.md @@ -0,0 +1,48 @@ +--- +title: 热加载配置 +sidebar_label: 热加载配置 +hide_title: false +hide_table_of_contents: false +--- + +# Reload + +SRS配置完全支持Reload,即在不中断服务时应用配置的修改。 + +## NotSupportedFeatures + +不支持reload的功能包括: +* deamon,是否后台启动。 +* mode,vhost的模式。 + +daemon选项当然是不支持reload的。 + +mode选项,即决定vhost是源站还是边缘,不支持reload。若修改mode之后reload会导致server异常退出,由看门狗重启。原因在于: +* 源站和边缘角色切换过于复杂。 +* 一般源站会建立设备组,全部做源站,不会突然变成边缘 +* 上层和源站重启后,对最终用户没有影响,只是表现会切换上层的卡顿(客户端缓冲区设为3秒以上时,卡顿都不会出现)。 + +一个修改vhost的mode属性的workaround: +* 删除vhost并reload。 +* 确认vhost已经删除了。 +* 添加vhost,使用新的mode,并reload。 + +## 应用场景 + +Reload主要应用场景: +* 配置快速生效:不用重启服务,修改配置后,只需要`killall -1 srs`即可生效配置。 +* 不中断服务:商用服务器往往时时刻刻都在服务用户,如何将一个转码流的码率调低?如何禁用某些频道的HLS?如何添加和删除频道?而且还中断现有用户的服务?使用Reload。 + +## 使用方法 + +Reload的方法为:`killall -1 srs` + +或者指定发送的SRS进程:`kill -1 7635` + +使用启动脚本:`/etc/init.d/srs reload` + +Winlin 
2014.2 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/reload) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/resource.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/resource.md new file mode 100644 index 00000000..4726ed54 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/resource.md @@ -0,0 +1,93 @@ +--- +title: 端口和资源 +sidebar_label: 端口和资源 +hide_title: false +hide_table_of_contents: false +--- + +# Resources + +The resources of SRS. + +## Ports + +The ports used by SRS, kernel services: + +* `tcp://1935`, for [RTMP live streaming server](./rtmp.md). +* `tcp://1985`, HTTP API server, for [HTTP-API](./http-api.md), [WebRTC](./webrtc.md), etc. +* `tcp://8080`, HTTP live streaming server, [HTTP-FLV](./flv.md), [HLS](./hls.md) as such. +* `udp://8000`, [WebRTC Media](./webrtc.md) server. + +For optional HTTPS services, which might be provided by other web servers: + +* `tcp://8088`, HTTPS live streaming server. +* `tcp://1990`, HTTPS API server. + +For optional stream converter services, to push streams to SRS: + +* `udp://8935`, Stream Converter: [Push MPEGTS over UDP](./streamer.md#push-mpeg-ts-over-udp) server. +* `tcp://8936`, Stream Converter: [Push HTTP-FLV](./streamer.md#push-http-flv-to-srs) server. +* `udp://10080`, Stream Converter: [Push SRT Media](https://github.com/ossrs/srs/issues/1147#issuecomment-577469119) server. + +For external services to work with SRS: + +* `udp://1989`, [WebRTC Signaling](https://github.com/ossrs/signaling#usage) server. + +## APIs + +The API used by SRS: + +* `/api/v1/` The HTTP API path. +* `/rtc/v1/` The HTTP API path for RTC. +* `/sig/v1/` The [demo signaling](https://github.com/ossrs/signaling) API. + +Other API used by [ossrs.net](https://ossrs.net): + +* `/gif/v1` The statistic API. +* `/service/v1/` The latest available version API. +* `/ws-service/v1/` The latest available version API, by websocket. +* `/im-service/v1/` The latest available version API, by IM. +* `/code-service/v1/` The latest available version API, by Code verification. + +The statistic path for [ossrs.net](https://ossrs.net): + +* `/srs/xxx` The GitHub pages for [srs](https://github.com/ossrs/srs) +* `/release/xxx` The pages for [ossrs.net](https://ossrs.net) +* `/console/xxx` The pages for [console](http://ossrs.net/console/) +* `/player/xxx` The pages for [players and publishers](http://ossrs.net/players/) +* `/k8s/xxx` The template and repository deploy by K8s, like [srs-k8s-template](https://github.com/ossrs/srs-k8s-template) + +## Mirrors + +[Gitee](https://gitee.com/ossrs/srs), [the GIT usage](./git.md) + +``` +git clone https://gitee.com/ossrs/srs.git && +cd srs && git remote set-url origin https://github.com/ossrs/srs.git && git pull +``` + +> Remark: For users in China, recomment to use mirror from CSDN or OSChina, because they are much faster. 
+[Gitlab](https://gitlab.com/winlinvip/srs-gitlab), [the GIT usage](./git.md) + +``` +git clone https://gitlab.com/winlinvip/srs-gitlab.git srs && +cd srs && git remote set-url origin https://github.com/ossrs/srs.git && git pull +``` + +[Github](https://github.com/ossrs/srs), [the GIT usage](./git.md) + +``` +git clone https://github.com/ossrs/srs.git +``` + +| Branch | Cost | Size | CMD | +| --- | --- | --- | --- | +| 3.0release | 2m19.931s | 262MB | git clone -b 3.0release https://gitee.com/ossrs/srs.git | +| 3.0release | 0m56.515s | 95MB | git clone -b 3.0release --depth=1 https://gitee.com/ossrs/srs.git | +| develop | 2m22.430s | 234MB | git clone -b develop https://gitee.com/ossrs/srs.git | +| develop | 0m46.421s | 42MB | git clone -b develop --depth=1 https://gitee.com/ossrs/srs.git | +| min | 2m22.865s | 217MB | git clone -b min https://gitee.com/ossrs/srs.git | +| min | 0m36.472s | 11MB | git clone -b min --depth=1 https://gitee.com/ossrs/srs.git | +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/resource) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/reuse-port.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/reuse-port.md new file mode 100644 index 00000000..6ad538f3 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/reuse-port.md @@ -0,0 +1,108 @@ +--- +title: Reuse Port +sidebar_label: Reuse Port +hide_title: false +hide_table_of_contents: false +--- + +# Reuse Port + +可以在不同场景下使用REUSE PORT,实现用一个端口对外服务。 + +## For Edge Server + +SRS2的性能有大幅的提升,参考[SRS2性能](https://github.com/ossrs/srs/tree/2.0release#performance)。SRS3我们支持了源站集群, +解决了源站的性能瓶颈,参考[OriginCluster](./sample-origin-cluster.md);对于边缘服务器,我们提供了TCP代理方案, +参考[go-oryx](https://github.com/ossrs/go-oryx);对于边缘服务器,我们还可以支持SO_REUSEPORT,可以在服务器上启动多个Edge进程。 + +![](/img/doc-guides-reuse-port-001.png) + +> 注意:SO_REUSEPORT功能需要Linux Kernel 3.9+,所以如果使用CentOS6你可能需要升级你的内核,推荐使用Ubuntu20。 + +首先,我们启动一个边缘服务器,侦听在1935: + +``` +./objs/srs -c conf/edge.conf +``` + +然后,在同一个服务器,再启动一个边缘服务器,也侦听在1935: + +``` +./objs/srs -c conf/edge2.conf +``` + +> 注意:当然这两个边缘服务器的pid文件路径要不同,否则会启动失败。 + +这样就启动了两个进程,都侦听在1935: + +``` +[root@bf2e88b31f9b trunk]# ps aux|grep srs +root 381 0.1 0.0 19888 5752 pts/2 S+ 08:03 0:01 ./objs/srs -c conf/edge.conf +root 383 0.0 0.0 19204 5468 pts/1 S+ 08:04 0:00 ./objs/srs -c conf/edge2.conf + +[root@bf2e88b31f9b trunk]# lsof -p 381 +srs 381 root 7u IPv6 18835 0t0 TCP *:macromedia-fcs (LISTEN) +[root@bf2e88b31f9b trunk]# lsof -p 383 +srs 383 root 7u IPv6 17831 0t0 TCP *:macromedia-fcs (LISTEN) +``` + +接着,启动源站服务器,这两个边缘服务器从这个源站服务器取流: + +``` +./objs/srs -c conf/origin.conf +``` + +最后,我们可以推流到源站或边缘,从任意边缘服务器拉流播放: + +``` + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +使用VLC播放RTMP流: `rtmp://192.168.1.170:1935/live/livestream` + +## For Origin Server + +Origin也可以使用REUSE PORT,此时多个Origin进程之间是独立的,如果输出是HLS,那么这种方式完全没问题: + +``` + +-----------------+ +Client --->-- + Origin Servers +------> Player + +-----------------+ +``` + +> Note: 如果需要支持输出RTMP或FLV等流协议,那么需要使用[OriginCluster](./sample-origin-cluster.md)。 + +启动第一个源站,侦听在`1935`和`8080`,输入RTMP流,输出HLS流: + +```bash +./objs/srs -c conf/origin.hls.only1.conf +``` + +启动第二个源站,同样侦听在`1935`和`8080`,输入RTMP流,输出HLS流: + +```bash +./objs/srs -c conf/origin.hls.only2.conf +``` + +推第一个流到服务器,会随机选择一个源站: + +```bash +./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream1 +``` + 
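+> Note: 若想确认这路流落在哪个源站进程上,可以用netstat查看1935端口的ESTABLISHED连接及对应进程(示意命令,仅供参考):
+
+```bash
+netstat -anp | grep 1935 | grep ESTABLISHED
+```
+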
+推第二个流到服务器,会随机选择一个源站: + +```bash +./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream2 +``` + +> Note: 由于切片成HLS,所以只要流不同,这两个源站独立工作,是没有问题的。但是如果是输出FLV,可能就会出现找不到流的情况,这时就不能使用这种方式,需要使用[OriginCluster](./sample-origin-cluster.md)。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/reuse-port) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp-atc.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp-atc.md new file mode 100644 index 00000000..84b78135 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp-atc.md @@ -0,0 +1,91 @@ +--- +title: RTMP ATC +sidebar_label: RTMP ATC +hide_title: false +hide_table_of_contents: false +--- + +# ATC支持HLS/HDS热备 + +RTMP的热备怎么做?当边缘回源时,上层出现故障,可以切换到另外一个上层,所以RTMP热备只需要指定多个上层/源站就可以。边缘在故障切换时,会重新连接新服务器,客户端连接还没有断开,所以看起来就像是编码器重新推流了,画面最多抖动一下或者卡一下。 + +HLS热备怎么做?边缘从某个源站拿不到ts切片时,会去另外一个服务器拿。所以就要求两个上层的ts切片一样,当然如果上层服务器都从一个源站取片,是没有问题的。 + +如果HLS的源站需要做热备,怎么办?参考:[Adobe: HDS/HLS热备](http://www.adobe.com/cn/devnet/adobe-media-server/articles/varnish-sample-for-failover.html),如下图所示: + +```bash + +----------+ +----------+ + +--ATC->-+ server +--ATC->-+ packager +-+ +---------+ ++----------+ | RTMP +----------+ RTMP +----------+ | | Reverse | +-------+ +| encoder +->-+ +->-+ Proxy +-->-+ CDN + ++----------+ | +----------+ +----------+ | | (nginx) | +-------+ + +--ATC->-+ server +--ATC->-+ packager +-+ +---------+ + RTMP +----------+ RTMP +----------+ +``` + +实际上,adobe文中所说的是encoder输出的是ATC RTMP流,也没有packager直接server就打包了。如果你需要自己做打包,譬如基于ffmpeg写个工具,自定义HLS流的打包,编码器可以将ATC RTMP流推送到SRS,SRS会以ATC RTMP形式不修改时间戳分发给你的工具。 + +所以ATC RTMP说白了就是绝对时间,server需要能接入绝对时间,若切片在server上则根据绝对时间切片,若server和ReverseProxy之间还有切片工具,那server应该给切片工具绝对时间。 + +## SRS配置ATC + +SRS默认ATC是关闭,即给客户端的RTMP流永远从0开始。若工具需要SRS不修改时间戳(只将sequence header和metadata调整为第一个音视频包的时间戳),可以打开ATC配置: + +```bash +vhost __defaultVhost__ { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # vhost for atc for hls/hds/rtmp backup. + # generally, atc default to off, server delivery rtmp stream to client(flash) timestamp from 0. + # when atc is on, server delivery rtmp stream by absolute time. + # atc is used, for instance, encoder will copy stream to master and slave server, + # server use atc to delivery stream to edge/client, where stream time from master/slave server + # is always the same, client/tools can slice RTMP stream to HLS according to the same time, + # if the time not the same, the HLS stream cannot slice to support system backup. + # + # @see http://www.adobe.com/cn/devnet/adobe-media-server/articles/varnish-sample-for-failover.html + # @see http://www.baidu.com/#wd=hds%20hls%20atc + # + # default: off + atc off; + } +} +``` + +## ATC和flash的兼容性 + +开启ATC之后,flash客户端播放SRS流时,流的起始时间不是0而是ATC时间。需要调整时间的包: +* sequence header: 调整为第一个音视频包的时间。若有gop cache,则调整为gop cache中的第一个音视频包的时间。 +* metadata: 调整为第一个音视频包的时间。nginx-rtmp没有调整metadata包的时间(为0),所以平均每20次就有一次卡死。 + +经过测试,SRS打开和关闭ATC,flash播放器都能播放SRS的RTMP流。 + +## ATC和编码器 + +编码器开启atc之后,若在metadata中自动写入"bravo_atc"="true",srs会自动的开启atc。 + +可以禁用这个功能: + +```bash +vhost atc.srs.com { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # whether enable the auto atc, + # if enabled, detect the bravo_atc="true" in onMetaData packet, + # set atc to on if matched. + # always ignore the onMetaData if atc_auto is off. 
+ # default: off + atc_auto off; + } +} +``` + +将自动atc关闭即可。 + +Winlin 2014.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/rtmp-atc) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp-handshake.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp-handshake.md new file mode 100644 index 00000000..3e3bf503 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp-handshake.md @@ -0,0 +1,33 @@ +--- +title: RTMP 握手 +sidebar_label: RTMP 握手 +hide_title: false +hide_table_of_contents: false +--- + +# RTMP Handshake + +rtmp 1.0规范中,指定了RTMP的握手协议: +* c0/s0:一个字节,说明是明文还是加密。 +* c1/s1: 1536字节,4字节时间,4字节0x00,1528字节随机数 +* c2/s2: 1536字节,4字节时间1,4字节时间2,1528随机数和s1相同。 +这个就是srs以及其他开源软件所谓的simple handshake,简单握手,标准握手,FMLE也是使用这个握手协议。 + +Flash播放器连接服务器时,若服务器只支持简单握手,则无法播放h264和aac的流,可能是adobe的限制。adobe将简单握手改为了有一系列加密算法的复杂握手(complex handshake) ,详细协议分析参考[变更的RTMP握手](http://blog.csdn.net/win_lin/article/details/13006803) + +下表为总结: + +| 方式 | 依赖库 | 播放器 | 客户端 | SRS | 用途 | +| ---- | ----- | --------------------- | -------- | --- | ---- | +| Simple
标准握手<br/>简单握手 | 不依赖 | vp6+mp3/speex | 所有 | 支持 | 编码器,譬如FMLE,FFMPEG<br/>srs-librtmp(两种都支持,推荐用Simple) |
+| Complex<br/>复杂握手 | openssl | vp6+mp3/speex<br/>h264+aac | Flash | 支持 | 主要是Flash播放器播放H264+aac流时需要,
其他都不需要 | + +播放器(Flash Player): Flash播放器支持的编码。 + +备注:SRS编译时若打开了SSL选项(--with-ssl),SRS会先使用复杂握手和客户端握手,若复杂握手失败,则尝试简单握手。 + +Winlin 2014.2 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/rtmp-handshake) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp-pk-http.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp-pk-http.md new file mode 100644 index 00000000..748f50e1 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp-pk-http.md @@ -0,0 +1,90 @@ +--- +title: 分发方式比较 +sidebar_label: 分发方式比较 +hide_title: false +hide_table_of_contents: false +--- + +# 分发方式比较 + +互联网上的两种主要的分发方式:直播和WebRTC,什么时候用谁,完全决定于应用场景。 + +* 直播:[HLS](./hls.md),[RTMP](./rtmp.md)和[HTTP-FLV](./flv.md),主要是娱乐和教育场景。 +* WebRTC:[RTC](./webrtc.md),主要应用于通话,直播连麦,教育等场景。 + +还有其他的分发方式,这些分发方式不属于互联网常见和通用的方式,不予以比较: +* UDP:譬如YY的实时应用,视频会议等等,或者RTSP之类。这类应用的特点就是实时性要求特别高,以毫秒计算。TCP家族协议根本就满足不了要求,所以HTTP/TCP都不靠谱。这类应用没有通用的方案,必须自己实现分发(服务端)和播放(客户端)。 +* P2P:譬如RTMFP或者各家自己的协议。这类应用的特点是节省带宽。目前PC/flash上的RTMFP比较成熟,Android上的P2P属于起步群雄纷争标准不一,IOS上P2P应该没有听说过。 +* RTSP:这种不是互联网上的主要应用,在其他领域譬如安防等有广泛应用。 + +另外,HTTP的也分为几种: +* HTTP progressive:早期流媒体服务器分发http文件时,以普通的http文件分发,这种叫做渐进式下载,意思就是如果文件很大譬如1小时时长1GB大小,想从中间开始播放是不行的。但这种方式已经是作古了,很多http服务器支持http文件的seek,就是从中间开始播放。 +* HTTP stream:支持seek的HTTP流,譬如各家视频网站的点播分发方式。或者稍微复杂点的,譬如把一个大文件切几段之后分发。目前在pc/flash上点播国内的主流分发是这种方式。 +* HLS:这种是现在适配方式最广(除了flash, 需要额外的as库支持),在PC上有vlc,Android/IOS原生播放器就支持播放HLS,HTML5里面的url可以写HLS地址。总之,在移动端是以HLS为主。 +* HDS:adobe自己的HLS,一坨屎。 +* DASH:各家提出的HLS,目前还没有广泛应用。 + +对比以下互联网上用的流媒体分发方式: +* HLS:apple的HLS,支持点播和直播。 +* HTTP:即HTTP stream,各家自己定义的http流,应用于国内点播视频网站。 +* RTMP:直播应用,对实时性有一定要求,以PC为主。 + +## RTMP + +RTMP本质上是流协议,主要的优势是: +* 实时性高:RTMP的实时性在3秒之内,经过多层CDN节点分发后,实时性也在3秒左右。在一些实时性有要求的应用中以RTMP为主。 +* 支持加密:RTMPE和RTMPS为加密协议。虽然HLS也有加密,但在PC平台上flash对RTMPE/RTMPS支持应该比较不错。 +* 稳定性高:在PC平台上flash播放的最稳定方式是RTMP,如果做CDN或者大中型集群分发,选择稳定性高的协议一定是必要的。HTTP也很稳定,但HTTP是在协议上稳定;稳定性不只是服务端的事情,在集群分发,服务器管理,主备切换,客户端的支持上,RTMP在PC分发这种方式上还是很有优势。 +* 编码器接入:编码器输出到互联网(还可以输出为udp组播之类广电应用),主要是RTMP。譬如专业编码器,或者flash网页编码器,或者FMLE,或者ffmpeg,或者安防摄像头,都支持RTMP输出。若需要接入多种设备,譬如提供云服务;或者希望网页直接采集摄像头;或者能在不同编码器之间切换,那么RTMP作为服务器的输入协议会是最好的选择。 +* 系统容错:容错有很多种级别,RTMP的集群实现时可以指定N上层,在错误时切换不会影响到下层或者客户端,另外RTMP的流没有标识,切到其他的服务器的流也可以继续播放。HLS的流热备切换没有这么容易。若对于直播的容错要求高,譬如降低出问题的概率,选择RTMP会是很好的选择。 +* 可监控:在监控系统或者运维系统的角度看,流协议应该比较合适监控。HTTP的流监控感觉没有那么完善。这个不算绝对优势,但比较有利。 + +RTMP的劣势是: +* 协议复杂:RTMP协议比起HTTP复杂很多,导致性能低下。测试发现两台服务器直连100Gbps网络中,HTTP能跑到60Gbps,但是RTMP只能跑到10Gbps,CPU占用率RTMP要高很多。复杂协议导致在研发,扩展,维护软件系统时都没有HTTP那么方便,所以HTTP服务器现在大行其道,apache/nginx/tomcat,N多HTTP服务器;而RTMP协议虽然早就公开,但是真正在大规模中分发表现良好的没有,adobe自己的FMS在CDN中都经常出问题。 +* Cache麻烦:流协议做缓存不方便。譬如点播,若做RTMP流协议,边缘缓存RTMP会很麻烦。如果是HTTP,缓存其实也很麻烦,但是HTTP服务器的缓存已经做了很久,所以只需要使用就好。这是为何点播都走HTTP的原因。 + +## HTTP + +HTTP说的是HTTP流,譬如各大视频网站的点播流。 + +HTTP本质上还是文件分发,主要的优势是: +* 性能很高:HTTP的性能没得说,协议简单,各种HTTP高性能服务器也完善。如果分发的量特别大,譬如点播视频网站,没有直播的实时性要求,HTTP协议是最好选择。 +* 没有碎片:HTTP比HLS没有碎片,HTTP分发大文件会比小文件分发方便很多。特别是存储,小文件的性能超低,是个硬伤。 +* 穿墙:互联网不可能不开放HTTP协议,否则就不叫互联网。所以任何端口封掉,也不会导致HTTP流看不了。(不过RTMP也能穿墙,用RTMPT协议)。 + +HTTP的劣势是: +* 实时性差:基本上没有实时性这个说法。 +* 原生支持不好:就PC上flash对于HTTP流支持还可以,Android/IOS上似乎只能mp4,总之移动端对于HTTP的支持不是很完善。 + +## HLS + +HLS是Apple的开放标准,在Android3?以上也原生支持. 
+ +HLS的主要优势是: +* 性能高:和HTTP一样。 +* 穿墙:和HTTP一样。 +* 原生支持很好:IOS上支持完美。Android上支持差些。PC/flash上现在也有各种as插件支持HLS。 + +HLS的主要劣势是: +* 实时性差:基本上HLS的延迟在10秒以上。 +* 文件碎片:若分发HLS,码流低,切片较小时,小文件分发不是很友好。特别是一些对存储比较敏感的情况,譬如源站的存储,嵌入式的SD卡。 + +## 应用方式 + +参考[HTTP](./hls.md)和[RTMP](./rtmp.md) + +推荐的方式是: +* 编码器输出RTMP协议。 +* 流媒体系统接入使用RTMP协议。 +* 流媒体系统内部直播分发使用RTMP。 +* PC+直播+实时性要求高:使用flash播放RTMP。 +* PC+直播+没有实时性要求:使用RTMP或者HLS均可。 +* PC+点播:使用HTTP或者HLS。 +* Apple IOS/OSX:都使用HLS(实时性要求高得自己解析RTMP,或者使用外部库,譬如[https://www.vitamio.org](https://www.vitamio.org)) +* Andorid:和IOS一样,不过可以确定的是可以自己开发支持RTMP。 + +Winlin 2014.4 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/rtmp-pk-http) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp-url-vhost.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp-url-vhost.md new file mode 100644 index 00000000..43099fe4 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp-url-vhost.md @@ -0,0 +1,271 @@ +--- +title: RTMP URL +sidebar_label: RTMP URL +hide_title: false +hide_table_of_contents: false +--- + +# URL Specification + +RTMP的url其实很简单,vhost其实也没有什么新的概念,但是对于没有使用过的同学来讲,还是很容易混淆。 +几乎每个新人都必问的问题:RTMP那个URL推流时应该填什么,什么是vhost,什么是app? + +Vhost(Virtual Host)就是虚拟域,用来隔离客户或业务。如下图所示: + +![](/img/doc-main-concepts-rtmp-url-vhost-001.png) + +RTMP和HLS的优势参考:[HLS](./hls.md) + +## Use Scenarios + +Vhost的主要应用场景包括: +* 一个分发网络支持多个客户:譬如CDN,一个分发网络中,有N个客户公用一套流媒体系统,如何区分用户,计费,监控等等?通过app么?大家可能都叫做live之类。最好是通过各自的域名。 +* 不同的应用配置:譬如FMLE推上来的流是h264+mp3,可以将音频转码后放到其他的vhost分发hls,这样接入h264+mp3的vhost就不用切hls。 + +总之,vhost作为应用配置的单元,能隔离客户,应用不同的配置。 + +## Standard RTMP URL + +标准RTMP URL指的是最大兼容的RTMP URL,基本上所有的服务器和播放器都能识别的URL,和HTTP URL其实很相似,例如: + +| HTTP | Schema | Host | Port | App | Stream | +| ----- | ----- | ----- | ----| ---- | ---- | +| http://192.168.1.10:80/players/srs_player.html | http | 192.168.1.10 | 80 | players | srs_player.html| +| rtmp://192.168.1.10:1935/live/livestream | rtmp | 192.168.1.10 | 1935 | live | livestream | + +其中: +* Schema:协议头,HTTP为HTTP或HTTPS,RTMP为RTMP/RTMPS/RTMPE/RTMPT等众多协议,还有新出的RTMFP。 +* Host:主机,表示要连接的主机,可以为主机DNS名称或者IP地址。商用时,一般不会用IP地址,而是DNS名称,这样可以用CDN分发内容(CDN一般使用DNS调度,即不同网络和地理位置的用户,通过DNS解析到的IP不一样,实现用户的就近访问)。 +* Port:端口,HTTP默认为80,RTMP默认为1935。当端口没有指定时,使用默认端口。 +* Path:路径,HTTP访问的文件路径。 +* App:RTMP的Application(应用)名称,可以类比为文件夹。以文件夹来分类不同的流,没有特殊约定,可以任意划分。 +* Stream:RTMP的Stream(流)名称,可以类比为文件。 + +## NO Vhost + +其实,vhost大多数用户都用不到,而且不推荐用,有点复杂。一般的用户用app就可以了。因为vhost/app/stream,只是一个分类方法而已;vhost需要在配置文件中说明,app/stream都不需要配置。 + +什么时候用vhost?如果你是提供服务,譬如你有100个客户,都要用一套平台,走同样的流媒体服务器分发。那可以每个客户一个vhost,这样他们的app和stream可以相同都可以。 + +一般的用法,举个例子,有个视频网站,自己搭建服务器,所以只有他自己一个客户,就不要用vhost了,直接用app就足够了。假设视频网站提供聊天服务,聊天有不同的话题类型,每个话题就是一个app,譬如:军事栏目,读书栏目,历史栏目三个分类,每个分类下面有很多聊天室。只要这么配置就好: + +```bash +listen 1935; +vhost __defaultVhost__ { +} +``` + +生成网页时,譬如军事栏目的网页,都用app名称为`military`,某个聊天室叫做`火箭`,这个页面的流可以用:`rtmp://yourdomain.com/military/rock`,编码器也推这个流,所有观看这个`军事栏目/火箭`聊天室的页面的人,都播放这个流。 + +军事栏目另外的网页,都用同样的app名称`military`,但是流不一样,譬如某个聊天室叫做`雷达`,这个页面的流可以用:`rtmp://yourdomain.com/military/radar`,推流和观看一样。 + +如此类推,军事栏目页面生成时,不用更改srs的任何配置。也就是说,新增聊天室,不用改服务器配置;新增分类,譬如加个`公开课`的聊天室,也不用改服务器配置。足够简单! 
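+
+举个推流的例子(示意:域名yourdomain.com请换成你自己的,源文件用SRS自带的doc/source.flv):用FFmpeg把流推到上面军事栏目`火箭`聊天室的地址,观众播放同一个地址即可:
+
+```bash
+ffmpeg -re -i doc/source.flv -c copy -f flv rtmp://yourdomain.com/military/rock
+```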
+ +另外,读书栏目可以用app名称为`reader`,栏目下的某个聊天室叫`红楼梦`,这个页面的流可以用:`rtmp://yourdomain.com/reader/red_mansion`,所有在这个聊天室的人都是播放这个流。 + +## Vhost Use Scenarios + +RTMP的Vhost和HTTP的Vhost概念是一样的:虚拟主机。详见下表(假设域名demo.srs.com被解析到IP为192.168.1.10的服务器): + +| HTTP | Host | Port | Vhost | +| --- | --- | --- | ----- | +| http://demo.srs.com:80/players/srs_player.html | 192.168.1.10 | 80 | demo.srs.com | +| rtmp://demo.srs.com:1935/live/livestream | 192.168.1.10 | 1935 | demo.srs.com | + +Vhost主要的作用是: +* 支持多用户:当一台服务器需要服务多个客户,譬如CDN有cctv(央视)和wasu(华数传媒)两个客户时,如何隔离他们两个的资源?相当于不同的用户共用一台计算机,他们可以在自己的文件系统建立同样的文件目录结构,但是彼此不会冲突。 +* 域名调度:CDN分发内容时,需要让用户访问离自己最近的边缘节点,边缘节点再从源站或上层节点获取数据,达到加速访问的效果。一般的做法就是Host是DNS域名,这样可以根据用户的信息解析到不同的节点。 +* 支持多配置:有时候需要使用不同的配置,考虑一个支持多终端(PC/Apple/Android)的应用,PC上RTMP分发,Apple和Android是HLS分发,如何让PC延迟最低,同时HLS也能支持,而且终端播放时尽量地址一致(降低终端开发难度)?可以使用两个Vhost,PC和HLS;PC配置为最低延迟的RTMP,并且将流转发给HLS的Vhost,可以对音频转码(可能不是H264/AAC)后切片为HLS。PC和HLS这两个Vhost的配置肯定是不一样的,播放时,流名称是一样,只需要使用不同的Host就可以。 + +### Multiple Customers + +假设cctv和wasu都运行在一台边缘节点(192.168.1.10)上,用户访问这两个媒体的流时,Vhost的作用见下表: + +| RTMP | Host | Port | Vhost | App | Stream | +| --- | --- | -------| ----- | ---| --------| +| rtmp://show.cctv.cn/live/livestream | 192.168.1.10 | 1935 | show.cctv.cn | live | livestream | +| rtmp://show.wasu.cn/live/livestream | 192.168.1.10 | 1935 | show.wasu.cn | live | livestream | + +在边缘节点(192.168.1.10)上的SRS,需要配置Vhost,例如: + +```bash +listen 1935; +vhost show.cctv.cn { +} +vhost show.wasu.cn { +} +``` + +### DNS GSLB + +详细参考DNS和CDN的实现。 + +### Config Unit + +以上面举的例子,若cctv需要延迟最低(意味着启动时只有声音,画面是黑屏),而wasu需要快速启动(打开就能看到视频,服务器cache了最后一个gop,延迟会较大)。 + +只需要对这两个Vhost进行不同的配置,例如: + +```bash +listen 1935; +vhost show.cctv.cn { + chunk_size 128; +} +vhost show.wasu.cn { + chunk_size 4906; +} +``` + +总之,这两个Vhost的配置完全没有关系,不会相互影响。 + +## Default Vhost + +FMS的\_\_defaultVhost\_\_是默认的vhost,当用户请求的vhost没有匹配成功时,若配置了defaultVhost,则使用它来提供服务。若匹配失败,也没有defaultVhost,则返回错误。 + +譬如,服务器192.168.1.10上的SRS配置如下: + +```bash +listen 1935; +vhost demo.srs.com { + enabled on; +} +``` + +那么,当用户访问以下vhost时: +* rtmp://demo.srs.com/live/livestream:成功,匹配vhost为demo.srs.com +* rtmp://192.168.1.10/live/livestream:失败,没有找到vhost,也没有defaultVhost。 + +defaultVhost和其他vhost的规则一样,只是用来匹配那些没有匹配成功的vhost的请求的。 + +## Locate Vhost + +如何访问某台服务器上的Vhost?有两个方法: +* 配置hosts:因为Vhost实际上就是DNS解析,所以可以配置客户端的hosts,将域名(Vhost)解析到指定的服务器,就可以访问这台服务器上的指定的vhost。 +* 使用stream的参数:需要服务器支持。在stream后面带参数指定要访问的Vhost。SRS支持`?vhost=VHOST`和`?domain=VHOST`这两种方式。 + +普通用户不用这么麻烦,直接访问RTMP地址就好了,有时候运维需要看某台机器上的Vhost的流是否有问题,就需要这种特殊的访问方式。考虑下面的例子: + +```bash +RTMP URL: rtmp://demo.srs.com/live/livestream +边缘节点数目:50台 +边缘节点IP:192.168.1.100 至 192.168.1.150 +边缘节点SRS配置: + listen 1935; + vhost demo.srs.com { + mode remote; + origin: xxxxxxx; + } +``` + +各种访问方式见下表: + +| 用户 | RTMP URL | hosts设置 | 目标 | +| ---- | -------- | ------- | ------ | +| 普通用户 | rtmp://demo.srs.com/live/livestream | 无 | 由DNS
解析到指定边缘 |
+| 运维 | rtmp://demo.srs.com/live/livestream | 192.168.1.100 demo.srs.com | 查看192.168.1.100上的流 |
+| 运维 | rtmp://192.168.1.100/live?<br/>vhost=demo.srs.com/livestream | 无 | 查看192.168.1.100上的流 |
+| 运维 | rtmp://192.168.1.100/live
...vhost...demo.srs.com/livestream | 无 | 查看192.168.1.100上的流| + +访问其他服务器的流也类似。 + +## Parameters in URL + +RTMP URL一般是不带参数,类似于http的query,有时候为了特殊的要求,会在RTMP URL中带参数,譬如: +* Vhost:前面讲过,在stream后面加参数,可以访问指定服务器的指定Vhost。这个SRS的特殊约定,方便排错。 +* token认证:SRS还未实现。在连接服务器时,在app后面指定token(方式和vhost一样),服务器可以取出token,进行验证,若验证失败则断开连接,这种是比Refer更高级的防盗链。 + +例如,下面都是SRS可以识别的URL参数: + +* `rtmp://192.168.1.100/live/livestream?vhost=demo.srs.com` +* `rtmp://192.168.1.100/live/livestream?domain=demo.srs.com` +* `rtmp://192.168.1.100/live/livestream?token=xxx` +* `rtmp://192.168.1.100/live/livestream?vhost=demo.srs.com&token=xxx` + +> Note: 之前由于FMLE定义的参数,是传在app中的,这会造成困扰,不推荐使用。 + +除了RTMP,其他协议也是一样的用法,比如: + +* `http://192.168.1.100/live/livestream.flv?vhost=demo.srs.com&token=xxx` +* `http://192.168.1.100/live/livestream.m3u8?vhost=demo.srs.com&token=xxx` +* `webrtc://192.168.1.100/live/livestream?vhost=demo.srs.com&token=xxx` + +> Note: SRT由于协议的特殊性,无法使用这种方式,详细请参考[SRT Parameters](./srt.md) + +## URL of SRS + +SRS只做简化的事情,绝对不把简单的事情搞复杂。 + +SRS的RTMP URL使用标准的RTMP URL,一般不需要对app和stream加参数,或者更改他们的意义。除了两个地方: +* vhost支持参数访问:为了方便运维访问某台服务器的vhost,不需要设置hosts。不影响普通用户。 +* 支持token验证:为了支持token验证,在app后面带参数,这个是token验证必须的方式。 + +另外,SRS建议用户使用一级app和一级stream,不使用多级app和多级stream。譬如: + +```bash +// 不推荐使用的多级app或stream +rtmp://demo.srs.com/show/live/livestream +rtmp://demo.srs.com/show/live/livestream/2013 +``` + +srs播放器(srs_player)和srs编码器(srs_publisher)不支持多级app和stream,他们认为最后一个斜杠(/)后面的就是stream,前面的是app。即: + +```bash +// srs_player和srs_publisher的解析方式: +// play or publish the following rtmp URL: +rtmp://demo.srs.com/show/live/livestream/2013 +schema: rtmp +host/vhost: demo.srs.com +app: show/live/livestream +stream: 2013 +``` + +做此简化的好处是,srs播放器和编码器,只需要指定一个url,而且两者的url是一样的。 + +SRS常见的三种RTMP URL,详细见下表: + +| URL | 说明 | +| ---- | ------ | +| rtmp://demo.srs.com/live/livestream | 普通用户的标准访问方式,观看直播流 | +| rtmp://192.168.1.10/live/livestream?vhost=demo.srs.com | 运维对特定服务器排错 | +| rtmp://demo.srs.com/live/livestream?key=ER892ID839KD9D0A1D87D | token验证用户 | + +## Example Vhosts in SRS + +SRS的full.conf配置文件中,有很多Vhost,主要是为了说明各个功能,每个功能都单独列出一个vhost。所有功能都放在demo.srs.com这个vhost中。 + +| Category | Vhost | 说明 | +| -------- | ----- | ---- | +| RTMP | __defaultVhost__ | 默认Vhost的配置,只支持RTMP功能 | +| RTMP | chunksize.vhost.com | 如何设置chunk size的实例。其他Vhost将此配置打开,即可设置chunk size。| +| Forward | same.vhost.forward.vhost.com | Forward实例:将流转发到同一个vhost。| +| HLS | with-hls.vhost.com | HLS实例:如何开启HLS,以及HLS的相关配置。| +| HLS | no-hls.vhost.com | HLS实例:如何禁用HLS。| +| RTMP | min.delay.com | RTMP最低延迟:如何配置最低延迟的RTMP流 | +| RTMP | refer.anti_suck.com | Refer实例:如何配置Refer防盗链。| +| RTMP | removed.vhost.com | 禁用vhost实例:如何禁用vhost。| +| Callback | hooks.callback.vhost.com | 设置http callback的实例,当这些事件发生时,SRS会调用指定的http api。其他Vhost将这些配置打开,就可以支持http callback。 | +| Transcode | mirror.transcode.vhost.com | 转码实例:使用ffmpeg的实例filter,将视频做镜像翻转处理。其他Vhost添加这个配置,就可以对流进行转码。
注:所有转码的流都需要重新推送到SRS,使用不同的流名称(vhost和app也可以不一样)。| +| Transcode | crop.transcode.vhost.com | 转码实例:剪裁视频filter。其他vhost添加此filter,即可对视频进行剪裁。
注:所有转码的流都需要重新推送到SRS,使用不同的流名称(vhost和app也可以不一样)。 | +| Transcode | logo.transcode.vhost.com | 转码实例:添加图片/视频水印。其他vhost添加这些配置,可以加图片/视频水印。
注:所有转码的流都需要重新推送到SRS,使用不同的流名称(vhost和app也可以不一样)。 | +| Transcode | audio.transcode.vhost.com | 转码实例:只对音频转码。其他vhost添加此配置,可只对音频转码。
注:所有转码的流都需要重新推送到SRS,使用不同的流名称(vhost和app也可以不一样)。| +| Transcode | copy.transcode.vhost.com | 转码实例:只转封装。类似于forward功能。| +| Transcode | all.transcode.vhost.com | 转码实例:对上面的实例的汇总。| +| Transcode | ffempty.transcode.vhost.com | 调用ffempty程序转码,这个只是一个stub,打印出参数而已。用作调试用,看参数是否传递正确。| +| Transcode | app.transcode.vhost.com | 转码实例:只对匹配的app的流进行转码。| +| Transcode | stream.transcode.vhost.com | 转码实例:只对匹配的流进行转码。| + +SRS的demo.conf配置文件中,包含了demo用到的一些vhost。 + +| Category | Vhost | 说明 | +| -------- | ----- | ---- | +| DEMO | players | srs_player播放的演示流,按照Readme的Step会推流到这个vhost,demo页面打开后播放的流就是这个vhost中的流 | +| DEMO | players_pub | srs编码器推流到players这个vhost,然后转码后将流推送到这个vhost,并切片为hls,srs编码器播放的带字幕的流就是这个vhost的流 | +| DEMO | players_pub_rtmp | srs编码器演示页面中的低延时播放器,播放的就是这个vhost的流,这个vhost关闭了gop cache,关闭了hls,让延时最低(在1秒内)| +| DEMO | demo.srs.com | srs的演示vhost,Readme的step最后的12路流演示,以及播放器的12路流延时,都是访问的这个vhost。包含了SRS所有的功能。 | +| Others | dev | 开发用的,可忽略 | + +Winlin 2014.2 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/rtmp-url-vhost) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp.md new file mode 100644 index 00000000..19776de4 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/rtmp.md @@ -0,0 +1,321 @@ +--- +title: RTMP +sidebar_label: RTMP +hide_title: false +hide_table_of_contents: false +--- + +# RTMP + +RTMP是直播的事实标准,这么多年以来一直是使用最广泛的直播协议。 + +然而Adobe公司没有一直更新RTMP协议,也没有提交给标准组织比如RFC,因此很多新功能都没有支持,比如HEVC或Opus。 +直到2023.03,终于[Enhanced RTMP](https://github.com/veovera/enhanced-rtmp)项目建立,开始支持了HEVC和AV1, +SRS和OBS已经支持了基于Enhanced RTMP的[HEVC](https://github.com/veovera/enhanced-rtmp/issues/4)编码。 + +在流的制作方面,最近几年,SRT、WebRTC和SRT增长迅速,很多设备都支持了SRT和RIST协议。你也可以用WebRTC做直播。 + +在流的分发上,HLS是使用最广泛的协议,所有CDN和设备都支持,比如PC,iOS,Android或平板电脑。当然HLS延迟比较大(3~5s+), +你可以选择HTTP-FLV,HTTP-TS或者WebRTC,如果需要降低延迟。 + +至今为止,在内容制作领域,RTMP还是使用最广泛的协议。比如你可以用OBS推流到B站、视频号或快手。如果要对接一个广播设备, +或者推流到某个平台,那么RTMP是最好的选择,几乎都会支持。 + +## Usage + +SRS内置RTMP的支持,可以用[docker](./getting-started.md)或者[从源码编译](./getting-started-build.md): + +```bash +docker run --rm -it -p 1935:1935 registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 \ + ./objs/srs -c conf/rtmp.conf +``` + +使用 [FFmpeg(点击下载)](https://ffmpeg.org/download.html) 或 [OBS(点击下载)](https://obsproject.com/download) 推流: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +打开下面的页面播放流(若SRS不在本机,请将localhost更换成服务器IP): + +* RTMP (by [VLC](https://www.videolan.org/)): `rtmp://localhost/live/livestream` + +SRS支持将RTMP转换成其他协议,下面会详细描述。 + +## Config + +RTMP协议相关配置如下: + +```bash +vhost __defaultVhost__ { + # whether enable min delay mode for vhost. + # for min latency mode: + # 1. disable the publish.mr for vhost. + # 2. use timeout for cond wait for consumer queue. + # @see https://github.com/ossrs/srs/issues/257 + # default: off (for RTMP/HTTP-FLV) + # default: on (for WebRTC) + min_latency off; + + # whether enable the TCP_NODELAY + # if on, set the nodelay of fd by setsockopt + # Overwrite by env SRS_VHOST_TCP_NODELAY for all vhosts. + # default: off + tcp_nodelay off; + + # the default chunk size is 128, max is 65536, + # some client does not support chunk size change, + # vhost chunk size will override the global value. + # Overwrite by env SRS_VHOST_CHUNK_SIZE for all vhosts. + # default: global chunk size. + chunk_size 128; + + # The input ack size, 0 to not set. 
+ # Generally, it's set by the message from peer, + # but for some peer(encoder), it never send message but use a different ack size. + # We can chnage the default ack size in server-side, to send acknowledge message, + # or the encoder maybe blocked after publishing for some time. + # Overwrite by env SRS_VHOST_IN_ACK_SIZE for all vhosts. + # Default: 0 + in_ack_size 0; + + # The output ack size, 0 to not set. + # This is used to notify the peer(player) to send acknowledge to server. + # Overwrite by env SRS_VHOST_OUT_ACK_SIZE for all vhosts. + # Default: 2500000 + out_ack_size 2500000; + + # the config for FMLE/Flash publisher, which push RTMP to SRS. + publish { + # about MR, read https://github.com/ossrs/srs/issues/241 + # when enabled the mr, SRS will read as large as possible. + # Overwrite by env SRS_VHOST_PUBLISH_MR for all vhosts. + # default: off + mr off; + # the latency in ms for MR(merged-read), + # the performance+ when latency+, and memory+, + # memory(buffer) = latency * kbps / 8 + # for example, latency=500ms, kbps=3000kbps, each publish connection will consume + # memory = 500 * 3000 / 8 = 187500B = 183KB + # when there are 2500 publisher, the total memory of SRS at least: + # 183KB * 2500 = 446MB + # the recommended value is [300, 2000] + # Overwrite by env SRS_VHOST_PUBLISH_MR_LATENCY for all vhosts. + # default: 350 + mr_latency 350; + + # the 1st packet timeout in ms for encoder. + # Overwrite by env SRS_VHOST_PUBLISH_FIRSTPKT_TIMEOUT for all vhosts. + # default: 20000 + firstpkt_timeout 20000; + # the normal packet timeout in ms for encoder. + # Overwrite by env SRS_VHOST_PUBLISH_NORMAL_TIMEOUT for all vhosts. + # default: 5000 + normal_timeout 7000; + # whether parse the sps when publish stream. + # we can got the resolution of video for stat api. + # but we may failed to cause publish failed. + # @remark If disabled, HLS might never update the sps/pps, it depends on this. + # Overwrite by env SRS_VHOST_PUBLISH_PARSE_SPS for all vhosts. + # default: on + parse_sps on; + # When parsing SPS/PPS, whether try ANNEXB first. If not, try IBMF first, then ANNEXB. + # Overwrite by env SRS_VHOST_PUBLISH_TRY_ANNEXB_FIRST for all vhosts. + # default: on + try_annexb_first on; + # The timeout in seconds to disconnect publisher when idle, which means no players. + # Note that 0 means no timeout or this feature is disabled. + # Note that this feature conflicts with forward, because it disconnect the publisher stream. + # Overwrite by env SRS_VHOST_PUBLISH_KICKOFF_FOR_IDLE for all vhosts. + # default: 0 + kickoff_for_idle 0; + } + + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # whether cache the last gop. + # if on, cache the last gop and dispatch to client, + # to enabled fast startup for client, client play immediately. + # if off, send the latest media data to client, + # client need to wait for the next Iframe to decode and show the video. + # set to off if requires min delay; + # set to on if requires client fast startup. + # Overwrite by env SRS_VHOST_PLAY_GOP_CACHE for all vhosts. + # default: on + gop_cache off; + + # Limit the max frames in gop cache. It might cause OOM if video stream has no IDR frame, so we limit to N + # frames by default. Note that it's the size of gop cache, including videos, audios and other messages. + # Overwrite by env SRS_VHOST_PLAY_GOP_CACHE_MAX_FRAMES for all vhosts. + # default: 2500 + gop_cache_max_frames 2500; + + # the max live queue length in seconds. 
+ # if the messages in the queue exceed the max length, + # drop the old whole gop. + # Overwrite by env SRS_VHOST_PLAY_QUEUE_LENGTH for all vhosts. + # default: 30 + queue_length 10; + + # about the stream monotonically increasing: + # 1. video timestamp is monotonically increasing, + # 2. audio timestamp is monotonically increasing, + # 3. video and audio timestamp is interleaved/mixed monotonically increasing. + # it's specified by RTMP specification, @see 3. Byte Order, Alignment, and Time Format + # however, some encoder cannot provides this feature, please set this to off to ignore time jitter. + # the time jitter algorithm: + # 1. full, to ensure stream start at zero, and ensure stream monotonically increasing. + # 2. zero, only ensure stream start at zero, ignore timestamp jitter. + # 3. off, disable the time jitter algorithm, like atc. + # @remark for full, correct timestamp only when |delta| > 250ms. + # @remark disabled when atc is on. + # Overwrite by env SRS_VHOST_PLAY_TIME_JITTER for all vhosts. + # default: full + time_jitter full; + # vhost for atc for hls/hds/rtmp backup. + # generally, atc default to off, server delivery rtmp stream to client(flash) timestamp from 0. + # when atc is on, server delivery rtmp stream by absolute time. + # atc is used, for instance, encoder will copy stream to master and slave server, + # server use atc to delivery stream to edge/client, where stream time from master/slave server + # is always the same, client/tools can slice RTMP stream to HLS according to the same time, + # if the time not the same, the HLS stream cannot slice to support system backup. + # + # @see http://www.adobe.com/cn/devnet/adobe-media-server/articles/varnish-sample-for-failover.html + # @see http://www.baidu.com/#wd=hds%20hls%20atc + # + # @remark when atc is on, auto off the time_jitter + # Overwrite by env SRS_VHOST_PLAY_ATC for all vhosts. + # default: off + atc off; + # whether use the interleaved/mixed algorithm to correct the timestamp. + # if on, always ensure the timestamp of audio+video is interleaved/mixed monotonically increase. + # if off, use time_jitter to correct the timestamp if required. + # @remark to use mix_correct, atc should on(or time_jitter should off). + # Overwrite by env SRS_VHOST_PLAY_MIX_CORRECT for all vhosts. + # default: off + mix_correct off; + + # whether enable the auto atc, + # if enabled, detect the bravo_atc="true" in onMetaData packet, + # set atc to on if matched. + # always ignore the onMetaData if atc_auto is off. + # Overwrite by env SRS_VHOST_PLAY_ATC_AUTO for all vhosts. + # default: off + atc_auto off; + + # set the MW(merged-write) latency in ms. + # SRS always set mw on, so we just set the latency value. + # the latency of stream >= mw_latency + mr_latency + # the value recomment is [300, 1800] + # @remark For WebRTC, we enable pass-by-timestamp mode, so we ignore this config. + # default: 350 (For RTMP/HTTP-FLV) + # Overwrite by env SRS_VHOST_PLAY_MW_LATENCY for all vhosts. + # default: 0 (For WebRTC) + mw_latency 350; + + # Set the MW(merged-write) min messages. + # default: 0 (For Real-Time, min_latency on) + # default: 1 (For WebRTC, min_latency off) + # default: 8 (For RTMP/HTTP-FLV, min_latency off). + # Overwrite by env SRS_VHOST_PLAY_MW_MSGS for all vhosts. + mw_msgs 8; + + # the minimal packets send interval in ms, + # used to control the ndiff of stream by srs_rtmp_dump, + # for example, some device can only accept some stream which + # delivery packets in constant interval(not cbr). 
+ # @remark 0 to disable the minimal interval. + # @remark >0 to make the srs to send message one by one. + # @remark user can get the right packets interval in ms by srs_rtmp_dump. + # Overwrite by env SRS_VHOST_PLAY_SEND_MIN_INTERVAL for all vhosts. + # default: 0 + send_min_interval 10.0; + # whether reduce the sequence header, + # for some client which cannot got duplicated sequence header, + # while the sequence header is not changed yet. + # Overwrite by env SRS_VHOST_PLAY_REDUCE_SEQUENCE_HEADER for all vhosts. + # default: off + reduce_sequence_header on; + } +} +``` + +> Note: 这里只是推流和拉流的配置,还有些其他的配置是在其他地方的,比如RTMP转[HTTP-FLV](./flv.md#config)或HTTP-TS等。 + +## On Demand Live Streaming + +有些场景下,是有需要播放时,才会邀请开始推流: + +1. 推流端连接到系统,但并不会推流到SRS。 +2. 播放器连接到系统,向系统请求播放流。 +3. 系统通知推流端,开始推流到SRS。 +4. 播放器从SRS拉流播放。 + +> Note: 请注意`系统`是指你的业务系统,而不是SRS。 + +这就是我们所说的`按需直播`或`按需推流`。如果播放器停止拉流,会怎么样? + +1. 系统需要通知推流端停止推流。 +2. 或者,在最后一个播放器停止拉流时,SRS等待一定时间后断开推流。 + +推荐第2个解决方案,这样这个功能就非常容易使用。你的系统不再需要通知推流端停止推流,因为SRS会主动断开。你只需要开启如下配置: + +```bash +# The timeout in seconds to disconnect publisher when idle, which means no players. +# Note that 0 means no timeout or this feature is disabled. +# Note that this feature conflicts with forward, because it disconnect the publisher stream. +# Overwrite by env SRS_VHOST_PUBLISH_KICKOFF_FOR_IDLE for all vhosts. +# default: 0 +kickoff_for_idle 0; +``` + +详细过程可以参考[这个PR](https://github.com/ossrs/srs/pull/3105)。 + +## Converting RTMP to HLS + +如果需要将RTMP转HLS协议,请参考[HLS](./hls.md). + +## Converting RTMP to HTTP-FLV + +如果需要将RTMP转HTTP-FLV或HTTP-TS协议,请参考[HTTP-FLV](./flv.md). + +## Converting RTMP to WebRTC + +如果需要将RTMP转WebRTC协议,请参考[WebRTC: RTMP to RTC](./webrtc.md#rtmp-to-rtc). + +## Converting RTMP to MPEGTS-DASH + +如果需要将RTMP转DASH协议是,参考[DASH](./sample-dash.md). + +## Converting SRT to RTMP + +如果需要将SRT转RTMP协议,参考[SRT](./srt.md). + +## Converting WebRTC to RTMP + +如果需要将WebRTC协议转RTMP协议,参考[WebRTC: RTC to RTMP](./webrtc.md#rtc-to-rtmp). + +## RTMP Cluster + +如果需要支持很多播放器播放,参考[Edge Cluster](./edge.md). + +如果需要支持很多推流,或者很多路流,参考[Origin Cluster](./origin-cluster.md). + +关于流媒体的负载均衡,还有很多其他方案,可以参考[load balancing](../../../blog/load-balancing-streaming-servers). + +## Low Latency RTMP + +如果希望降低RTMP的延迟,请参考[LowLatency](./low-latency.md). + +## Timestamp Jitter + +SRS支持校准RTMP的时间戳,参考[Jitter](./time-jitter.md). + +如果希望SRS能保持原始时间戳,参考[ATC](./rtmp-atc.md). + +## Performance + +SRS使用writev实现高性能RTMP分发,参考[benchmark](./performance.md##performance-banchmark) + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/rtmp) \ No newline at end of file diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-arm.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-arm.md new file mode 100644 index 00000000..b4319e8d --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-arm.md @@ -0,0 +1,83 @@ +--- +title: ARM 部署 +sidebar_label: ARM 部署 +hide_title: false +hide_table_of_contents: false +--- + +# ARM上部署SRS实例 + +SRS可以在ARM上作为服务器运行,播放器可以从ARM设备上取流播放。 + +一般的ARM都可以直接编译,使用和上面的方法是一样的。 +某些编译非常慢,或者没有编译器的嵌入式平台,才需要交叉编译,请参考[这里](./arm.md). 
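+
+可以先确认目标机器的CPU架构,再决定是否需要交叉编译。下面的命令仅为示意:
+
+```bash
+# 查看当前机器的CPU架构
+uname -m
+# 输出 aarch64 或 armv7l:本身就是ARM机器,一般直接 ./configure && make 即可
+# 输出 x86_64:只有当目标设备是ARM、且不方便在设备上直接编译时,才考虑交叉编译
+```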
+ +## Build SRS directly + +### 第一步,获取SRS。 + +详细参考[GIT获取代码](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +或者使用git更新已有代码: + +```bash +git pull +``` + +### 第二步,编译SRS + +详细参考:[SrsLinuxArm](./arm.md) + +```bash +./configure && make +``` + +## Run SRS on ARM + +### 第三步,启动SRS + +详细参考:[SrsLinuxArm](./arm.md) + +```bash +./objs/srs -c conf/srs.conf +``` + +### 第四步,启动推流编码器 + +详细参考:[SrsLinuxArm](./arm.md) + +使用FFMPEG命令推流: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +## Play Stream + +在用户的Windows机器上观看流。 + +### 第五步,观看RTMP流 + +RTMP流地址为:`rtmp://192.168.1.170/live/livestream` + +可以使用VLC观看。 + +或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +Winlin 2014.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-arm) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-dash.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-dash.md new file mode 100644 index 00000000..012f4134 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-dash.md @@ -0,0 +1,111 @@ +--- +title: DASH 部署 +sidebar_label: DASH 部署 +hide_title: false +hide_table_of_contents: false +--- + +# DASH部署实例 + +SRS支持DASH的详细步骤。 + +**假设服务器的IP是:192.168.1.170** + +## 第一步,获取SRS + +详细参考[GIT获取代码](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +或者使用git更新已有代码: + +```bash +git pull +``` + +## 第二步,编译SRS + +详细参考[Build](./install.md) + +```bash +./configure && make +``` + +## 第三步,编写SRS配置文件 + +详细参考[DASH](https://github.com/ossrs/srs/issues/299#issuecomment-306022840) + +将以下内容保存为文件,譬如`conf/dash.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/dash.conf +listen 1935; +max_connections 1000; +daemon off; +srs_log_tank console; +http_server { + enabled on; + listen 8080; + dir ./objs/nginx/html; +} +vhost __defaultVhost__ { + dash { + enabled on; + dash_fragment 30; + dash_update_period 150; + dash_timeshift 300; + dash_path ./objs/nginx/html; + dash_mpd_file [app]/[stream].mpd; + } +} +``` + +## 第四步,启动SRS + +```bash +./objs/srs -c conf/dash.conf +``` + +> 备注:我们使用SRS内置的HTTP服务器分发DASH切片,也可以使用Nginx等Web服务器分发。 + +## 第五步,启动推流编码器 + +使用FFMPEG命令推流: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +生成的流地址为: +* RTMP流地址为:`rtmp://192.168.1.170/live/livestream` +* DASH流地址为: `http://192.168.1.170:8080/live/livestream.mpd` + +## 第六步,观看RTMP流 + +RTMP流地址为:`rtmp://192.168.1.170/live/livestream` + +可以使用VLC观看。 + +或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +## 第七步,观看DASH流 + +DASH流地址为: `http://192.168.1.170:8080/live/livestream.mpd` + +可以使用VLC观看。 + +Winlin 2020.01 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-dash) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-ffmpeg.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-ffmpeg.md new file mode 100644 index 00000000..fabdf4c1 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-ffmpeg.md @@ -0,0 +1,139 @@ +--- +title: Transcode 部署 +sidebar_label: Transcode 部署 +hide_title: false +hide_table_of_contents: false +--- + +# FFMPEG 转码部署实例 + 
+FFMPEG对RTMP直播流转码,SRS在收到编码器推送的直播流后,可以对直播流进行转码,输出RTMP流到服务器(也可以到SRS自己)。 +详细规则参考:[FFMPEG](./ffmpeg.md),本文列出了具体的部署的实例。 + +**假设服务器的IP是:192.168.1.170** + +## 第一步,获取SRS + +详细参考[GIT获取代码](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +或者使用git更新已有代码: + +```bash +git pull +``` + +## 第二步,编译SRS + +详细参考[Build](./install.md) + +```bash +./configure --ffmpeg-tool=on && make +``` + +## 第三步,编写SRS配置文件 + +详细参考[FFMPEG](./ffmpeg.md) + +将以下内容保存为文件,譬如`conf/ffmpeg.transcode.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/ffmpeg.transcode.conf +listen 1935; +max_connections 1000; +vhost __defaultVhost__ { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine ff { + enabled on; + vfilter { + } + vcodec libx264; + vbitrate 500; + vfps 25; + vwidth 768; + vheight 320; + vthreads 12; + vprofile main; + vpreset medium; + vparams { + } + acodec libfdk_aac; + abitrate 70; + asample_rate 44100; + achannels 2; + aparams { + } + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +## 第四步,启动SRS + +详细参考[FFMPEG](./ffmpeg.md) + +```bash +./objs/srs -c conf/ffmpeg.conf +``` + +## 第五步,启动推流编码器 + +详细参考[FFMPEG](./ffmpeg.md) + +使用FFMPEG命令推流: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +或使用FMLE推流: + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +涉及的流包括: +* 编码器推送流:rtmp://192.168.1.170:1935/live/livestream +* 观看原始流:rtmp://192.168.1.170:1935/live/livestream +* 观看转码流:rtmp://192.168.1.170:1935/live/livestream_ff + +## 第六步,观看RTMP流 + +详细参考[FFMPEG](./ffmpeg.md) + +RTMP流地址为:`rtmp://192.168.1.170/live/livestream` + +可以使用VLC观看。 + +或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +## 第七步,观看FFMPEG转码的RTMP流 + +详细参考[FFMPEG](./ffmpeg.md) + +RTMP流地址为:`rtmp://192.168.1.170/live/livestream_ff` + +可以使用VLC观看。 + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +Winlin 2014.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-ffmpeg) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-forward.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-forward.md new file mode 100644 index 00000000..e1ac6d11 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-forward.md @@ -0,0 +1,159 @@ +--- +title: Forward 部署 +sidebar_label: Forward 部署 +hide_title: false +hide_table_of_contents: false +--- + +# RTMP流转发(Forward)部署实例 + +SRS可以将送到SRS的流转发给其他RTMP服务器,实现简单集群/热备功能,也可以实现一路流热备(譬如编码器由于带宽限制,只能送一路流到RTMP服务器,要求RTMP服务器能将这路流也转发给其他RTMP备用服务器,实现主备容错集群)。 + +**假设服务器的IP是:192.168.1.170** + +Forward就是SRS将流拷贝输出给其他的RTMP服务器,以SRS转发给SRS为例: +* 主SRS:Master, 编码器推流到主SRS,主SRS将流处理的同时,将流转发到备SRS +* 备SRS:Slave, 主SRS转发流到备SRS,就像编码器推送流到备用SRS一样。 +我们的部署实例中,主SRS侦听1935端口,备SRS侦听19350端口。 + +## 第一步,获取SRS + +详细参考[GIT获取代码](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +或者使用git更新已有代码: + +```bash +git pull +``` + +## 第二步,编译SRS + +详细参考[Build](./install.md) + +```bash +./configure && make +``` + +## 第三步,编写主SRS配置文件 + +详细参考[Forward](./forward.md) + +将以下内容保存为文件,譬如`conf/forward.master.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/forward.master.conf +listen 1935; +max_connections 1000; +pid ./objs/srs.master.pid; +srs_log_tank file; +srs_log_file ./objs/srs.master.log; +vhost __defaultVhost__ { + forward { + enabled on; + destination 127.0.0.1:19350; + } +} 
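+
+# 提示:destination 也可以配置多个地址(空格分隔),同时转发到多台备用服务器,
+# 例如(端口仅为示例):destination 127.0.0.1:19350 127.0.0.1:19351;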
+``` + +## 第四步,启动主SRS,主SRS将流转发到备SRS + +详细参考[Forward](./forward.md) + +```bash +./objs/srs -c conf/forward.master.conf +``` + +## 第五步,编写备SRS配置文件 + +详细参考[Forward](./forward.md) + +将以下内容保存为文件,譬如`conf/forward.slave.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/forward.slave.conf +listen 19350; +pid ./objs/srs.slave.pid; +srs_log_tank file; +srs_log_file ./objs/srs.slave.log; +vhost __defaultVhost__ { +} +``` + +## 第六步,启动备SRS,主SRS将流转发到备SRS + +详细参考[Forward](./forward.md) + +```bash +./objs/srs -c conf/forward.slave.conf +``` + +注意:启动srs后查看下srs是否启动成功,错误可以查看日志。 + +```bash +[winlin@dev6 srs]$ sudo netstat -anp|grep srs +tcp 0 0 0.0.0.0:1935 0.0.0.0:* LISTEN 7826/srs +tcp 0 0 0.0.0.0:19350 0.0.0.0:* LISTEN 7834/srs +``` + +## 第七步,启动推流编码器 + +详细参考[Forward](./forward.md) + +使用FFMPEG命令推流: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +或使用FMLE推流: + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +涉及的流包括: +* 编码器推送的流:rtmp://192.168.1.170/live/livestream +* 主SRS转发的流:rtmp://192.168.1.170:19350/live/livestream +* 观看主SRS的流:rtmp://192.168.1.170/live/livestream +* 观看备SRS的流:rtmp://192.168.1.170:19350/live/livestream + +## 第八步,观看主SRS的RTMP流 + +详细参考[Forward](./forward.md) + +RTMP流地址为:`rtmp://192.168.1.170/live/livestream` + +可以使用VLC观看。 + +或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +## 第九步,观看备SRS的RTMP流 + +详细参考[Forward](./forward.md) + +RTMP流地址为:`rtmp://192.168.1.170:19350/live/livestream` + +可以使用VLC观看。 + + + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +Winlin 2014.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-forward) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-hls-cluster.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-hls-cluster.md new file mode 100644 index 00000000..ae1fb014 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-hls-cluster.md @@ -0,0 +1,158 @@ +--- +title: HLS 集群部署 +sidebar_label: HLS 集群部署 +hide_title: false +hide_table_of_contents: false +--- + +# HLS 边缘集群部署实例 + +如何创建分发HLS的边缘集群,就像CDN一样分发HLS流。 + +**假设服务器的IP是:192.168.1.170** + +## 第一步,获取SRS + +详细参考[GIT获取代码](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +或者使用git更新已有代码: + +```bash +git pull +``` + +## 第二步,编译SRS + +详细参考[Build](./install.md) + +```bash +./configure && make +``` + +## 第三步,编写SRS源站配置文件,生成HLS切片文件 + +详细参考[HLS分发](./hls.md)。 + +将以下内容保存为文件,譬如`conf/hls.origin.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/hls.origin.conf +listen 1935; +max_connections 1000; +daemon off; +srs_log_tank console; +http_server { + enabled on; + listen 8080; +} +vhost __defaultVhost__ { + hls { + enabled on; + hls_ctx off; + hls_ts_ctx off; + } +} +``` + +## 第四步,编写NGINX边缘配置文件,分发HLS文件 + +详细参考[Nginx for HLS](./nginx-for-hls.md)。 + +将以下内容保存为文件,譬如`conf/hls.edge.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/hls.edge.conf +worker_processes 3; +events { + worker_connections 10240; +} + +http { + # For Proxy Cache. + proxy_cache_path /tmp/nginx-cache levels=1:2 keys_zone=srs_cache:8m max_size=1000m inactive=600m; + proxy_temp_path /tmp/nginx-cache/tmp; + + server { + listen 8081; + # For Proxy Cache. 
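+        # 缓存策略说明:m3u8 会不断更新,下面 location 中只缓存几秒,保证播放列表及时刷新;
+        # ts 切片内容不会变化,可以缓存较长时间(60分钟);
+        # 404 响应短暂缓存,避免缺失的切片被大量请求反复回源。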
+ proxy_cache_valid 404 10s; + proxy_cache_lock on; + proxy_cache_lock_age 300s; + proxy_cache_lock_timeout 300s; + proxy_cache_min_uses 1; + + location ~ /.+/.*\.(m3u8)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + # For Proxy Cache. + proxy_cache srs_cache; + proxy_cache_key $scheme$proxy_host$uri$args; + proxy_cache_valid 200 302 10s; + } + location ~ /.+/.*\.(ts)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + # For Proxy Cache. + proxy_cache srs_cache; + proxy_cache_key $scheme$proxy_host$uri; + proxy_cache_valid 200 302 60m; + } + } +} +``` + +## 第五步,启动SRS源站和NGINX边缘 + +```bash +nginx -c $(pwd)/conf/hls.edge.conf +./objs/srs -c conf/hls.origin.conf +``` + +> Note: 请参考[NGINX](https://nginx.org/)的说明下载和安装,只要是NGINX就可以,没有特别的要求。 + +## 第六步,启动推流编码器,推流到SRS,生成HLS文件 + +使用FFMPEG命令推流: + +```bash +for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ +done +``` + +或使用OBS推流: + +```bash +Server: rtmp://192.168.1.170/live +StreamKey: livestream +``` + +## 第七步,观看HLS流 + +SRS源站的HLS流: `http://192.168.1.170:8080/live/livestream.m3u8` + +NGINX边缘的HLS流: `http://192.168.1.170:8081/live/livestream.m3u8` + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +## 第八步,压测和添加更多的边缘NGINX + +可以使用[srs-bench](https://github.com/ossrs/srs-bench#usage),模拟很多客户端,播放HLS流: + +```bash +docker run --rm -it --network=host --name sb ossrs/srs:sb \ + ./objs/sb_hls_load -c 100 -r http://192.168.1.170:8081/live/livestream.m3u8 +``` + +可以多找几台服务器,用同样的配置文件启动NGINX,就成了一个边缘集群了。 + +Winlin 2014.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-hls-cluster) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-hls.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-hls.md new file mode 100644 index 00000000..8cd69eb4 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-hls.md @@ -0,0 +1,14 @@ +--- +title: HLS 部署 +sidebar_label: HLS 部署 +hide_title: false +hide_table_of_contents: false +--- + +# HLS 部署实例 + +迁移到了[HLS](./hls.md). 
+ +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-hls) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-http-flv-cluster.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-http-flv-cluster.md new file mode 100644 index 00000000..b6df4226 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-http-flv-cluster.md @@ -0,0 +1,165 @@ +--- +title: HTTP-FLV 集群部署 +sidebar_label: HTTP-FLV 集群部署 +hide_title: false +hide_table_of_contents: false +--- + +# HTTP-FLV集群部署实例 + +SRS支持HTTP FLV直播流集群分发,详细参考[HTTP FLV](./flv.md#about-http-flv) + +SRS的HTTP FLV边缘只能使用单进程,如何做到多进程呢?请参考[Reuse Port](./reuse-port.md) + +本例子部署了三个SRS,侦听不同的端口(实际可以部署到不同的机器,侦听同一个端口),一个作为Origin源站,两个作为Edge边缘。推RTMP流到源站或者边缘后,可以通过任何Edge观看,和RTMP的效果一样,集群的延迟在0.8-3秒。 + +**假设服务器的IP是:192.168.1.170** + +## 第一步,获取SRS + +详细参考[GIT获取代码](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +或者使用git更新已有代码: + +```bash +git pull +``` + +## 第二步,编译SRS + +详细参考[Build](./install.md) + +```bash +./configure && make +``` + +## 第三步,编写SRS源站配置文件 + +详细参考[HTTP FLV](./flv.md) + +将以下内容保存为文件,譬如`conf/http.flv.live.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/http.flv.live.conf +listen 1935; +max_connections 1000; +http_server { + enabled on; + listen 8080; + dir ./objs/nginx/html; +} +vhost __defaultVhost__ { + http_remux { + enabled on; + mount [vhost]/[app]/[stream].flv; + hstrs on; + } +} +``` + +## 第四步,编写SRS边缘配置文件 + +详细参考[HTTP FLV](./flv.md) + +将以下内容保存为文件,譬如`conf/http.flv.live.edge1.conf`和`conf/http.flv.live.edge2.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/http.flv.live.edge1.conf +listen 19351; +max_connections 1000; +pid objs/srs.flv.19351.pid; +srs_log_file objs/srs.flv.19351.log; +http_server { + enabled on; + listen 8081; + dir ./objs/nginx/html; +} +vhost __defaultVhost__ { + mode remote; + origin 127.0.0.1; + http_remux { + enabled on; + mount [vhost]/[app]/[stream].flv; + hstrs on; + } +} +``` + +## 第五步,启动SRS + +详细参考[HTTP FLV](./flv.md) + +```bash +./objs/srs -c conf/http.flv.live.conf & +./objs/srs -c conf/http.flv.live.edge1.conf & +./objs/srs -c conf/http.flv.live.edge2.conf & +``` + +## 第六步,启动推流编码器 + +详细参考[HTTP FLV](./flv.md) + +使用FFMPEG命令推流: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +或使用支持FMLE推流: + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +源站,生成的流地址为: +* RTMP流地址为:`rtmp://192.168.1.170/live/livestream` +* HTTP FLV: `http://192.168.1.170:8080/live/livestream.flv` + +边缘1,生成的流地址为: +* RTMP流地址为:`rtmp://192.168.1.170:19351/live/livestream` +* HTTP FLV: `http://192.168.1.170:8081/live/livestream.flv` + +边缘2,生成的流地址为: +* RTMP流地址为:`rtmp://192.168.1.170:19352/live/livestream` +* HTTP FLV: `http://192.168.1.170:8082/live/livestream.flv` + +## 第七步,观看RTMP流 + +详细参考[HTTP FLV](./flv.md) + +源站RTMP流地址为:`rtmp://192.168.1.170/live/livestream`,可以使用VLC观看,或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +边缘1的RTMP流地址为:`rtmp://192.168.1.170:19351/live/livestream`,可以使用VLC观看,或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +边缘2的RTMP流地址为:`rtmp://192.168.1.170:19352/live/livestream`,可以使用VLC观看,或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +## 第八步,观看HTTP流 + +详细参考[HTTP FLV](./flv.md) + +源站HTTP FLV流地址为: 
`http://192.168.1.170:8080/live/livestream.flv`,可以使用VLC观看,或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +边缘1的HTTP FLV流地址为: `http://192.168.1.170:8081/live/livestream.flv`,可以使用VLC观看,或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +边缘2的HTTP FLV流地址为: `http://192.168.1.170:8082/live/livestream.flv`,可以使用VLC观看,或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +Winlin 2014.4 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-http-flv-cluster) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-http-flv.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-http-flv.md new file mode 100644 index 00000000..b9148861 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-http-flv.md @@ -0,0 +1,14 @@ +--- +title: HTTP-FLV 部署 +sidebar_label: HTTP-FLV 部署 +hide_title: false +hide_table_of_contents: false +--- + +# HTTP-FLV部署实例 + +迁移到了[HTTP-FLV](./flv.md). + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-http-flv) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-http.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-http.md new file mode 100644 index 00000000..2f9aa397 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-http.md @@ -0,0 +1,147 @@ +--- +title: HTTP Server 部署 +sidebar_label: HTTP Server 部署 +hide_title: false +hide_table_of_contents: false +--- + +# SRS-HTTP服务部署实例 + +SRS内嵌了http服务器,支持分发hls流和文件。 + +以分发HLS为例,使用SRS分发RTMP和HLS流,不依赖于外部服务器。 + +**假设服务器的IP是:192.168.1.170** + +## 第一步,获取SRS + +详细参考[GIT获取代码](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +或者使用git更新已有代码: + +```bash +git pull +``` + +## 第二步,编译SRS + +详细参考[Build](./install.md) + +```bash +./configure && make +``` + +## 第三步,编写SRS配置文件 + +详细参考[HLS分发](./hls.md)和[HTTP服务器](./http-server.md) + +将以下内容保存为文件,譬如`conf/http.hls.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/http.hls.conf +listen 1935; +max_connections 1000; +http_server { + enabled on; + listen 8080; + dir ./objs/nginx/html; +} +vhost __defaultVhost__ { + hls { + enabled on; + hls_path ./objs/nginx/html; + hls_fragment 10; + hls_window 60; + } +} +``` + +备注:hls_path必须存在,srs只会自动创建`${hls_path}`下的app的目录。参考:[HLS分发: HLS流程](./hls.md) + +## 第四步,启动SRS + +详细参考[HLS分发](./hls.md)和[HTTP服务器](./http-server.md) + +```bash +./objs/srs -c conf/http.hls.conf +``` + +备注:请确定srs-http-server已经启动,可以访问[nginx](http://localhost:8080/nginx.html),若能看到`nginx is ok`则没有问题。 + +备注:实际上提供服务的是SRS,可以看到响应头是`Server: SRS/0.9.51`之类。 + +## 第五步,启动推流编码器 + +详细参考[HLS分发](./hls.md) + +使用FFMPEG命令推流: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +或使用支持h.264+aac的FMLE推流(若不支持h.264+aac,则可以使用srs转码,参考[Transcode2HLS](./sample-transcode-to-hls.md)): + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +生成的流地址为: +* RTMP流地址为:`rtmp://192.168.1.170/live/livestream` +* HLS流地址为: `http://192.168.1.170:8080/live/livestream.m3u8` + +## 第六步,观看RTMP流 + +详细参考[HLS分发](./hls.md) + +RTMP流地址为:`rtmp://192.168.1.170/live/livestream` + +可以使用VLC观看。 + +或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +## 第七步,观看HLS流 + +详细参考[HLS分发](./hls.md) + +HLS流地址为: 
`http://192.168.1.170:8080/live/livestream.m3u8` + +可以使用VLC观看。 + +或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +注意:VLC无法观看纯音频流。 + +## Q&A + +## RTMP流能看,HLS看不了 +* 确认srs-http-server启动并且可以访问:`nginx is ok`页面能访问。 +* 确认m3u8文件能下载:浏览器打开`http://192.168.1.170:8080/live/livestream.m3u8`,ip地址换成你服务器的IP地址。 +* 若m3u8能下载,可能是srs-player的问题,使用vlc播放地址:`http://192.168.1.170:8080/live/livestream.m3u8`,ip地址换成你服务器的IP地址。 +* 若VLC不能播放,将m3u8下载后,用文本编辑器打开,将m3u8文件内容发到群中,或者贴到issue中。寻求帮助。 +* 还有可能是编码问题,参考下面的“RTMP流和HLS流内容不一致” + +## RTMP流内容和HLS流内容不一致 +* 一般这种问题出现在使用上面的例子推流,然后换成别的编码器推流,或者换个文件推流。 +* 可能是流的编码不对(推流时使用FMLE),HLS需要h.264+aac,需要转码,参考只转码音频[Transcode2HLS](./sample-transcode-to-hls.md) + +Winlin 2014.4 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-http) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-ingest.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-ingest.md new file mode 100644 index 00000000..69324f00 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-ingest.md @@ -0,0 +1,96 @@ +--- +title: Ingest 部署 +sidebar_label: Ingest 部署 +hide_title: false +hide_table_of_contents: false +--- + +# Ingest采集实例 + +SRS启动后,自动启动Ingest开始采集file/stream/device,并将流推送到SRS。详细规则参考:[Ingest](./ingest.md),本文列出了具体的部署的实例。 + +**假设服务器的IP是:192.168.1.170** + +## 第一步,获取SRS + +详细参考[GIT获取代码](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +或者使用git更新已有代码: + +```bash +git pull +``` + +## 第二步,编译SRS + +详细参考[Build](./install.md) + +```bash +./configure --ffmpeg-tool=on && make +``` + +> 注意:需要自己下载和安装FFmpeg,请看[Download](https://ffmpeg.org/download.html)。 + +> 注意:若执行失败,请查看日志,确认FFmpeg路径是SRS能检测到的。 + +## 第三步,编写SRS配置文件 + +详细参考[Ingest](./ingest.md) + +将以下内容保存为文件,譬如`conf/ingest.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/ingest.conf +listen 1935; +max_connections 1000; +vhost __defaultVhost__ { + ingest livestream { + enabled on; + input { + type file; + url ./doc/source.flv; + } + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine { + enabled off; + output rtmp://127.0.0.1:[port]/live?vhost=[vhost]/livestream; + } + } +} +``` + +> 注意:请检查软链接`./objs/ffmpeg/bin/ffmpeg`是否正常,也可以直接改成你安装的FFmpeg的路径。 + +## 第四步,启动SRS + +详细参考[Ingest](./ingest.md) + +```bash +./objs/srs -c conf/ingest.conf +``` + +涉及的流包括: +* 采集的流:rtmp://192.168.1.170:1935/live/livestream + +## 第五步,观看RTMP流 + +详细参考[Ingest](./ingest.md) + +RTMP流地址为:`rtmp://192.168.1.170/live/livestream` + +可以使用VLC观看。 + +或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +Winlin 2014.4 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-ingest) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-origin-cluster.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-origin-cluster.md new file mode 100644 index 00000000..00e3bb93 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-origin-cluster.md @@ -0,0 +1,157 @@ +--- +title: RTMP 源站集群 +sidebar_label: RTMP 源站集群 +hide_title: false +hide_table_of_contents: false +--- + +# RTMP源站集群部署实例 + +RTMP源站集群部署的步骤,我们给出了一个例子,部署了两个源站做集群,还部署了一个边缘。 +实际使用中,可以部署多个源站和多个边缘,形成源站集群。 + +**假设服务器的IP是:192.168.1.170** + +## 第一步,获取SRS + +详细参考[GIT获取代码](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +或者使用git更新已有代码: + +```bash +git pull +``` + +## 第二步,编译SRS + 
+详细参考[Build](./install.md) + +```bash +./configure && make +``` + +## 第三步,编写SRS源站A配置文件 + +详细参考[RTMP源站集群](./origin-cluster.md) + +将以下内容保存为文件,譬如`conf/origin.cluster.serverA.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/origin.cluster.serverA.conf +listen 19350; +max_connections 1000; +daemon off; +srs_log_tank console; +pid ./objs/origin.cluster.serverA.pid; +http_api { + enabled on; + listen 9090; +} +vhost __defaultVhost__ { + cluster { + mode local; + origin_cluster on; + coworkers 127.0.0.1:9091; + } +} +``` + +## 第四步,编写SRS源站B配置文件 + +详细参考[RTMP源站集群](./origin-cluster.md) + +将以下内容保存为文件,譬如`conf/origin.cluster.serverB.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/origin.cluster.serverB.conf +listen 19351; +max_connections 1000; +daemon off; +srs_log_tank console; +pid ./objs/origin.cluster.serverB.pid; +http_api { + enabled on; + listen 9091; +} +vhost __defaultVhost__ { + cluster { + mode local; + origin_cluster on; + coworkers 127.0.0.1:9090; + } +} +``` + +## 第五步,编写SRS边缘配置文件,从多个源站拉流,实现热备和负载均衡 + +详细参考[RTMP源站集群](./origin-cluster.md) + +将以下内容保存为文件,譬如`conf/origin.cluster.edge.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/origin.cluster.edge.conf +listen 1935; +max_connections 1000; +pid objs/edge.pid; +daemon off; +srs_log_tank console; +vhost __defaultVhost__ { + cluster { + mode remote; + origin 127.0.0.1:19351 127.0.0.1:19350; + } +} +``` + +> Remark: 如果播放器支持RTMP302,当然可以直接播放源站的流,任意源站都能播放,如果流不在访问的源站,会返回RTMP302重定向到流所在的源站。 + +## 第六步,启动SRS + +详细参考[RTMP源站集群](./origin-cluster.md) + +```bash +./objs/srs -c conf/origin.cluster.serverA.conf & +./objs/srs -c conf/origin.cluster.serverB.conf & +./objs/srs -c conf/origin.cluster.edge.conf & +``` + +## 第七步,启动推流编码器,推流到19350 + +详细参考[RTMP源站集群](./origin-cluster.md) + +使用FFMPEG命令推流: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170:19350/live/livestream; \ + sleep 1; \ + done +``` + +或使用FMLE推流: + +```bash +FMS URL: rtmp://192.168.1.170:19350/live +Stream: livestream +``` + +## 第八步,观看RTMP流,不管流推到哪个源站,播放边缘的流都能从正确的源站回源取流 + +详细参考[RTMP源站集群](./origin-cluster.md) + +观看集群的RTMP流地址为:`rtmp://192.168.1.170/live/livestream`,可以使用VLC观看。或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +Winlin 2018.2 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-origin-cluster) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-realtime.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-realtime.md new file mode 100644 index 00000000..0a16ccbf --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-realtime.md @@ -0,0 +1,114 @@ +--- +title: RTMP 低延时部署 +sidebar_label: RTMP 低延时部署 +hide_title: false +hide_table_of_contents: false +--- + +# RTMP低延时配置 + +配置SRS为Realtime模式,使用RTMP可以将延迟降低到0.8-3秒,可以应用到对实时性要求不苛刻的地方,譬如视频会议(其实视频会议,以及人类在开会的时候,正常时候是会有人讲,有人在听在想,然后换别人讲,其实1秒左右延迟没有问题的,除非要吵架,就需要0.3秒左右的延迟)。 + +配置最低延迟的服务器详细信息可以参考:[LowLatency](./low-latency.md),本文举例说明部署的实例步骤。 + +**假设服务器的IP是:192.168.1.170** + +## 第一步,获取SRS + +详细参考[GIT获取代码](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +或者使用git更新已有代码: + +```bash +git pull +``` + +## 第二步,编译SRS + +详细参考[Build](./install.md) + +```bash +./configure && make +``` + +## 第三步,编写SRS配置文件 + +详细参考[LowLatency](./low-latency.md) + +将以下内容保存为文件,譬如`conf/realtime.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/realtime.conf +listen 1935; +max_connections 
1000; +vhost __defaultVhost__ { + tcp_nodelay on; + min_latency on; + + play { + gop_cache off; + queue_length 10; + mw_latency 100; + } + + publish { + mr off; + } +} +``` + +## 第四步,启动SRS + +详细参考[LowLatency](./low-latency.md) + +```bash +./objs/srs -c conf/realtime.conf +``` + +## 第五步,启动推流编码器 + +详细参考[LowLatency](./low-latency.md) + +使用FFMPEG命令推流: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +或使用FMLE推流: + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +备注:测量延迟,可以使用FMLE推流时,将智能手机的秒表功能打开,用FMLE摄像头对着秒表,然后对比FMLE的摄像头的图像,和服务器分发的头像的延迟,就知道精确的延迟多大。参考:[延迟的测量](http://blog.csdn.net/win_lin/article/details/12615591),如下图所示: +![latency](/img/sample-realtime-001.png) + +## 第六步,观看RTMP流 + +详细参考[LowLatency](./low-latency.md) + +RTMP流地址为:`rtmp://192.168.1.170/live/livestream` + +注意:不要使用VLC观看,**VLC的延迟会很大**,虽然VLC能看到流。 + +或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +Winlin 2014.12 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-realtime) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-rtmp-cluster.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-rtmp-cluster.md new file mode 100644 index 00000000..5b073cfd --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-rtmp-cluster.md @@ -0,0 +1,118 @@ +--- +title: RTMP 集群部署 +sidebar_label: RTMP 集群部署 +hide_title: false +hide_table_of_contents: false +--- + +# RTMP边缘集群部署实例 + +RTMP边缘集群部署的步骤。 + +**假设服务器的IP是:192.168.1.170** + +## 第一步,获取SRS + +详细参考[GIT获取代码](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +或者使用git更新已有代码: + +```bash +git pull +``` + +## 第二步,编译SRS + +详细参考[Build](./install.md) + +```bash +./configure && make +``` + +## 第三步,编写SRS源站配置文件 + +详细参考[RTMP分发](./rtmp.md)和[Edge](./edge.md) + +将以下内容保存为文件,譬如`conf/origin.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/origin.conf +listen 19350; +max_connections 1000; +pid objs/origin.pid; +srs_log_file ./objs/origin.log; +vhost __defaultVhost__ { +} +``` + +## 第四步,编写SRS边缘配置文件 + +详细参考[RTMP分发](./rtmp.md)和[Edge](./edge.md) + +将以下内容保存为文件,譬如`conf/edge.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/edge.conf +listen 1935; +max_connections 1000; +pid objs/edge.pid; +srs_log_file ./objs/edge.log; +vhost __defaultVhost__ { + cluster { + mode remote; + origin 127.0.0.1:19350; + } +} +``` + +## 第五步,启动SRS + +详细参考[RTMP分发](./rtmp.md)和[Edge](./edge.md) + +```bash +./objs/srs -c conf/origin.conf & +./objs/srs -c conf/edge.conf & +``` + +## 第六步,启动推流编码器 + +详细参考[RTMP分发](./rtmp.md)和[Edge](./edge.md) + +使用FFMPEG命令推流: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +或使用FMLE推流: + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +## 第七步,观看RTMP流 + +详细参考[RTMP分发](./rtmp.md)和[Edge](./edge.md) + +源站RTMP流地址为:`rtmp://192.168.1.170:19350/live/livestream`,可以使用VLC观看。 + +边缘RTMP流地址为:`rtmp://192.168.1.170/live/livestream`,可以使用VLC观看。或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +Winlin 2014.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-rtmp-cluster) + + diff --git 
a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-rtmp.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-rtmp.md new file mode 100644 index 00000000..3473e053 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-rtmp.md @@ -0,0 +1,12 @@ +--- +title: RTMP 部署 +sidebar_label: RTMP 部署 +hide_title: false +hide_table_of_contents: false +--- + +# RTMP Delivery + +迁移到了[RTMP](./rtmp.md). + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-rtmp) diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-srt.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-srt.md new file mode 100644 index 00000000..952d4538 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-srt.md @@ -0,0 +1,14 @@ +--- +title: SRT 部署 +sidebar_label: SRT 部署 +hide_title: false +hide_table_of_contents: false +--- + +# SRT部署实例 + +迁移到了[SRT](./srt.md). + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-srt) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-transcode-to-hls.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-transcode-to-hls.md new file mode 100644 index 00000000..756de46f --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample-transcode-to-hls.md @@ -0,0 +1,151 @@ +--- +title: 转码 HLS 分发 +sidebar_label: 转码 HLS 分发 +hide_title: false +hide_table_of_contents: false +--- + +# 转码后分发HLS部署实例 + +HLS需要h.264+aac,若符合这个要求可以按照[Usage: HLS](./hls.md)部署,若不符合这个要求则需要转码。 + +如何知道流是否是h264+aac编码: +* [Usage: HLS](./hls.md)中的`Q&A`说明的问题。 +* 看编码器的参数,FMLE可以选视频编码为vp6或者h264,音频一般为mp3/NellyMoser。,所以FMLE肯定推流是不符合要求的。 +* 看SRS的日志,若显示`hls only support video h.264/avc codec. 
ret=601`,就明显说明是编码问题。 + +备注:在虚拟机上测试,一路流转码为aac,需要3%CPU,在物理机上可能稍好点。转码的开销比分发要大,实际应用需要考虑这个因素。 + +**假设服务器的IP是:192.168.1.170** + +## 第一步,获取SRS + +详细参考[GIT获取代码](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +或者使用git更新已有代码: + +```bash +git pull +``` + +## 第二步,编译SRS + +详细参考[Build](./install.md) + +```bash +./configure --ffmpeg-tool=on && make +``` + +## 第三步,编写SRS配置文件 + +详细参考[HLS分发](./hls.md) + +将以下内容保存为文件,譬如`conf/transcode2hls.audio.only.conf`,服务器启动时指定该配置文件(srs的conf文件夹有该文件)。 + +```bash +# conf/transcode2hls.audio.only.conf +listen 1935; +max_connections 1000; +http_server { + enabled on; + listen 8080; + dir ./objs/nginx/html; +} +vhost __defaultVhost__ { + hls { + enabled on; + hls_path ./objs/nginx/html; + hls_fragment 10; + hls_window 60; + } + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine ff { + enabled on; + vcodec copy; + acodec libfdk_aac; + abitrate 45; + asample_rate 44100; + achannels 2; + aparams { + } + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +备注:这个配置使用只转码音频,因为视频是h.264符合要求,若需要全转码 + +## 第四步,启动SRS + +详细参考[HLS分发](./hls.md) + +```bash +./objs/srs -c conf/transcode2hls.audio.only.conf +``` + +## 第五步,启动推流编码器 + +详细参考[HLS分发](./hls.md) + +使用FFMPEG命令推流: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +或使用FMLE推流: + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +生成的流地址为: +* RTMP流地址为(FMLE推流无HLS地址):`rtmp://192.168.1.170/live/livestream` +* 转码后的RTMP流地址为:`rtmp://192.168.1.170/live/livestream_ff` +* 转码后的HLS流地址为: `http://192.168.1.170:8080/live/livestream_ff.m3u8` + +备注:因为FMLE推上来的音频有问题,不是aac,所以srs会报错(当然啦,不然就不用转码了)。这个错误可以忽略,srs是说,rtmp流没有问题,但是无法切片为hls,因为音频编码不对。没有关系,ffmpeg会转码后重新推一路流给srs。 + +备注:如何只对符合要求的流切hls?可以用vhost。默认的vhost不切hls,将转码后的流推送到另外一个vhost,这个vhost切hls。 + +## 第七步,观看RTMP流 + +详细参考[HLS分发](./hls.md) + +RTMP流地址为:`rtmp://192.168.1.170/live/livestream_ff` + +可以使用VLC观看。 + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +## 第八步,观看HLS流 + +详细参考[HLS分发](./hls.md) + +HLS流地址为: `http://192.168.1.170:8080/live/livestream_ff.m3u8` + +可以使用VLC观看。 + +或者使用在线SRS播放器播放:[srs-player](https://ossrs.net/players/srs_player.html) + +备注:请将所有实例的IP地址192.168.1.170都换成部署的服务器IP地址。 + +Winlin 2014.3 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample-transcode-to-hls) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample.md new file mode 100644 index 00000000..c0b14efe --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/sample.md @@ -0,0 +1,269 @@ +--- +title: Use Scenarios +sidebar_label: Use Scenarios +hide_title: false +hide_table_of_contents: false +--- + +# Use Scenarios + +一般来讲,SRS的应用方式有以下几类: + +1. 搭建大规模CDN集群,可以在CDN内部的源站和边缘部署SRS。 +2. 小型业务快速搭建几台流媒体集群,譬如学校、企业等,需要分发的流不多,同时CDN覆盖不如自己部署几个节点,可以用SRS搭建自己的小集群。 +3. SRS作为源站,CDN作为加速边缘集群。比如推流到CDN后CDN转推到源站,播放时CDN会从源站取流。这样可以同时使用多个CDN。同时还可以在源站做DRM和DVR,输出HLS,更重要的是如果直接推CDN一般CDN之间不是互通的,当一个CDN出现故障无法快速切换到其他CDN。 +4. 编码器可以集成SRS支持拉流。一般编码器支持推RTMP/UDP流,如果集成SRS后,可以支持拉多种流。 +5. 协议转换网关,比如可以推送FLV到SRS转成RTMP协议,或者拉RTSP转RTMP,还有拉HLS转RTMP。SRS只要能接入流,就能输出能输出的协议。 +6. 学习流媒体可以用SRS。SRS提供了大量的协议的文档,wiki,和文档对应的代码,详细的issues,流媒体常见的功能实现,还有新流媒体技术的尝试。 +7. 
还可以装逼用,在SRS微信群里涉及到很多流媒体和传输的问题,是个装逼的好平台。 + +## Quzhibo + +趣直播,一个知识直播平台,目前直播技术为主。 + +主要流程: + +* obs 直播 +* 有三台hls 服务器,主 srs 自动 forward 到 srs,然后那三台切割 +* 有两台 flv 服务器,remote 拉群,发现有时会挂掉,用了个监控srs的脚本,一发现挂掉立马重启 +* srs 推流到七牛,利用七牛接口,来生成 m3u8 回放 这样可以结束后,立马看到回放 + +## Europe: Forward+EXEC + +BEGINHO STREAMING PROJECT + +I needed solution for pushing streams from origin server +to edge server. On origin server all streams are avaliable +in multicast (prepared with ffmpeg, h264 in mpegts container). +But routing multicast through GRE tunnel to the edge +server was very buggy. Any networks hickups in origin-edge +route were affecting streams in bad way (freezeing, pixelation and such)... +So, I found SRS project and after some reading of docs, I +decided to give it a try. Most intereseting feature of SRS +to me was a "forward" option. It allows to push all streams +you have avaliable on local server (SRS origin) to remote +server (SRS edge) with a single line in config file. +https://ossrs.net/lts/zh-cn/docs/v4/doc/sample-forward + +SRS2 config on origin server: +``` + vhost __defaultVhost__ { + forward xxx:19350; + } +``` + +I "told" to ffmpegs on transcoder to publish stream to rtmp, +instead of multicast (and yes, I used multicast group as rtmp stream name): +``` + ffmpeg -i udp://xxx:1234 -vcodec libx264 -acodec libfdk_aac \ + -metadata service_name="Channel 1" -metadata service_provider="PBS" \ + -f flv rtmp://xxx:1935/live/xxx:1234 +``` + +Tested stream with ffprobe: +``` + [root@encoder1 ~]# ffprobe rtmp://xxx:1935/live/xxx:1234 + Input #0, flv, from 'rtmp://xxx:1935/live/xxx:1234': + Metadata: + service_name : Channel 1 + service_provider: PBS + encoder : Lavf57.24.100 + server : SRS/2.0.209(ZhouGuowen) + srs_primary : SRS/1.0release + srs_authors : winlin,wenjie.zhao + server_version : 2.0.209 + Duration: N/A, start: 0.010000, bitrate: N/A + Stream #0:0: Audio: aac (LC), 48000 Hz, stereo, fltp, 128 kb/s + Stream #0:1: Video: h264 (High), yuvj420p(pc, bt709), 720x576 [SAR 16:11 DAR 20:11], 24 fps, 24 tbr, 1k tbn +``` + +On edge server (example IP xxx), there is a streaming software +wich accepts only mpegts as source. So, after receiving rtmp streams +from origin server, I needed all streams back to mpegts. +SRS have support for several types for output (hls, hds, rtmp, http-flv...) +but not mpegts, and i need udp mpegts. Then I asked Winlin for help +and he suggested to use SRS3 on edge server, as SRS3 have an feature +that SRS2 dont, and thats "exec" option. In SRS3 config, you can use +exec option, to call ffmpeg for every incoming stream and convert it to +whatever you like. I compiled SRS3 with "--with-ffmpeg" switch +(yes, source tree comes with ffmpeg in it) on edge server and... + +SRS3 config on edge: +``` + listen 19350; + max_connections 1024; + srs_log_tank file; + srs_log_file ./objs/srs.slave.log; + srs_log_level error; + vhost __defaultVhost__ { + exec { + enabled on; + publish ./objs/ffmpeg/bin/ffmpeg -v quiet -re -i rtmp://127.0.0.1:1935/[app]?vhost=[vhost]/[stream] -c copy -vbsf h264_mp4toannexb -f mpegts "udp://[stream]?localaddr=127.0.0.1&pkt_size=1316"; + } + } +``` + +FFmpeg will convert all incoming streams to udp mpegts, binding them +to lo (127.0.0.1) interface (you dont want multicast to leak all around). +SRS3 will use [stream] for udp address, thats why rtmp stream have name +by its multicast group on origin server ;) +When converting from rtmp to mpegts, "-vbsf h264_mp4toannexb" option is needed! 
+After starting SRS3 with this config, i checked is stream forwarded from +master server properly. So, ffprobe again, now on edge server: +``` + [root@edge ~]# ffprobe udp://xxx:1234?localaddr=127.0.0.1 + Input #0, mpegts, from 'udp://xxx:5002?localaddr=127.0.0.1': + Duration: N/A, start: 29981.146500, bitrate: 130 kb/s + Program 1 + Metadata: + service_name : Channel 1 + service_provider: PBS + Stream #0:0[0x100]: Video: h264 (High) ([27][0][0][0] / 0x001B), yuvj420p(pc, bt709), 720x576 [SAR 16:11 DAR 20:11], 24 fps, 24 tbr, 90k tbn, 180k tbc + Stream #0:1[0x101]: Audio: aac ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 130 kb/s +``` + +I keep adding new streams with ffmpeg at origin server and they are avaliable +on slave server after second or two. Its almost a year when I started this origin +and edge SRS instances and they are still working without single restart ;) + +Many thanks to Winlin! + +## LijiangTV + +[丽江热线](https://www.lijiangtv.com/live/),丽江广播电视台。 + +## UPYUN + +2015,[又拍云直播部分](https://www.upyun.com/solutions/video.html),在SRS3基础上深度定制的版本。 + +## bravovcloud + +2015,[观止云直播服务器](http://www.bravovcloud.com/product/yff/),在SRS3基础上深度定制的版本。 + +## gosun + +2014.11,[高升CDN直播部分](http://www.gosun.com/service/streaming_acceleration.html),在SRS1的基础上深度定制的版本。 + +## 北京云博视 + +2014.10.10 by 谁变 63110982
+[http://www.y-bos.com/](http://www.y-bos.com/) + +## verycdn + +[verycdn](http://www.verycdn.cn/) 开始用SRS。 + +2014.9.13 by 1163202026 11:19:35
+目前SRS在测试中,没用过别的,直接上的SRS,测试下来比较OK,没什么大问题。 + +## SRS产品使用者 + +2014.7.23 by 阿才(1426953942) 11:04:01
+我接触srs才几个月,不敢发表什么意见,只是通过这段时间的学习,觉得这个项目做得相当棒,作者及项目团队工作相当出色,精神非常值得赞赏,目前还在学习中。 + +2014.7.23 by 随想曲(156530446) 11:04:48
+我作为使用者来说,就是这玩意完全当成正规高大上的产品用啦! + +2014.7.23 by 湖中鱼(283946467) 11:06:23
+me没怎么去具体分析srs,只是觉得作者文档写得比较流畅,不乏幽默感。但是目前我用到的功能只有rtmp推送直播及hls,这些nginx-rtmp都有,所以还是选择了用老外的东西。 + +2014.7.23 by 我是蝈蝈(383854294) 11:11:59
+为什么用SRS?轻便,省资源,有中文说明。SRS那些一站式的脚本与演示demo就能看出来作者是很用心的 + +## web秀场 + +2014.7 by 刘重驰 + +我们目前正在调研 准备用到web秀场 和 移动端流媒体服务上 + +## 视频直播 + +2014.7 by 大腰怪 + +## 远程视频直播 + +2014.7 by 韧 + +我们的分发服务器用的就是srs,简单易用,稳定性好 + +我们以前也用过几个分发软件,都没有srs好用,真心的 + +## chnvideo + +2014.7 [chnvideo](http://chnvideo.com/)编码器内置SRS提供RTMP和HLS拉服务。 + +## 某工厂监控系统 + +2014.4 by 斗破苍穷(154554381) + +某工厂的监控系统主要组成: +* 采集端:采集端采用IPC摄像头安装在工厂重要监控位置,通过网线或者wifi连接到监控中心交换机。 +* 监控中心:中心控制服务器,负责管理采集端和流媒体服务器,提供PC/Android/IOS观看平台。 +* 流媒体服务器:负责接收采集端的流,提供观看端RTMP/HLS的流。 +* 观看端:PC/Android/IOS。要求PC端的延迟在3秒内。Android/IOS延迟在20秒之内。 + +主要流程包括: +* 采集端启动:IPC摄像头像监控中心注册,获得发布地址,并告知监控中心采集端的信息,譬如摄像头设备名,ip地址,位置信息之类。 +* 采集端开始推流:IPC摄像头使用librtmp发布到地址,即将音频视频数据推送到RTMP流媒体服务器。 +* 流媒体服务器接收流:流媒体服务器使用SRS,接收采集端的RTMP流。FMS-3/3.5/4.5都有问题,估计是和librtmp对接问题。 +* 观看端观看:用户使用PC/Android/IOS登录监控中心后,监控中心返回所有的摄像头信息和流地址。PC端使用flash,延迟在3秒之内;Android/IOS使用HLS,延迟在20秒之内。 +* 时移:监控中心会开启录制计划,将RTMP流录制为FLV文件。用户可以在监控中心观看录制的历史视频。 + +## 网络摄像机 + +2014.4 by camer(2504296471) + +网络摄像机使用hi3518芯片,如何用网页无插件直接观看网络摄像机的流呢? + +目前有应用方式如下: +* hi3518上跑采集和推流程序(用srslibrtmp) +* 同时hi3518上还跑了srs/nginx-rtmp作为服务器 +* 推流程序推到hi3518本机的nginx服务器 +* PC上网页直接观看hi3518上的流 + +## IOS可以看的监控 + +2014.3 by 独孤不孤独(378668966) + +一般监控摄像头只支持输出RTMP/RTSP,或者支持RTSP方式读取流。如果想在IOS譬如IPad上看监控的流,怎么办?先部署一套rtmp服务器譬如nginx-rtmp/crtmpd/wowza/red5之类,然后用ffmpeg把rtsp流转成rtmp(或者摄像头直接推流到rtmp服务器),然后让服务器切片成hls输出,在IOS上观看。想想都觉得比较麻烦额,如果摄像头比较多怎么办?一个服务器还扛不住,部署集群? + +最简单的方式是什么?摄像头自己支持输出HLS流不就好了?也就是摄像头有个内网ip作为服务器,摄像头给出一个hls的播放地址,IOS客户端譬如IPad可以播放这个HLS地址。 + +SRS最适合做这个事情,依赖很少,提供[arm编译脚本](./sample-arm.md),只需要[改下configure的交叉编译工具](./arm.md#%E4%BD%BF%E7%94%A8%E5%85%B6%E4%BB%96%E4%BA%A4%E5%8F%89%E7%BC%96%E8%AF%91%E5%B7%A5%E5%85%B7)就可以编译了。 + +主要流程: +* 编译arm下的srs,部署到树莓派,在摄像头中启动srs。 +* 使用ffmpeg将摄像头的rtsp以RTMP方式推到srs。或者用自己程序采集设备数据推送RTMP流到srs。 +* srs分发RTMP流和HLS流。其实PC上也可以看了。 +* IOS譬如IPad上播放HLS地址。 + +## 清华活动直播 + +2014.2 by youngcow(5706022) + +清华大学每周都会有活动,譬如名家演讲等,使用SRS支持,少量的机器即可满足高并发。 + +主要流程: +* 在教室使用播控系统(摄像机+采集卡或者摄像机+导播台)推送RTMP流到主SRS +* 主SRS自动Forward给从SRS(参考[Forward](./forward.md)) +* PC客户端(Flash)使用FlowerPlayer,支持多个服务器的负载均衡 +* FlowerPlayer支持在两个主从SRS,自动选择一个服务器,实现负载均衡 + +主要的活动包括: +* 2014-02-23,丘成桐清华演讲 + +## 某农场监控 + +2014.1 by 孙悟空 + +农场中摄像头支持RTSP访问,FFMPEG将RTSP转换成RTMP推送到SRS,flash客户端播放RTMP流。同时flash客户端可以和控制服务器通信,控制农场的浇水和施肥。 + +![农场植物开花了](http://ossrs.net/srs.release/wiki/images/application/farm.jpg) + +截图:农场的植物开花了,据说种的是萝卜。。。 + +Winlin 2014.2 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/sample) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/security.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/security.md new file mode 100644 index 00000000..b06a499f --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/security.md @@ -0,0 +1,75 @@ +--- +title: Security +sidebar_label: Security +hide_title: false +hide_table_of_contents: false +--- + +# Security + +SRS提供了禁用或允许客户端的简单安全策略。 + +## Config + +Vhost中安全策略的配置: + +``` +vhost your_vhost { + # security for host to allow or deny clients. + # @see https://github.com/ossrs/srs/issues/211 + security { + # whether enable the security for vhost. 
+ # default: off + enabled on; + # the security list, each item format as: + # allow|deny publish|play all| + # for example: + # allow publish all; + # deny publish all; + # allow publish 127.0.0.1; + # deny publish 127.0.0.1; + # allow publish 10.0.0.0/8; + # deny publish 10.0.0.0/8; + # allow play all; + # deny play all; + # allow play 127.0.0.1; + # deny play 127.0.0.1; + # allow play 10.0.0.0/8; + # deny play 10.0.0.0/8; + # SRS apply the following simple strategies one by one: + # 1. allow all if security disabled. + # 2. default to deny all when security enabled. + # 3. allow if matches allow strategy. + # 4. deny if matches deny strategy. + allow play all; + allow publish all; + } +} +``` + +SRS应用安全策略的方式是: + +* 若securty没有开启,则允许所有。 +* 若security开启了,默认禁止所有。 +* 允许客户端,若找到了匹配的允许策略。 +* 禁用客户端,若找到了匹配的禁用策略。 + +参考配置文件`conf/security.deny.publish.conf`. + +## Kickoff Client + +可以踢掉连接的用户,参考[WIKI](./http-api.md#kickoff-client)。 + +## Bug + +关于这个功能的Bug,参考:[#211](https://github.com/ossrs/srs/issues/211) + +## Reload + +当Reload改变security配置后,只影响新连接的客户端,已经连接的客户端不受影响。 + +Winlin 2015.1 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/security) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/service.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/service.md new file mode 100644 index 00000000..9a17faa3 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/service.md @@ -0,0 +1,172 @@ +--- +title: Linux系统服务 +sidebar_label: Linux系统服务 +hide_title: false +hide_table_of_contents: false +--- + +# SRS系统服务 + +SRS提供多种启动的方式,包括: +* 在git目录直接启动,简单直接,但系统重启后需要手动启动。 +* 系统服务,init.d脚本:SRS提供`srs/trunk/etc/init.d/srs`脚本,可以作为CentOS或者Ubuntu的系统服务自动启动。 + +可以直接下载release的binary,提供了几个常见系统的安装包,安装程序会安装系统服务,直接以系统服务启动即可。参考:[Github: release](http://ossrs.net/srs.release)或者[国内镜像: release](http://ossrs.net/) + +## Manual + +若不需要添加到系统服务,每次重启后需要手动启动SRS,可以直接在srs的trunk目录执行脚本: + +```bash +cd srs/trunk && +./etc/init.d/srs start +``` + +## init.d + +SRS作为系统服务启动,需要以下几步: +* 安装srs:安装脚本会自动修改init.d脚本,将`ROOT="./"`改为安装目录。 +* 链接安装目录的`init.d/srs`到`/etc/init.d/srs` +* 添加到系统服务,CentOS和Ubuntu方法不一样。 + +Step1: 安装SRS + +编译SRS后,可执行命令安装SRS: + +```bash +make && sudo make install +``` + +安装命令会将srs默认安装到`/usr/local/srs`中,可以在configure时指定其他目录,譬如```./configure --prefix=`pwd`/_release```可以安装到当前目录的_release目录(可以不用sudo安装,直接用`make install`即可安装。 + +Step2: 链接脚本: + +```bash +sudo ln -sf \ + /usr/local/srs/etc/init.d/srs \ + /etc/init.d/srs +``` + +备注:若SRS安装到其他目录,将`/usr/local/srs`替换成其他目录。 + +备注:也可以使用其他的名称,譬如`/etc/init.d/srs`,可以任意名称,启动时也用该名称。 + +Step3:添加服务: + +```bash +#centos 6 +sudo /sbin/chkconfig --add srs +``` + +或者 + +```bash +#ubuntu12 +sudo update-rc.d srs defaults +``` + +使用init.d脚本管理SRS + +查看SRS状态: + +```bash +/etc/init.d/srs status +``` + +启动SRS: + +```bash +/etc/init.d/srs start +``` + +停止SRS: + +```bash +/etc/init.d/srs stop +``` + +重启SRS: + +```bash +/etc/init.d/srs restart +``` + +Reload SRS: + +```bash +/etc/init.d/srs reload +``` + +日志切割,给SRS发送`SIGUSR1`信号: + +```bash +/etc/init.d/srs rotate +``` + +平滑退出,给SRS发送`SIGQUIT`信号: + +```bash +/etc/init.d/srs grace +``` + +## systemctl + +Ubuntu20使用systemctl管理服务,我们在init.d的基础上新增了systemctl的配置: + +``` +./configure && make && sudo make install && +sudo ln -sf /usr/local/srs/etc/init.d/srs /etc/init.d/srs && +sudo cp -f /usr/local/srs/usr/lib/systemd/system/srs.service /usr/lib/systemd/system/srs.service && +sudo systemctl daemon-reload && sudo systemctl enable srs +``` + +> Remark: 必须拷贝srs.service,否则在enable srs时会出错。 + 
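+
+enable之后,可以先用标准的systemd命令确认服务单元已经正确注册(仅作检查用,可跳过):
+
+```
+sudo systemctl is-enabled srs &&
+sudo systemctl cat srs
+```
+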
+使用systemctl启动SRS服务: + +``` +sudo systemctl start srs +``` + +## Gracefully Upgrade + +Gracefully Upgrade是平滑升级,就是指老的连接服务完后退出,新版本的服务继续提供服务,对业务没有影响,涉及的技术包括: + +* 解决侦听冲突的问题,新版本的服务进程也需要侦听同样的端口,才能提供服务。一定时间内,新老进程是同时提供服务的。 +* 老进程关闭侦听,不再接受新连接。老进程上就只有已经存在的连接,等老的连接服务完后再退出。 这就是Gracefully Quit平滑退出。 + +> Note: 关于这个机制,这里[#1579](https://github.com/ossrs/srs/issues/1579#issuecomment-587233844)有更多的探讨。 + +SRS3主要支持Gracefully Quit平滑退出: + +* 使用信号`SIGQUIT`作为平滑退出信号,也可以使用服务命令`/etc/init.d/srs grace`。 +* 新增配置,`grace_start_wait`,等待一定时间后开始GracefullyQuit,等待Service摘除Pod,默认2.3秒,参考[#1579](https://github.com/ossrs/srs/issues/1595#issuecomment-587516567)。 +* 新增配置,`grace_final_wait`,等待连接退出后,需要等待一定的时间,默认3.2秒,参考[#1579](https://github.com/ossrs/srs/issues/1579#issuecomment-587414898)。 +* 新增配置,`force_grace_quit`,强制使用Gracefully Quit,而不用Fast Quit,原因参考[#1579](https://github.com/ossrs/srs/issues/1579#issuecomment-587475077)。 + +```bash +# For gracefully quit, wait for a while then close listeners, +# because K8S notify SRS with SIGQUIT and update Service simultaneously, +# maybe there is some new connections incoming before Service updated. +# @see https://github.com/ossrs/srs/issues/1595#issuecomment-587516567 +# default: 2300 +grace_start_wait 2300; +# For gracefully quit, final wait for cleanup in milliseconds. +# @see https://github.com/ossrs/srs/issues/1579#issuecomment-587414898 +# default: 3200 +grace_final_wait 3200; +# Whether force gracefully quit, never fast quit. +# By default, SIGTERM which means fast quit, is sent by K8S, so we need to +# force SRS to treat SIGTERM as gracefully quit for gray release or canary. +# @see https://github.com/ossrs/srs/issues/1579#issuecomment-587475077 +# default: off +force_grace_quit off; +``` + +> Note: 关于平滑退出的命令和演示,可以查看[#1579](https://github.com/ossrs/srs/issues/1579#issuecomment-587414898)。 + +Winlin 2019.10 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/service) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/snapshot.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/snapshot.md new file mode 100644 index 00000000..0d072cf8 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/snapshot.md @@ -0,0 +1,133 @@ +--- +title: Snapshot +sidebar_label: Snapshot +hide_title: false +hide_table_of_contents: false +--- + +# Snapshot + +截图有以下几种方式可以实现: + +1. HttpCallback:使用HTTP回调,收到`on_publish`事件后开启ffmpeg进程截图,收到`on_unpublish`事件后停止ffmpeg进程。SRS提供了实例,具体参考下面的内容。 +2. 
Transcoder:转码可以配置为截图,SRS提供了实例,具体参考下面的内容。 + +## HttpCallback + +下面的实例使用Http回调截图。 + +先启动实例Api服务器: +``` +cd research/api-server && go run server.go 8085 +``` + +SRS的配置如下: +``` +# snapshot.conf +listen 1935; +max_connections 1000; +daemon off; +srs_log_tank console; +vhost __defaultVhost__ { + http_hooks { + enabled on; + on_publish http://127.0.0.1:8085/api/v1/snapshots; + on_unpublish http://127.0.0.1:8085/api/v1/snapshots; + } + ingest { + enabled on; + input { + type file; + url ./doc/source.flv; + } + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine { + enabled off; + output rtmp://127.0.0.1:[port]/live?vhost=[vhost]/livestream; + } + } +} +``` + +启动SRS时,ingest将会推流,SRS会调用Api服务器的接口,开始截图: +``` +./objs/srs -c snapshot.conf +``` + +截图生成的目录: +``` +winlin:srs winlin$ ls -lh research/api-server/static-dir/live/*.png +-rw-r--r-- 1 winlin staff 73K Oct 20 13:35 livestream-001.png +-rw-r--r-- 1 winlin staff 91K Oct 20 13:35 livestream-002.png +-rw-r--r-- 1 winlin staff 11K Oct 20 13:35 livestream-003.png +-rw-r--r-- 1 winlin staff 167K Oct 20 13:35 livestream-004.png +-rw-r--r-- 1 winlin staff 172K Oct 20 13:35 livestream-005.png +-rw-r--r-- 1 winlin staff 264K Oct 20 13:35 livestream-006.png +lrwxr-xr-x 1 winlin staff 105B Oct 20 13:35 livestream-best.png -> livestream-006.png +``` + +其中,`live-livestream-best.png`会软链到尺寸最大的那个截图,避免生成黑屏的截图。 + +可以通过HTTP访问,譬如:[http://localhost:8085/live/livestream-best.png](http://localhost:8085/live/livestream-best.png) + +## Transcoder + +也可以使用Transcoder直接截图。SRS配置如下: + +``` +listen 1935; +max_connections 1000; +daemon off; +srs_log_tank console; +vhost __defaultVhost__ { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine snapshot { + enabled on; + iformat flv; + vfilter { + vf fps=1; + } + vcodec png; + vparams { + vframes 6; + } + acodec an; + oformat image2; + output ./objs/nginx/html/[app]/[stream]-%03d.png; + } + } + ingest { + enabled on; + input { + type file; + url ./doc/source.flv; + } + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine { + enabled off; + output rtmp://127.0.0.1:[port]/live?vhost=[vhost]/livestream; + } + } +} +``` + +启动SRS就可以生成截图: +``` +winlin:srs winlin$ ls -lh objs/nginx/html/live/*.png +-rw-r--r-- 1 winlin staff 265K Oct 20 14:52 livestream-001.png +-rw-r--r-- 1 winlin staff 265K Oct 20 14:52 livestream-002.png +-rw-r--r-- 1 winlin staff 287K Oct 20 14:52 livestream-003.png +-rw-r--r-- 1 winlin staff 235K Oct 20 14:52 livestream-004.png +-rw-r--r-- 1 winlin staff 315K Oct 20 14:52 livestream-005.png +-rw-r--r-- 1 winlin staff 405K Oct 20 14:52 livestream-006.png +``` + +注意:SRS没有办法选出最佳的截图。 + +Winlin 2015.10 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/snapshot) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/special-control.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/special-control.md new file mode 100644 index 00000000..5c580daf --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/special-control.md @@ -0,0 +1,176 @@ +--- +title: Special Control +sidebar_label: Special Control +hide_title: false +hide_table_of_contents: false +--- + +# SpecialControl + +SRS提供了一些特殊的配置,主要用来和各种系统对接的设置。 + +## Send Minimal Interval + +``` +vhost __defaultVhost__ { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. 
+ play { + # the minimal packets send interval in ms, + # used to control the ndiff of stream by srs_rtmp_dump, + # for example, some device can only accept some stream which + # delivery packets in constant interval(not cbr). + # @remark 0 to disable the minimal interval. + # @remark >0 to make the srs to send message one by one. + # @remark user can get the right packets interval in ms by srs_rtmp_dump. + # default: 0 + send_min_interval 10.0; + } +} +``` + +## Reduce Sequence Header + +``` +vhost __defaultVhost__ { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # whether reduce the sequence header, + # for some client which cannot got duplicated sequence header, + # while the sequence header is not changed yet. + # default: off + reduce_sequence_header on; + } +} +``` + +## Publish 1st Packet Timeout + +``` +vhost __defaultVhost__ { + # the config for FMLE/Flash publisher, which push RTMP to SRS. + publish { + # the 1st packet timeout in ms for encoder. + # default: 20000 + firstpkt_timeout 20000; + } +} +``` + +## Publish Normal Timeout + +``` +vhost __defaultVhost__ { + # the config for FMLE/Flash publisher, which push RTMP to SRS. + publish { + # the normal packet timeout in ms for encoder. + # default: 5000 + normal_timeout 7000; + } +} +``` + +## Debug SRS Upnode + +``` +vhost __defaultVhost__ { + # when upnode(forward to, edge push to, edge pull from) is srs, + # it's strongly recommend to open the debug_srs_upnode, + # when connect to upnode, it will take the debug info, + # for example, the id, source id, pid. + # please see https://ossrs.net/lts/zh-cn/docs/v4/doc/log + # default: on + debug_srs_upnode on; +} +``` + +## UTC Time + +``` +# whether use utc_time to generate the time struct, +# if off, use localtime() to generate it, +# if on, use gmtime() instead, which use UTC time. +# default: off +utc_time off; +``` + +## HLS TS Floor + +``` +vhost __defaultVhost__ { + hls { + # whether use floor for the hls_ts_file path generation. + # if on, use floor(timestamp/hls_fragment) as the variable [timestamp], + # and use enahanced algorithm to calc deviation for segment. + # @remark when floor on, recommend the hls_segment>=2*gop. + # default: off + hls_ts_floor off; + } +} +``` + +## HLS Wait Keyframe + +``` +vhost __defaultVhost__ { + hls { + # whether wait keyframe to reap segment, + # if off, reap segment when duration exceed the fragment, + # if on, reap segment when duration exceed and got keyframe. + # default: on + hls_wait_keyframe on; + } +} +``` + +## HttpHooks On HLS Notify + +``` +vhost __defaultVhost__ { + http_hooks { + # when srs reap a ts file of hls, call this hook, + # used to push file to cdn network, by get the ts file from cdn network. + # so we use HTTP GET and use the variable following: + # [app], replace with the app. + # [stream], replace with the stream. + # [ts_url], replace with the ts url. + # ignore any return data of server. + # @remark random select a url to report, not report all. + on_hls_notify http://127.0.0.1:8085/api/v1/hls/[app]/[stream][ts_url]; + } +} +``` + +## TCP NoDelay + +``` +vhost __defaultVhost__ { + # whether enable the TCP_NODELAY + # if on, set the nodelay of fd by setsockopt + # default: off + tcp_nodelay on; +} +``` + +## ATC Auto + +``` +vhost __defaultVhost__ { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. 
+ play { + # whether enable the auto atc, + # if enabled, detect the bravo_atc="true" in onMetaData packet, + # set atc to on if matched. + # always ignore the onMetaData if atc_auto is off. + # default: off + atc_auto off; + } +} + +Winlin 2015.8 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/special-control) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srs-lib-rtmp.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srs-lib-rtmp.md new file mode 100644 index 00000000..e4de2c0a --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srs-lib-rtmp.md @@ -0,0 +1,16 @@ +--- +title: Librtmp +sidebar_label: Librtmp +hide_title: false +hide_table_of_contents: false +--- + +# SRS提供的librtmp + +SRS不再维护srs-librtmp,原因请看[#32](https://github.com/ossrs/srs-librtmp/issues/32)。 + +Winlin 2014.11 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/srs-lib-rtmp) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srt-codec.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srt-codec.md new file mode 100644 index 00000000..d73e9915 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srt-codec.md @@ -0,0 +1,14 @@ +--- +title: SRT Codec +sidebar_label: SRT Codec +hide_title: false +hide_table_of_contents: false +--- + +# SRT codec support + +迁移到了[SRT](./srt.md). + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/srt-codec) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srt-params.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srt-params.md new file mode 100644 index 00000000..53ae46f6 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srt-params.md @@ -0,0 +1,14 @@ +--- +title: SRT 参数 +sidebar_label: SRT 参数 +hide_title: false +hide_table_of_contents: false +--- + +# SRT Config + +迁移到了[SRT](./srt.md). + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/srt-params) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srt-url.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srt-url.md new file mode 100644 index 00000000..8899026c --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srt-url.md @@ -0,0 +1,14 @@ +--- +title: SRT URL +sidebar_label: SRT URL +hide_title: false +hide_table_of_contents: false +--- + +# SRT URL Specification + +迁移到了[SRT](./srt.md). 
+ +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/srt-url) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srt.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srt.md new file mode 100644 index 00000000..15705282 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/srt.md @@ -0,0 +1,398 @@ +--- +title: SRT +sidebar_label: SRT +hide_title: false +hide_table_of_contents: false +--- + +# SRT + +[SRT](https://github.com/Haivision/srt),全称是Secure Reliable Transport,是Haivision推出的一个广播传输协议,主要是为了 +替代RTMP,实际上OBS、vMix、FFmpeg等直播推流编码器都已经支持了SRT,在实际中有较大比例的用户使用SRT推流。 + +Adobe公司没有一直更新RTMP协议,也没有提交给标准组织比如RFC,因此很多新功能都没有支持,比如HEVC或Opus。 +直到2023.03,终于[Enhanced RTMP](https://github.com/veovera/enhanced-rtmp)项目建立,开始支持了HEVC和AV1, +SRS和OBS已经支持了基于Enhanced RTMP的[HEVC](https://github.com/veovera/enhanced-rtmp/issues/4)编码。 + +由于SRT使用的封装是TS封装,因此对于新的Codec天然就支持。而SRT基于UDP协议,因此对于延迟和弱网传输,也比RTMP要好不少。 +一般RTMP延迟在1到3秒以上,而SRT的延迟在300到500毫秒,而且在弱网下表现也很稳定。在广播电视l领域,由于长距离跨国跨地区 +传输,或者户外广播时,网络不稳定,因此SRT比RTMP的优势会更明显。 + +SRT是SRS的核心协议,SRS早在2020年即支持了SRT协议,并且在2022年实现了SRT协程化,从而大幅提高了SRT和其他核心协议的一致性。 +比如回调和API的支持,SRT和RTMP保持了非常高的一致性。 + +研发的详细过程请参考[#1147](https://github.com/ossrs/srs/issues/1147)。 + +## Usage + +SRS内置SRT的支持,可以用[docker](./getting-started.md)或者[从源码编译](./getting-started-build.md): + +```bash +docker run --rm -it -p 1935:1935 -p 8080:8080 -p 10080:10080/udp \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 \ + ./objs/srs -c conf/srt.conf +``` + +使用 [FFmpeg(点击下载)](https://ffmpeg.org/download.html) 或 [OBS(点击下载)](https://obsproject.com/download) 推流: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -pes_payload_size 0 -f mpegts \ + 'srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=publish' +``` + +打开下面的页面播放流(若SRS不在本机,请将localhost更换成服务器IP): + +* RTMP(VLC/ffplay): `rtmp://localhost/live/livestream` +* HLS by SRS player: [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html) +* SRT(VLC/ffplay): `srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=request` + +SRS支持将SRT转换成其他协议,下面会详细描述。 + +## Config + +SRT相关的配置如下: + +```bash +srt_server { + # whether SRT server is enabled. + # Overwrite by env SRS_SRT_SERVER_ENABLED + # default: off + enabled on; + # The UDP listen port for SRT. + # Overwrite by env SRS_SRT_SERVER_LISTEN + listen 10080; + # For detail parameters, please read wiki: + # @see https://ossrs.net/lts/zh-cn/docs/v5/doc/srt-params + # @see https://ossrs.io/lts/en-us/docs/v5/doc/srt-params + # The maxbw is the max bandwidth of the sender side. + # -1: Means the biggest bandwidth is infinity. + # 0: Means the bandwidth is determined by SRTO_INPUTBW. + # >0: Means the bandwidth is the configuration value. + # Overwrite by env SRS_SRT_SERVER_MAXBW + # default: -1 + maxbw 1000000000; + # Maximum Segment Size. Used for buffer allocation and rate calculation using packet counter assuming fully + # filled packets. Each party can set its own MSS value independently. During a handshake the parties exchange + # MSS values, and the lowest is used. + # Overwrite by env SRS_SRT_SERVER_MSS + # default: 1500 + mss 1500; + # The timeout time of the SRT connection on the sender side in ms. When SRT connects to a peer costs time + # more than this config, it will be close. + # Overwrite by env SRS_SRT_SERVER_CONNECT_TIMEOUT + # default: 3000 + connect_timeout 4000; + # The timeout time of SRT connection on the receiver side in ms. When the SRT connection is idle + # more than this config, it will be close. 
+ # Overwrite by env SRS_SRT_SERVER_PEER_IDLE_TIMEOUT + # default: 10000 + peer_idle_timeout 8000; + # Default app for vmix, see https://github.com/ossrs/srs/pull/1615 + # Overwrite by env SRS_SRT_SERVER_DEFAULT_APP + # default: live + default_app live; + # The peerlatency is set by the sender side and will notify the receiver side. + # Overwrite by env SRS_SRT_SERVER_PEERLATENCY + # default: 0 + peerlatency 0; + # The recvlatency means latency from sender to receiver. + # Overwrite by env SRS_SRT_SERVER_RECVLATENCY + # default: 120 + recvlatency 0; + # This latency configuration configures both recvlatency and peerlatency to the same value. + # Overwrite by env SRS_SRT_SERVER_LATENCY + # default: 120 + latency 0; + # The tsbpd mode means timestamp based packet delivery. + # SRT sender side will pack timestamp in each packet. If this config is true, + # the receiver will read the packet according to the timestamp in the head of the packet. + # Overwrite by env SRS_SRT_SERVER_TSBPDMODE + # default: on + tsbpdmode off; + # The tlpkdrop means too-late Packet Drop + # SRT sender side will pack timestamp in each packet, When the network is congested, + # the packet will drop if latency is bigger than the configuration in both sender side and receiver side. + # And on the sender side, it also will be dropped because latency is bigger than configuration. + # Overwrite by env SRS_SRT_SERVER_TLPKTDROP + # default: on + tlpktdrop off; + # The send buffer size of SRT. + # Overwrite by env SRS_SRT_SERVER_SENDBUF + # default: 8192 * (1500-28) + sendbuf 2000000; + # The recv buffer size of SRT. + # Overwrite by env SRS_SRT_SERVER_RECVBUF + # default: 8192 * (1500-28) + recvbuf 2000000; + # The passphrase of SRT. + # If passphrase is no empty, all the srt client must be using the correct passphrase to publish or play, + # or the srt connection will reject. The length of passphrase must be in range 10~79. + # @see https://github.com/Haivision/srt/blob/master/docs/API/API-socket-options.md#srto_passphrase. + # Overwrite by env SRS_SRT_SERVER_PASSPHRASE + # default: "" + passphrase xxxxxxxxxxxx; + # The pbkeylen of SRT. + # The pbkeylen determined the AES encrypt algorithm, this option only allow 4 values which is 0, 16, 24, 32 + # @see https://github.com/Haivision/srt/blob/master/docs/API/API-socket-options.md#srto_pbkeylen. + # Overwrite by env SRS_SRT_SERVER_PBKEYLEN + # default: 0 + pbkeylen 16; +} +vhost __defaultVhost__ { + srt { + # Whether enable SRT on this vhost. + # Overwrite by env SRS_VHOST_SRT_ENABLED for all vhosts. + # Default: off + enabled on; + # Whether covert SRT to RTMP stream. + # Overwrite by env SRS_VHOST_SRT_TO_RTMP for all vhosts. 
+ # Default: on + srt_to_rtmp on; + } +} +``` + +> Note: 这里只是推流和拉流的配置,还有些其他的配置是在其他地方的,比如RTMP转[HTTP-FLV](./flv.md#config)或HTTP-TS等。 + +SRT所有的配置参数,可以参考[libsrt](https://github.com/Haivision/srt/blob/master/docs/API/API-socket-options.md#list-of-options)文档。 +下面列出SRS支持的和重要的参数: + +* `tsbpdmode`, 按时间戳投递包模式(Timestamp based packet delivery), 给每一个报文打上时间戳,应用层读取时,会按照报文时间戳的间隔读取 +* `latency`, 单位:ms(毫秒)。这个latency配置同时配置了recvlatency和peerlatency成同一个值。如果recvlatency配置,将使用recvlatency的配置;如果peerlatency配置,将使用peerlatency的配置。 +* `recvlatency`, 单位:ms(毫秒)。这是接收方缓存时间长度,其包括报文从发送方出发,通过网络,接收方接收,直到上送给上层媒体应用。也就是说这个缓存时间长度,其应该大于RTT,并且要为多次丢包重传做好准备。 + * 低延时网络:如果应用对延时要求低,可以考虑配置的参数低于250ms(常人对音视频低于250ms的延时不会被影响) + * 长距离,RTT比较大:因为传输距离长,RTT比较大,就不能配置太小的latency;或者是重要的直播,不要求低延时,但是要求无卡顿播放,无抖动;建议配置的latency >= 3*RTT, 因为其中包含丢包重传和ack/nack的周期。 +* `peerlatency`, 单位:ms(毫秒)。是srt发送方设置peerlatency,告知接收方的latency buffer的时间长度应该是多少;如果接收方也配置了recvlatency,接收方就取两者中最大值作为latency buffer时间长度。 + * 低延时网络: 如果应用对延时要求低,可以考虑配置的参数低于250ms(常人对音视频低于250ms的延时不会被影响) + * 长距离,RTT比较大: 因为传输距离长,RTT比较大,就不能配置太小的latency;或者是重要的直播,不要求低延时,但是要求无卡顿播放,无抖动;建议配置的latency >= 3*RTT, 因为其中包含丢包重传和ack/nack的周期。 +* `tlpkdrop`, 是否丢弃太晚到达的包(Too-late Packet Drop), 因为srt是针对于音视频的传输协议,接收方是基于报文时间戳或编码bitrate来上送报文给上层应用的。也就是说,如果报文在接收方latency timeout后到达,报文也会应为太晚到达而被丢弃。在直播模式下,tlpkdrop默认是true,因为直播对延时要求高。 +* `maxbw`, 单位: bytes/s, 最大的发送带宽。`-1`: 表示最大带宽为1Gbps;`0`: 由SRTO_INPUTBW的计算决定(不推荐在直播模式下设置为0);`>0`: 带宽为bytes/s。 +* `mss`, 单位: byte。单个发送报文最大的size。这个报文的大小指的是ip报文,其包含udp和srt协议报文的。 +* `connect_timeout`,单位:ms(毫秒),SRT建立连接超时时间。 +* `peer_idle_timeout`, 单位:ms(毫秒),SRT对端超时时间。 +* `sendbuf`, 单位:byte, SRT发送buffer大小; +* `recvbuf`, 单位:byte, SRT接收buffer大小; +* `payloadsize`, 单位:byte, 因为srt承载的媒体数据是mpegts封装,而每个mpegts的最小包是188bytes,所以payloadsize是188的倍数,默认是1316bytes(188x7) +* `passphrase`, SRT连接密码,默认值为空(不加密)。SRT连接密码,长度10-79之间,客户端必须输入正确密码才能建连成功,否则连接将会被拒绝。 +* `pbkeylen`, SRT密钥长度,默认值为0。推流加密key长度,仅可输入0/16/24/32,对应不同的密钥长度的AES加密。当设置了`passphrase`选项时才需要设置这个参数。 +* `srt_to_rtmp` 是否开启SRT转换为RTMP,转换RTMP后才能以RTMP、HTTP-FLV和HLS等协议播放。 + +## Low Latency Mode + +若你希望最低延迟,可以容忍偶然的丢包,则可以考虑这个配置。 + +> Note: 注意SRT针对丢包会有重传,只有网络非常糟糕时,非常迟到达或未到达的包,在开启了`tlpktdrop`时才会丢弃,导致花屏。 + +对于赛事、活动、电视制作等长距离推流,链路一般都是提前准备好,且独占稳定的。这类场景下,需要满足固定延迟,允许一定程度的丢包(极小概率) +一般会在推流开始前探测链路的RTT, 并作为依据进行配置SRT推拉流参数。 + +推荐配置如下,假设RTT是100ms: + +```bash +srt_server { + enabled on; + listen 10080; + connect_timeout 4000; + peerlatency 300; # RTT * 3 + recvlatency 300; # RTT * 3 + latency 300; # RTT * 3 + tlpktdrop on; + tsbpdmode on; +} +``` + +本节介绍如何降低SRT的延迟,与每个环节都有关。总结如下: + +* 注意客户端的Ping和CPU,这些容易被忽视,但会影响延迟。 +* 请使用LightHouse SRS作为服务器,因为它已经调整过,不会造成额外的延迟。 +* RTT的增加会影响延迟。通常,RTT低于60ms时,可以稳定在预期延迟。 +* RTT为100ms时,延迟约为300ms;RTT为150ms时,延迟增加到约430ms。 +* 丢包会影响画质。丢包率超过10%时,会出现画面闪烁和丢帧,但对延迟影响不大,尤其是音频。 +* 目前,使用vmix或芯象推送SRT并用ffplay播放,可实现最低约200ms的延迟。 +* 使用OBS推送SRT并用ffplay播放时,延迟约为350ms。 + +特别提示:根据目前的测试,SRT的延迟上限为300ms。虽然vmix可以设置为1ms延迟,但实际上并不起作用,实际延迟只会更糟,而不是更好。但是,如果网络维护得好,300ms的延迟是足够的。 + +超高清、超低延迟SRT直播推荐方案: + +* 推流:芯象(230ms)、vMix(200ms)、OBS(300ms)。 +* 播放:ffplay(200ms)、vMix(230ms)、芯象(400ms)。 + +| - | ffplay | vMix播放 | 芯象播放 | +| --- | ---- | --- | --- | +| vMix推送 | 200ms | 300ms | - | +| OBS推送 | 300ms | - | - | +| 芯象推送(http://www.sinsam.com/) | 230ms - | 400ms | + +延迟涉及每个环节,以下是每个环节的详细配置。目录如下: + +* [CPU](https://github.com/ossrs/srs/issues/3464#lagging-cpu) 客户端CPU会导致延迟。 +* [Ping](https://github.com/ossrs/srs/issues/3464#lagging-ping) 客户端网络RTT影响延迟。 +* [编码器](https://github.com/ossrs/srs/issues/3464#lagging-encoder) 配置编码器低延迟模式。 +* [服务器](https://github.com/ossrs/srs/issues/3464#lagging-server) 配置服务器低延迟。 +* 
[SRT](https://github.com/ossrs/srs/issues/3464#lagging-srt) SRT服务器特殊配置。 +* [播放器](https://github.com/ossrs/srs/issues/3464#lagging-player) 配置播放器低延迟。 +* [基准测试](https://github.com/ossrs/srs/issues/3464#lagging-benchmark) 准确测量延迟。 +* [码率](https://github.com/ossrs/srs/issues/3464#lagging-bitrate) 不同码率(0.5至6Mbps)对延迟的影响。 +* [网络抖动](https://github.com/ossrs/srs/issues/3464#lagging-jitter) 丢包和不同RTT对延迟的影响。 +* [报告](https://github.com/ossrs/srs/issues/3464#lagging-report) 测试报告。 + +## High Quality Mode + +若你希望最高质量,极小概率花屏也不能容忍,可以容忍延迟变大,则可以考虑这个配置。 + +在公网环境使用SRT,链路不稳定,RTT也会动态变化。对于低延迟直播场景,需要自适应延迟,而且一定不能丢包。 + +推荐配置如下: + +``` +srt_server { + enabled on; + listen 10080; + connect_timeout 4000; + peerlatency 0; + recvlatency 0; + latency 0; + tlpktdrop off; + tsbpdmode off; +} +``` + +> Note: 如果你使用了如上配置仍然花屏,请参考[FFmpeg patch](https://github.com/FFmpeg/FFmpeg/commit/9099046cc76c9e3bf02f62a237b4d444cdaf5b20) + +## Video codec + +当前支持H264和HEVC编码。 由于SRT协议传输媒体是MPEG-TS,TS对HEVC编码格式本来就是支持的,标准类型值为(streamtype)0x24, +所以SRT传输HEVC编码的视频格式是天然支持的,不需要做修改。 + +使用下面的命令,支持HEVC编码的推流: +```bash +ffmpeg -re -i source.mp4 -c:v libx265 -c:a copy -pes_payload_size 0 -f mpegts \ + 'srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=publish' +``` + +使用下面的命令,播放HEVC编码的播放: +```bash +ffplay 'srt://127.0.0.1:10080?streamid=#!::h=live/livestream,m=request' +``` + +## Audio codec + +当前支持编码格式: +* AAC,支持采样率44100, 22050, 11025, 5512. + +## FFmpeg push SRT stream + +当使用FFmpeg推AAC音频格式的SRT流时, 建议在命令行里加上`-pes_payload_size 0`这个参数。这个参数会阻止合并多个AAC音频帧在一个PES包里, +这样可以减少延迟以及由于音视频同步问题. + +FFmpeg命令行示例: + +```bash +ffmpeg -re -i source.mp4 -c copy -pes_payload_size 0 -f mpegts \ + 'srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=publish' +``` + +## SRT URL + +SRT的URL是反人类的设计,它使用的是YAML格式,而不是一般常见的URL定义。 + +先考虑SRS对于RTMP地址的定义,请参考 [RTMP URL](./rtmp-url-vhost.md) 的定义: + +* 常规RTMP格式(无vhost) + - `rtmp://hostip:port/app/stream` + - 例子: `rtmp://10.111.1.100:1935/live/livestream` + - 上面例子中app="live", stream="livestream" +* 复杂RTMP格式(有vhost) + - `rtmp://hostip:port/app/stream?vhost=xxx` + - 例子: `rtmp://10.111.1.100:1935/live/livestream?vhost=srs.com.cn` + - 上面例子中vhost="srs.com.cn", app="live", stream="livestream" + +无论是推流还是拉流,RTMP地址都是一个地址,RTMP是使用协议层的消息来确定的。`publish消息` 表示是对该url进行推流, +`play消息` 表示是对该url进行拉流。 + +SRT是四层传输协议,所以无法确定对某个srt url操作是推流还是拉流。 在SRT文档中有对推/拉流的推荐:[AccessControl.md](https://github.com/Haivision/srt/blob/master/docs/features/access-control.md) +关键方法是通过streamid参数来明确url的作用,streamid的格式符合YAML格式。 + +下面是一个SRT的URL,没有vhost的情况: +* 推流地址: `srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=publish` +* 拉流地址: `srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=request` +* 对应的RTMP拉流地址为:`rtmp://127.0.0.1/live/livestream` + +其中: +* `#!::`, 为开始,符合yaml格式标准。 +* `r`, 映射到rtmp地址中的`app/stream`。 +* `m`, `publish`表示推流, `request`表示拉流。 + +下面是SRT的URL,支持vhost的情况: +* 推流地址: `srt://127.0.0.1:10080?streamid=#!::h=srs.srt.com.cn,r=live/livestream,m=publish` +* 拉流地址: `srt://127.0.0.1:10080?streamid=#!::h=srs.srt.com.cn,r=live/livestream,m=request` +* 对应的RTMP地址为:`rtmp://127.0.0.1/live/livestream?vhost=srs.srt.com.cn` + +其中: +* `h`, 映射到rtmp地址中的vhost + +## SRT URL without streamid + +有些设备不支持streamid的输入,或者不支持streamid里面的一些特殊符号,比如`!`,`#`,`,`等字符。 +这种情况下,允许仅用`ip:port`进行推流,比如`srt://127.0.0.1:10080`。对于这种url,SRS会将 +streamid默认为`#!::r=live/livestream,m=publish`。 + +也就是说,下面两个地址等价: +* `srt://127.0.0.1:10080` +* `srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=publish` + +## Authentication + +关于SRT URL的定义,请参考[SRT URL Schema](#srt-url)。 + +这里有一个特别说明,如何包含认证信息,参考[SRS URL: 
Token](./rtmp-url-vhost.md#parameters-in-url)。 +如果您需要包含诸如密钥参数的认证信息,您可以在streamid中指定,例如: + +``` +streamid=#!::r=live/livestream,secret=xxx +``` + +这是一个具体的例子: + +``` +ffmpeg -re -i doc/source.flv -c copy -f mpegts \ + 'srt://127.0.0.1:10080?streamid=#!::r=live/livestream,secret=xxx,m=publish' +``` + +对应的RTMP的地址将如下所示: + +``` +rtmp://127.0.0.1:1935/live/livestream?secret=xxx +``` + +## SRT Encoder + +SRT编码器是基于SRT自适应比特率的编码器。它根据SRT协议中的RTT、maxBw和inflight等信息预测低延迟的出站带宽,动态调整编码比特率以便适配网络出口带宽。 + +GitHub地址:[runner365/srt_encoder](https://github.com/runner365/srt_encoder) + +基于BBR的基本拥塞控制算法,编码器根据一个周期(1~2秒)内的minRTT、maxBw和当前inflight预测编码比特率的状态机(保持、增加、减少)。 + +注意: +1) 这个例子只是一个基本的BBR算法示例,用户可以实现CongestionCtrlI类中的接口来改进BBR算法。 +2) SRT仍然是一个不断发展的协议,其拥塞控制和外部参数更新的准确性也在不断提高。 + +使用简单,编译后,您可以直接使用ffmpeg命令行。 + +## Coroutine Native SRT + +SRS如何实现SRT?基于协程的SRT架构,我们需要将其适配到ST,因为SRT有自己的IO调度,这样我们才能实现最佳的可维护性。 + +* 关于具体的代码提交,请参考[#3010](https://github.com/ossrs/srs/pull/3010)或[1af30dea](https://github.com/ossrs/srs/commit/1af30dea324d0f1729aabd22536ea62e03497d7d) + +> 注意:请注意,SRS 4.0中的SRT是非ST架构,它是通过启动一个单独的线程来实现的,可能没有原生ST协程架构的可维护性。 + +## Q&A + +1. SRS是否支持将SRT流转发到Nginx? + +> 是的,支持。您可以使用OBS/FFmpeg将SRT流推送到SRS,SRS将SRT流转换为RTMP协议。然后,您可以将RTMP转换为HLS、FLV、WebRTC,并将RTMP流转发到Nginx。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/srt) + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/streamer.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/streamer.md new file mode 100644 index 00000000..7a61af63 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/streamer.md @@ -0,0 +1,231 @@ +--- +title: Stream Converter +sidebar_label: Caster +hide_title: false +hide_table_of_contents: false +--- + +# Stream Caster + +Stream Converter侦听特殊的TCP/UDP端口,接受客户端连接和媒体流,并转成RTMP流,推送给SRS。 + +简单来说,它将其他流转成RTMP流,工作流如下: + +```text +Client ---PUSH--> Stream Converter --RTMP--> SRS --RTMP/FLV/HLS/WebRTC--> Clients +``` + +> Note: 有些流可能不止一个流,甚至有不同的传输通道。 + +## Use Scenario + +常见的应用场景包括: + +* Push MPEG-TS over UDP, 通过UDP协议,将裸流MPEGTS推送到SRS,主要是一些编码器支持。 +* Push FLV by HTTP POST, 通过HTTP POST,将FLV流推送到SRS,主要是移动端支持。 +* Push GB28181 over TCP, 通过TCP协议,将摄像头的流推送到SRS,主要是监控摄像头支持。 + +> Note: FFmpeg支持推送MPEGTS和FLV流到SRS,可以用FFmpeg测试。 + +## Build + +SRS默认开启Stream Converter的支持,不需要特别的编译参数。但某些协议可能需要特别的编译参数,请参考下面具体协议的使用介绍。 + +## Protocols + +目前Stream Converter支持的协议包括: + +* MPEG-TS over UDP: MPEG-TS裸流,基于UDP协议。 +* FLV by HTTP POST: FLV流,基于HTTP协议。 +* GB28181-2016: SIP和MPEG-PS流,基于TCP协议。 + +## Config + +Stream Converter相关的配置如下: + +``` +# Push MPEGTS over UDP to SRS. +stream_caster { + # Whether stream converter is enabled. + # Default: off + enabled on; + # The type of stream converter, could be: + # mpegts_over_udp, push MPEG-TS over UDP and convert to RTMP. + caster mpegts_over_udp; + # The output rtmp url. + # For mpegts_over_udp converter, the typically output url: + # rtmp://127.0.0.1/live/livestream + output rtmp://127.0.0.1/live/livestream; + # The listen port for stream converter. + # For mpegts_over_udp converter, listen at udp port. for example, 8935. + listen 8935; +} + +# Push FLV by HTTP POST to SRS. +stream_caster { + # Whether stream converter is enabled. + # Default: off + enabled on; + # The type of stream converter, could be: + # flv, push FLV by HTTP POST and convert to RTMP. + caster flv; + # The output rtmp url. 
+ # For flv converter, the typically output url: + # rtmp://127.0.0.1/[app]/[stream] + # For example, POST to url: + # http://127.0.0.1:8936/live/livestream.flv + # Where the [app] is "live" and [stream] is "livestream", output is: + # rtmp://127.0.0.1/live/livestream + output rtmp://127.0.0.1/[app]/[stream]; + # The listen port for stream converter. + # For flv converter, listen at tcp port. for example, 8936. + listen 8936; +} + +# For GB28181 server, see https://github.com/ossrs/srs/issues/3176 +# For SIP specification, see https://www.ietf.org/rfc/rfc3261.html +# For GB28181 2016 spec, see https://openstd.samr.gov.cn/bzgk/gb/newGbInfo?hcno=469659DC56B9B8187671FF08748CEC89 +stream_caster { + # Whether stream converter is enabled. + # Default: off + enabled off; + # The type of stream converter, could be: + # gb28181, Push GB28181 stream and convert to RTMP. + caster gb28181; + # The output rtmp url. + # For gb28181 converter, the typically output url: + # rtmp://127.0.0.1/live/[stream] + # The available variables: + # [stream] The video channel codec ID. + output rtmp://127.0.0.1/live/[stream]; + # The listen TCP/UDP port for stream converter. + # For gb28181 converter, listen at TCP/UDP port. for example, 9000. + # @remark We always enable bundle for media streams at this port. + listen 9000; + # SIP server for GB28181. Please note that this is only a demonstrated SIP server, please never use it in your + # online production environment. Instead please use [jsip](https://github.com/usnistgov/jsip) and there is a demo + # [srs-sip](https://github.com/ossrs/srs-sip) also base on it. + sip { + # Whether enable embedded SIP server. + # Default: on + enabled on; + # The SIP listen port, for both TCP and UDP protocol. + # Default: 5060 + listen 5060; + # The SIP or media transport timeout in seconds. + # Default: 60 + timeout 60; + # When media disconnect, the wait time in seconds to re-invite device to publish. During this wait time, device + # might send bye or unregister message(expire is 0), so that we will cancel the re-invite. + # Default: 5 + reinvite 5; + # The exposed candidate IPs, response in SDP connection line. It can be: + # * Retrieve server IP automatically, from all network interfaces. + # $CANDIDATE Read the IP from ENV variable, use * if not set. + # x.x.x.x A specified IP address or DNS name, use * if 0.0.0.0. + # Default: * + candidate *; + } +} +``` + +下面描述具体协议的使用。 + +## Push MPEG-TS over UDP + +你可以推送MPEGTS UDP流到SRS,转换成其他的协议。 + +首先,使用MPEGTS相关配置启动SRS: + +```bash +./objs/srs -c conf/push.mpegts.over.udp.conf +``` + +> Note: 关于详细的配置,请参考[Config](#config)中`mpegts_over_udp`的部分。 + +然后,使用编码器推流,比如用FFmpeg: + +```bash +ffmpeg -re -f flv -i doc/source.flv -c copy -f mpegts udp://127.0.0.1:8935 +``` + +现在,就可以播放流了: + +* [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?stream=livestream.flv) +* [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?stream=livestream.m3u8) +* [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html?autostart=true) + +注意MPEGTS UDP是每个端口对应一个具体的RTMP流。 + +> Note: 关于开发的一些细节,请参考[#250](https://github.com/ossrs/srs/issues/250). 
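+
+因为每个UDP端口只对应一路输出的RTMP流,如果要同时接入多路MPEG-TS流,可以配置多个stream_caster,各自侦听不同的端口。下面是一个示意配置(流名和端口仅为举例,请按实际情况调整):
+
+```
+# 仅为示意:每路MPEG-TS流使用独立的stream_caster和UDP端口。
+stream_caster {
+    enabled on;
+    caster mpegts_over_udp;
+    output rtmp://127.0.0.1/live/stream1;
+    listen 8935;
+}
+stream_caster {
+    enabled on;
+    caster mpegts_over_udp;
+    output rtmp://127.0.0.1/live/stream2;
+    listen 8937;
+}
+```
+
+推第二路流时,把FFmpeg命令中的目标端口换成8937即可。
+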
+ +## Push HTTP FLV to SRS + +你可以推送HTTP FLV流到SRS,对于一些移动端设备,使用HTTP推流会很简单。 + +首先,使用HTTP FLV相关配置启动SRS: + +```bash +./objs/srs -c conf/push.flv.conf +``` + +> Note: 关于详细的配置,请参考[Config](#config)中`flv`的部分。 + +然后,使用编码器推流,比如用FFmpeg: + +```bash +ffmpeg -re -f flv -i doc/source.flv -c copy \ + -f flv http://127.0.0.1:8936/live/livestream.flv +``` + +现在,就可以播放流了: + +* [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?stream=livestream.flv) +* [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?stream=livestream.m3u8) +* [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html?autostart=true) + +> Note: 关于开发的一些细节,请参考[#2611](https://github.com/ossrs/srs/issues/2611). + +## Push GB28181 to SRS + +GB28181是国内安防摄像头基本都会支持的协议,属于国家标准,主要是摄像头主动连接到服务器的场景。 随着互联网的普及,安防摄像头有时候也需要接入互联网,比如慢直播,或者景区直播等。 + +> Note: 一般安防摄像头的典型场景,是通过RTSP从摄像头拉流,而不是摄像头主动推流,请参考[#2304](https://github.com/ossrs/srs/issues/2304)的描述。 + +再次强调,SRS支持GB协议,其实并不是做安防场景,而只是支持了摄像头上互联网这个比较新也比较小的场景。当然如果有开发能力,也是完全能基于SRS做安防的流媒体服务器。 + +首先,使用GB28181相关配置启动SRS: + +```bash +./objs/srs -c conf/gb28181.conf +``` + +> Note: 关于详细的配置,请参考[Config](#config)中`gb28181`的部分。 + +然后,配置摄像头推流: + +![](/img/doc-2022-10-08-001.png) + +![](/img/doc-2022-10-08-002.png) + +> Note: 音频请选择AAC编码,视频选择子码流,传输协议选择TCP,协议版本选择GB28181-2016。 + +> Note: 配置中`CANDIDATE`需要配置为摄像头能访问到的IP地址,详细请参考[Protocol: GB28181: Candidate](./gb28181.md#candidate)。 + +现在,就可以播放流了,请将设备ID换成你的设备: + +* [http://localhost:8080/live/34020000001320000001.flv](http://localhost:8080/players/srs_player.html?stream=34020000001320000001.flv) +* [http://localhost:8080/live/34020000001320000001.m3u8](http://localhost:8080/players/srs_player.html?stream=34020000001320000001.m3u8) +* [webrtc://localhost/live/34020000001320000001](http://localhost:8080/players/rtc_player.html?stream=34020000001320000001) + +> Note: 关于开发的一些细节,请参考[#3176](https://github.com/ossrs/srs/issues/3176). + +## Push RTSP to SRS + +这个功能已经被删除,详细原因参考[#2304](https://github.com/ossrs/srs/issues/2304#issuecomment-826009290)。 + +2015.1 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/streamer) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/time-jitter.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/time-jitter.md new file mode 100644 index 00000000..fc6530ae --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/time-jitter.md @@ -0,0 +1,103 @@ +--- +title: 时间戳矫正 +sidebar_label: 时间戳矫正 +hide_title: false +hide_table_of_contents: false +--- + +# TimeJitter + +描述了SRS的时间戳矫正机制。 + +## RTMP Monotonically Increase Timestamp + +RTMP要求流的时间戳是单增的,视频流的时间戳单增,音频流的时间戳单增。所谓单增就是指单调递增,所谓单调递增就是包的时间戳越来越大。所谓越来越大就是......数字越来越大。 + +单增有两种情况: +* 分别单增:指的是视频是单增的,音频是单增的,但是流(混合了音频和视频)是不单增的。 +* 流单增:指的不仅仅是分别单增,而且流里面的包永远是单增的。 + +RTMP协议没有说道要求什么级别的单增,但一般指流单增。 + +如果非单增会怎样?有些服务器会断开连接,librtmp会报错,flash客户端会播放不了之类。但是,实际上并没有那么恐怖(还是保持单增好点,毕竟RTMP协议里说到这个了),所以有些编码器出来的流不是单增也能播放,特别是用vlc播放之类。 + +## Timestamp Jitter + +如果流不是单增的怎么办?SRS采用非常简单的算法保证它是单增的。如果不是单增就把时间戳增量设为40(即fps为25)。这个机制就是SRS的时间戳矫正机制。 + +有几处地方用到了时间戳矫正: +* RTMP流分发:可以设置vhost的time_jitter来选择矫正机制。分发给客户端的RTMP流的时间戳矫正机制。 +* DVR录制:可以设置vhost的dvr的time_jitter来配置矫正机制。录制为flv文件的时间戳处理机制。 +* HLS:打开时间戳矫正机制。 +* Forward:打开时间戳矫正机制。 +* HTTP Audio Stream Fast Cache: 和RTMP一样,即在vhost中配置,参考`fast_cache`. 
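+
+举个简单的例子说明矫正的效果(数值仅为示意):假设收到的视频时间戳依次为 0、40、80、40,第4个包相对前一个不再递增,矫正机制会把这一步的增量按 40ms 处理,输出的时间戳变为 0、40、80、120,从而保持单调递增。
+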
+ +如果你的编码器只能做到分别单增(对音频和视频分别编码的情况很常见),那么可以关闭时间戳矫正。 + +## Config + +在vhost中配置时间戳矫正: + +```bash +vhost jitter.srs.com { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # about the stream monotonically increasing: + # 1. video timestamp is monotonically increasing, + # 2. audio timestamp is monotonically increasing, + # 3. video and audio timestamp is interleaved/mixed monotonically increasing. + # it's specified by RTMP specification, @see 3. Byte Order, Alignment, and Time Format + # however, some encoder cannot provides this feature, please set this to off to ignore time jitter. + # the time jitter algorithm: + # 1. full, to ensure stream start at zero, and ensure stream monotonically increasing. + # 2. zero, only ensure sttream start at zero, ignore timestamp jitter. + # 3. off, disable the time jitter algorithm, like atc. + # default: full + time_jitter full; + # whether use the interleaved/mixed algorithm to correct the timestamp. + # if on, always ensure the timestamp of audio+video is interleaved/mixed monotonically increase. + # if off, use time_jitter to correct the timestamp if required. + # default: off + mix_correct off; + } +} +``` + +其中,vhost的`mix_correct`配置,能将分别单增的音频和视频流,变成混合单增的流。 + +在DVR中配置时间戳矫正: + +``` +vhost dvr.srs.com { + # dvr RTMP stream to file, + # start to record to file when encoder publish, + # reap flv according by specified dvr_plan. + # http callbacks: + # @see http callback on_dvr_hss_reap_flv on http_hooks section. + dvr { + # about the stream monotonically increasing: + # 1. video timestamp is monotonically increasing, + # 2. audio timestamp is monotonically increasing, + # 3. video and audio timestamp is interleaved monotonically increasing. + # it's specified by RTMP specification, @see 3. Byte Order, Alignment, and Time Format + # however, some encoder cannot provides this feature, please set this to off to ignore time jitter. + # the time jitter algorithm: + # 1. full, to ensure stream start at zero, and ensure stream monotonically increasing. + # 2. zero, only ensure sttream start at zero, ignore timestamp jitter. + # 3. off, disable the time jitter algorithm, like atc. 
+ # default: full + time_jitter full; + } +} +``` + +## ATC + +[RTMP ATC](./rtmp-atc.md)开启时,RTMP流分发的时间戳矫正机制变为关闭,不对时间戳做任何处理。 + +Winlin 2015.4 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/time-jitter) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/webrtc.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/webrtc.md new file mode 100644 index 00000000..e731b375 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/webrtc.md @@ -0,0 +1,567 @@ +--- +title: WebRTC +sidebar_label: WebRTC +hide_title: false +hide_table_of_contents: false +--- + +# WebRTC + +WebRTC是Google开源的在线实时通信的方案,简单来讲就是互联网音视频会议,由于是RFC标准协议,并且浏览器支持, +因此也不断的在拓展边界,应用在低延迟的音视频场景,比如在线会议、直播连麦、低延迟直播、远程机器人控制、远程桌面、 +云游戏、智能门铃、直播的网页推流等。 + +WebRTC实际上是两个Web浏览器之间直接通信的标准,主要包含了信令(Signaling)和媒体(Media)两个部分的协议。 +信令解决两个设备之间的能力的协商的问题,比如支持的编解码能力。媒体解决两个设备之间加密和低延迟媒体包传输的能力。 +除此之外,WebRTC本身还实现了语言处理技术比如3A,网络拥塞控制比如NACK、FEC和GCC,音视频编解码,平滑和低延迟播放技术。 + +```bash ++----------------+ +----------------+ ++ Browser +----<--Signaling----->--+ Browser + ++ (like Chrome) +----<----Media----->----+ (like Chrome) + ++----------------+ +----------------+ +``` + +> Note: WebRTC已经是RFC正式标准,因此各种浏览器都已经支持,而开源的实现也很多,因此不限于浏览器,移动端的浏览器和 +> Native库也有很多,因此为了沟通的简单起见,本文一般以浏览器指代所有支持WebRTC协议的客户端或设备。 + +实际上,在互联网上,两个浏览器几乎无法直接通信,特别是不在一个局域网,而且是在远距离跨城市甚至跨国家时,两个浏览器之间 +传输数据会经过非常多的网络路由器和防火墙,因此传输质量无法保障。因此,实际应用是需要经过服务器中转,而WebRTC服务器有 +几种类型: + +* Signaling Server: 信令服务,两个浏览器之间交换SDP的服务。如果是多人会议,则需要提供房间服务,本质上都是为各个浏览器交换SDP。而在流媒体领域,为了可以使用WebRTC推流和播放,像推送和播放RTMP/SRT/HLS流一样,WHIP/WHEP协议被设计出来了。 +* TURN Server: 转发服务,帮助两个浏览器之间转发媒体数据的服务。这是一种透明转发服务,并不会实现数据缓存,因此当多人会议时,浏览器之间需要传输`N*N + N*(N-2)`份数据。一般只应用在非常少的通信场景中,比如一对一。 +* SFU Server: 选择性转发服务,服务器上有缓存数据,因此浏览器只需要上传一份数据,服务器会复制给其他参会者。SRS就是SFU,关于SFU的作用可以参考[这里](https://stackoverflow.com/a/75491178/17679565)。目前主要的WebRTC服务器都是SFU服务器,会有`N*N`份流传输,比TURN少`N*(N-2)`份上行数据传输,能解决大部分的传输问题。 +* MCU Server: 多点控制服务,服务器将会议中的流合并成一路,这样浏览器只需要传输`N*2`份数据,上传一路下载一路数据。但由于需要编解码,服务器支持的流的数量比SFU要少一个量级,只有在某些特定场景才会采用,具体参考[#3625](https://github.com/ossrs/srs/discussions/3625) + +我们重点介绍SFU的工作流,因为SFU是在WebTC服务器中使用最多的,它本质上就是一个浏览器: + +```bash ++----------------+ +---------+ ++ Browser +----<--Signaling----->--+ SFU + ++ (like Chrome) +----<----Media----->----+ Server + ++----------------+ +---------+ +``` + +> Note: SFU一般都会有Signaling的能力,其实可以把RTMP地址也可以看成是一种非常简化的信令协议,只是WebRTC的信令需要协商 +> 媒体和传输能力,所以比较复杂。在复杂的WebRTC系统中,可能有独立的Signaling和Room集群,但是SFU同样也会有简化的Signaling能力, +> 只是可能是用于和其他服务通信。 + +SRS是一个媒体服务器,提供了Signaling和SFU Server的能力。和其他SFU比如Janus不同的是,SRS是基于Stream的,尽管房间中 +可以有多个参与者,本质上都是有人在推流,其他人在订阅这个流。这样可以避免将房间中的所有流,都耦合到一个SFU传输,可以分散到 +多个SFU传输,这样可以支持更多人的会议。 + +SRS支持的Signaling就是WHIP和WHEP,具体请参考[HTTP API](#http-api)部分。和直播很不一样的是,由于Signaling和Media分离, +因此需要设置Candidate,详细参考[Candidate](#config-candidate)。Media默认使用UDP传输,若UDP不可用也可以用TCP参考 +[TCP](#webrtc-over-tcp)。若遇到不可用的情况,很有可能是Candidate设置不对,也有可能是防火墙或端口不通,请参考 +[Connectivity](#connection-failures)使用工具检查。SRS还支持了不同协议的转换,比如推流RTMP后用WebRTC观看参考 +[RTMP to WebRTC](#rtmp-to-rtc),或者用WebRTC推流后用HLS观看参考[RTC to RTMP](#rtc-to-rtmp)。 + +SRS是在2020年支持的WebRTC协议,研发的详细过程请参考[#307](https://github.com/ossrs/srs/issues/307)。 + +## Config + +RTC的配置很多,详细配置参考`full.conf`,如下: + +```bash +rtc_server { + # Whether enable WebRTC server. + # Overwrite by env SRS_RTC_SERVER_ENABLED + # default: off + enabled on; + # The udp listen port, we will reuse it for connections. 
+ # Overwrite by env SRS_RTC_SERVER_LISTEN + # default: 8000 + listen 8000; + # For WebRTC over TCP directly, not TURN, see https://github.com/ossrs/srs/issues/2852 + # Some network does not support UDP, or not very well, so we use TCP like HTTP/80 port for firewall traversing. + tcp { + # Whether enable WebRTC over TCP. + # Overwrite by env SRS_RTC_SERVER_TCP_ENABLED + # Default: off + enabled off; + # The TCP listen port for WebRTC. Highly recommend is some normally used ports, such as TCP/80, TCP/443, + # TCP/8000, TCP/8080 etc. However SRS default to TCP/8000 corresponding to UDP/8000. + # Overwrite by env SRS_RTC_SERVER_TCP_LISTEN + # Default: 8000 + listen 8000; + } + # The protocol for candidate to use, it can be: + # udp Generate UDP candidates. Note that UDP server is always enabled for WebRTC. + # tcp Generate TCP candidates. Fail if rtc_server.tcp(WebRTC over TCP) is disabled. + # all Generate UDP+TCP candidates. Ignore if rtc_server.tcp(WebRTC over TCP) is disabled. + # Note that if both are connected, we will use the first connected(DTLS done) one. + # Overwrite by env SRS_RTC_SERVER_PROTOCOL + # Default: udp + protocol udp; + # The exposed candidate IPs, response in SDP candidate line. It can be: + # * Retrieve server IP automatically, from all network interfaces. + # $CANDIDATE Read the IP from ENV variable, use * if not set. + # x.x.x.x A specified IP address or DNS name, use * if 0.0.0.0. + # @remark For Firefox, the candidate MUST be IP, MUST NOT be DNS name, see https://bugzilla.mozilla.org/show_bug.cgi?id=1239006 + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/webrtc#config-candidate + # Overwrite by env SRS_RTC_SERVER_CANDIDATE + # default: * + candidate *; +} + +vhost rtc.vhost.srs.com { + rtc { + # Whether enable WebRTC server. + # Overwrite by env SRS_VHOST_RTC_ENABLED for all vhosts. + # default: off + enabled on; + # Whether support NACK. + # default: on + nack on; + # Whether support TWCC. + # default: on + twcc on; + # The timeout in seconds for session timeout. + # Client will send ping(STUN binding request) to server, we use it as heartbeat. + # default: 30 + stun_timeout 30; + # The role of dtls when peer is actpass: passive or active + # default: passive + dtls_role passive; + # Whether enable transmuxing RTMP to RTC. + # If enabled, transcode aac to opus. + # Overwrite by env SRS_VHOST_RTC_RTMP_TO_RTC for all vhosts. + # default: off + rtmp_to_rtc off; + # Whether enable transmuxing RTC to RTMP. + # Overwrite by env SRS_VHOST_RTC_RTC_TO_RTMP for all vhosts. 
+ # Default: off + rtc_to_rtmp off; + } +} +``` + +第一部分,`rtc_server`是全局的RTC服务器的配置,部分关键配置包括: +* `enabled`:是否开启RTC服务器,默认是off。 +* `listen`:侦听的RTC端口,注意是UDP协议。 +* `candidate`:服务器提供服务的IP地址,由于RTC的特殊性,必须配置这个地址。详细参考[Config: Candidate](./webrtc.md#config-candidate) +* `tcp.listen`: 使用TCP传输WebRTC媒体数据,侦听的TCP端口。详细参考[WebRTC over TCP](./webrtc.md#webrtc-over-tcp) + +第二部分,每个vhost中的RTC配置,部分关键配置包括: +* `rtc.enabled`:是否开启RTC能力,默认是off。 +* `rtc.rtmp_to_rtc`:是否开启RTMP转RTC。 +* `rtc.rtc_to_rtmp`:是否开启RTC转RTMP。 +* `rtc.stun_timeout`:会话超时时间,单位秒。 +* `rtc.nack`:是否开启NACK的支持,即丢包重传,默认on。 +* `rtc.twcc`:是否开启TWCC的支持,即拥塞控制的反馈机制,默认on。 +* `rtc.dtls_role`:DTLS角色,active就是DTLS Client(主动发起),passive是DTLS Server(被动接受)。 + +## Config Candidate + +由于`candidate`特别、特别、特别的重要,大概有1/3的朋友的问题都是这个配置不对。只要`candidate`配置不对,一定会出问题,没有其他可能,是一定会出问题。 + +修改`candidate`的最简单方法是在URL中指定`eip`。例如,如果您的服务器是`192.168.3.10`,请使用此URL: + +* [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream&eip=192.168.3.10](http://localhost:8080/players/whip.html?eip=192.168.3.10) + +此外,修改默认UDP端口`8000`的最简单、最直接方法(尤其是在负载均衡器或代理后面时)是使用`eip`。例如,如果您使用UDP`18000`作为端口,请考虑使用此URL: + +* [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream&eip=192.168.3.10:18000](http://localhost:8080/players/whip.html?eip=192.168.3.10:18000) + +其实,`candidate`就是服务器的`候选地址`,客户端可以连接的地址`ip:port`,在SDP交换中,就有个`candidate`的信息,比如服务器回的answer可能是这样: + +```bash +type: answer, sdp: v=0 +a=candidate:0 1 udp 2130706431 192.168.3.6 8000 typ host generation 0 +``` + +上面SDP中的`192.168.3.6 8000`,就是`candidate listen`这两个配置,即服务器的IP和端口。 既然是服务器的IP,那么目前有几种方式可以配置: +* 直接配置成固定的IP,比如:`candidate 192.168.3.6;` +* 用命令`ifconfig`获取本机的内网IP,通过环境变量传递给SRS,比如:`candidate $CANDIDATE;` +* 自动获取,先读取环境变量,然后获取本机网卡的IP,比如:`candidate *;`,下面会有详细说明。 +* 在url中通过`?eip=x`指定,比如:`webrtc://192.168.3.6/live/livestream?eip=192.168.3.6` +* 若API和SRS是同一个服务器(默认就是),可以用API的hostname作为CANDIDATE,这种情况下面单独说明。 + +此外,自动获取本机网卡IP的情况,相关配置如下: +* `candidate *;`或`candidate 0.0.0.0;`,支持任意IP,就意味着让服务器自己选择,先选公网IP,然后选内网IP。 +* `use_auto_detect_network_ip on;` 若关闭这个功能,则不会自动选择IP。 +* `ip_family ipv4;` 自动选择IP时,选择IPv4还是IPv6的地址。 + +由于WebRTC推拉流之前,必须访问HTTP API交换SDP,因此在HTTP请求中的hostname一般就是SRS的公网域名或IP。相关配置如下: +* `api_as_candidates on;` 是否开启这个功能。若API是单独的服务器,可以关闭这个功能。 +* `resolve_api_domain on;` 若API是域名,是否将域名解析为IP地址。注意Firefox不支持域名,所以一般是推荐打开的。 +* `keep_api_domain on;` 是否保留API的域名,支持域名解析的客户端可以自己解析IP地址,避免服务器实现解析。 + +> Note: 注意,如果以上途径无法获取CANDIDATE,还是会自动选择一个网卡的IP,避免失败(无CANDIDATE一定失败)。 + +简单来说,如果在SRS运行的服务器上,运行`ifconfig`获取的IP地址,是客户端访问不了的地址, 就必须通过配置`candidate`,指定一个客户端能访问的地址。 + +通过`ifconfig`获取本机IP: + +```bash +# For macOS +CANDIDATE=$(ifconfig en0 inet| grep 'inet '|awk '{print $2}') + +# For CentOS +CANDIDATE=$(ifconfig eth0|grep 'inet '|awk '{print $2}') + +# Directly set ip. 
+CANDIDATE="192.168.3.10" +``` + +设置环境变量,然后启动SRS: + +```bash +env CANDIDATE="192.168.3.10" \ + ./objs/srs -c conf/rtc.conf +``` + +用Docker方式运行SRS,设置环境变量的方法: + +```bash +export CANDIDATE="192.168.3.10" +docker run --rm --env CANDIDATE=$CANDIDATE \ + -p 1935:1935 -p 8080:8080 -p 1985:1985 -p 8000:8000/udp \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 \ + objs/srs -c conf/rtc.conf +``` + +> Note:Docker的详细用法参考[srs-docker](https://github.com/ossrs/dev-docker/tree/v4#usage), +> 镜像地址和可用的版本参考[这里](https://hub.docker.com/r/ossrs/srs/tags)或[这里](https://cr.console.aliyun.com/repository/cn-hangzhou/ossrs/srs/images)。 + +## Stream URL + +在SRS中,直播和WebRTC的基本概念都是流(Stream),因此,流的URL定义有很高的概念一致性。参考下面SRS的几种不同协议的流地址, +安装完SRS后可以直接打开: + +* 使用RTMP推流或播放: `rtmp://localhost/live/livestream` +* 使用HTTP-FLV播放流: [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html) +* 使用HLS播放流: [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?stream=livestream.m3u8) +* WebRTC推流: [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream](http://localhost:8080/players/whip.html) +* WebRTC播放: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html) + +> Remark: 由于Flash已经被禁用,RTMP流无法在Chrome播放,请使用VLC播放。 + +早期还没有WHIP和WHEP时,SRS支持过另外一种格式,只是HTTP API的格式不同,做的事情还是交换SDP。现在已经不推荐使用了: + +* Publish: [webrtc://localhost/live/livestream](http://localhost:8080/players/rtc_publisher.html) +* Play: [webrtc://localhost/live/livestream](http://localhost:8080/players/rtc_player.html) + +> Note: 这里没有给出SRT的地址,因为SRT的地址设计并不是常见的URL格式。 + +## WebRTC over TCP + +在很多网络条件下,WebRTC不适合使用UDP传输,因此支持TCP传输是极其重要的能力;而且SRS支持的是直接TCP传输的方式,避免使用TURN中转带来的额外网络层问题;这对于LoadBalancer也是非常友好的,一般支持TCP会更友好。 + +* HTTP API、HTTP Stream、WebRTC over TCP,可以全部复用一个TCP端口,比如HTTPS(443)。 +* 支持直接UDP或TCP传输,不依赖TURN协议,没有额外的网元,没有额外部署和资源消耗。 +* 可部署在LoadBalancer后面(已实现),可配合[Proxy(未实现)](https://github.com/ossrs/srs/issues/3138)或者[Cluster(未实现)](https://github.com/ossrs/srs/issues/2091)实现负载均衡和扩容。 + +> Note: 注意需要升级到`v5.0.60+`,若使用Docker也请先确认SRS的版本。 + +启动SRS,指定使用TCP传输WebRTC媒体,默认使用的是TCP(8000)端口: + +```bash +docker run --rm -it -p 8080:8080 -p 1985:1985 -p 8000:8000 \ + -e CANDIDATE="192.168.3.82" \ + -e SRS_RTC_SERVER_TCP_ENABLED=on \ + -e SRS_RTC_SERVER_PROTOCOL=tcp \ + -e SRS_RTC_SERVER_TCP_LISTEN=8000 \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:v5 +``` + +或者使用 [FFmpeg(点击下载)](https://ffmpeg.org/download.html) 或 [OBS(点击下载)](https://obsproject.com/download) 推流: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +* 播放(WebRTC over TCP): [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html?autostart=true) +* 播放(HTTP FLV): [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true) +* 播放(HLS): [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?stream=livestream.m3u8&autostart=true) + +> Note: 我们使用环境变量开启配置,直接使用配置文件也可以的。 + +> Note: 我们使用独立的TCP端口,HTTP API(1985),HTTP Stream(8080),WebRTC over TCP(8000),也可以选择全部复用HTTP Stream端口。 + +## HTTP API + +SRS支持WHIP和WHEP协议。安装好SRS后,可以直接点击下面的地址测试: + +* WebRTC推流: [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream](http://localhost:8080/players/whip.html) +* WebRTC播放: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html) + +关于协议的具体实现细节,请参考[WHIP](./http-api.md#webrtc-publish)和[WHEP](./http-api.md#webrtc-play)。下面是交互图: + 
+[![](/img/doc-whip-whep-workflow.png)](https://www.figma.com/file/fA75Nl6Fr6v8hsrJba5Xrn/How-Does-WHIP%2FWHEP-Work%3F?type=whiteboard&node-id=0-1) + +如果是在Mac或Linux上安装SRS,可以通过localhost测试本机的SRS服务。但是若在Windows,或者远程Linux服务器,或者需要在其他 +设备上测试,则必须使用HTTPS WHIP推流,而WHEP则依然可以HTTP。可以开启SRS的HTTPS参考[HTTPS API](./http-api.md#https-api), +也可以使用Web服务器代理比如Nginx参考[HTTPS Proxy](./http-api.md#http-and-https-proxy)。 + +若需要测试是否HTTP API正常工作,可以使用`curl`工具,具体请参考[Connectivity Check](#connection-failures)。 + +## Connection Failures + +一些开发者来到 SRS 社区寻求帮助,因为他们在使用 OBS WHIP 连接到在线 WHIP 服务器时遇到了错误。这是因为在线服务器必须使用 HTTPS, +而且 UDP 端口可能更容易获得。此外,由于隐私或网络问题,很难调试或登录到在线服务器。 + +因此,我们找到了一些方法来解决 OBS WHIP 中的连接失败问题,通常是由于 HTTPS API 设置或 UDP 端口不可用问题导致的。 + +使用 curl 测试 WHIP HTTP 或 HTTPS API: + +```bash +curl "http://localhost:1985/rtc/v1/whip/?ice-ufrag=6pk11386&ice-pwd=l91z529147ri9163933p51c4&app=live&stream=livestream-$(date +%s)" \ + -H 'Origin: http://localhost' -H 'Referer: http://localhost' \ + -H 'Accept: */*' -H 'Content-type: application/sdp' \ + -H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)' \ + --data-raw $'v=0\r\na=group:BUNDLE 0 1\r\nm=audio 9 UDP/TLS/RTP/SAVPF 111\r\nc=IN IP4 0.0.0.0\r\na=rtcp:9 IN IP4 0.0.0.0\r\na=ice-ufrag:J8X7\r\na=ice-pwd:Dpq7/fW/osYcPeLsCW2Ek1JH\r\na=setup:actpass\r\na=mid:0\r\na=sendonly\r\na=msid:- audio\r\na=rtcp-mux\r\na=rtpmap:111 opus/48000/2\r\na=ssrc:3184534672 cname:stream\r\nm=video 9 UDP/TLS/RTP/SAVPF 106\r\nc=IN IP4 0.0.0.0\r\na=rtcp:9 IN IP4 0.0.0.0\r\na=ice-ufrag:J8X7\r\na=ice-pwd:Dpq7/fW/osYcPeLsCW2Ek1JH\r\na=setup:actpass\r\na=mid:1\r\na=sendonly\r\na=msid:- video\r\na=rtcp-mux\r\na=rtpmap:106 H264/90000\r\na=ssrc:512761356 cname:stream' \ + -v -k +``` + +> Note: 您可以将 `http://localhost` 替换为 `https://yourdomain.com` 以测试 HTTPS API。 + +> Note: 对于Oryx,您应该指定secret,所以请将`/rtc/v1/whip?ice-ufrag=`更改为`/rtc/v1/whip?secret=xxx&ice-ufrag=`之类的。 + +> Note: 你也可以使用`eip=ip`或者`eip=ip:port`,强制SRS改写candidate的配置。详细请参考 [CANDIDATE](#config-candidate) 的说明。 + +答案包含候选项,即 UDP 服务器 IP,例如 `127.0.0.1`: + +``` +a=candidate:0 1 udp 2130706431 127.0.0.1 8000 typ host generation 0 +``` + +使用 `nc` 向 SRS WHIP 服务器发送 UDP 数据包: + +```bash +echo -en "\x00\x01\x00\x50\x21\x12\xa4\x42\x74\x79\x6d\x7a\x41\x51\x2b\x2f\x4a\x4b\x77\x52\x00\x06\x00\x0d\x36\x70\x6b\x31\x31\x33\x38\x36\x3a\x4a\x38\x58\x37\x00\x00\x00\xc0\x57\x00\x04\x00\x01\x00\x0a\x80\x2a\x00\x08\xda\xad\x1d\xce\xe8\x95\x5a\x83\x00\x24\x00\x04\x6e\x7f\x1e\xff\x00\x08\x00\x14\x56\x8f\x1e\x1e\x4f\x5f\x17\xf9\x2e\xa1\xec\xbd\x51\xd9\xa2\x27\xe4\xfd\xda\xb1\x80\x28\x00\x04\x84\xd3\x5a\x79" \ + |nc -w 3 -u 127.0.0.1 8000 |od -Ax -c -t x1 |grep '000' && \ + echo "Success" || echo "Failed" +``` + +> Note: 您还可以使用 `nc` 或 [server.go](https://github.com/ossrs/srs/pull/3837) 作为测试的 UDP 服务器。 + +如果使用 SRS 作为 WHIP 服务器,应该响应: + +``` +0000000 001 001 \0 @ ! 022 244 B t y m z A Q + / +0000010 J K w R \0 006 \0 \r 6 p k 1 1 3 8 6 +0000020 : J 8 X 7 \0 \0 \0 \0 \0 \b \0 001 376 ` +0000030 ầ ** ** 027 \0 \b \0 024 206 263 + ʼn ** 025 G 215 +0000040 I 335 P ^ " 7 } N ? 
017 037 224 200 ( \0 004 +0000050 303 < 250 272 +0000054 +Success +``` + +> Note: 应为 SRS 5.0.191+,请参阅 [#3837](https://github.com/ossrs/srs/pull/3837),您还可以使用 +> [server.go](https://github.com/ossrs/srs/issues/2843) 作为测试的 UDP 服务器。 + +## RTMP to RTC + +WebRTC可以作为直播的一个播放器,播放直播流,延迟比RTMP还要低,更能抗网络抖动。 + +本机启动SRS(参考[usage](https://github.com/ossrs/srs/tree/4.0release#usage)),例如: + +```bash +export CANDIDATE="192.168.1.10" +docker run --rm --env CANDIDATE=$CANDIDATE \ + -p 1935:1935 -p 8080:8080 -p 1985:1985 -p 8000:8000/udp \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 \ + objs/srs -c conf/rtmp2rtc.conf +``` + +> Note: 请将CANDIDATE设置为服务器的外网地址,详细请阅读[WebRTC: CANDIDATE](./webrtc.md#config-candidate)。 + +> Remark: SRS 4.0.14+支持RTMP推流,WebRTC播放。 + +相关的配置说明: + +* `rtc.rtmp_to_rtc`:是否将RTMP转RTC。禁用时,推RTMP流无法使用WebRTC播放。开启时,音频转码为opus(一路流消耗2%左右CPU)。 +* `rtc.keep_bframe`:是否保留B帧,RTMP流中一般会有B帧,而RTC没有,默认丢弃B帧。 +* `min_latency`:如果开启了RTC,这个配置的默认值也是on,而RTMP这个的默认值是off。 +* `play.mw_latency`:如果开启了RTC,这个配置的默认值是0。 +* `play.mw_msgs`:如果开启RTC,`min_latency`开启默认为0,否则默认为1,比直播的默认值要小。 + +使用RTMP推流到本机: + +```bash +docker run --rm -it registry.cn-hangzhou.aliyuncs.com/ossrs/srs:encoder ffmpeg -stream_loop -1 -re -i doc/source.flv \ + -c copy -f flv rtmp://host.docker.internal/live/livestream +``` + +可播放的流地址: + +* WebRTC播放:[http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html?autostart=true) +* HTTP-FLV播放:[http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) + +> Remark: 默认静音(H5自动播放要求的),可以点右下角小喇叭开启声音。 + +## RTC to RTC + +WebRTC本身是可以推流和拉流的,全链路延迟都很低。 + +本机启动SRS(参考[usage](https://github.com/ossrs/srs/tree/4.0release#usage)),例如: + +```bash +export CANDIDATE="192.168.1.10" +docker run --rm --env CANDIDATE=$CANDIDATE \ + -p 1935:1935 -p 8080:8080 -p 1985:1985 -p 8000:8000/udp \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 \ + objs/srs -c conf/rtc.conf +``` + +> Note: 请将CANDIDATE设置为服务器的外网地址,详细请阅读[WebRTC: CANDIDATE](./webrtc.md#config-candidate)。 + +> Remark: SRS 4.0.76+支持WebRTC推流,WebRTC播放。 + +演示,WebRTC推流和播放,链接: + +* WebRTC推流: [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream](http://localhost:8080/players/whip.html) +* WebRTC播放: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html) + +> Remark: 推流时,必须是HTTPS页面,当然本机localhost没这个限制。 + +## RTC to RTMP + +WebRTC推流,可以转成RTMP流播放,SRS只会对音频转码(Opus转AAC),因此要求视频是H.264编码。 + +本机启动SRS(参考[usage](https://github.com/ossrs/srs/tree/4.0release#usage)),例如: + +```bash +export CANDIDATE="192.168.1.10" +docker run --rm --env CANDIDATE=$CANDIDATE \ + -p 1935:1935 -p 8080:8080 -p 1985:1985 -p 8000:8000/udp \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 \ + objs/srs -c conf/rtc2rtmp.conf +``` + +> Note: 请将CANDIDATE设置为服务器的外网地址,详细请阅读[WebRTC: CANDIDATE](./webrtc.md#config-candidate)。 + +> Remark: SRS 4.0.95+支持WebRTC推流,RTMP/HTTP-FLV播放,参考[#2303](https://github.com/ossrs/srs/pull/2303)。 + +相关的配置说明: + +* `rtc.rtc_to_rtmp`:是否开启RTC转RTMP,只会对音频转码(Opus转AAC),视频(H.264)不转码,默认off。 +* `rtc.pli_for_rtmp`:请求关键帧的间隔,单位秒,RTC没有固定GOP,而RTMP一般需要,默认6.0。 + +演示,WebRTC推流和播放,链接: + +* WebRTC推流: [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream](http://localhost:8080/players/whip.html) +* WebRTC播放: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html) +* 
HTTP-FLV播放:[http://localhost:8080/live/show.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=show.flv) +* RTMP流(可用VLC播放):rtmp://localhost/live/show + +## SFU: One to One + +SRS早就具备了SFU的能力,比如一对一通话、[多人通话](./webrtc.md#sfu-video-room)、[直播连麦](./webrtc.md#room-to-live)等等。在沟通中,一对一是常用而且典型的场景, +让我们一起来看看如何用SRS做直播和RTC一体化的一对一通话。 + +> 下面以Docker中运行DEMO为例子,若希望从代码编译,请设置好对应的环境变量和启动命令。 + +本机启动SRS(参考[usage](https://github.com/ossrs/srs/tree/4.0release#usage)),例如: + +```bash +export CANDIDATE="192.168.1.10" +docker run --rm --env CANDIDATE=$CANDIDATE \ + -p 1935:1935 -p 8080:8080 -p 1985:1985 -p 8000:8000/udp \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 \ + objs/srs -c conf/rtc.conf +``` + +> Note: 请将CANDIDATE设置为服务器的外网地址,详细请阅读[WebRTC: CANDIDATE](./webrtc.md#config-candidate)。 + +> Note: More images and version is [here](https://cr.console.aliyun.com/repository/cn-hangzhou/ossrs/srs/images). + +> Note: Demo的H5页面,是在SRS镜像中的。 + +本机启动信令(参考[usage](https://github.com/ossrs/signaling#usage)),例如: + +```bash +docker run --rm -p 1989:1989 registry.cn-hangzhou.aliyuncs.com/ossrs/signaling:1 +``` + +> Note: More images and version is [here](https://cr.console.aliyun.com/repository/cn-hangzhou/ossrs/signaling/images). + +启动[httpx-static](https://github.com/ossrs/go-oryx/tree/develop/httpx-static#usage),转换HTTPS和WSS协议: + +```bash +export CANDIDATE="192.168.1.10" +docker run --rm -p 80:80 -p 443:443 registry.cn-hangzhou.aliyuncs.com/ossrs/httpx:1 \ + ./bin/httpx-static -http 80 -https 443 -ssk ./etc/server.key -ssc ./etc/server.crt \ + -proxy http://$CANDIDATE:1989/sig -proxy http://$CANDIDATE:1985/rtc \ + -proxy http://$CANDIDATE:8080/ +``` + +> Note: 请将CANDIDATE设置为服务器的外网地址,详细请阅读[WebRTC: CANDIDATE](./webrtc.md#config-candidate)。 + +本机(localhost)可以直接打开[http://localhost/demos/one2one.html?autostart=true](http://localhost/demos/one2one.html?autostart=true)。 + +若非本机,则可以打开[https://192.168.3.6/demos/one2one.html?autostart=true](https://192.168.3.6/demos/one2one.html?autostart=true)。 + +> 注意:自签名证书,在空白处输入`thisisunsafe`(注意没空格)。 + +## SFU: Video Room + +SRS支持多人通话的SFU能力,请参考[一对一通话](./webrtc.md#sfu-one-to-one)搭建环境,然后访问页面: + +本机(localhost)可以直接打开[http://localhost/demos/room.html?autostart=true](http://localhost/demos/room.html?autostart=true)。 + +若非本机,则可以打开[https://192.168.3.6/demos/room.html?autostart=true](https://192.168.3.6/demos/room.html?autostart=true)。 + +> 注意:自签名证书,在空白处输入`thisisunsafe`(注意没空格)。 + +若需要会议转直播,请参考[RTC转直播](./webrtc.md#room-to-live)。 + +## Room to Live + +上面我们介绍了[一对一通话](./webrtc.md#sfu-one-to-one),如果能将这个通话合成一个流,叠加视频和混音, +转成RTMP流推送到直播,这就是连麦了。 + +> Note: [多人通话](./webrtc.md#sfu-video-room)也是可以转直播的,原理一样,只是多人通话的流更多。 + +注意请开启RTC转RTMP,我们合并的是RTMP流,例如: + +```bash +export CANDIDATE="192.168.1.10" +docker run --rm --env CANDIDATE=$CANDIDATE \ + -p 1935:1935 -p 8080:8080 -p 1985:1985 -p 8000:8000/udp \ + registry.cn-hangzhou.aliyuncs.com/ossrs/srs:5 \ + objs/srs -c conf/rtc2rtmp.conf +``` + +> Note: 请将CANDIDATE设置为服务器的外网地址,详细请阅读[WebRTC: CANDIDATE](./webrtc.md#config-candidate)。 + +> Note: More images and version is [here](https://cr.console.aliyun.com/repository/cn-hangzhou/ossrs/srs/images). 
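+
+在执行后面的FFmpeg合流命令之前,可以先确认WebRTC推流已经转成了RTMP流。下面是一个简单的检查示意(仅为示意,假设SRS的HTTP API仍监听默认的1985端口,实际流名以推流时生成的为准):
+
+```bash
+# 示意:列出SRS当前的流,确认RTC推流对应的RTMP流(例如alice)已经存在
+curl http://localhost:1985/api/v1/streams | grep -o '"name":"[^"]*"'
+```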
+ +> Note: 请参考[一对一通话](./webrtc.md#sfu-one-to-one)启动Signaling和httpx-static。 + +视频合流非常非常消耗CPU,而且有很多种方式: +* SRS+FFmpeg,SRS将WebRTC流转RTMP,FFmpeg将多路RTMP合流。优势:延迟小,音质好;缺点是命令行难度高。 +* SRS+OBS,方案和SRS+FFmpeg一样,不过用OBS来实现合流。优势:图形化界面更友好,音质好;缺点是延迟大有不同步风险较大。 +* OBS抓浏览器,OBS直接捕获浏览器窗口和电脑的音频。优势:可见即所得,依赖少;缺点是音质不如前面的方案。 + +SRS+FFmpeg方案,我们在一对一通话的DEMO中,给出了使用FFmpeg合流的命令,比如: + +```bash +ffmpeg -f flv -i rtmp://192.168.3.6/live/alice -f flv -i rtmp://192.168.3.6/live/314d0336 \ + -filter_complex "[1:v]scale=w=96:h=72[ckout];[0:v][ckout]overlay=x=W-w-10:y=H-h-10[out]" -map "[out]" \ + -c:v libx264 -profile:v high -preset medium \ + -filter_complex amix -c:a aac \ + -f flv rtmp://192.168.3.6/live/merge +``` + +输入: +* rtmp://192.168.3.6/live/alice +* rtmp://192.168.3.6/live/314d0336 + +输出: +* rtmp://192.168.3.6/live/merge + +SRS+OBS可以添加多个MediaSource(媒体源),将File(文件)的勾选去掉,就可以输入上面的两个RTMP流。 + +OBS直接捕获浏览器,可以选择WindowCapature(窗口捕获),直接选择浏览器即可。 + +> Note: 转直播后,就可以使用SRS的直播录制(DVR)功能,将每个RTC流录下来,也可以录合并的流。 + +Winlin 2020.03 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/webrtc) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/windows.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/windows.md new file mode 100644 index 00000000..08ac1646 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/doc/windows.md @@ -0,0 +1,89 @@ +--- +title: Windows +sidebar_label: Windows +hide_title: false +hide_table_of_contents: false +--- + +# SRS for Windows + +SRS 5.0.89+正式支持Windows,基于Cygwin64平台,支持代码编译,以及流水线,每个5.0的版本都会提供安装包。 + +## Build from code + +如果你需要自己从代码编译Windows版本的SRS,请先安装[Cygwin64](https://cygwin.com/install.html)。 + +另外,还需要安装工具`gcc-g++` `make` `automake` `patch` `pkg-config` `tcl` `cmake`,可以参考流水线[说明](https://github.com/cygwin/cygwin-install-action#parameters)。 + +安装好环境后,在Cygwin终端中执行命令: + +```bash +git checkout develop +./configure +make +``` + +这样就可以编译出Windows版本的SRS了,可执行文件在`./objs/srs.exe`,其他使用说明参考[Getting Started](./getting-started.md)。 + +## Install from binary + +从5.0.89之后,SRS 5.0每个版本[release](https://github.com/ossrs/srs/releases),都会附带Windows的安装包。你可以下载后,快速安装和使用SRS。 + +下面是一些安装包的链接,注意你应该用最新版本,而不是使用某个固定版本[release](https://github.com/ossrs/srs/releases): + +* [最新版本下载](https://github.com/ossrs/srs/releases) +* [SRS-Windows-x86_64-5.0.89-setup.exe](https://github.com/ossrs/srs/releases/tag/v5.0.89) +* [SRS-Windows-x86_64-5.0.19-setup.exe](https://github.com/ossrs/srs/releases/tag/v5.0.19) + +> Note: SRS 5.0.89+之后,使用流水线构建Windows安装包,GitHub Actions自动生成。 + +![](/img/windows-2022-11-20-001.png) + +安装后,使用管理员权限启动SRS: + +![](/img/windows-2022-11-20-002.png) + +使用FFmpeg或OBS推流到SRS: + +```bash +ffmpeg -re -i ~/srs/doc/source.flv -c copy -f flv rtmp://win11/live/livestream +``` + +使用VLC或[srs-player](http://win11:8080/)播放流: + +![](/img/windows-2022-11-20-003.png) + +基本上SRS现有的功能都能用,比如RTMP, HTTP-FLV, HLS, WebRTC, HTTP-API, Prometheus Exporter等等。 + +## Package by NSIS + +如果你需要自己修改代码并打包,可以使用[NSIS](https://nsis.sourceforge.io/Download),在Cygwin终端中执行命令: + +```bash +"/cygdrive/c/Program Files (x86)/NSIS/makensis.exe" \ + /DSRS_VERSION=$(./objs/srs -v 2>&1) \ + /DCYGWIN_DIR="C:\cygwin64" \ + packaging/nsis/srs.nsi +``` + +## Known Issues + +* [Cygwin: Build with SRT is ok, but crash when running. #3251](https://github.com/ossrs/srs/issues/3251) +* [Cygwin: Support address sanitizer for windows. #3252](https://github.com/ossrs/srs/issues/3252) +* [Cygwin: ST stuck when working in multiple threads mode. 
#3253](https://github.com/ossrs/srs/issues/3253) +* [Cygwin: Support iocp and windows native build. #3256](https://github.com/ossrs/srs/issues/3256) +* [Cygwin: Build srtp with openssl fail for no srtp_aes_icm_ctx_t #3254](https://github.com/ossrs/srs/issues/3254) + +## Links + +ST supports windows: https://github.com/ossrs/state-threads/issues/20 + +Commits about SRS Windows: https://github.com/ossrs/srs-windows/issues/2 + +Windows docker also works for SRS, however, `srs.exe` is more popular for windows developers. + +Winlin 2022.11 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/zh/v6/windows) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tools/demo.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tools/demo.md new file mode 100644 index 00000000..e83867bf --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tools/demo.md @@ -0,0 +1,23 @@ +--- +title: 在线演示 +sidebar_label: 在线演示 +hide_title: false +hide_table_of_contents: false +--- + +# Demo + +### FLV +* [HTTP-FLV](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.flv&server=d.ossrs.net&port=80&autostart=true&vhost=d.ossrs.net&schema=http) +* [HTTPS-FLV](https://ossrs.net/players/srs_player.html?app=live&stream=livestream.flv&server=d.ossrs.net&port=443&autostart=true&vhost=d.ossrs.net&schema=https) + +### HLS +* [HLS](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.m3u8&server=d.ossrs.net&port=80&autostart=true&vhost=d.ossrs.net&schema=http) +* [HTTPS HLS](https://ossrs.net/players/srs_player.html?app=live&stream=livestream.m3u8&server=d.ossrs.net&port=443&autostart=true&vhost=d.ossrs.net&schema=https) + +### WebRTC +* [WebRTC](http://ossrs.net/players/rtc_player.html?vhost=d.ossrs.net&server=d.ossrs.net&port=1985&autostart=true) + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/tools/zh/v6/demo) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tools/specs.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tools/specs.md new file mode 100644 index 00000000..560bbbde --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tools/specs.md @@ -0,0 +1,82 @@ +--- +title: 标准文档 +sidebar_label: 标准文档 +hide_title: false +hide_table_of_contents: false +--- + +# Specifications + +## Live Streaming + +1. [amf0_spec_121207.pdf](/files/amf0_spec_121207.pdf), adobe amf0标准 +1. [amf3_spec_121207.pdf](/files/amf3_spec_121207.pdf), adobe amf3标准。 +1. [hls-m3u8-draft-pantos-http-live-streaming-12.txt](/files/hls-m3u8-draft-pantos-http-live-streaming-12.txt), [hls-m3u8-draft-pantos-http-live-streaming-12.pdf](/files/hls-m3u8-draft-pantos-http-live-streaming-12.pdf), m3u8标准。 +1. [hls-mpeg-ts-iso13818-1.pdf](/files/hls-mpeg-ts-iso13818-1.pdf), ts标准。 +1. [hls-mpeg-ts-VB_WhitePaper_TransportStreamVSProgramStream_rd2.pdf](/files/hls-mpeg-ts-VB_WhitePaper_TransportStreamVSProgramStream_rd2.pdf), ts的介绍。 +1. [rtmp.part1.Chunk-Stream.pdf](/files/rtmp.part1.Chunk-Stream.pdf), [rtmp.part2.Message-Formats.pdf](/files/rtmp.part2.Message-Formats.pdf), [rtmp.part3.Commands-Messages.pdf](/files/rtmp.part3.Commands-Messages.pdf), [rtmp_specification_1.0.pdf](/files/rtmp_specification_1.0.pdf), adobe rtmp标准。 +1. [flv_v10_1.pdf](/files/flv_v10_1.pdf), adobe flv。 +1. [video_file_format_spec_v10_1.pdf](/files/video_file_format_spec_v10_1.pdf) flv/f4v. + +## Codec + +1. [mp3.id3v2.3.0.pdf](/files/mp3.id3v2.3.0.pdf), http://id3.org/id3v2.3.0 +1. 
[aac-iso-13818-7.pdf, ISO_IEC_13818-7-AAC-2004.pdf](/files/ISO_IEC_13818-7-AAC-2004.pdf), aac标准,aac的编码等。 +1. [aac-mp4a-format-ISO_IEC_14496-3+2001.pdf, ISO_IEC_14496-3-AAC-2001.pdf](/files/ISO_IEC_14496-3-AAC-2001.pdf), aac的封装标准,即ts/flv里面的aac raw data标准。 +1. [ISO_IEC_14496-1-System-2010.pdf](/files/ISO_IEC_14496-1-System-2010.pdf) mp4 base box. +1. [H.264-AVC-ISO_IEC_14496-10.pdf, ISO_IEC_14496-10-AVC-2003.pdf](/files/ISO_IEC_14496-10-AVC-2003.pdf), avc标准,编码部分。 +1. [H.264-AVC-ISO_IEC_14496-12_2012-mp4.pdf, ISO_IEC_14496-12-base-format-2012.pdf](/files/ISO_IEC_14496-12-base-format-2012.pdf), mp4标准。 +1. [ISO_14496-14_2003_mp4-file-format.pdf, ISO_IEC_14496-14-MP4-2003.pdf](/files/ISO_IEC_14496-14-MP4-2003.pdf), mp4文件格式。 +1. [ISO_IEC_14496-14-MP4-2020.pdf](/files/ISO_IEC_14496-14-MP4-2020.pdf), mp4文件格式。2020版本。 +1. [H.264-AVC-ISO_IEC_14496-10-2012.pdf, ISO_IEC_14496-10-AVC-2012.pdf](/files/ISO_IEC_14496-10-AVC-2012.pdf), avc标准,编码部分。上面的标准是2003年的,和下面的15是2010年的对不上。http://www.itu.int/ITU-T/recommendations/rec.aspx?rec=11466 +1. [H.264-AVC-ISO_IEC_14496-15.pdf, ISO_IEC_14496-15-AVC-format-2012.pdf](/files/ISO_IEC_14496-15-AVC-format-2012.pdf), avc标准,封装部分。 +1. [ISO_IEC_14496-15-AVC-format-2017.pdf](/files/ISO_IEC_14496-15-AVC-format-2017.pdf), avc标准,封装部分。2017版本。 +1. [H.264_MPEG-4-Part-10-White-Paper.pdf](/files/H.264_MPEG-4-Part-10-White-Paper.pdf), h264简介。 +1. [ISO_IEC_11172-3-MP3-1993.pdf](/files/ISO_IEC_11172-3-MP3-1993.pdf), MPEG-1 Audio, for MP3. +1. [ISO_IEC_13818-3-MP3-1997.pdf](/files/ISO_IEC_13818-3-MP3-1997.pdf), MPEG-2 Audio, for MP3. +1. [ITU-T-H.265-2021.pdf](/files/ITU-T-H.265-2021.pdf), H.265 or HEVC. + +## HTTP + +1. [http1.0-rfc1945.txt, rfc1945-1996-http1.0.txt](/files/rfc1945-1996-http1.0.txt), http://www.rfc-editor.org/rfc/rfc1945.txt +1. [http1.1-rfc2616.txt, rfc2616-1999-http1.1.txt](/files/rfc2616-1999-http1.1.txt), http://www.rfc-editor.org/rfc/rfc2616.txt +1. [arpa-internet-text-messages-rfc822.txt, rfc822-1982-arpa-internet-text-messages.txt](/files/rfc822-1982-arpa-internet-text-messages.txt), http://www.rfc-editor.org/rfc/rfc822.txt + +## RTC + +1. [STUN, rfc5389-2008-stun.pdf](/files/rfc5389-2008-stun.pdf): https://tools.ietf.org/html/rfc5389 +1. [TURN, rfc5766-2010-turn.pdf](/files/rfc5766-2010-turn.pdf): https://tools.ietf.org/html/rfc5766 +1. [ICE, rfc5245-2010-ice.pdf](/files/rfc5245-2010-ice.pdf): https://tools.ietf.org/html/rfc5245 +1. [SIP, rfc3261-2002-sip.pdf](/files/rfc3261-2002-sip.pdf): https://tools.ietf.org/html/rfc3261 + +## SRT + +1. [Haivision_SRT_Open_Source_White_Paper.pdf](/files/Haivision_SRT_Open_Source_White_Paper.pdf) +1. [SRT_Alliance_Deployment_Guide.pdf](/files/SRT_Alliance_Deployment_Guide.pdf) +1. [SRT_Protocol_TechnicalOverview_DRAFT_2018-10-17.pdf](/files/SRT_Protocol_TechnicalOverview_DRAFT_2018-10-17.pdf) + +## Others + +1. [kafka-160915-0553-82964.pdf](/files/kafka-160915-0553-82964.pdf), https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol +1. [rtmfp-rfc7016.pdf, rfc7016-2013-rtmfp.pdf](/files/rfc7016-2013-rtmfp.pdf), adobe RTMFP, http://tools.ietf.org/html/rfc7016. +1. [rtmfp-tsvarea-1.pdf](/files/rtmfp-tsvarea-1.pdf), http://www.ietf.org/proceedings/10mar/slides/tsvarea-1.pdf +1. [MPEG-DASH-ISO_IEC_23009-1_2012.pdf, ISO_IEC_23009-1-DASH-2012.pdf](/files/ISO_IEC_23009-1-DASH-2012.pdf), MPEG-DASH标准。 +1. [rfc2326-1998-rtsp.pdf](/files/rfc2326-1998-rtsp.pdf) +1. [rfc3550-2003-rtp.pdf](/files/rfc3550-2003-rtp.pdf) +1. [adobe-hds-specification.pdf](/files/adobe-hds-specification.pdf) +1. 
[adobe-media-manifest-specification.pdf](/files/adobe-media-manifest-specification.pdf) +1. [HTTPDynamicStreamingSpecificationErrataMay2014.pdf](/files/HTTPDynamicStreamingSpecificationErrataMay2014.pdf) +1. [FlashMediaManifestFormatSpecificationErrataMay2014.pdf](/files/FlashMediaManifestFormatSpecificationErrataMay2014.pdf) + +## GB28181 + +1. [GB28181-2016](/files/GBT28181-2016.pdf) +1. [GB28181-2022](/files/GBT28181-2022.pdf) + +## Files + +1. [ffmpeg-logo.png](/files/ffmpeg-logo.png), [ffmpeg-min.png](/files/ffmpeg-min.png), ffmpeg的logo。 +1. [source.flv](/files/source.flv), [source.200kbps.768x320.flv](/files/source.200kbps.768x320.flv), avatar预告片,400kbps。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/tools/zh/v6/specs) + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tools/utility.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tools/utility.md new file mode 100644 index 00000000..51e15636 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tools/utility.md @@ -0,0 +1,19 @@ +--- +title: 实用工具 +sidebar_label: 实用工具 +hide_title: false +hide_table_of_contents: false +--- + +# Utility + +* [Console 控制台](https://ossrs.net/console/ng_index.html) +* [App 应用下载](https://ossrs.net/releases/app.html) +* [HTTP-FLV Player](https://ossrs.net/players/srs_player.html) +* [WebRTC WHIP Publisher](https://ossrs.net/players/whip.html) +* [WebRTC WHEP Player](https://ossrs.net/players/whep.html) +* [HTTP-REST](https://ossrs.net/http-rest/) + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/tools/zh/v6/utility) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/oryx.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/oryx.md new file mode 100644 index 00000000..704f6d06 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/oryx.md @@ -0,0 +1,79 @@ +--- +title: Oryx +sidebar_label: Oryx +hide_title: false +hide_table_of_contents: false +--- + +# Oryx + +Oryx(SRS Stack),面向鼠标编程,没有服务器和命令行经验,也可以构建自己的在线视频服务。入门的最佳推荐。 + +Oryx是一个开源的音视频服务方案,多个音视频场景中开箱即用,组合使用了SRS、FFmpeg、WebRTC等项目,无门槛赋能小微企业,让小微企业轻松具备数字化能力。 + +Oryx:让音视频的力量,赋能所有小微企业。 + +## 24/7 Live Stream: Easy Stream Your Camera to YouTube with DDNS & VPS - No PC or OBS Required! + +https://youtu.be/JMz68_wuVvM + +在本逐步教程中,探索如何使用DDNS和VPS轻松地将您的摄像头24/7实时流式传输到YouTube。告别使用PC或OBS的需求,享受更稳定、 +无缝的直播体验。 + +## Ultimate Unmanned Live Streaming Solution: Easy, Affordable & No PC Required! Perfect for Slow Media, Sleep Music, ASMR, Movie Streaming & More! + +https://youtu.be/CjXkRmNRtHA + +无人直播终极方案,简单、便宜、不占用电脑,自媒体慢直播、无人直播带货、睡眠音乐直播、ASMR直播、电影直播等必备方案 + +## Oryx:起步、购买和入门 +* 地址: https://www.bilibili.com/video/BV1844y1L7dL +* 说明: 如何购买Oryx?如何使用云厂商的镜像一键部署Oryx?如何在宝塔或aaPanel中安装Oryx?如何设置防火墙和安全组? + +## Oryx:音视频无门槛赋能小微企业 +* 地址: https://www.bilibili.com/video/BV1cq4y1e7Au +* 说明: 什么是Oryx?为何要做Oryx?Oryx可以带来什么价值?Oryx适用哪些用户和场景? 
+ +## Oryx:直播录制和多平台转播 +* 地址: https://www.bilibili.com/video/BV1KY411V7uc +* 说明: 如何使用Oryx实现多平台直播,比如同时在视频号、B站和快手直播,同时不增加上行带宽。上行带宽增加会增加推流卡顿的风险,导致直播翻车。 + +## 周亮:自建CDN的高质量源站 +* 地址: https://www.bilibili.com/video/BV1gT4y1U76d +* 说明: 构建自己的高质量源站,BGP带宽支持不同运营商回源,专线带宽有保障,可实现源站的录制和转推等业务能力。 + +## 唐为:Oryx解决二手货交易的信任问题 +* 地址: https://www.bilibili.com/video/BV14S4y1k7gr +* 说明: 二手货电商中,买家不能确定货物是否有质量问题,也不能确定是货物本身还是运输导致的问题,Oryx直播和录制可以让这个过程“眼见为实”。 + +## Oryx:如何让村晚直播实现远程制作 +* 地址: https://www.bilibili.com/video/BV1Nb4y1t7ij +* 说明: “村晚”就是每个村的晚会,丰富了广大人民的生活。村晚节目在现场直播时,需要对节目再进行处理,加台标和加效果,然后再播出。Oryx让节目可以远程制作,让村晚更便捷。 + +## 王大江:Oryx多种场景应用 +* 地址: https://www.bilibili.com/video/BV16r4y1q7ZT +* 说明: Oryx面向鼠标编程,让每个人都能做音视频业务。不懂音视频的可以,懂音视频的也可以,种地的可以,撸网线的可以,剪电影的可以,背摄像机的也可以,跳舞的可以,唱歌的可以,卖二手货的也可以,开源项目交流也可以,多平台直播也可以,会用电脑有微信就可以,守法公民都可以。 + +## 崔国栋:如何用Oryx的SRT实现300ms低延迟直播 +* 地址: https://www.bilibili.com/video/BV1aS4y1G7iG +* 说明: 在直播推流和编码器中,除了RTMP,SRT是支持得很普及的协议。配合Oryx,我们使用SRT可以做到300ms左右延迟,实现真正秒内的低延迟直播。 + +## 徐光磊:如何用Oryx搭建私人专享直播间 +* 地址: https://www.bilibili.com/video/BV1RS4y1G7tb +* 说明: 如何一键快速拥有自己的私人直播间,而且带有公网IP,可以任何地方都能访问,用任何设备都能访问,实现自己的私人的专享的直播间。 + +## 程晓龙:Oryx如何一键HTTPS +* 地址: https://www.bilibili.com/video/BV1tZ4y1R7qp +* 说明: Oryx面向鼠标编程,让每个人都能做音视频业务。不懂音视频的可以,懂音视频的也可以,种地的可以,撸网线的可以,剪电影的可以,背摄像机的也可以,跳舞的可以,唱歌的可以,会用电脑有微信就可以,守法公民都可以。 + +## 需求介绍:Oryx支持GB +* 地址: https://www.bilibili.com/video/BV1SL411K7KS +* 说明: Oryx面向鼠标编程,让每个人都能做音视频业务。不懂音视频的可以,懂音视频的也可以,种地的可以,撸网线的可以,剪电影的可以,背摄像机的也可以,跳舞的可以,唱歌的可以,会用电脑有微信就可以,守法公民都可以。 + +## 马景瑞:Oryx的SRT超清直播应用 +* 地址: https://www.bilibili.com/video/BV1c341177e7 +* 说明: Oryx面向鼠标编程,让每个人都能做音视频业务。不懂音视频的可以,懂音视频的也可以,种地的可以,撸网线的可以,剪电影的可以,背摄像机的也可以,跳舞的可以,唱歌的可以,会用电脑有微信就可以,守法公民都可以。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/tutorial/zh/v6/oryx) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/srs-books.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/srs-books.md new file mode 100644 index 00000000..ccff27e7 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/srs-books.md @@ -0,0 +1,58 @@ +--- +title: 书籍教程 +sidebar_label: 书籍教程 +hide_title: false +hide_table_of_contents: false +--- + +# 书籍和培训教程 + +重点推荐: + +* [《零声学院:SRS流媒体服务器实战》](https://www.bilibili.com/video/BV1XZ4y1P7um) +* [《夏立新:SRS代码分析》](https://github.com/xialixin/srs_code_note/blob/master/doc/srs_note.md) +* [《罗上文:SRS原理》](https://srs.xianwaizhiyin.net/) + +## Solution Guides + +* [陈海博:SRS在安防中的应用](https://www.bilibili.com/video/BV11S4y197Zx) +* 最佳实践:[一对一通话](https://mp.weixin.qq.com/s/xWe6f9WRhtwnpJQ8SO0Eeg),[多人通话](https://mp.weixin.qq.com/s/CM2h99A1e_masL5sjkp4Zw)和[直播连麦](https://mp.weixin.qq.com/s/7xexl07rrWBdh8xennXK3w) +* [最佳实践:如何扩展你的SRS并发能力?](https://mp.weixin.qq.com/s/pd9YQS0WR3hSuHybkm1F7Q) +* SRS是单进程模型,不支持多进程;可以使用[集群](https://mp.weixin.qq.com/s/pd9YQS0WR3hSuHybkm1F7Q) 或者[ReusePort](../doc/reuse-port.md)扩展多进程(多核)能力。 +* [基于HLS-TS&RTMP-FLV的微信小程序点直播方案](https://mp.weixin.qq.com/s/xhScUrkoroM7Q7ziODHyMA) +* [借力SRS落地实际业务的几个关键事项](https://mp.weixin.qq.com/s/b19kBer_phZl4n4oUBOvxQ) +* [干货 | 基于SRS直播平台的监控系统之实现思路与过程](https://mp.weixin.qq.com/s/QDTtW85giKmryhvCBkyyCg) +* [Android直播实现](https://blog.csdn.net/dxpqxb/article/details/83012950) +* [SRS直播服务器与APP用户服务器的交互](https://www.jianshu.com/p/f3dfa727475a) +* [使用flvjs实现摄像头flv流低延时实时直播](https://www.jianshu.com/p/2647393f956a) +* [IOS 直播方面探索(服务器搭建,推流,拉流)](https://www.jianshu.com/p/1aa677d99d17) +* [国产开源流媒体SRS4.0对视频监控GB28181的支持](https://mp.weixin.qq.com/s/VIPSPaBB5suUk7_I2oOkMw) + +## Develop Guide + +* 
[高性能网络服务器设计](https://blog.csdn.net/win_lin/article/details/8242653),分析高性能网络服务器的设计要点。 +* [SRS高精度、低误差定时器](https://mp.weixin.qq.com/s/DDSzRKHyJ-uYQ9QQC9VOZg),论高并发服务器的定时器问题。 +* [协程原理:函数调用过程、参数和寄存器](https://mp.weixin.qq.com/s/2TsYSiV8ysyLrELHdlHtjg),剖析SRS协程实现的最底层原理。 +* [性能优化:SRS为何能做到同类的三倍](https://mp.weixin.qq.com/s/r2jn1GAcHe08IeTW32OyuQ),论性能优化的七七八八、前前后后。 +* [SRS代码分析](https://github.com/xialixin/srs_code_note/blob/master/doc/srs_note.md),分析SRS结构和代码逻辑,类结构图,线程模型,模块架构。 +* [Third-party Client SDK](../doc/client-sdk.md): 第三方厂商提供的客户端推流和播放的SDK,一般是移动端包括Andoird和iOS。 +* [轻量线程分析](https://github.com/ossrs/state-threads#analysis),分析SRS依赖的库ST的关键技术。 +* [SRS代码分析](https://github.com/xialixin/srs_code_note/blob/master/doc/srs_note.md),分析SRS结构和代码逻辑,类结构图,线程模型,模块架构。 +* [深度: 掀起你的汇编来:如何移植ST协程到其他系统或CPU?](https://mp.weixin.qq.com/s/dARz99INVlGuoFW6K7SXaw) +* [肖志宏:SRS支持WebRTC级联和QUIC协议](https://www.bilibili.com/video/BV1Db4y1b77J) +* [StateThreads源码分析](https://www.xianwaizhiyin.net/?cat=24) +* [SRS 4.0源码分析](https://www.xianwaizhiyin.net/?cat=21) + +## Tech Docs + +* [历经5代跨越25年的RTC架构演化史](https://mp.weixin.qq.com/s/fO-FcKU_9Exdqh4xb_U5Xw) +* [技术解码 | SRT和RIST协议综述](https://mp.weixin.qq.com/s/jjtD4ik-9noMyWbecogXHg) +* [公众号专栏:SRS,知识库,重要功能和阶段性结果,解决方案和DEMO](https://mp.weixin.qq.com/mp/appmsgalbum?action=getalbum&__biz=MzA4NTQ3MzQ5OA==&scene=1&album_id=1703565147509669891&count=10#wechat_redirect) +* [公众号专栏:深度,底层技术分析,服务器模型,协议处理,性能优化等](https://mp.weixin.qq.com/mp/appmsgalbum?__biz=MzA4NTQ3MzQ5OA==&action=getalbum&album_id=2156820160114900994#wechat_redirect) +* [公众号专栏:动态,关于最新的会议和动态,新闻,社区等](https://mp.weixin.qq.com/mp/appmsgalbum?__biz=MzA4NTQ3MzQ5OA==&action=getalbum&album_id=1683217451712299009&count=10#wechat_redirect) +* [WebRTC 的现状和未来:专访 W3C WebRTC Chair Bernard Aboba](https://mp.weixin.qq.com/s/0HzzWSb5irvpNKNnSJL6Bg) +* [B站专栏(视频):SRS开源服务器](https://space.bilibili.com/430256302/channel/detail?cid=136049) +* [零声学院(视频):SRS流媒体服务器实战](https://www.bilibili.com/video/BV1XZ4y1P7um) +* [音视频开发为什么要学SRS流媒体服务器](https://zhuanlan.zhihu.com/p/190182314) + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/tutorial/zh/v6/srs-books) diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/srs-faq.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/srs-faq.md new file mode 100644 index 00000000..1df8a28e --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/srs-faq.md @@ -0,0 +1,54 @@ +--- +title: SRS FAQ +sidebar_label: 答疑回看 +hide_title: false +hide_table_of_contents: false +--- + +# SRS 答疑FAQ合集 + +SRS精彩答疑话题 + +## FAQ: SRS有哪些文档和资料 +* 地址: https://www.bilibili.com/video/BV1QA4y1S7iU +* 说明: SRS有哪些文档资料?先看Usage,在看FAQ,接着是Wiki,还有Issues。如果GitHub访问慢怎么办呢?可以把资料Clone到本地,或者访问Gitee镜像。 + +## FAQ:SRS是否支持STUN和WebRTC的P2P +* 地址: https://www.bilibili.com/video/BV13t4y1x7QV +* 说明: SRS是否支持STUN协议?如何支持WebRTC P2P打洞?SFU和P2P的区别? + +## FAQ:SRS导致WebRTC丢帧如何排查 +* 地址: https://www.bilibili.com/video/BV1LS4y187xU +* 说明: RTMP推流到SRS使用WebRTC播放是常见的用法,RTMP是30帧,WebRTC只有10帧,看起来就会卡顿不流畅,这个视频分享了如何排查这类问题。 + +## FAQ:SRS有哪些Docker镜像 +* 地址: https://www.bilibili.com/video/BV1BZ4y1a7Fg +* 说明: Docker是非常好用的技术,SRS提供了完善的Docker镜像,也可以自己打SRS的Docker镜像。 + +## FAQ:SRS如何提交Issue +* 地址: https://www.bilibili.com/video/BV13v4y1A74N +* 说明: 如果碰到问题,怎么判断是否是Issue?怎么排查Issue?如何提交新的Issue?为何提交的Issue被删除? + +## FAQ:SRS为何不支持WebRTC的FEC等复杂算法 +* 地址: https://www.bilibili.com/video/BV1CA4y1f7JW +* 说明: 什么是WebRTC的拥塞控制算法?FEC和NACK有何不同、如何选择?为何SRS没有支持复杂的算法?为何说复杂牛逼的算法一般没什么鸟用? 
+ +## FAQ:CDN支持WebRTC的完善度 +* 地址: https://www.bilibili.com/video/BV14r4y1b7cH +* 说明: CDN或云厂商是否都支持WebRTC了?为何说是差不多支持了?目前还有哪些问题或坑?都有哪些CDN的直播是支持WebRTC协议的? + +## FAQ:如何实现直播混流或WebRTC的MCU +* 地址: https://www.bilibili.com/video/BV1L34y1E7D5 +* 说明: 如何给直播添加LOGO?如何实现直播画中画?如何实现WebRTC转直播?如何实现WebRTC的MCU功能?为何RTC架构大多是SFU而不是MCU?什么时候必须用MCU? + +## FAQ:开源SFU如何选?Janus有哪些问题,何解? +* 地址: https://www.bilibili.com/video/BV1bR4y1w7X1 +* 说明: Janus是WebRTC领域使用最广泛也是最好的SFU之一,当然和所有SFU一样它也有一堆的问题,选择开源选的不仅是代码和架构,选择的更是活跃的社区和对方向的判断。 + +## FAQ:如何更低码率达到同等画质 +* 地址: https://www.bilibili.com/video/BV1qB4y197ov +* 说明: 在保证画质的前提下,如何降低码率?我们可以使用动态码率,还可以使用相对空闲的客户端CPU交换码率,还可以在业务上优化,特别多平台推流时需要避免上行码率过高。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/tutorial/zh/v6/srs-faq) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/srs-other.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/srs-other.md new file mode 100644 index 00000000..49f9a142 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/srs-other.md @@ -0,0 +1,72 @@ +--- +title: 其他 +sidebar_label: 其他视频 +hide_title: false +hide_table_of_contents: false +--- + +# 其他视频教程 + +## 龙芯音视频 + +大师兄龙芯专题,音视频在龙芯架构的应用,一起期待。 + +### Loongarch-001-龙芯开机体验-FFmpeg +* 地址: https://www.bilibili.com/video/BV19L411L7w6 +* 说明: 大师兄龙芯专题:体验龙芯机器上如何跑FFmpeg,下一集就是龙芯SRS,一起期待。 + +FFmpeg是音视频的广泛使用的、功能丰富的、几乎人手一个的,音视频封装格式和编解码转换的工具,有非常丰富的滤镜插件,在各种知名的播放器中也可以看到FFmpeg的应用。 + +### Loongarch-002-龙芯开机体验-SRS +* 地址: https://www.bilibili.com/video/BV1D34y1X7uG +* 说明: 大师兄龙芯专题:龙芯SRS来了。 + +### 龙芯FFmpeg进入5.0时代 +* 地址: https://www.bilibili.com/video/BV1X44y1V7rb +* 说明: FFmpeg 5.0发布,龙芯FFmpeg进入5.0时代 + + +## 用OBS做直播 +如何用OBS做直播的经验分享,尝试各种直播的技术和新姿势,SRS每周六直播答疑都是用OBS做的直播。 + +### OBS如何做直播连麦和多人连麦 +* 地址: https://www.bilibili.com/video/BV1E44y1Y7yX +* 说明: 简简单单给OBS增加连麦能力,非常的简单容易操作,支持全球连麦,而且不影响OBS现有的流程和使用习惯。所有主播都可以学会,不需要额外的插件,轻轻松松给直播间增加连麦能力。 + +### OOBS-005-OBS直播流嵌入观众评论消息 +* 地址: https://www.bilibili.com/video/BV16g411A7EK +* 说明: OBS推流到视频号和B站直播时,如何把直播间的评论区抓到直播流中,所有观众都能看到,而且可以看到上墙的评论。 + +OBS是使用非常广泛也非常好用的,一个直播推流工具。它有非常强大的功能,支持多设备捕捉、窗口捕捉、浏览器捕捉,支持场景切换可以当导播台,还有丰富的各种插件。OBS是推流客户端,配合Oryx可以快速构建直播应用。 + +### OBS-004-OBS快速裁剪窗口 +* 地址: https://www.bilibili.com/video/BV1Pf4y1T7Ax +* 说明: OBS的画布上的元素比如窗口,都可以调整大小,这也是非常非常高频的操作,有一个方法可以非常简单就可以操作,而且是所见即所得的。 + +OBS是使用非常广泛也非常好用的,一个直播推流工具。它有非常强大的功能,支持多设备捕捉、窗口捕捉、浏览器捕捉,支持场景切换可以当导播台,还有丰富的各种插件。OBS是推流客户端,配合Oryx可以快速构建直播应用 + +### OBS-006-OBS如何避免网络翻车 +* 地址: https://www.bilibili.com/video/BV1DM4y1w7xU +* 说明: 主播网络如果有问题,就直接翻车了,所有人看起来都会卡,WiFi是主播最容易翻车的点之一。 + +### OBS-007-OBS直播间状态 +* 地址: https://www.bilibili.com/video/BV16q4y1m7Cv +* 说明: OBS把直播间的状态放到流中,和观众更好的互动。 + +### OBS-008-如何放大屏幕局部区域 +* 地址: https://www.bilibili.com/video/BV1Ai4y1X7uw +* 说明: 使用OBS做直播,或者OBS录制视频时,如何平滑的放大某个局部区域,这样可以看得更清楚。 + +OBS是使用非常广泛也非常好用的,一个直播推流工具。它有非常强大的功能,支持多设备捕捉、窗口捕捉、浏览器捕捉,支持场景切换可以当导播台,还有丰富的各种插件。OBS是推流客户端,配合Oryx可以快速构建直播应用。 + +## 其他 + +开源圆桌,访谈等。 + +### LVS开源圆桌 +* 地址: https://www.bilibili.com/video/BV1GL4y177FR +* 说明: LVS开源圆桌。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/tutorial/zh/v6/srs-other) + + diff --git a/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/srs-server.md b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/srs-server.md new file mode 100644 index 00000000..3190dc69 --- /dev/null +++ b/i18n/zh-cn/docusaurus-plugin-content-docs/version-6.0/tutorial/srs-server.md @@ -0,0 +1,64 @@ +--- +title: SRS Server +sidebar_label: SRS Server +hide_title: false +hide_table_of_contents: false +--- + +# SRS Server + +## 开源视频服务器 +* 地址: 
https://www.bilibili.com/video/BV1M54y1z7jo +* 说明: SRS是开源视频服务器,支持直播和WebRTC,高效稳定,一直在更新,社区和开发者很活跃。本视频介绍了SRS的定位、发展、思路和里程碑,推荐想了解SRS的朋友观看。 + +## SRS如何支持WebRTC +* 地址: https://www.bilibili.com/video/BV1r54y1S77q +* 说明: WebRTC是Google推出的做Web视频会议的框架,可以用浏览器就可以实现多人视频通话;同时随着Flash的禁用,WebRTC也在低延迟直播中也有应用。 + +## SRS开发和定制 +* 地址: https://www.bilibili.com/video/BV1az4y1Q7zL +* 说明: 开源产品降低了开发的难度和工作量,同时任何产品都不可能完全满足所有的业务需求,这需要工程师定制和开发,这个视频介绍了如何在SRS基础上定制自己的流媒体服务器。 + +## SRS运行环境 +* 地址: https://www.bilibili.com/video/BV19A411v7Zz +* 说明: SRS都可以在哪些环境运行?如何用Docker或安装包运行?或者从源代码编译和运行?如何修改代码和打安装包? + +## SRS配置和热加载 +* 地址: https://www.bilibili.com/video/BV1SZ4y1M7Ag +* 说明: SRS有非常多的配置项,怎么了解这些配置。如何修改配置后,不重启服务,实现配置的热加载。 + +## SRS关于HTTPS和WebRTC推流 +* 地址: https://www.bilibili.com/video/BV1bK4y1x7Ut +* 说明: WebRTC推流除了本机可以HTTP,一般都必须用HTTPS。另外HTTPS-FLV,HTTPS HLS这些也是常用的能力。本视频介绍了如何让SRS支持HTTPS。 + +## 陈海博:SRS在安防中的应用 +* 地址: https://www.bilibili.com/video/BV11S4y197Zx +* 说明: 安防领域是音视频的垂直细分行业中庞大的市场之一,安防也是物联网的应用领域之一,各种嵌入式的摄像头正在和互联网产生连接,SRS是其中关键的一环,实现了GB28181接入,转换成互联网直播和WebRTC协议,陈海博是SRS技术委员TOC成员,在安防领域有多年的丰富的工作经验,通过这次分享可以详细了解安防的音视频和互联网的差别,SRS解决了什么问题,安防领域要解决的关键问题是什么,哪些问题不能使用SRS解决,SRS未来对安防的支持的方向是什么。 + +## 肖志宏:RTC级联和QUIC协议 +* 地址: https://www.bilibili.com/video/BV1Db4y1b77J +* 说明: WebRTC的集群一般叫级联,是扩展服务器并发能力的一种方式,单台服务器支持的并发有限,通过级联可以支持更多的并发。WebRTC是基于UDP的,因此我们选择QUIC协议作为集群之间的通信协议。 + +## SRS日志和错误 +* 地址: https://www.bilibili.com/video/BV1mD4y1S7jy +* 说明: SRS的面向会话的日志,让排查长连接问题非常高效,可以分离出会话整个长时间生命周期中的、上下文相关的日志。SRS的错误带有堆栈,可以在出现错误时一眼能看出来问题发生的上下文。 + +## IDE高效调试 +* 地址: https://www.bilibili.com/video/BV1bF411q7R4 +* 说明: 如何调试SRS,同时开多个窗口看调用链函数,还能快速搜索 + +## SRS高效理解代码 +* 地址: https://www.bilibili.com/video/BV1Bp4y1v7hR +* 说明: 如何高效的了解SRS的代码,推荐使用工具,能够更快速和全面的掌握代码上下文。 + +## SRS:如何用NGINX搭建HLS分发集群 +* 地址: https://www.bilibili.com/video/BV1DP4y1K7Jc +* 说明: SRS作为源站,用NGINX作为边缘集群,实现大规模的HLS或DASH的分发,也可以配合SRS Edge分发FLV,也可以用NGINX支持HTTPS HLS或FLV。SRS的集群,终于补上了重要一块拼图。 + +## SRS十年岔路:SRS 5.0核心问题定义和解法 +* 地址: https://www.bilibili.com/video/BV1bY4y1L7Kn +* 说明: 时光过隙,SRS已经进入第十年了,十年岔路有非常多的挑战和问题,SRS又如何做出调整和选择,SRS对于音视频开源服务器的核心任务有哪些认知的变化,开源社区对SRS的影响又有哪些。 + +![](https://ossrs.net/gif/v1/sls.gif?site=ossrs.net&path=/lts/tutorial/zh/v6/srs-server) + + diff --git a/versioned_docs/version-6.0/doc/arm.md b/versioned_docs/version-6.0/doc/arm.md new file mode 100644 index 00000000..dfbbc23a --- /dev/null +++ b/versioned_docs/version-6.0/doc/arm.md @@ -0,0 +1,178 @@ +--- +title: ARM and CrossBuild +sidebar_label: ARM and CrossBuild +hide_title: false +hide_table_of_contents: false +--- + +# SRS for linux-arm + +How to run SRS on ARM pcu? + +* Run SRS on ARM: Client can play stream from ARM server. + +## Why run SRS on ARM? + +The use scenario: + +* Run SRS on ARM server, see [#1282](https://github.com/ossrs/srs/issues/1282#issue-386077124). +* Crossbuild for ARM embeded device, see [#1547](https://github.com/ossrs/srs/issues/1547#issue-543780097). + +## RaspberryPi + +User is able to build and run SRS on RespberryPI. Please don't use crossbuild. + + + +## ARM Server: armv7, armv8(aarch64) + +User is able to build and run SRS on ARM servers. Please don't use crossbuild. 
+ +``` +./configure && make +``` + +Build SRS in ARM server docker, see [aarch64](https://github.com/ossrs/dev-docker/tree/aarch64#usage) + +``` +docker run -it --rm -v `pwd`:/srs -w /srs ossrs/srs:aarch64 \ + bash -c "./configure && make" +``` + +For armv8 or aarch64, user should specify the arch, if the CPU arch is not identified automatically, see [#1282](https://github.com/ossrs/srs/issues/1282#issuecomment-568891854): + +```bash +./configure --extra-flags='-D__aarch64__' && make +``` + +Run SRS: + +``` +./objs/srs -c conf/console.conf +``` + +Publish stream: + +``` +ffmpeg -re -i doc/source.flv -c copy -f flv rtmp://127.0.0.1:1935/live/livestream +``` + +Play stream:[http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) + +![image](https://user-images.githubusercontent.com/2777660/72774670-7108c980-3c46-11ea-9e8b-d4fb3a475ea2.png) + + + +## Ubuntu Cross Build SRS: ARMv8(aarch64) + +Build SRS in docker(Ubuntu20(xenial)): + +``` +cd ~/git/srs/trunk +docker run --rm -it -v `pwd`:/srs -w /srs ossrs/srs:ubuntu20 bash +``` + +Install toolchain(optional): + +``` +apt-get install -y gcc-aarch64-linux-gnu g++-aarch64-linux-gnu +``` + +Cross build SRS: + +``` +./configure --cross-build --cross-prefix=aarch64-linux-gnu- +make +``` + +Run SRS on [aarch64 docker](https://hub.docker.com/r/arm64v8/ubuntu): + +``` +cd ~/git/srs/trunk && docker run --rm -it -v `pwd`:/srs -w /srs \ + -p 1935:1935 -p 1985:1985 -p 8080:8080 arm64v8/ubuntu \ + ./objs/srs -c conf/console.conf +``` + +Publish stream: + +``` +ffmpeg -re -i doc/source.flv -c copy -f flv rtmp://127.0.0.1:1935/live/livestream +``` + +Play stream:[http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) + +## Ubuntu Cross Build SRS: ARMv7 + +Cross build ST and OpenSSL on Ubuntu20. + +Build SRS in docker(Ubuntu20(xenial)): + +``` +cd ~/git/srs/trunk +docker run --rm -it -v `pwd`:/srs -w /srs ossrs/srs:ubuntu20 bash +``` + +Install toolchain(optional), for example [Acqua or RoadRunner board](https://www.acmesystems.it/arm9_toolchain) + +``` +apt-get install -y gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf +``` + +Cross build SRS: + +``` +./configure --cross-build --cross-prefix=arm-linux-gnueabihf- +make +``` + +Run SRS on [ARMv7 docker](https://hub.docker.com/r/armv7/armhf-ubuntu): + +``` +cd ~/git/srs/trunk && docker run --rm -it -v `pwd`:/srs -w /srs \ + -p 1935:1935 -p 1985:1985 -p 8080:8080 armv7/armhf-ubuntu \ + ./objs/srs -c conf/console.conf +``` + +Publish stream: + +``` +ffmpeg -re -i doc/source.flv -c copy -f flv rtmp://127.0.0.1:1935/live/livestream +``` + +Play stream:[http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) + +## Ubuntu Cross Build SRS: hisiv500(arm) + +TBD. + +## Use Other Cross build tools + +SRS configure options for cross build: + +```bash +./configure -h + +Presets: + --cross-build Enable cross-build, please set bellow Toolchain also. Default: off + +Cross Build options: @see https://ossrs.io/lts/en-us/docs/v6/doc/arm#ubuntu-cross-build-srs + --cpu= Toolchain: Select the minimum required CPU. For example: --cpu=24kc + --arch= Toolchain: Select architecture. For example: --arch=aarch64 + --host= Toolchain: Build programs to run on HOST. For example: --host=aarch64-linux-gnu + --cross-prefix= Toolchain: Use PREFIX for tools. 
For example: --cross-prefix=aarch64-linux-gnu- + +Toolchain options: + --static=on|off Whether add '-static' to link options. Default: off + --cc= Toolchain: Use c compiler CC. Default: gcc + --cxx= Toolchain: Use c++ compiler CXX. Default: g++ + --ar= Toolchain: Use archive tool AR. Default: g++ + --ld= Toolchain: Use linker tool LD. Default: g++ + --randlib= Toolchain: Use randlib tool RANDLIB. Default: g++ + --extra-flags= Set EFLAGS as CFLAGS and CXXFLAGS. Also passed to ST as EXTRA_CFLAGS. +``` + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/arm) + + diff --git a/versioned_docs/version-6.0/doc/client-sdk.md b/versioned_docs/version-6.0/doc/client-sdk.md new file mode 100644 index 00000000..3e670778 --- /dev/null +++ b/versioned_docs/version-6.0/doc/client-sdk.md @@ -0,0 +1,52 @@ +--- +title: Client SDK +sidebar_label: Client SDK +hide_title: false +hide_table_of_contents: false +--- + +# Client SDK + +The workflow of live streaming: + +``` ++---------+ +-----------------+ +---------+ +| Encoder +-->---+ SRS/CDN Network +--->---+ Player | ++---------+ +-----------------+ +---------+ +``` + +## EXOPlayer + +The [EXOPlayer](https://github.com/google/ExoPlayer) is a Android player which support HTTP-FLV and HLS. + +## IJKPlayer + +[ijkplayer](https://github.com/Bilibili/ijkplayer) is a player from [bilibili](http://www.bilibili.com/), for both Android and iOS. + +## FFmpeg + +[FFmpeg](https://ffmpeg.org) is a complete, cross-platform solution to record, convert and stream audio and video. + +## LIBRTMP + +The [LIBRTMP](https://github.com/ossrs/librtmp) or [SRS-LIBRTMP](https://github.com/ossrs/srs-librtmp) only provides transport over RTMP. + +## WebRTC + +[WebRTC](https://webrtc.org/) is Real-time communication for the web. + +## PC + +Although the number of PC users are smaller, there are still some use scenarios for [OBS](https://obsproject.com). + +> Remark: For publishing by OBS, the **Stream Key** should be filled by stream name. + +![OBS](/img/doc-integration-client-sdk-001.png) + +![OBS](/img/doc-integration-client-sdk-002.png) + +Winlin 2017.4 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/client-sdk) + + diff --git a/versioned_docs/version-6.0/doc/cloud.md b/versioned_docs/version-6.0/doc/cloud.md new file mode 100644 index 00000000..336a1e8e --- /dev/null +++ b/versioned_docs/version-6.0/doc/cloud.md @@ -0,0 +1,12 @@ +--- +title: Cloud +sidebar_label: Cloud +hide_title: false +hide_table_of_contents: false +--- + +# Cloud + +Migrated to [Cloud](/cloud) + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/cloud) diff --git a/versioned_docs/version-6.0/doc/delivery-hds.md b/versioned_docs/version-6.0/doc/delivery-hds.md new file mode 100644 index 00000000..e5939628 --- /dev/null +++ b/versioned_docs/version-6.0/doc/delivery-hds.md @@ -0,0 +1,59 @@ +--- +title: HDS Delivery +sidebar_label: HDS Delivery +hide_title: false +hide_table_of_contents: false +--- + +# HDS Delivery + +HDS is the Http Dynamic Stream of Adobe,similar to Apple [HLS](./hls.md). + +For specification of HDS, read http://www.adobe.com/devnet/hds.html + +## Build + +We can disable or enable HDS when build SRS, read [Build](./install.md) + +``` +./configure --hds=on +``` + +## Player + +The OSMF player can play HDS. 
For example, use VLC to play the following HDS: + +``` +http://ossrs.net:8081/live/livestream.f4m +``` + +## HDS Config + +The vhost hds.srs.com of conf/full.conf describes the config for HDS: + +``` +vhost __defaultVhost__ { + hds { + # whether hds enabled + # default: off + enabled on; + # the hds fragment in seconds. + # default: 10 + hds_fragment 10; + # the hds window in seconds, erase the segment when exceed the window. + # default: 60 + hds_window 60; + # the path to store the hds files. + # default: ./objs/nginx/html + hds_path ./objs/nginx/html; + } +} +``` + +The config items are similar to HLS, read [HLS config](./hls.md#hls-config) + +Winlin 2015.3 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/delivery-hds) + + diff --git a/versioned_docs/version-6.0/doc/delivery-hls.md b/versioned_docs/version-6.0/doc/delivery-hls.md new file mode 100644 index 00000000..efb8c54e --- /dev/null +++ b/versioned_docs/version-6.0/doc/delivery-hls.md @@ -0,0 +1,14 @@ +--- +title: HLS Delivery +sidebar_label: HLS Delivery +hide_title: false +hide_table_of_contents: false +--- + +# HLS Delivery + +Migrated to [HLS](./hls.md). + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/delivery-hls) + + diff --git a/versioned_docs/version-6.0/doc/delivery-http-flv.md b/versioned_docs/version-6.0/doc/delivery-http-flv.md new file mode 100644 index 00000000..ec3ddf2a --- /dev/null +++ b/versioned_docs/version-6.0/doc/delivery-http-flv.md @@ -0,0 +1,14 @@ +--- +title: HTTP-FLV Delivery +sidebar_label: HTTP-FLV Delivery +hide_title: false +hide_table_of_contents: false +--- + +# HTTP-FLV Delivery + +Migrated to [HTTP-FLV](./flv.md). + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/delivery-http-flv) + + diff --git a/versioned_docs/version-6.0/doc/delivery-rtmp.md b/versioned_docs/version-6.0/doc/delivery-rtmp.md new file mode 100644 index 00000000..e4299c3b --- /dev/null +++ b/versioned_docs/version-6.0/doc/delivery-rtmp.md @@ -0,0 +1,14 @@ +--- +title: RTMP Delivery +sidebar_label: RTMP Delivery +hide_title: false +hide_table_of_contents: false +--- + +# RTMP Delivery + +Migrated to [RTMP](./rtmp.md). + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/delivery-rtmp) + + diff --git a/versioned_docs/version-6.0/doc/drm.md b/versioned_docs/version-6.0/doc/drm.md new file mode 100644 index 00000000..af4c4e95 --- /dev/null +++ b/versioned_docs/version-6.0/doc/drm.md @@ -0,0 +1,131 @@ +--- +title: DRM +sidebar_label: DRM +hide_title: false +hide_table_of_contents: false +--- + +# DRM + +DRM use to protect the content, can use many strategys: +* Referer Anti-suck: Check the referer(PageUrl) of RTMP connect params, which is set by flash player. +* Token Authentication: Check the token of RTMP connect params, SRS can use http-callback to verify the token. +* FMS token tranverse: Edge server will verify each connection on origin server. +* Access Server: Adobe Access Server. +* Publish Authentication: The authentication protocol for publish. + + + + +## Referer Anti-suck + +SRS support config the referer to anti-suck. + +When play RTMP url, adobe flash player will send the page url in the connect params PageUrl, +which is cannot changed by as code, server can check the web page url to ensure the user is ok. + +While user use client application, the PageUrl can be any value, for example, +use srs-librtmp to play RTMP url, the Referer anti-suck is not work. 
+ +To config the referer anti-suck in srs: + +```bash +# the vhost for anti-suck. +vhost refer.anti_suck.com { + # refer hotlink-denial. + refer { + # whether enable the refer hotlink-denial. + # default: off. + enabled on; + # the common refer for play and publish. + # if the page url of client not in the refer, access denied. + # if not specified this field, allow all. + # default: not specified. + all github.com github.io; + # refer for publish clients specified. + # the common refer is not overrided by this. + # if not specified this field, allow all. + # default: not specified. + publish github.com github.io; + # refer for play clients specified. + # the common refer is not overrided by this. + # if not specified this field, allow all. + # default: not specified. + play github.com github.io; + } +} +``` + +> Remark: SRS3 use new style config for referer, which is compatible with SRS1/2. + +The bellow protocols support referer: + +* RTMP: Both publisher and player. + +## Token Authentication + +The token authentication similar to referer, but the token is put in the url, not in the args of connect: + +``` +rtmp://vhost/app/stream?token=xxxx +http://vhost/app/stream.flv?token=xxxx +http://vhost/app/stream.m3u8?token=xxxx +http://vhost/rtc/v1/whip/?app=live&stream=livestream&token=xxx +http://vhost/rtc/v1/whep/?app=live&stream=livestream&token=xxx +``` + +SRS will pass the token in the http-callback. read [HTTP callback](./http-callback.md) + +Token is robust then referer, can specifies more params, for instance, the expire time. For example: + +1. When user access the web page, web application server can generate a token in the URL, for example, `token = md5(time + id + salt + expire) = 88195f8943e5c944066725df2b1706f8` +1. The RTMP URL to publish is, for instance, `rtmp://192.168.1.10/live/livestream?time=1402307089&expire=3600&token=88195f8943e5c944066725df2b1706f8` +1. Config the http callback of SRS `on_publish http://127.0.0.1:8085/api/v1/streams;` , read [HTTP callback](./http-callback.md#config-srs) +1. When user publishing stream, SRS will callback the url with token to verify, if invalid, the http callback can return none zero which indicates error. + +> Note: You're able to verify the play. + +## TokenTraverse + +The FMS token tranverse is when user connect to edge server, +the edge server will send the client info which contains token +to origin server to verify. It seems that the token from client +tranverse from edge to origin server. + +FMS edge and origin use private protocol, use a connection to fetch data, +another to transport the control message, for example, the token tranverse +is a special command, @see https://github.com/ossrs/srs/issues/104 + +Recomment the token authentication to use http protocol; +the token tranverse must use RTMP protocol, so many RTMP servers do not +support the token tranverse. + +SRS supports token tranverse like FMS, but SRS always create a new connection +to verify the client info on origin server. + +THe config for token tranverse, see `edge.token.traverse.conf`: + +```bash +listen 1935; +vhost __defaultVhost__ { + cluster { + mode remote; + origin 127.0.0.1:19350; + token_traverse on; + } +} +``` + +## Access Server + +SRS does not support. + +## Publish Authentication + +SRS does not support. 
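+
+Returning to the Token Authentication section above, here is a minimal sketch of how a web application might generate the token and the publish URL. The field order, the salt and the hash tool are assumptions, use whatever rule your application and the HTTP callback server agree on:
+
+```bash
+# Sketch only: build a token like md5(time + id + salt + expire) from the example above.
+# TIME, ID, SALT and EXPIRE are placeholders chosen by your web application.
+TIME=$(date +%s); ID=alice; SALT=your-salt; EXPIRE=3600
+TOKEN=$(echo -n "${TIME}${ID}${SALT}${EXPIRE}" | md5sum | awk '{print $1}')
+echo "rtmp://192.168.1.10/live/livestream?time=${TIME}&expire=${EXPIRE}&token=${TOKEN}"
+```
+
+The `on_publish` callback server re-computes the same hash and returns a non-zero code to reject the request when the token does not match or has expired.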
+ +Winlin 2015.8 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/drm) + + diff --git a/versioned_docs/version-6.0/doc/dvr.md b/versioned_docs/version-6.0/doc/dvr.md new file mode 100644 index 00000000..bfe18a76 --- /dev/null +++ b/versioned_docs/version-6.0/doc/dvr.md @@ -0,0 +1,267 @@ +--- +title: DVR +sidebar_label: DVR +hide_title: false +hide_table_of_contents: false +--- + +# DVR + +SRS supports DVR RTMP stream to FLV/MP4 file. Although the bellow using FLV as example, but MP4 is also available. + +When FFmpeg/OBS publish RTMP stream to SRS, SRS will write the stream to FLV/MP4 file. The workflow is: + +```text ++------------+ +-------+ +---------------+ ++ FFmpeg/OBS +---RTMP-->--+ SRS +---DVR-->--+ FLV/MP4 File + ++------------+ +-------+ +---------------+ +``` + +Many users want more features about DVR, please consider use [Oryx](./getting-started-oryx.md#dvr) instead, +for example: + +* Oryx supports S3 cloud storage, move the final MP4 file to S3 cloud storage. +* Oryx supports glob filters, to only record specified streams, not all streams. +* Oryx supports merge multiple publishing sessions to one MP4 file. + +In facts, DVR feature can be very complicated, SRS only support basic DVR feature, while Oryx will continue +to improve the DVR features. + +## Build + +DVR is always enabled for SRS3+. + +For information about the dvr option, read [Build](./install.md) + +## Config + +The difficult of DVR is about the flv name, while SRS use app/stream+random name. +User can use http-callback to rename, for example, when DVR reap flv file. + +Config for DVR: + +```bash +vhost yourvhost { + # DVR RTMP stream to file, + # start to record to file when encoder publish, + # reap flv/mp4 according by specified dvr_plan. + dvr { + # whether enabled dvr features + # default: off + enabled on; + # the filter for dvr to apply to. + # all, dvr all streams of all apps. + # /, apply to specified stream of app. + # for example, to dvr the following two streams: + # live/stream1 live/stream2 + # default: all + dvr_apply all; + # the dvr plan. canbe: + # session reap flv/mp4 when session end(unpublish). + # segment reap flv/mp4 when flv duration exceed the specified dvr_duration. + # @remark The plan append is removed in SRS3+, for it's no use. + # default: session + dvr_plan session; + # the dvr output path, *.flv or *.mp4. + # we supports some variables to generate the filename. + # [vhost], the vhost of stream. + # [app], the app of stream. + # [stream], the stream name of stream. + # [2006], replace this const to current year. + # [01], replace this const to current month. + # [02], replace this const to current date. + # [15], replace this const to current hour. + # [04], replace this const to current minute. + # [05], replace this const to current second. + # [999], replace this const to current millisecond. + # [timestamp],replace this const to current UNIX timestamp in ms. + # @remark we use golang time format "2006-01-02 15:04:05.999" as "[2006]-[01]-[02]_[15].[04].[05]_[999]" + # for example, for url rtmp://ossrs.net/live/livestream and time 2015-01-03 10:57:30.776 + # 1. No variables, the rule of SRS1.0(auto add [stream].[timestamp].flv as filename): + # dvr_path ./objs/nginx/html; + # => + # dvr_path ./objs/nginx/html/live/livestream.1420254068776.flv; + # 2. 
Use stream and date as dir name, time as filename: + # dvr_path /data/[vhost]/[app]/[stream]/[2006]/[01]/[02]/[15].[04].[05].[999].flv; + # => + # dvr_path /data/ossrs.net/live/livestream/2015/01/03/10.57.30.776.flv; + # 3. Use stream and year/month as dir name, date and time as filename: + # dvr_path /data/[vhost]/[app]/[stream]/[2006]/[01]/[02]-[15].[04].[05].[999].flv; + # => + # dvr_path /data/ossrs.net/live/livestream/2015/01/03-10.57.30.776.flv; + # 4. Use vhost/app and year/month as dir name, stream/date/time as filename: + # dvr_path /data/[vhost]/[app]/[2006]/[01]/[stream]-[02]-[15].[04].[05].[999].flv; + # => + # dvr_path /data/ossrs.net/live/2015/01/livestream-03-10.57.30.776.flv; + # 5. DVR to mp4: + # dvr_path ./objs/nginx/html/[app]/[stream].[timestamp].mp4; + # => + # dvr_path ./objs/nginx/html/live/livestream.1420254068776.mp4; + # @see https://ossrs.io/lts/en-us/docs/v4/doc/dvr#custom-path + # @see https://ossrs.io/lts/en-us/docs/v4/doc/dvr#custom-path + # segment,session apply it. + # default: ./objs/nginx/html/[app]/[stream].[timestamp].flv + dvr_path ./objs/nginx/html/[app]/[stream].[timestamp].flv; + # the duration for dvr file, reap if exceed, in seconds. + # segment apply it. + # session,append ignore. + # default: 30 + dvr_duration 30; + # whether wait keyframe to reap segment, + # if off, reap segment when duration exceed the dvr_duration, + # if on, reap segment when duration exceed and got keyframe. + # segment apply it. + # session,append ignore. + # default: on + dvr_wait_keyframe on; + # about the stream monotonically increasing: + # 1. video timestamp is monotonically increasing, + # 2. audio timestamp is monotonically increasing, + # 3. video and audio timestamp is interleaved monotonically increasing. + # it's specified by RTMP specification, @see 3. Byte Order, Alignment, and Time Format + # however, some encoder cannot provides this feature, please set this to off to ignore time jitter. + # the time jitter algorithm: + # 1. full, to ensure stream start at zero, and ensure stream monotonically increasing. + # 2. zero, only ensure stream start at zero, ignore timestamp jitter. + # 3. off, disable the time jitter algorithm, like atc. + # apply for all dvr plan. + # default: full + time_jitter full; + + # on_dvr, never config in here, should config in http_hooks. + # for the dvr http callback, @see http_hooks.on_dvr of vhost hooks.callback.srs.com + # @see https://ossrs.io/lts/en-us/docs/v4/doc/dvr#http-callback + # @see https://ossrs.io/lts/en-us/docs/v4/doc/dvr#http-callback + } +} +``` + +The plan of DVR used to reap flv file: + +* session: When start publish, open flv file, close file when unpublish. +* segment: Reap flv file by the dvr_duration and dvr_wait_keyframe. +* time_jitter: The time jitter algorithm to use. +* dvr_path: The path of dvr, the rules is specified at below. + +The config file can also use `conf/dvr.segment.conf` or `conf/dvr.session.conf`. + +## Apply + +The dvr apply is a filter which enable or disable the dvr of specified stream. +This feature is similar to nginx control module, but stronger than nginx. +User can use [http raw api](./http-api.md) to control when to dvr specified stream. +Please read [351](https://github.com/ossrs/srs/issues/459#issuecomment-134983742). 
+ +The following exmaple dvr `live/stream1`和`live/stream2`, the config: +``` +vhost xxx { + dvr { + dvr_apply live/stream1 live/stream2; + } +} +``` + +About the RAW API to control DVR, read [319](https://github.com/ossrs/srs/issues/319) and [wiki](./http-api.md#raw-dvr). + +## Custom Path + +We can custom the dvr path(dir and filename) by rules: + +* Use date and time and stream info as dir name, to avoid too many files in a dir. +* Use date and time and stream info as filename, for better search. +* Provides the data/time and stream info variables, use brackets to identify them. +* Keep SRS1.0 rule, supports write to a specified dir and uses timestamp as filename. If no filename specified(dir specified only), use `[stream].[timestamp].flv` as filename to compatible with SRS1.0 rule. + +About the data and time variable, refer to go time format string, for example, use an actual year 2006 instead YYYY, it's a good design: + +``` +2006-01-02 15:04:05.999 +``` + +The variables of dvr: + +1. Year, [2006], replace this const to current year. +1. Month, [01], replace this const to current month. +1. Date, [02], replace this const to current date. +1. Hour, [15], replace this const to current hour. +1. Minute, [04], repleace this const to current minute. +1. Second, [05], repleace this const to current second. +1. Millisecond, [999], repleace this const to current millisecond. +1. Timestamp, [timestamp],replace this const to current UNIX timestamp in ms. +1. Stream info, refer to transcode output, variables are [vhost], [app], [stream] + +For example, for url `rtmp://ossrs.net/live/livestream` and time `2015-01-03 10:57:30.776`: + +1. No variables, the rule of SRS1.0(auto add `[stream].[timestamp].flv` as filename): + * dvr_path ./objs/nginx/html; + * => + * dvr_path ./objs/nginx/html/live/livestream.1420254068776.flv; + +1. Use stream and date as dir name, time as filename: + * dvr_path /data/[vhost]/[app]/[stream]/[2006]/[01]/[02]/[15].[04].[05].[999].flv; + * => + * dvr_path /data/ossrs.net/live/livestream/2015/01/03/10.57.30.776.flv; + +1. Use stream and year/month as dir name, date and time as filename: + * dvr_path /data/[vhost]/[app]/[stream]/[2006]/[01]/[02]-[15].[04].[05].[999].flv; + * => + * dvr_path /data/ossrs.net/live/livestream/2015/01/03-10.57.30.776.flv; + +1. Use vhost/app and year/month as dir name, stream/date/time as filename: + * dvr_path /data/[vhost]/[app]/[2006]/[01]/[stream]-[02]-[15].[04].[05].[999].flv; + * => + * dvr_path /data/ossrs.net/live/2015/01/livestream-03-10.57.30.776.flv; + +1. 
Use app as dirname, stream and timestamp as filename(the SRS1.0 rule): + * dvr_path /data/[app]/[stream].[timestamp].flv; + * => + * dvr_path /data/live/livestream.1420254068776.flv; + +## Http Callback + +Enable the `on_dvr` of `http_hooks`: + +``` +vhost your_vhost { + dvr { + enabled on; + dvr_path ./objs/nginx/html/[app]/[stream]/[2006]/[01]/[02]/[15].[04].[05].[999].flv; + dvr_plan segment; + dvr_duration 30; + dvr_wait_keyframe on; + } + http_hooks { + enabled on; + on_dvr http://127.0.0.1:8085/api/v1/dvrs; + } +} +``` + +The log of api-server for api dvrs: + +``` +[2015-01-03 15:25:48][trace] post to dvrs, req={"action":"on_dvr","client_id":108,"ip":"127.0.0.1","vhost":"__defaultVhost__","app":"live","stream":"livestream","cwd":"/home/winlin/git/srs/trunk","file":"./objs/nginx/html/live/livestream/2015/1/3/15.25.18.442.flv"} +[2015-01-03 15:25:48][trace] srs on_dvr: client id=108, ip=127.0.0.1, vhost=__defaultVhost__, app=live, stream=livestream, cwd=/home/winlin/git/srs/trunk, file=./objs/nginx/html/live/livestream/2015/1/3/15.25.18.442.flv +127.0.0.1 - - [03/Jan/2015:15:25:48] "POST /api/v1/dvrs HTTP/1.1" 200 1 "" "SRS(Simple RTMP Server)2.0.88" +``` + +For more information, read about [HttpCallback](./http-callback.md) + +## Bug + +The bugs of dvr: + +* The dir and filename rules: [#179](https://github.com/ossrs/srs/issues/179) +* The http callback for dvr: [#274](https://github.com/ossrs/srs/issues/274) +* The MP4 format support: [#738](https://github.com/ossrs/srs/issues/738) +* How to DVR multiple segments to a file? Read [#776](https://github.com/ossrs/srs/pull/776). + +## Reload + +The changing of dvr and reload will restart the dvr, that is, to close current dvr file then apply new config. + +Winlin 2015.1 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/dvr) + + diff --git a/versioned_docs/version-6.0/doc/edge.md b/versioned_docs/version-6.0/doc/edge.md new file mode 100644 index 00000000..fbf38912 --- /dev/null +++ b/versioned_docs/version-6.0/doc/edge.md @@ -0,0 +1,151 @@ +--- +title: Edge Cluster +sidebar_label: Edge Cluster +hide_title: false +hide_table_of_contents: false +--- + +# Edge Server + +SRS edge dedicates to support huge players for a small set of streams. + +![](/img/doc-main-concepts-edge-001.png) + +Note: The edge server need to serve many clients, the SRS performance is ok. + +Use Scenarios for Edge: +* CDN/VDN RTMP cluster, for many clients to upload(publish) or download(play). +* Small cluster, but many clients to publish. Forward is not ok for all stream is forwarded, +while edge is ok for it only fetch when user play the specified stream. +* The BGP server is costly, while the edge is cheap. Use multiple levels edge +to ensure the BGP server low bandwidth. + +Note: Edge can fetch stream from or push stream to origin. When user play +a stream on edge, edge will fetch from origin. When user publish stream to +edge, edge will push to origin. + +Note: Always use Edge, except you actually know the forward. The forward will +always forward stream to multiple servers; while the edge only fetch or push +stream to a server and switch to next when error. + +## Concepts + +When a vhost set mode to remote, the vhost in server is edge. +When a vhost set mode to local, the vhost in server is origin. +Edge is used to cache the stream of origin. + +When user publish stream to the edge server, edge will forward the stream +to origin. 
For example, the origin server is in beijing, a user at shanghai needs +to pubish stream to origin server, we can add a edge server at shanghai, when +user publish stream to shanghai edge server, the edge server will forward stream to +beijing. + +When user play the stream on edge, edge will fetch from origin when it has not +cache it yet. When edge already cached the stream, edge will directly delivery +stream to client. That is, when many clients connect to edge, there is only one +connection to origin for each stream. This is the CDN(content delivery network). +For example, the origin server is at beijing, there are 320 edge servers on other +provience, each edge server serves 2000 clients. There are 640,000 users play this +stream, and the bandwidth of CDN consumed 640Gbps; the origin server only serves 320 +connections from all edge servers. + +The edge server is design for huge cluster. Futhermore, the SRS edge can config with +multiple origin servers, SRS will switch to next when current origin server crash, and +the end user never disconnect when edge switch origin server. + +## Config + +Config the edge in vhost: + +```bash +vhost __defaultVhost__ { + # The config for cluster. + cluster { + # The cluster mode, local or remote. + # local: It's an origin server, serve streams itself. + # remote: It's an edge server, fetch or push stream to origin server. + # default: local + mode remote; + + # For edge(mode remote), user must specifies the origin server + # format as: [:port] + # @remark user can specifies multiple origin for error backup, by space, + # for example, 192.168.1.100:1935 192.168.1.101:1935 192.168.1.102:1935 + origin 127.0.0.1:1935 localhost:1935; + + # For edge(mode remote), whether open the token traverse mode, + # if token traverse on, all connections of edge will forward to origin to check(auth), + # it's very important for the edge to do the token auth. + # the better way is use http callback to do the token auth by the edge, + # but if user prefer origin check(auth), the token_traverse if better solution. + # default: off + token_traverse off; + + # For edge(mode remote), the vhost to transform for edge, + # to fetch from the specified vhost at origin, + # if not specified, use the current vhost of edge in origin, the variable [vhost]. + # default: [vhost] + vhost same.edge.srs.com; + + # For edge(mode remote), when upnode(forward to, edge push to, edge pull from) is srs, + # it's strongly recommend to open the debug_srs_upnode, + # when connect to upnode, it will take the debug info, + # for example, the id, source id, pid. + # please see https://ossrs.io/lts/en-us/docs/v4/doc/log + # default: on + debug_srs_upnode on; + } +} +``` + +The origin can specifies multiple servers. + +## Example + +The example below specifies how to config a origin and edge. + +The config of origin, see `origin.conf`: + +```bash +listen 19350; +pid objs/origin.pid; +srs_log_file ./objs/origin.log; +vhost __defaultVhost__ { +} +``` + +The config of edge, see `edge.conf`: + +```bash +listen 1935; +pid objs/edge.pid; +srs_log_file ./objs/edge.log; +vhost __defaultVhost__ { + cluster { + mode remote; + origin 127.0.0.1:19350; + } +} +``` + +## HLS Edge + +The edge is for RTMP, that is, when publish stream to origin, only origin server output +the HLS, all edge server never output HLS util client access the RTMP stream on edge. + +That is, never config HLS on edge server, it's no use. The HLS delivery must use squid or +traffic server to cache the HTTP origin server. 
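For example, a minimal Nginx cache in front of the origin's HTTP server could look like the sketch below. This is only an illustration: the origin address `192.168.1.5:8080`, the cache path and the cache times are assumptions, not part of any SRS config.

```
# A minimal sketch only. In nginx.conf, proxy_cache_path goes inside the http {} block,
# and 192.168.1.5:8080 is an assumed SRS origin that serves HLS over HTTP.
proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=hls:10m max_size=1g inactive=10m;

server {
    listen 8080;

    # The m3u8 playlist is rewritten every few seconds, so cache it only briefly.
    location ~ \.m3u8$ {
        proxy_pass http://192.168.1.5:8080;
        proxy_cache hls;
        proxy_cache_valid 200 302 3s;
    }

    # TS segments never change once written, so they can be cached longer.
    location ~ \.ts$ {
        proxy_pass http://192.168.1.5:8080;
        proxy_cache hls;
        proxy_cache_valid 200 302 60s;
    }
}
```

This is the same idea as the squid/traffic server approach above: players request HLS from the cache, and only cache misses reach the origin.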
+ +## Transform Vhost + +The design of CDN stream system, always use `up.xxxx` and `down.xxxx` to operate them, for example, user publish to cdn by host `up.srs.com` and play by `down.srs.com`. + +SRS can config the edge mode to transform the host to origin, use the config `vhost down.srs.com` for up edge server. + +For more information, read the config of edge server. + +Winlin 2015.4 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/edge) + + diff --git a/versioned_docs/version-6.0/doc/exporter.md b/versioned_docs/version-6.0/doc/exporter.md new file mode 100644 index 00000000..884f9a98 --- /dev/null +++ b/versioned_docs/version-6.0/doc/exporter.md @@ -0,0 +1,177 @@ +--- +title: Prometheus Exporter +sidebar_label: Exporter +hide_title: false +hide_table_of_contents: false +--- + +# Prometheus Exporter + +The observability of SRS is about metrics(Prometheus Exporter), tracing(APM) and logging(Cloud Logging). + +## Introduction + +For detail specs, please read [OpenTelemetry](https://opentelemetry.io/docs/concepts/observability-primer). + +![](/img/doc-2022-10-30-001.png) + +> Note: Please see [Metrics, tracing, and logging](https://peter.bourgon.org/blog/2017/02/21/metrics-tracing-and-logging.html) + +The architecture for Prometheus exporter: + +``` ++-----+ +-----------+ +---------+ +| SRS +--Exporter-->--| Promethus +-->--+ Grafana + ++-----+ (HTTP) +-----------+ +---------+ +``` + +There is special config for exporter. + +## Config + +The config for exporter is bellow. Highly recommend using environment variables to enable it: + +```bash +# Prometheus exporter config. +# See https://prometheus.io/docs/instrumenting/exporters +exporter { + # Whether exporter is enabled. + # Overwrite by env SRS_EXPORTER_ENABLED + # Default: off + enabled off; + # The http api listen port for exporter metrics. + # Overwrite by env SRS_EXPORTER_LISTEN + # Default: 9972 + # See https://github.com/prometheus/prometheus/wiki/Default-port-allocations + listen 9972; + # The logging label to category the cluster servers. + # Overwrite by env SRS_EXPORTER_LABEL + label cn-beijing; + # The logging tag to category the cluster servers. + # Overwrite by env SRS_EXPORTER_TAG + tag cn-edge; +} +``` + +Let's start SRS exporter to export metrics to Prometheus. + +## Usage for SRS Exporter + +Build and start `SRS 5.0.86+`: + +```bash +./configure && make +env SRS_ENV_ONLY=on SRS_EXPORTER_ENABLED=on SRS_LISTEN=1935 \ + ./objs/srs -e +``` + +> Note: We use envrionment variables to config SRS, without config file. However, you're able to use config file `conf/prometheus.conf` to start the demo. + +> Note: Please open [http://localhost:9972/metrics](http://localhost:9972/metrics) to verify SRS. + +Then, use FFmpeg to push a live stream to SRS: + +```bash +docker run --rm -it ossrs/srs:encoder ffmpeg -re -i doc/source.flv -c copy \ + -f flv rtmp://host.docker.internal/live/livestream +``` + +Next, run [node_exporter](https://github.com/prometheus/node_exporter) to collect the node data: + +```bash +docker run --rm -p 9100:9100 prom/node-exporter +``` + +> Note: Highly recommend downloading from [here](https://github.com/prometheus/node_exporter/releases) and startting by binary file. + +> Note: Please open [http://localhost:9100/metrics](http://localhost:9100/metrics) to verify it. 
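Before wiring them into Prometheus, you can quickly confirm that both exporters respond from the shell. The ports below assume the defaults used in this guide, `9972` for the SRS exporter and `9100` for node_exporter:

```bash
# SRS exporter: should print srs_* metrics such as srs_receive_bytes_total.
curl -s http://localhost:9972/metrics | grep '^srs_' | head -n 5

# Node exporter: should print node_* metrics.
curl -s http://localhost:9100/metrics | grep '^node_' | head -n 5
```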
+ +Finally, create a `prometheus.yml` for prometheus: + +```yml +scrape_configs: + - job_name: "node" + metrics_path: "/metrics" + scrape_interval: 5s + static_configs: + - targets: ["host.docker.internal:9100"] + - job_name: "srs" + metrics_path: "/metrics" + scrape_interval: 5s + static_configs: + - targets: ["host.docker.internal:9972"] +``` + +> Note: We set the `scrape_interval` to `5s`, which is default to `1m` or one minute. + +Start Prometheus by: + +```bash +docker run --rm -v $(pwd)/prometheus.yml:/etc/prometheus/prometheus.yml \ + -p 9090:9090 prom/prometheus +``` + +Please ope [Prometheus: Targets](http://localhost:9090/targets), or [Prometheus: Graph](http://localhost:9090/graph) to query the input bitrate: + +```sql +rate(srs_receive_bytes_total[10s])*8 +``` + +This query is used to query the input bitrate, which is the bitrate of stream: + +![](/img/doc-2022-10-30-002.png) + +Normally we use Grafana to render the graph. + +## Usage for Grafana + +First, start Grafana in docker: + +```bash +docker run --rm -it -p 3000:3000 \ + -e GF_SECURITY_ADMIN_USER=admin \ + -e GF_SECURITY_ADMIN_PASSWORD=12345678 \ + -e GF_USERS_DEFAULT_THEME=light \ + grafana/grafana +``` + +Please access Grafana console by [http://localhost:3000/](http://localhost:3000/) + +> Note: Please input username `admin` and password `12345678` then click login. + +Run command to [add](https://grafana.com/docs/grafana/latest/developers/http_api/data_source/#create-a-data-source) a Prometheus DataSource: + +```bash +curl -s -H "Content-Type: application/json" \ + -XPOST http://admin:12345678@localhost:3000/api/datasources \ + -d '{ + "name": "prometheus", + "type": "prometheus", + "access": "proxy", "isDefault": true, + "url": "http://host.docker.internal:9090" +}' +``` + +Run command to [import](https://grafana.com/docs/grafana/latest/developers/http_api/dashboard/#create--update-dashboard) the HelloWorld dashboard: + +```bash +data=$(curl https://raw.githubusercontent.com/ossrs/srs-grafana/main/dashboards/helloworld-import.json 2>/dev/null) +curl -s -H "Content-Type: application/json" \ + -XPOST http://admin:12345678@localhost:3000/api/dashboards/db \ + --data-binary "{\"dashboard\":${data},\"overwrite\":true,\"inputs\":[],\"folderId\":0}" +``` + +> Note: For other dashboards, please see [srs-grafana](https://github.com/ossrs/srs-grafana/tree/main/dashboards). + +Then open [Dashboards](http://localhost:3000/dashboards) in browser, you will see the imported dashboard: + +![](/img/doc-2022-10-30-003.png) + +There are more other dashboards, please get them in [srs-grafana](https://github.com/ossrs/srs-grafana/tree/main/dashboards). + +![](/img/doc-2022-10-30-004.png) + +Any patch is welcome. + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/exporter) + diff --git a/versioned_docs/version-6.0/doc/ffmpeg.md b/versioned_docs/version-6.0/doc/ffmpeg.md new file mode 100644 index 00000000..2cd84f23 --- /dev/null +++ b/versioned_docs/version-6.0/doc/ffmpeg.md @@ -0,0 +1,368 @@ +--- +title: FFMPEG +sidebar_label: FFMPEG +hide_title: false +hide_table_of_contents: false +--- + +# Live Streaming Transcode + +SRS can transcode RTMP streams and output to any RTMP server, typically itself. + +## Use Scenario + +The important use scenario of FFMPEG: +* One in N out: Publish a high resolution video with big bitrate, for intance, h.264 5Mbps 1080p. Then use FFMPEG to transcode to multiple bitrates, for example, 1080p/720p/576p, the 576p is for mobile devices. 
+* Support multiple screen: The stream published by flash is in h264/vp6/mp3/speex codec. Use FFMPEG to transcode to HLS(h264+aac) for IOS/Android. +* Stream filters: For example, add logo to stream. SRS supports all filters from FFMPEG. +* Snapshot: Please read [snapshot by transcoder](./snapshot.md#transcoder) + +## Workflow + +The workflow of SRS transcoding: + +1. Encoder publishes RTMP to SRS. +1. SRS forks a process for FFMPEG when transcoding is configured. +1. The forked FFMPEG transcodes the stream and publishes it to SRS or other servers. + +## Transcode Config + +The SRS transcoding feature can apply on vhost, app or a specified stream. + +```bash +listen 1935; +vhost __defaultVhost__ { + # the streaming transcode configs. + transcode { + # whether the transcode enabled. + # if off, donot transcode. + # default: off. + enabled on; + # the ffmpeg + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + # the transcode engine for matched stream. + # all matched stream will transcoded to the following stream. + # the transcode set name(ie. hd) is optional and not used. + engine example { + # whether the engine is enabled + # default: off. + enabled on; + # input format, can be: + # off, do not specifies the format, ffmpeg will guess it. + # flv, for flv or RTMP stream. + # other format, for example, mp4/aac whatever. + # default: flv + iformat flv; + # ffmpeg filters, follows the main input. + vfilter { + # the logo input file. + i ./doc/ffmpeg-logo.png; + # the ffmpeg complex filter. + # for filters, @see: http://ffmpeg.org/ffmpeg-filters.html + filter_complex 'overlay=10:10'; + } + # video encoder name. can be: + # libx264: use h.264(libx264) video encoder. + # png: use png to snapshot thumbnail. + # copy: donot encoder the video stream, copy it. + # vn: disable video output. + vcodec libx264; + # video bitrate, in kbps + # @remark 0 to use source video bitrate. + # default: 0 + vbitrate 1500; + # video framerate. + # @remark 0 to use source video fps. + # default: 0 + vfps 25; + # video width, must be even numbers. + # @remark 0 to use source video width. + # default: 0 + vwidth 768; + # video height, must be even numbers. + # @remark 0 to use source video height. + # default: 0 + vheight 320; + # the max threads for ffmpeg to used. + # default: 1 + vthreads 12; + # x264 profile, @see x264 -help, can be: + # high,main,baseline + vprofile main; + # x264 preset, @see x264 -help, can be: + # ultrafast,superfast,veryfast,faster,fast + # medium,slow,slower,veryslow,placebo + vpreset medium; + # other x264 or ffmpeg video params + vparams { + # ffmpeg options, @see: http://ffmpeg.org/ffmpeg.html + t 100; + # 264 params, @see: http://ffmpeg.org/ffmpeg-codecs.html#libx264 + coder 1; + b_strategy 2; + bf 3; + refs 10; + } + # audio encoder name. can be: + # libfdk_aac: use aac(libfdk_aac) audio encoder. + # copy: donot encoder the audio stream, copy it. + # an: disable audio output. + acodec libfdk_aac; + # audio bitrate, in kbps. [16, 72] for libfdk_aac. + # @remark 0 to use source audio bitrate. + # default: 0 + abitrate 70; + # audio sample rate. for flv/rtmp, it must be: + # 44100,22050,11025,5512 + # @remark 0 to use source audio sample rate. + # default: 0 + asample_rate 44100; + # audio channel, 1 for mono, 2 for stereo. + # @remark 0 to use source audio channels. 
+ # default: 0 + achannels 2; + # other ffmpeg audio params + aparams { + # audio params, @see: http://ffmpeg.org/ffmpeg-codecs.html#Audio-Encoders + # @remark SRS supported aac profile for HLS is: aac_low, aac_he, aac_he_v2 + profile:a aac_low; + bsf:a aac_adtstoasc; + } + # output format, can be: + # off, do not specifies the format, ffmpeg will guess it. + # flv, for flv or RTMP stream. + # image2, for vcodec png to snapshot thumbnail. + # other format, for example, mp4/aac whatever. + # default: flv + oformat flv; + # output stream. variables: + # [vhost] the input stream vhost. + # [port] the intput stream port. + # [app] the input stream app. + # [stream] the input stream name. + # [engine] the tanscode engine name. + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +The configuration applies to all streams of this vhost, for example: +* Publish stream to: rtmp://dev:1935/live/livestream +* Play the origin stream: rtmp://dev:1935/live/livestream +* Play the transcoded stream: rtmp://dev:1935/live/livestream_ff + +The output URL contains some variables: +* [vhost] The input stream vhost, for instance, dev.ossrs.net +* [port] The input stream port, for instance, 1935 +* [app] The input stream app, for instance, live +* [stream] The input stream name, for instance, livestream +* [engine] The transcode engine name, which follows the keyword engine, for instance, ff + +Add the app or app/stream when you need to apply transcoding to it: + +```bash +listen 1935; +vhost __defaultVhost__ { + # Transcode all streams of app "live" + transcode live { + } +} +``` + +Or for streams: + +```bash +listen 1935; +vhost __defaultVhost__ { + # Transcode stream name is "livestream" and app is "live" + transcode live/livestream{ + } +} +``` + +## Transcode Rulers + +All params of SRS transcode are for FFMPEG, and SRS renames some parameters: + +| SRS | FFMPEG | Exammple | Description | +| ------ | --------- | ---- | ----- | +| vcodec | vcodec | ffmpeg ... -vcodec libx264 ... | The codec to use. | +| vbitrate | b:v | ffmpeg ... -b:v 500000 ... | The bitrate in kbps (for SRS) or bps (for FFMPEG) at which to output the transcoded stream. | +| vfps | r | ffmpeg ... -r 25 ... | The output framerate. | +| vwidth/vheight | s | ffmpeg ... -s 400x300 -aspect 400:300 ... | The output video size, the width x height and the aspect set to width:height. | +| vthreads | threads | ffmpeg ... -threads 8 ... | The number of encoding threads for x264. | +| vprofile | profile:v | ffmpeg ... -profile:v high ... | The profile for x264. | +| vpreset | preset | ffmpeg ... -preset medium ... | The preset for x264. | +| acodec | acodec | ffmpeg ... -acodec libfdk_aac ... | The codec for audio. | +| abitrate | b:a | ffmpeg ... -b:a 70000 ... | The bitrate in kbps (for SRS) and bps (for FFMPEG) for output audio. For libaacplus:16-72k. No limit for libfdk_aac. | +| asample_rate | ar | ffmpeg ... -ar 44100 ... | The audio sample rate. | +| achannels | ac | ffmpeg ... -ac 2 ... | The audio channel. | + +There are more parameters for SRS: +* vfilter:Parameters added before the vcodec, for the FFMPEG filters. +* vparams:Parameters added after the vcodec, for the video transcode parameters. +* aparams:Parameters added after the acodec and before the -y, for the audio transcode parameters. + +These parameters will generated by the sequence: + +```bash +ffmpeg -f flv -i {vfilter} -vcodec ... {vparams} -acodec ... 
{aparams} -f flv -y {output} +``` + +The actual parameters used to fork FFMPEG can be found in the log by the keywords `start transcoder`: + +```bash +[2014-02-28 21:38:09.603][4][trace][start] start transcoder, +log: ./objs/logs/encoder-__defaultVhost__-live-livestream.log, +params: ./objs/ffmpeg/bin/ffmpeg -f flv -i +rtmp://127.0.0.1:1935/live?vhost=__defaultVhost__/livestream +-vcodec libx264 -b:v 500000 -r 25.00 -s 768x320 -aspect 768:320 +-threads 12 -profile:v main -preset medium -acodec libfdk_aac +-b:a 70000 -ar 44100 -ac 2 -f flv +-y rtmp://127.0.0.1:1935/live?vhost=__defaultVhost__/livestream_ff +``` + +## FFMPEG Log Path + +When an FFMPEG process is forked, SRS will redirect the stdout and stderr to the log file, for instance, `./objs/logs/encoder-__defaultVhost__-live-livestream.log`. Sometimes the log file is very large, so users can add parameters to vfilter to tell FFMPEG to generate less verbose logs: + +```bash +listen 1935; +vhost __defaultVhost__ { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine ff { + enabled on; + vfilter { + # -v quiet + v quiet; + } + vcodec libx264; + vbitrate 500; + vfps 25; + vwidth 768; + vheight 320; + vthreads 12; + vprofile main; + vpreset medium; + vparams { + } + acodec libfdk_aac; + abitrate 70; + asample_rate 44100; + achannels 2; + aparams { + } + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +That is, add the parameter `-v quiet` to FFMPEG. + +## Copy Without Transcode + +Set the vcodec/acodec to copy, FFMPEG will demux and mux without transcoding, like the forward of SRS. Users can copy video and transcode audio, for example, when flash is publishing the stream with h264+speex. + +```bash +listen 1935; +vhost __defaultVhost__ { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine ff { + enabled on; + vcodec copy; + acodec libfdk_aac; + abitrate 70; + asample_rate 44100; + achannels 2; + aparams { + } + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +Or, copy video and audio: +```bash +listen 1935; +vhost __defaultVhost__ { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine ff { + enabled on; + vcodec copy; + acodec copy; + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +## Drop Video or Audio + +FFMPEG can drop video or audio streams by configuring vcodec to vn and acodec to an. For example: + +```bash +listen 1935; +vhost __defaultVhost__ { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine vn { + enabled on; + vcodec vn; + acodec libfdk_aac; + abitrate 45; + asample_rate 44100; + achannels 2; + aparams { + } + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +The configuration above will output pure audio in the aac codec. + +## Other Transcoding Configuration + +There are lots of vhost in conf/full.conf for transcoding, or refer to FFMPEG: +* mirror.transcode.srs.com +* drawtext.transcode.srs.com +* crop.transcode.srs.com +* logo.transcode.srs.com +* audio.transcode.srs.com +* copy.transcode.srs.com +* all.transcode.srs.com +* ffempty.transcode.srs.com +* app.transcode.srs.com +* stream.transcode.srs.com +* vn.transcode.srs.com + +## FFMPEG Transcoding Streams by Flash Encoder + +Flash web pages can encode and publish RTMP streams to the server, and the audio codec must be speex, nellymoser or pcma/pcmu. 
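If you are not sure which codecs the encoder actually publishes, you can probe the RTMP stream before configuring the transcode engine. This is just a sketch: it assumes `ffprobe` is available (it ships with FFmpeg) and uses an example stream URL:

```bash
# Print the codec of each track in the published stream, e.g. video=h264, audio=speex.
ffprobe -v error -show_entries stream=codec_type,codec_name \
    rtmp://127.0.0.1:1935/live/livestream
```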
+ +Flash will disable audio when no audio is published, so FFMPEG may cannot discover the audio in the stream and will disable the audio. + +## FFMPEG + +FFMPEG links: +* [ffmpeg.org](http://ffmpeg.org) +* [ffmpeg CLI](http://ffmpeg.org/ffmpeg.html) +* [ffmpeg filters](http://ffmpeg.org/ffmpeg-filters.html) +* [ffmpeg codecs](http://ffmpeg.org/ffmpeg-codecs.html) + +Winlin 2015.6 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/ffmpeg) + + diff --git a/versioned_docs/version-6.0/doc/flv-vod-stream.md b/versioned_docs/version-6.0/doc/flv-vod-stream.md new file mode 100644 index 00000000..f3e541c5 --- /dev/null +++ b/versioned_docs/version-6.0/doc/flv-vod-stream.md @@ -0,0 +1,51 @@ +--- +title: FLV Vod Streaming +sidebar_label: FLV Vod Streaming +hide_title: false +hide_table_of_contents: false +--- + +# FLV vod streaming + +## HTTP VOD + +I recomment: + +* Vod stream should always use HTTP protocol, never use RTMP. +SRS can dvr RTMP live stream to flv file, and provides some tools for vod stream, +but user should use other HTTP server to delivery flv file as vod stream. +* In a word, SRS does not support vod, only support live. + +The workflow of flv vod stream: + +* SRS dvr live stream to flv file, or upload flv vod file, to the HTTP root dir: `objs/nginx/html` +* HTTP server must support flv?start=offset, for example, flv module of nginx, or use experiment SRS HTTP server. +* Use `research/librtmp/objs/srs_flv_injecter` inject the keyframe offset to metadata of flv. +* Flash player play http flv url, for instance, `http://192.168.1.170:8080/sample.flv` +* When user seek, for instance, seek to 300s. +* Player use the keyframe offset in metadata to calc the offset of 300s, for instance, 300s offset=`6638860` +* Start new request, url is `http://192.168.1.170:8080/sample.flv?start=6638860` + +Note: SRS HTTP server is experiment, do not limit the bandwidth. +Note: SRS provides flv view tool `research/librtmp/objs/srs_flv_parser`, to list the seconds:offsets in metadata. + +## SRS Embeded HTTP server + +SRS supports http-api, so SRS can also parse HTTP protocol(partial HTTP right now), +so SRS also implements a experiment HTTP server. + +SRS HTTP server is rewrite, table and partial HTTP protocol support, +ok for online service. + +For some emebeded device, for instance, arm linux, user can use SRS HTTP server, +for arm is not easy to build some server. + +## Config + +Read [HTTP Server](./http-server.md#config) + +Winlin 2015.1 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/flv-vod-stream) + + diff --git a/versioned_docs/version-6.0/doc/flv.md b/versioned_docs/version-6.0/doc/flv.md new file mode 100644 index 00000000..bdf786dd --- /dev/null +++ b/versioned_docs/version-6.0/doc/flv.md @@ -0,0 +1,213 @@ +--- +title: HTTP-FLV +sidebar_label: HTTP-FLV +hide_title: false +hide_table_of_contents: false +--- + +# HTTP-FLV + +HTTP-FLV is a live streaming protocol, sometimes simply called FLV, which is used to transmit live streams in FLV +format over an HTTP connection. + +Unlike file downloads, live streams have an indefinite or uncertain length, so they are usually implemented using +the HTTP Chunked protocol. Similar to HTTP-FLV, there are also HTTP-TS and HTTP-MP3. TS is mainly used in broadcasting +and television, while MP3 is mainly used in the audio field. + +Different from HLS, which is essentially an HTTP file download, HTTP-FLV is a streaming protocol. 
CDN support for +HTTP file downloads is well-developed, making HLS more compatible than HTTP-FLV. However, HTTP-FLV has lower latency +than HLS, typically achieving a delay of around 3 to 5 seconds, while HLS latency is generally 8 to 10 seconds or more. + +In terms of protocol implementation, RTMP and HTTP-FLV are very similar. RTMP is based on the TCP protocol, and +HTTP-FLV is based on HTTP, which is also a TCP protocol. Therefore, their characteristics are very similar. RTMP +is generally used for streaming and live production because most live production devices support RTMP. For playback +and consumption, HTTP-FLV or HLS is used because playback devices have better support for HTTP. + +HTTP-FLV is highly compatible, supported by almost all platforms and browsers except for the native iOS browser. +You can refer to [MSE](https://caniuse.com/?search=mse) for more information. To support the iOS browser, you can +consider using HLS or WASM. Note that for native iOS apps, the ijkplayer can be used as a playback option. + +## Usage + +SRS supports HTTP-FLV distribution, you can use [docker](./getting-started.md) or [build from source](./getting-started-build.md): + +```bash +docker run --rm -it -p 1935:1935 -p 8080:8080 ossrs/srs:5 \ + ./objs/srs -c conf/http.flv.live.conf +``` + +Use [FFmpeg(click to download)](https://ffmpeg.org/download.html) or [OBS(click to download)](https://obsproject.com/download) to push the stream: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +Open the following page to play the stream (if SRS is not on your local machine, please replace localhost with the server IP): + +* HLS by SRS player: [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html) + +## Config + +The configuration for HTTP-FLV is as follows: + +```bash +http_server { + # whether http streaming service is enabled. + # Overwrite by env SRS_HTTP_SERVER_ENABLED + # default: off + enabled on; + # the http streaming listen entry is <[ip:]port> + # for example, 192.168.1.100:8080 + # where the ip is optional, default to 0.0.0.0, that is 8080 equals to 0.0.0.0:8080 + # @remark, if use lower port, for instance 80, user must start srs by root. + # Overwrite by env SRS_HTTP_SERVER_LISTEN + # default: 8080 + listen 8080; + # whether enable crossdomain request. + # for both http static and stream server and apply on all vhosts. + # Overwrite by env SRS_HTTP_SERVER_CROSSDOMAIN + # default: on + crossdomain on; +} +vhost __defaultVhost__ { + # http flv/mp3/aac/ts stream vhost specified config + http_remux { + # whether enable the http live streaming service for vhost. + # Overwrite by env SRS_VHOST_HTTP_REMUX_ENABLED for all vhosts. + # default: off + enabled on; + # the fast cache for audio stream(mp3/aac), + # to cache more audio and send to client in a time to make android(weixin) happy. + # @remark the flv/ts stream ignore it + # @remark 0 to disable fast cache for http audio stream. + # Overwrite by env SRS_VHOST_HTTP_REMUX_FAST_CACHE for all vhosts. + # default: 0 + fast_cache 30; + # Whether drop packet if not match header. For example, there is has_audio and has video flag in FLV header, if + # this is set to on and has_audio is false, then SRS will drop audio packets when got audio packets. Generally + # it should work, but sometimes you might need SRS to keep packets even when FLV header is set to false. 
+ # See https://github.com/ossrs/srs/issues/939#issuecomment-1348740526 + # TODO: Only support HTTP-FLV stream right now. + # Overwrite by env SRS_VHOST_HTTP_REMUX_DROP_IF_NOT_MATCH for all vhosts. + # Default: on + drop_if_not_match on; + # Whether stream has audio track, used as default value for stream metadata, for example, FLV header contains + # this flag. Sometimes you might want to force the metadata by disable guess_has_av. + # For HTTP-FLV, use this as default value for FLV header audio flag. See https://github.com/ossrs/srs/issues/939#issuecomment-1351385460 + # For HTTP-TS, use this as default value for PMT table. See https://github.com/ossrs/srs/issues/939#issuecomment-1365086204 + # Overwrite by env SRS_VHOST_HTTP_REMUX_HAS_AUDIO for all vhosts. + # Default: on + has_audio on; + # Whether stream has video track, used as default value for stream metadata, for example, FLV header contains + # this flag. Sometimes you might want to force the metadata by disable guess_has_av. + # For HTTP-FLV, use this as default value for FLV header video flag. See https://github.com/ossrs/srs/issues/939#issuecomment-1351385460 + # For HTTP-TS, use this as default value for PMT table. See https://github.com/ossrs/srs/issues/939#issuecomment-1365086204 + # Overwrite by env SRS_VHOST_HTTP_REMUX_HAS_VIDEO for all vhosts. + # Default: on + has_video on; + # Whether guessing stream about audio or video track, used to generate the flags in, such as FLV header. If + # guessing, depends on sequence header and frames in gop cache, so it might be incorrect especially your stream + # is not regular. If not guessing, use the configured default value has_audio and has_video. + # For HTTP-FLV, enable guessing for av header flag, because FLV can't change the header. See https://github.com/ossrs/srs/issues/939#issuecomment-1351385460 + # For HTTP-TS, ignore guessing because TS refresh the PMT when codec changed. See https://github.com/ossrs/srs/issues/939#issuecomment-1365086204 + # Overwrite by env SRS_VHOST_HTTP_REMUX_GUESS_HAS_AV for all vhosts. + # Default: on + guess_has_av on; + # the stream mount for rtmp to remux to live streaming. + # typical mount to [vhost]/[app]/[stream].flv + # the variables: + # [vhost] current vhost for http live stream. + # [app] current app for http live stream. + # [stream] current stream for http live stream. + # @remark the [vhost] is optional, used to mount at specified vhost. + # the extension: + # .flv mount http live flv stream, use default gop cache. + # .ts mount http live ts stream, use default gop cache. + # .mp3 mount http live mp3 stream, ignore video and audio mp3 codec required. + # .aac mount http live aac stream, ignore video and audio aac codec required. + # for example: + # mount to [vhost]/[app]/[stream].flv + # access by http://ossrs.net:8080/live/livestream.flv + # mount to /[app]/[stream].flv + # access by http://ossrs.net:8080/live/livestream.flv + # or by http://192.168.1.173:8080/live/livestream.flv + # mount to [vhost]/[app]/[stream].mp3 + # access by http://ossrs.net:8080/live/livestream.mp3 + # mount to [vhost]/[app]/[stream].aac + # access by http://ossrs.net:8080/live/livestream.aac + # mount to [vhost]/[app]/[stream].ts + # access by http://ossrs.net:8080/live/livestream.ts + # @remark the port of http is specified by http_server section. + # Overwrite by env SRS_VHOST_HTTP_REMUX_MOUNT for all vhosts. + # default: [vhost]/[app]/[stream].flv + mount [vhost]/[app]/[stream].flv; + } +} +``` + +> Note: These settings are only for playing HLS. 
For streaming settings, please follow your protocol, like referring to [RTMP](./rtmp.md#config), [SRT](./srt.md#config), or [WebRTC](./webrtc.md#config) streaming configurations. + +The important settings are explained below: + +* `has_audio`: If there is an audio stream or not. If your stream doesn't have audio, set this to `off`. Otherwise, the player might wait for audio. +* `has_video`: If there is a video stream or not. If your stream doesn't have video, set this to `off`. Otherwise, the player might wait for video. + +## Cluster + +SRS supports HTTP-FLV cluster distribution, which can handle a large number of viewing clients. Please refer to [HTTP-FLV Cluster](./sample-http-flv-cluster.md) and [Edge](./edge.md). + +## Crossdomain + +SRS supports HTTP CORS by default. Please refer to [HTTP CORS](./http-server.md#crossdomain). + +## Websocket FLV + +You can convert HTTP-FLV to WebSocket-FLV stream. Please refer to [videojs-flow](https://github.com/winlinvip/videojs-flow). + +For HTTP to WebSocket conversion, please refer to [mse.go](https://github.com/winlinvip/videojs-flow/blob/master/demo/mse.go). + +## HTTP FLV VOD Stream + +For HTTP FLV on-demand streaming, please refer to: [v4_CN_FlvVodStream](./flv-vod-stream.md). + +## HTTP and HTTPS Proxy + +SRS works well with HTTP/HTTPS proxies such as [Nginx](https://github.com/ossrs/srs/issues/2881#nginx-proxy), [HTTPX](https://github.com/ossrs/srs/issues/2881#httpx-proxy), [CaddyServer](https://github.com/ossrs/srs/issues/2881#caddy-proxy), etc. For detailed configuration, please refer to [#2881](https://github.com/ossrs/srs/issues/2881). + +## HTTPS FLV Live Stream + +SRS supports converting RTMP streams to HTTPS FLV streams. When publishing RTMP streams, a corresponding HTTP address is mounted in the SRS HTTP module (according to the configuration). Users can access this HTTPS FLV file, and the RTMP stream is converted to FLV for distribution. + +Please refer to [HTTPS Server](./http-server.md#https-server) or the `conf/https.flv.live.conf` configuration file. + +## HTTP TS Live Stream + +SRS supports converting RTMP streams to HTTP TS streams. When publishing RTMP streams, a corresponding HTTP address is mounted in the SRS HTTP module (according to the configuration). Users can access this HTTP TS file, and the RTMP stream is converted to TS for distribution. + +Please refer to the `conf/http.ts.live.conf` configuration file. + +## HTTP Mp3 Live Stream + +SRS supports discarding video from RTMP streams and converting audio streams to MP3 format. A corresponding HTTP address is mounted in the SRS HTTP module (according to the configuration). Users can access this HTTP MP3 file, and the RTMP stream is converted to MP3 for distribution. + +Please refer to the `conf/http.mp3.live.conf` configuration file. + +## HTTP Aac Live Stream + +SRS supports discarding video from RTMP streams and converting audio streams to AAC format. A corresponding HTTP address is mounted in the SRS HTTP module (according to the configuration). Users can access this HTTP AAC file, and the RTMP stream is converted to AAC for distribution. + +Please refer to the `conf/http.aac.live.conf` configuration file. + +## Why HTTP FLV + +Why use HTTP FLV? HTTP FLV streaming is becoming more popular. The main advantages are: + +1. In the field of real-time Internet streaming media, RTMP is still dominant. HTTP-FLV has the same latency as RTMP, so it can meet latency requirements. +2. 
Firewall penetration: Many firewalls block RTMP but not HTTP, so HTTP FLV is less likely to have strange issues. +3. Scheduling: RTMP has a 302 feature, but it's only supported in the player's ActionScript. HTTP FLV supports 302, making it easier for CDNs to correct DNS errors. +4. Fault tolerance: SRS's HTTP FLV can have multiple sources, just like RTMP, supporting multi-level hot backup. +5. Universality: Flash can play both RTMP and HTTP FLV. Custom apps and mainstream players also support HTTP FLV playback. +6. Simplicity: FLV is the simplest streaming media encapsulation, and HTTP is the most widely used protocol. Combining these two makes maintenance much easier than RTMP. + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/en/v6/flv) + diff --git a/versioned_docs/version-6.0/doc/forward.md b/versioned_docs/version-6.0/doc/forward.md new file mode 100644 index 00000000..58310946 --- /dev/null +++ b/versioned_docs/version-6.0/doc/forward.md @@ -0,0 +1,272 @@ +--- +title: Forward +sidebar_label: Forward +hide_title: false +hide_table_of_contents: false +--- + +# Forward For Small Cluster + +SRS is design for live server, the forward is a important feature, used to +forward stream on server to other live servers. + +Note: The information about edge, read [Edge](./edge.md), +the best solution for large cluster and huge concurrency. + +Note: The edge is for both play and publish. + +Note: Use edge first, except need to copy a stream to multiple servers in a time. + +The forward is used for fault backup, the origin can forward a stream to multiple origin servers, +the edge can use multiple origin server for backup. + +For the usage of forward, read [Usage: Forward](./sample-forward.md) + +## Keywords + +The forward defined some roles: + +* master: The master server which forward stream to slave server. +* slave: The slave server which accept stream from master. + +Although the origin/edge can be master/slave, but it is too complex, it is strongly recomments that +the forward(master/slave) only for origin, never use edge to forward stream. + +## Config + +Please refer to the vhost `same.vhost.forward.srs.com` of `full.conf`: + +``` +vhost __defaultVhost__ { + # forward stream to other servers. + forward { + # whether enable the forward. + # default: off + enabled on; + # forward all publish stream to the specified server. + # this used to split/forward the current stream for cluster active-standby, + # active-active for cdn to build high available fault tolerance system. + # format: {ip}:{port} {ip_N}:{port_N} + destination 127.0.0.1:1936 127.0.0.1:1937; + + # when client(encoder) publish to vhost/app/stream, call the hook in creating backend forwarder. + # the request in the POST data string is a object encode by json: + # { + # "action": "on_forward", + # "server_id": "vid-k21d7y2", + # "client_id": "9o7g1330", + # "ip": "127.0.0.1", + # "vhost": "__defaultVhost__", + # "app": "live", + # "tcUrl": "rtmp://127.0.0.1:1935/live", + # "stream": "livestream", + # "param": "" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # { + # "code": 0, + # "data": { + # "urls":[ + # "rtmp://127.0.0.1:19350/test/teststream" + # ] + # } + # } + # PS: you can transform params to backend service, such as: + # { "param": "?forward=rtmp://127.0.0.1:19351/test/livestream" } + # then backend return forward's url in response. + # if backend return empty urls, destanition is still disabled. 
+ # only support one api hook, format: + # backend http://xxx/api0 + backend http://127.0.0.1:8085/api/v1/forward; + } +} +``` + +## Dynamic Forward + +SRS support dynamic forwarding, to query the forwarding config from your backend API. + +So you must write a backend server, which is an HTTP server, or web server. It accepts HTTP requests from SRS, and then +responses the content with configs for SRS to do forward. It works like this: + +```text + +------+ +Client ---Push-RTMP-->--+ SRS +---HTTP-Request---> Your Backend Server + | | + + + +--<---Forward-Config----+ + | | + + +----Push-RTMP----> RTMP Server + +------+ +``` + +First, config the `backend` of forward: + +``` +vhost __defaultVhost__ { + forward { + enabled on; + backend http://127.0.0.1:8085/api/v1/forward; + } +} +``` + +While client publishing to SRS, SRS will request your HTTP backend server, with request body: + +```json +{ + "action": "on_forward", + "server_id": "vid-k21d7y2", + "client_id": "9o7g1330", + "ip": "127.0.0.1", + "vhost": "__defaultVhost__", + "app": "live", + "tcUrl": "rtmp://127.0.0.1:1935/live", + "stream": "livestream", + "param": "" +} +``` + +If your backend server responses with RTMP urls, SRS will start forwarding to the RTMP server: + +```json +{ + "code": 0, + "data": { + "urls":[ + "rtmp://127.0.0.1:19350/test/teststream" + ] + } +} +``` + +> Note: If urls is empty array, SRS won't forward it. + +For more details about dynamic forwarding, please read [#1342](https://github.com/ossrs/srs/issues/1342). + +## For Small Cluster + +Forward can also used to build a small cluster: + +```bash + +-------------+ +---------------+ + +-->+ Slave(1935) +->--+ Player(3000) + + | +-------------+ +---------------+ + | +-------------+ +---------------+ + |-->+ Slave(1936) +->--+ Player(3000) + + publish forward | +-------------+ +---------------+ ++-----------+ +--------+ | 192.168.1.6 +| Encoder +-->-+ Master +-->-| ++-----------+ +--------+ | +-------------+ +---------------+ + 192.168.1.3 192.168.1.5 +-->+ Slave(1935) +->--+ Player(3000) + + | +-------------+ +---------------+ + | +-------------+ +---------------+ + +-->+ Slave(1936) +->--+ Player(3000) + + +-------------+ +---------------+ + 192.168.1.7 +``` + +The below sections is the example for this small cluster. + +### Encoder + +Use FFMPEG as encoder to publish stream to master: + +```bash +for((;;)); do\ + ./objs/ffmpeg/bin/ffmpeg -re -i doc/source.flv \ + -c copy -f flv rtmp://192.168.1.5:1935/live/livestream; \ +done +``` + +### SRS-Master Server + +The SRS master server(192.168.1.5) config: + +```bash +listen 1935; +pid ./objs/srs.pid; +max_connections 10240; +vhost __defaultVhost__ { + forward { + enabled on; + destination 192.168.1.6:1935 192.168.1.6:1936 192.168.1.7:1935 192.168.1.7:1936; + } +} +``` + +The RTMP play url on master is: `rtmp://192.168.1.5/live/livestream` + +The master will forward stream to four slaves on two servers. + +### SRS-Slave Server + +The slave server can use different port to run on multiple cpu server. +The slave on the same server must use different port and pid file. + +For example, the slave server 192.168.1.6, start two SRS servers, listen at 1935 and 1936. 
+ +The config file for port 1935 `srs.1935.conf`: + +```bash +listen 1935; +pid ./objs/srs.1935.pid; +max_connections 10240; +vhost __defaultVhost__ { +} +``` + +The config file for port 1936 `srs.1936.conf`: + +```bash +listen 1936; +pid ./objs/srs.1936.pid; +max_connections 10240; +vhost __defaultVhost__ { +} +``` + +Start these two SRS processes: + +```bash +nohup ./objs/srs -c srs.1935.conf >/dev/null 2>&1 & +nohup ./objs/srs -c srs.1936.conf >/dev/null 2>&1 & +``` + +The player random access these streams: +* `rtmp://192.168.1.6:1935/live/livestream` +* `rtmp://192.168.1.6:1936/live/livestream` + +The other slave server 192.168.1.7 is similar to 192.168.1.6 + +### Stream in Service + +The stream in service: + +| Url | Server | Port | Clients | +| ---- | ----- | ----- | ------- | +| rtmp://192.168.1.6:1935/live/livestream | 192.168.1.6 | 1935 | 3000 | +| rtmp://192.168.1.6:1936/live/livestream | 192.168.1.6 | 1936 | 3000 | +| rtmp://192.168.1.7:1935/live/livestream | 192.168.1.7 | 1935 | 3000 | +| rtmp://192.168.1.7:1936/live/livestream | 192.168.1.7 | 1936 | 3000 | + +This architecture can support 12k clients. +User can add more slave or start new ports. + +## Forward VS Edge + +The forward is not used in cdn, because CDN has thousands of servers, thousands of streams. +The forward will always forward all stream to slave servers. + +CDN or large cluster must use edge, never use forward. + +## Other Use Scenarios + +Forward used for transcoder, we can transcode a h.264+speex stream to a vhost, while this vhost forward +stream to slave. Then all stream on slave is h.264+aac, to delivery HLS. + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/forward) + + diff --git a/versioned_docs/version-6.0/doc/gb28181.md b/versioned_docs/version-6.0/doc/gb28181.md new file mode 100644 index 00000000..ae237c30 --- /dev/null +++ b/versioned_docs/version-6.0/doc/gb28181.md @@ -0,0 +1,15 @@ +--- +title: GB28181 +sidebar_label: GB28181 +hide_title: false +hide_table_of_contents: false +--- + +# GB28181 + +On the way. + +## Candidate + +On the way. + diff --git a/versioned_docs/version-6.0/doc/getting-started-build.md b/versioned_docs/version-6.0/doc/getting-started-build.md new file mode 100644 index 00000000..2c72f187 --- /dev/null +++ b/versioned_docs/version-6.0/doc/getting-started-build.md @@ -0,0 +1,203 @@ +--- +title: Build +sidebar_label: Build +hide_title: false +hide_table_of_contents: false +--- + +# Build + +You can build SRS from source code, but [docker](./getting-started.md) is highly recommend. + +## Live Streaming + +SRS supports live streaming. + +Get SRS source, recommend [Ubuntu20](./install.md): + +``` +git clone -b develop https://github.com/ossrs/srs.git +``` + +Build SRS in `srs/trunk`: + +``` +cd srs/trunk +./configure +make +``` + +Run SRS server: + +``` +./objs/srs -c conf/srs.conf +``` + +Check SRS by [http://localhost:8080/](http://localhost:8080/) or: + +``` +# Check the process status +./etc/init.d/srs status + +# Check the SRS logs +tail -n 30 -f ./objs/srs.log +``` + +Publish stream by [FFmpeg](https://ffmpeg.org/download.html) or [OBS](https://obsproject.com/download) : + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +> Note: The file `./doc/source.flv` is under the source repository of SRS. 
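Optionally, before playing, you can confirm that the stream has arrived by querying the SRS HTTP API. This assumes the HTTP API is enabled on port 1985, as in the default `conf/srs.conf`:

```bash
# List active streams; the output should contain "livestream" with a publisher attached.
curl http://localhost:1985/api/v1/streams/
```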
+ +Play stream by: + +* RTMP (by [VLC](https://www.videolan.org/)): `rtmp://localhost/live/livestream` +* H5(HTTP-FLV): [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) +* H5(HLS): [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.m3u8&port=8080&schema=http) + +## WebRTC + +SRS supports WebRTC for video chat. + +Get SRS source, recommend [Ubuntu20](./install.md): + +``` +git clone -b develop https://github.com/ossrs/srs.git +``` + +Build SRS in `srs/trunk`: + +``` +cd srs/trunk +./configure +make +``` + +Run SRS server: + +``` +CANDIDATE="192.168.1.10" +./objs/srs -c conf/srs.conf +``` + +> Note: Please replace the IP with your server IP. + +> Note: About CANDIDATE, please read [CANDIDATE](./webrtc.md#config-candidate) + +Check SRS by [http://localhost:8080/](http://localhost:8080/) or: + +``` +# Check the process status +./etc/init.d/srs status + +# Check the SRS logs +tail -n 30 -f ./objs/srs.log +``` + +If SRS runs on localhost, push stream to SRS by [WebRTC: Publish](http://localhost:8080/players/rtc_publisher.html?autostart=true&stream=livestream&port=8080&schema=http) + +> Note: If not localhost, browser(WebRTC) requires HTTPS, please see [WebRTC using HTTPS](./getting-started.md#webrtc-using-https) for detail. + +Play stream of SRS by [WebRTC: Play](http://localhost:8080/players/rtc_player.html?autostart=true&stream=livestream&schema=http) + +> Note: If use different streams, you're able to do video chat application. + +## WebRTC for Live Streaming + +SRS supports converting live streaming to WebRTC. + +Get SRS source, recommend [Ubuntu20](./install.md): + +``` +git clone -b develop https://github.com/ossrs/srs.git +``` + +Build SRS in `srs/trunk`: + +``` +cd srs/trunk +./configure +make +``` + +Run SRS server: + +``` +CANDIDATE="192.168.1.10" +./objs/srs -c conf/rtmp2rtc.conf +``` + +> Note: Please replace the IP with your server IP. + +> Note: About CANDIDATE, please read [CANDIDATE](./webrtc.md#config-candidate) + +> Note: If convert RTMP to WebRTC, please use [`rtmp2rtc.conf`](https://github.com/ossrs/srs/issues/2728#rtmp2rtc-cn-guide) + +Publish stream by [FFmpeg](https://ffmpeg.org/download.html) or [OBS](https://obsproject.com/download) : + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +> Note: The file `./doc/source.flv` is under the source repository of SRS. + +Play stream by: + +* WebRTC: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html?autostart=true) +* H5(HTTP-FLV): [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) +* H5(HLS): [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.m3u8&port=8080&schema=http) + +## WebRTC using HTTPS + +If not localhost, for example, to view WebRTC on pad or mobile phone, when SRS is running on remote server. + +Get SRS source, recommend [Ubuntu20](./install.md): + +``` +git clone -b develop https://github.com/ossrs/srs.git +``` + +Build SRS in `srs/trunk`: + +``` +cd srs/trunk +./configure +make +``` + +Run SRS server: + +``` +CANDIDATE="192.168.1.10" +./objs/srs -c conf/https.rtc.conf +``` + +> Note: Please replace the IP with your server IP. 
+ +> Note: About CANDIDATE, please read [CANDIDATE](./webrtc.md#config-candidate) + +> Remark: Please use your HTTPS key and cert file, please read +> **[HTTPS API](./http-api.md#https-api)** +> and **[HTTPS Callback](./http-callback.md#https-callback)** +> and **[HTTPS Live Streaming](./flv.md#https-flv-live-stream)**, +> however HTTPS proxy also works perfect with SRS such as Nginx. + +Push stream to SRS by [WebRTC: Publish](https://192.168.3.82:8088/players/rtc_publisher.html?autostart=true&stream=livestream&api=1990&schema=https) + +Play stream of SRS by [WebRTC: Play](https://192.168.3.82:8088/players/rtc_player.html?autostart=true&stream=livestream&api=1990&schema=https) + +> Note: For self-sign certificate, please type `thisisunsafe` to accept it. + +> Note: If use different streams, you're able to do video chat application. + +## Cross Build + +Normally you're able to build SRS on both ARM or MIPS servers. + +If need to cross-build SRS for embed devices, pelase read [ARM and CrossBuild](./arm.md). + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/getting-started-build) + + diff --git a/versioned_docs/version-6.0/doc/getting-started-k8s.md b/versioned_docs/version-6.0/doc/getting-started-k8s.md new file mode 100644 index 00000000..50e956bc --- /dev/null +++ b/versioned_docs/version-6.0/doc/getting-started-k8s.md @@ -0,0 +1,22 @@ +--- +title: K8s +sidebar_label: K8s +hide_title: false +hide_table_of_contents: false +--- + +# K8s + +We recommend using the HELM method to deploy SRS, see [srs-helm](https://github.com/ossrs/srs-helm). Of course, +SRS also supports direct deployment with K8s, refer to [SRS K8s](./k8s.md). + +Actually, HELM is based on K8s and deploys K8s pods, which can be managed with kubectl. However, HELM offers a +more convenient way to manage and install applications, so SRS will mainly support HELM in the future. + +Compared to Docker, HELM and K8s are mainly for medium to large scale deployments. If your business is not that +big, we recommend using Docker or Oryx directly. Generally, if you have less than a thousand streams, please +do not use HELM or K8s. + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/getting-started-k8s) + + diff --git a/versioned_docs/version-6.0/doc/getting-started-oryx.md b/versioned_docs/version-6.0/doc/getting-started-oryx.md new file mode 100644 index 00000000..283337bf --- /dev/null +++ b/versioned_docs/version-6.0/doc/getting-started-oryx.md @@ -0,0 +1,424 @@ +--- +title: Oryx +sidebar_label: Oryx +hide_title: false +hide_table_of_contents: false +--- + +# Oryx + +Oryx(SRS Stack) is a video cloud solution that is lightweight, open-source, and based on Go, +Reactjs, SRS, FFmpeg, WebRTC, etc. + +## Introduction + +Oryx, an open-source out-of-the-box audio and video solution, is built entirely based on various scenarios. +Common examples include push-pull streaming scenarios that support different protocols and can be embedded into +websites like WordPress. + +In recording scenarios, it supports merging multiple streams, setting filters, and recording specific streams only. +For forwarding and virtual live streaming, files and other streams can be sent to different platforms or to Oryx +itself. With AI automatic subtitles, OpenAI's capabilities can be utilized to automatically recognize and embed +subtitles into the video stream. One-click automatic HTTPS makes it easy to enable HTTPS capabilities. + +More diverse scenarios will be available in the future. 
+ +## FAQ + +If you encounter issues while using Oryx, please read the [FAQ](../../../faq-oryx) first. + +## Usage + +Please select your platform. + +> Remark: Please choose the Ubuntu 20 system, as other systems may encounter some strange issues. + +### Docker + +Strongly recommend running Oryx with docker: + +```bash +docker run --restart always -d -it --name oryx -v $HOME/data:/data \ + -p 80:2022 -p 443:2443 -p 1935:1935 -p 8000:8000/udp -p 10080:10080/udp \ + ossrs/oryx:5 +``` + +Then you can open [http://localhost](http://localhost) to use Oryx. + +For more details, please refer to [Oryx Docker](https://github.com/ossrs/oryx#usage). + +### HELM + +Strongly recommend running Oryx with HELM: + +```bash +helm repo add srs http://helm.ossrs.io/stable +helm install srs srs/oryx --set persistence.path=$HOME/data \ + --set service.http=80 --set service.https=443 --set service.rtmp=1935 \ + --set service.rtc=8000 --set service.srt=10080 +``` + +Then you can open [http://localhost](http://localhost) to use Oryx. + +### Script + +For Ubuntu 20+, you can download the [linux-oryx-en.tar.gz](https://github.com/ossrs/oryx/releases/latest/download/linux-oryx-en.tar.gz) +and install it. + +### AWS Lightsail + +Oryx supports AWS Lightsail, which is a virtual private server (VPS) service offered by AWS. Please +follow [How to Establish a Video Streaming Service with a Single Click](../../../blog/Oryx-Tutorial). + +### DigitalOcean Droplet + +Easily set up an Oryx with just one click. For more information, check out +[How to Establish a Video Streaming Service with a Single Click](../../../blog/Oryx-Tutorial). + +### aaPanel + +Oryx offers a BaoTa plugin, for usage instructions refer to the [Oryx aaPanel Plugin](../../../blog/BT-aaPanel). + +## Changelog + +For the update log of the Oryx, please refer to [CHANGELOG](https://github.com/ossrs/oryx/blob/main/DEVELOPER.md#changelog). + +For specific features supported by a particular version, you can view the CHANGELOG in the version release, see [Releases](https://github.com/ossrs/oryx/releases). + +## Features + +About the features of Oryx and comparison with SRS,for more details please read [Features](https://github.com/ossrs/oryx?tab=readme-ov-file#features). + +### Compare to SRS + +Comparing Oryx and SRS, both offer media streaming capabilities at a similar level. +However, Oryx provides a more powerful and feature-rich experience for end users, +eliminating the need to write any code. Users can directly utilize Oryx for your +media services needs. + +| Comparison | Oryx | SRS | Notes | +|----------------|-------------------|---------------|-----------------------------------------------------------------| +| License | MIT | MIT | SRS is licenced under MIT, Oryx is MIT. | +| Live Streaming | Yes | Yes | Both support RTMP, HLS, and HTTP-FLV protocols. | +| WebRTC | Yes | Yes | WebRTC is supported by both. | +| Auto HTTPS | Yes | No | Oryx supports automatic request and update HTTPS certs. | +| Console | Enhanced | HTTP API | Oryx offers a more powerful console. | +| Authentication | Yes | HTTP Callback | Oryx has built-in authentication, while SRS uses callbacks. | +| DVR | Enhanced | File-based | Oryx supports DVR to file and cloud storage. | +| Forwarding | Enhanced | Basic | Oryx can forward to multiple platforms via various protocols. | +| Virtual Live | Yes | No | Oryx provides advanced virtual live streaming capabilities. | +| WordPress | Yes | No | Oryx offers a WordPress plugin and step-by-step guidelines. 
| +| Transcoding | Yes | No | Oryx supports live stream transcoding. | +| Transcription | Yes | No | Convert live speech to subtitle and overlay to video stream. | +| Live Room | Yes | No | Support live room feature. | +| Dubbing | Yes | No | Support dubbing VoD videos. | + +### Streaming and Authentication + +Oryx support enhanced streaming with authentication, based on SRS callback. Oryx generate and save +the stream token to Redis, and verify the stream token when user publish stream via RTMP, SRT, or WHIP/WebRTC. + +Oryx also proxies and secures all the HTTP API of SRS, so only authenticated user can access the HTTP API +and the console. + +### DVR + +Oryx support DVR or Recording, to convert live stream to file, then save to local disk or cloud storage. +We also support merge multiple republish session to one DVR file, and support set filters for recording specified +streams. + +See [A Step-by-Step Guide to Server-Side Recording and AWS S3 Integration](../../../blog/Record-Live-Streaming) for details. + +### Automatic HTTPS + +Oryx support automatic HTTPS, just by one click, you can enable HTTPS for your Oryx. Oryx will +automatically request and update the HTTPS certificate from [Let's Encrypt](https://letsencrypt.org/). Automatic HTTPS +allows WHIP or publish by webpage, and also support WebRTC, and access user's microphones. + +See [How to Secure SRS with Let's Encrypt by 1-Click](../../../blog/Oryx-HTTPS) for details. + +### Virtual Live Events + +You can use prerecorded videos to simulate live events. You can do 7x24 live stream with only 1 video file. You can +also pull stream to your live room, to make the live stream powerful. You can even pull your IP camera stream to your +live room. + +See [Harness the Power of Pre-Recorded Content for Seamless and Engaging Live Streaming Experiences](../../../blog/Virtual-Live-Events) and +[Easily Stream Your RTSP IP Camera to YouTube, Twitch, or Facebook](../../../blog/Stream-IP-Camera-Events). + +### Restream + +With Oryx, you can restream to multiple platforms, like YouTube, Twitch, Facebook, etc. Oryx will +automatically select a stream to forward, so you can publish multiple streams as fault-tolerant or backup +stream, when a stream is down, Oryx will switch to another one. + +See [Effortlessly Restream Live Content Across Multiple Platforms with Oryx](../../../blog/Multi-Platform-Streaming) for details. + +### AI Transcription + +Oryx supports AI transcription, which is powered by OpenAI, to convert live speech to text and overlay to +the video stream as a new live stream. With this feature, allows you to engage more audiences, especially for people +with hearing disabilities or those who are non-native speakers. + +See [Creating Accessible, Multilingual Subtitles for Diverse Audiences](../../../blog/live-streams-transcription) for details. + +### Transcode + +Oryx suppport transcoding live stream, to decrease the bitrate and save bandwidth and cost, or filter the +live stream content to make it better. + +See [Efficient Live Streaming Transcoding for Reducing Bandwidth and Saving Costs](../../../blog/Live-Transcoding) for details. + +## AI Products + +We are implementing various AI tools and products in the Oryx, and here is the latest status. We will continue +to update this document. + +1. AI Transcript: Implement voice-to-text by connecting to OpenAI's Whisper, and overlay the text captions onto the live broadcast, enabling automatic subtitles for streaming. + * Status: Completed and available in the Oryx. 
Refer to [Creating Accessible, Multilingual Subtitles for Diverse Audiences](../../../blog/live-streams-transcription). +1. Streamer AI Asssistant: Easily create a personal, voice-driven GPT AI assistant with Oryx for enhanced language learning, multi-language chats, and convenient assistance in any setting. Perfect for interactive streaming and daily tasks. It offers numerous possibilities for living room and streaming hosts with AI assistance. + * Status: Beta version available in the Oryx. Refer to [Speak to the Future - Transform Your Browser into a Personal Voice-Driven GPT AI Assistant with Oryx](../../../blog/browser-voice-driven-gpt). +1. VoD Translation: Translate English videos into Chinese for English learning or create multilingual videos, frequently used in education and e-commerce. + * Beta version available in the Oryx. Refer to [Revolutionize Video Content with Oryx - Effortless Dubbing and Translating to Multiple Languages Using OpenAI](../../../blog/dubbing-translating). +1. Stream OCR: Extract text from images in live streams, enabling real-time text recognition and translation for a variety of applications. + * Beta version available in the Oryx. Refer to [Oryx - Leveraging OpenAI for OCR and Object Recognition in Video Streams](../../../blog/ocr-video-streams). + +If you are interested in our AI products, feel free to join our [Discord](https://discord.gg/yZ4BnPmHAd) server to discuss with us. + +## HTTP API + +You can open the `System > OpenAPI` to get the Bearer token and try the HTTP API. + +You can click the button on the web to request a HTTP API, you can also use the curl or js code to request the +HTTP API. Please follow the instructions on the web, for example, use curl to request the HTTP API: + +```bash +curl http://localhost/terraform/v1/mgmt/versions +``` + +Or with the Bearer token: + +```bash +curl http://localhost/terraform/v1/hooks/srs/secret/query \ + -X POST -H 'Authorization: Bearer xxxxxx' \ + -H 'Content-Type: application/json' --data '{}' +``` + +> Note: You can open the `System > OpenAPI` to get the Bearer token and try the HTTP API. + +> Note: The web may use JWT token, but you can also use Bearer token to request the HTTP API. + +In addition to the sample APIs listed on this page, users can perform all web-based actions through the +HTTP API. To identify the requests and responses for each API, open Google Chrome, navigate to +`View > Developer > Developer Tools` click on the `Network` tab, and examine the relevant API interactions. + +Oryx also proxy the [SRS HTTP API](./http-api.md), which prefix with `/api/v1/` such as: + +```bash +curl http://localhost/api/v1/versions +``` + +Or with the Bearer token: + +```bash +curl http://localhost/api/v1/vhosts/ \ + -X GET -H 'Authorization: Bearer xxxxxx' \ + -H 'Content-Type: application/json' +``` + +> Note: You can open the `System > OpenAPI` to get the Bearer token and try the HTTP API. + +Please read the detail about the API from the [SRS HTTP API](./http-api.md). + +## HTTP Callback + +HTTP Callback refers to the Oryx running within a Docker container, initiating an HTTP request to +a target URL. For instance, the following process illustrates that when OBS publishs an RTMP stream to Oryx, +the Oryx informs your server about the event by sending an HTTP request to the target URL. 
+ +```bash + +-----------------------+ + + + ++-------+ + +-----------+ + +--------------+ ++ OBS +--RTMP->--+-----+ Oryx +-----+----HTTP--->-----+ Your Server + ++-------+ + +-----------+ + (Target URL) +--------------+ + + + + + Docker + + +-----------------------+ +``` + +All HTTP requests should be: + +* `Content-Type: application-json` + +All responses should use: + +* `Status: 200 OK` and `{"code": 0}` for success. +* Otherwise, error or fail. + +See examples in [HTTP Callback](../docs/v6/doc/http-callback#go-example) + +### HTTP Callback: Connectivity Check + +Occasionally, you might need to verify if the network is accessible and determine the appropriate target URL to +use. By using the curl command inside the Docker container, you can simulate this request and confirm if the +target URL can be accessed by curl or the Oryx. + +First, install curl in Oryx: + +```bash +docker exec -it oryx apt-get update -y +docker exec -it oryx apt-get install -y curl +``` + +Then, simulate an HTTP request to your server: + +```bash +docker exec -it oryx curl http://your-target-URL +``` + +You can use any target URL to test, such as: + +* Intranet IP: `http://192.168.1.10/check` +* Internet IP: `http://159.133.96.20/check` +* URL via HTTP: `http://your-domain.com/check` +* URL via HTTPS: `https://your-domain.com/check` + +Keep in mind that you should test the connection to the target URL within the Oryx Docker, and avoid +running the curl command from a different server. + +### HTTP Callback: on_publish + +For HTTP callback `on_publish` event: + +```json +Request: +{ + "request_id": "3ab26a09-59b0-42f7-98e3-a281c7d0712b", + "action": "on_unpublish", + "opaque": "mytoken", + "vhost": "__defaultVhost__", + "app": "live", + "stream": "livestream", + "param": "?secret=8f7605d657c74d69b6b48f532c469bc9" +} + +Response: +{ + "code": 0 +} +``` + +* Allow publishing if response success. +* Reject publishing if response error. + +### HTTP Callback: on_unpublish + +For HTTP callback `on_unpublish` event: + +```json +Request: +{ + "request_id": "9ea987fa-1563-4c28-8c6c-a0e9edd4f536", + "action": "on_unpublish", + "opaque": "mytoken", + "vhost": "__defaultVhost__", + "app": "live", + "stream": "livestream" +} + +Response: +{ + "code": 0 +} +``` + +* Ignore any response error. + +### HTTP Callback: on_record_begin + +For HTTP callback `on_record_begin` event: + +```json +Request: +{ + "request_id": "80ad1ddf-1731-450c-83ec-735ea79dd6a3", + "action": "on_record_begin", + "opaque": "mytoken", + "vhost": "__defaultVhost__", + "app": "live", + "stream": "livestream", + "uuid": "824b96f9-8d51-4046-ba1e-a9aec7d57c95" +} + +Response: +{ +"code": 0 +} +``` + +* Ignore any response error. + +### HTTP Callback: on_record_end + +For HTTP callback `on_record_end` event: + +```json +Request: +{ + "request_id": "d13a0e60-e2fe-42cd-a8d8-f04c7e71b5f5", + "action": "on_record_end", + "opaque": "mytoken", + "vhost": "__defaultVhost__", + "app": "live", + "stream": "livestream", + "uuid": "824b96f9-8d51-4046-ba1e-a9aec7d57c95", + "artifact_code": 0, + "artifact_path": "/data/record/824b96f9-8d51-4046-ba1e-a9aec7d57c95/index.mp4", + "artifact_url": "http://localhost/terraform/v1/hooks/record/hls/824b96f9-8d51-4046-ba1e-a9aec7d57c95/index.mp4" +} + +Response: +{ + "code": 0 +} +``` + +* The `uuid` is the UUID of record task. +* The `artifact_code` indicates the error code. If no error, it's 0. +* The `artifact_path` is the path of artifact mp4 in the container. +* The `artifact_url` is the URL path to access the artifact mp4. 
+* Ignore any response error. + +### HTTP Callback: on_ocr + +For HTTP callback `on_ocr` event: + +```json +Request: +{ + "request_id": "d13a0e60-e2fe-42cd-a8d8-f04c7e71b5f5", + "action": "on_ocr", + "opaque": "mytoken", + "vhost": "__defaultVhost__", + "app": "live", + "stream": "livestream", + "uuid": "824b96f9-8d51-4046-ba1e-a9aec7d57c95", + "prompt": "What is in the image?", + "result": "The image shows a scene featuring a character from a film, likely set in a military or high-tech environment." +} + +Response: +{ + "code": 0 +} +``` + +* The `uuid` is the UUID of OCR task. +* The `prompt` the AI model used for OCR. +* The `result` is the OCR result. +* Ignore any response error. + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/getting-started-oryx) + + diff --git a/versioned_docs/version-6.0/doc/getting-started.md b/versioned_docs/version-6.0/doc/getting-started.md new file mode 100644 index 00000000..deaaf62e --- /dev/null +++ b/versioned_docs/version-6.0/doc/getting-started.md @@ -0,0 +1,190 @@ +--- +title: Docker +sidebar_label: Docker +hide_title: false +hide_table_of_contents: false +--- + +# Docker + +Please run SRS with docker. + +## Live Streaming + +SRS supports live streaming. + +Run SRS using docker: + +```bash +docker run --rm -it -p 1935:1935 -p 1985:1985 -p 8080:8080 ossrs/srs:5 +``` + +> Note: The available images is [here](https://hub.docker.com/r/ossrs/srs/tags). + +Use docker of FFmpeg to publish: + +```bash +docker run --rm -it ossrs/srs:encoder ffmpeg -stream_loop -1 -re -i doc/source.flv \ + -c copy -f flv rtmp://host.docker.internal/live/livestream +``` + +Or publish stream by [FFmpeg](https://ffmpeg.org/download.html) or [OBS](https://obsproject.com/download) : + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +> Note: The file `./doc/source.flv` is under the source repository of SRS. + +Play stream by: + +* RTMP (by [VLC](https://www.videolan.org/)): `rtmp://localhost/live/livestream` +* H5(HTTP-FLV): [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) +* H5(HLS): [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.m3u8&port=8080&schema=http) + +## WebRTC + +SRS supports WebRTC for video chat. + +Run SRS using docker: + +```bash +CANDIDATE="192.168.1.10" +docker run --rm -it -p 1935:1935 -p 1985:1985 -p 8080:8080 -p 1990:1990 -p 8088:8088 \ + --env CANDIDATE=$CANDIDATE -p 8000:8000/udp \ + ossrs/srs:5 +``` + +> Note: Please replace the IP with your server IP. + +> Note: About CANDIDATE, please read [CANDIDATE](./webrtc.md#config-candidate) + +If SRS runs on localhost, push stream to SRS by [WebRTC: Publish](http://localhost:8080/players/rtc_publisher.html?autostart=true&stream=livestream&port=8080&schema=http) + +> Note: If not localhost, browser(WebRTC) requires HTTPS, please see [WebRTC using HTTPS](./getting-started.md#webrtc-using-https) for detail. + +Play stream of SRS by [WebRTC: Play](http://localhost:8080/players/rtc_player.html?autostart=true&stream=livestream&schema=http) + +> Note: If use different streams, you're able to do video chat application. + +## WebRTC for Live Streaming + +SRS supports coverting live streaming to WebRTC. 
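+
+In outline, you publish over RTMP as usual and play the same stream back over WebRTC (WHEP); the
+`rtmp2rtc.conf` used below enables this conversion path:
+
+```text
+FFmpeg/OBS ---RTMP---> SRS ---WebRTC(WHEP)---> Browser
+```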
+ +Run SRS using docker: + +```bash +CANDIDATE="192.168.1.10" +docker run --rm -it -p 1935:1935 -p 1985:1985 -p 8080:8080 \ + --env CANDIDATE=$CANDIDATE -p 8000:8000/udp \ + ossrs/srs:5 ./objs/srs -c conf/rtmp2rtc.conf +``` + +> Note: Please replace the IP with your server IP. + +> Note: About CANDIDATE, please read [CANDIDATE](./webrtc.md#config-candidate) + +> Note: If convert RTMP to WebRTC, please use [`rtmp2rtc.conf`](https://github.com/ossrs/srs/issues/2728#rtmp2rtc-en-guide) + +Use docker of FFmpeg to publish: + +```bash +docker run --rm -it ossrs/srs:encoder ffmpeg -stream_loop -1 -re -i doc/source.flv \ + -c copy -f flv rtmp://host.docker.internal/live/livestream +``` + +Or publish stream by [FFmpeg](https://ffmpeg.org/download.html) or [OBS](https://obsproject.com/download) : + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +> Note: The file `./doc/source.flv` is under the source repository of SRS. + +Play stream by: + +* WebRTC: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html?autostart=true) +* H5(HTTP-FLV): [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) +* H5(HLS): [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.m3u8&port=8080&schema=http) + +## WebRTC using HTTPS + +When pushing stream to SRS, if not localhost, for example, to view WebRTC on pad or mobile phone, when SRS is running on remote server. + +> Note: If only need to play WebRTC stream, HTTP is ok. If wants to push stream, and not localhost, you need HTTPS. + +Run SRS using docker: + +```bash +CANDIDATE="192.168.1.10" +docker run --rm -it -p 1935:1935 -p 1985:1985 -p 8080:8080 -p 1990:1990 -p 8088:8088 \ + --env CANDIDATE=$CANDIDATE -p 8000:8000/udp \ + ossrs/srs:5 ./objs/srs -c conf/https.docker.conf +``` + +> Note: Please replace the IP with your server IP. + +> Note: About CANDIDATE, please read [CANDIDATE](./webrtc.md#config-candidate) + +> Remark: Please use your HTTPS key and cert file, please read +> **[HTTPS API](./http-api.md#https-api)** +> and **[HTTPS Callback](./http-callback.md#https-callback)** +> and **[HTTPS Live Streaming](./flv.md#https-flv-live-stream)**, +> however HTTPS proxy also works perfect with SRS such as Nginx. + +Push stream to SRS by [WebRTC: Publish](https://192.168.3.82:8088/players/rtc_publisher.html?autostart=true&stream=livestream&api=1990&schema=https) + +Play stream of SRS by [WebRTC: Play](https://192.168.3.82:8088/players/rtc_player.html?autostart=true&stream=livestream&api=1990&schema=https) + +> Note: For self-sign certificate, please type `thisisunsafe` to accept it. + +> Note: If use different streams, you're able to do video chat application. + +## SRT for Live Streaming + +SRS supports publishing by SRT for live streaming, and play by SRT or other protocols. 
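+
+Under the hood, `conf/srt.conf` (used below) enables an SRT listener on UDP port 10080 and converts
+incoming SRT streams to RTMP, so they can also be played over the other protocols. A rough sketch of
+the relevant settings, not a verbatim copy of the file, looks like this:
+
+```bash
+# Sketch of the relevant parts of conf/srt.conf; check the file shipped
+# with your SRS version for the exact content.
+srt_server {
+    enabled on;
+    listen 10080;
+}
+vhost __defaultVhost__ {
+    srt {
+        enabled on;
+        srt_to_rtmp on;
+    }
+}
+```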
+ +First, start SRS with Docker: + +```bash +docker run --rm -it -p 1935:1935 -p 1985:1985 -p 8080:8080 -p 10080:10080/udp \ + ossrs/srs:5 ./objs/srs -c conf/srt.conf +``` + +Publish stream by [FFmpeg](https://ffmpeg.org/download.html) or [OBS](https://obsproject.com/download) : + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -pes_payload_size 0 -f mpegts \ + 'srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=publish' +``` + +Play stream by [ffplay](https://ffmpeg.org/download.html) or [OBS](https://obsproject.com/download) + +```bash +ffplay 'srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=request' +``` + +## Multiple Streams + +You can send multiple streams to SRS by using different URLs. There's no need to change any settings; +just change the URL for the stream you're publishing and playing. It's very easy and straightforward. + +* `rtmp://ip/live/livesteam` +* `rtmp://ip/live/livesteamN` +* `rtmp://ip/liveN/livestreamN` +* `rtmp://ip/whatever/doesnotmatter` +* `srt://ip:10080?streamid=#!::r=anyM/streamN,m=publish` +* `http://ip:1985/rtc/v1/whip/?app=anyM&stream=streamN` +* `http://ip:1985/rtc/v1/whep/?app=anyM&stream=streamN` +* `http://ip:8080/anyM/streamN.flv` +* `http://ip:8080/anyM/streamN.m3u8` +* `https://ip:8080/anyM/streamN.flv` +* `https://ip:8080/anyM/streamN.m3u8` + +SRS uses a configuration at the virtual host (vhost) level. All applications(app) and streams within the +same vhost share this configuration. For more information, please refer to the [RTMP URL](./rtmp-url-vhost.md) +documentation. + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/getting-started) + + diff --git a/versioned_docs/version-6.0/doc/git.md b/versioned_docs/version-6.0/doc/git.md new file mode 100644 index 00000000..146c19b7 --- /dev/null +++ b/versioned_docs/version-6.0/doc/git.md @@ -0,0 +1,59 @@ +--- +title: Git +sidebar_label: Git +hide_title: false +hide_table_of_contents: false +--- + +# Git Usage + +How to use stable version of SRS? How to update code? + +## Checkout Branch + +Some features are introduced in SRS2.0, the SRS1.0 does not support. +The wiki url specifies the version of SRS supports it. + +To checkout SRS1.0 branch: + +``` +git pull && git checkout 1.0release +``` + +To checkout SRS2.0 branch: + +``` +git pull && git checkout 2.0release +``` + +To checkout SRS3.0 branch: + +``` +git pull && git checkout 3.0release +``` + +To checkout SRS4.0 branch: + +``` +git pull && git checkout 4.0release +``` + +To checkout SRS5.0 branch(if no 5.0release branch, it's develop): + +``` +git pull && git checkout develop +``` + +## SRS Branches + +The release branch is more stable than develop. + +* 3.0release, stable release branch. +* 4.0release, stable release branch. +* develop(5.0), not stable. 
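+
+To confirm which branch and version you are actually on, something like the following works from the
+SRS source directory:
+
+```bash
+# Show the current branch and the nearest tag/commit.
+git branch --show-current
+git describe --tags --always
+# After building, the binary reports its own version.
+./objs/srs -v
+```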
+ +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/git) + + diff --git a/versioned_docs/version-6.0/doc/gperf.md b/versioned_docs/version-6.0/doc/gperf.md new file mode 100644 index 00000000..030d9c21 --- /dev/null +++ b/versioned_docs/version-6.0/doc/gperf.md @@ -0,0 +1,14 @@ +--- +title: GPERF +sidebar_label: GPERF +hide_title: false +hide_table_of_contents: false +--- + +# GPerf + +No English version, please read [v4_CN_GPERF](./gperf.md) or [SRS性能(CPU)、内存优化工具用法](https://www.jianshu.com/p/6d4a89359352) + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/gperf) + + diff --git a/versioned_docs/version-6.0/doc/gprof.md b/versioned_docs/version-6.0/doc/gprof.md new file mode 100644 index 00000000..021e5d83 --- /dev/null +++ b/versioned_docs/version-6.0/doc/gprof.md @@ -0,0 +1,14 @@ +--- +title: GPROF +sidebar_label: GPROF +hide_title: false +hide_table_of_contents: false +--- + +# Gprof + +Please read [SRS性能(CPU)、内存优化工具用法](https://www.jianshu.com/p/6d4a89359352) + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/gprof) + + diff --git a/versioned_docs/version-6.0/doc/hevc.md b/versioned_docs/version-6.0/doc/hevc.md new file mode 100644 index 00000000..f4f0bf7a --- /dev/null +++ b/versioned_docs/version-6.0/doc/hevc.md @@ -0,0 +1,333 @@ +--- +title: HEVC +sidebar_label: HEVC +hide_title: false +hide_table_of_contents: false +--- + +# HEVC + +HEVC, also known as H.265, is the next-generation encoding after H.264 and belongs to the same generation of codecs +as AV1. H.265 can save about half the bandwidth compared to H.264, or provide double the clarity and image quality at +the same bandwidth. + +However, the problem with H.265 is that it's not yet widely supported by clients. Almost all devices support H.264, +including low-performance phones or boxes, which have dedicated chips for H.264 support. Although H.265 has been +developed for almost ten years, there are still not enough devices that support it. In specific scenarios, like when +the device clearly supports H.265, you can choose H.265; otherwise, stick with H.264. + +Additionally, the support for H.265 in transport protocols is gradually improving, but not all protocols support it +yet. MPEG-TS was the first to support H.265, and since SRT and HLS are based on TS, they also support it. RTMP and +HTTP-FLV only started supporting HEVC and AV1 in March 2023 with the [Enhanced RTMP](https://github.com/veovera/enhanced-rtmp) +project. As for WebRTC, only Safari supports it currently, and Chrome is said to be in development. + +SRS 6.0 officially supports the H.265 feature. If you want to use the H.265 function, please switch to the SRS +6.0 version. Please refer to [#465](https://github.com/ossrs/srs/issues/465) for the detailed research and development process. + +## Overview + +The architecutre for SRS to support H.265(or HEVC): + +```text +FFmpeg --RTMP(h.265)---> SRS ----RTMP/FLV/TS/HLS/WebRTC(h.265)--> Chrome/Safari +``` + +For live streaming: + +* [Chrome 105+](https://caniuse.com/?search=HEVC) supports HEVC by default, see [this post](https://zhuanlan.zhihu.com/p/541082191). + * You're able to play mp4 directly by H5 video, or by MSE if HTTP-FLV/HTTP-TS/HLS etc. + * Please use [mpegts.js](https://github.com/xqq/mpegts.js) to play HTTP-TS with HEVC. 
+ * There is a plan for mpegts.js to support HTTP-FLV with HEVC, see [mpegts.js#64](https://github.com/xqq/mpegts.js/issues/64) +* [OBS 29+](https://github.com/obsproject/obs-studio/releases/tag/29.1.3) supports HEVC over RTMP. +* FFmpeg or ffplay supports libx265 + * FFmpeg 6 supports HEVC over RTMP, see [637c761b](https://github.com/FFmpeg/FFmpeg/commit/637c761be1bf9c3e1f0f347c5c3a390d7c32b282) for detail. + * FFmpeg 4 or 5, need some patch for HEVC over RTMP/FLV, see **[FFmpeg Tools](#ffmpeg-tools)** bellow. +* SRS also supports HEVC. + * We have merged HEVC support into SRS 6.0 + * The original supports for HEVC is [srs-gb28181/feature/h265](https://github.com/ossrs/srs-gb28181/commits/feature/h265) by [runner365](https://github.com/runner365) + +> Note: To check if your Chrome support HEVC, please open `chrome://gpu` and search `hevc`. + +For WebRTC: + +* Chrome does not support HEVC right now(2022.11), but supports AV1, please see [#2324](https://github.com/ossrs/srs/pull/2324) +* Safari supports HEVC if user enable it, please see this [section](#safari-webrtc) +* SRS also only supports AV1, because Chrome does not support HEVC yet. + +## Usage + +Please make sure your SRS is `6.0.4+`, build with h265: + +```bash +docker run --rm -it -p 1935:1935 -p 8080:8080 ossrs/srs:6 \ + ./objs/srs -c conf/hevc.flv.conf +``` + +> Note: Besides environment variables, you can also use `conf/hevc.flv.conf` or `conf/hevc.ts.conf` config files. +> Note: Recommend `conf/hevc.ts.conf` because TS is better for HEVC. + +Build and patch FFmpeg, see [FFmpeg Tools](#ffmpeg-tools): + +```bash +# For macOS +docker run --rm -it ossrs/srs:encoder ffmpeg -stream_loop -1 -re -i doc/source.flv \ + -acodec copy -vcodec libx265 -f flv rtmp://host.docker.internal/live/livestream + +# For linux +docker run --net=host --rm -it ossrs/srs:encoder ffmpeg -stream_loop -1 -re -i doc/source.flv \ + -acodec copy -vcodec libx265 -f flv rtmp://127.0.0.1/live/livestream +``` + +> Note: Please change the ip `host.docker.internal` to your SRS's IP. + +Play the HEVC live streams by: + +* HTTP-FLV(by H5): [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true) +* HLS(by VLC or fflay): `http://localhost:8080/live/livestream.m3u8` + +> Note: Please enable MPEG-DASH by `SRS_VHOST_DASH_ENABLED=on` then use VLC/ffplay to play stream `http://localhost:8080/live/livestream.mpd` + +> Note: Please enable HTTP-TS by `SRS_VHOST_HTTP_REMUX_MOUNT=[vhost]/[app]/[stream].ts` then use H5/VLC/ffplay to play stream `http://localhost:8080/live/livestream.ts` + +> Note: Please enable DVR MP4 by `SRS_VHOST_DVR_ENABLED=on SRS_VHOST_DVR_DVR_PATH=./objs/nginx/html/[app]/[stream].[timestamp].mp4` if want to covert live stream to MP4 file. + +> Note: The detail about available protocols and tools for HEVC, please see [Status of HEVC in SRS](#status-of-hevc-in-srs). + +> Note: The H5 player uses [mpegts.js](https://github.com/xqq/mpegts.js). + +## Status of HEVC in SRS + +The status of protocols and HEVC: + +* [x] PUSH HEVC over RTMP by FFmpeg. [v6.0.2](https://github.com/ossrs/srs/commit/178e40a5fc3cf0856ace914ae61696a73007f5bf) +* [x] PUSH HEVC over SRT by FFmpeg. [v6.0.20](https://github.com/ossrs/srs/pull/3366) +* [x] PUSH HEVC over RTMP by OBS. [#3464](https://github.com/ossrs/srs/issues/3464) https://github.com/obsproject/obs-studio/pull/8522 +* [x] PUSH HEVC over SRT by OBS. [v6.0.20](https://github.com/ossrs/srs/pull/3366) +* [x] PUSH HEVC over GB28181. 
[v6.0.25](https://github.com/ossrs/srs/pull/3408) +* [x] PULL HEVC over RTMP by FFmpeg, with [patch](#ffmpeg-tools) for FFmpeg. [v6.0.2](https://github.com/ossrs/srs/commit/178e40a5fc3cf0856ace914ae61696a73007f5bf) +* [x] PULL HEVC over HTTP-FLV by FFmpeg, with [patch](#ffmpeg-tools) for FFmpeg. [v6.0.2](https://github.com/ossrs/srs/commit/178e40a5fc3cf0856ace914ae61696a73007f5bf) +* [x] PULL HEVC over HTTP-TS by FFmpeg [v6.0.4](https://github.com/ossrs/srs/commit/70d5618979e5c8dc41b7cd87c78db7ca2b8a10e8) +* [x] PULL HEVC over HLS by FFmpeg [v6.0.11](https://github.com/ossrs/srs/commit/fff8d9863c3fba769b01782428257edf40f80a12) +* [x] PULL HEVC over MPEG-DASH by FFmpeg [v6.0.14](https://github.com/ossrs/srs/commit/edba2c25f13c0fa915bd8e8093a4005df6077858) +* [x] PULL HEVC over SRT by FFmpeg. [v6.0.20](https://github.com/ossrs/srs/pull/3366) +* [x] PUSH HEVC over WebRTC by Safari. [v6.0.34](https://github.com/ossrs/srs/pull/3441) +* [x] PULL HEVC over WebRTC by Safari. [v6.0.34](https://github.com/ossrs/srs/pull/3441) +* [ ] PUSH HEVC over WebRTC by Chrome/Firefox +* [ ] PULL HEVC over WebRTC by Chrome/Firefox +* [x] Play HEVC over HTTP-TS by [mpegts.js](https://github.com/xqq/mpegts.js), by Chrome 105+ MSE, **NO WASM**. [v6.0.1](https://github.com/ossrs/srs/commit/7e02d972ea74faad9f4f96ae881d5ece0b89f33b) +* [x] Play pure video(no audio) HEVC over HTTP-TS by [mpegts.js](https://github.com/xqq/mpegts.js). [v6.0.9](https://github.com/ossrs/srs/commit/d5bf0ba2da30698e18700b210d2b12eed5b21d29) +* [x] Play HEVC over HTTP-FLV by [mpegts.js](https://github.com/xqq/mpegts.js), by Chrome 105+ MSE, **NO WASM**. [v6.0.1](https://github.com/ossrs/srs/commit/7e02d972ea74faad9f4f96ae881d5ece0b89f33b) +* [ ] Play HEVC over HLS by [hls.js](https://github.com/video-dev/hls.js) +* [ ] Play HEVC over MPEG-DASH by [dash.js](https://github.com/Dash-Industry-Forum/dash.js) +* [x] Play HEVC over HTTP-TS by ffplay, by offical release. [v6.0.4](https://github.com/ossrs/srs/commit/70d5618979e5c8dc41b7cd87c78db7ca2b8a10e8) +* [x] PULL HEVC over RTMP by ffplay, with [patch](#ffmpeg-tools) for FFmpeg. [v6.0.2](https://github.com/ossrs/srs/commit/178e40a5fc3cf0856ace914ae61696a73007f5bf) +* [x] Play HEVC over HTTP-FLV by ffplay, with [patch](#ffmpeg-tools) for FFmpeg. [v6.0.2](https://github.com/ossrs/srs/commit/178e40a5fc3cf0856ace914ae61696a73007f5bf) +* [x] Play pure video(no audio) HEVC by ffplay. +* [x] Play HEVC over HLS by ffplay. [v6.0.11](https://github.com/ossrs/srs/commit/fff8d9863c3fba769b01782428257edf40f80a12) +* [x] Play HEVC over MPEG-DASH by ffplay. [v6.0.14](https://github.com/ossrs/srs/commit/edba2c25f13c0fa915bd8e8093a4005df6077858) +* [x] Play HEVC over SRT by ffplay. [v6.0.20](https://github.com/ossrs/srs/pull/3366) +* [x] Play HEVC over HTTP-TS by VLC, by official release. [v6.0.4](https://github.com/ossrs/srs/commit/70d5618979e5c8dc41b7cd87c78db7ca2b8a10e8) +* [x] Play HEVC over SRT by VLC, by official. [v6.0.20](https://github.com/ossrs/srs/pull/3366) +* [x] Play pure video(no audio) HEVC by VLC. +* [ ] Play HEVC over RTMP by VLC. +* [ ] Play HEVC over HTTP-FLV by VLC. +* [x] Play HEVC over HLS by VLC. [v6.0.11](https://github.com/ossrs/srs/commit/fff8d9863c3fba769b01782428257edf40f80a12) +* [x] Play HEVC over MPEG-DASH by VLC. [v6.0.14](https://github.com/ossrs/srs/commit/edba2c25f13c0fa915bd8e8093a4005df6077858) +* [x] DVR HEVC to MP4/FLV file. [v6.0.14](https://github.com/ossrs/srs/commit/edba2c25f13c0fa915bd8e8093a4005df6077858) +* [x] HTTP API contains HEVC metadata. 
+* [ ] HTTP Callback takes HEVC metadata. +* [ ] Prometheus Exporter supports HEVC metadata. +* [ ] Improve coverage for HEVC. +* [x] Add regression/blackbox tests for HEVC. +* [ ] Supports benchmark for HEVC by [srs-bench](https://github.com/ossrs/srs-bench). +* [x] Support patched FFmpeg for SRS dockers: [CentOS7](https://github.com/ossrs/dev-docker/commit/0691d016adfe521f77350728d15cead8086d527d), [Ubuntu20](https://github.com/ossrs/dev-docker/commit/0e36323d15544ffe2901d10cfd255d9ef08fb250) and [Encoder](https://github.com/ossrs/dev-docker/commit/782bb31039653f562e0765a0c057d9f9babd1d1f). +* [x] Update [WordPress plugin SrsPlayer](https://github.com/ossrs/WordPress-Plugin-SrsPlayer) for HEVC. +* [ ] Update [srs-cloud](https://github.com/ossrs/srs-cloud) for HEVC. +* [ ] Edge server supports publish HEVC stream to origin. +* [ ] Edge server supprots play HEVC stream from origin. +* [ ] [HEVC: Error empty SPS/PPS when coverting RTMP to HEVC.](https://github.com/ossrs/srs/issues/3407) + +> Note: We're merging HEVC support to SRS 6.0, the original supports for HEVC is [srs-gb28181/feature/h265](https://github.com/ossrs/srs-gb28181/commits/feature/h265) by [runner365](https://github.com/runner365) + +## FFmpeg Tools + +The FFmpeg in `ossrs/srs:encoder` or `ossrs/srs:6` is built with libx265 and patched with HEVC over RTMP support. So you're able to directly use: + +```bash +docker run --rm -it --net host ossrs/srs:encoder \ + ffmpeg -re -i doc/source.flv -acodec copy -vcodec libx265 \ + -f flv rtmp://localhost/live/livestream +``` + +If you want to build from code, please read the bellow instructions. Before build FFmpeg, we must build +[libx264](https://www.videolan.org/developers/x264.html): + +```bash +git clone https://code.videolan.org/videolan/x264.git ~/git/x264 +cd ~/git/x264 +./configure --prefix=$(pwd)/build --disable-asm --disable-cli --disable-shared --enable-static +make -j10 +make install +``` + +And then [libx265](https://www.videolan.org/developers/x265.html): + +```bash +git clone https://bitbucket.org/multicoreware/x265_git.git ~/git/x265_git +cd ~/git/x265_git/build/linux +cmake -DCMAKE_INSTALL_PREFIX=$(pwd)/build -DENABLE_SHARED=OFF ../../source +make -j10 +make install +``` + +Keep in mind that FFmpeg 6.0 does not support HEVC over RTMP until the following commit +[637c761b](https://github.com/FFmpeg/FFmpeg/commit/637c761be1bf9c3e1f0f347c5c3a390d7c32b282): + +``` +commit 637c761be1bf9c3e1f0f347c5c3a390d7c32b282 +Author: Steven Liu +Date: Mon Aug 28 09:59:24 2023 +0800 + + avformat/rtmpproto: support enhanced rtmp + + add option named rtmp_enhanced_codec, + it would support hvc1,av01,vp09 now, + the fourcc is using Array of strings. 
+ + Signed-off-by: Steven Liu +``` + +So, if you are using FFmpeg 6, you can build FFmpeg without any patch, directly by the following commands: + +```bash +git clone -b master https://github.com/FFmpeg/FFmpeg.git ~/git/FFmpeg +cd ~/git/FFmpeg +env PKG_CONFIG_PATH=~/git/x264/build/lib/pkgconfig:~/git/x265_git/build/linux/build/lib/pkgconfig \ +./configure \ + --prefix=$(pwd)/build \ + --enable-gpl --enable-nonfree --enable-pthreads --extra-libs=-lpthread \ + --disable-asm --disable-x86asm --disable-inline-asm \ + --enable-decoder=aac --enable-decoder=aac_fixed --enable-decoder=aac_latm --enable-encoder=aac \ + --enable-libx264 --enable-libx265 \ + --pkg-config-flags='--static' +make -j10 +``` + +Push HEVC over RTMP to SRS: + +```bash +./ffmpeg -stream_loop -1 -re -i ~/srs/doc/source.flv -acodec copy -vcodec libx265 \ + -f flv rtmp://localhost/live/livestream +``` + +Play HEVC over RTMP by ffplay: + +```bash +./ffplay rtmp://localhost/live/livestream +``` + +It works like magic! + +If you want to use HEVC over RTMP in FFmpeg 4.1 or 5.1, please read the following instructions. Please clone FFmepg +and checkout to 5.1: + +> Note: The [specfication](https://github.com/ksvc/FFmpeg/wiki) and [usage](https://github.com/ksvc/FFmpeg/wiki/hevcpush) +to support HEVC over RTMP or FLV. There is a [patch for FFmpeg 4.1/5.1/6.0](https://github.com/runner365/ffmpeg_rtmp_h265) +from [runner365](https://github.com/runner365) for FFmpeg to support HEVC over RTMP or FLV. There is also a +[patch](https://github.com/VCDP/CDN/blob/master/FFmpeg_patches/0001-Add-SVT-HEVC-FLV-support-on-FFmpeg.patch) +from Intel for this feature. + +```bash +git clone -b n5.1.2 https://github.com/FFmpeg/FFmpeg.git ~/git/FFmpeg +``` + +Then, patch for [HEVC over RTMP/FLV](https://github.com/runner365/ffmpeg_rtmp_h265): + +```bash +git clone -b 5.1 https://github.com/runner365/ffmpeg_rtmp_h265.git ~/git/ffmpeg_rtmp_h265 +cp ~/git/ffmpeg_rtmp_h265/flv.h ~/git/FFmpeg/libavformat/ +cp ~/git/ffmpeg_rtmp_h265/flv*.c ~/git/FFmpeg/libavformat/ +``` + +Finally, follow the previous instructions to build FFmpeg. + +## MSE for HEVC + +[MSE](https://caniuse.com/?search=mse) is a base technology for [mpegts.js](https://github.com/xqq/mpegts.js), [hls.js](https://github.com/video-dev/hls.js/) and [dash.js](https://github.com/Dash-Industry-Forum/dash.js). + +Now [Chrome 105+](https://caniuse.com/?search=HEVC) supports HEVC by default, see [this post](https://zhuanlan.zhihu.com/p/541082191), which means, MSE(Chrome 105+) is available for HEVC. + +You can verify this feature, by generating a HEVC mp4 file: + +```bash +ffmpeg -i ~/git/srs/trunk/doc/source.flv -acodec copy \ + -vcodec libx265 -y source.hevc.mp4 +``` + +> Note: Please make sure your FFmpeg is 5.0 and libx265 is enabled. + +Open `source.hevc.mp4` in Chrome 105+ directly, it should works. 
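+
+To double-check that the generated file really carries HEVC before testing it in the browser, you can
+inspect the codec with ffprobe, for example:
+
+```bash
+# Should print "hevc" for the video stream of the generated file.
+ffprobe -v error -select_streams v:0 -show_entries stream=codec_name \
+    -of default=noprint_wrappers=1:nokey=1 source.hevc.mp4
+```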
+ +You can also move the file to SRS webserver: + +```bash +mkdir -p ~/git/srs/trunk/objs/nginx/html/vod/ +mv source.hevc.mp4 ~/git/srs/trunk/objs/nginx/html/vod +``` + +Then open by [srs-player](http://localhost:8080/players/srs_player.html?app=vod&stream=source.hevc.mp4&autostart=true) + +## Safari WebRTC + +Safari supports WebRTC, if you enable it by: + +* English version: `Develop > Experimental Features > WebRTC H265 codec` +* Chinese version: `Development > Experimental Features > WebRTC H265 codec` + +Then open the url in safari, to publish or play WebRTC stream: + +* Play [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream&codec=hevc](http://localhost:8080/players/whep.html?autostart=true&codec=hevc) +* Publish [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream&codec=hevc](http://localhost:8080/players/whip.html?autostart=true&codec=hevc) + +Please follow other section to publish HEVC stream. + +## Thanks for Contributors + +There is a list of commits and contributors about HEVC in SRS: + +* [H265: For #1747, Support HEVC/H.265 in SRT/RTMP/HLS.](https://github.com/ossrs/srs-gb28181/commit/3ca11071b45495e82d2d6958e5d0f7eab05e71e5) +* [H265: For #1747, Fix build fail bug for H.265](https://github.com/ossrs/srs-gb28181/commit/e355f3c37228f3602c88fed68e8fe5e6ba1153ea) +* [H265: For #1747, GB28181 support h.265 (#2037)](https://github.com/ossrs/srs-gb28181/commit/b846217bc7f94034b33bdf918dc3a49fb17947e0) +* [H265: fix some important bugs (#2156)](https://github.com/ossrs/srs-gb28181/commit/26218965dd083d13173af6eb31fcdf9868b753c6) +* [H265: Deliver the right hevc nalu and dump the wrong nalu. (#2447)](https://github.com/ossrs/srs-gb28181/commit/a13b9b54938a14796abb9011e7a8ee779439a452) +* [H265: Fix multi nal hevc frame demux fail. #2494](https://github.com/ossrs/srs-gb28181/commit/6c5e6090d7c82eb37530e109c230cabaedf948e1) +* [H265: Fix build error #2657 #2664](https://github.com/ossrs/srs-gb28181/commit/eac99e19fba6063279b9e47272523014f5e3334a) +* [H265: Update mpegts demux in srt. #2678](https://github.com/ossrs/srs-gb28181/commit/391c1426fc484c990e4324a4ae2f0de900074578) +* [H265: Fix the stat issue for h265. (#1949)](https://github.com/ossrs/srs-gb28181/commit/b4486e3b51281b4c227b2cc4f58d2b06db599ce0) +* [H265: Add h265 codec written support for MP4 format. (#2697)](https://github.com/ossrs/srs-gb28181/commit/3175d7e26730a04b27724e55dc95ef86c1f2886e) +* [H265: Add h265 for SRT.](https://github.com/runner365/srs/commit/0fa86e4f23847e8a46e3d0e91e0acd2c27047e11) + +We will merge some of these commits to SRS 6.0, but not all commits. + +* [PULL HEVC over WebRTC by Safari. v6.0.34](https://github.com/ossrs/srs/pull/3441) +* [GB: Support H.265 for GB28181. v6.0.25 (#3408)](https://github.com/ossrs/srs/pull/3408) +* [H265: Support HEVC over SRT. v6.0.20 (#465) (#3366)](https://github.com/ossrs/srs/pull/3366) +* [H265: Support DVR HEVC stream to MP4. v6.0.14](https://github.com/ossrs/srs/pull/3360) +* HLS: Support HEVC over HLS. v6.0.11 +* [HEVC: The codec information is incorrect. v6.0.5](https://github.com/ossrs/srs/issues/3271) +* FFmpeg support libx265 and HEVC over RTMP/FLV: [CentOS7](https://github.com/ossrs/dev-docker/commit/0691d016adfe521f77350728d15cead8086d527d), [Ubuntu20](https://github.com/ossrs/dev-docker/commit/0e36323d15544ffe2901d10cfd255d9ef08fb250) and [Encoder](https://github.com/ossrs/dev-docker/commit/782bb31039653f562e0765a0c057d9f9babd1d1f). +* [H265: Support HEVC over HTTP-TS. 
v6.0.4](https://github.com/ossrs/srs/commit/70d5618979e5c8dc41b7cd87c78db7ca2b8a10e8) +* [H265: Support parse multiple NALUs in a frame. v6.0.3](https://github.com/ossrs/srs/commit/f316e9a0de3a892d25f2d8e7efd28ee9334f5bd6) +* [H265: Support HEVC over RTMP or HTTP-FLV. v6.0.2](https://github.com/ossrs/srs/commit/178e40a5fc3cf0856ace914ae61696a73007f5bf) +* [H265: Update mpegts.js to play HEVC over HTTP-TS/FLV. v6.0.1](https://github.com/ossrs/srs/commit/7e02d972ea74faad9f4f96ae881d5ece0b89f33b) + +## Known Issues + +1. HEVC over Safari WebRTC, only support WebRTC to WebRTC, doesn't support converting to RTMP. +2. Chrome/Firefox does not support HEVC, no any plan as I know. +3. Almost all browsers supports MSE, except iOS. HEVC over MSE requires hardware decoder. +4. Apart from mpegts.js, other H5 players such as hls.js/dash.js doesn't support HEVC. + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/hevc) + + diff --git a/versioned_docs/version-6.0/doc/hls.md b/versioned_docs/version-6.0/doc/hls.md new file mode 100644 index 00000000..2f7d2946 --- /dev/null +++ b/versioned_docs/version-6.0/doc/hls.md @@ -0,0 +1,499 @@ +--- +title: HLS +sidebar_label: HLS +hide_title: false +hide_table_of_contents: false +--- + +# HLS + +HLS is the best streaming protocol for adaptability and compatibility. Almost all devices in the world support HLS, +including PCs, Android, iOS, OTT, SmartTV, and more. Various browsers also support HLS well, including Chrome, Safari, +Firefox, Edge, and mobile browsers. + +If your users are diverse, especially if their devices have lower performance, HLS is the best choice. If you want +to be compatible with more devices, HLS is the best choice. If you want to distribute your live stream on any CDN +and globally, HLS is the best choice. + +Of course, HLS is not perfect; its main issue is high latency, usually around 30 seconds. Although it can be optimized +to about 8 seconds, different players' behavior may vary. Compared to other streaming protocols, the optimized latency +is still high. So if you care about live streaming latency, please use RTMP or HTTP-FLV protocols. + +The main application scenarios of HLS include: +* Cross-platform: The main live streaming solution for PCs is HLS, which can be played using the hls.js library. So if you choose one protocol for PC/Android/iOS, it's HLS. +* Strict stability requirements on iOS: HLS is the most stable on iOS, with stability comparable to RTMP and HTTP-FLV. +* Friendly CDN distribution: HLS is based on HTTP, so CDN integration and distribution are more complete than RTMP. HLS can switch between various CDNs. +* Fewer simple issues: HLS is a very simple streaming protocol, well supported by Apple. Android's support for HLS will also improve. + +HLS is the core protocol of SRS and will be continuously maintained and updated, constantly improving support for HLS. +SRS converts RTMP, SRT, or WebRTC streams into HLS streams, especially WebRTC, where SRS implements audio transcoding +capabilities. 
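+
+In outline, SRS ingests the stream over RTMP, SRT, or WebRTC, writes an m3u8 playlist plus a rolling
+window of ts segments, and serves them over plain HTTP to any HLS-capable player:
+
+```text
+FFmpeg/OBS ---RTMP/SRT/WebRTC---> SRS ---m3u8 + ts over HTTP---> hls.js / Safari / VLC
+```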
+ +## Usage + +SRS has built-in HLS support, which you can use with [docker](./getting-started.md) or [compile from source](./getting-started-build.md): + +```bash +docker run --rm -it -p 1935:1935 -p 8080:8080 ossrs/srs:5 \ + ./objs/srs -c conf/hls.conf +``` + +Use [FFmpeg(click to download)](https://ffmpeg.org/download.html) or [OBS(click to download)](https://obsproject.com/download) to stream: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +Open the following page to play the stream (if SRS is not on your local machine, replace localhost with the server IP): + +* HLS by SRS player: [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?stream=livestream.m3u8) + +> Note: Please wait about 10 seconds before playing the stream, otherwise it will fail, as it takes some time to generate the first segment. + +## Config + +The config for HLS: + +```bash +vhost __defaultVhost__ { + hls { + # whether the hls is enabled. + # if off, do not write hls(ts and m3u8) when publish. + # Overwrite by env SRS_VHOST_HLS_ENABLED for all vhosts. + # default: off + enabled on; + + # the hls fragment in seconds, the duration of a piece of ts. + # Overwrite by env SRS_VHOST_HLS_HLS_FRAGMENT for all vhosts. + # default: 10 + hls_fragment 10; + # the hls m3u8 target duration ratio, + # EXT-X-TARGETDURATION = hls_td_ratio * hls_fragment // init + # EXT-X-TARGETDURATION = max(ts_duration, EXT-X-TARGETDURATION) // for each ts + # Overwrite by env SRS_VHOST_HLS_HLS_TD_RATIO for all vhosts. + # default: 1.0 + hls_td_ratio 1.0; + # the audio overflow ratio. + # for pure audio, the duration to reap the segment. + # for example, the hls_fragment is 10s, hls_aof_ratio is 1.2, + # the segment will reap to 12s for pure audio. + # Overwrite by env SRS_VHOST_HLS_HLS_AOF_RATIO for all vhosts. + # default: 1.2 + hls_aof_ratio 1.2; + # the hls window in seconds, the number of ts in m3u8. + # Overwrite by env SRS_VHOST_HLS_HLS_WINDOW for all vhosts. + # default: 60 + hls_window 60; + # the error strategy. can be: + # ignore, disable the hls. + # disconnect, require encoder republish. + # continue, ignore failed try to continue output hls. + # Overwrite by env SRS_VHOST_HLS_HLS_ON_ERROR for all vhosts. + # default: continue + hls_on_error continue; + # the hls output path. + # the m3u8 file is configured by hls_path/hls_m3u8_file, the default is: + # ./objs/nginx/html/[app]/[stream].m3u8 + # the ts file is configured by hls_path/hls_ts_file, the default is: + # ./objs/nginx/html/[app]/[stream]-[seq].ts + # @remark the hls_path is compatible with srs v1 config. + # Overwrite by env SRS_VHOST_HLS_HLS_PATH for all vhosts. + # default: ./objs/nginx/html + hls_path ./objs/nginx/html; + # the hls m3u8 file name. + # we supports some variables to generate the filename. + # [vhost], the vhost of stream. + # [app], the app of stream. + # [stream], the stream name of stream. + # Overwrite by env SRS_VHOST_HLS_HLS_M3U8_FILE for all vhosts. + # default: [app]/[stream].m3u8 + hls_m3u8_file [app]/[stream].m3u8; + # the hls ts file name. + # we supports some variables to generate the filename. + # [vhost], the vhost of stream. + # [app], the app of stream. + # [stream], the stream name of stream. + # [2006], replace this const to current year. + # [01], replace this const to current month. + # [02], replace this const to current date. + # [15], replace this const to current hour. + # [04], replace this const to current minute. 
+ # [05], replace this const to current second. + # [999], replace this const to current millisecond. + # [timestamp],replace this const to current UNIX timestamp in ms. + # [seq], the sequence number of ts. + # [duration], replace this const to current ts duration. + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/dvr#custom-path + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/delivery-hls#hls-config + # Overwrite by env SRS_VHOST_HLS_HLS_TS_FILE for all vhosts. + # default: [app]/[stream]-[seq].ts + hls_ts_file [app]/[stream]-[seq].ts; + # the hls entry prefix, which is base url of ts url. + # for example, the prefix is: + # http://your-server/ + # then, the ts path in m3u8 will be like: + # http://your-server/live/livestream-0.ts + # http://your-server/live/livestream-1.ts + # ... + # Overwrite by env SRS_VHOST_HLS_HLS_ENTRY_PREFIX for all vhosts. + # optional, default to empty string. + hls_entry_prefix http://your-server; + # the default audio codec of hls. + # when codec changed, write the PAT/PMT table, but maybe ok util next ts. + # so user can set the default codec for mp3. + # the available audio codec: + # aac, mp3, an + # Overwrite by env SRS_VHOST_HLS_HLS_ACODEC for all vhosts. + # default: aac + hls_acodec aac; + # the default video codec of hls. + # when codec changed, write the PAT/PMT table, but maybe ok util next ts. + # so user can set the default codec for pure audio(without video) to vn. + # the available video codec: + # h264, vn + # Overwrite by env SRS_VHOST_HLS_HLS_VCODEC for all vhosts. + # default: h264 + hls_vcodec h264; + # whether cleanup the old expired ts files. + # Overwrite by env SRS_VHOST_HLS_HLS_CLEANUP for all vhosts. + # default: on + hls_cleanup on; + # If there is no incoming packets, dispose HLS in this timeout in seconds, + # which removes all HLS files including m3u8 and ts files. + # @remark 0 to disable dispose for publisher. + # @remark apply for publisher timeout only, while "etc/init.d/srs stop" always dispose hls. + # Overwrite by env SRS_VHOST_HLS_HLS_DISPOSE for all vhosts. + # default: 120 + hls_dispose 120; + # whether wait keyframe to reap segment, + # if off, reap segment when duration exceed the fragment, + # if on, reap segment when duration exceed and got keyframe. + # Overwrite by env SRS_VHOST_HLS_HLS_WAIT_KEYFRAME for all vhosts. + # default: on + hls_wait_keyframe on; + # whether use floor for the hls_ts_file path generation. + # if on, use floor(timestamp/hls_fragment) as the variable [timestamp], + # and use enhanced algorithm to calc deviation for segment. + # @remark when floor on, recommend the hls_segment>=2*gop. + # Overwrite by env SRS_VHOST_HLS_HLS_TS_FLOOR for all vhosts. + # default: off + hls_ts_floor off; + # the max size to notify hls, + # to read max bytes from ts of specified cdn network, + # @remark only used when on_hls_notify is config. + # Overwrite by env SRS_VHOST_HLS_HLS_NB_NOTIFY for all vhosts. + # default: 64 + hls_nb_notify 64; + + # Whether enable hls_ctx for HLS streaming, for which we create a "fake" connection for HTTP API and callback. + # For each HLS streaming session, we use a child m3u8 with a session identified by query "hls_ctx", it simply + # work as the session id. + # Once the HLS streaming session is created, we will cleanup it when timeout in 2*hls_window seconds. So it + # takes a long time period to identify the timeout. 
+ # Now we got a HLS stremaing session, just like RTMP/WebRTC/HTTP-FLV streaming, we're able to stat the session + # as a "fake" connection, do HTTP callback when start playing the HLS streaming. You're able to do querying and + # authentication. + # Note that it will make NGINX edge cache always missed, so never enable HLS streaming if use NGINX edges. + # Overwrite by env SRS_VHOST_HLS_HLS_CTX for all vhosts. + # Default: on + hls_ctx on; + # For HLS pseudo streaming, whether enable the session for each TS segment. + # If enabled, SRS HTTP API will show the statistics about HLS streaming bandwidth, both m3u8 and ts file. Please + # note that it also consumes resource, because each ts file should be served by SRS, all NGINX cache will be + # missed because we add session id to each ts file. + # Note that it will make NGINX edge cache always missed, so never enable HLS streaming if use NGINX edges. + # Overwrite by env SRS_VHOST_HLS_HLS_TS_CTX for all vhosts. + # Default: on + hls_ts_ctx on; + + # whether using AES encryption. + # Overwrite by env SRS_VHOST_HLS_HLS_KEYS for all vhosts. + # default: off + hls_keys on; + # the number of clear ts which one key can encrypt. + # Overwrite by env SRS_VHOST_HLS_HLS_FRAGMENTS_PER_KEY for all vhosts. + # default: 5 + hls_fragments_per_key 5; + # the hls key file name. + # we supports some variables to generate the filename. + # [vhost], the vhost of stream. + # [app], the app of stream. + # [stream], the stream name of stream. + # [seq], the sequence number of key corresponding to the ts. + # Overwrite by env SRS_VHOST_HLS_HLS_KEY_FILE for all vhosts. + hls_key_file [app]/[stream]-[seq].key; + # the key output path. + # the key file is configed by hls_path/hls_key_file, the default is: + # ./objs/nginx/html/[app]/[stream]-[seq].key + # Overwrite by env SRS_VHOST_HLS_HLS_KEY_FILE_PATH for all vhosts. + hls_key_file_path ./objs/nginx/html; + # the key root URL, use this can support https. + # @remark It's optional. + # Overwrite by env SRS_VHOST_HLS_HLS_KEY_URL for all vhosts. + hls_key_url https://localhost:8080; + + # Special control controls. + ########################################### + # Whether calculate the DTS of audio frame directly. + # If on, guess the specific DTS by AAC samples, please read https://github.com/ossrs/srs/issues/547#issuecomment-294350544 + # If off, directly turn the FLV timestamp to DTS, which might cause corrupt audio stream. + # @remark Recommend to set to off, unless your audio stream sample-rate and timestamp is not correct. + # Overwrite by env SRS_VHOST_HLS_HLS_DTS_DIRECTLY for all vhosts. + # Default: on + hls_dts_directly on; + + # on_hls, never config in here, should config in http_hooks. + # for the hls http callback, @see http_hooks.on_hls of vhost hooks.callback.srs.com + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/delivery-hls#http-callback + # @see https://ossrs.io/lts/en-us/docs/v4/doc/delivery-hls#http-callback + + # on_hls_notify, never config in here, should config in http_hooks. + # we support the variables to generate the notify url: + # [app], replace with the app. + # [stream], replace with the stream. + # [param], replace with the param. + # [ts_url], replace with the ts url. + # for the hls http callback, @see http_hooks.on_hls_notify of vhost hooks.callback.srs.com + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/delivery-hls#on-hls-notify + # @see https://ossrs.io/lts/en-us/docs/v4/doc/delivery-hls#on-hls-notify + } +} +``` + +> Note: These settings are only for playing HLS. 
For streaming settings, please follow your protocol, like referring to [RTMP](./rtmp.md#config), [SRT](./srt.md#config), or [WebRTC](./webrtc.md#config) streaming configurations.
+
+Here are the main settings:
+* enabled: Turn HLS on/off, default is off.
+* hls_fragment: Seconds, specify the minimum length of ts slices. For the actual length of ts files, please refer to the detailed description of [HLS TS Duration](#hls-ts-duration).
+* hls_td_ratio: Normal slice duration multiple. For the actual length of ts files, please refer to the detailed description of [HLS TS Duration](#hls-ts-duration).
+* hls_wait_keyframe: Whether to cut slices at keyframes, i.e., wait for a keyframe before slicing. For the actual length of ts files, please refer to the detailed description of [HLS TS Duration](#hls-ts-duration).
+* hls_aof_ratio: Pure audio slice duration multiple. For pure audio, when the ts duration exceeds the configured hls_fragment multiplied by this factor, the file is cut. For the actual length of ts files, please refer to the detailed description of [HLS TS Duration](#hls-ts-duration).
+* hls_window: Seconds, specify the HLS window size, i.e., the sum of ts file durations in the m3u8, which determines the number of ts files in the m3u8. For more details, refer to [HLS TS Files](#hls-ts-files).
+* hls_path: The path where the HLS m3u8 and ts files are saved. Both m3u8 and ts files are saved in this directory.
+* hls_m3u8_file: The file name of the HLS m3u8, including replaceable `[vhost]`, `[app]`, and `[stream]` variables.
+* hls_ts_file: The file name of the HLS ts, including a series of replaceable variables. Refer to [dvr variables](./dvr.md#custom-path). Also, `[seq]` is the ts sequence number.
+* hls_entry_prefix: The base URL of ts. Optional, default is an empty string; when not empty, it is added in front of the ts path as the base URL.
+* hls_acodec: Default audio codec. When the stream codec changes, the PMT/PAT information will be updated; the default is aac, so the default PMT/PAT information is aac; if the stream is mp3, this parameter can be set to mp3 to avoid PMT/PAT changes.
+* hls_vcodec: Default video codec. When the stream codec changes, the PMT/PAT information will be updated; the default is h264. If it is a pure audio HLS, it can be set to vn, which can reduce the time for SRS to detect pure audio and directly enter pure audio mode.
+* hls_cleanup: Whether to delete expired ts slices that are not in the hls_window. You can turn off ts slice cleanup to implement time-shifting and storage, using your own slice management system.
+* hls_dispose: When there is no stream, the HLS cleanup expiration time (seconds). When the system restarts or exceeds this time, all HLS files, including m3u8 and ts, will be cleaned up. If set to 0, no cleanup will be done.
+* hls_nb_notify: The length of data read from the notify server.
+* on_hls: When a slice is generated, call back this URL using POST. Used to integrate with your own system, such as implementing slice movement.
+* on_hls_notify: When a slice is generated, call back this URL using GET. Used to integrate with your system; you can use the `[ts_url]` variable to implement pre-distribution (i.e., download a ts slice once).
+
+## HLS TS Duration
+
+How is the duration of HLS TS segments determined? It depends on the configuration and the characteristics of the stream.
+
+If there is video, the segment duration is `max(hls_fragment*hls_td_ratio, gop_size*N)`, which is the maximum value of `hls_fragment` and `gop_size`. 
The `gop_size` is determined by the encoder, for example, OBS can set the GOP size in seconds, while FFmpeg uses the number of frames combined with the frame rate to calculate seconds. + +For example, if the stream's frame rate is 25 and the GOP is 50 frames, then the `gop_size` is 2 seconds: + +* If `hls_fragment` is 10 seconds, the final TS segment duration is 10 seconds. +* If `hls_fragment` is 5 seconds, the final TS segment duration is 6 seconds, with 3 GOPs. +* If `hls_fragment` is 5 seconds and `hls_td_ratio` is 2, the final TS segment duration is 10 seconds. + +If `hls_wait_keyframe off` is configured, the GOP size is no longer considered, and the TS segment duration is determined by `hls_fragment` regardless of the GOP size. For example, if the GOP is 10 seconds: + +* If `hls_fragment` is 10 seconds, the final TS segment duration is 10 seconds. +* If `hls_fragment` is 5 seconds, the final TS segment duration is 5 seconds. +* If `hls_fragment` is 3 seconds and `hls_td_ratio` is 2, the final TS segment duration is 6 seconds. + +> Note: Turning off `hls_wait_keyframe` can reduce segment size and latency, but some players may experience screen artifacts when starting playback with a non-keyframe. + +For audio-only HLS, the segment duration is determined by `hls_fragment*hls_aof_ratio`: + +* If `hls_fragment` is 10 seconds and `hls_aof_ratio` is 1.2, the final TS segment duration is 12 seconds. +* If `hls_fragment` is 5 seconds and `hls_aof_ratio` is 1, the final TS segment duration is 5 seconds. + +Note that if the segment duration is unusually long, exceeding a certain size (usually 3 times the maximum segment length), it will be discarded. + +## HLS TS Files + +The number of TS files in the m3u8 is determined by the TS duration and `hls_window`. When the total duration of TS files exceeds `hls_window`, the first segment in the m3u8 is discarded until the total TS duration is within the configured range. + +SRS ensures the following formula: + +```bash +hls_window >= sum(duration of each ts in m3u8) +``` + +For example, if `hls_window` is 60 seconds and `hls_fragment` is 10 seconds, and the actual TS segment duration is 10 seconds, there will be 6 TS files in the m3u8. The actual TS segment duration may be larger than `hls_fragment`, see [HLS TS Duration](#hls-ts-duration) for details. + +## HTTP Callback + +You can set up an `on_hls` callback in the `http_hooks` section, not in the HLS section. + +Note: HLS hot backup can be implemented based on this callback, see [#351](https://github.com/ossrs/srs/issues/351). + +Note: HLS hot backup must ensure that the slices on both servers are exactly the same, because the load balancer or edge may fetch slices from both servers. Ensuring that the slices on both servers are exactly the same is a very complex streaming media issue. However, through the callback and business system, you can achieve a simple and reliable HLS hot backup system by choosing slices from both servers. + +## HLS Authentication + +SRS supports HLS client playback and online user statistics. By default, it will enable `hls_ctx` and `hls_ts_ctx`. This way, HLS and other protocols can implement authentication playback and data statistics through callbacks. For example, when playing HLS, you can use the `on_play` callback to return an error and reject client playback. 
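+
+For instance, a minimal `http_hooks` sketch looks like the following, where the callback URL is only an
+example and the full protocol is described in [HTTP Callback](./http-callback.md). Your server returns
+`{"code": 0}` to allow playback and any non-zero code to reject it:
+
+```bash
+vhost __defaultVhost__ {
+    http_hooks {
+        enabled on;
+        # Called for HLS sessions as well, because hls_ctx creates a session per client.
+        on_play http://127.0.0.1:8085/api/v1/sessions;
+    }
+}
+```
+
+The `hls_ctx` and `hls_ts_ctx` options that make this possible are enabled by default: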
+
+```bash
+vhost __defaultVhost__ {
+    hls {
+        enabled on;
+        hls_ctx on;
+        hls_ts_ctx on;
+    }
+}
+```
+
+However, this feature will cause the HLS cache to fail on CDN, because each playback gets a different ctx_id, which works like a session ID. Therefore, in an [HLS Cluster](./nginx-for-hls.md), you must disable these two options.
+
+## HLS Dispose
+
+If the stream is stopped, the HLS client can still play the previous content because the slice files still exist.
+
+Sometimes during a live broadcast, you may need to temporarily stop the stream, change encoding parameters or streaming devices, and then restart the stream. Therefore, SRS should not delete HLS files immediately when the stream stops.
+
+By default, SRS cleans up the HLS slice files after the time configured by `hls_dispose`, which defaults to 120 seconds (2 minutes).
+
+```bash
+vhost __defaultVhost__ {
+    hls {
+        enabled on;
+        hls_dispose 120;
+    }
+}
+```
+
+If you need to clean up faster, you can shorten this cleanup time. However, it should not be too short; it is recommended not to set it lower than `hls_window`, otherwise restarting the stream may trigger an early cleanup and make the HLS stream inaccessible to the player.
+
+## HLS in RAM
+
+If you need to increase the number of concurrent HLS streams, you can try delivering HLS directly from memory without writing to disk.
+
+You can mount memory as a disk directory and then write HLS slices to the memory disk:
+
+```bash
+mkdir -p /ramdisk &&
+mount -o size=7G -t tmpfs none /ramdisk
+```
+
+> Note: To unmount the memory disk, use the command `umount /ramdisk`.
+
+> Note: If you don't have many streams and don't need much disk space, you can write HLS slices to the `/tmp` directory, which is a memory disk by default.
+
+Then configure `hls_path` or create a soft link to the directory.
+
+## HLS Delivery Cluster
+
+To deploy an HLS distribution cluster and edge distribution cluster for your own CDN to handle a large number of viewers, please refer to [Nginx for HLS](./nginx-for-hls.md).
+
+## HLS Low Latency
+
+How to reduce HLS latency? The key is to reduce the slice duration and the number of TS files in the m3u8. SRS's default configuration is 10 seconds per slice and a 60-second m3u8 window, resulting in a latency of about 30 seconds. Some players start requesting slices from the middle position, so there will be a delay of about 3 slices.
+
+You can adjust the following settings to reduce latency to about 6-8 seconds:
+
+* Reduce the GOP size, e.g., set OBS's GOP to 1 second or FFmpeg's GOP to the number of FPS frames.
+* Reduce the encoder's delay, for example, set OBS's `Profile` to `baseline` and choose `Tune` as `zerolatency`.
+* Reduce `hls_fragment`, e.g., set it to 2 seconds or 1 second.
+* Reduce `hls_window`, e.g., set it to 10 seconds or 5 seconds.
+* Use low-latency players like hls.js, ijkplayer, or ffplay, and avoid high-latency players like VLC.
+
+Refer to the configuration file `conf/hls.realtime.conf`:
+
+```bash
+vhost __defaultVhost__ {
+    hls {
+        enabled on;
+        hls_fragment 2;
+        hls_window 10;
+    }
+}
+```
+
+> Note: If you can't adjust the encoder's GOP size, consider setting `hls_wait_keyframe off` to ignore the GOP, but this may cause screen artifacts. Test your device's compatibility.
+
+Of course, you can't reduce it too much, as it may cause insufficient buffering for the player, or skipping when the player's network is poor, possibly resulting in playback failure. The lower the latency, the higher the chance of buffering. 
+HLS latency cannot be less than about 5 seconds, especially once CDN and player compatibility are taken into account.
+
+Even after these adjustments, the HLS delay will not drop below roughly 5 seconds, and the LL-HLS protocol cannot reduce it further, because LL-HLS only addresses the impact of the initial GOP when playback starts. In the settings above, we have already reduced the GOP's impact through the encoder configuration. The remaining delay comes from network jitter and the player's buffering strategy, which HLS itself cannot eliminate.
+If you need latency within 5 seconds, consider using protocols like [HTTP-FLV](./flv.md), [SRT](./srt.md), or [WebRTC](./webrtc.md).
+
+## ON HLS Notify
+
+You can configure `on_hls_notify` for CDN pre-distribution. It must be set in `http_hooks`, not in the HLS configuration.
+
+## HLS Audio Corrupt
+
+HLS may suffer from loud-noise (corrupted audio) issues, caused by a small timestamp error that the AAC sample rate introduces when converting between FLV (tbn=1000) and TS (tbn=90000). SRS3 uses the number of AAC samples to calculate the exact timestamp; for details, refer to [HLS Loud Noise](https://github.com/ossrs/srs/issues/547#issuecomment-294350544).
+
+> Note: To solve the HLS loud noise problem, you need to manually disable `hls_dts_directly` (set it to off).
+
+After this correction in SRS3, it was found that some audio streams carry broken timestamps, so the timestamps calculated from the AAC sample count become incorrect for them. The configuration item `hls_dts_directly` is therefore provided to force the use of the original timestamps; refer to [HLS Force Original Timestamp](https://github.com/ossrs/srs/issues/547#issuecomment-563942711).
+
+## HLS Audio Only
+
+SRS supports delivering audio-only HLS streams. When the RTMP stream has no video and the audio is AAC (you can use transcoding to convert to AAC, refer to [Usage: Transcode2HLS](./sample-transcode-to-hls.md)), SRS slices only the audio.
+
+If the RTMP stream already has both video and audio and you need an audio-only HLS stream, you can use transcoding to remove the video, refer to [Transcoding: Disable Stream](./ffmpeg.md#%E7%A6%81%E7%94%A8), and then deliver the audio-only stream.
+
+Delivering audio-only streams requires no special configuration; it works just like normal HLS delivery.
+
+## HLS and Forward
+
+Forwarded streams are not distinguished from ordinary streams. If the forwarded stream's vhost is configured with HLS, that HLS configuration is applied for slicing.
+
+Therefore, you can transcode the original stream to ensure it meets the h.264/aac requirement, and then forward it to multiple vhosts configured with HLS for slicing. This supports hot backup across multiple origin servers.
+
+## HLS and Transcode
+
+HLS requires the RTMP stream to be encoded as h.264+aac/mp3; otherwise HLS is automatically disabled, and you may see the RTMP stream but no HLS stream (or the HLS stream you see was left over from a previous session).
+
+Transcoding the RTMP stream allows SRS to accept an RTMP stream in any encoding and convert it to the h.264/aac/mp3 encoding required by HLS.
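+
+For example, whether you run FFmpeg yourself or use the SRS transcode engine shown next, the conversion boils down to a command like the following (a sketch only; the stream URLs are placeholders, and the bitrate/preset should be tuned for your content):
+
+```bash
+ffmpeg -i rtmp://127.0.0.1/live/source \
+    -c:v libx264 -preset veryfast -b:v 500k \
+    -c:a aac -b:a 64k \
+    -f flv rtmp://127.0.0.1/live/livestream
+```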
+ +When configuring Transcode, if you need to control the ts length, you need to [configure the ffmpeg encoding gop](http://ffmpeg.org/ffmpeg-codecs.html#Options-7), for example: +```bash +vhost hls.transcode.vhost.com { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine hls { + enabled on; + vfilter { + } + vcodec libx264; + vbitrate 500; + vfps 20; + vwidth 768; + vheight 320; + vthreads 2; + vprofile baseline; + vpreset superfast; + vparams { + g 100; + } + acodec libaacplus; + abitrate 45; + asample_rate 44100; + achannels 2; + aparams { + } + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` +This FFMPEG transcoding parameter specifies the gop duration as 100/20=5 seconds, fps frame rate (vfps=20), and gop frame count (g=100). + +## HLS Multiple Bitrate + +SRS currently does not support HLS adaptive bitrate, as it generally requires transcoding a single stream into multiple streams and requires GOP alignment. You can use FFmpeg to achieve this, refer to [How to generate multiple resolutions HLS using FFmpeg for live streaming](https://stackoverflow.com/a/71985380/17679565). + +## Apple Examples + +Apple's HLS example files: + +https://developer.apple.com/library/ios/technotes/tn2288/_index.html + +## HLS Encryption + +SRS3 supports slice encryption, for specific usage, refer to [#1093](https://github.com/ossrs/srs/issues/1093#issuecomment-415971022). + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/hls) + diff --git a/versioned_docs/version-6.0/doc/http-api.md b/versioned_docs/version-6.0/doc/http-api.md new file mode 100644 index 00000000..6b266610 --- /dev/null +++ b/versioned_docs/version-6.0/doc/http-api.md @@ -0,0 +1,533 @@ +--- +title: HTTP API +sidebar_label: HTTP API +hide_title: false +hide_table_of_contents: false +--- + +# HTTP API + +SRS provides HTTP api, to external application to manage SRS, and support crossdomain for js. + +Once HTTP API enabled, you can use [srs-console](http://ossrs.net/console/) to connect to your SRS server. + +The workflow is: + +```text ++-------------------------+ +-------+ ++ Chrome/Your Application +--HTTP-API-->--+ SRS + ++-------------------------+ +-------+ +``` + +You can use Chrome or your application, to request the HTTP API of SRS to get the state of SRS. + +## Goals + +The HTTP API of SRS follows the simple priciple: + +* Only provides API in json format, both request and json are json. +* Please use [srs-console](https://github.com/ossrs/srs-console) to access API. +* When error, response in HTTP status or code in json. + +## Build + +SRS always enable the http api, read [configure](./install.md) + +```bash +./configure && make +``` + +## Config + +The config also need to enable it: + +```bash +listen 1935; +# system statistics section. +# the main cycle will retrieve the system stat, +# for example, the cpu/mem/network/disk-io data, +# the http api, for instance, /api/v1/summaries will show these data. +# @remark the heartbeat depends on the network, +# for example, the eth0 maybe the device which index is 0. +stats { + # the index of device ip. + # we may retrieve more than one network device. + # default: 0 + network 0; + # the device name to stat the disk iops. + # ignore the device of /proc/diskstats if not configed. + disk sda sdb xvda xvdb; +} +# api of srs. +# the http api config, export for external program to manage srs. 
+# user can access http api of srs in browser directly, for instance, to access by: +# curl http://192.168.1.170:1985/api/v1/reload +# which will reload srs, like cmd killall -1 srs, but the js can also invoke the http api, +# where the cli can only be used in shell/terminate. +http_api { + # whether http api is enabled. + # default: off + enabled on; + # the http api listen entry is <[ip:]port> + # for example, 192.168.1.100:1985 + # where the ip is optional, default to 0.0.0.0, that is 1985 equals to 0.0.0.0:1985 + # default: 1985 + listen 1985; + # whether enable crossdomain request. + # default: on + crossdomain on; + # the HTTP RAW API is more powerful api to change srs state and reload. + raw_api { + # whether enable the HTTP RAW API. + # Overwrite by env SRS_HTTP_API_RAW_API_ENABLED + # default: off + enabled off; + # whether enable rpc reload. + # Overwrite by env SRS_HTTP_API_RAW_API_ALLOW_RELOAD + # default: off + allow_reload off; + # whether enable rpc query. + # Always off by https://github.com/ossrs/srs/issues/2653 + #allow_query off; + # whether enable rpc update. + # Always off by https://github.com/ossrs/srs/issues/2653 + #allow_update off; + } + # the auth is authentication for http api + auth { + # whether enable the HTTP AUTH. + # Overwrite by env SRS_HTTP_API_AUTH_ENABLED + # default: off + enabled on; + # The username of Basic authentication: + # Overwrite by env SRS_HTTP_API_AUTH_USERNAME + username admin; + # The password of Basic authentication: + # Overwrite by env SRS_HTTP_API_AUTH_PASSWORD + password admin; + } + # For https_api or HTTPS API. + https { + # Whether enable HTTPS API. + # default: off + enabled on; + # The listen endpoint for HTTPS API. + # default: 1986 + listen 1986; + # The SSL private key file, generated by: + # openssl genrsa -out server.key 2048 + # default: ./conf/server.key + key ./conf/server.key; + # The SSL public cert file, generated by: + # openssl req -new -x509 -key server.key -out server.crt -days 3650 -subj "/C=CN/ST=Beijing/L=Beijing/O=Me/OU=Me/CN=ossrs.net" + # default: ./conf/server.crt + cert ./conf/server.crt; + } +} +vhost __defaultVhost__ { +} +``` + +The `http_api` enable the HTTP API, and `stats` used for SRS to stat the system info, including: + +* network: Used for heartbeat to report the network info, where heartbeat used to report system info. +* disk: Used to stat the specified disk iops. You can use command `cat /proc/diskstats` to get the right disk names, for instance, xvda. + +## Start + +Start SRS: `./objs/srs -c http-api.conf` + +Access api, open the url in web browser: + +* [http://127.0.0.1:1985/api/v1](http://127.0.0.1:1985/api/v1) +* [https://127.0.0.1:1986/api/v1](https://127.0.0.1:1986/api/v1) + +> Remark: Please use your server ip instead. + +## Performance + +The HTTP api supports 370 request per seconds, please test by AB(Apache Benchmark). + +## Access Api + +Use web brower, or curl, or other http library. + +SRS provides api urls list, no need to remember: +* code, an int error code. 0 is success. +* urls, the url lists, can be access. +* data, the last level api serve data. 
+
+Root directory:
+
+```bash
+# curl http://192.168.1.102:1985/
+    "urls": {
+        "api": "the api root"
+    }
+```
+
+Go on:
+
+```bash
+# curl http://192.168.1.102:1985/api/v1/versions
+    "major": 0,
+    "minor": 9,
+    "revision": 43,
+    "version": "0.9.43"
+```
+
+Or:
+
+```bash
+# curl http://192.168.1.102:1985/api/v1/authors
+    "primary_authors": "xxx",
+    "contributors_link": "https://github.com/ossrs/srs/blob/master/AUTHORS.txt",
+    "contributors": "xxx"
+```
+
+The SRS API is self-describing.
+
+## Error Code
+
+SRS reports errors in either the HTTP status or the HTTP body.
+
+For example, SRS may respond with an HTTP error, where the HTTP status is not 200:
+
+```
+winlin:~ winlin$ curl -v http://127.0.0.1:1985 && echo ""
+< HTTP/1.1 404 Not Found
+< Connection: Keep-Alive
+< Content-Length: 9
+< Content-Type: text/plain; charset=utf-8
+< Server: SRS/2.0.184
+<
+Not Found
+```
+
+Or SRS may respond with a non-zero code while the HTTP status is 200:
+
+```
+winlin:~ winlin$ curl -v http://127.0.0.1:1985/api/v1/tests/errors && echo ""
+< HTTP/1.1 200 OK
+< Connection: Keep-Alive
+< Content-Length: 12
+< Content-Type: application/json
+< Server: SRS/2.0.184
+<
+{"code":100}
+```
+
+Users should handle both of these error styles.
+
+## Crossdomain
+
+The SRS HTTP API supports JS cross-domain requests, so HTML/JS pages can invoke the HTTP API of SRS directly.
+
+SRS supports the following main CORS styles:
+
+* OPTIONS: jQuery can directly access the API with CORS, where the browser sends an OPTIONS preflight request first, then the API request.
+* JSONP: jQuery/AngularJS can send JSONP cross-domain requests to the SRS API, specifying the callback function name with the query string parameter `callback`.
+* JSONP-DELETE: JSONP only supports GET, so the `method` query string parameter is used to override the HTTP method for JSONP.
+
+For example, the JSONP crossdomain request:
+
+```
+GET http://localhost:1985/api/v1/vhosts/?callback=JSON_CALLBACK
+JSON_CALLBACK({"code":0,"server":13449})
+GET http://localhost:1985/api/v1/vhosts/100?callback=JSON_CALLBACK&method=DELETE
+JSON_CALLBACK({"code":0})
+```
+
+## HTTPS API
+
+SRS supports the HTTPS API by turning `https` on:
+
+```
+http_api {
+    enabled on;
+    listen 1985;
+    https {
+        # Whether enable HTTPS API.
+        # default: off
+        enabled on;
+        # The listen endpoint for HTTPS API.
+        # default: 1990
+        listen 1990;
+        # The SSL private key file, generated by:
+        #       openssl genrsa -out server.key 2048
+        # default: ./conf/server.key
+        key ./conf/server.key;
+        # The SSL public cert file, generated by:
+        #       openssl req -new -x509 -key server.key -out server.crt -days 3650 -subj "/C=CN/ST=Beijing/L=Beijing/O=Me/OU=Me/CN=ossrs.net"
+        # default: ./conf/server.crt
+        cert ./conf/server.crt;
+    }
+}
+```
+
+> Remark: Please use your own HTTPS key and cert files.
+
+> Note: To enable HTTPS live streaming, please read [HTTPS FLV Live Stream](./flv.md#https-flv-live-stream)
+
+## HTTP and HTTPS Proxy
+
+SRS works well with HTTP/HTTPS proxies such as [Nginx](./http-server.md#nginx-proxy), [HTTPX](./http-server.md#httpx-proxy),
+[CaddyServer](./http-server.md#caddy-proxy), and so on.
+
+## Server ID
+
+Each API response contains a `server` field, which identifies the server. When the server ID changes, it means SRS has restarted, and all information obtained before that is invalid.
+
+## API Navigation
+
+SRS provides navigation for its APIs.
+ +User can access the `http://192.168.1.102:1985/api/v1`, where: + +| API | Example | Description | +| --- | -------- | --------- | +| server | 4481 | The identity of SRS | +| versions | /api/v1/versions | the version of SRS | +| summaries | /api/v1/summaries | the summary(pid, argv, pwd, cpu, mem) of SRS | +| rusages | /api/v1/rusages | the rusage of SRS | +| self_proc_stats | /api/v1/self_proc_stats | the self process stats | +| system_proc_stats | /api/v1/system_proc_stats | the system process stats | +| meminfos | /api/v1/meminfos | the meminfo of system | +| authors | /api/v1/authors | the license, copyright, authors and contributors | +| features | /api/v1/features | the supported features of SRS | +| requests | /api/v1/requests | the request itself, for http debug | +| vhosts | /api/v1/vhosts | manage all vhosts or specified vhost | +| streams | /api/v1/streams | manage all streams or specified stream | +| clients | /api/v1/clients | manage all clients or specified client, default query top 10 clients | +| configs | /api/v1/configs | RAW API for CUID the configs | +| publish | /rtc/v1/publish/ | The push stream API for WebRTC | +| play | /rtc/v1/play/ | The play stream API for WebRTC | + +## WebRTC Publish + +In order to push stream over WebRTC to SRS, SRS supports [WHIP](https://datatracker.ietf.org/doc/draft-ietf-wish-whip/). +The request is defined as: + +```text +POST /rtc/v1/whip/?app=live&stream=livestream + +Body in SDP, the Content-type is application/sdp: + +v=0 +...... +a=ssrc:2064016335 label:c8243ce9-ace5-4d17-9184-41a2543101b5 +``` + +SRS responses the SDP answer as the HTTP response: + +```text +v=0 +...... +a=candidate:1 1 udp 2130706431 172.18.0.4 8000 typ host generation 0 +``` + +> Note: The HTTP Status is 201, not 200, according to WHIP specification. + +Please also see examples at [srs.sdk.js](https://github.com/ossrs/srs/blob/develop/trunk/research/players/js/srs.sdk.js) and [srs-unity: Publisher](https://github.com/ossrs/srs-unity#usage-publisher). + +## WebRTC Play + +In order to pull stream over WebRTC from SRS, SRS also supports [WHEP](https://datatracker.ietf.org/doc/draft-murillo-whep/). +The request is defined as: + +```text +POST /rtc/v1/whep/?app=live&stream=livestream + +Body in SDP, the Content-type is application/sdp: + +v=0 +...... +a=ssrc:2064016335 label:c8243ce9-ace5-4d17-9184-41a2543101b5 +``` + +> Note: Although WHIP is defined to push stream to SRS, but you're able to use it for pulling stream from SRS. There is another [WHEP](https://datatracker.ietf.org/doc/draft-murillo-whep/) for players, but not a RFC draft. + +SRS responses the SDP answer as the HTTP response: + +``` +v=0 +...... +a=candidate:1 1 udp 2130706431 172.18.0.4 8000 typ host generation 0 +``` + +> Note: The HTTP Status is 201, not 200, according to WHIP specification. + +Please also see examples at [srs.sdk.js](https://github.com/ossrs/srs/blob/develop/trunk/research/players/js/srs.sdk.js) and [srs-unity: Player](https://github.com/ossrs/srs-unity#usage-player). + +## Summaries + +User can get the system summaries, for instance, the memory, cpu, network, load usage. + +Please access the url `http://192.168.1.170:1985/api/v1/summaries` + +## Vhosts + +SRS provides http api to query all vhosts. + +The http api vhost url: `http://192.168.1.102:1985/api/v1/vhosts` + +To process specified vhost by id, for instance `http://192.168.1.102:1985/api/v1/vhosts/3756` + +## Streams + +SRS provides http api to query all streams. 
+ +The http api stream url: `http://192.168.1.102:1985/api/v1/streams` + +Parameters in query string: + +* `?start=N`: The start index, default is 0. +* `?count=N`: The max number of result, default is 10. + +To process specified stream by id, for instance `http://192.168.1.102:1985/api/v1/streams/3756` + +## Clients + +SRS provides http api to query clients. + +The http api client url: `http://192.168.1.102:1985/api/v1/clients` + +Parameters in query string: + +* `?start=N`: The start index, default is 0. +* `?count=N`: The max number of result, default is 10. + +To process specified client by id, for instance `http://192.168.1.102:1985/api/v1/clients/3756` + +## Kickoff Client + +SRS provides HTTP RESTful api to kickoff user: + +``` +DELETE /api/v1/clients/{id} +``` + +User can get the id of client to kickoff: + +``` +GET /api/v1/clients +``` + +User can get the id of publish client from streams api: + +``` +GET /api/v1/streams +or GET /api/v1/streams/6745 +``` + +The client cid is the info from stream api `stream.publish.cid`: + +``` +1. GET http://192.168.1.170:1985/api/v1/streams/6745 +2. Response stream.publish.cid: +stream: { + publish: { + active: true, + cid: 107 + } +} +3. DELETE http://192.168.1.170:1985/api/v1/clients/107 +``` + +Remark: User can use [HTTP REST Tool](http://ossrs.net/srs.release/http-rest/index.html) to send a request. + +Remark: User can use linux tool `curl` to start HTTP request. For example: + +``` +curl -v -X GET http://192.168.1.170:1985/api/v1/clients/426 && echo "" +curl -v -X DELETE http://192.168.1.170:1985/api/v1/clients/426 && echo "" +``` + +## Persistence Config + +This feature is disabled by SRS 4.0. + +## HTTP RAW API + +SRS supports powerful HTTP RAW API, while other server only support `Read API`, for instance, to get the stat of server. SRS supports `Write API`, which can `Reload` or change server state. + +Remark: User must enable the HTTP RAW API, in config section `http_api` to enable the `http_api.raw_api.enabled`, or SRS will response error code 1061. + +``` +http_api { + enabled on; + listen 1985; + raw_api { + enabled on; + allow_reload on; + } +} +``` + +The supported HTTP RAW APi of SRS is: + +* `Raw`: To query the HTTP RAW API config. +* `Reload`: To reload the SRS. + +### Raw + +| Key | DESC | +| ---- | ---- | +| feature | Query the HTTP RAW API info. | +| url | `/api/v1/raw?rpc=raw` | +| curl | `curl http://127.0.0.1:1985/api/v1/raw?rpc=raw` | +| config | No config | +| params | No params| + +### RAW Reload + +| Key | DESC | +| ---- | ---- | +| feature | Reload is the same to `killall -1 srs` to reload the config | +| url | `/api/v1/raw?rpc=reload` | +| curl | `curl http://127.0.0.1:1985/api/v1/raw?rpc=reload` | +| params | No params | + +### Other RAW APIs + +Other RAW APIs are disabled by SRS 4.0. + +## Authentication + +Starting from version `5.0.152+` or `6.0.40+`, SRS supports HTTP API authentication, which can be enabled by configuring `http_api.auth`. 
+ +```bash +# conf/http.api.auth.conf +http_api { + enabled on; + listen 1985; + auth { + enabled on; + username admin; + password admin; + } +} +``` + +Otherwise, you can use environment variables to enable it: + +```bash +env SRS_HTTP_API_ENABLED=on SRS_HTTP_SERVER_ENABLED=on \ + SRS_HTTP_API_AUTH_ENABLED=on SRS_HTTP_API_AUTH_USERNAME=admin SRS_HTTP_API_AUTH_PASSWORD=admin \ + ./objs/srs -e +``` + +Then, you can access the following urls to verify it: +- Prompt for username and password: http://localhost:1985/api/v1/versions +- URL with authentication: http://admin:admin@localhost:1985/api/v1/versions + +To clean up the username and password, you can access the HTTP API with the username only: +- http://admin@localhost:1985/api/v1/versions + +> Note: authentication is only enabled for the HTTP APIs, neither for the HTTP server nor the WebRTC HTTP APIs. + +Winlin 2015.8 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/http-api) + + diff --git a/versioned_docs/version-6.0/doc/http-callback.md b/versioned_docs/version-6.0/doc/http-callback.md new file mode 100644 index 00000000..ec54623c --- /dev/null +++ b/versioned_docs/version-6.0/doc/http-callback.md @@ -0,0 +1,426 @@ +--- +title: HTTP Callback +sidebar_label: HTTP Callback +hide_title: false +hide_table_of_contents: false +--- + +# HTTPCallback + +SRS supports HTTP callback to extends SRS. The workflow is: + +```text ++--------+ +--------+ +-----------------------+ +| FFmpeg |-->--+ SRS |--HTTP-Callback-->--+ Your Business Server | ++--------+ +--------+ +-----------------------+ +``` + +When FFmpeg/OBS publish or play a stream to SRS, SRS will call your business server to notify the event. + +## Usage + +First, run SRS with HTTP callback enabled: + +```bash +./objs/srs -c conf/http.hooks.callback.conf +``` + +Start the demo HTTP callback server, which is your business server: + +```bash +go run research/api-server/server.go +``` + +Publish a stream to SRS, with the params: + +```bash +ffmpeg -re -i doc/source.flv -c copy -f flv rtmp://localhost/live/livestream?k=v +``` + +Your business server will got the HTTP event: + +```text +Got action=on_publish, client_id=3y1tcaw2, ip=127.0.0.1, vhost=__defaultVhost__, stream=livestream, param=?k=v +``` + +Note that the `k=v` can be used for authentication, for token authentication based on HTTP callbacks, +read [Token Authentication](./drm.md#token-authentication) + +## Compile + +SRS always enable http callbacks. + +For more information, read [Build](./install.md) + +## Configuring SRS + +An example [conf/http.hooks.callback.conf](https://github.com/ossrs/srs/blob/develop/trunk/conf/http.hooks.callback.conf) +is available, demonstrating the configuration of common callback events for direct use. + +The config for HTTP hooks is: + +```bash +vhost your_vhost { + http_hooks { + # whether the http hooks enable. + # default off. 
+ enabled on; + # when client(encoder) publish to vhost/app/stream, call the hook, + # the request in the POST data string is a object encode by json: + # { + # "action": "on_publish", + # "client_id": "9308h583", + # "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live", + # "stream": "livestream", "param":"?token=xxx&salt=yyy", "server_id": "vid-werty", + # "stream_url": "video.test.com/live/livestream", "stream_id": "vid-124q9y3" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # 0 + # support multiple api hooks, format: + # on_publish http://xxx/api0 http://xxx/api1 http://xxx/apiN + # @remark For SRS4, the HTTPS url is supported, for example: + # on_publish https://xxx/api0 https://xxx/api1 https://xxx/apiN + on_publish http://127.0.0.1:8085/api/v1/streams http://localhost:8085/api/v1/streams; + # when client(encoder) stop publish to vhost/app/stream, call the hook, + # the request in the POST data string is a object encode by json: + # { + # "action": "on_unpublish", + # "client_id": "9308h583", + # "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live", + # "stream": "livestream", "param":"?token=xxx&salt=yyy", "server_id": "vid-werty", + # "stream_url": "video.test.com/live/livestream", "stream_id": "vid-124q9y3" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # 0 + # support multiple api hooks, format: + # on_unpublish http://xxx/api0 http://xxx/api1 http://xxx/apiN + # @remark For SRS4, the HTTPS url is supported, for example: + # on_unpublish https://xxx/api0 https://xxx/api1 https://xxx/apiN + on_unpublish http://127.0.0.1:8085/api/v1/streams http://localhost:8085/api/v1/streams; + # when client start to play vhost/app/stream, call the hook, + # the request in the POST data string is a object encode by json: + # { + # "action": "on_play", + # "client_id": "9308h583", + # "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live", + # "stream": "livestream", "param":"?token=xxx&salt=yyy", + # "pageUrl": "http://www.test.com/live.html", "server_id": "vid-werty", + # "stream_url": "video.test.com/live/livestream", "stream_id": "vid-124q9y3" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # 0 + # support multiple api hooks, format: + # on_play http://xxx/api0 http://xxx/api1 http://xxx/apiN + # @remark For SRS4, the HTTPS url is supported, for example: + # on_play https://xxx/api0 https://xxx/api1 https://xxx/apiN + on_play http://127.0.0.1:8085/api/v1/sessions http://localhost:8085/api/v1/sessions; + # when client stop to play vhost/app/stream, call the hook, + # the request in the POST data string is a object encode by json: + # { + # "action": "on_stop", + # "client_id": "9308h583", + # "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live", + # "stream": "livestream", "param":"?token=xxx&salt=yyy", "server_id": "vid-werty", + # "stream_url": "video.test.com/live/livestream", "stream_id": "vid-124q9y3" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # 0 + # support multiple api hooks, format: + # on_stop http://xxx/api0 http://xxx/api1 http://xxx/apiN + # @remark For SRS4, the HTTPS url is supported, for example: + # on_stop https://xxx/api0 
https://xxx/api1 https://xxx/apiN + on_stop http://127.0.0.1:8085/api/v1/sessions http://localhost:8085/api/v1/sessions; + # when srs reap a dvr file, call the hook, + # the request in the POST data string is a object encode by json: + # { + # "action": "on_dvr", + # "client_id": "9308h583", + # "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live", + # "stream": "livestream", "param":"?token=xxx&salt=yyy", + # "cwd": "/usr/local/srs", + # "file": "./objs/nginx/html/live/livestream.1420254068776.flv", "server_id": "vid-werty", + # "stream_url": "video.test.com/live/livestream", "stream_id": "vid-124q9y3" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # 0 + on_dvr http://127.0.0.1:8085/api/v1/dvrs http://localhost:8085/api/v1/dvrs; + # when srs reap a ts file of hls, call the hook, + # the request in the POST data string is a object encode by json: + # { + # "action": "on_hls", + # "client_id": "9308h583", + # "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live", + # "stream": "livestream", "param":"?token=xxx&salt=yyy", + # "duration": 9.36, // in seconds + # "cwd": "/usr/local/srs", + # "file": "./objs/nginx/html/live/livestream/2015-04-23/01/476584165.ts", + # "url": "live/livestream/2015-04-23/01/476584165.ts", + # "m3u8": "./objs/nginx/html/live/livestream/live.m3u8", + # "m3u8_url": "live/livestream/live.m3u8", + # "seq_no": 100, "server_id": "vid-werty", + # "stream_url": "video.test.com/live/livestream", "stream_id": "vid-124q9y3" + # } + # if valid, the hook must return HTTP code 200(Status OK) and response + # an int value specifies the error code(0 corresponding to success): + # 0 + on_hls http://127.0.0.1:8085/api/v1/hls http://localhost:8085/api/v1/hls; + # when srs reap a ts file of hls, call this hook, + # used to push file to cdn network, by get the ts file from cdn network. + # so we use HTTP GET and use the variable following: + # [server_id], replace with the server_id + # [app], replace with the app. + # [stream], replace with the stream. + # [param], replace with the param. + # [ts_url], replace with the ts url. + # ignore any return data of server. + # @remark random select a url to report, not report all. + on_hls_notify http://127.0.0.1:8085/api/v1/hls/[server_id]/[app]/[stream]/[ts_url][param]; + } +} +``` + +Description about some fields: + +* `stream_url`: The stream identify without extension, such as `/live/livestream`. +* `stream_id`: The id of stream, by which you can query the stream information. + +> Note: The callbacks for streaming are `on_publish` and `on_unpublish`, while the callbacks for playback are `on_play` and `on_stop`. + +> Note: Before SRS 4, there were `on_connect` and `on_close`, which are events defined by RTMP and only applicable to RTMP streams. These events overlap with streaming and playback events, so their use is not recommended. + +> Note: You can refer to the hooks.callback.vhost.com example in the conf/full.conf configuration file. 
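+
+Before wiring SRS up, you can simulate a callback by hand to check what your business server will receive (a sketch only; the JSON fields follow the `on_publish` format documented in the comments above, and the port assumes the demo callback server on 8085):
+
+```bash
+curl -X POST http://127.0.0.1:8085/api/v1/streams \
+    -H 'Content-Type: application/json' \
+    -d '{"action":"on_publish","client_id":"9308h583","ip":"192.168.1.10","vhost":"__defaultVhost__","app":"live","stream":"livestream","param":"?token=xxx"}'
+```
+
+A response of `0` or `{"code":0}` means the hook would allow the client.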
+ +## Protocol + +The detail protocol, for example, `on_publish`: + +```text +POST /api/v1/streams HTTP/1.1 +Content-Type: application-json + +Body: +{ + "server_id": "vid-0xk989d", + "action": "on_publish", + "client_id": "341w361a", + "ip": "127.0.0.1", + "vhost": "__defaultVhost__", + "app": "live", + "tcUrl": "rtmp://127.0.0.1:1935/live?vhost=__defaultVhost__", + "stream": "livestream", + "param": "", + "stream_url": "video.test.com/live/livestream", + "stream_id": "vid-124q9y3" +} +``` + +> Note: You can use wireshark or tcpdump to verify it. + +## Heartbeat + +SRS will send heartbeat to the HTTP callback server. This allows you to monitor the health of SRS server. +Enable this feature by: + +```bash +# heartbeat to api server +# @remark, the ip report to server, is retrieve from system stat, +# which need the config item stats.network. +heartbeat { + # whether heartbeat is enabled. + # Overwrite by env SRS_HEARTBEAT_ENABLED + # default: off + enabled off; + # the interval seconds for heartbeat, + # recommend 0.3,0.6,0.9,1.2,1.5,1.8,2.1,2.4,2.7,3,...,6,9,12,.... + # Overwrite by env SRS_HEARTBEAT_INTERVAL + # default: 9.9 + interval 9.3; + # when startup, srs will heartbeat to this api. + # @remark: must be a restful http api url, where SRS will POST with following data: + # { + # "device_id": "my-srs-device", + # "ip": "192.168.1.100" + # } + # Overwrite by env SRS_HEARTBEAT_URL + # default: http://127.0.0.1:8085/api/v1/servers + url http://127.0.0.1:8085/api/v1/servers; + # the id of device. + # Overwrite by env SRS_HEARTBEAT_DEVICE_ID + device_id "my-srs-device"; + # whether report with summaries + # if on, put /api/v1/summaries to the request data: + # { + # "summaries": summaries object. + # } + # @remark: optional config. + # Overwrite by env SRS_HEARTBEAT_SUMMARIES + # default: off + summaries off; +} +``` + +By enable the `summaries`, you can get the SRS server states, such as `self.pid` and `self.srs_uptime`, so you +can use this to determine whether SRS restarted. + +> Note: About fileds of `summaries`, see [HTTP API: summaries](./http-api.md#summaries) for details. + +## Go Example + +Write Go code to handle SRS callback, for example, handling `on_publish`: + +```go +http.HandleFunc("/api/v1/streams", func(w http.ResponseWriter, r *http.Request) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + fmt.Println(string(b)) + + res, err := json.Marshal(struct { + Code int `json:"code"` + Message string `json:"msg"` + }{ + 0, "OK", + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + w.Write(res) +}) + +_ = http.ListenAndServe(":8085", nil) +``` + +## Nodejs Koa Example + +Write Nodejs/Koa code to handle SRS callback, for example, handling `on_publish`: + +```js +const Router = require('koa-router'); +const router = new Router(); + +router.all('/api/v1/streams', async (ctx) => { + console.log(ctx.request.body); + + ctx.body = {code: 0, msg: 'OK'}; +}); +``` + +## PHP Example + +Write PHP code to handle SRS callback, for example, handling `on_publish`: + +```php +$body = json_decode(file_get_contents('php://input')); +printf($body); + +echo json_encode(array("code"=>0, "msg"=>"OK")); +``` + +## HTTP Callback Events + +SRS can call HTTP callbacks for events: + +* `on_publish`: When a client publishes a stream, for example, using flash or FMLE to publish a stream to the server. +* `on_unpublish`: When a client stops publishing a stream. 
+* `on_play`: When a client starts playing a stream.
+* `on_stop`: When a client stops playback.
+* `on_dvr`: When a DVR file is reaped.
+* `on_hls`: When an HLS TS file is reaped.
+
+For the events `on_publish` and `on_play`:
+* Return Code: SRS requires the response to be an int indicating the error; 0 means success.
+
+Notes:
+* Event: When the event occurs, SRS calls back to the specified HTTP URL.
+* HTTP URL: There can be multiple URLs, separated by spaces; SRS will notify each of them one by one.
+* Data: SRS will POST the data to the specified HTTP API.
+
+SRS will disconnect the client when the response code is not 0 or the HTTP status is not 200.
+
+## SRS HTTP Callback Server
+
+SRS provides a default HTTP callback server, using the Golang native HTTP framework.
+
+To start it:
+
+```bash
+cd research/api-server && go run server.go 8085
+```
+
+```bash
+#2023/01/18 22:57:40.835254 server.go:572: api server listen at port:8085, static_dir:/Users/panda/srs/trunk/static-dir
+#2023/01/18 22:57:40.835600 server.go:836: start listen on::8085
+```
+
+> Remark: Since SRS4, both HTTP and HTTPS callback URLs are supported, see [#1657](https://github.com/ossrs/srs/issues/1657#issuecomment-720889906).
+
+## HTTPS Callback
+
+HTTPS callbacks are supported since SRS4; just change the callback URL from `http://` to `https://`, for example:
+
+```
+vhost your_vhost {
+    http_hooks {
+        enabled on;
+        on_publish https://127.0.0.1:8085/api/v1/streams;
+        on_unpublish https://127.0.0.1:8085/api/v1/streams;
+        on_play https://127.0.0.1:8085/api/v1/sessions;
+        on_stop https://127.0.0.1:8085/api/v1/sessions;
+        on_dvr https://127.0.0.1:8085/api/v1/dvrs;
+        on_hls https://127.0.0.1:8085/api/v1/hls;
+        on_hls_notify https://127.0.0.1:8085/api/v1/hls/[app]/[stream]/[ts_url][param];
+    }
+}
+```
+
+## Response
+
+On success, your server must respond with `something` that identifies success, or SRS will reject the client; this enables you to reject illegal clients, please read [Callback Error Code](./http-api.md#error-code).
+
+> Note: The `on_publish` callback can also be used for advanced security, to `allow` or `deny` a client by its IP, a token in the request URL, or any other client information.
+
+Where `something` means:
+
+* HTTP/200, which is HTTP success.
+* `AND` a response body that is the int value 0, or a JSON object with the field `code` set to 0.
+
+Like this:
+
+```
+HTTP/1.1 200 OK
+Content-Length: 1
+0
+```
+
+OR:
+
+```
+HTTP/1.1 200 OK
+Content-Length: 11
+{"code": 0}
+```
+
+You can run the example HTTP callback server by:
+
+```
+cd srs/trunk/research/api-server && go run server.go 8085
+```
+
+And you will figure out what the `right` response is.
+
+## Snapshot
+
+The HTTP callback can be used to take snapshots, please read [snapshot](./snapshot.md#httpcallback)
+
+Winlin 2015.1
+
+![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/http-callback)
+
+
diff --git a/versioned_docs/version-6.0/doc/http-server.md b/versioned_docs/version-6.0/doc/http-server.md
new file mode 100644
index 00000000..fdf72dbd
--- /dev/null
+++ b/versioned_docs/version-6.0/doc/http-server.md
@@ -0,0 +1,308 @@
+---
+title: HTTP Server
+sidebar_label: HTTP Server
+hide_title: false
+hide_table_of_contents: false
+---
+
+# HTTP Server
+
+SRS embeds an HTTP web server, which serves the API and simple HTTP files for HLS.
+
+To deploy the SRS HTTP server, read [Usage: HTTP](./sample-http.md)
+
+The SRS embedded HTTP server is rewritten with reference to the Go http module, so it is fine to use SRS as an HTTP server.
Read [#277](https://github.com/ossrs/srs/issues/277) + +> Remark: The SRS HTTP server is just a origin HTTP server, for HTTP edge server, please use NGINX, SQUID and ATS. + +SRS also works well with HTTP reverse proxy servers, like [NGINX](#nginx-proxy) and [Caddy](#caddy-proxy). + +## Use Scenario + +The SRS Embeded HTTP server is design to provides basic HTTP service, +like the camera of mobile phone. + +SRS should provides HTTP api, which is actually a embeded HTTP server. + +Actually, RTMP is more complex than HTTP, so HTTP server on st is absolutely ok. +The HTTP Server in SRS1.0 is expirement, I will enhance it future. + +## Config + +Config the HTTP port and root. + +```bash +# embeded http server in srs. +# the http streaming config, for HLS/HDS/DASH/HTTPProgressive +# global config for http streaming, user must config the http section for each vhost. +# the embed http server used to substitute nginx in ./objs/nginx, +# for example, srs runing in arm, can provides RTMP and HTTP service, only with srs installed. +# user can access the http server pages, generally: +# curl http://192.168.1.170:80/srs.html +# which will show srs version and welcome to srs. +# @remeark, the http embeded stream need to config the vhost, for instance, the __defaultVhost__ +# need to open the feature http of vhost. +http_server { + # whether http streaming service is enabled. + # default: off + enabled on; + # the http streaming port + # @remark, if use lower port, for instance 80, user must start srs by root. + # default: 8080 + listen 8080; + # the default dir for http root. + # default: ./objs/nginx/html + dir ./objs/nginx/html; +} +``` + +And, each vhost can specifies the dir. + +```bash +vhost your_vhost { + # http static vhost specified config + http_static { + # whether enabled the http static service for vhost. + # default: off + enabled on; + # the url to mount to, + # typical mount to [vhost]/ + # the variables: + # [vhost] current vhost for http server. + # @remark the [vhost] is optional, used to mount at specified vhost. + # @remark the http of __defaultVhost__ will override the http_stream section. + # for example: + # mount to [vhost]/ + # access by http://ossrs.net:8080/xxx.html + # mount to [vhost]/hls + # access by http://ossrs.net:8080/hls/xxx.html + # mount to / + # access by http://ossrs.net:8080/xxx.html + # or by http://192.168.1.173:8080/xxx.html + # mount to /hls + # access by http://ossrs.net:8080/hls/xxx.html + # or by http://192.168.1.173:8080/hls/xxx.html + # default: [vhost]/ + mount [vhost]/hls; + # main dir of vhost, + # to delivery HTTP stream of this vhost. + # default: ./objs/nginx/html + dir ./objs/nginx/html/hls; + } +} +``` + +Remark: The `http_stream` of SRS1 renamed to `http_server` in SRS2, which specifies the global HTTP server config, used to delivery http static files, for dvr files(HLS/FLV/HDS/MPEG-DASH). + +Remark: The `http` of vhost of SRS1 renamed to `http_static`, similar to global `http_server` for HTTP static files delivery. While the `http_remux` introduced in SRS2 is dynamic remux RTMP to HTTP Live FLV/Mp3/Aac/HLS/Hds/MPEG-DASH stream. + +## HTTPS Server + +SRS supports HTTPS, just enable it in the configuration. By default, it uses a sub-signed certificate. If you need +to use a CA-issued certificate, please replace the relevant files. The related configuration is as follows: + +```bash +http_server { + https { + # Whether enable HTTPS Streaming. 
+ # Overwrite by env SRS_HTTP_SERVER_HTTPS_ENABLED + # default: off + enabled on; + # The listen endpoint for HTTPS Streaming. + # Overwrite by env SRS_HTTP_SERVER_HTTPS_LISTEN + # default: 8088 + listen 8088; + # The SSL private key file, generated by: + # openssl genrsa -out server.key 2048 + # Overwrite by env SRS_HTTP_SERVER_HTTPS_KEY + # default: ./conf/server.key + key ./conf/server.key; + # The SSL public cert file, generated by: + # openssl req -new -x509 -key server.key -out server.crt -days 3650 -subj "/C=CN/ST=Beijing/L=Beijing/O=Me/OU=Me/CN=ossrs.net" + # Overwrite by env SRS_HTTP_SERVER_HTTPS_CERT + # default: ./conf/server.crt + cert ./conf/server.crt; + } +} +``` + +## Crossdomain + +SRS has CORS (Cross-Origin Resource Sharing) support enabled by default. The related configuration is as follows: + +```bash +http_server { + # whether enable crossdomain request. + # for both http static and stream server and apply on all vhosts. + # Overwrite by env SRS_HTTP_SERVER_CROSSDOMAIN + # default: on + crossdomain on; +} +``` + +## MIME + +Only some MIME is supported: + +| File ext name | Content-Type | +| ------------- | ----------- | +| .ts | Content-Type: video/MP2T;charset=utf-8 | +| .m3u8 | Content-Type: application/x-mpegURL;charset=utf-8 | +| .json | Content-Type: application/json;charset=utf-8 | +| .css | Content-Type: text/css;charset=utf-8 | +| .swf | Content-Type: application/x-shockwave-flash;charset=utf-8 | +| .js | Content-Type: text/javascript;charset=utf-8 | +| .xml | Content-Type: text/xml;charset=utf-8 | +| Others | Content-Type: text/html;charset=utf-8 | + +## Method + +Supported HTTP method: +* GET: Query API, or download file. + +## Paths + +HTTP/HTTPS API: + +* `/api/` SRS HTTP API +* `/rtc/` SRS WebRTC API + +HTTP/HTTPS Stream: + +* `/{app}/{stream}` HTTP Stream mounted by publisher. + +The bellow is some reverse proxy to work with SRS. + +> Note: Generally, a proxy can be used to route API and Stream together based on the path. + +## Nginx Proxy + +The config for NGINX as file [nginx.conf](https://github.com/ossrs/srs/blob/develop/trunk/conf/nginx.proxy.conf): + +``` +worker_processes 1; +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + + server { + listen 80; + listen 443 ssl http2; + server_name _; + ssl_certificate /usr/local/srs/conf/server.crt; + ssl_certificate_key /usr/local/srs/conf/server.key; + + # For SRS homepage, console and players + # http://r.ossrs.net/console/ + # http://r.ossrs.net/players/ + location ~ ^/(console|players)/ { + proxy_pass http://127.0.0.1:8080/$request_uri; + } + # For SRS streaming, for example: + # http://r.ossrs.net/live/livestream.flv + # http://r.ossrs.net/live/livestream.m3u8 + location ~ ^/.+/.*\.(flv|m3u8|ts|aac|mp3)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + } + # For SRS backend API for console. + # For SRS WebRTC publish/play API. + location ~ ^/(api|rtc)/ { + proxy_pass http://127.0.0.1:1985$request_uri; + } + } +} +``` + +## Caddy Proxy + +The config for [CaddyServer](https://caddyserver.com/docs/getting-started) with automatic HTTPS, use the config file `Caddyfile`. 
+ +For HTTP server, note that to set the default port: + +``` +:80 +reverse_proxy /* 127.0.0.1:8080 +reverse_proxy /api/* 127.0.0.1:1985 +reverse_proxy /rtc/* 127.0.0.1:1985 +``` + +For HTTPS server, please enable a domain name: + +``` +example.com { + reverse_proxy /* 127.0.0.1:8080 + reverse_proxy /api/* 127.0.0.1:1985 + reverse_proxy /rtc/* 127.0.0.1:1985 +} +``` + +Start the CaddyServer: + +``` +caddy start -config Caddyfile +``` + +## Nodejs KOA Proxy + +The nodejs koa proxy also works well for SRS, please use [koa-proxies](https://www.npmjs.com/package/koa-proxies) based by [node-http-proxy](https://github.com/nodejitsu/node-http-proxy), here is an example: + +```js +const Koa = require('koa'); +const proxy = require('koa-proxies'); +const BodyParser = require('koa-bodyparser'); +const Router = require('koa-router'); + +const app = new Koa(); +app.use(proxy('/api/', {target: 'http://127.0.0.1:1985/'})); +app.use(proxy('/rtc/', {target: 'http://127.0.0.1:1985/'})); +app.use(proxy('/*/*.(flv|m3u8|ts|aac|mp3)', {target: 'http://127.0.0.1:8080/'})); +app.use(proxy('/console/', {target: 'http://127.0.0.1:8080/'})); +app.use(proxy('/players/', {target: 'http://127.0.0.1:8080/'})); + +// Start body-parser after proxies, see https://github.com/vagusX/koa-proxies/issues/55 +app.use(BodyParser()); + +// APIs that depends on body-parser +const router = new Router(); +router.all('/', async (ctx) => { + ctx.body = 'Hello World'; +}); +app.use(router.routes()); + +app.listen(3000, () => { + console.log(`Server start on http://localhost:3000`); +}); +``` + +Save it as `index.js`, then run: + +``` +npm init -y +npm install koa koa-proxies koa-proxies koa-bodyparser koa-router +node . +``` + +## HTTPX Proxy + +Well [httpx-static](https://github.com/ossrs/go-oryx/tree/develop/httpx-static#usage) is a simple HTTP/HTTPS proxy written by Go: + +``` +go get github.com/ossrs/go-oryx/httpx-static +cd $GOPATH/bin +./httpx-static -http=80 -https=443 \ + -skey /usr/local/srs/etc/server.key -scert /usr/local/srs/etc/server.crt \ + -proxy=http://127.0.0.1:1985/api/v1/ \ + -proxy=http://127.0.0.1:1985/rtc/v1/ \ + -proxy=http://127.0.0.1:8080/ +``` + +> Please make sure the path `/` is the last one. + +Winlin 2015.1 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/http-server) + + diff --git a/versioned_docs/version-6.0/doc/ide.md b/versioned_docs/version-6.0/doc/ide.md new file mode 100644 index 00000000..ae651485 --- /dev/null +++ b/versioned_docs/version-6.0/doc/ide.md @@ -0,0 +1,22 @@ +--- +title: IDE +sidebar_label: IDE +hide_title: false +hide_table_of_contents: false +--- + +# IDE + +SRS supports JetBrains [CLion](http://www.jetbrains.com/clion/) + +## JetBrains + +The clion of JetBrains, please open `trunk/ide/srs_clion/CMakeLists.txt` + +Read [http://www.jetbrains.com/clion/](http://www.jetbrains.com/clion/) + +Winlin 2015.10 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/ide) + + diff --git a/versioned_docs/version-6.0/doc/ingest.md b/versioned_docs/version-6.0/doc/ingest.md new file mode 100644 index 00000000..5873e2ba --- /dev/null +++ b/versioned_docs/version-6.0/doc/ingest.md @@ -0,0 +1,109 @@ +--- +title: Ingest +sidebar_label: Ingest +hide_title: false +hide_table_of_contents: false +--- + +# Ingest + +Ingest is used to ingest file(flv, mp4, mkv, avi, rmvb...), +stream(RTMP, RTMPT, RTMPS, RTSP, HTTP, HLS...) and device, +encode or passthrough then publish as RTMP to SRS. 
+ +Ingest actually use FFmpeg, or your tool, to encode or remux +to suck known data to RTMP to SRS. + +How to deploy ingest, read [Ingest](./sample-ingest.md) + +## Use Scenario + +The main use scenarios: +* Virtual Live Stream: Convert vod file to live stream. +* Input RTSP IP Camera: Many IP Camera supports to pull in RTSP, user can ingest the RTSP to RTMP to SRS. +* Directly ingest device, use the FFmpeg as encoder actually. +* Ingest HTTp stream to RTMP for some old stream server. + +In a word, the Ingest is used to ingest any stream supported by FFMPEG to SRS. + +SRS server is support encoder to publish stream, while ingest can enable SRS to act like a client to pull +stream from other place. + +## Build + +Config SRS with option `--with-ingest`, read [Build](./install.md) + +The ingest tool of SRS can use FFMPEG, or use your own tool. + +## Config + +The config to use ingest: + +```bash +vhost your_vhost { + # ingest file/stream/device then push to SRS over RTMP. + # the name/id used to identify the ingest, must be unique in global. + # ingest id is used in reload or http api management. + ingest livestream { + # whether enabled ingest features + # default: off + enabled on; + # input file/stream/device + # @remark only support one input. + input { + # the type of input. + # can be file/stream/device, that is, + # file: ingest file specifies by url. + # stream: ingest stream specifeis by url. + # device: not support yet. + # default: file + type file; + # the url of file/stream. + url ./doc/source.flv; + } + # the ffmpeg + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + # the transcode engine, @see all.transcode.srs.com + # @remark, the output is specified following. + engine { + # @see enabled of transcode engine. + # if disabled or vcodec/acodec not specified, use copy. + # default: off. + enabled off; + # output stream. variables: + # [vhost] current vhost which start the ingest. + # [port] system RTMP stream port. + output rtmp://127.0.0.1:[port]/live?vhost=[vhost]/livestream; + } + } +} +``` + +The word after ingest keyword is the id of ingest, the id must be unique. + +The `type` specifies the ingest type: +* file: To ingest file to RTMP, SRS will add `-re` for FFMPEG. +* stream: To ingest stream to RTMP. +* device: Not support yet. + +The `engine` specifies the transcode engine and output: +* enabled: Whether transcode, remux when off. +* output:The output RTMP url. The vhost and port is variable. +* others is same to [FFMPEG](./ffmpeg.md) + +Note: Engine is copy, when: +* The enabled is off. +* The vcodec and acodec is not specified. + +## Ingest File list + +SRS does not ingest a file list, a wordaround: +* Use script as the ingest tool, which use ffmpeg to copy file to RTMP stream one by one. + +Read https://github.com/ossrs/srs/issues/55 + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/ingest) + + diff --git a/versioned_docs/version-6.0/doc/install.md b/versioned_docs/version-6.0/doc/install.md new file mode 100644 index 00000000..af57d9cb --- /dev/null +++ b/versioned_docs/version-6.0/doc/install.md @@ -0,0 +1,66 @@ +--- +title: Build and Install +sidebar_label: Build and Install +hide_title: false +hide_table_of_contents: false +--- + +# Install + +You can directly use the release binaries, or build SRS step by step. See: [Github: release](http://ossrs.net/srs.release/releases/) or [Mirror of China: release](http://www.ossrs.net/srs.release/releases/) + +## OS + +* Ubuntu20 is recommended. 
+* Use [srs-docker](https://github.com/ossrs/dev-docker/tree/dev) to build SRS. +* Use [srs-docker](https://github.com/ossrs/dev-docker) to run SRS. + +## Iptables and Selinux + +Sometimes the stream play failed, but without any error message, or server cann't connect to. Please check the iptables and selinux. + +Turn off iptables: + +```bash +# disable the firewall +sudo /etc/init.d/iptables stop +sudo /sbin/chkconfig iptables off +``` + +Disable the selinux, to run `getenforce` to ensure the result is `Disabled`: + +1. Edit the config of selinux: `sudo vi /etc/sysconfig/selinux` +1. Change the SELINUX to disabled: `SELINUX=disabled` +1. Rebot: `sudo init 6` + +## Build and Run SRS + +It's very easy to build SRS: + +``` +./configure && make +``` + +Also easy to start SRS: + +```bash +./objs/srs -c conf/srs.conf +``` + +Publish RTMP, please read: [Usage: RTMP](./rtmp.md) + +For service management, please read [Service](./service.md) + +Run SRS in docker, please read [srs-docker](https://github.com/ossrs/dev-docker#usage) + +## ARM + +It's also ok to directly build on ARM server. + +For ARM/MIPS or crossbuild, please read [here](./arm.md) + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/install) + + diff --git a/versioned_docs/version-6.0/doc/introduction.md b/versioned_docs/version-6.0/doc/introduction.md new file mode 100644 index 00000000..5e3312c9 --- /dev/null +++ b/versioned_docs/version-6.0/doc/introduction.md @@ -0,0 +1,177 @@ +--- +title: Introduction +sidebar_label: Introduction +hide_title: false +hide_table_of_contents: false +--- + +# Introduction + +> Remark: SRS6 is developing and not stable. + +SRS is a open-source ([MIT Licensed](../../../license)), simple, high-efficiency, real-time video server supporting RTMP, +WebRTC, HLS, HTTP-FLV, SRT, MPEG-DASH, and GB28181. SRS media server works with clients like [FFmpeg](https://ffmpeg.org), +[OBS](https://obsproject.com), [VLC](https://www.videolan.org), and [WebRTC](https://webrtc.org) to provide +the ability to [receive and distribute streams](./getting-started.md) in a typical publish (push) and +subscribe (play) server model. SRS supports widely used internet audio and video protocol conversions, +such as converting [RTMP](./rtmp.md) or [SRT](./srt.md) to [HLS](./hls.md), [HTTP-FLV](./flv.md), or +[WebRTC](./webrtc.md). + +SRS is primarily used in the Live streaming and WebRTC fields. In the live streaming domain, SRS supports typical +protocols such as RTMP, HLS, SRT, MPEG-DASH, and HTTP-FLV. In the WebRTC field, SRS supports protocols like WebRTC, +WHIP, and WHEP. SRS facilitates protocol conversion for both Live streaming and WebRTC. As a media server, SRS +typically works alongside other open-source projects such as FFmpeg, OBS, and WebRTC. Oryx as an out-of-the-box +media solution, incorporating numerous open-source projects and tools, please refer to the [introduction](./getting-started-oryx.md#introduction) +of Oryx. + +SRS provides an [HTTP API](./http-api.md) open interface to query system and stream status. It also supports +[HTTP Callback](./http-callback.md) for callback capabilities, actively notifying your system and implementing +stream authentication and business customization (such as dynamic DVR). SRS also supports the official +[Prometheus Exporter](./exporter.md) for integration with cloud-native monitoring systems, offering powerful +observability. SRS supports session-level [traceable logs](./log.md), greatly reducing system maintenance costs. 
+ +If you are new to audio, video, and streaming media or new to SRS, we recommend reading [Getting Started](./getting-started.md) +and [Learning Path](/guide). Please take the time to read the following documentation, as reading and +familiarizing yourself with the documentation is a basic requirement of the community. If you encounter any +problems, please first search in the [FAQ](../../../faq), then in [Issues](https://github.com/ossrs/srs/issues) and +[Discussions](https://github.com/ossrs/srs/discussions) to find answers to almost all questions. + +SRS is developed using ANSI C++ (98) and only uses basic C++ capabilities. It can run on multiple platforms +such as Linux, Windows, and macOS. We recommend using Ubuntu 20+ for development and debugging. The image +we provide [ossrs/srs](https://hub.docker.com/r/ossrs/srs) is also built on Ubuntu 20 (focal). + +> Note: To solve the long connection and complex state machine problems in complex streaming media processing, +> SRS uses [ST(State Threads)](https://github.com/ossrs/state-threads) coroutine technology (similar +> to [Goroutine](https://go.dev/doc/effective_go#goroutines)) and continuously enhances and maintains +> ST's capabilities, supporting multiple platforms such as Linux, Windows, macOS, and various CPU +> architectures like X86_64, ARMv7, AARCH64, M1, RISCV, LOONGARCH, and MIPS. + +## Features + +Functionality is often a major concern for people and the richness of features is an important reason for choosing a +project. You can view the detailed feature list at [Features](https://github.com/ossrs/srs/blob/develop/trunk/doc/Features.md#features). +We have listed the main features' versions, along with related Issue and PR links. + +Additionally, in the detailed description of [Milestones](/product), the supported features for each major version +are introduced. + +> Note: If you want to see the Issues for each milestone, you can check them at [Milestones](https://github.com/ossrs/srs/milestones). + +Please note that although not many, SRS still marks some features as [Deprecated](https://github.com/ossrs/srs/blob/develop/trunk/doc/Features.md#features). +You can search for 'Deprecated' or 'Removed' on the page. We will also explain in detail why we are removing a +particular feature. + +If you want to know about the features we are currently working on, you can join our [Discord](/contact#discussion) +and [Blog](../../../blog). Once new features are completed, we will post articles on Discord and Blog, so stay tuned. + +## Who's using SRS? + +SRS users are spread all over the world, and we welcome everyone to showcase their SRS applications +in [SRS Use Cases](https://github.com/ossrs/srs/discussions/3771). + +## Governance + +We welcome everyone to participate in the development and maintenance of SRS. We recommend starting by +resolving issues from [Contribute](https://github.com/ossrs/srs/contribute) and [submitting PRs](/how-to-file-pr). +All contributors will be showcased in [Contributors](https://github.com/ossrs/srs#authors). + +SRS is a non-commercial open-source community where active developers have their own jobs and contribute to SRS's +development in their spare time. + +Since the SRS system is highly efficient, we can spend minimal time making continuous improvements, delivering +feature-rich and stable high-quality products. Customizing based on SRS is also easy. + +We are a global open-source community with developer communities both domestically and abroad. 
We welcome developers +to join us: + +* Great sense of accomplishment: Your code can impact global users, change the audio and video industry, and transform various sectors as SRS is widely used. +* Solid technical progress: You can interact with top audio and video developers worldwide, master high-quality software development skills, and mutually enhance technical capabilities. + +SRS currently uses the following techniques and rules to ensure high quality and efficiency: + +* Long-term discussions on architecture and solutions. For significant features and plans, extensive discussions are required, such as the 7-year discussion on [HEVC/H.265](https://github.com/ossrs/srs/issues/465) support. +* Careful and thorough code reviews. Each pull request must be approved by at least two TOCs and developers and pass all Actions before merging. +* Comprehensive unit tests (over 500), code coverage (around 60%), black-box testing, etc., ensuring ample testing time with one year of development and one year of testing. +* Full pipeline: Each pull request has a pipeline, and each release is automatically completed by the pipeline. + +We welcome you to join us. For more information, please visit [Contribute](https://github.com/ossrs/srs/contribute) +and submit a pull request as required. + +## Milestone + +SRS releases a major version approximately every two years, with one year for development and one year +for stability improvement. For more details, please refer to [Milestone](/product). + +If you want to use SRS online, it's recommended to use the stable version. If you want to use new features, use +the development version. + +SRS has branches based on versions, such as: + +* [develop](https://github.com/ossrs/srs/tree/develop) SRS 6.0, development branch, unstable, but with the most new features. +* [5.0release](https://github.com/ossrs/srs/tree/5.0release#releases) SRS 5.0, currently stable, depending on the branch's status. +* [4.0release](https://github.com/ossrs/srs/tree/4.0release#releases) SRS 4.0, currently the stable branch, and will become more stable over time. + +To determine if a branch is stable, check the Releases tag, such as [SRS 4.0](https://github.com/ossrs/srs/tree/4.0release#releases): + +* 2022-06-11, Release v4.0-r0, this is a stable release version. +* 2021-12-01, Release v4.0-b0, this is a relatively stable beta version, also known as a public test version. +* 2021-11-15, Release v4.0.198, this version is an unstable development version. + +> Note: In addition to beta versions, there are alpha versions, such as `v5.0-a0`, which are less stable internal +> test versions compared to beta. + +> Note: Each alpha, beta, and release version will correspond to a specific version number, such as `v5.0-a0` +> corresponding to `v5.0.98`. + +For SRS, generally, once it reaches the beta version, it can be used online. + +## Strategy + +SRS doesn't develop client-side applications because there are already mature and large open-source communities like +FFmpeg, OBS, VLC, and WebRTC. SRS collaborates with these communities and uses their products. + +In addition to the SRS server, they also work on Oryx and WordPress plugins, with the main goal of creating +simpler application methods for different industries, including: + +* [Oryx](https://github.com/ossrs/oryx): Oryx(SRS Stack) is an out-of-the-box, single-machine video cloud solution that includes FFmpeg and SRS. 
It's designed for users who aren't familiar with command lines and allows them to set up audio and video applications through Tencent Cloud images or BaoTa with mouse operations. +* [WordPress-Plugin-SrsPlayer](https://github.com/ossrs/WordPress-Plugin-SrsPlayer): This plugin is for the publishing industry, such as personal blogs and media websites, making it easy for users to utilize audio and video capabilities. +* [srs-unity](https://github.com/ossrs/srs-unity): This project is for the gaming industry, integrating Unity's WebRTC SDK to use audio and video capabilities. + +SRS will continue to improve its toolchain. Developers may not use SRS but might have used the SB stress testing tool: + +* [srs-bench](https://github.com/ossrs/srs-bench): An audio and video stress testing tool that supports RTMP, FLV, WebRTC, GB28181, etc., with plans for future improvements. +* [state-threads](https://github.com/ossrs/state-threads): A C coroutine library that can be considered a C version of Go. It's a small but powerful server library that will be continuously improved. +* [tea](https://github.com/ossrs/tea): This project explores eBPF for network simulation in weak network conditions and load balancing. + +SRS aims to continuously improve audio and video toolchains, solutions, and scenario-based capabilities, making it +possible for various industries to utilize audio and video capabilities. + +## Sponsors + +SRS is committed to building a non-profit open-source project and community. We provide special community +support for friends who sponsor SRS. Please see [Sponsor](/contact#donation). + +Audio and video developers often face challenges, and they might be used to the close support from cloud +service providers. When joining an open-source community, it might feel unfamiliar. + +Don't panic when encountering issues. Most problems have solutions that can be found in the [FAQ](../../../faq) +or the documentation [Docs](./getting-started.md). + +You can also join the Discord channel through [Support](/contact) to communicate with other developers. +However, please follow community guidelines, or you won't receive help. + +As developers, we must learn to read documentation, investigate issues, and then discuss them within the community. + +For advanced developers, we suggest becoming a `Backer/Sponsor`. See [Support](/contact#donation). + +SRS has no commercial plans. We are currently working hard to build a global, active developer community. +The value of open-source will grow, and community support will increase. + +## About Oryx + +Oryx is a lightweight, open-source video cloud solution based on Go, Reactjs, SRS, FFmpeg, WebRTC, +and more. For more details, please refer to [Oryx](./getting-started-oryx.md). + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/introduction) + + diff --git a/versioned_docs/version-6.0/doc/k8s.md b/versioned_docs/version-6.0/doc/k8s.md new file mode 100644 index 00000000..4e3ea936 --- /dev/null +++ b/versioned_docs/version-6.0/doc/k8s.md @@ -0,0 +1,579 @@ +--- +title: K8s Guide +sidebar_label: K8s Guide +hide_title: false +hide_table_of_contents: false +--- + +# K8S + +> Cloud+Docker+K8S enable everyone to build live video streaming cluster and service. + +Why should you use [k8s](https://docs.kubernetes.io/docs/concepts/overview/what-is-kubernetes) to build your SRS cluster? + +* Simple: It's really simple and convenient, let's figure it out by [QuickStart](./k8s.md#quick-start). 
+

* Declarative deployment: We declare the desired SRS cluster and it will always be kept that way, with no manual service start-up and migration, watchdog, or SLB configuration.
* Easy scaling: K8S provisions infrastructure automatically, and you can scale your business cluster simply by changing the number of Pods.
* Rolling Update: K8S allows deployment updates, rollback, and gray release with zero downtime.
* XXX: Coming soon...

This tutorial highlights how to build an SRS cluster for a variety of scenarios on [ACK(AlibabaCloud Container Service for Kubernetes)](https://www.alibabacloud.com/product/kubernetes).

1. [Deploy to Cloud Platforms](./k8s.md#deploy-to-cloud-platforms): Clone a template project and use its actions to deploy.
2. [Quick Start](./k8s.md#quick-start): Deploy an SRS origin server in ACK.
3. [SRS Shares Volume with Nginx](./k8s.md#srs-shares-volume-with-nginx): SRS can deliver simple HTTP content by itself, or work with Nginx: SRS delivers RTMP/HTTP-FLV and writes HLS to a shared volume, then Nginx reads and delivers the HLS.
4. [SRS Edge Cluster for High Concurrency Streaming](./k8s.md#srs-edge-cluster-with-slb): An SRS edge cluster, configured and updated automatically, to serve a huge number of players.
5. [SRS Origin Cluster for a Large Number of Streams](./k8s.md#srs-origin-cluster-for-a-large-number-of-streams): The SRS origin cluster is designed to serve a large number of streams.
6. [SRS Cluster Update, Rollback, Gray Release with Zero Downtime](./k8s.md#srs-cluster-update-rollback-gray-release-with-zero-downtime): K8S allows deployment updates, rollback, and gray release with zero downtime.
7. [Useful Tips](./k8s.md#useful-tips)
    1. [Create K8S Cluster in ACK](./k8s.md#create-k8s-cluster-in-ack): Create your own k8s cluster in ACK.
    2. [Publish Demo Streams to SRS](./k8s.md#publish-demo-streams-to-srs): Publish the demo streams to SRS.
    3. [Cleanup For DVR/HLS Temporary Files](./k8s.md#cleanup-for-dvrhls-temporary-files): Remove the temporary files for DVR/HLS.
    4. [Use One SLB and EIP for All Streaming Service](./k8s.md#use-one-slb-and-eip-for-all-streaming-service): Use one SLB for the RTMP/HTTP-FLV/HLS streaming service.
    5. [Build SRS Origin Cluster as Deployment](./k8s.md#build-srs-origin-cluster-as-deployment): Rather than a StatefulSet, we can also use a Deployment to build the Origin Cluster.
    6. [Managing Compute Resources for Containers](./k8s.md#managing-compute-resources-for-containers): Resource requests and limits, and how Pod requests are scheduled and limits are enforced.
    7. [Auto Reload by Inotify](./k8s.md#auto-reload-by-inotify): SRS supports auto reload by watching ConfigMap changes with inotify.

## Deploy to Cloud Platforms

SRS provides a set of template repositories for fast deployment:

* [General K8s](https://github.com/ossrs/srs-k8s-template)
* [TKE(Tencent Kubernetes Engine)](https://github.com/ossrs/srs-tke-template)
* [ACK(Alibaba Cloud Container Service for Kubernetes)](https://github.com/ossrs/srs-ack-template)
* [EKS(Amazon Elastic Kubernetes Service)](https://github.com/ossrs/srs-eks-template)
* [AKS(Azure Kubernetes Service)](https://github.com/ossrs/srs-aks-template)

## Quick Start

This assumes you have access to a k8s cluster:

```bash
kubectl cluster-info
```

Let's take a look at a single SRS origin server in k8s.
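
If `kubectl cluster-info` cannot reach a cluster, fix your kubeconfig before continuing. A couple of optional sanity checks (standard kubectl commands, nothing SRS-specific) that the examples below assume will succeed:

```bash
# Confirm which cluster the current kubeconfig context points at.
kubectl config current-context

# All nodes should report Ready before creating the SRS workloads.
kubectl get nodes -o wide
```

With the cluster reachable, the single origin setup below is the starting point.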
+ +![SRS: Single Origin Server](/img/doc-advanced-guides-k8s-001.png) + +**Step 1:** Create a [k8s deployment](https://v1-14.docs.kubernetes.io/docs/concepts/workloads/controllers/deployment) for SRS origin server: + +```bash +cat < + if [[ ! -f /tmp/html/index.html ]]; then + cp -R ./objs/nginx/html/* /tmp/html + fi && + sleep infinity +EOF +``` + +> Note: Nginx’s default directory is /usr/share/nginx/html, please be awared, and change it to your own directory + +> Note: To share HLS segments, both SRS and Nginx are mounted to the emptyDir Volume at different paths, the emptyDir volume is initially empty and will be emptied as the pod is destoryed. + +> Note: Since the shared emptyDir Volume is initially empty, we start a srs-cp-files container, and copied the SRS default files, please refer to [#1603](https://github.com/ossrs/srs/issues/1603). + +Step 2: create a [k8s Service](https://kubernetes.io/docs/concepts/services-networking/service/), using SLB to provide external streaming service. + +``` +cat < Note: We expose ports for external services through k8s LoadBalancer Service, where RTMP(1935)/FLV(8080)/API(1985) is served by SRS and HLS(80) is served by Nginx. + +> Note: Here we choose ACK to create SLB and EIP automatically, or you can specify SLB manually, refer to [Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip). + +Step 3: Great job. You can publish and play streams now. the HLS stream can by played from SRS(8080) or Nginx(80). +* Publish RTMP to rtmp://28.170.32.118/live/livestream or Publish Demo Streams to SRS. +* Play RTMP from rtmp://28.170.32.118/live/livestream +* Play HTTP-FLV from http://28.170.32.118:8080/live/livestream.flv +* Play HLS from http://28.170.32.118:8080/live/livestream.m3u8 +* Play HLS from http://28.170.32.118/live/livestream.m3u8 + +> Note: Please replace the above EIP with your own, and use 'kubectl get svc/srs-origin-service’ to check your EIP + +## SRS Edge Cluster for High Concurrency Streaming + +This chapter will show you how to build Edge Cluster for high concurrency streaming based on k8s. + +Edge Cluster realizes merging the request of origin source. When there are many players, but request for a same stream, the Edge server still only request one stream from origin. So you can scale up the Edge Cluster to facilitate more clients requests. This is CDN’s important capability:high concurreny. + +> Note: Edge Cluster can be classified as RTMP Edge Cluster or HTTP-FLV Edge Cluster depending on the player’s stream protocol, for more details, can refer to the related Wiki. + +For self-host origin, without so many play requests, why is it not recommended to use SRS Single Origin mode, and instead use Edge Cluster mode? look at the related scenarios: + +* Avoid overloading of origin. Even if it’s a bit push and play scene, in the case of many CDN requests for one origin stream, may be result in one stream has many request connections. Use Edge cluster can protect origin from too many requests, and transfer the danger to edge. + +* Can easily scale up multi Edge Clusters with different SLB exported, and to avoid interference of multiple CDNs, can execute traffic limit on sperate SLB. Use many Edge Clusters can ensure some CDN is available when Origin is down. + +* Can let Edge Cluster to handle stream distribution, and let Origin focus on Slice segments、 DVR、 Authentication functions. 
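
Concretely, the request merging described above is just the edge vhost pulling from the origin on demand. The sketch below shows the kind of SRS edge config that the `srs-edge-config` ConfigMap in Step 2 carries; the origin address `srs-origin-service:1935` assumes the ClusterIP Service created in Step 1, so treat it as an illustration rather than the exact file:

```
# Sketch of an SRS edge config for K8s (cf. conf/edge.conf in the SRS source).
listen              1935;
daemon              off;
srs_log_tank        console;
vhost __defaultVhost__ {
    cluster {
        # Edge mode: fetch streams from the origin on demand and merge
        # duplicate play requests into a single upstream connection.
        mode            remote;
        # The origin's ClusterIP Service name, resolved by cluster DNS.
        origin          srs-origin-service:1935;
    }
}
```
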
+ +the difference of traditional package deployment vs K8S: + +| | ECS | K8S | comment | +| :----: | :---- | :---- | :---- | +| Resources | Manually |Automatically| From the traditional deployment, the resources SLB、EIP and ECS, you need to buy and configure one by one yourself, use k8s the mentioned resources can be acquired and configured automatically. | +| Deployment | Package |Image| From the K8s deployment, the pod’s docker image can easily rollback, and can keep the dev environment in touch with the prod, and the image can be cached on the node.
So with Docker images you get high efficiency, high density, high portability, and resource isolation. |
| Watchdog | Manually |Automatically| With traditional deployment, when SRS exits abnormally you have to detect the event and restart the process yourself.
K8s provides liveness probes and recovers the service automatically when an anomaly appears. |
| Migration | Manually |Automatically| With traditional deployment, when changing ECS you need to apply for the new machine, modify the SLB, and install the application yourself.
Based on K8s, service migration, SLB updates, and liveness, readiness and startup probes are handled automatically. |
| Configure | File |Volume| With traditional deployment, you configure each ECS instance manually.
K8s stores configuration data in a ConfigMap, mounts it at a specific path in a Volume, and the Pods consume it, which decouples configuration from ECS scale-up. |
| Scale Up | Manually |Automatically| With traditional deployment, you have to deploy and configure every newly applied ECS instance.
Based on K8s, you just change the Replicas, and you can also enable auto scaling. |
| Service Discovery | Manually |Automatically| With traditional deployment, when the Origin's IP changes you need to put the new IP into the Edge's config file.
With K8s, the change is discovered and pushed to the Edge automatically. |
| SLB | Manually |Automatically| With traditional deployment, when a new Edge server is added you need to update the SLB config manually.
Based on K8s, it will get updated automatically. | + +The following architecture of The K8s deployment: + +![avatar](/img/doc-advanced-guides-k8s-004.png) + +Step 1: Create Deployment and Service for SRS origin and Nginx origin. + +* srs-origin-deploy: create a [k8s deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) of stateless application, which contains SRS Server(with origin config)、Nginx containers and a shared volume for containers to mount. The srs container will write the HLS segment to the shared [volume](https://kubernetes.io/docs/concepts/storage/volumes/). + +* srs-origin-service: create a k8s ClusterIP [Service](https://kubernetes.io/docs/concepts/services-networking/service/) to provide Origin service, which can only be accessed inside the cluster. + +* srs-http-service: create a k8s LoadBalancer [Service](https://kubernetes.io/docs/concepts/services-networking/service/) to provide the SLB based HTTP distribution service of HLS segments powered by Nginx. + +``` +cat < + if [[ ! -f /tmp/html/index.html ]]; then + cp -R ./objs/nginx/html/* /tmp/html + fi && + sleep infinity + +--- + +apiVersion: v1 +kind: Service +metadata: + name: srs-origin-service +spec: + type: ClusterIP + selector: + app: srs-origin + ports: + - name: srs-origin-service-1935-1935 + port: 1935 + protocol: TCP + targetPort: 1935 + +--- + +apiVersion: v1 +kind: Service +metadata: + name: srs-http-service +spec: + type: LoadBalancer + selector: + app: srs-origin + ports: + - name: srs-http-service-80-80 + port: 80 + protocol: TCP + targetPort: 80 + - name: srs-http-service-1985-1985 + port: 1985 + protocol: TCP + targetPort: 1985 +EOF +``` + +> Note: The Origin server only can be accessed inside the cluster, for it’s service type is ClsterIP, the Edge Server can connect to the remote Origin Server through the internal domain srs-origin-service. + +> Note: For share HLS segments, both SRS and Nginx are mounted to the [emptyDir Volume](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) at different paths, the emptyDIr volume is initially empty and the data in the emptyDir is deleted when the pod is removed. + +> Note: As the emptyDir is initially empty, so we start a srs-cp-files container, which will copy srs’s cached files to the shared volume. please refer[1603](https://github.com/ossrs/srs/issues/1603) + +> Note: The srs-http-service provide HLS distribution service with Nginx’s 80 port exported, and provide API service with SRS’s 1985 port exported. + +> Note: Here we choose ACK to create SLB and EIP automatically, or you can specify SLB manually, refer to [Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip) + +Step 2: Create Deployment and Service for SRS edge. + +* srs-edge-config: create a k8s [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/), which stores configuration of SRS Edge Server. + +* sts-edge-deploy: create a [k8s deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), which will deploy a stateless application, and running multi replicas of SRS Edge Server. + +* srs-edge-service: create a [k8s Service](https://kubernetes.io/docs/concepts/services-networking/service/), using SLB to provide external streaming service + +``` +cat < Note: Please change the EIP in the stream address to yourself. you can exec 'kubectl get svc/srs-http-service' or 'kubectl get svc/srs-edge-service’ command to check your EIP address. 
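
For scripting, the assigned address can also be extracted directly with a JSONPath query; this is plain kubectl and uses the Service names created above (on ACK the SLB address is normally reported in the `ip` field):

```bash
# Print the external address of the edge Service (RTMP/HTTP-FLV).
kubectl get svc/srs-edge-service \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'

# Print the external address of the HTTP Service (HLS and API).
kubectl get svc/srs-http-service \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
```
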
+ +> Note: If the SLB and EIP are created automatically, the HLS and RTMP/HTTP-FLV’s EIP are different. you can choose to specify the SLB manually, and both services can use the same SLB, for details please refer to [Use One SLB and EIP for All Streaming Service](./k8s.md#ack-srs-buy-slb-eip). + +## SRS Origin Cluster for a Large Number of Streams + +Coming soon... + +## SRS Cluster Update, Rollback, Gray Release with Zero Downtime + +Coming soon... + +## Useful Tips + +There are some useful tips for you. + +1. [Create K8S Cluster in ACK](./k8s.md#create-k8s-cluster-in-ack): Create your own k8s cluster in ACK. +1. [Publish Demo Streams to SRS](./k8s.md#publish-demo-streams-to-srs): Publish the demo streams to SRS. +1. [Use One SLB and EIP for All Streaming Service](./k8s.md#use-one-slb-and-eip-for-all-streaming-service): Use one SLB for RTMP/HTTP-FLV/HLS streaming service. +1. [Build SRS Origin Cluster as Deployment](./k8s.md#build-srs-origin-cluster-as-deployment): Rather than StatefulSet, we can also use deployment to build Origin Cluster. +1. [Managing Compute Resources for Containers](./k8s.md#managing-compute-resources-for-containers): Resource requests and limits, and how pods requests are scheduled and limits are run. +1. [Auto Reload by Inotify](./k8s.md#auto-reload-by-inotify): SRS supports auto reload by inotify watching ConfigMap changes. + +### Create K8S Cluster in ACK + +Coming soon... + +### Publish Demo Streams to SRS + +Coming soon... + +### Use One SLB for All Streaming Service + +Coming soon... + +### Build SRS Origin Cluster as Deployment + +Coming soon... + +### Managing Compute Resources for Containers + +Coming soon... + +### Auto Reload by Inotify + +Coming soon... + +Winlin 2020.02 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/k8s) + + diff --git a/versioned_docs/version-6.0/doc/learning-path.md b/versioned_docs/version-6.0/doc/learning-path.md new file mode 100644 index 00000000..adde01c2 --- /dev/null +++ b/versioned_docs/version-6.0/doc/learning-path.md @@ -0,0 +1,66 @@ +--- +title: Learning Path +sidebar_label: Learning Path +hide_title: false +hide_table_of_contents: false +--- + +# Learning Path + +A learning path for newcomers, please be sure to follow the documentation. + +## Quick Preview + +First, It takes about 5 to 15 minutes to see what live streaming and WebRTC look like, as the following picture shown:。 + +![](/img/doc-learning-path-001.png) + +> Note: This may seem easy, even if you can open two pages directly from the SRS website, but you must build it yourself with SRS, not just open the online demo page. + +How do you do it?Please refer to [Getting Started](./getting-started.md)。 + +The first step in approaching something new is to have an intuitive experience and feel for it. Although it seems simple, it involves almost the whole chain of things in the audio/video field: +- FFmpeg, a powerful audio/video client that supports publish and pull streaming, codecs encoding and decoding , as well as various processing capabilities. +- Chrome (or browser), H5 is the most convenient client, very convenient for demo and learning, SRS’s features basically have H5 demo. +- Audio and video protocols: RTMP, HTTP-FLV, HLS and WebRTC. +- SRS server, deploying audio and video cloud by itself, or providing cloud services for audio and video, SRS is essentially a kind of server for video cloud. 
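
As a rough sketch of that preview loop (the exact image tag and file paths are assumptions here; follow [Getting Started](./getting-started.md) for the authoritative steps), you can run SRS in Docker, publish with FFmpeg, and play the HTTP-FLV stream in the browser:

```bash
# Run SRS with its default config: RTMP on 1935, API on 1985, HTTP server on 8080.
docker run --rm -it -p 1935:1935 -p 1985:1985 -p 8080:8080 \
  ossrs/srs:6 ./objs/srs -c conf/srs.conf

# Publish a local FLV file as a live stream (SRS ships doc/source.flv in its source tree).
ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream

# Then open http://localhost:8080/live/livestream.flv in the browser player.
```
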
+

> Note: The above diagram is still missing the mobile end. In fact, mobile is just another kind of client and introduces no new protocol. You can also download the SRS live streaming client, try the publish and play flow above, or enter your own server's stream address to play.

## Deeper

Second, understand the typical audio and video application scenarios, about five core scenarios, which takes about 3~7 days in total.

Typical audio and video business scenarios include, but are not limited to:
- All-platform live streaming. The encoders above (FFmpeg/OBS) publish RTMP to a single SRS origin (no cluster needed), which remuxes it into HTTP-FLV and HLS; players then choose HTTP-FLV or HLS according to the platform's player.
- WebRTC call services: one-to-one calls, multi-person calls, conference rooms, etc. WebRTC is the key capability introduced in SRS 4. Going from 1 to 3 seconds of latency to 100 to 300 milliseconds is not just a change of numbers, it is an essential change.
- Moving monitoring and broadcasting business to the cloud. Besides using FFmpeg to actively pull streams into SRS, you can publish with the SRT protocol from the broadcasting industry or the GB28181 protocol from the surveillance industry, and SRS converts them to Internet protocols for playback.
- Low-latency and interactive live streaming. Convert RTMP to WebRTC to reduce playback latency, or publish with WebRTC directly. WebTransport live streaming will be supported in the future.
- Large-scale business. If the business grows rapidly, use an SRS Edge Cluster to support massive numbers of players, or an SRS Origin Cluster to support massive numbers of encoders; you can also migrate to a video cloud smoothly. SRS will also support WebRTC clustering in the future.

Each scenario can be built into a typical application.

## For Details

Third, understand the technical points, application scenarios, code, and problem solving, which takes about 3 to 6 months.

- [Video Columns](./introduction.md#effective-srs), including environment setup, code analysis, and explanations from professional teachers at Voice Academy.
- [Solution Guides](./introduction.md#solution-guides), sharing and exploring how SRS is applied in different scenarios.
- [Deployment Guides](./introduction.md#deployment-guides), how to deploy SRS to implement specific functions.
- [Cluster Guides](./introduction.md#cluster-guides), how to scale a single server to a cluster when the business grows rapidly, and how to serve users in different regions.
- [Integration Guides](./introduction.md#integration-guides), how to integrate with existing systems, authenticate users, and apply security and anti-hotlinking mechanisms.
- [Develop Guides](./introduction.md#develop-guide), concurrency principles, code analysis, the high-performance server framework, performance optimization, etc.

If you can thoroughly understand SRS, it's really not difficult.

Author: winlinvip

Origin Link: https://www.jianshu.com/p/2662df9fe078

From: jianshu.com

The copyright belongs to the author. For commercial reproduction, please contact the author for authorization; for non-commercial reproduction, please cite the source.
+ +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/learning-path) + + diff --git a/versioned_docs/version-6.0/doc/log-rotate.md b/versioned_docs/version-6.0/doc/log-rotate.md new file mode 100644 index 00000000..ec8d91d2 --- /dev/null +++ b/versioned_docs/version-6.0/doc/log-rotate.md @@ -0,0 +1,73 @@ +--- +title: Log Rotate +sidebar_label: Log Rotate +hide_title: false +hide_table_of_contents: false +--- + +# LogRotate + +SRS always writes log to a single log file `srs.log`, so it will become very larger. We can use rotate the log to zip or remove it. + +1. First, move the log file to another tmp log file:```mv objs/srs.log /tmp/srs.`date +%s`.log``` +1. Then, send signal to SRS. SRS will close the previous file fd and reopen the log file:```killall -s SIGUSR1``` +1. Finally, zip or remove the tmp log file. + +## Use logrotate + +Recommend to use [logrotate](https://www.jianshu.com/p/ec7f1626a3d3) to manage log files. + +1. Install logrotate: + +``` +sudo yum install -y logrotate +``` + +1. Config logrotate to manage SRS log file: + +``` +cat << END > /etc/logrotate.d/srs +/usr/local/srs/objs/srs.log { + daily + dateext + compress + rotate 7 + size 1024M + sharedscripts + postrotate + kill -USR1 \`cat /usr/local/srs/objs/srs.pid\` + endscript +} +END +``` + +> Note: Run logrotate manually by `logrotate -f /etc/logrotate.d/srs` + +## CopyTruncate + +For SRS2, we could use [copytruncate](https://unix.stackexchange.com/questions/475524/how-copytruncate-actually-works), +**but it's strongly not recommended** because the logs maybe dropped, so it's only a workaround for server not supported +SIGUSR1 such as SRS2. + +> Yes, SRS3 surely supports copytruncate and it's not recommended. + +The config is bellow, from [PR#1561](https://github.com/ossrs/srs/pull/1561#issuecomment-571408173) by [wnpllrzodiac](https://github.com/wnpllrzodiac): + +``` +cat << END > /etc/logrotate.d/srs +/usr/local/srs/objs/srs.log { + daily + dateext + compress + rotate 7 + size 1024M + copytruncate +} +END +``` + +Winlin 2016.12 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/log-rotate) + + diff --git a/versioned_docs/version-6.0/doc/log.md b/versioned_docs/version-6.0/doc/log.md new file mode 100644 index 00000000..cbae00d4 --- /dev/null +++ b/versioned_docs/version-6.0/doc/log.md @@ -0,0 +1,506 @@ +--- +title: Log +sidebar_label: Log +hide_title: false +hide_table_of_contents: false +--- + +# SRS Log System + +SRS can log to console or file, with level, session oriented log and tracable log. + +## LogTank + +The tank is the container for log, to where write log: + +There are two tank of SRS log, config the `srs_log_tank` to: +* console: Write log to console. Before config parsed, write log to console too. +* file: Default. Write log to file, and the `srs_log_file` specified the path of log file, which default to `./objs/srs.log` + +The log specified config: + +```bash +# the log tank, console or file. +# if console, print log to console. +# if file, write log to file. requires srs_log_file if log to file. +# default: file. +srs_log_tank file; +``` + +## LogLevel + +The level is specified by `srs_log_level` and control which level of log to print: +* verbose: Lots of log, which hurts performance. SRS default to disable it when compile. +* info:Detail log, which huts performance. SRS default to disable it when compile. +* trace: Important log, less and SRS enable it as default level. +* warn: Warning log, without debug log. +* error: Error level. 
+ +The level in config file: + +```bash +# the log level, for all log tanks. +# can be: verbose, info, trace, warn, error +# defualt: trace +srs_log_level trace; +``` + +Notes: +* Enable all high level, for example, enable trace/warn/error when set level to trace. +* The verbose and info level is disabled when compile. Modify the `srs_kernel_log.hpp` when need to enable this. +* Recomment to use trace level. + +## Log of tools + +The feature Transcode/Ingest use external tools, for instance, FFMPEG. SRS use isolate log file for the external tools. + +Set the tools log to `/dev/null` to disable the log: + +```bash +# the logs dir. +# if enabled ffmpeg, each stracoding stream will create a log file. +# "/dev/null" to disable the log. +# default: ./objs +ff_log_dir ./objs; +``` + +## Log Format + +SRS provides session oriented log, to enalbe us to grep specified connection log: + +```bash +[2014-04-04 11:21:29.183][trace][2837][104][11] rtmp get peer ip success. ip=192.168.1.179 +``` + +The log format is: +* [2014-04-04 11:21:29.183] Date of log. The ms is set by the time cache of SRS_TIME_RESOLUTION_MS to avoid performance issue. +* [trace] Level of log. Trace is ok, warn and error maybe something is wrong. +* [2837] The pid of process(SrsPid). The session id maybe duplicated for multiple process. +* [104] The session id(SrsId), unique for the same process. So the pid+session-id is used to identify a connection. +* [11] The errno of system, optional for warn and error. +* rtmp get peer ip success. The description of log. + +The following descript how to analysis the log of SRS. + +### Tracable Log + +SRS can get the whole log when we got something, for example, the ip of client, or the stream for client and time to play, the page url. + +Event for the cluster, SRS can find the session oriented directly. We can get the session of server, and the source id for the session, and the upnode session log util the origin server and the publish id. + +The client also can get the pid and session-id of the connection on server. For example: + +A client play stream: rtmp://dev:1935/live/livestream +![All id for client](/img/doc-guides-log-001.png) +We can get the server ip `192.168.1.107`, the pid `9131` and session id `117`. We can grep on this server directly by keyword "\[9131\]\[117\]": +```bash +[winlin@dev6 srs]$ grep -ina "\[12665\]\[114\]" objs/edge.log +1307:[2014-05-27 19:21:27.276][trace][12665][114] serve client, peer ip=192.168.1.113 +1308:[2014-05-27 19:21:27.284][trace][12665][114] complex handshake with client success +1309:[2014-05-27 19:21:27.284][trace][12665][114] rtmp connect app success. tcUrl=rtmp://dev:1935/live, pageUrl=http://ossrs.net/players/srs_player.html?vhost=dev&stream=livestream&server=dev&port=1935, swfUrl=http://ossrs.net/players/srs_player/release/srs_player.swf?_version=1.21, schema=rtmp, vhost=__defaultVhost__, port=1935, app=live +1310:[2014-05-27 19:21:27.486][trace][12665][114] set ack window size to 2500000 +1311:[2014-05-27 19:21:27.486][trace][12665][114] identify ignore messages except AMF0/AMF3 command message. type=0x5 +1312:[2014-05-27 19:21:27.501][trace][12665][114] ignored. set buffer length to 800 +1313:[2014-05-27 19:21:27.501][trace][12665][114] identify ignore messages except AMF0/AMF3 command message. type=0x4 +1314:[2014-05-27 19:21:27.518][trace][12665][114] identity client type=play, stream_name=livestream, duration=-1.00 +1315:[2014-05-27 19:21:27.518][trace][12665][114] identify client success. 
type=Play, stream_name=livestream, duration=-1.00 +1316:[2014-05-27 19:21:27.518][trace][12665][114] set output chunk size to 4096 +1317:[2014-05-27 19:21:27.518][trace][12665][114] source url=__defaultVhost__/live/livestream, ip=192.168.1.113, cache=1, is_edge=1, id=-1 +1318:[2014-05-27 19:21:27.518][trace][12665][114] dispatch cached gop success. count=0, duration=0 +1319:[2014-05-27 19:21:27.518][trace][12665][114] create consumer, queue_size=30.00, tba=0, tbv=0 +1322:[2014-05-27 19:21:27.518][trace][12665][114] ignored. set buffer length to 800 +1333:[2014-05-27 19:21:27.718][trace][12665][114] update source_id=115 +1334:[2014-05-27 19:21:27.922][trace][12665][114] -> PLA time=301, msgs=12, okbps=1072,0,0, ikbps=48,0,0 +``` + +While the source id is 115(`source_id=115`), then find this session: +``` +[winlin@dev6 srs]$ grep -ina "\[12665\]\[115\]" objs/edge.log +1320:[2014-05-27 19:21:27.518][trace][12665][115] edge connected, can_publish=1, url=rtmp://dev:1935/live/livestream, server=127.0.0.1:19350 +1321:[2014-05-27 19:21:27.518][trace][12665][115] connect to server success. server=127.0.0.1, ip=127.0.0.1, port=19350 +1323:[2014-05-27 19:21:27.519][trace][12665][115] complex handshake with server success. +1324:[2014-05-27 19:21:27.561][trace][12665][115] set ack window size to 2500000 +1325:[2014-05-27 19:21:27.602][trace][12665][115] drop unknown message, type=6 +1326:[2014-05-27 19:21:27.602][trace][12665][115] connected, version=0.9.119, ip=127.0.0.1, pid=12633, id=141 +1327:[2014-05-27 19:21:27.602][trace][12665][115] set output chunk size to 60000 +1328:[2014-05-27 19:21:27.602][trace][12665][115] edge change from 100 to state 101 (ingest connected). +1329:[2014-05-27 19:21:27.603][trace][12665][115] set input chunk size to 60000 +1330:[2014-05-27 19:21:27.603][trace][12665][115] dispatch metadata success. +1331:[2014-05-27 19:21:27.603][trace][12665][115] update video sequence header success. size=46 +1332:[2014-05-27 19:21:27.603][trace][12665][115] update audio sequence header success. size=4 +1335:[2014-05-27 19:21:37.653][trace][12665][115] <- EIG time=10163, okbps=0,0,0, ikbps=234,254,231 +``` + +We can finger out the upnode server session info `connected, version=0.9.119, ip=127.0.0.1, pid=12633, id=141`, then to grep on the upnode server: +``` +[winlin@dev6 srs]$ grep -ina "\[12633\]\[141\]" objs/srs.log +783:[2014-05-27 19:21:27.518][trace][12633][141] serve client, peer ip=127.0.0.1 +784:[2014-05-27 19:21:27.519][trace][12633][141] complex handshake with client success +785:[2014-05-27 19:21:27.561][trace][12633][141] rtmp connect app success. tcUrl=rtmp://dev:1935/live, pageUrl=, swfUrl=, schema=rtmp, vhost=__defaultVhost__, port=1935, app=live +786:[2014-05-27 19:21:27.561][trace][12633][141] set ack window size to 2500000 +787:[2014-05-27 19:21:27.561][trace][12633][141] identify ignore messages except AMF0/AMF3 command message. type=0x5 +788:[2014-05-27 19:21:27.602][trace][12633][141] identity client type=play, stream_name=livestream, duration=-1.00 +789:[2014-05-27 19:21:27.602][trace][12633][141] identify client success. type=Play, stream_name=livestream, duration=-1.00 +790:[2014-05-27 19:21:27.602][trace][12633][141] set output chunk size to 60000 +791:[2014-05-27 19:21:27.602][trace][12633][141] source url=__defaultVhost__/live/livestream, ip=127.0.0.1, cache=1, is_edge=0, id=131 +792:[2014-05-27 19:21:27.602][trace][12633][141] dispatch cached gop success. 
count=241, duration=3638 +793:[2014-05-27 19:21:27.602][trace][12633][141] create consumer, queue_size=30.00, tba=44100, tbv=1000 +794:[2014-05-27 19:21:27.602][trace][12633][141] ignored. set buffer length to 65564526 +795:[2014-05-27 19:21:27.604][trace][12633][141] set input chunk size to 60000 +798:[2014-05-27 19:21:32.420][trace][12633][141] -> PLA time=4809, msgs=14, okbps=307,0,0, ikbps=5,0,0 +848:[2014-05-27 19:22:54.414][trace][12633][141] -> PLA time=86703, msgs=12, okbps=262,262,0, ikbps=0,0,0 +867:[2014-05-27 19:22:57.225][trace][12633][141] update source_id=149 +``` + +And the source id 149(`source_id=149`), that is the session id of encoder: +``` +[winlin@dev6 srs]$ grep -ina "\[12633\]\[149\]" objs/srs.log +857:[2014-05-27 19:22:56.919][trace][12633][149] serve client, peer ip=127.0.0.1 +858:[2014-05-27 19:22:56.921][trace][12633][149] complex handshake with client success +859:[2014-05-27 19:22:56.960][trace][12633][149] rtmp connect app success. tcUrl=rtmp://127.0.0.1:19350/live?vhost=__defaultVhost__, pageUrl=, swfUrl=, schema=rtmp, vhost=__defaultVhost__, port=19350, app=live +860:[2014-05-27 19:22:57.040][trace][12633][149] identify client success. type=publish(FMLEPublish), stream_name=livestream, duration=-1.00 +861:[2014-05-27 19:22:57.040][trace][12633][149] set output chunk size to 60000 +862:[2014-05-27 19:22:57.040][trace][12633][149] source url=__defaultVhost__/live/livestream, ip=127.0.0.1, cache=1, is_edge=0, id=-1 +863:[2014-05-27 19:22:57.123][trace][12633][149] set input chunk size to 60000 +864:[2014-05-27 19:22:57.210][trace][12633][149] dispatch metadata success. +865:[2014-05-27 19:22:57.210][trace][12633][149] update video sequence header success. size=46 +866:[2014-05-27 19:22:57.210][trace][12633][149] update audio sequence header success. size=4 +870:[2014-05-27 19:23:04.970][trace][12633][149] <- CPB time=8117, okbps=4,0,0, ikbps=320,0,0 +``` + +Encoder => Origin => Edge => Player, the whole link log we got directly! + +### Reverse Tracable Log + +The tracable is finger log from the player to the origin. The reverse tracable log is from the origin to the edge and player. + +For example, there is a origin and a edge, to grep the log on origin by keyword `edge-srs`: + +``` +[winlin@dev6 srs]$ grep -ina "edge-srs" objs/srs.origin.log +30:[2014-08-06 09:41:31.649][trace][21433][107] edge-srs ip=192.168.1.159, version=0.9.189, pid=21435, id=108 +``` + +We get all edge srs which connectted to this origin, this edge ip is 192.168.1.159, pid is 21435, session id is 108. Then grep the log on the edge: + +``` +[winlin@dev6 srs]$ grep --color -ina "\[108\]" objs/srs.log +29:[2014-08-06 10:09:34.579][trace][22314][108] edge pull connected, can_publish=1, url=rtmp://dev:1935/live/livestream, server=127.0.0.1:1936 +30:[2014-08-06 10:09:34.591][trace][22314][108] complex handshake success. +31:[2014-08-06 10:09:34.671][trace][22314][108] connected, version=0.9.190, ip=127.0.0.1, pid=22288, id=107 +32:[2014-08-06 10:09:34.672][trace][22314][108] out chunk size to 60000 +33:[2014-08-06 10:09:34.672][trace][22314][108] ignore the disabled transcode: +34:[2014-08-06 10:09:34.672][trace][22314][108] edge change from 100 to state 101 (pull). 
+35:[2014-08-06 10:09:34.672][trace][22314][108] input chunk size to 60000 +36:[2014-08-06 10:09:34.672][trace][22314][108] got metadata, width=768, height=320, vcodec=7, acodec=10 +37:[2014-08-06 10:09:34.672][trace][22314][108] 46B video sh, codec(7, profile=100, level=32, 0x0, 0kbps, 0fps, 0s) +38:[2014-08-06 10:09:34.672][trace][22314][108] 4B audio sh, codec(10, profile=1, 2channels, 0kbps, 44100HZ), flv(16bits, 2channels, 44100HZ) +39:[2014-08-06 10:09:34.779][trace][22314][107] update source_id=108[108] +46:[2014-08-06 10:09:36.853][trace][22314][110] source url=__defaultVhost__/live/livestream, ip=192.168.1.179, cache=1, is_edge=1, source_id=108[108] +50:[2014-08-06 10:09:44.949][trace][22314][108] <- EIG time=10293, okbps=3,0,0, ikbps=441,0,0 +53:[2014-08-06 10:09:47.805][warn][22314][108][4] origin disconnected, retry. ret=1007 +``` + +On this edge, we finger out there is 2 connections which connected on the source, by keyword `source_id=108`: + +``` +39:[2014-08-06 10:09:34.779][trace][22314][107] update source_id=108[108] +46:[2014-08-06 10:09:36.853][trace][22314][110] source url=__defaultVhost__/live/livestream, ip=192.168.1.179, cache=1, is_edge=1, source_id=108[108] +``` + +There are 2 connections connected on this source, 107 and 110. + +### Any Tracable Log + +For SRS support tracalbe and reverse tracable log, so we can got the whold stream delivery log at any point. + +For example, a cluster has a origin and an edge, origin ingest stream. + +When I know the stream name, or any information, for example, we can grep the keyword `type=Play` for all client to play stream on origin server: + +``` +[winlin@dev6 srs]$ grep -ina "type=Play" objs/srs.origin.log +31:[2014-08-06 10:09:34.671][trace][22288][107] client identified, type=Play, stream_name=livestream, duration=-1.00 +``` + +We got session id 107 which play the stream on origin: + +``` +[winlin@dev6 srs]$ grep -ina "\[107\]" objs/srs.origin.log +27:[2014-08-06 10:09:34.589][trace][22288][107] RTMP client ip=127.0.0.1 +28:[2014-08-06 10:09:34.591][trace][22288][107] complex handshake success +29:[2014-08-06 10:09:34.631][trace][22288][107] connect app, tcUrl=rtmp://dev:1935/live, pageUrl=http://www.ossrs.net/players/srs_player.html?vhost=dev&stream=livestream&server=dev&port=1935, swfUrl=http://www.ossrs.net/players/srs_player/release/srs_player.swf?_version=1.23, schema=rtmp, vhost=__defaultVhost__, port=1935, app=live, args=(obj) +30:[2014-08-06 10:09:34.631][trace][22288][107] edge-srs ip=192.168.1.159, version=0.9.190, pid=22314, id=108 +31:[2014-08-06 10:09:34.671][trace][22288][107] client identified, type=Play, stream_name=livestream, duration=-1.00 +32:[2014-08-06 10:09:34.671][trace][22288][107] out chunk size to 60000 +33:[2014-08-06 10:09:34.671][trace][22288][107] source url=__defaultVhost__/live/livestream, ip=127.0.0.1, cache=1, is_edge=0, source_id=105[105] +34:[2014-08-06 10:09:34.672][trace][22288][107] dispatch cached gop success. count=307, duration=4515 +35:[2014-08-06 10:09:34.672][trace][22288][107] create consumer, queue_size=30.00, tba=44100, tbv=25 +36:[2014-08-06 10:09:34.672][trace][22288][107] ignored. set buffer length to 1000 +37:[2014-08-06 10:09:34.673][trace][22288][107] input chunk size to 60000 +40:[2014-08-06 10:09:44.748][trace][22288][107] -> PLA time=10007, msgs=0, okbps=464,0,0, ikbps=3,0,0 +41:[2014-08-06 10:09:47.805][warn][22288][107][104] client disconnect peer. 
ret=1004 +``` + +The soruce id is 105, specified by `source_id=105`: + +``` +[winlin@dev6 srs]$ grep --color -ina "\[105\]" objs/srs.origin.log +16:[2014-08-06 10:09:30.331][trace][22288][105] RTMP client ip=127.0.0.1 +17:[2014-08-06 10:09:30.331][trace][22288][105] srand initialized the random. +18:[2014-08-06 10:09:30.332][trace][22288][105] simple handshake success. +19:[2014-08-06 10:09:30.373][trace][22288][105] connect app, tcUrl=rtmp://127.0.0.1:1936/live?vhost=__defaultVhost__, pageUrl=, swfUrl=, schema=rtmp, vhost=__defaultVhost__, port=1936, app=live, args=null +21:[2014-08-06 10:09:30.417][trace][22288][105] client identified, type=publish(FMLEPublish), stream_name=livestream, duration=-1.00 +22:[2014-08-06 10:09:30.417][trace][22288][105] out chunk size to 60000 +23:[2014-08-06 10:09:30.418][trace][22288][105] source url=__defaultVhost__/live/livestream, ip=127.0.0.1, cache=1, is_edge=0, source_id=-1[-1] +24:[2014-08-06 10:09:30.466][trace][22288][105] got metadata, width=768, height=320, vcodec=7, acodec=10 +25:[2014-08-06 10:09:30.466][trace][22288][105] 46B video sh, codec(7, profile=100, level=32, 0x0, 0kbps, 0fps, 0s) +26:[2014-08-06 10:09:30.466][trace][22288][105] 4B audio sh, codec(10, profile=1, 2channels, 0kbps, 44100HZ), flv(16bits, 2channels, 44100HZ) +33:[2014-08-06 10:09:34.671][trace][22288][107] source url=__defaultVhost__/live/livestream, ip=127.0.0.1, cache=1, is_edge=0, source_id=105[105] +38:[2014-08-06 10:09:40.732][trace][22288][105] <- CPB time=10100, okbps=3,0,0, ikbps=332,0,0 +``` + +This source is the ingest stream source, we got the root source. + +And we got 107 which is srs edge connection, by keyword `edge-srs`: + +``` +30:[2014-08-06 10:09:34.631][trace][22288][107] edge-srs ip=192.168.1.159, version=0.9.190, pid=22314, id=108 +``` + +Find the log on edge, the session id is 108: + +``` +[winlin@dev6 srs]$ grep --color -ina "\[108\]" objs/srs.log +29:[2014-08-06 10:09:34.579][trace][22314][108] edge pull connected, can_publish=1, url=rtmp://dev:1935/live/livestream, server=127.0.0.1:1936 +30:[2014-08-06 10:09:34.591][trace][22314][108] complex handshake success. +31:[2014-08-06 10:09:34.671][trace][22314][108] connected, version=0.9.190, ip=127.0.0.1, pid=22288, id=107 +32:[2014-08-06 10:09:34.672][trace][22314][108] out chunk size to 60000 +33:[2014-08-06 10:09:34.672][trace][22314][108] ignore the disabled transcode: +34:[2014-08-06 10:09:34.672][trace][22314][108] edge change from 100 to state 101 (pull). +35:[2014-08-06 10:09:34.672][trace][22314][108] input chunk size to 60000 +36:[2014-08-06 10:09:34.672][trace][22314][108] got metadata, width=768, height=320, vcodec=7, acodec=10 +37:[2014-08-06 10:09:34.672][trace][22314][108] 46B video sh, codec(7, profile=100, level=32, 0x0, 0kbps, 0fps, 0s) +38:[2014-08-06 10:09:34.672][trace][22314][108] 4B audio sh, codec(10, profile=1, 2channels, 0kbps, 44100HZ), flv(16bits, 2channels, 44100HZ) +39:[2014-08-06 10:09:34.779][trace][22314][107] update source_id=108[108] +46:[2014-08-06 10:09:36.853][trace][22314][110] source url=__defaultVhost__/live/livestream, ip=192.168.1.179, cache=1, is_edge=1, source_id=108[108] +50:[2014-08-06 10:09:44.949][trace][22314][108] <- EIG time=10293, okbps=3,0,0, ikbps=441,0,0 +53:[2014-08-06 10:09:47.805][warn][22314][108][4] origin disconnected, retry. 
ret=1007 +``` + +We got the edge source 108, and there are 2 clients connected on this source 107 and 110, specified by keyword `source_id=108`: + +``` +[winlin@dev6 srs]$ grep --color -ina "\[107\]" objs/srs.log +18:[2014-08-06 10:09:34.281][trace][22314][107] RTMP client ip=192.168.1.179 +19:[2014-08-06 10:09:34.282][trace][22314][107] srand initialized the random. +20:[2014-08-06 10:09:34.291][trace][22314][107] complex handshake success +21:[2014-08-06 10:09:34.291][trace][22314][107] connect app, tcUrl=rtmp://dev:1935/live, pageUrl=http://www.ossrs.net/players/srs_player.html?vhost=dev&stream=livestream&server=dev&port=1935, swfUrl=http://www.ossrs.net/players/srs_player/release/srs_player.swf?_version=1.23, schema=rtmp, vhost=__defaultVhost__, port=1935, app=live, args=null +22:[2014-08-06 10:09:34.532][trace][22314][107] ignored. set buffer length to 800 +23:[2014-08-06 10:09:34.568][trace][22314][107] client identified, type=Play, stream_name=livestream, duration=-1.00 +24:[2014-08-06 10:09:34.568][trace][22314][107] out chunk size to 60000 +25:[2014-08-06 10:09:34.568][trace][22314][107] source url=__defaultVhost__/live/livestream, ip=192.168.1.179, cache=1, is_edge=1, source_id=-1[-1] +26:[2014-08-06 10:09:34.579][trace][22314][107] dispatch cached gop success. count=0, duration=0 +27:[2014-08-06 10:09:34.579][trace][22314][107] create consumer, queue_size=30.00, tba=0, tbv=0 +28:[2014-08-06 10:09:34.579][trace][22314][107] ignored. set buffer length to 800 +39:[2014-08-06 10:09:34.779][trace][22314][107] update source_id=108[108] +54:[2014-08-06 10:09:47.805][trace][22314][107] cleanup when unpublish +55:[2014-08-06 10:09:47.805][trace][22314][107] edge change from 101 to state 0 (init). +56:[2014-08-06 10:09:47.805][warn][22314][107][9] client disconnect peer. ret=1004 +``` + +The 107 is a client which trigger the edge to fetch stream from origin. Find 110: + +``` +[winlin@dev6 srs]$ grep --color -ina "\[110\]" objs/srs.log +40:[2014-08-06 10:09:36.609][trace][22314][110] RTMP client ip=192.168.1.179 +41:[2014-08-06 10:09:36.613][trace][22314][110] complex handshake success +42:[2014-08-06 10:09:36.613][trace][22314][110] connect app, tcUrl=rtmp://dev:1935/live, pageUrl=http://www.ossrs.net/players/srs_player.html?vhost=dev&stream=livestream&server=dev&port=1935, swfUrl=http://www.ossrs.net/players/srs_player/release/srs_player.swf?_version=1.23, schema=rtmp, vhost=__defaultVhost__, port=1935, app=live, args=null +43:[2014-08-06 10:09:36.835][trace][22314][110] ignored. set buffer length to 800 +44:[2014-08-06 10:09:36.853][trace][22314][110] client identified, type=Play, stream_name=livestream, duration=-1.00 +45:[2014-08-06 10:09:36.853][trace][22314][110] out chunk size to 60000 +46:[2014-08-06 10:09:36.853][trace][22314][110] source url=__defaultVhost__/live/livestream, ip=192.168.1.179, cache=1, is_edge=1, source_id=108[108] +47:[2014-08-06 10:09:36.853][trace][22314][110] dispatch cached gop success. count=95, duration=1573 +48:[2014-08-06 10:09:36.853][trace][22314][110] create consumer, queue_size=30.00, tba=44100, tbv=25 +49:[2014-08-06 10:09:36.853][trace][22314][110] ignored. set buffer length to 800 +51:[2014-08-06 10:09:45.919][trace][22314][110] -> PLA time=8759, msgs=21, okbps=461,0,0, ikbps=3,0,0 +52:[2014-08-06 10:09:46.247][warn][22314][110][104] client disconnect peer. ret=1004 +``` + +The 110 is a flash player client. + +### System info. 
+ +The system info and port listen at: + +```bash +[winlin@dev6 srs]$ ./objs/srs -c console.conf +[winlin@dev6 srs]$ cat objs/srs.log +[2014-04-04 11:39:24.176][trace][0][0] config parsed EOF +[2014-04-04 11:39:24.176][trace][0][0] log file is ./objs/srs.log +[2014-04-04 11:39:24.177][trace][0][0] srs 0.9.46 +[2014-04-04 11:39:24.177][trace][0][0] uname: Linux dev6 2.6.32-71.el6.x86_64 +#1 SMP Fri May 20 03:51:51 BST 2011 x86_64 x86_64 x86_64 GNU/Linux +[2014-04-04 11:39:24.177][trace][0][0] build: 2014-04-03 18:38:23, little-endian +[2014-04-04 11:39:24.177][trace][0][0] configure: --dev --with-hls --with-nginx +--with-ssl --with-ffmpeg --with-http-callback --with-http-server --with-http-api +--with-librtmp --with-bwtc --with-research --with-utest --without-gperf --without-gmc +--without-gmp --without-gcp --without-gprof --without-arm-ubuntu12 --jobs=1 +--prefix=/usr/local/srs +[2014-04-04 11:39:24.177][trace][0][0] write pid=4021 to ./objs/srs.pid success! +[2014-04-04 11:39:24.177][trace][100][16] server started, listen at port=1935, type=0, fd=6 +[2014-04-04 11:39:24.177][trace][100][16] server started, listen at port=1985, type=1, fd=7 +[2014-04-04 11:39:24.177][trace][100][16] server started, listen at port=8080, type=2, fd=8 +[2014-04-04 11:39:24.177][trace][101][16] listen cycle start, port=1935, type=0, fd=6 +[2014-04-04 11:39:24.177][trace][102][11] listen cycle start, port=1985, type=1, fd=7 +[2014-04-04 11:39:24.177][trace][103][11] listen cycle start, port=8080, type=2, fd=8 +[2014-04-04 11:39:26.799][trace][0][11] get a signal, signo=2 +[2014-04-04 11:39:26.799][trace][0][11] user terminate program +``` + +It means: +* The log file path:[2014-04-04 11:39:24.176][trace][0][0] log file is ./objs/srs.log +* SRS version:[2014-04-04 11:39:24.177][trace][0][0] srs 0.9.46 +* Compile info:[2014-04-04 11:39:24.177][trace][0][0] uname: Linux dev6 2.6.32-71.el6.x86_64 +#1 SMP Fri May 20 03:51:51 BST 2011 x86_64 x86_64 x86_64 GNU/Linux +* Compile date:[2014-04-04 11:39:24.177][trace][0][0] build: 2014-04-03 18:38:23, little-endian +* Build options:[2014-04-04 11:39:24.177][trace][0][0] configure: --dev --with-hls --with-nginx +--with-ssl --with-ffmpeg --with-http-callback --with-http-server --with-http-api --with-librtmp +--with-bwtc --with-research --with-utest --without-gperf --without-gmc --without-gmp +--without-gcp --without-gprof --without-arm-ubuntu12 --jobs=1 --prefix=/usr/local/srs +* PID file:[2014-04-04 11:39:24.177][trace][0][0] write pid=4021 to ./objs/srs.pid success! +* Listen at port 1935(RTMP):[2014-04-04 11:39:24.177][trace][100][16] server started, listen at port=1935, type=0, fd=6 +* Listen at port 1985(HTTP接口):[2014-04-04 11:39:24.177][trace][100][16] server started, listen at port=1985, type=1, fd=7 +* Listen at port 8080(HTTP服务):[2014-04-04 11:39:24.177][trace][100][16] server started, listen at port=8080, type=2, fd=8 +* Ready for connections:[2014-04-04 11:39:24.177][trace][101][16] listen cycle start, port=1935, type=0, fd=6 + +### Session oriented log + +SRS provides session oriented log. + +For example, SRS running for 365 days, served 10000000 clients, how to find a specified client log? + +We need something to grep, for instance, we know the stream url: `rtmp://192.168.1.107:1935/live/livestream`, then we can find the keyword to grep by research the publish log: + +```bash +[2014-04-04 11:56:06.074][trace][104][11] rtmp get peer ip success. 
ip=192.168.1.179, +send_to=30000000us, recv_to=30000000us +[2014-04-04 11:56:06.080][trace][104][11] srand initialized the random. +[2014-04-04 11:56:06.082][trace][104][11] simple handshake with client success. +[2014-04-04 11:56:06.083][trace][104][11] rtmp connect app success. +tcUrl=rtmp://192.168.1.107:1935/live, pageUrl=, swfUrl=rtmp://192.168.1.107:1935/live, +schema=rtmp, vhost=__defaultVhost__, port=1935, app=live +[2014-04-04 11:56:06.288][trace][104][11] set ack window size to 2500000 +[2014-04-04 11:56:06.288][trace][104][11] identify ignore messages except AMF0/AMF3 +command message. type=0x5 +[2014-04-04 11:56:06.288][trace][104][11] identify client success. +type=publish(FMLEPublish), stream_name=livestream +``` + +The keyword to grep: +* Use keyword `identify client success`, then `type=publish`, then `livestream`. +* Or, use keyword `identify client success. type=publish`, then `livestream`. +* We can grep all `identify client success. type=publish`, and research the result. + +For example: + +```bash +[winlin@dev6 srs]$ cat objs/srs.log|grep -ina "identify client success. type=publish" +20:[2014-04-04 11:56:06.288][trace][104][11] identify client success. type=publish, stream_name=livestream +43:[2014-04-04 11:56:18.138][trace][105][11] identify client success. type=publish, stream_name=winlin +65:[2014-04-04 11:56:29.531][trace][106][11] identify client success. type=publish, stream_name=livestream +86:[2014-04-04 11:56:35.966][trace][107][11] identify client success. type=publish, stream_name=livestream +``` + +There are some publish stream, and we can grep specified streamname. + +```bash +[winlin@dev6 srs]$ cat objs/srs.log|grep -ina "identify client success. type=publish"|grep -a "livestream" +20:[2014-04-04 11:56:06.288][trace][104][11] identify client success. type=publish, stream_name=livestream +65:[2014-04-04 11:56:29.531][trace][106][11] identify client success. type=publish, stream_name=livestream +86:[2014-04-04 11:56:35.966][trace][107][11] identify client success. type=publish, stream_name=livestream +``` + +We can filter the result by time, for example, we use session id 104 to grep by keyword `\[104\]\[`: +```bash +[winlin@dev6 srs]$ cat objs/srs.log |grep -ina "\[104\]\[" +14:[2014-04-04 11:56:06.074][trace][104][11] rtmp get peer ip success. ip=192.168.1.179, +send_to=30000000us, recv_to=30000000us +15:[2014-04-04 11:56:06.080][trace][104][11] srand initialized the random. +16:[2014-04-04 11:56:06.082][trace][104][11] simple handshake with client success. +17:[2014-04-04 11:56:06.083][trace][104][11] rtmp connect app success. +tcUrl=rtmp://192.168.1.107:1935/live, pageUrl=, swfUrl=rtmp://192.168.1.107:1935/live, +schema=rtmp, vhost=__defaultVhost__, port=1935, app=live +18:[2014-04-04 11:56:06.288][trace][104][11] set ack window size to 2500000 +19:[2014-04-04 11:56:06.288][trace][104][11] identify ignore messages except AMF0/AMF3 +command message. type=0x5 +20:[2014-04-04 11:56:06.288][trace][104][11] identify client success. +type=publish(FMLEPublish), stream_name=livestream +21:[2014-04-04 11:56:06.288][trace][104][11] set output chunk size to 60000 +22:[2014-04-04 11:56:06.288][trace][104][11] set chunk_size=60000 success +23:[2014-04-04 11:56:07.397][trace][104][11] <- time=225273, obytes=4168, ibytes=7607, okbps=32, ikbps=59 +24:[2014-04-04 11:56:07.398][trace][104][11] dispatch metadata success. +25:[2014-04-04 11:56:07.398][trace][104][11] process onMetaData message success. 
+26:[2014-04-04 11:56:07.398][trace][104][11] update video sequence header success. size=67 +27:[2014-04-04 11:56:08.704][trace][104][11] <- time=226471, obytes=4168, ibytes=36842, okbps=13, ikbps=116 +28:[2014-04-04 11:56:09.901][trace][104][11] <- time=227671, obytes=4168, ibytes=67166, okbps=9, ikbps=152 +29:[2014-04-04 11:56:11.102][trace][104][11] <- time=228869, obytes=4168, ibytes=97481, okbps=6, ikbps=155 +30:[2014-04-04 11:56:11.219][trace][104][11] clear cache/metadata/sequence-headers when unpublish. +31:[2014-04-04 11:56:11.219][trace][104][11] control message(unpublish) accept, retry stream service. +32:[2014-04-04 11:56:11.219][trace][104][11] ignore AMF0/AMF3 command message. +33:[2014-04-04 11:56:11.419][trace][104][11] drop the AMF0/AMF3 command message, command_name=deleteStream +34:[2014-04-04 11:56:11.420][trace][104][11] ignore AMF0/AMF3 command message. +35:[2014-04-04 11:56:12.620][error][104][104] recv client message failed. ret=207(Connection reset by peer) +36:[2014-04-04 11:56:12.620][error][104][104] identify client failed. ret=207(Connection reset by peer) +37:[2014-04-04 11:56:12.620][warn][104][104] client disconnect peer. ret=204 +[winlin@dev6 srs]$ +``` + +Then we got the log for this session, and client closed connection by log: `36:[2014-04-04 11:56:12.620][error][104][104] identify client failed. ret=207(Connection reset by peer)`. + +## Daemon + +When default SRS only print less log? Because SRS default use `conf/srs.conf` in daemon mode and print to log file. + +When enable daemon, then no need to start by nohup: + +```bash +# whether start as deamon +# default: on +daemon on; +``` + +Use `conf/console.conf` to not start in daemon and log to conosle. + +```bash +# no-daemon and write log to console config for srs. +# @see full.conf for detail config. + +listen 1935; +daemon off; +srs_log_tank console; +vhost __defaultVhost__ { +} +``` + +Startup command: + +```bash +./objs/srs -c conf/console.conf +``` + +To startup with default config `conf/srs.conf`: + +```bash +[winlin@dev6 srs]$ ./objs/srs -c conf/srs.conf +[2014-04-14 12:12:57.775][trace][0][0] config parse complete +[2014-04-14 12:12:57.775][trace][0][0] write log to file ./objs/srs.log +[2014-04-14 12:12:57.775][trace][0][0] you can: tailf ./objs/srs.log +[2014-04-14 12:12:57.775][trace][0][0] @see https://ossrs.io/lts/en-us/docs/v4/doc/log +``` + +Winlin 2014.10 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/log) + + diff --git a/versioned_docs/version-6.0/doc/low-latency.md b/versioned_docs/version-6.0/doc/low-latency.md new file mode 100644 index 00000000..6c5a0582 --- /dev/null +++ b/versioned_docs/version-6.0/doc/low-latency.md @@ -0,0 +1,222 @@ +--- +title: Low Latency +sidebar_label: Low Latency +hide_title: false +hide_table_of_contents: false +--- + +# Low Latency Live Stream + +The RTMP and HLS can cover all requires for internet live stream, +read [DeliveryHLS](./hls.md), +while RTMP is designed for low latency live stream. + +The deploy for low latency, read [Usage: Realtime](./sample-realtime.md) + +## Use Scenario + +The low latency use scenario: +* Live show. +* Video meeting. +* Other, for example, monitor, education. + +## Latency + +RTMP is design for low latency: +* Adobe flash player is good at play RTMP stream. +* RTMP is stable enough for longtime publish and play on PC. +* Low latency, about 0.8-3s. +* For RTMP is base on TCP, the latency maybe very large for network issue. 
+ +## HLS LowLatency + +HLS has a bigger delay than RTMP, usually more than 5 seconds. If not set up properly, it can be over 15 seconds. + +If you want to reduce the HLS delay, please check out [HLS LowLatency](./hls.md#hls-low-latency). + +## Benchmark + +We use the clock of mobile phone to test the latency, +read [RTMP latency benchmark](http://blog.csdn.net/win_lin/article/details/12615591) + +When netowork is ok: +* RTMP can ensure 0.8-3s latency. +* The RTMP cluster add 0.3s latency for each level. +* The latency of nginx-rtmp is larger than SRS, maybe the cache or multiple process issue. +* The gop cache always make the latency larger, but SRS can disable the gop cache. +* The bufferTime of flash client should set to small, see NetStream.bufferTime. + +## Min-Latency + +When min-latency is enabled, SRS will diable the mr(merged-read) and use timeout cond wait, to send about 1-2 video packets when got it. + +We can got 0.1s latency for vp6 video only stream, read [#257](https://github.com/ossrs/srs/issues/257#issuecomment-66773208). The config: + +``` +vhost mrw.srs.com { + # whether enable min delay mode for vhost. + # for min latence mode: + # 1. disable the publish.mr for vhost. + # 2. use timeout for cond wait for consumer queue. + # @see https://github.com/ossrs/srs/issues/257 + # default: off + min_latency off; +} +``` + +For example to deploy realtime stream, read [wiki]([EN](./sample-realtime.md), [CN](./sample-realtime.md)). + +## Merged-Read + +The perfromance of RTMP read is very low, because we must read 1byte chunk type, then chunk header, finally payload. So SRS 1.0 only supports 1000 publisher, and 2700 player. SRS 2.0 supports 4500 publisher, and 10000 player. + +To improve the read performance, SRS2.0 introduced the merged-read, which read Nms packets from socket then parsed in buffer. The config: + +``` +# the MR(merged-read) setting for publisher. +vhost mrw.srs.com { + # the config for FMLE/Flash publisher, which push RTMP to SRS. + publish { + # about MR, read https://github.com/ossrs/srs/issues/241 + # when enabled the mr, SRS will read as large as possible. + # default: off + mr off; + # the latency in ms for MR(merged-read), + # the performance+ when latency+, and memory+, + # memory(buffer) = latency * kbps / 8 + # for example, latency=500ms, kbps=3000kbps, each publish connection will consume + # memory = 500 * 3000 / 8 = 187500B = 183KB + # when there are 2500 publisher, the total memory of SRS atleast: + # 183KB * 2500 = 446MB + # the value recomment is [300, 2000] + # default: 350 + mr_latency 350; + } +} +``` + +That is, when merged-read enabled, the read buffer of SRS is `latency` ms, the latency also increase to this value. + +For low latency, user should disable merged-read, SRS will recv and parse the packet immediately. + +## Merged-Write + +SRS always use merged-write to send packets. This algorithm can improve about 500% performance, for example, SRS 1.0 writev a packet which supports 2700 clients, while SRS 2.0 writev multiple packets and supports 10000 clients. + +User can config the merged write pacets in ms, recomment to use default value: + +``` +# the MW(merged-write) settings for player. +vhost mrw.srs.com { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # set the MW(merged-write) latency in ms. + # SRS always set mw on, so we just set the latency value. 
+ # the latency of stream >= mw_latency + mr_latency + # the value recomment is [300, 1800] + # default: 350 + mw_latency 350; + } +} +``` + +User can config this to 100ms for very low latency. + +## GOP-Cache + +The gop is the gop between two I frame. + +SRS use gop-cache to cache the last gop for the live stream, +when client play stream, SRS can send the last gop to client +to enable the client to start play immediately. + +Config of srs: + +```bash +# the listen ports, split by space. +listen 1935; +vhost __defaultVhost__ { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # whether cache the last gop. + # if on, cache the last gop and dispatch to client, + # to enabled fast startup for client, client play immediately. + # if off, send the latest media data to client, + # client need to wait for the next Iframe to decode and show the video. + # set to off if requires min delay; + # set to on if requires client fast startup. + # default: on + gop_cache off; + } +} +``` + +Read about the min.delay.com in `conf/full.conf`. + +## Low Latency config + +Recoment to use the bellow config for low latency application: + +```bash +# the listen ports, split by space. +listen 1935; +vhost __defaultVhost__ { + tcp_nodelay on; + min_latency on; + + play { + gop_cache off; + queue_length 10; + mw_latency 100; + } + + publish { + mr off; + } +} +``` + +## Benchmark Data + +SRS: 0.9.55 + +Encoder: FMLE, video(h264, profile=baseline, level=3.1, keyframe-frequency=5seconds), fps=15, input=640x480, +output(500kbps, 640x480), no audio output. + +Network: Publish to aliyun qindao server. + +SRS config: + +```bash +listen 1935; +vhost __defaultVhost__ { + enabled on; + play { + gop_cache off; + } + hls { + enabled on; + hls_path ./objs/nginx/html; + hls_fragment 5; + hls_window 20; + } +} +``` + +Latency: RTMP 2s, HLS 24s. + +Read: ![RTMP-HLS-latency](/img/doc-main-concepts-low-latency-001.png) + +## Edge Benchmark Data + +SRS RTMP cluster almost not add more latency. + +Read ![Edge-latency](/img/doc-main-concepts-low-latency-002.png) + +Winlin 2015.8 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/low-latency) + + diff --git a/versioned_docs/version-6.0/doc/nginx-exec.md b/versioned_docs/version-6.0/doc/nginx-exec.md new file mode 100644 index 00000000..2467a138 --- /dev/null +++ b/versioned_docs/version-6.0/doc/nginx-exec.md @@ -0,0 +1,55 @@ +--- +title: Nginx RTMP EXEC +sidebar_label: Nginx RTMP EXEC +hide_title: false +hide_table_of_contents: false +--- + +# Exec + +## NGINX RTMP EXEC + +SRS only support some exec introduced by NGINX RTMP: + +1. exec/exec_publish: Support. +1. exec_pull: Not support. +1. exec_play: Not support. +1. exec_record_done: Not support. + +> Note: You could use [HTTP Callback](./http-callback.md) to start FFmpeg on your backend server. It's much better solution. + +## Config + +The config for SRS EXEC list bellow, you can refer to `conf/exec.conf`. + +``` +vhost __defaultVhost__ { + # the exec used to fork process when got some event. + exec { + # whether enable the exec. + # default: off. + enabled off; + # when publish stream, exec the process with variables: + # [vhost] the input stream vhost. + # [port] the intput stream port. + # [app] the input stream app. + # [stream] the input stream name. + # [engine] the tanscode engine name. + # other variables for exec only: + # [url] the rtmp url which trigger the publish. + # [tcUrl] the client request tcUrl. 
+ # [swfUrl] the client request swfUrl. + # [pageUrl] the client request pageUrl. + # @remark empty to ignore this exec. + publish ./objs/ffmpeg/bin/ffmpeg -f flv -i [url] -c copy -y ./[stream].flv; + } +} +``` + +Winlin 2015.8 + +[ne]: https://github.com/arut/nginx-rtmp-module/wiki/Directives#exec + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/nginx-exec) + + diff --git a/versioned_docs/version-6.0/doc/nginx-for-hls.md b/versioned_docs/version-6.0/doc/nginx-for-hls.md new file mode 100644 index 00000000..85395b9c --- /dev/null +++ b/versioned_docs/version-6.0/doc/nginx-for-hls.md @@ -0,0 +1,272 @@ +--- +title: HLS Cluster +sidebar_label: HLS Cluster +hide_title: false +hide_table_of_contents: false +--- + +# Nginx for HLS + +Edge Cluster is designed to solve the problem of many people watching, and it can support a large number of people watching live streams. Please note: + +* SRS Edge only supports live streaming protocols, such as RTMP or HTTP-FLV, etc. Refer to [RTMP Edge Cluster](./sample-rtmp-cluster.md). +* SRS Edge does not support sliced live streams like HLS or DASH. Essentially, they are not streams but file distribution. +* SRS Edge does not support WebRTC stream distribution, as this is not the design goal of Edge. WebRTC has its own clustering method, refer to [#2091](https://github.com/ossrs/srs/issues/2091). + +This article describes the edge cluster for HLS or DASH slices, which is based on NGINX implementation, so it is also called NGINX Edge Cluster. + +## Oryx + +The NGINX edge cluster can work together with the Oryx to achieve HLS distribution. For more details, please refer to [Oryx HLS CDN](https://github.com/ossrs/oryx/tree/main/scripts/nginx-hls-cdn). + +## NGINX Edge Cluster + +The NGINX edge cluster is essentially a reverse proxy with caching, also known as NGINX Proxy with Cache. + +```text ++------------+ +------------+ +------------+ +------------+ ++ FFmpeg/OBS +--RTMP-->-+ SRS Origin +--HLS-->--+ NGINX +--HLS-->--+ Visitors + ++------------+ +------------+ + Servers + +------------+ + +------------+ +``` + +You only need to configure the caching strategy of NGINX, no additional plugins are needed, as NGINX itself supports it. + +```bash +http { + # For Proxy Cache. + proxy_cache_path /tmp/nginx-cache levels=1:2 keys_zone=srs_cache:8m max_size=1000m inactive=600m; + proxy_temp_path /tmp/nginx-cache/tmp; + + server { + listen 8081; + # For Proxy Cache. + proxy_cache_valid 404 10s; + proxy_cache_lock on; + proxy_cache_lock_age 300s; + proxy_cache_lock_timeout 300s; + proxy_cache_min_uses 1; + + location ~ /.+/.*\.(m3u8)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + # For Proxy Cache. + proxy_cache srs_cache; + proxy_cache_key $scheme$proxy_host$uri$args; + proxy_cache_valid 200 302 10s; + } + location ~ /.+/.*\.(ts)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + # For Proxy Cache. + proxy_cache srs_cache; + proxy_cache_key $scheme$proxy_host$uri; + proxy_cache_valid 200 302 60m; + } + } +} +``` + +> Note: You can configure the cache directory `proxy_cache_path` and `proxy_temp_path` to be accessible directories. + +> Note: Generally, do not modify the `location` configuration unless you know what it means. If you want to change it, make sure it runs first before making changes. + +You must not configure it as a pure Proxy, as this will pass the load through to SRS, and the number of clients the system supports will still be limited by SRS. 
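
To make the warning concrete, this is a sketch of the pure-proxy shape to avoid: the same `location` rules as above, but with no `proxy_cache` directives, so every playlist and segment request is forwarded straight to SRS.

```bash
# Anti-pattern sketch (do not use): plain reverse proxy, no cache.
# NGINX adds bandwidth here, but origin load still grows with every player.
location ~ /.+/.*\.(m3u8|ts)$ {
    proxy_pass http://127.0.0.1:8080$request_uri;
    # Without proxy_cache/proxy_cache_valid, nothing is served from cache.
}
```
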
+ +After enabling Cache, no matter how much load NGINX has, SRS will only have one stream. In this way, we can expand multiple NGINX to support a large number of concurrent viewers. + +For example, a 1Mbps HLS stream, with 1000 clients playing on NGINX, the bandwidth of NGINX would be 1Gbps, while SRS would only have 1Mbps. + +If we expand to 10 NGINX, each with 10Gbps bandwidth, the total system bandwidth would be 100Gbps, capable of supporting 100,000 concurrent viewers, with SRS bandwidth consumption only at 10Mbps. + +How to verify that the system is working properly? This is where Benchmark comes in. + +## Benchmark + +How to stress test this system? You can use [srs-bench](https://github.com/ossrs/srs-bench#usage), which is very convenient to use and can be started directly with Docker: + +```bash +docker run --rm -it --network=host --name sb ossrs/srs:sb \ + ./objs/sb_hls_load -c 500 \ + -r http://your_server_public_ipv4/live/livestream.m3u8 +``` + +And you can also stress test RTMP and HTTP-FLV: + +```bash +docker run --rm -it --network=host --name sb ossrs/srs:sb \ + ./objs/sb_http_load -c 500 \ + -r http://your_server_public_ipv4/live/livestream.flv +``` + +> Note: Each SB simulated client concurrency is between 500 and 1000, depending on the CPU not exceeding 80%. You can start multiple processes for stress testing. + +Now let's get our hands on creating an HLS cluster. + +## Example + +Now let's use Docker to build an HLS distribution cluster. + +First, start the SRS origin server: + +```bash +./objs/srs -c conf/hls.origin.conf +``` + +Then, start the NGINX origin server: + +```bash +nginx -c $(pwd)/conf/hls.edge.conf +``` + +Finally, push the stream to the origin server: + +```bash +ffmpeg -re -i doc/source.flv -c copy \ + -f flv rtmp://127.0.0.1/live/livestream +``` + +Play HLS: + +* SRS origin server: http://127.0.0.1:8080/live/livestream.m3u8 +* NGINX edge: http://127.0.0.1:8081/live/livestream.m3u8 + +Start the stress test and get HLS from NGINX: + +```bash +docker run --rm -it --network=host --name sb ossrs/srs:sb \ + ./objs/sb_hls_load -c 500 \ + -r http://192.168.0.14:8081/live/livestream.m3u8 +``` + +However, the pressure on SRS is not significant, and the CPU consumption is all on NGINX. + +The NGINX edge cluster successfully solved the HLS distribution problem. If you also need to do low-latency live streaming and distribute HTTP-FLV, how to do it? What if you want to support HTTPS HLS or HTTPS-FLV? + +NGINX has no problem at all. Now let's see how to work with the SRS Edge Server to implement HTTP-FLV and HLS distribution through NGINX. + +## Work with SRS Edge Server + +The NGINX edge cluster can also work with the SRS Edge Server to achieve HLS and HTTP-FLV distribution. + +```text ++------------+ +------------+ +| SRS Origin +--RTMP-->--+ SRS Edge + ++-----+------+ +----+-------+ + | | +------------+ + | +---HTTP-FLV->--+ NGINX + +-----------+ + | + Edge +--HLS/FLV-->--+ Visitors + + +-------HLS--->-------------------------+ Servers + +-----------+ + +------------+ +``` + +It's very simple to implement. All you need to do is deploy an SRS on the NGINX server and have NGINX work in reverse proxy mode. + +```bash +# For SRS streaming, for example: +# http://r.ossrs.net/live/livestream.flv +location ~ /.+/.*\.(flv)$ { + proxy_pass http://127.0.0.1:8080$request_uri; +} +``` + +In this way, HLS is managed by NGINX for caching and back-to-source, while FLV is cached and back-to-source by SRS Edge. 
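
For reference, here is a minimal sketch of the SRS Edge config assumed in this setup (the origin address `192.168.0.10:1935` is only an example): the edge pulls RTMP from the origin on demand and remuxes it to HTTP-FLV locally, which is what the `location` above proxies to.

```bash
# Hypothetical edge config for the SRS deployed beside NGINX.
listen              1935;
http_server {
    enabled         on;
    listen          8080;
}
vhost __defaultVhost__ {
    cluster {
        mode        remote;
        origin      192.168.0.10:1935;
    }
    http_remux {
        enabled     on;
        mount       [vhost]/[app]/[stream].flv;
    }
}
```
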
+ +Although this architecture is good, in fact, NGINX can directly serve as an HLS origin server, which can provide even higher performance. Is it possible? No problem at all. Let's see how to use NGINX to distribute HLS completely. + +## NGINX Origin Server + +Since HLS is just a regular file, it can also be directly used with NGINX as an HLS origin server. + +In a super high-concurrency NGINX Edge cluster, a small data center-level cluster can also be formed, with centralized back-to-source from a specific NGINX, which can support even higher concurrency. + +Using NGINX to distribute HLS files is actually very simple, you only need to set the root: + +```bash + # For HLS delivery + location ~ /.+/.*\.(m3u8)$ { + root /usr/local/srs/objs/nginx/html; + add_header Cache-Control "public, max-age=10"; + } + location ~ /.+/.*\.(ts)$ { + root /usr/local/srs/objs/nginx/html; + add_header Cache-Control "public, max-age=86400"; + } +``` + +> Note: Here we set the cache time for m3u8 to 10 seconds, which needs to be adjusted according to the size of the segment. + +> Note: Since SRS currently supports HLS variant and implements HLS playback statistics, it is not as efficient as NGINX. See [#2995](https://github.com/ossrs/srs/issues/2995) + +> Note: SRS should set `Cache-Control` because the segment service can dynamically set the correct cache time to reduce latency. See [#2991](https://github.com/ossrs/srs/issues/2991) + +## Debugging + +How to determine if the cache is effective? You can add a field `upstream_cache_status` in the NGINX log and analyze the NGINX log to determine if the cache is effective: + +```bash +log_format main '$upstream_cache_status $remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; +access_log /var/log/nginx/access.log main; +``` + +The first field is the cache status, which can be analyzed using the following command, for example, to only view the cache status of TS files: + +```bash +cat /var/log/nginx/access.log | grep '.ts HTTP' \ + | awk '{print $1}' | sort | uniq -c | sort -r +``` + +You can see which ones are HIT cache, so you don't need to download files from SRS, but directly get files from NGINX. + +You can also directly add this field to the response header, so you can see in the browser whether each request has HIT: + +```bash +add_header X-Cache-Status $upstream_cache_status; +``` + +> Note: Regarding the cache effective time, refer to the definition of the field [proxy_cache_valid](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_valid), in fact, if the source station specifies `Cache-Control`, it will override this configuration. + +## aaPanel Configuration + +If you are using aaPanel, you can add a new site, and then write the following configuration in the site's configuration: + +```bash + # For Proxy Cache. + proxy_cache_path /tmp/nginx-cache levels=1:2 keys_zone=srs_cache:8m max_size=1000m inactive=600m; + proxy_temp_path /tmp/nginx-cache/tmp; + + server { + listen 80; + server_name your.domain.com; + + # For Proxy Cache. + proxy_cache_valid 404 10s; + proxy_cache_lock on; + proxy_cache_lock_age 300s; + proxy_cache_lock_timeout 300s; + proxy_cache_min_uses 1; + + location ~ /.+/.*\.(m3u8)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + # For Proxy Cache. 
+ proxy_cache srs_cache; + proxy_cache_key $scheme$proxy_host$uri$args; + proxy_cache_valid 200 302 10s; + } + location ~ /.+/.*\.(ts)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + # For Proxy Cache. + proxy_cache srs_cache; + proxy_cache_key $scheme$proxy_host$uri; + proxy_cache_valid 200 302 60m; + } + } +``` + +Translation to English: + +> Note: Generally, when adding a new site in aaPanel, it listens to port 80, and the domain server_name is the domain name you fill in yourself. Other configurations are the same as the aaPanel settings. Alternatively, you can also add the above cache and location configurations to the site settings in aaPanel. + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/nginx-for-hls) + + diff --git a/versioned_docs/version-6.0/doc/origin-cluster.md b/versioned_docs/version-6.0/doc/origin-cluster.md new file mode 100644 index 00000000..1ad2beb3 --- /dev/null +++ b/versioned_docs/version-6.0/doc/origin-cluster.md @@ -0,0 +1,66 @@ +--- +title: Origin Cluster +sidebar_label: Origin Cluster +hide_title: false +hide_table_of_contents: false +--- + +# OriginCluster + +## Design + +About the design of Origin Cluster, please read the [Issue#464](https://github.com/ossrs/srs/issues/464#issuecomment-306082751). +SRS Origin Cluster is designed for large amount of streams. + +![](/img/doc-advanced-guides-origin-cluster-001.png) + +> Remark: Origin cluster only supports RTMP, use Edge to transmux RTMP to FLV. + +## Config + +The config for origin cluster: + +``` +vhost __defaultVhost__ { + # The config for cluster. + cluster { + # The cluster mode, local or remote. + # local: It's an origin server, serve streams itself. + # remote: It's an edge server, fetch or push stream to origin server. + # default: local + mode local; + + # For origin(mode local) cluster, turn on the cluster. + # @remark Origin cluster only supports RTMP, use Edge to transmux RTMP to FLV. + # default: off + # TODO: FIXME: Support reload. + origin_cluster on; + + # For origin (mode local) cluster, the co-worker's HTTP APIs. + # This origin will connect to co-workers and communicate with them. + # please read: https://ossrs.io/lts/en-us/docs/v4/doc/origin-cluster + # TODO: FIXME: Support reload. + coworkers 127.0.0.1:9091 127.0.0.1:9092; + } +} +``` + +* mode: The mode of cluster, it should be local for origin cluster. +* origin_cluster: Whether enable origin cluster. +* coworkers: The HTTP APIs of other origin servers in the cluster. + +> Remark: Say, a client, a player or edge server, starts to play a stream from a origin server. The origin server would query the coworkers and redirect client by RTMP302 when it doesn't serve the stream. If no origin is found, it responses error. The HTTP API response message includes fields for whether owns the stream, and stream information. + +> Remark: Note in particular that server response error when the requested stream hasn't been publish to origin server. For independent origin server, server responses success and waits for stream to be published. While when origin in origin cluster, as the stream might not be published to it, it should responses error and shouldn't wait for the stream. + +## Usage + +To use origin cluster, please read [#464](https://github.com/ossrs/srs/issues/464#issuecomment-366169487). + +We also recommend to use a edge server please read [here](https://github.com/ossrs/srs/issues/464#issuecomment-366169962). The edge server can transmux RTMP to HTTP-FLV, supports fault-tolerance. 
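
As a concrete illustration of the config above, here is a sketch of two co-working origins on the same host; the ports, pid files, and config file names are examples only:

```bash
# origin.a.conf (sketch)
listen              19350;
pid                 ./objs/origin.a.pid;
http_api {
    enabled         on;
    listen          9090;
}
vhost __defaultVhost__ {
    cluster {
        mode            local;
        origin_cluster  on;
        coworkers       127.0.0.1:9091;
    }
}

# origin.b.conf (sketch): the same, except
#   listen 19351; pid ./objs/origin.b.pid; http_api listens on 9091;
#   coworkers 127.0.0.1:9090;
```
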
+ +2018.02 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/origin-cluster) + + diff --git a/versioned_docs/version-6.0/doc/perf.md b/versioned_docs/version-6.0/doc/perf.md new file mode 100644 index 00000000..4eacd50c --- /dev/null +++ b/versioned_docs/version-6.0/doc/perf.md @@ -0,0 +1,14 @@ +--- +title: Perf Analysis +sidebar_label: Perf Analysis +hide_title: false +hide_table_of_contents: false +--- + +# Perf + +Please read [SRS性能(CPU)、内存优化工具用法](https://www.jianshu.com/p/6d4a89359352) + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/perf) + + diff --git a/versioned_docs/version-6.0/doc/performance.md b/versioned_docs/version-6.0/doc/performance.md new file mode 100644 index 00000000..30a7320a --- /dev/null +++ b/versioned_docs/version-6.0/doc/performance.md @@ -0,0 +1,710 @@ +--- +title: Performance +sidebar_label: Performance +hide_title: false +hide_table_of_contents: false +--- + +# Performance + +There is a set of tools for performance improvement and detecting memory leaking. + +> Note: All tools will hurts performance more or less, so never enable these tools unless you need to fix memory issue. + +## RTC + +RTC is delivering over UDP, so the first and most important configuration is for kernel network: + +```bash +# Query the kernel configuration +sysctl net.core.rmem_max +sysctl net.core.rmem_default +sysctl net.core.wmem_max +sysctl net.core.wmem_default + +# Set the UDP buffer to 16MB +sysctl net.core.rmem_max=16777216 +sysctl net.core.rmem_default=16777216 +sysctl net.core.wmem_max=16777216 +sysctl net.core.wmem_default=16777216 +``` + +> Note: For Docker, it read the configuration from host, so you only need to setup the host machine. + +> Note:If need to set these configurations in docker, you must run with `--network=host`. + +Or, you could also modify the file `/etc/sysctl.conf` to enalbe if when reboot: + +```bash +# vi /etc/sysctl.conf +# For RTC +net.core.rmem_max=16777216 +net.core.rmem_default=16777216 +net.core.wmem_max=16777216 +net.core.wmem_default=16777216 +``` + +Query the network statistics and UDP packets dropping: + +```bash +netstat -suna +netstat -suna && sleep 30 && netstat -suna +``` + +For Example: + +* `224911319 packets received` The total received UDP packets. +* `65731106 receive buffer errors` The total dropped UDP packets before receiving +* `123534411 packets sent` The total sent UDP packets. +* `0 send buffer errors` The total dropped UDP packets before sending. + +> Note: SRS also prints about the packets dropped in application level, for example `loss=(r:49,s:0)` which means dropped 49 packets before receiving. + +> Note:Please note that you must run the command in docker container, not on host machine. + +The length of UDP queue: + +```bash +netstat -lpun +``` + +For example: + +* `Recv-Q 427008` Established: The count of bytes not copied by the user program connected to this socket. +* `Send-Q 0` Established: The count of bytes not acknowledged by the remote host. + +Other useful parameters of netstat: + +* `--udp|-u` Filter by UDP protocol. +* `--numeric|-n` Show numerical addresses instead of trying to determine symbolic host, port or user names. +* `--statistics|-s` Show statistics. +* `--all|-a` Show both listening and non-listening sockets. With the --interfaces option, show interfaces that are not up. +* `--listening|-l` Show only listening sockets. (These are omitted by default.) +* `--program|-p` Show the PID and name of the program to which each socket belongs. 
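
For example, a small script (a sketch built on the standard `netstat -suna` output above) reports how many UDP packets the kernel dropped in a 30-second window, which is easier to read than comparing two raw snapshots:

```bash
#!/bin/bash
# Sketch: measure the growth of UDP "receive buffer errors" over 30 seconds.
before=$(netstat -suna | awk '/receive buffer errors/ {print $1; exit}')
sleep 30
after=$(netstat -suna | awk '/receive buffer errors/ {print $1; exit}')
echo "UDP receive buffer errors in the last 30s: $((after - before))"
```
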
+ +## PERF + +PERF is Performance analysis tools for Linux. + +Show performance bottleneck of SRS: + +``` +perf top -p $(pidof srs) +``` + +To record the data: + +``` +perf record -p $(pidof srs) + +# Press CTRL+C after about 30s. + +perf report +``` + +Show stack or backtrace: + +``` +perf record -a --call-graph fp -p $(pidof srs) +perf report --call-graph --stdio +``` + +> Note: Record to file by `perf report --call-graph --stdio >t.txt`。 + +> Remark: The stack(`-g`) does not work for SRS(ST), because ST modifies the SP. + +## ASAN(Google Address Sanitizer) + +SRS5+ supports [ASAN](https://github.com/google/sanitizers/wiki/AddressSanitizer) by default. + +If you want to disable it, please check bellow configure options: + +```bash +./configure -h |grep asan + --sanitizer=on|off Whether build SRS with address sanitizer(asan). Default: on + --sanitizer-static=on|off Whether build SRS with static libasan(asan). Default: off + --sanitizer-log=on|off Whether hijack the log for libasan(asan). Default: off +``` + +Highly recommend to enable ASAN because it works great. + +## GPROF + +GPROF is a GNU tool, see [SRS GPROF](./gprof.md) and [GNU GPROF](http://www.cs.utah.edu/dept/old/texinfo/as/gprof.html). + +Usage: +``` +# Build SRS with GPROF +./configure --gprof=on && make + +# Start SRS with GPROF +./objs/srs -c conf/console.conf + +# Or CTRL+C to stop GPROF +killall -2 srs + +# To analysis result. +gprof -b ./objs/srs gmon.out +``` + +## GPERF + +GPERF is [google tcmalloc](https://github.com/gperftools/gperftools), please see [GPERF](./gperf.md)。 + +### GPERF: GCP + +GCP is for CPU performance analysis, see [GCP](https://gperftools.github.io/gperftools/cpuprofile.html). + +Usage: + +``` +# Build SRS with GCP +./configure --gperf=on --gcp=on && make + +# Start SRS with GCP +./objs/srs -c conf/console.conf + +# Or CTRL+C to stop GCP +killall -2 srs + +# To analysis cpu profile +./objs/pprof --text objs/srs gperf.srs.gcp* +``` + +> Note: For more details, please read [cpu-profiler](https://github.com/ossrs/srs/tree/4.0release/trunk/research/gperftools/cpu-profiler). + +Install tool for graph: + +```bash +yum install -y graphviz +``` + +Output svg graph to open by Chrome: + +```bash +./objs/pprof --svg ./objs/srs gperf.srs.gcp >t.svg +``` + +### GPERF: GMD + +GMD is for memory corrupt detecting, see [GMD](http://blog.csdn.net/win_lin/article/details/50461709). + +Usage: +``` +# Build SRS with GMD. +./configure --gperf=on --gmd=on && make + +# Start SRS with GMD. +env TCMALLOC_PAGE_FENCE=1 ./objs/srs -c conf/console.conf +``` + +> Note: For more details, please read [heap-defense](https://github.com/ossrs/srs/tree/4.0release/trunk/research/gperftools/heap-defense). + +> Note: Need link with `libtcmalloc_debug.a` and enable env `TCMALLOC_PAGE_FENCE`. + +### GPERF: GMC + +GMC is for memory leaking, see [GMC](https://gperftools.github.io/gperftools/heap_checker.html). + +Usage: + +``` +# Build SRS with GMC +./configure --gperf=on --gmc=on && make + +# Start SRS with GMC +env PPROF_PATH=./objs/pprof HEAPCHECK=normal ./objs/srs -c conf/console.conf 2>gmc.log + +# Or CTRL+C to stop gmc +killall -2 srs + +# To analysis memory leak +cat gmc.log +``` + +> Note: For more details, please read [heap-checker](https://github.com/ossrs/srs/tree/4.0release/trunk/research/gperftools/heap-checker). + +### GPERF: GMP + +GMD is for memory performance, see [GMP](https://gperftools.github.io/gperftools/heapprofile.html). 
+ +Usage: +``` +# Build SRS with GMP +./configure --gperf=on --gmp=on && make + +# Start SRS with GMP +./objs/srs -c conf/console.conf + +# Or CTRL+C to stop gmp +killall -2 srs + +# To analysis memory profile +./objs/pprof --text objs/srs gperf.srs.gmp* +``` + +> Note: For more details, please read [heap-profiler](https://github.com/ossrs/srs/tree/4.0release/trunk/research/gperftools/heap-profiler). + +## VALGRIND + +SRS3+ also supports valgrind. + +``` +valgrind --leak-check=full ./objs/srs -c conf/console.conf +``` + +> Remark: For ST to support valgrind, see [state-threads](https://github.com/ossrs/state-threads#usage) and [ST#2](https://github.com/ossrs/state-threads/issues/2). + +## Syscall + +Please use [strace -c -p PID](https://man7.org/linux/man-pages/man1/strace.1.html) for syscal performance issue. + +## OSX + +For macOS, please use [Instruments](https://stackoverflow.com/questions/11445619/profiling-c-on-mac-os-x) + +``` +instruments -l 30000 -t Time\ Profiler -p 72030 +``` + +> Remark: You can also click `Sample` button in `Active Monitor`. + +## Multiple Process and Softirq + +You can run softirq(Kernel Network Transmission) on CPU0, so run SRS on other CPUs: + +```bash +taskset -p 0xfe $(pidof srs) +``` + +Or run SRS on CPU1: + +```bash +taskset -pc 1 $(pidof srs) +``` + +Then you can run `top` and press `1` to see each CPU statistics: + +```bash +top # Press 1 +#%Cpu0 : 1.8 us, 1.1 sy, 0.0 ni, 90.8 id, 0.0 wa, 0.0 hi, 6.2 si, 0.0 st +#%Cpu1 : 67.6 us, 17.6 sy, 0.0 ni, 14.9 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st +``` + +Or use `mpstat -P ALL` + +```bash +mpstat -P ALL +#01:23:14 PM CPU %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle +#01:23:14 PM all 33.33 0.00 8.61 0.04 0.00 3.00 0.00 0.00 0.00 55.02 +#01:23:14 PM 0 2.46 0.00 1.32 0.06 0.00 6.27 0.00 0.00 0.00 89.88 +#01:23:14 PM 1 61.65 0.00 15.29 0.02 0.00 0.00 0.00 0.00 0.00 23.03 +``` + +> Note: Use `cat /proc/softirqs` to check softirq type, please see [Introduction to deferred interrupts (Softirq, Tasklets and Workqueues)](https://0xax.gitbooks.io/linux-insides/content/Interrupts/linux-interrupts-9.html) + +> Note: If SRS run with softirq at CPU0, the total CPU will be larger than total of running on different CPUs. + +If you got more CPUs, you can run softirq to multiple CPUs: + +```bash +# grep virtio /proc/interrupts | grep -e in -e out + 29: 64580032 0 0 0 PCI-MSI-edge virtio0-input.0 + 30: 1 49 0 0 PCI-MSI-edge virtio0-output.0 + 31: 48663403 0 11845792 0 PCI-MSI-edge virtio0-input.1 + 32: 1 0 0 52 PCI-MSI-edge virtio0-output.1 + +# cat /proc/irq/29/smp_affinity +1 # Bind softirq of virtio0 incoming to CPU0. +# cat /proc/irq/30/smp_affinity +2 # Bind softirq of virtio0 outgoing to CPU1. +# cat /proc/irq/31/smp_affinity +4 # Bind softirq of virtio1 incoming to CPU2. +# cat /proc/irq/32/smp_affinity +8 # Bind softirq of virtio1 outgoing to CPU3. +``` + +To disable softirq balance and force to run on CPU0, see [Linux: scaling softirq among many CPU cores](http://natsys-lab.blogspot.com/2012/09/linux-scaling-softirq-among-many-cpu.html) +and [SMP IRQ affinity](https://www.kernel.org/doc/Documentation/IRQ-affinity.txt) by: + +```bash +for irq in $(grep virtio /proc/interrupts | grep -e in -e out | cut -d: -f1); do + echo 1 > /proc/irq/$irq/smp_affinity +done +``` + +> Note:Run `echo 3 > /proc/irq/$irq/smp_affinity` if bind to CPU0 and CPU1. + +Then run SRS on other CPUs except CPU0: + +```bash +taskset -a -p 0xfe $(cat objs/srs.pid) +``` + +You can improve about 20% performance by bind softirq to CPU0. 
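
Putting the pieces together, the commands above might be combined into a small script (a sketch; the `virtio` match and the CPU masks come from the example output above and will differ on other machines):

```bash
#!/bin/bash
# Sketch: pin NIC softirqs to CPU0, then run SRS on the remaining CPUs.
for irq in $(grep virtio /proc/interrupts | grep -e in -e out | cut -d: -f1); do
    echo 1 > /proc/irq/$irq/smp_affinity   # softirq handled by CPU0 only
done
taskset -a -p 0xfe $(cat objs/srs.pid)     # SRS threads on CPU1..CPU7
```
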
+ +You can also setup in the startup script. + +## Process Priority + +You can set SRS to run in higher priority: + +```bash +renice -n -15 -p $(pidof srs) +``` + +> Note: The value of nice is `-20` to `19` and default is `0`. + +To check the priority, which is the `NI` field of top: + +```bash +top -n1 -p $(pidof srs) +# PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND +# 1505 root 5 -15 519920 421556 4376 S 66.7 5.3 4:41.12 srs +``` + +## Performance Banchmark + +The performance benchmark for SRS, compare with nginx-rtmp single process. + +Provides detail benchmark steps. + +The latest data, read [performance](https://github.com/ossrs/srs/tree/develop#performance). + +### Hardware + +The client and server use lo net interface to test: + +* Hardware: VirtualBox on ThinkPad T430 +* OS: CentOS 6.0 x86_64 Linux 2.6.32-71.el6.x86_64 +* CPU: 3 Intel(R) Core(TM) i7-3520M CPU @ 2.90GHz +* Memory: 2007MB + +### OS + +Login as root, set the fd limits: + +* Set limit: `ulimit -HSn 10240` +* View the limit: + +```bash +[root@dev6 ~]# ulimit -n +10240 +``` + +* Restart SRS:`sudo /etc/init.d/srs restart` + +### NGINX-RTMP + +NGINX-RTMP version and build command. + +* NGINX: nginx-1.5.7.tar.gz +* NGINX-RTMP: nginx-rtmp-module-1.0.4.tar.gz +* Read [nginx-rtmp](http://download.csdn.net/download/winlinvip/6795467) +* Build: + +```bash +./configure --prefix=`pwd`/../_release \ +--add-module=`pwd`/../nginx-rtmp-module-1.0.4 \ +--with-http_ssl_module && make && make install +``` + +* Config nginx:`_release/conf/nginx.conf` + +```bash +user root; +worker_processes 1; +events { + worker_connections 10240; +} +rtmp{ + server{ + listen 19350; + application live{ + live on; + } + } +} +``` + +* The limit of fd: + +```bash +[root@dev6 nginx-rtmp]# ulimit -n +10240 +``` + +* Start: ``./_release/sbin/nginx`` +* Check nginx started: + +```bash +[root@dev6 nginx-rtmp]# netstat -anp|grep 19350 +tcp 0 0 0.0.0.0:19350 0.0.0.0:* LISTEN 6486/nginx +``` + +### SRS + +SRS version and build. + +* SRS: [SRS 0.9](https://github.com/ossrs/srs/releases/tag/0.9) +* Build: ``./configure && make`` +* Config SRS:`conf/srs.conf` + +```bash +listen 1935; +max_connections 10240; +vhost __defaultVhost__ { + gop_cache on; + forward 127.0.0.1:19350; +} +``` + +* Check limit fds: + +```bash +[root@dev6 trunk]# ulimit -n +10240 +``` + +* Start SRS: ``nohup ./objs/srs -c conf/srs.conf >/dev/null 2>&1 &`` +* Check SRS started: + +```bash +[root@dev6 trunk]# netstat -anp|grep "1935 " +tcp 0 0 0.0.0.0:1935 0.0.0.0:* LISTEN 6583/srs +``` + +### Publish and Play + +Use centos to publish RTMP: + +* Start FFMPEG: + +```bash +for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg \ + -re -i doc/source.flv \ + -acodec copy -vcodec copy \ + -f flv rtmp://127.0.0.1:1935/live/livestream; \ + sleep 1; +done +``` + +* SRS RTMP stream URL: `rtmp://192.168.2.101:1935/live/livestream` +* Nginx-RTMP stream URL: `rtmp://192.168.2.101:19350/live/livestream` + +### Client + +The RTMP load test tool, read [srs-bench](https://github.com/ossrs/srs-bench) + +The sb_rtmp_load used to test RTMP load, support 800-3k concurrency for each process. 
+ +* Build: `./configure && make` +* Start: `./objs/sb_rtmp_load -c 800 -r ` + +### Record Data + +Record data before test: + +* Use top command: + +```bash +srs_pid=$(pidof srs); \ +nginx_pid=`ps aux|grep nginx|grep worker|awk '{print $2}'`; \ +load_pids=`ps aux|grep objs|grep sb_rtmp_load|awk '{ORS=",";print $2}'`; \ +top -p $load_pids$srs_pid,$nginx_pid +``` + +* The connections: + +```bash +srs_connections=`netstat -anp|grep srs|grep ESTABLISHED|wc -l`; \ +nginx_connections=`netstat -anp|grep nginx|grep ESTABLISHED|wc -l`; \ +echo "srs_connections: $srs_connections"; \ +echo "nginx_connections: $nginx_connections"; +``` + +* The bandwidth in NBps: + +```bash +[root@dev6 nginx-rtmp]# dstat -N lo 30 +----total-cpu-usage---- -dsk/total- -net/lo- ---paging-- ---system-- +usr sys idl wai hiq siq| read writ| recv send| in out | int csw + 0 0 96 0 0 3| 0 0 |1860B 58k| 0 0 |2996 465 + 0 1 96 0 0 3| 0 0 |1800B 56k| 0 0 |2989 463 + 0 0 97 0 0 2| 0 0 |1500B 46k| 0 0 |2979 461 +``` + +* The table + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------- | ------ | ---------- | ---------- | ------ | -------- | +| SRS | 1.0% | 3MB | 3 | - | - | - | 0.8s | +| nginx-rtmp | 0.7% | 8MB | 2 | - | - | - | 0.8s | + +Memory(Mem): The memory usage in MB. + +Clients(Conn): The connections/clients to server. + +ExpectNbps(ENbps): The expect network bandwidth in Xbps. + +ActualNbps(ANBps): The actual network bandwidth in Xbps. + +srs-bench(srs-bench/sb): The mock benchmark client tool. + +Latency(Lat): The latency of client. + +### Benchmark SRS + +Let's start performance benchmark. + +* Start 500 clients + +```bash +./objs/sb_rtmp_load -c 500 -r rtmp://127.0.0.1:1935/live/livestream >/dev/null & +``` + +* The data: + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------- | ------ | ---------- | ---------- | ------ | -------- | +| SRS | 9.0% | 8MB | 503 | 100Mbps | 112Mbps | 12.6% | 0.8s | + +* The data for 1000 clients: + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------- | ------ | ---------- | ---------- | ------ | -------- | +| SRS | 23.6% | 13MB | 1003 | 200Mbps | 239Mbps | 16.6% | 0.8s | + +* The data for 1500 clients: + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------- | ------ | ---------- | ---------- | ------ | -------- | +| SRS | 38.6% | 20MB | 1503 | 300Mbps | 360Mbps | 17% | 0.8s | + +* The data for 2000 clients: + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------- | ------ | ---------- | ---------- | ------ | -------- | +| SRS | 65.2% | 34MB | 2003 | 400Mbps | 480Mbps | 22% | 0.8s | + +* The data for 2500 clients: + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------- | ------ | ---------- | ---------- | ------ | -------- | +| SRS | 72.9% | 38MB | 2503 | 500Mbps | 613Mbps | 24% | 0.8s | + +### Benchmark NginxRTMP + +Let's start performance benchmark. 
+ +* Start 500 clients: + +```bash +./objs/sb_rtmp_load -c 500 -r rtmp://127.0.0.1:19350/live/livestream >/dev/null & +``` +* The data for 500 clients: + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------- | ------ | ---------- | ---------- | ------ | -------- | +| nginx-rtmp | 8.3% | 13MB | 502 | 100Mbps | 120Mbps | 16.3% | 0.8s | + +* The data for 1000 clients: + +| Server | CPU | Memory | Clients | ExpectNbps | ActualNbps | srs-bench | Latency| +| ------ | --- | ------- | ------ | ---------- | ---------- | ------ | -------- | +| nginx-rtmp | 27.3% | 19MB | 1002 | 200Mbps | 240Mbps | 30% | 0.8s | + +* The data for 1500 clients: + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------- | ------ | ---------- | ---------- | ------ | -------- | +| nginx-rtmp | 42.3% | 25MB | 1502 | 300Mbps | 400Mbps | 31% | 0.8s | + +* The data for 2000 clients: + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------- | ------ | ---------- | ---------- | ------ | -------- | +| nginx-rtmp | 48.9% | 31MB | 2002 | 400Mbps | 520Mbps | 33% | 0.8s | + +* The data for 2500 clients: + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------- | ------ | ---------- | ---------- | ------ | -------- | +| nginx-rtmp | 74.2% | 37MB | 2502 | 500Mbps | 580Mbps | 35% | 0.8s | + +### Performance Compare + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------- | ------ | ---------- | ---------- | ------ | -------- | +| nginx-rtmp | 8.3% | 13MB | 502 | 100Mbps | 120Mbps | 16.3% | 0.8s | +| SRS | 9.0% | 8MB | 503 | 100Mbps | 112Mbps | 12.6% | 0.8s | +| nginx-rtmp | 27.3% | 19MB | 1002 | 200Mbps | 240Mbps | 30% | 0.8s | +| SRS | 23.6% | 13MB | 1003 | 200Mbps | 239Mbps | 16.6% | 0.8s | +| nginx-rtmp | 42.3% | 25MB | 1502 | 300Mbps | 400Mbps | 31% | 0.8s | +| SRS | 38.6% | 20MB | 1503 | 300Mbps | 360Mbps | 17% | 0.8s | +| nginx-rtmp | 48.9% | 31MB | 2002 | 400Mbps | 520Mbps | 33% | 0.8s | +| SRS | 65.2% | 34MB | 2003 | 400Mbps | 480Mbps | 22% | 0.8s | +| nginx-rtmp | 74.2% | 37MB | 2502 | 500Mbps | 580Mbps | 35% | 0.8s | +| SRS | 72.9% | 38MB | 2503 | 500Mbps | 613Mbps | 24% | 0.8s | + +### Performance Banchmark 4k + +The performance is refined to support about 4k clients. 
+ +``` +[winlin@dev6 srs]$ ./objs/srs -v +0.9.130 +``` + +``` +top - 19:52:35 up 1 day, 11:11, 8 users, load average: 1.20, 1.05, 0.92 +Tasks: 171 total, 4 running, 167 sleeping, 0 stopped, 0 zombie +Cpu0 : 26.0%us, 23.0%sy, 0.0%ni, 34.0%id, 0.3%wa, 0.0%hi, 16.7%si, 0.0%st +Cpu1 : 26.4%us, 20.4%sy, 0.0%ni, 34.1%id, 0.7%wa, 0.0%hi, 18.4%si, 0.0%st +Cpu2 : 22.5%us, 15.4%sy, 0.0%ni, 45.3%id, 1.0%wa, 0.0%hi, 15.8%si, 0.0%st +Mem: 2055440k total, 1972196k used, 83244k free, 136836k buffers +Swap: 2064376k total, 3184k used, 2061192k free, 926124k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND +17034 root 20 0 415m 151m 2040 R 94.4 7.6 14:29.33 ./objs/srs -c console.conf + 1063 winlin 20 0 131m 68m 1336 S 17.9 3.4 54:05.77 ./objs/sb_rtmp_load -c 800 -r rtmp://127.0.0.1:1935/live/livestream + 1011 winlin 20 0 132m 68m 1336 R 17.6 3.4 54:45.53 ./objs/sb_rtmp_load -c 800 -r rtmp://127.0.0.1:1935/live/livestream +18736 winlin 20 0 113m 48m 1336 S 17.6 2.4 1:37.96 ./objs/sb_rtmp_load -c 800 -r rtmp://127.0.0.1:1935/live/livestream + 1051 winlin 20 0 131m 68m 1336 S 16.9 3.4 53:25.04 ./objs/sb_rtmp_load -c 800 -r rtmp://127.0.0.1:1935/live/livestream +18739 winlin 20 0 104m 39m 1336 R 15.6 2.0 1:25.71 ./objs/sb_rtmp_load -c 800 -r rtmp://127.0.0.1:1935/live/livestream +``` + +``` +[winlin@dev6 ~]$ dstat -N lo 30 +----total-cpu-usage---- -dsk/total- ---net/lo-- ---paging-- ---system-- +usr sys idl wai hiq siq| read writ| recv send| in out | int csw + 3 2 92 0 0 3| 11k 27k| 0 0 | 1B 26B|3085 443 + 32 17 33 0 0 17| 273B 60k| 69M 69M| 0 0 |4878 6652 + 34 18 32 0 0 16| 0 38k| 89M 89M| 0 0 |4591 6102 + 35 19 30 0 0 17| 137B 41k| 91M 91M| 0 0 |4682 6064 + 33 17 33 0 0 17| 0 31k| 55M 55M| 0 0 |4920 7785 + 33 18 31 0 0 17|2867B 34k| 90M 90M| 0 0 |4742 6530 + 32 18 33 0 0 17| 0 31k| 66M 66M| 0 0 |4922 7666 + 33 17 32 0 0 17| 137B 39k| 65M 65M| 0 0 |4841 7299 + 35 18 30 0 0 17| 0 28k| 100M 100M| 0 0 |4754 6752 + 32 17 33 0 0 18| 0 41k| 44M 44M| 0 0 |5130 8251 + 34 18 32 0 0 16| 0 30k| 104M 104M| 0 0 |4456 5718 +``` + +![SRS 4k](/img/doc-advanced-guides-performance-001.png) + +### Performance Banchmark 6k + +SRS2.0.15, not SRS1.0, performance is refined to support 6k clients. +That is 4Gbps for 522kbps bitrate, for a single SRS process. Read https://github.com/ossrs/srs/issues/194 + +### Performance Banchmark 7.5k + +SRS2.0.30 refined to support 7.5k clients, read https://github.com/ossrs/srs/issues/217 + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/performance) + + diff --git a/versioned_docs/version-6.0/doc/raspberrypi.md b/versioned_docs/version-6.0/doc/raspberrypi.md new file mode 100644 index 00000000..6ac7fa99 --- /dev/null +++ b/versioned_docs/version-6.0/doc/raspberrypi.md @@ -0,0 +1,202 @@ +--- +title: RaspBerryPi +sidebar_label: RaspBerryPi +hide_title: false +hide_table_of_contents: false +--- + +# Performance benchmark for SRS on RaspberryPi + +SRS can running on armv6(RaspberryPi) or armv7(Android). +The bellow data show the performance benchmark. 
+ +## Install SRS + +Download the binary for armv6 from [Github](http://ossrs.net/srs.release/releases/) +or [SRS Server](http://ossrs.net/srs/releases/) + +## RaspberryPi + +The hardware of raspberrypi: +* [RaspberryPi](http://item.jd.com/1014155.html):Type B +* SoC BroadcomBCM2835(CPU,GPU,DSP,SDRAM,USB) +* CPU ARM1176JZF-S(ARM11) 700MHz +* GPU Broadcom VideoCore IV, OpenGL ES 2.0, 1080p 30 h.264/MPEG-4 AVC decoder +* RAM 512MByte +* USB 2 x USB2.0 +* VideoOutput Composite RCA(PAL&NTSC), HDMI(rev 1.3&1.4), raw LCD Panels via DSI 14 HDMI resolution from 40x350 to 1920x1200 plus various PAL and NTSC standards +* AudioOutput 3.5mm, HDMI +* Storage SD/MMC/SDIO socket +* Network 10/100 ethernet +* Device 8xGPIO, UART, I2C, SPI bus, +3.3V, +5V, ground(nagetive) +* Power 700mA(3.5W) 5V +* Size 85.60 x 53.98 mm(3.370 x 2.125 in) +* OS Debian GNU/linux, Fedora, Arch Linux ARM, RISC OS, XBMC + +Software: +* RaspberryPi img:2014-01-07-wheezy-raspbian.img +* uname: Linux raspberrypi 3.10.25+ #622 PREEMPT Fri Jan 3 18:41:00 GMT 2014 armv6l GNU/Linux +* cpu: arm61 +* Server: srs 0.9.38 +* ServerType: raspberry pi +* Client:[srs-bench](https://github.com/ossrs/srs-bench) +* ClientType: Virtual Machine Centos6 +* Play: PC win7, flash +* Network: 100Mbps + +Stream information: +* Video Bitrate: 200kbps +* Resolution: 768x320 +* Audio Bitrate: 30kbps + +For arm [SRS: arm](./arm.md#raspberrypi) + +## OS settings + +Login as root, set the fd limits: + +* Set limit: `ulimit -HSn 10240` +* View the limit: + +```bash +[root@dev6 ~]# ulimit -n +10240 +``` + +* Restart SRS:`sudo /etc/init.d/srs restart` + +## Publish and Play + +Use centos to publish to SRS: + +* Start FFMPEG: + +```bash +for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg \ + -re -i doc/source.flv \ + -acodec copy -vcodec copy \ + -f flv rtmp://192.168.1.105:1935/live/livestream; \ + sleep 1; +done +``` + +* Play RTMP: `rtmp://192.168.1.105:1935/live/livestream` +* Online Play: [Online Player](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) + +## Client + +The RTMP load test tool, read [srs-bench](https://github.com/ossrs/srs-bench) + +The sb_rtmp_load used to test RTMP load, support 800-3k concurrency for each process. + +* Build: `./configure && make` +* Start: `./objs/sb_rtmp_load -c 800 -r ` + +## Record Data + +Record data before test: + +* The cpu for SRS: + +```bash +pid=`ps aux|grep srs|grep objs|awk '{print $2}'` && top -p $pid +``` + +* The cpu for srs-bench: + +```bash +pid=`ps aux|grep load|grep rtmp|awk '{print $2}'` && top -p $pid +``` + +* The connections: + +```bash +for((;;)); do \ + srs_connections=`sudo netstat -anp|grep 1935|grep ESTABLISHED|wc -l`; \ + echo "srs_connections: $srs_connections"; \ + sleep 5; \ +done +``` + +* The bandwidth in NBps: + +```bash +[winlin@dev6 ~]$ dstat 30 +----total-cpu-usage---- -dsk/total- -net/lo- ---paging-- ---system-- +usr sys idl wai hiq siq| read writ| recv send| in out | int csw + 0 0 96 0 0 3| 0 0 |1860B 58k| 0 0 |2996 465 + 0 1 96 0 0 3| 0 0 |1800B 56k| 0 0 |2989 463 + 0 0 97 0 0 2| 0 0 |1500B 46k| 0 0 |2979 461 +``` + +* The table + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------ | ------- | ---------- | ---------- | ------- | ------- | +| SRS | 1.0% | 3MB | 3 | - | - | - | 0.8s | + +Memory(Mem): The memory usage for server. + +Clients(Conn): The cocurrency connections to server. + +ExpectNbps(ENbps): The expect network bandwidth in Xbps. + +ActualNbps(ANbps): The actual network bandwidth in Xbps. 
+ +## Benchmark SRS 0.9.38 + +Let's start performance benchmark. + +* The data for 10 clients: + +```bash +./objs/sb_rtmp_load -c 10 -r rtmp://192.168.1.105:1935/live/livestream >/dev/null & +``` + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------ | ------- | ---------- | ---------- | ------- | ------- | +| SRS | 17% | 1.4MB | 11 | 2.53Mbps | 2.6Mbps | 1.3% | 1.7s | + +* The data for 20 clients: + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------ | ------- | ---------- | ---------- | ------- | ------- | +| SRS | 23% | 2MB | 21 | 4.83Mbps | 5.5Mbps | 2.3% | 1.5s | + +* The data for 30 clients: + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------ | ------- | ---------- | ---------- | ------- | ------- | +| SRS | 50% | 4MB | 31 | 7.1Mbps | 8Mbps | 4% | 2s | + +The summary for RaspberryPi Type B, 230kbps performance: + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------ | ------- | ---------- | ---------- | ------- | ------- | +| SRS | 17% | 1.4MB | 11 | 2.53Mbps | 2.6Mbps | 1.3% | 1.7s | +| SRS | 23% | 2MB | 21 | 4.83Mbps | 5.5Mbps | 2.3% | 1.5s | +| SRS | 50% | 4MB | 31 | 7.1Mbps | 8Mbps | 4% | 2s | + +## Benchmark SRS 0.9.72 + +The benchmark for RTMP SRS 0.9.72. + +| Server | CPU | Mem | Conn | ENbps | ANbps | sb | Lat | +| ------ | --- | ------ | ------- | ---------- | ---------- | ------- | ------- | +| SRS | 5% | 2MB | 2 | 1Mbps | 1.2Mbps | 0% | 1.5s | +| SRS | 20% | 2MB | 12 | 6.9Mbps | 6.6Mbps | 2.8% | 2s | +| SRS | 36% | 2.4MB | 22 | 12.7Mbps | 12.9Mbps | 2.3% | 2.5s | +| SRS | 47% | 3.1MB | 32 | 18.5Mbps | 18.5Mbps | 5% | 2.0s | +| SRS | 62% | 3.4MB | 42 | 24.3Mbps | 25.7Mbps | 9.3% | 3.4s | +| SRS | 85% | 3.7MB | 52 | 30.2Mbps | 30.7Mbps | 13.6% | 3.5s | + +## cubieboard benchmark + +No data. + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/raspberrypi) + + diff --git a/versioned_docs/version-6.0/doc/reload.md b/versioned_docs/version-6.0/doc/reload.md new file mode 100644 index 00000000..4a582e58 --- /dev/null +++ b/versioned_docs/version-6.0/doc/reload.md @@ -0,0 +1,51 @@ +--- +title: Reload +sidebar_label: Reload +hide_title: false +hide_table_of_contents: false +--- + +# Reload + +Almost all features of SRS support reload, donot disconnect +all connection and apply the new config. + +## NotSupportedFeatures + +The bellow features can not reload: +* deamon: whether start as deamon mode. +* mode: the mode of vhost. + +The daemon never support reload. + +The mode of vhost, to make the vhost origin or edge, should never directly +change the mode, because of: + +* The origin and edge switch is too complex. +* The origin always put in a device group, never change to edge actually. +* The upnode or origin restart have no effect to user, edge will retry. + +A workaround to modify the mode of vhost: +* Delete the vhost and reload. +* Ensure the vhost is deleted, for the reload is async. +* Add vhost with new mode, then reload. + +## Use Scenario + +The use scenario of reload: +* Donot restart server to apply new config, only `killall -1 srs`. +* Donot disconnect user connections. 
+ +## Usage + +The usage of reload: `killall -1 srs` + +Or send signal to process: `kill -1 7635` + +Or use SRS scripts: `/etc/init.d/srs reload` + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/reload) + + diff --git a/versioned_docs/version-6.0/doc/resource.md b/versioned_docs/version-6.0/doc/resource.md new file mode 100644 index 00000000..f919d510 --- /dev/null +++ b/versioned_docs/version-6.0/doc/resource.md @@ -0,0 +1,93 @@ +--- +title: Ports and Resource +sidebar_label: Ports and Resource +hide_title: false +hide_table_of_contents: false +--- + +# Resources + +The resources of SRS. + +## Ports + +The ports used by SRS, kernel services: + +* `tcp://1935`, for [RTMP live streaming server](./rtmp.md). +* `tcp://1985`, HTTP API server, for [HTTP-API](./http-api.md), [WebRTC](./webrtc.md), etc. +* `tcp://8080`, HTTP live streaming server, [HTTP-FLV](./flv.md), [HLS](./hls.md) as such. +* `udp://8000`, [WebRTC Media](./webrtc.md) server. + +For optional HTTPS services, which might be provided by other web servers: + +* `tcp://8088`, HTTPS live streaming server. +* `tcp://1990`, HTTPS API server. + +For optional stream converter services, to push streams to SRS: + +* `udp://8935`, Stream Converter: [Push MPEGTS over UDP](./streamer.md#push-mpeg-ts-over-udp) server. +* `tcp://8936`, Stream Converter: [Push HTTP-FLV](./streamer.md#push-http-flv-to-srs) server. +* `udp://10080`, Stream Converter: [Push SRT Media](https://github.com/ossrs/srs/issues/1147#issuecomment-577469119) server. + +For external services to work with SRS: + +* `udp://1989`, [WebRTC Signaling](https://github.com/ossrs/signaling#usage) server. + +## APIs + +The API used by SRS: + +* `/api/v1/` The HTTP API path. +* `/rtc/v1/` The HTTP API path for RTC. +* `/sig/v1/` The [demo signaling](https://github.com/ossrs/signaling) API. + +Other API used by [ossrs.net](https://ossrs.net): + +* `/gif/v1` The statistic API. +* `/service/v1/` The latest available version API. +* `/ws-service/v1/` The latest available version API, by websocket. +* `/im-service/v1/` The latest available version API, by IM. +* `/code-service/v1/` The latest available version API, by Code verification. + +The statistic path for [ossrs.net](https://ossrs.net): + +* `/srs/xxx` The GitHub pages for [srs](https://github.com/ossrs/srs) +* `/release/xxx` The pages for [ossrs.net](https://ossrs.net) +* `/console/xxx` The pages for [console](http://ossrs.net/console/) +* `/player/xxx` The pages for [players and publishers](http://ossrs.net/players/) +* `/k8s/xxx` The template and repository deploy by K8s, like [srs-k8s-template](https://github.com/ossrs/srs-k8s-template) + +## Mirrors + +[Gitee](https://gitee.com/ossrs/srs), [the GIT usage](./git.md) + +``` +git clone https://gitee.com/ossrs/srs.git && +cd srs && git remote set-url origin https://github.com/ossrs/srs.git && git pull +``` + +> Remark: For users in China, recomment to use mirror from CSDN or OSChina, because they are much faster. 
+[Gitlab](https://gitlab.com/winlinvip/srs-gitlab), [the GIT usage](./git.md) + +``` +git clone https://gitlab.com/winlinvip/srs-gitlab.git srs && +cd srs && git remote set-url origin https://github.com/ossrs/srs.git && git pull +``` + +[Github](https://github.com/ossrs/srs), [the GIT usage](./git.md) + +``` +git clone https://github.com/ossrs/srs.git +``` + +| Branch | Cost | Size | CMD | +| --- | --- | --- | --- | +| 3.0release | 2m19.931s | 262MB | git clone -b 3.0release https://gitee.com/ossrs/srs.git | +| 3.0release | 0m56.515s | 95MB | git clone -b 3.0release --depth=1 https://gitee.com/ossrs/srs.git | +| develop | 2m22.430s | 234MB | git clone -b develop https://gitee.com/ossrs/srs.git | +| develop | 0m46.421s | 42MB | git clone -b develop --depth=1 https://gitee.com/ossrs/srs.git | +| min | 2m22.865s | 217MB | git clone -b min https://gitee.com/ossrs/srs.git | +| min | 0m36.472s | 11MB | git clone -b min --depth=1 https://gitee.com/ossrs/srs.git | +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/resource) + + diff --git a/versioned_docs/version-6.0/doc/reuse-port.md b/versioned_docs/version-6.0/doc/reuse-port.md new file mode 100644 index 00000000..56abde49 --- /dev/null +++ b/versioned_docs/version-6.0/doc/reuse-port.md @@ -0,0 +1,109 @@ +--- +title: Reuse Port +sidebar_label: Reuse Port +hide_title: false +hide_table_of_contents: false +--- + +# Reuse Port + +You can use REUSE_PORT for different use scenarios. + +## For Edge Server + +The [performance of SRS2](https://github.com/ossrs/srs/tree/2.0release#performance) is improved huge, but is it enough? +Absolutely NOT! In SRS3, we provide [OriginCluster](./sample-origin-cluster.md) for multiple origin servers to work together, +and [go-oryx](https://github.com/ossrs/go-oryx) as a tcp proxy for edge server, and these are not good enough, so we support +SO_REUSEPORT feature for multiple processes edge server. + +![](/img/doc-guides-reuse-port-001.png) + +> Remark: The SO_REUSEPORT requires Linux Kernel 3.9+, so you should upgrade your kernel for CentOS6, or you could choose Ubuntu20. + +First, we start a edge server which listen at 1935: + +``` +./objs/srs -c conf/edge.conf +``` + +Then, at the same server, start another edge server which also listen at 1935: + +``` +./objs/srs -c conf/edge2.conf +``` + +> Note: They should use different pid file, or it will fail to start the second edge server. + +There are two SRS edge servers: + +``` +[root@bf2e88b31f9b trunk]# ps aux|grep srs +root 381 0.1 0.0 19888 5752 pts/2 S+ 08:03 0:01 ./objs/srs -c conf/edge.conf +root 383 0.0 0.0 19204 5468 pts/1 S+ 08:04 0:00 ./objs/srs -c conf/edge2.conf + +[root@bf2e88b31f9b trunk]# lsof -p 381 +srs 381 root 7u IPv6 18835 0t0 TCP *:macromedia-fcs (LISTEN) +[root@bf2e88b31f9b trunk]# lsof -p 383 +srs 383 root 7u IPv6 17831 0t0 TCP *:macromedia-fcs (LISTEN) +``` + +After that, we start the origin server, from which these edge server to pull streams: + +``` +./objs/srs -c conf/origin.conf +``` + +Finally, we could publish to origin/edge, and play stream from each edge server: + +``` + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +Use VLC to play the RTMP stream: `rtmp://192.168.1.170:1935/live/livestream` + +## For Origin Server + +You can use REUSE_PORT in Origin Server. 
Each Origin Server is isolated, only works for HLS: + +``` + +-----------------+ +Client --->-- + Origin Servers +------> Player + +-----------------+ +``` + +> Note: If need to deliver RTMP or HTTP-FLV, pelease use [OriginCluster](./sample-origin-cluster.md). + +Start the first Origin Server, listen at `1935` and `8080`, covert RTMP to HLS: + +```bash +./objs/srs -c conf/origin.hls.only1.conf +``` + +Start the second Origin Server, listen at `1935` and `8080`, covert RTMP to HLS: + +```bash +./objs/srs -c conf/origin.hls.only2.conf +``` + +Publish stream to origin, system will select a random Origin Server: + +```bash +./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream1 +``` + +Publish another stream to origin, system will select a random Origin Server: + +```bash +./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream2 +``` + +> Note: It works only for HLS, please use [OriginCluster](./sample-origin-cluster.md) for RTMP or HTTP-FLV. + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/reuse-port) + + diff --git a/versioned_docs/version-6.0/doc/rtmp-atc.md b/versioned_docs/version-6.0/doc/rtmp-atc.md new file mode 100644 index 00000000..d7e3196b --- /dev/null +++ b/versioned_docs/version-6.0/doc/rtmp-atc.md @@ -0,0 +1,93 @@ +--- +title: RTMP ATC +sidebar_label: RTMP ATC +hide_title: false +hide_table_of_contents: false +--- + +# ATC Deploy + +How to deploy RTMP fault backup? When origin for edge restart, edge will +switch to another origin, so it is easy to config fault tolerance for edge, +only need to specifies multiple origin servers. + +How to deploy HLS fault backup? When edge can not got a piece of ts, it +will fetch from another origin server, so the ts in these two server must +be absolutely equals. We must use atc for HLS/HDS which over http file stream. + +For the deploy of HDS/HLS, read [Adobe: HDS/HLS fault backup](http://www.adobe.com/cn/devnet/adobe-media-server/articles/varnish-sample-for-failover.html): + +```bash + +----------+ +----------+ + +--ATC->-+ server +--ATC->-+ packager +-+ +---------+ ++----------+ | RTMP +----------+ RTMP +----------+ | | Reverse | +-------+ +| encoder +->-+ +->-+ Proxy +-->-+ CDN + ++----------+ | +----------+ +----------+ | | (nginx) | +-------+ + +--ATC->-+ server +--ATC->-+ packager +-+ +---------+ + RTMP +----------+ RTMP +----------+ +``` + +The RTMP is in ATC, the absolute time, so server or other tools can output +HLS in multiple tools. + +## Config ATC on SRS + +ATC of SRS is default off, the RTMP timestamp to client always start at 0. + +```bash +vhost __defaultVhost__ { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # vhost for atc for hls/hds/rtmp backup. + # generally, atc default to off, server delivery rtmp stream to client(flash) timestamp from 0. + # when atc is on, server delivery rtmp stream by absolute time. + # atc is used, for instance, encoder will copy stream to master and slave server, + # server use atc to delivery stream to edge/client, where stream time from master/slave server + # is always the same, client/tools can slice RTMP stream to HLS according to the same time, + # if the time not the same, the HLS stream cannot slice to support system backup. 
+ # + # @see http://www.adobe.com/cn/devnet/adobe-media-server/articles/varnish-sample-for-failover.html + # @see http://www.baidu.com/#wd=hds%20hls%20atc + # + # default: off + atc off; + } +} +``` + +## ATC for Adobe Flash Player + +When ATC is on, flash will start play ok when: +* sequence header: The timstamp of sequence header must equals to the first packet. +* metadata: The timstamp of metadata must equals to the first packet. + +We test the flash player, it ok to play the RTMP stream with or without ATC. + +## ATC for encoder + +The encoder can control the atc of SRS, when encoder write a field +"bravo_atc":"true". + +We can disable this feature: + +```bash +vhost atc.srs.com { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # whether enable the auto atc, + # if enabled, detect the bravo_atc="true" in onMetaData packet, + # set atc to on if matched. + # always ignore the onMetaData if atc_auto is off. + # default: off + atc_auto off; + } +} +``` + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/rtmp-atc) + + diff --git a/versioned_docs/version-6.0/doc/rtmp-handshake.md b/versioned_docs/version-6.0/doc/rtmp-handshake.md new file mode 100644 index 00000000..a1b42372 --- /dev/null +++ b/versioned_docs/version-6.0/doc/rtmp-handshake.md @@ -0,0 +1,33 @@ +--- +title: RTMP Handshake +sidebar_label: RTMP Handshake +hide_title: false +hide_table_of_contents: false +--- + +# RTMP Handshake + +The rtmp specification 1.0 defines the RTMP handshake: +* c0/s0: 1 bytes, specifies the protocol is RTMP or RTMPE/RTMPS. +* c1/s1: 1536 bytes, first 4 bytes is time, next 4 bytes is 0x00, 1528 random bytes. +* c2/s2: 1536 bytes, first 4 bytes is time echo, next 4 bytes is time, 1528 bytes c2==s1 and s2==c1. +This is the simple handshake, the standard handshake, and the FMLE use this handshake. + +While the server connected by flash player only support simple handshake, the flash player can only play the vp6 codec, and do not support h.264+aac. Adobe changed the simple handshake to encrypted complex handshake, see: [Changed Handshake of RTMP](http://blog.csdn.net/win_lin/article/details/13006803) + +The handshake summary: | + +| Handshake | Depends | Player | Client | SRS | Use Scenario | +| ---- | ----- | --------------------- | -------- | --- | ---- | +| Simple
Standard | No | vp6+mp3/speex | All | Supported | Encoder, for example, FMLE, FFMPEG | +| Complex | openssl | vp6+mp3/speex
h264+aac | Flash | Supported | Flash player requires complex handshake to play h.264+aac codec. | + +Player(Flash palyer): The supported codec for flash player. + +Notes: When compile SRS with SSL, SRS will try complex, then simple. + +Winlin 2014.10 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/rtmp-handshake) + + diff --git a/versioned_docs/version-6.0/doc/rtmp-pk-http.md b/versioned_docs/version-6.0/doc/rtmp-pk-http.md new file mode 100644 index 00000000..9beaca13 --- /dev/null +++ b/versioned_docs/version-6.0/doc/rtmp-pk-http.md @@ -0,0 +1,92 @@ +--- +title: RTMP vs HTTP +sidebar_label: RTMP vs HTTP +hide_title: false +hide_table_of_contents: false +--- + +# RTMP PK HTTP + +There are two major methods to deliver video over internet, Live and WebRTC. + +* Live streaming: [HLS](./hls.md), [RTMP](./rtmp.md) and [HTTP-FLV](./flv.md) for entertainment. +* WebRTC: [RTC](./webrtc.md), for communication. + +Ignore other delivery protocol, which is not used on internet: +* UDP: Private protocols, realtime protocol, latence in ms. +* P2P: FlashP2P of Adobe, others are private protocol. Large latence, in minutes. +* RTSP: Private protocol not for internet. + +And the protocol base on HTTP: +* HTTP progressive: Ancient protocol, not used now. +* HTTP stream: Support seek in query string, for instance, http://x/x.mp4?start=x. +* HLS: The HLS is developed by Apple. Both Apple and Android support it. +* HDS: The HLS like developed by Adobe, shit. +* DASH: The HLS like developed by some companies, not used in China. + +Compare the delivery methods on internet: + +* HLS: Apple HLS, for both live and vod. +* HTTP: HTTP stream, private http stream, for vod. +* RTMP: Adobe RTMP, for live stream. + +## RTMP + +The RTMP is stream protocol, good for: +* Realtime: RTMP latency can be 0.8-3s. +* DRM: RTMPE/RTMPS encrypt protocol. +* Stable for PC flash. +* Server input: The actual industrial standard for encoder to output to server is RTMP. +* Fault Tolerance: The RTMP edge-origin can support fault tolerance for stream protocol. +* Monitor: The stream protocol can be monitored. + +RTMP is bad for: +* Complex: RTMP is more complex than HTTP, especially the edge. +* Hard to cache: Must use edge to cache. + +## HTTP + +The HTTP stream is the vod stream used for some video website: + +HTTP is delivery files, good for: +* High performance: There are lots of good HTTP server, such as nginx, squid, traffic server. +* No small piece of file: The large file is good than pieces of file for HTTP cache. +* Firewall traverse: Almost all firewall never block the HTTP protocol. + +HTTP is bad for: +* Large lantency: The http stream atleast N10s latency. +* Player does not support: Only PC flash can play http stream. Mobile platform does not support http stream. + +## HLS + +HLS is the open standard of Apple. HLS is supported by Android3+. + +HLS is good for: +* High performance: Same to HTTP stream. +* Firewall traverse: Same to HTTP stream. +* Mobile Platform standard: Apple IOS/OSX, Android and PC/flash support HLS. + +HLS is bad for: +* Large lantency: The http stream atleast N10s latency. +* Pieces of file: CDN does not like small file. + +## Use Scenario + +See [HTTP](./hls.md) +and [RTMP](./rtmp.md) + +I recomment to use these delivery protocols in: +* Encoder always output RTMP for internet server. +* Server always accept RTMP from encoder. +* The cluster use RTMP as internal delivery protocol. +* The low latency application on PC: Use flash to play RTMP. 
+* Application without low latency required: RTMP or HLS. +* The vod stream on PC: Use HLS or HTTP stream. +* Apple IOS/OSX: Always use HLS. Or use library to play RTMP, like [https://www.vitamio.org](https://www.vitamio.org) +* Android: Always use HLS. Or use library to play RTMP. + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/rtmp-pk-http) + + diff --git a/versioned_docs/version-6.0/doc/rtmp-url-vhost.md b/versioned_docs/version-6.0/doc/rtmp-url-vhost.md new file mode 100644 index 00000000..2058812e --- /dev/null +++ b/versioned_docs/version-6.0/doc/rtmp-url-vhost.md @@ -0,0 +1,271 @@ +--- +title: RTMP URL +sidebar_label: RTMP URL +hide_title: false +hide_table_of_contents: false +--- + +# RTMP URL/Vhost + +The url of RTMP is simple, and the vhost is not a new concept, although it's easy to confuse for the fresh. +What is vhost? What is app? Why FMLE should input the app and stream? + +Vhost(Virtual Host) is used to seperate customers or businesses. +Let's take a look at the typical use scenaio of vhost. + +![](/img/doc-main-concepts-rtmp-url-vhost-001.png) + +The benifit of RTMP and HLS, see: [HLS](./hls.md) + +## Use Scenario + +The use scenario of vhost: +* Multiple customers cloud: For example, CDN(content delivery network) serves multiple customers. How does CDN to seperate customer and stat the data? Maybe stream path is duplicated, for example, live/livestream is the most frequently used app and stream. The vhost, similar to the virtual server, provides abstract for multiple customers. +* Different config: For example, FMLE publish h264+mp3, which should be transcoded to h264+aac. We can use vhost to use different config, h264+mp3 disabled hls while the h264+aac vhost enalbe hls. + +In a word, vhost is the element of config, to seperate customer and apply different config. + +## Standard RTMP URL + +Standard RTMP URL is the most compatible URL, for all servers and players can identify. The RTMP URL is similar to the HTTP URL: + +| HTTP | Schema | Host | Port | App | Stream | +| ----- | ----- | ----- | ----| ---- | ---- | +| http://192.168.1.10:80/players/srs_player.html | http | 192.168.1.10 | 80 | players | srs_player.html| +| rtmp://192.168.1.10:1935/live/livestream | rtmp | 192.168.1.10 | 1935 | live | livestream | + +It is: +* Schema:The protocol prefix, HTTP/HTTPS for HTTP protocol, and RTMP/RTMPS/RTMPE/RTMPT for RTMP protocol, while the RTMFP is adobe flash p2p protocol. +* Host:The server ip or dns name to connect to. It is dns name for CDN, and the dns name is used as the vhost for the specified customer. +* Port:The tcp port, default 80 for HTTP and 1935 for RTMP. +* Path:The http file path for HTTP. +* App:The application for RTMP, similar to the directory of resource(stream). +* Stream:The stream for RTMP, similar to the resource(file) in specified app. + +## NO Vhost + +However, most user donot need the vhost, and it's a little complex, so donot use it when you donot need it. Most user actually only need app and stream. + +When to use vhost? When you serve 100+ customers and use the same delivery network, they use theire own vhost and can use the sample app and stream. + +The common use scenario, for example, if you use SRS to build a video website. So you are the only customer, donot need vhost. Suppose you provides video chat, there are some categories which includes some chat rooms. For example, categories are military, reader, history. The military category has rooms rock, radar; the reader category has red_masion. 
The config of SRS is very simple: + +```bash +listen 1935; +vhost __defaultVhost__ { +} +``` + +When generate web pages, for instance, military category, all app is `military`. The url of chat room is `rtmp://yourdomain.com/military/rock`, while the encoder publish this stream, and all player play this stream. + +The other pages of the military category use the same app name `military`, but use the different stream name, fr example, radar chat room use the stream url `rtmp://yourdomain.com/military/radar`. + +When generate the pages, add new stream, the config of SRS no need to change. For example, when add a new chat room cannon, no need to change config of SRS. It is simple enough! + +The reader category can use app `reader`, and the `red_mansion` chat room can use the url `rtmp://yourdomain.com/reader/red_mansion`. + +## Vhost Use Scenarios + +The vhost of RTMP is same to HTTP virtual server. For example, the demo.srs.com is resolve to 192.168.1.10 by dns or hosts: + +| HTTP | Host | Port | Vhost | +| --- | --- | --- | ----- | +| http://demo.srs.com:80/players/srs_player.html | 192.168.1.10 | 80 | demo.srs.com | +| rtmp://demo.srs.com:1935/live/livestream | 192.168.1.10 | 1935 | demo.srs.com | + +The use scenario of vhost: +* Multiple Customers: When need to serve multiple customers use the same network, for example, cctv and wasu delivery stream on the same CDN, how to seperate them, when they use the same app and stream? +* DNS scheduler: When CDN delivery content, the fast edge for the specified user is resolved for the dns name. We can use vhost as the dns name to scheduler user to different edge. +* Multiple Config sections: Sometimes we need different config, for example, to delivery RTMP for PC and transcode RTMP to HLS for android and IOS, we can use one vhost to delivery RTMP and another for HLS. + +### Multiple Customers + +For example, we got two customers cctv and wasu, use the same edge server 192.168.1.10, when user access the stream of these two customers: + +| RTMP | Host | Port | Vhost | App | Stream | +| --- | --- | -------| ----- | ---| --------| +| rtmp://show.cctv.cn/live/livestream | 192.168.1.10 | 1935 | show.cctv.cn | live | livestream | +| rtmp://show.wasu.cn/live/livestream | 192.168.1.10 | 1935 | show.wasu.cn | live | livestream | + +The config on the edge 192.168.1.10, need to config the vhost: + +```bash +listen 1935; +vhost show.cctv.cn { +} +vhost show.wasu.cn { +} +``` + +### DNS GSLB + +Please refer to the tech for DNS and CDN. + +### Config Unit + +For example, two customers cctv and wasu, and cctv needs mininum latency, while wasu needs fast startup. + +Then we config the cctv without gop cache, and wasu config with gop cache: + +```bash +listen 1935; +vhost show.cctv.cn { + chunk_size 128; +} +vhost show.wasu.cn { + chunk_size 4096; +} +``` + +These two vhosts is completely isolated. + +## Default Vhost + +The default vhost is \_\_defaultVhost\_\ introduced by FMS. When mismatch and vhost not found, use the default vhost if configed. + +For example, the config of SRS on 192.168.1.10: + +```bash +listen 1935; +vhost demo.srs.com { +} +``` + +Then, when user access the vhost: +* rtmp://demo.srs.com/live/livestream:OK, matched vhost is demo.srs.com. +* rtmp://192.168.1.10/live/livestream:Failed, no matched vhost, and no default vhost. + +The rule of default vhost is same to other vhost, the default is used for the vhost not matched and not find. 
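+For example, a minimal sketch of adding the default vhost to the config above, so that a request by IP (`rtmp://192.168.1.10/live/livestream`) is also accepted:
+
+```bash
+listen 1935;
+# Matched when the client connects with vhost demo.srs.com.
+vhost demo.srs.com {
+}
+# Catch-all for requests whose vhost does not match any named vhost,
+# for example, connecting by the server IP.
+vhost __defaultVhost__ {
+}
+```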
+ +## Locate Vhost + +There are two ways to access the vhost on server: +* DNS name: When access the dns name equals to the vhost, by dns resolve or hosts file, we can access the vhost on server. +* Stream parameters: While publishing or playing stream, the parameter can take the vhost. This needs the server supports this way, for example, SRS can use parameter `?vhost=VHOST` and `?domain=VHOST` to access specified vhost. + +For example: + +```bash +RTMP URL: rtmp://demo.srs.com/live/livestream +Edge servers: 50 servers +Edge server ip: 192.168.1.100 to 192.168.1.150 +Edge SRS config: + listen 1935; + vhost demo.srs.com { + mode remote; + origin: xxxxxxx; + } +``` + +The ways to access the url on edge servers: + +| User | RTMP URL | hosts | Target | +|--------| -------- |----------------------------|--------------------------| +| User | rtmp://demo.srs.com/live/livestream | - | Resolved by DNS | +| DevOps | rtmp://demo.srs.com/live/livestream | 192.168.1.100 demo.srs.com | Connect to 192.168.1.100 | +| DevOps | rtmp://192.168.1.100/live?
vhost=demo.srs.com/livestream | - | Connect to 192.168.1.100 | +| DevOps | rtmp://192.168.1.100/live
...vhost...demo.srs.com/livestream | - | Connect to 192.168.1.100 | + +It is sample way to access other servers. + +## Parameters in URL + +There is no parameters for RTMP URL, similar to query string of HTTP, we can pass parameters in RTMP URL for SRS: +* Vhost:Specifies the vhost in the RTMP URL for SRS. +* Token authentication: Not implements for SRS, while user can specifies the token in the RTMP URL, SRS can fetch the token and verify it on remote authentication server. The token authentication is better and complex than refer authentication. + +The parameters in SRS URL: + +* `rtmp://192.168.1.100/live/livestream?vhost=demo.srs.com` +* `rtmp://192.168.1.100/live/livestream?domain=demo.srs.com` +* `rtmp://192.168.1.100/live/livestream?token=xxx` +* `rtmp://192.168.1.100/live/livestream?vhost=demo.srs.com&token=xxx` + +> Note: FMLE passes the parameters in app, which should not be used now, because it confuses people. + +It's also applied to other protocols, for example: + +* `http://192.168.1.100/live/livestream.flv?vhost=demo.srs.com&token=xxx` +* `http://192.168.1.100/live/livestream.m3u8?vhost=demo.srs.com&token=xxx` +* `webrtc://192.168.1.100/live/livestream?vhost=demo.srs.com&token=xxx` + +> Note: SRT is another story, please read [SRT Parameters](./srt.md) for details. + +## URL of SRS + +SRS always simplify the problem, never make it more complex. + +The RTMP URL of SRS use standard RTMP URL. Generally do not need to modify the url or add parameters in it, except: +* Change vhost: Manually change vhost in RTMP URL for debuging. +* Token authentication: To support token authentication. + +Furthermore, recomment user to use one level app and stream, never use multiple level app and stream. For example: + +```bash +// Not recomment multiple level app and stream, which confuse people. +rtmp://demo.srs.com/show/live/livestream +rtmp://demo.srs.com/show/live/livestream/2013 +``` + +The srs_player and srs_publisher donot support multiple level app and stream. Both srs_player and srs_publisher make the word behind the last / to stream, the left is tcUrl(vhost/app). For example: + +```bash +// For both srs_player and srs_publisher: +// play or publish the following rtmp URL: +rtmp://demo.srs.com/show/live/livestream/2013 +schema: rtmp +host/vhost: demo.srs.com +app: show/live/livestream +stream: 2013 +``` + +It simplify the url, the palyer and publisher only need user to input a url, not tcUrl and stream. + +The RTMP URL of SRS: + +| URL | Description | +| ---- | ------ | +| rtmp://demo.srs.com/live/livestream | Standard RTMP URL | +| rtmp://192.168.1.10/live/livestream?vhost=demo.srs.com | URL specifies vhost | +| rtmp://demo.srs.com/live/livestream?key=ER892ID839KD9D0A1D87D | URL specifies token authentication | + +## Example Vhosts in SRS + +The full.conf of conf of SRS contains many vhost, which used to show each feature. 
All features is put into the vhost demo.srs.com: + +| Category | Vhost | Description | +| -------- | ----- | ---- | +| RTMP | __defaultVhost__ | Default Vhost, only RTMP.| +| RTMP | chunksize.vhost.com | Sample to set the chunk_size.| +| Forward | same.vhost.forward.vhost.com | Sample for Foward stream to the same vhost.| +| HLS | with-hls.vhost.com | Sample for HLS.| +| HLS | no-hls.vhost.com | Sample to disable the HLS.| +| RTMP | min.delay.com | Sample to config the minimum latency for RTMP.| +| RTMP | refer.anti_suck.com | Sample for Refer anti-suck DRM.| +| RTMP | removed.vhost.com | Sample to disable vhost.| +| Callback | hooks.callback.vhost.com | Sample for http callback.| +| Transcode | mirror.transcode.vhost.com | Sample for transcode, to use the sample filter of FFMPEG.| +| Transcode | crop.transcode.vhost.com | Sample for transcode, to use the crop filter of FFMPEG.| +| Transcode | logo.transcode.vhost.com | Sample for transcode, to use the logo filter of FFMPEG.| +| Transcode | audio.transcode.vhost.com | Sample for transcode, to transcode audio only.| +| Transcode | copy.transcode.vhost.com | Sample for transcode, demux and mux.| +| Transcode | all.transcode.vhost.com | Sample for transcode, all transcode features.| +| Transcode | ffempty.transcode.vhost.com | Sample for empty transcode, display the parameters.| +| Transcode | app.transcode.vhost.com | Sample for transcode, transcode specified app streams.| +| Transcode | stream.transcode.vhost.com | Sample for transcode, transcode specified streams. | + +The demo.conf of conf of SRS, used for demo of SRS。 + +| Category | Vhost | Description | +| -------- | ----- | ---- | +| DEMO | players | The vhost for default stream of srs_player, ingest this stream.| +| DEMO | players_pub | The vhost for the srs_publisher to publish stream to.| +| DEMO | players_pub_rtmp | The low latency vhost for demo.| +| DEMO | demo.srs.com | The full features for demo.| +| Others | dev | The vhost for dev, ignore.| + +Winlin 2014.10 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/rtmp-url-vhost) + + diff --git a/versioned_docs/version-6.0/doc/rtmp.md b/versioned_docs/version-6.0/doc/rtmp.md new file mode 100644 index 00000000..b01e6f27 --- /dev/null +++ b/versioned_docs/version-6.0/doc/rtmp.md @@ -0,0 +1,326 @@ +--- +title: RTMP +sidebar_label: RTMP +hide_title: false +hide_table_of_contents: false +--- + +# RTMP + +RTMP is a basic and de facto standard protocol for live streaming for many years. + +However, Adobe neither maintaining RTMP protocol nor contributing as an RFC protocol, so many new features +aren't supported by RTMP, such as HEVC and opus. By March 2023, the [Enhanced RTMP](https://github.com/veovera/enhanced-rtmp) +project is finally set up, supporting HEVC and AV1. SRS and OBS now support [HEVC](https://github.com/veovera/enhanced-rtmp/issues/4) +encoding based on Enhanced RTMP. + +For live streaming producing, more recent years, SRT, WebRTC and RIST have been growing rapidly. More and +more devices supported SRT or RIST in live streaming. You're also able to use WebRTC for live streaming. + +For live streaming deliver, HLS is the most common used protocol, is supported by almost all CDN and devices +such as PC, iOS, Android and tablet PC. However, HLS has large (3~5s+) latency, you could use HTTP-FLV, +HTTP-TS or WebRTC for low latency use scenario. + +Today, RTMP is still used in live streaming producing, for example, OBS publish RTMP stream to YouTube, Twitch, etc. 
+If you want to ingest stream from a device or publish to a platform, RTMP is the right choice for compatibility. + +## Usage + +SRS supports RTMP by default, please run by [docker](./getting-started.md) or [build from source](./getting-started-build.md): + +```bash +docker run --rm -it -p 1935:1935 ossrs/srs:5 \ + ./objs/srs -c conf/rtmp.conf +``` + +Publish stream by [FFmpeg](https://ffmpeg.org/download.html) or [OBS](https://obsproject.com/download) : + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream +``` + +Play stream by: + +* RTMP (by [VLC](https://www.videolan.org/)): `rtmp://localhost/live/livestream` + +SRS supports converting RTMP to other protocols, described in next sections. + +## Config + +The configuration about RTMP: + +```bash +vhost __defaultVhost__ { + # whether enable min delay mode for vhost. + # for min latency mode: + # 1. disable the publish.mr for vhost. + # 2. use timeout for cond wait for consumer queue. + # @see https://github.com/ossrs/srs/issues/257 + # default: off (for RTMP/HTTP-FLV) + # default: on (for WebRTC) + min_latency off; + + # whether enable the TCP_NODELAY + # if on, set the nodelay of fd by setsockopt + # Overwrite by env SRS_VHOST_TCP_NODELAY for all vhosts. + # default: off + tcp_nodelay off; + + # the default chunk size is 128, max is 65536, + # some client does not support chunk size change, + # vhost chunk size will override the global value. + # Overwrite by env SRS_VHOST_CHUNK_SIZE for all vhosts. + # default: global chunk size. + chunk_size 128; + + # The input ack size, 0 to not set. + # Generally, it's set by the message from peer, + # but for some peer(encoder), it never send message but use a different ack size. + # We can chnage the default ack size in server-side, to send acknowledge message, + # or the encoder maybe blocked after publishing for some time. + # Overwrite by env SRS_VHOST_IN_ACK_SIZE for all vhosts. + # Default: 0 + in_ack_size 0; + + # The output ack size, 0 to not set. + # This is used to notify the peer(player) to send acknowledge to server. + # Overwrite by env SRS_VHOST_OUT_ACK_SIZE for all vhosts. + # Default: 2500000 + out_ack_size 2500000; + + # the config for FMLE/Flash publisher, which push RTMP to SRS. + publish { + # about MR, read https://github.com/ossrs/srs/issues/241 + # when enabled the mr, SRS will read as large as possible. + # Overwrite by env SRS_VHOST_PUBLISH_MR for all vhosts. + # default: off + mr off; + # the latency in ms for MR(merged-read), + # the performance+ when latency+, and memory+, + # memory(buffer) = latency * kbps / 8 + # for example, latency=500ms, kbps=3000kbps, each publish connection will consume + # memory = 500 * 3000 / 8 = 187500B = 183KB + # when there are 2500 publisher, the total memory of SRS at least: + # 183KB * 2500 = 446MB + # the recommended value is [300, 2000] + # Overwrite by env SRS_VHOST_PUBLISH_MR_LATENCY for all vhosts. + # default: 350 + mr_latency 350; + + # the 1st packet timeout in ms for encoder. + # Overwrite by env SRS_VHOST_PUBLISH_FIRSTPKT_TIMEOUT for all vhosts. + # default: 20000 + firstpkt_timeout 20000; + # the normal packet timeout in ms for encoder. + # Overwrite by env SRS_VHOST_PUBLISH_NORMAL_TIMEOUT for all vhosts. + # default: 5000 + normal_timeout 7000; + # whether parse the sps when publish stream. + # we can got the resolution of video for stat api. + # but we may failed to cause publish failed. + # @remark If disabled, HLS might never update the sps/pps, it depends on this. 
+ # Overwrite by env SRS_VHOST_PUBLISH_PARSE_SPS for all vhosts. + # default: on + parse_sps on; + # When parsing SPS/PPS, whether try ANNEXB first. If not, try IBMF first, then ANNEXB. + # Overwrite by env SRS_VHOST_PUBLISH_TRY_ANNEXB_FIRST for all vhosts. + # default: on + try_annexb_first on; + # The timeout in seconds to disconnect publisher when idle, which means no players. + # Note that 0 means no timeout or this feature is disabled. + # Note that this feature conflicts with forward, because it disconnect the publisher stream. + # Overwrite by env SRS_VHOST_PUBLISH_KICKOFF_FOR_IDLE for all vhosts. + # default: 0 + kickoff_for_idle 0; + } + + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # whether cache the last gop. + # if on, cache the last gop and dispatch to client, + # to enabled fast startup for client, client play immediately. + # if off, send the latest media data to client, + # client need to wait for the next Iframe to decode and show the video. + # set to off if requires min delay; + # set to on if requires client fast startup. + # Overwrite by env SRS_VHOST_PLAY_GOP_CACHE for all vhosts. + # default: on + gop_cache off; + + # Limit the max frames in gop cache. It might cause OOM if video stream has no IDR frame, so we limit to N + # frames by default. Note that it's the size of gop cache, including videos, audios and other messages. + # Overwrite by env SRS_VHOST_PLAY_GOP_CACHE_MAX_FRAMES for all vhosts. + # default: 2500 + gop_cache_max_frames 2500; + + # the max live queue length in seconds. + # if the messages in the queue exceed the max length, + # drop the old whole gop. + # Overwrite by env SRS_VHOST_PLAY_QUEUE_LENGTH for all vhosts. + # default: 30 + queue_length 10; + + # about the stream monotonically increasing: + # 1. video timestamp is monotonically increasing, + # 2. audio timestamp is monotonically increasing, + # 3. video and audio timestamp is interleaved/mixed monotonically increasing. + # it's specified by RTMP specification, @see 3. Byte Order, Alignment, and Time Format + # however, some encoder cannot provides this feature, please set this to off to ignore time jitter. + # the time jitter algorithm: + # 1. full, to ensure stream start at zero, and ensure stream monotonically increasing. + # 2. zero, only ensure stream start at zero, ignore timestamp jitter. + # 3. off, disable the time jitter algorithm, like atc. + # @remark for full, correct timestamp only when |delta| > 250ms. + # @remark disabled when atc is on. + # Overwrite by env SRS_VHOST_PLAY_TIME_JITTER for all vhosts. + # default: full + time_jitter full; + # vhost for atc for hls/hds/rtmp backup. + # generally, atc default to off, server delivery rtmp stream to client(flash) timestamp from 0. + # when atc is on, server delivery rtmp stream by absolute time. + # atc is used, for instance, encoder will copy stream to master and slave server, + # server use atc to delivery stream to edge/client, where stream time from master/slave server + # is always the same, client/tools can slice RTMP stream to HLS according to the same time, + # if the time not the same, the HLS stream cannot slice to support system backup. + # + # @see http://www.adobe.com/cn/devnet/adobe-media-server/articles/varnish-sample-for-failover.html + # @see http://www.baidu.com/#wd=hds%20hls%20atc + # + # @remark when atc is on, auto off the time_jitter + # Overwrite by env SRS_VHOST_PLAY_ATC for all vhosts. 
+ # default: off + atc off; + # whether use the interleaved/mixed algorithm to correct the timestamp. + # if on, always ensure the timestamp of audio+video is interleaved/mixed monotonically increase. + # if off, use time_jitter to correct the timestamp if required. + # @remark to use mix_correct, atc should on(or time_jitter should off). + # Overwrite by env SRS_VHOST_PLAY_MIX_CORRECT for all vhosts. + # default: off + mix_correct off; + + # whether enable the auto atc, + # if enabled, detect the bravo_atc="true" in onMetaData packet, + # set atc to on if matched. + # always ignore the onMetaData if atc_auto is off. + # Overwrite by env SRS_VHOST_PLAY_ATC_AUTO for all vhosts. + # default: off + atc_auto off; + + # set the MW(merged-write) latency in ms. + # SRS always set mw on, so we just set the latency value. + # the latency of stream >= mw_latency + mr_latency + # the value recomment is [300, 1800] + # @remark For WebRTC, we enable pass-by-timestamp mode, so we ignore this config. + # default: 350 (For RTMP/HTTP-FLV) + # Overwrite by env SRS_VHOST_PLAY_MW_LATENCY for all vhosts. + # default: 0 (For WebRTC) + mw_latency 350; + + # Set the MW(merged-write) min messages. + # default: 0 (For Real-Time, min_latency on) + # default: 1 (For WebRTC, min_latency off) + # default: 8 (For RTMP/HTTP-FLV, min_latency off). + # Overwrite by env SRS_VHOST_PLAY_MW_MSGS for all vhosts. + mw_msgs 8; + + # the minimal packets send interval in ms, + # used to control the ndiff of stream by srs_rtmp_dump, + # for example, some device can only accept some stream which + # delivery packets in constant interval(not cbr). + # @remark 0 to disable the minimal interval. + # @remark >0 to make the srs to send message one by one. + # @remark user can get the right packets interval in ms by srs_rtmp_dump. + # Overwrite by env SRS_VHOST_PLAY_SEND_MIN_INTERVAL for all vhosts. + # default: 0 + send_min_interval 10.0; + # whether reduce the sequence header, + # for some client which cannot got duplicated sequence header, + # while the sequence header is not changed yet. + # Overwrite by env SRS_VHOST_PLAY_REDUCE_SEQUENCE_HEADER for all vhosts. + # default: off + reduce_sequence_header on; + } +} +``` + +> Note: These configurations are for publish and play. Note that there are some other configurations in other sections, +for example, converting RTMP to [HTTP-FLV](./flv.md#config) or HTTP-TS. + +## On Demand Live Streaming + +In some situations, you might want to start streaming only when someone starts watching: + +1. The streaming source connects to the system but doesn't send the stream to SRS. +2. The player connects to the system and requests to play the stream. +3. The system tells the streaming source to start sending the stream to SRS. +4. The player gets the stream from SRS and plays it. + +> Note: The "system" here refers to your business system, not SRS. + +This is called "on-demand live streaming" or "on-demand streaming." What happens if the player stops watching? + +1. The system needs to tell the streaming source to stop sending the stream. +2. Or, when the last player stops watching, SRS waits for a while and then disconnects the stream. + +The second solution is recommended, as it's easier to use. Your system won't need to tell the streaming source to stop, because SRS will disconnect it automatically. You just need to enable the following configuration: + +```bash +# The timeout in seconds to disconnect publisher when idle, which means no players. 
+# Note that 0 means no timeout or this feature is disabled. +# Note that this feature conflicts with forward, because it disconnect the publisher stream. +# Overwrite by env SRS_VHOST_PUBLISH_KICKOFF_FOR_IDLE for all vhosts. +# default: 0 +kickoff_for_idle 0; +``` + +For more details, you can refer to [this PR](https://github.com/ossrs/srs/pull/3105). + +## Converting RTMP to HLS + +If want to convert RTMP to HLS, please see [HLS](./hls.md). + +## Converting RTMP to HTTP-FLV + +If want to convert RTMP to HTTP-FLV or HTTP-TS, please see [HTTP-FLV](./flv.md). + +## Converting RTMP to WebRTC + +If want to convert RTMP to WebRTC, please see [WebRTC: RTMP to RTC](./webrtc.md#rtmp-to-rtc). + +## Converting RTMP to MPEGTS-DASH + +If want to convert RTMP to MPEGTS-DASH, please see [DASH](./sample-dash.md). + +## Converting SRT to RTMP + +If want to convert SRT to RTMP, please see [SRT](./srt.md). + +## Converting WebRTC to RTMP + +If want to convert WebRTC to RTMP, please see [WebRTC: RTC to RTMP](./webrtc.md#rtc-to-rtmp). + +## RTMP Cluster + +If want to support a large set of players, please see [Edge Cluster](./edge.md). + +If want to support a larget set of publishers or streams, please see [Origin Cluster](./origin-cluster.md). + +Note that there are lots of solutions for [load balancing](../../../blog/load-balancing-streaming-servers). + +## Low Latency RTMP + +If want to support low latency RTMP stream, please see [LowLatency](./low-latency.md). + +## Timestamp Jitter + +SRS support correcting the timestamp for RTMP, please see [Jitter](./time-jitter.md). + +If wants SRS to keep the original timestamp, you can enable [ATC](./rtmp-atc.md). + +## Performance + +SRS use writev for high performance RTMP delivery, please follow [benchmark](./performance.md##performance-banchmark) +to test it. + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/rtmp) diff --git a/versioned_docs/version-6.0/doc/sample-arm.md b/versioned_docs/version-6.0/doc/sample-arm.md new file mode 100644 index 00000000..411c8be8 --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-arm.md @@ -0,0 +1,111 @@ +--- +title: ARM Deploy +sidebar_label: ARM Deploy +hide_title: false +hide_table_of_contents: false +--- + +# SRS ARM deploy example + +SRS can deploy on ARM linux. SRS provides srs-librtmp as client library for ARM. + +Compile and build ARM, read [SrsLinuxArm](./arm.md), +this artical describes how to deploy. + +**Suppose the IP of ubuntu12: 192.168.1.170** + +**Suppose the ARM device running in VirtualBox 1935 mapped to Ubuntu12 19350, 22 mapped to 2200. +That is, we can access Ubuntu12 19350 to access the ARM 1935, while the Ubuntu 2200 for ARM 22.** + +For more information, read [SrsLinuxArm](./arm.md) + +> Note: We need to patch ST, read [ST#1](https://github.com/ossrs/state-threads/issues/1) and [SrsLinuxArm](./arm.md#st-arm-bug-fix) + +## Ubuntu12 cross build SRS + +### Step 1, get SRS + +For detail, read [GIT](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +Or update the exists code: + +```bash +git pull +``` + +### Step 2, build SRS + +For detail, read [SrsLinuxArm](./arm.md) + +```bash +./configure --cross-build && make +``` + +> Note: To directly build on ARM device, for example RaspberryPi, use `./configure` instead. 
For others, please read [SrsLinuxArm](./arm.md) + +### Step 3, send SRS to ARM virtual machine + +For detail, read [SrsLinuxArm](./arm.md) + +```bash +# Password is:root +scp -P 2200 objs/srs root@localhost:~ +scp -P 2200 conf/rtmp.conf root@localhost:~ +``` + +## Start SRS on ARM + +Login to Ubuntu 2200, we are on ARM: + +### Step 4, start SRS + +For detail, read [SrsLinuxArm](./arm.md) + +```bash +./objs/srs -c conf/rtmp.conf +``` + +### Step 5, start Encoder + +For detail, read [SrsLinuxArm](./arm.md) + +Use FFMPEG to publish stream: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170:19350/live/livestream; \ + sleep 1; \ + done +``` + +Or use FMLE to publish stream: + +```bash +FMS URL: rtmp://192.168.1.170:19350/live +Stream: livestream +``` + +## User Machine + +Play RTMP stream on user machine. + +### Step 6, play RTMP stream + +RTMP url is: `rtmp://192.168.1.170:19350/live/livestream` + +User can use vlc to play the RTMP stream. + +Note: Please replace all ip 192.168.1.170 to your server ip. + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-arm) + + diff --git a/versioned_docs/version-6.0/doc/sample-dash.md b/versioned_docs/version-6.0/doc/sample-dash.md new file mode 100644 index 00000000..e00c4301 --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-dash.md @@ -0,0 +1,109 @@ +--- +title: DASH Deploy +sidebar_label: DASH Deploy +hide_title: false +hide_table_of_contents: false +--- + +# DASH deploy example + +Delivery DASH by SRS: + +**Suppose the server ip is 192.168.1.170** + +## Step 1, get SRS + +For detail, read [GIT](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +Or update the exists code: + +```bash +git pull +``` + +## Step 2, build SRS + +For detail, read [Build](./install.md) + +```bash +./configure && make +``` + +## Step 3, config SRS + +Please read [DASH](https://github.com/ossrs/srs/issues/299#issuecomment-306022840) + +Save bellow as config, or use `conf/dash.conf`: + +```bash +# conf/dash.conf +listen 1935; +max_connections 1000; +daemon off; +srs_log_tank console; +http_server { + enabled on; + listen 8080; + dir ./objs/nginx/html; +} +vhost __defaultVhost__ { + dash { + enabled on; + dash_fragment 30; + dash_update_period 150; + dash_timeshift 300; + dash_path ./objs/nginx/html; + dash_mpd_file [app]/[stream].mpd; + } +} +``` + +## Step 4, start SRS + +```bash +./objs/srs -c conf/dash.conf +``` + +> Note: You can also use other web server, such as NGINX, to delivery files of DASH. + +## Step 5, start Encoder + +Use FFMPEG to publish stream: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +The stream in SRS: +* RTMP url:`rtmp://192.168.1.170/live/livestream` +* DASH url: `http://192.168.1.170:8080/live/livestream.mpd` + +## Step 6, play RTMP stream + +RTMP url is: `rtmp://192.168.1.170:1935/live/livestream` + +User can use vlc to play the RTMP stream. + +Note: Please replace all ip 192.168.1.170 to your server ip. + +## Step 7, play DASH stream + +DASH url: `http://192.168.1.170:8080/live/livestream.mpd` + +Please use VLC to play. 
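+Besides VLC, you could also check the DASH stream with ffplay, assuming your FFmpeg build includes the DASH demuxer:
+
+```bash
+ffplay http://192.168.1.170:8080/live/livestream.mpd
+```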
+ +Winlin 2020.01 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-dash) + + diff --git a/versioned_docs/version-6.0/doc/sample-ffmpeg.md b/versioned_docs/version-6.0/doc/sample-ffmpeg.md new file mode 100644 index 00000000..d6f1164a --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-ffmpeg.md @@ -0,0 +1,137 @@ +--- +title: Transcode Deploy +sidebar_label: Transcode Deploy +hide_title: false +hide_table_of_contents: false +--- + +# Transcode deploy example + +FFMPEG can used to transcode the live stream, output the other RTMP server. +For detail, read [FFMPEG](./ffmpeg.md). + +**Suppose the server ip is 192.168.1.170** + +## Step 1, get SRS + +For detail, read [GIT](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +Or update the exists code: + +```bash +git pull +``` + +## Step 2, build SRS + +For detail, read [Build](./install.md) + +```bash +./configure --ffmpeg-tool=on && make +``` + +## Step 3, config file + +For detail, read [FFMPEG](./ffmpeg.md) + +Save the bellow as config file, or use `conf/ffmpeg.transcode.conf` instead: + +```bash +# conf/ffmpeg.transcode.conf +listen 1935; +max_connections 1000; +vhost __defaultVhost__ { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine ff { + enabled on; + vfilter { + } + vcodec libx264; + vbitrate 500; + vfps 25; + vwidth 768; + vheight 320; + vthreads 12; + vprofile main; + vpreset medium; + vparams { + } + acodec libfdk_aac; + abitrate 70; + asample_rate 44100; + achannels 2; + aparams { + } + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +## Step 4, start SRS + +For detail, read [FFMPEG](./ffmpeg.md) + +```bash +./objs/srs -c conf/ffmpeg.conf +``` + +## Step 5, start encoder + +For detail, read [FFMPEG](./ffmpeg.md) + +Use FFMPEG to publish stream: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +Or use FMLE to publish: + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +The stream in SRS: +* Stream publish by encoder: rtmp://192.168.1.170:1935/live/livestream +* Play the original stream: rtmp://192.168.1.170:1935/live/livestream +* Play the transcoded stream: rtmp://192.168.1.170:1935/live/livestream_ff + +## Step 6, play the stream + +For detail, read [FFMPEG](./ffmpeg.md) + +RTMP url is: `rtmp://192.168.1.170:1935/live/livestream` + +User can use vlc to play the RTMP stream. + +Note: Please replace all ip 192.168.1.170 to your server ip. + +## Step 7, play the transcoded stream + +For detail, read [FFMPEG](./ffmpeg.md) + +RTMP url is: `rtmp://192.168.1.170:1935/live/livestream_ff` + +User can use vlc to play the RTMP stream. + +Note: Please replace all ip 192.168.1.170 to your server ip. + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-ffmpeg) + + diff --git a/versioned_docs/version-6.0/doc/sample-forward.md b/versioned_docs/version-6.0/doc/sample-forward.md new file mode 100644 index 00000000..738ce75b --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-forward.md @@ -0,0 +1,156 @@ +--- +title: Forward Deploy +sidebar_label: Forward Deploy +hide_title: false +hide_table_of_contents: false +--- + +# Forward deploy example + +SRS can forward stream to other RTMP server. 
+ +**Suppose the server ip is 192.168.1.170** + +Forward will copy streams to other RTMP server: +* Master: Encoder publish stream to master, which will forward to slave. +* Slave: Slave forward stream to slave. + +We use master to listen at 1935, and slave listen at 19350. + +## Step 1, get SRS + +For detail, read [GIT](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +Or update the exists code: + +```bash +git pull +``` + +## Step 2, build SRS + +For detail, read [Build](./install.md) + +```bash +./configure && make +``` + +## Step 3, config master SRS + +For detail, read [Forward](./forward.md) + +Save bellow as config, or use `conf/forward.master.conf`: + +```bash +# conf/forward.master.conf +listen 1935; +max_connections 1000; +pid ./objs/srs.master.pid; +srs_log_tank file; +srs_log_file ./objs/srs.master.log; +vhost __defaultVhost__ { + forward { + enabled on; + destination 127.0.0.1:19350; + } +} +``` + +## Step 4, start master SRS + +For detail, read [Forward](./forward.md) + +```bash +./objs/srs -c conf/forward.master.conf +``` + +## Step 5, config slave SRS + +For detail, read [Forward](./forward.md) + +Save bellow as config, or use `conf/forward.slave.conf`: + +```bash +# conf/forward.slave.conf +listen 19350; +pid ./objs/srs.slave.pid; +srs_log_tank file; +srs_log_file ./objs/srs.slave.log; +vhost __defaultVhost__ { +} +``` + +## Step 6, start slave SRS + +For detail, read [Forward](./forward.md) + +```bash +./objs/srs -c conf/forward.slave.conf +``` + +Note: Ensure the master and slave is ok, no error in log. + +```bash +[winlin@dev6 srs]$ sudo netstat -anp|grep srs +tcp 0 0 0.0.0.0:1935 0.0.0.0:* LISTEN 7826/srs +tcp 0 0 0.0.0.0:19350 0.0.0.0:* LISTEN 7834/srs +``` + +## Step 7, start Encoder + +For detail, read [Forward](./forward.md) + +Use FFMPEG to publish stream: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +Or use FMLE to publish: + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +The stream in SRS: +* Stream publish by encoder: rtmp://192.168.1.170:1935/live/livestream +* The stream forward by master SRS: rtmp://192.168.1.170:19350/live/livestream +* Play stream on master: rtmp://192.168.1.170/live/livestream +* Play strema on slave: rtmp://192.168.1.170:19350/live/livestream + +## Step 8, play the stream on master + +For detail, read [Forward](./forward.md) + +RTMP url is: `rtmp://192.168.1.170:1935/live/livestream` + +User can use vlc to play the RTMP stream. + +Note: Please replace all ip 192.168.1.170 to your server ip. + +## Step 9, play the stream on slave + +For detail, read [Forward](./forward.md) + +RTMP url is: `rtmp://192.168.1.170:19350/live/livestream` + +User can use vlc to play the RTMP stream. + +Note: Please replace all ip 192.168.1.170 to your server ip. + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-forward) + + diff --git a/versioned_docs/version-6.0/doc/sample-hls-cluster.md b/versioned_docs/version-6.0/doc/sample-hls-cluster.md new file mode 100644 index 00000000..f434bf1e --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-hls-cluster.md @@ -0,0 +1,158 @@ +--- +title: HLS Cluster Deploy +sidebar_label: HLS Cluster Deploy +hide_title: false +hide_table_of_contents: false +--- + +# HLS Edge Cluster Example + +Example for HLS Edge Cluster, like to create a CDN to deliver HLS files. 
+ +**Suppose the server ip is 192.168.1.170** + +## Step 1, Get SRS code + +For detail, read [GIT](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +Or update the exists code: + +```bash +git pull +``` + +## Step 2, Configure and build SRS + +For detail, read [Build](./install.md) + +```bash +./configure && make +``` + +## Step 3, Config origin srs, to generate HLS files + +See [HLS](./hls.md). + +Please use config `conf/hls.origin.conf`, or create a config file by: + +```bash +# conf/hls.origin.conf +listen 1935; +max_connections 1000; +daemon off; +srs_log_tank console; +http_server { + enabled on; + listen 8080; +} +vhost __defaultVhost__ { + hls { + enabled on; + hls_ctx off; + hls_ts_ctx off; + } +} +``` + +## Step 4, Config edge NGINX to deliver HLS files. + +See [Nginx for HLS](./nginx-for-hls.md). + +Save bellow as config, or use `conf/hls.edge.conf`: + +```bash +# conf/hls.edge.conf +worker_processes 3; +events { + worker_connections 10240; +} + +http { + # For Proxy Cache. + proxy_cache_path /tmp/nginx-cache levels=1:2 keys_zone=srs_cache:8m max_size=1000m inactive=600m; + proxy_temp_path /tmp/nginx-cache/tmp; + + server { + listen 8081; + # For Proxy Cache. + proxy_cache_valid 404 10s; + proxy_cache_lock on; + proxy_cache_lock_age 300s; + proxy_cache_lock_timeout 300s; + proxy_cache_min_uses 1; + + location ~ /.+/.*\.(m3u8)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + # For Proxy Cache. + proxy_cache srs_cache; + proxy_cache_key $scheme$proxy_host$uri$args; + proxy_cache_valid 200 302 10s; + } + location ~ /.+/.*\.(ts)$ { + proxy_pass http://127.0.0.1:8080$request_uri; + # For Proxy Cache. + proxy_cache srs_cache; + proxy_cache_key $scheme$proxy_host$uri; + proxy_cache_valid 200 302 60m; + } + } +} +``` + +## Step 5, Start SRS Origin and NGINX Edge Server + +```bash +nginx -c $(pwd)/conf/hls.edge.conf +./objs/srs -c conf/hls.origin.conf +``` + +> Note: Please follow instructions of [NGINX](https://nginx.org/) to download and install. + +## Step 6, Publish RTMP stream to SRS Origin, to generate HLS files. + +Use FFMPEG to publish stream: + +```bash +for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ +done +``` + +Or use OBS to publish: + +```bash +Server: rtmp://192.168.1.170/live +StreamKey: livestream +``` + +## Step 7, Play HLS stream + +HLS by SRS Origin: `http://192.168.1.170:8080/live/livestream.m3u8` + +HLS by NGINX Edge: `http://192.168.1.170:8081/live/livestream.m3u8` + +Note: Please replace all ip 192.168.1.170 to your server ip. + +## Step 8: Benchmark and More NGINX Edge Servers + +Please use [srs-bench](https://github.com/ossrs/srs-bench#usage) to simulate a set of visitors: + +```bash +docker run --rm -it --network=host --name sb ossrs/srs:sb \ + ./objs/sb_hls_load -c 100 -r http://192.168.1.170:8081/live/livestream.m3u8 +``` + +You could run more NGINX from another server, use the same config. + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-hls-cluster) + + diff --git a/versioned_docs/version-6.0/doc/sample-hls.md b/versioned_docs/version-6.0/doc/sample-hls.md new file mode 100644 index 00000000..4cc8a9c1 --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-hls.md @@ -0,0 +1,14 @@ +--- +title: HLS Deploy +sidebar_label: HLS Deploy +hide_title: false +hide_table_of_contents: false +--- + +# HLS deploy example + +Migrated to [HLS](./hls.md). 
+ +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-hls) + + diff --git a/versioned_docs/version-6.0/doc/sample-http-flv-cluster.md b/versioned_docs/version-6.0/doc/sample-http-flv-cluster.md new file mode 100644 index 00000000..3a1f48f6 --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-http-flv-cluster.md @@ -0,0 +1,165 @@ +--- +title: HTTP-FLV Cluster Deploy +sidebar_label: HTTP-FLV Cluster Deploy +hide_title: false +hide_table_of_contents: false +--- + +# HTTP FLV Cluster Example + +About the HTTP FLV cluster of SRS, read [HTTP FLV](./flv.md#about-http-flv) + +How to use multiple process for HTTP FLV? Please read [Reuse Port](./reuse-port.md) for detail. + +This example show how to deploy three SRS instance, listen at different port at a machine(user can deploy each to different machine, use same port), while one is origin server, another two are edge servers. We can publish RTMP to origin or edge, and play the RTMP/FLV at any edge. The latency is same to RTMP, 0.8-1s. + +**Suppose the server ip is 192.168.1.170** + +## Step 1, get SRS + +For detail, read [GIT](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +Or update the exists code: + +```bash +git pull +``` + +## Step 2, build SRS + +For detail, read [Build](./install.md) + +```bash +./configure && make +``` + +## Step 3, config origin SRS + +For detail, read [HTTP FLV](./flv.md) + +Save bellow as config, or use `conf/http.flv.live.conf`: + +```bash +# conf/http.flv.live.conf +listen 1935; +max_connections 1000; +http_server { + enabled on; + listen 8080; + dir ./objs/nginx/html; +} +vhost __defaultVhost__ { + http_remux { + enabled on; + mount [vhost]/[app]/[stream].flv; + hstrs on; + } +} +``` + +## Step 4, config edge SRS + +For detail, read [HTTP FLV](./flv.md) + +Save bellow as config, or use `conf/http.flv.live.edge1.conf` or `conf/http.flv.live.edge2.conf`: + +```bash +# conf/http.flv.live.edge1.conf +listen 19351; +max_connections 1000; +pid objs/srs.flv.19351.pid; +srs_log_file objs/srs.flv.19351.log; +http_server { + enabled on; + listen 8081; + dir ./objs/nginx/html; +} +vhost __defaultVhost__ { + mode remote; + origin 127.0.0.1; + http_remux { + enabled on; + mount [vhost]/[app]/[stream].flv; + hstrs on; + } +} +``` + +## Step 5, start SRS + +For detail, read [HTTP FLV](./flv.md) + +```bash +./objs/srs -c conf/http.flv.live.conf & +./objs/srs -c conf/http.flv.live.edge1.conf & +./objs/srs -c conf/http.flv.live.edge2.conf & +``` + +## Step 6, start Encoder + +For detail, read read [HTTP FLV](./flv.md) + +Use FFMPEG to publish stream: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +Or use FMLE to publish: + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +The streams on SRS origin: +* RTMP: `rtmp://192.168.1.170/live/livestream` +* HTTP FLV: `http://192.168.1.170:8080/live/livestream.flv` + +The streams on SRS edge1: +* RTMP: `rtmp://192.168.1.170:19351/live/livestream` +* HTTP FLV: `http://192.168.1.170:8081/live/livestream.flv` + +The streams on SRS edge2: +* RTMP: `rtmp://192.168.1.170:19352/live/livestream` +* HTTP FLV: `http://192.168.1.170:8082/live/livestream.flv` + +## Step 7, play RTMP + +For detail, read [HTTP FLV](./flv.md) + +Origin RTMP url is: `rtmp://192.168.1.170:1935/live/livestream`, User can use vlc to play the RTMP stream. 
+ +Edge1 RTMP url is: `rtmp://192.168.1.170:19351/live/livestream`, User can use vlc to play the RTMP stream. + +Edge2 RTMP url is: `rtmp://192.168.1.170:19352/live/livestream`, User can use vlc to play the RTMP stream. + +Note: Please replace all ip 192.168.1.170 to your server ip. + +## Step 8, play HTTP FLV + +For detail, read [HTTP FLV](./flv.md) + +Origin HTTP FLV url: `http://192.168.1.170:8080/live/livestream.flv`, User can use vlc to play the HLS stream. Or, use online SRS player(you must input the flv url): [srs-player](https://ossrs.net/players/srs_player.html) + +Edge1 HTTP FLV url: `http://192.168.1.170:8081/live/livestream.flv`, User can use vlc to play the HLS stream. Or, use online SRS player(you must input the flv url): [srs-player](https://ossrs.net/players/srs_player.html) + +Edge2 HTTP FLV url: `http://192.168.1.170:8082/live/livestream.flv`, User can use vlc to play the HLS stream. Or, use online SRS player(you must input the flv url): [srs-player](https://ossrs.net/players/srs_player.html) + +Note: Please replace all ip 192.168.1.170 to your server ip. + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-http-flv-cluster) + + diff --git a/versioned_docs/version-6.0/doc/sample-http-flv.md b/versioned_docs/version-6.0/doc/sample-http-flv.md new file mode 100644 index 00000000..12bf10b2 --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-http-flv.md @@ -0,0 +1,14 @@ +--- +title: HTTP-FLV Deploy +sidebar_label: HTTP-FLV Deploy +hide_title: false +hide_table_of_contents: false +--- + +# HTTP FLV deploy example + +Migrated to [HTTP-FLV](./flv.md). + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-http-flv) + + diff --git a/versioned_docs/version-6.0/doc/sample-http.md b/versioned_docs/version-6.0/doc/sample-http.md new file mode 100644 index 00000000..e211243d --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-http.md @@ -0,0 +1,124 @@ +--- +title: HTTP Server Deploy +sidebar_label: HTTP Server Deploy +hide_title: false +hide_table_of_contents: false +--- + +# SRS HTTP server deploy example + +SRS embeded HTTP server, to delivery HLS and files. + +**Suppose the server ip is 192.168.1.170** + +## Step 1, get SRS + +For detail, read [GIT](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +Or update the exists code: + +```bash +git pull +``` + +## Step 2, build SRS + +For detail, read [Build](./install.md) + +```bash +./configure && make +``` + +## Step 3, config SRS + +For detail, read [HLS](./hls.md) and [HTTP Server](./http-server.md) + +Save bellow as config, or use `conf/http.hls.conf`: + +```bash +# conf/http.hls.conf +listen 1935; +max_connections 1000; +http_server { + enabled on; + listen 8080; + dir ./objs/nginx/html; +} +vhost __defaultVhost__ { + hls { + enabled on; + hls_path ./objs/nginx/html; + hls_fragment 10; + hls_window 60; + } +} +``` + +Note: The hls_path must exists, srs never create it. 
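+For example, you could create the directory used by the config above before starting SRS:
+
+```bash
+mkdir -p ./objs/nginx/html
+```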
For detail, read [HLS](./hls.md) + +## Step 4, start SRS + +For detail, read [HLS](./hls.md) and [SRS HTTP Server](./http-server.md) + +```bash +./objs/srs -c conf/http.hls.conf +``` + +## Step 5, start Encoder + +For detail, read [HLS](./hls.md) + +Use FFMPEG to publish stream: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +Or use FMLE(which support h.264+aac) to publish, read [Transcode2HLS](./sample-transcode-to-hls.md): + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +The streams on SRS: +* RTMP: `rtmp://192.168.1.170/live/livestream` +* HLS: `http://192.168.1.170:8080/live/livestream.m3u8` + +## Step 6, play RTMP + +For detail, read [HLS](./hls.md) + +RTMP url is: `rtmp://192.168.1.170:1935/live/livestream` + +User can use vlc to play the RTMP stream. + +Note: Please replace all ip 192.168.1.170 to your server ip. + +## Step 7, play HLS + +For detail, read [HLS](./hls.md) + +HLS url: `http://192.168.1.170:8080/live/livestream.m3u8` + +User can use vlc to play the HLS stream. + +Or, use online SRS player: [srs-player](https://ossrs.net/players/srs_player.html) + +Note: Please replace all ip 192.168.1.170 to your server ip. + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-http) + + diff --git a/versioned_docs/version-6.0/doc/sample-ingest.md b/versioned_docs/version-6.0/doc/sample-ingest.md new file mode 100644 index 00000000..43c6d1c4 --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-ingest.md @@ -0,0 +1,89 @@ +--- +title: Ingest Deploy +sidebar_label: Ingest Deploy +hide_title: false +hide_table_of_contents: false +--- + +# Ingest deploy example + +SRS can start process to ingest file/stream/device, transcode or not, +then publish to SRS. For detail, read [Ingest](./ingest.md). + +**Suppose the server ip is 192.168.1.170** + +## Step 1, get SRS + +For detail, read [GIT](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +Or update the exists code: + +```bash +git pull +``` + +## Step 2, build SRS + +For detail, read [Build](./install.md) + +```bash +./configure --ffmpeg-tool=on && make +``` + +## Step 3, config SRS + +For detail, read [Ingest](./ingest.md) + +Save bellow as config, or use `conf/ingest.conf`: + +```bash +# conf/ingest.conf +listen 1935; +max_connections 1000; +vhost __defaultVhost__ { + ingest livestream { + enabled on; + input { + type file; + url ./doc/source.flv; + } + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine { + enabled off; + output rtmp://127.0.0.1:[port]/live?vhost=[vhost]/livestream; + } + } +} +``` + +## Step 4, start SRS + +For detail, read [Ingest](./ingest.md) + +```bash +./objs/srs -c conf/ingest.conf +``` + +The streams on SRS: +* Stream ingest: rtmp://192.168.1.170:1935/live/livestream + +## Step 5, play RTMP + +For detail, read [Ingest](./ingest.md) + +RTMP url is: `rtmp://192.168.1.170:1935/live/livestream` + +User can use vlc to play the RTMP stream. + +Note: Please replace all ip 192.168.1.170 to your server ip. 
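+The example above ingests a local file. Ingest can also pull a live stream, see [Ingest](./ingest.md) for the supported input types; a sketch, assuming a hypothetical RTMP camera source at `rtmp://192.168.1.171/live/camera`:
+
+```bash
+vhost __defaultVhost__ {
+    ingest camera {
+        enabled on;
+        input {
+            # Pull a live stream instead of reading a local file.
+            type stream;
+            url rtmp://192.168.1.171/live/camera;
+        }
+        ffmpeg ./objs/ffmpeg/bin/ffmpeg;
+        engine {
+            enabled off;
+            output rtmp://127.0.0.1:[port]/live?vhost=[vhost]/livestream;
+        }
+    }
+}
+```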
+ +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-ingest) + + diff --git a/versioned_docs/version-6.0/doc/sample-origin-cluster.md b/versioned_docs/version-6.0/doc/sample-origin-cluster.md new file mode 100644 index 00000000..fdf999fb --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-origin-cluster.md @@ -0,0 +1,156 @@ +--- +title: RTMP Origin Cluster +sidebar_label: RTMP Origin Cluster +hide_title: false +hide_table_of_contents: false +--- + +# RTMP Origin Cluster + +RTMP Origin Cluster is a powerful feature for huge pushing streams, +we could use RTMP Origin Cluster and RTMP Edge Cluster together, +to support huge pushing and pulling streams. + +**Suppose your server is: 192.168.1.170** + +## Step 1: Get SRS + +For more information please read [here](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +Or update your repository: + +```bash +git pull +``` + +## Step 2: Build SRS + +For more information please read [here](./install.md) + +```bash +./configure && make +``` + +## Step 3: Config the first origin, Origin ServerA + +For more information please read [here](./origin-cluster.md) + +You can use the file `conf/origin.cluster.serverA.conf`, or write your own: + +```bash +# conf/origin.cluster.serverA.conf +listen 19350; +max_connections 1000; +daemon off; +srs_log_tank console; +pid ./objs/origin.cluster.serverA.pid; +http_api { + enabled on; + listen 9090; +} +vhost __defaultVhost__ { + cluster { + mode local; + origin_cluster on; + coworkers 127.0.0.1:9091; + } +} +``` + +## Step 4: Config the second origin, Origin ServerB + +For more information please read [here](./origin-cluster.md) + +You can use the file `conf/origin.cluster.serverB.conf`, or write your own: + +```bash +# conf/origin.cluster.serverB.conf +listen 19351; +max_connections 1000; +daemon off; +srs_log_tank console; +pid ./objs/origin.cluster.serverB.pid; +http_api { + enabled on; + listen 9091; +} +vhost __defaultVhost__ { + cluster { + mode local; + origin_cluster on; + coworkers 127.0.0.1:9090; + } +} +``` + +## Step 5: Config edge server, which pulls streams from Origin Servers + +For more information please read [here](./origin-cluster.md) + +You can use the file `conf/origin.cluster.edge.conf`, or write your own: + +```bash +# conf/origin.cluster.edge.conf +listen 1935; +max_connections 1000; +pid objs/edge.pid; +daemon off; +srs_log_tank console; +vhost __defaultVhost__ { + cluster { + mode remote; + origin 127.0.0.1:19351 127.0.0.1:19350; + } +} +``` + +## Step 6: Start SRS servers + +For more information please read [here](./origin-cluster.md) + +```bash +./objs/srs -c conf/origin.cluster.serverA.conf & +./objs/srs -c conf/origin.cluster.serverB.conf & +./objs/srs -c conf/origin.cluster.edge.conf & +``` + +## Step 7: Push stream to any Origin Server + +For more information please read [here](./origin-cluster.md) + +By FFmpeg: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170:19350/live/livestream; \ + sleep 1; \ + done +``` + +Or FMLE: + +```bash +FMS URL: rtmp://192.168.1.170:19350/live +Stream: livestream +``` + +## Step 8: Play RTMP stream from Edge server + +For more information please read [here](./origin-cluster.md) + +RTMP URL is: `rtmp://192.168.1.170/live/livestream`, you can choose VLC. + +> Remark: Replace the IP `192.168.1.170` to your server IP. 
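+
+To check which origin currently holds the stream, you can query the HTTP API enabled on ports 9090 and 9091 above; a sketch, assuming the standard `/api/v1/streams` endpoint of the SRS HTTP API:
+
+```bash
+# Query each origin's HTTP API (ports from the configs above).
+curl http://192.168.1.170:9090/api/v1/streams
+curl http://192.168.1.170:9091/api/v1/streams
+```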
+ +Winlin 2018.2 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-origin-cluster) + + diff --git a/versioned_docs/version-6.0/doc/sample-realtime.md b/versioned_docs/version-6.0/doc/sample-realtime.md new file mode 100644 index 00000000..8829061a --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-realtime.md @@ -0,0 +1,111 @@ +--- +title: RTMP Realtime Deploy +sidebar_label: RTMP Realtime Deploy +hide_title: false +hide_table_of_contents: false +--- + +# RTMP low latency deploy example + +The SRS realtime(low latency) mode can decrease the latency to 0.8-3s. +For detail about latency, read [LowLatency](./low-latency.md). + +**Suppose the server ip is 192.168.1.170** + +## Step 1, get SRS + +For detail, read [GIT](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +Or update the exists code: + +```bash +git pull +``` + +## Step 2, build SRS + +For detail, read [Build](./install.md) + +```bash +./configure && make +``` + +## Step 3, config SRS + +For detail, read [LowLatency](./low-latency.md) + +Save bellow as config, or use `conf/realtime.conf`: + +```bash +# conf/realtime.conf +listen 1935; +max_connections 1000; +vhost __defaultVhost__ { + tcp_nodelay on; + min_latency on; + + play { + gop_cache off; + queue_length 10; + mw_latency 100; + } + + publish { + mr off; + } +} +``` + +## Step 4, start SRS + +For detail, read [LowLatency](./low-latency.md) + +```bash +./objs/srs -c conf/realtime.conf +``` + +## Step 5, start Encoder + +For detail, read [LowLatency](./low-latency.md) + +Use FFMPEG to publish stream: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +Or use FMLE to publish: + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +Note: To measure the latency, can use the clock of mobile phone. +![latency](/img/sample-realtime-001.png) + +## Step 6, play RTMP + +For detail, read [LowLatency](./low-latency.md) + +RTMP url is: `rtmp://192.168.1.170:1935/live/livestream` + +User can use vlc to play the RTMP stream. + +Note: Please replace all ip 192.168.1.170 to your server ip. + +Winlin 2014.12 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-realtime) + + diff --git a/versioned_docs/version-6.0/doc/sample-rtmp-cluster.md b/versioned_docs/version-6.0/doc/sample-rtmp-cluster.md new file mode 100644 index 00000000..89dfba8f --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-rtmp-cluster.md @@ -0,0 +1,120 @@ +--- +title: RTMP Cluster Deploy +sidebar_label: RTMP Cluster Deploy +hide_title: false +hide_table_of_contents: false +--- + +# RTMP Edge Cluster Example + +RTMP Edge cluster deploy example + +RTMP Edge cluster is the kernel feature of SRS. 
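+
+The topology built in this example is roughly:
+
+```text
+Encoder ---push RTMP---> Edge (1935) ---RTMP---> Origin (19350)
+Players ---play RTMP---> Edge (1935) <--RTMP---- Origin (19350)
+```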
+ +**Suppose the server ip is 192.168.1.170** + +## Step 1, get SRS + +For detail, read [GIT](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +Or update the exists code: + +```bash +git pull +``` + +## Step 2, build SRS + +For detail, read [Build](./install.md) + +```bash +./configure && make +``` + +## Step 3, config origin SRS + +For detail, read [RTMP](./rtmp.md) and [Edge](./edge.md) + +Save bellow as config, or use `conf/origin.conf`: + +```bash +# conf/origin.conf +listen 19350; +max_connections 1000; +pid objs/origin.pid; +srs_log_file ./objs/origin.log; +vhost __defaultVhost__ { +} +``` + +## Step 4, config edge SRS + +For detail, read [RTMP](./rtmp.md) and [Edge](./edge.md) + +Save bellow as config, or use `conf/edge.conf`: + +```bash +# conf/edge.conf +listen 1935; +max_connections 1000; +pid objs/edge.pid; +srs_log_file ./objs/edge.log; +vhost __defaultVhost__ { + cluster { + mode remote; + origin 127.0.0.1:19350; + } +} +``` + +## Step 5, start SRS + +For detail, read [RTMP](./rtmp.md) and [Edge](./edge.md) + +```bash +./objs/srs -c conf/origin.conf & +./objs/srs -c conf/edge.conf & +``` + +## Step 6, start Enocder + +For detail, read [RTMP](./rtmp.md) and [Edge](./edge.md) + +Use FFMPEG to publish stream: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +Or use FMLE to publish: + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +## Step 7, play RTMP + +For detail, read [RTMP](./rtmp.md) and [Edge](./edge.md) + +Origin RTMP url is: `rtmp://192.168.1.170:19350/live/livestream`, User can use vlc to play the RTMP stream. + +Edge RTMP url is: `rtmp://192.168.1.170:1935/live/livestream`, User can use vlc to play the RTMP stream. + +Note: Please replace all ip 192.168.1.170 to your server ip. + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-rtmp-cluster) + + diff --git a/versioned_docs/version-6.0/doc/sample-rtmp.md b/versioned_docs/version-6.0/doc/sample-rtmp.md new file mode 100644 index 00000000..ef26d723 --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-rtmp.md @@ -0,0 +1,12 @@ +--- +title: RTMP Deploy +sidebar_label: RTMP Deploy +hide_title: false +hide_table_of_contents: false +--- + +# RTMP Delivery + +Migrated to [RTMP](./rtmp.md). + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-rtmp) diff --git a/versioned_docs/version-6.0/doc/sample-srt.md b/versioned_docs/version-6.0/doc/sample-srt.md new file mode 100644 index 00000000..b4a563ae --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-srt.md @@ -0,0 +1,14 @@ +--- +title: SRT Deploy +sidebar_label: SRT Deploy +hide_title: false +hide_table_of_contents: false +--- + +# SRT deploy example + +Migrated to [SRT](./srt.md). + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-srt) + + diff --git a/versioned_docs/version-6.0/doc/sample-transcode-to-hls.md b/versioned_docs/version-6.0/doc/sample-transcode-to-hls.md new file mode 100644 index 00000000..5d93851f --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample-transcode-to-hls.md @@ -0,0 +1,142 @@ +--- +title: Transcode HLS Deploy +sidebar_label: Transcode HLS Deploy +hide_title: false +hide_table_of_contents: false +--- + +# Transcode for HLS deploy example + +HLS required h.264+aac, user can transcode for other codecs. 
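+
+If you are not sure which codecs your source uses, you can inspect it first; a minimal sketch, assuming ffprobe is available:
+
+```bash
+# Check the codecs of the bundled demo file (or your own source).
+ffprobe ./doc/source.flv
+```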
+ +Pure audio HLS, read [HLS audio only][http://ossrs.net/srs.release/wiki/HLS-Audio-Only] + +**Suppose the server ip is 192.168.1.170** + +## Step 1, get SRS + +For detail, read [GIT](./git.md) + +```bash +git clone https://github.com/ossrs/srs +cd srs/trunk +``` + +Or update the exists code: + +```bash +git pull +``` + +## Step 2, build SRS + +For detail, read [Build](./install.md) + +```bash +./configure --ffmpeg-tool=on && make +``` + +## Step 3, config SRS + +For detail, read [HLS](./hls.md) + +Save bellow as config, or use `conf/transcode2hls.audio.only.conf`: + +```bash +# conf/transcode2hls.audio.only.conf +listen 1935; +max_connections 1000; +http_server { + enabled on; + listen 8080; + dir ./objs/nginx/html; +} +vhost __defaultVhost__ { + hls { + enabled on; + hls_path ./objs/nginx/html; + hls_fragment 10; + hls_window 60; + } + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine ff { + enabled on; + vcodec copy; + acodec libfdk_aac; + abitrate 45; + asample_rate 44100; + achannels 2; + aparams { + } + output rtmp://127.0.0.1:[port]/[app]?vhost=[vhost]/[stream]_[engine]; + } + } +} +``` + +## Step 4, strat SRS + +For detail, read [HLS](./hls.md) + +```bash +./objs/srs -c conf/transcode2hls.audio.only.conf +``` + +## Step 5, start Encoder + +For detail, read [HLS](./hls.md) + +Use FFMPEG to publish stream: + +```bash + for((;;)); do \ + ./objs/ffmpeg/bin/ffmpeg -re -i ./doc/source.flv \ + -c copy \ + -f flv rtmp://192.168.1.170/live/livestream; \ + sleep 1; \ + done +``` + +Or use FMLE to publish: + +```bash +FMS URL: rtmp://192.168.1.170/live +Stream: livestream +``` + +The stream in SRS: +* RTMP URL: `rtmp://192.168.1.170/live/livestream` +* Transcode RTMP: `rtmp://192.168.1.170/live/livestream_ff` +* Transcode HLS: `http://192.168.1.170:8080/live/livestream_ff.m3u8` + +Note: we can use another vhost to output HLS, other codecs transcode then output to this vhost. + +## Step 6, play RTMP + +For detail, read [HLS](./hls.md) + +RTMP url is: `rtmp://192.168.1.170:1935/live/livestream_ff` + +User can use vlc to play the RTMP stream. + +Note: Please replace all ip 192.168.1.170 to your server ip. + +## Step 7, play HLS + +For detail, read [HLS](./hls.md) + +HLS url: `http://192.168.1.170:8080/live/livestream_ff.m3u8` + +User can use vlc to play the HLS stream. + +Or, use online SRS player: [srs-player](https://ossrs.net/players/srs_player.html) + +Note: Please replace all ip 192.168.1.170 to your server ip. + +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample-transcode-to-hls) + + diff --git a/versioned_docs/version-6.0/doc/sample.md b/versioned_docs/version-6.0/doc/sample.md new file mode 100644 index 00000000..3098b7a8 --- /dev/null +++ b/versioned_docs/version-6.0/doc/sample.md @@ -0,0 +1,269 @@ +--- +title: Use Scenarios +sidebar_label: Use Scenarios +hide_title: false +hide_table_of_contents: false +--- + +# Use Scenarios + +一般来讲,SRS的应用方式有以下几类: + +1. 搭建大规模CDN集群,可以在CDN内部的源站和边缘部署SRS。 +2. 小型业务快速搭建几台流媒体集群,譬如学校、企业等,需要分发的流不多,同时CDN覆盖不如自己部署几个节点,可以用SRS搭建自己的小集群。 +3. SRS作为源站,CDN作为加速边缘集群。比如推流到CDN后CDN转推到源站,播放时CDN会从源站取流。这样可以同时使用多个CDN。同时还可以在源站做DRM和DVR,输出HLS,更重要的是如果直接推CDN一般CDN之间不是互通的,当一个CDN出现故障无法快速切换到其他CDN。 +4. 编码器可以集成SRS支持拉流。一般编码器支持推RTMP/UDP流,如果集成SRS后,可以支持拉多种流。 +5. 协议转换网关,比如可以推送FLV到SRS转成RTMP协议,或者拉RTSP转RTMP,还有拉HLS转RTMP。SRS只要能接入流,就能输出能输出的协议。 +6. 学习流媒体可以用SRS。SRS提供了大量的协议的文档,wiki,和文档对应的代码,详细的issues,流媒体常见的功能实现,还有新流媒体技术的尝试。 +7. 
还可以装逼用,在SRS微信群里涉及到很多流媒体和传输的问题,是个装逼的好平台。 + +## Quzhibo + +趣直播,一个知识直播平台,目前直播技术为主。 + +主要流程: + +* obs 直播 +* 有三台hls 服务器,主 srs 自动 forward 到 srs,然后那三台切割 +* 有两台 flv 服务器,remote 拉群,发现有时会挂掉,用了个监控srs的脚本,一发现挂掉立马重启 +* srs 推流到七牛,利用七牛接口,来生成 m3u8 回放 这样可以结束后,立马看到回放 + +## Europe: Forward+EXEC + +BEGINHO STREAMING PROJECT + +I needed solution for pushing streams from origin server +to edge server. On origin server all streams are avaliable +in multicast (prepared with ffmpeg, h264 in mpegts container). +But routing multicast through GRE tunnel to the edge +server was very buggy. Any networks hickups in origin-edge +route were affecting streams in bad way (freezeing, pixelation and such)... +So, I found SRS project and after some reading of docs, I +decided to give it a try. Most intereseting feature of SRS +to me was a "forward" option. It allows to push all streams +you have avaliable on local server (SRS origin) to remote +server (SRS edge) with a single line in config file. +https://ossrs.net/lts/zh-cn/docs/v4/doc/sample-forward + +SRS2 config on origin server: +``` + vhost __defaultVhost__ { + forward xxx:19350; + } +``` + +I "told" to ffmpegs on transcoder to publish stream to rtmp, +instead of multicast (and yes, I used multicast group as rtmp stream name): +``` + ffmpeg -i udp://xxx:1234 -vcodec libx264 -acodec libfdk_aac \ + -metadata service_name="Channel 1" -metadata service_provider="PBS" \ + -f flv rtmp://xxx:1935/live/xxx:1234 +``` + +Tested stream with ffprobe: +``` + [root@encoder1 ~]# ffprobe rtmp://xxx:1935/live/xxx:1234 + Input #0, flv, from 'rtmp://xxx:1935/live/xxx:1234': + Metadata: + service_name : Channel 1 + service_provider: PBS + encoder : Lavf57.24.100 + server : SRS/2.0.209(ZhouGuowen) + srs_primary : SRS/1.0release + srs_authors : winlin,wenjie.zhao + server_version : 2.0.209 + Duration: N/A, start: 0.010000, bitrate: N/A + Stream #0:0: Audio: aac (LC), 48000 Hz, stereo, fltp, 128 kb/s + Stream #0:1: Video: h264 (High), yuvj420p(pc, bt709), 720x576 [SAR 16:11 DAR 20:11], 24 fps, 24 tbr, 1k tbn +``` + +On edge server (example IP xxx), there is a streaming software +wich accepts only mpegts as source. So, after receiving rtmp streams +from origin server, I needed all streams back to mpegts. +SRS have support for several types for output (hls, hds, rtmp, http-flv...) +but not mpegts, and i need udp mpegts. Then I asked Winlin for help +and he suggested to use SRS3 on edge server, as SRS3 have an feature +that SRS2 dont, and thats "exec" option. In SRS3 config, you can use +exec option, to call ffmpeg for every incoming stream and convert it to +whatever you like. I compiled SRS3 with "--with-ffmpeg" switch +(yes, source tree comes with ffmpeg in it) on edge server and... + +SRS3 config on edge: +``` + listen 19350; + max_connections 1024; + srs_log_tank file; + srs_log_file ./objs/srs.slave.log; + srs_log_level error; + vhost __defaultVhost__ { + exec { + enabled on; + publish ./objs/ffmpeg/bin/ffmpeg -v quiet -re -i rtmp://127.0.0.1:1935/[app]?vhost=[vhost]/[stream] -c copy -vbsf h264_mp4toannexb -f mpegts "udp://[stream]?localaddr=127.0.0.1&pkt_size=1316"; + } + } +``` + +FFmpeg will convert all incoming streams to udp mpegts, binding them +to lo (127.0.0.1) interface (you dont want multicast to leak all around). +SRS3 will use [stream] for udp address, thats why rtmp stream have name +by its multicast group on origin server ;) +When converting from rtmp to mpegts, "-vbsf h264_mp4toannexb" option is needed! 
+After starting SRS3 with this config, i checked is stream forwarded from +master server properly. So, ffprobe again, now on edge server: +``` + [root@edge ~]# ffprobe udp://xxx:1234?localaddr=127.0.0.1 + Input #0, mpegts, from 'udp://xxx:5002?localaddr=127.0.0.1': + Duration: N/A, start: 29981.146500, bitrate: 130 kb/s + Program 1 + Metadata: + service_name : Channel 1 + service_provider: PBS + Stream #0:0[0x100]: Video: h264 (High) ([27][0][0][0] / 0x001B), yuvj420p(pc, bt709), 720x576 [SAR 16:11 DAR 20:11], 24 fps, 24 tbr, 90k tbn, 180k tbc + Stream #0:1[0x101]: Audio: aac ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 130 kb/s +``` + +I keep adding new streams with ffmpeg at origin server and they are avaliable +on slave server after second or two. Its almost a year when I started this origin +and edge SRS instances and they are still working without single restart ;) + +Many thanks to Winlin! + +## LijiangTV + +[丽江热线](https://www.lijiangtv.com/live/),丽江广播电视台。 + +## UPYUN + +2015,[又拍云直播部分](https://www.upyun.com/solutions/video.html),在SRS3基础上深度定制的版本。 + +## bravovcloud + +2015,[观止云直播服务器](http://www.bravovcloud.com/product/yff/),在SRS3基础上深度定制的版本。 + +## gosun + +2014.11,[高升CDN直播部分](http://www.gosun.com/service/streaming_acceleration.html),在SRS1的基础上深度定制的版本。 + +## 北京云博视 + +2014.10.10 by 谁变 63110982
+[http://www.y-bos.com/](http://www.y-bos.com/)
+
+## verycdn
+
+[verycdn](http://www.verycdn.cn/) started using SRS.
+
+2014.9.13 by 1163202026 11:19:35&#13;
+SRS is under testing on our side. We did not try anything else and went straight with SRS; so far the tests look OK, with no major problems.
+
+## SRS Product Users
+
+2014.7.23 by 阿才(1426953942) 11:04:01&#13;
+I have only been working with SRS for a few months, so I dare not offer much of an opinion. From what I have learned in this time, the project is done remarkably well, the author and the team do excellent work, and their dedication deserves real praise. I am still learning.
+
+2014.7.23 by 随想曲(156530446) 11:04:48&#13;
+Speaking as a user, I simply treat it as a proper, professional-grade product!
+
+2014.7.23 by 湖中鱼(283946467) 11:06:23&#13;
+I have not analyzed SRS in depth; I just feel the author's documentation reads smoothly and is not without humor. So far the only features I use are RTMP push for live streaming and HLS, which nginx-rtmp also provides, so I still chose the foreign project.
+
+2014.7.23 by 我是蝈蝈(383854294) 11:11:59&#13;
+为什么用SRS?轻便,省资源,有中文说明。SRS那些一站式的脚本与演示demo就能看出来作者是很用心的 + +## web秀场 + +2014.7 by 刘重驰 + +我们目前正在调研 准备用到web秀场 和 移动端流媒体服务上 + +## 视频直播 + +2014.7 by 大腰怪 + +## 远程视频直播 + +2014.7 by 韧 + +我们的分发服务器用的就是srs,简单易用,稳定性好 + +我们以前也用过几个分发软件,都没有srs好用,真心的 + +## chnvideo + +2014.7 [chnvideo](http://chnvideo.com/)编码器内置SRS提供RTMP和HLS拉服务。 + +## 某工厂监控系统 + +2014.4 by 斗破苍穷(154554381) + +某工厂的监控系统主要组成: +* 采集端:采集端采用IPC摄像头安装在工厂重要监控位置,通过网线或者wifi连接到监控中心交换机。 +* 监控中心:中心控制服务器,负责管理采集端和流媒体服务器,提供PC/Android/IOS观看平台。 +* 流媒体服务器:负责接收采集端的流,提供观看端RTMP/HLS的流。 +* 观看端:PC/Android/IOS。要求PC端的延迟在3秒内。Android/IOS延迟在20秒之内。 + +主要流程包括: +* 采集端启动:IPC摄像头像监控中心注册,获得发布地址,并告知监控中心采集端的信息,譬如摄像头设备名,ip地址,位置信息之类。 +* 采集端开始推流:IPC摄像头使用librtmp发布到地址,即将音频视频数据推送到RTMP流媒体服务器。 +* 流媒体服务器接收流:流媒体服务器使用SRS,接收采集端的RTMP流。FMS-3/3.5/4.5都有问题,估计是和librtmp对接问题。 +* 观看端观看:用户使用PC/Android/IOS登录监控中心后,监控中心返回所有的摄像头信息和流地址。PC端使用flash,延迟在3秒之内;Android/IOS使用HLS,延迟在20秒之内。 +* 时移:监控中心会开启录制计划,将RTMP流录制为FLV文件。用户可以在监控中心观看录制的历史视频。 + +## 网络摄像机 + +2014.4 by camer(2504296471) + +网络摄像机使用hi3518芯片,如何用网页无插件直接观看网络摄像机的流呢? + +目前有应用方式如下: +* hi3518上跑采集和推流程序(用srslibrtmp) +* 同时hi3518上还跑了srs/nginx-rtmp作为服务器 +* 推流程序推到hi3518本机的nginx服务器 +* PC上网页直接观看hi3518上的流 + +## IOS可以看的监控 + +2014.3 by 独孤不孤独(378668966) + +一般监控摄像头只支持输出RTMP/RTSP,或者支持RTSP方式读取流。如果想在IOS譬如IPad上看监控的流,怎么办?先部署一套rtmp服务器譬如nginx-rtmp/crtmpd/wowza/red5之类,然后用ffmpeg把rtsp流转成rtmp(或者摄像头直接推流到rtmp服务器),然后让服务器切片成hls输出,在IOS上观看。想想都觉得比较麻烦额,如果摄像头比较多怎么办?一个服务器还扛不住,部署集群? + +最简单的方式是什么?摄像头自己支持输出HLS流不就好了?也就是摄像头有个内网ip作为服务器,摄像头给出一个hls的播放地址,IOS客户端譬如IPad可以播放这个HLS地址。 + +SRS最适合做这个事情,依赖很少,提供[arm编译脚本](./sample-arm.md),只需要[改下configure的交叉编译工具](./arm.md#%E4%BD%BF%E7%94%A8%E5%85%B6%E4%BB%96%E4%BA%A4%E5%8F%89%E7%BC%96%E8%AF%91%E5%B7%A5%E5%85%B7)就可以编译了。 + +主要流程: +* 编译arm下的srs,部署到树莓派,在摄像头中启动srs。 +* 使用ffmpeg将摄像头的rtsp以RTMP方式推到srs。或者用自己程序采集设备数据推送RTMP流到srs。 +* srs分发RTMP流和HLS流。其实PC上也可以看了。 +* IOS譬如IPad上播放HLS地址。 + +## 清华活动直播 + +2014.2 by youngcow(5706022) + +清华大学每周都会有活动,譬如名家演讲等,使用SRS支持,少量的机器即可满足高并发。 + +主要流程: +* 在教室使用播控系统(摄像机+采集卡或者摄像机+导播台)推送RTMP流到主SRS +* 主SRS自动Forward给从SRS(参考[Forward](./forward.md)) +* PC客户端(Flash)使用FlowerPlayer,支持多个服务器的负载均衡 +* FlowerPlayer支持在两个主从SRS,自动选择一个服务器,实现负载均衡 + +主要的活动包括: +* 2014-02-23,丘成桐清华演讲 + +## 某农场监控 + +2014.1 by 孙悟空 + +农场中摄像头支持RTSP访问,FFMPEG将RTSP转换成RTMP推送到SRS,flash客户端播放RTMP流。同时flash客户端可以和控制服务器通信,控制农场的浇水和施肥。 + +![农场植物开花了](http://ossrs.net/srs.release/wiki/images/application/farm.jpg) + +截图:农场的植物开花了,据说种的是萝卜。。。 + +Winlin 2014.2 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/sample) + + diff --git a/versioned_docs/version-6.0/doc/security.md b/versioned_docs/version-6.0/doc/security.md new file mode 100644 index 00000000..0cf1aff0 --- /dev/null +++ b/versioned_docs/version-6.0/doc/security.md @@ -0,0 +1,68 @@ +--- +title: Security +sidebar_label: Security +hide_title: false +hide_table_of_contents: false +--- + +# Security + +SRS provides simple security strategy to allow or deny specifies clients. + +## Config + +The config for security of vhost: + +``` +vhost your_vhost { + # security for host to allow or deny clients. + # @see https://github.com/ossrs/srs/issues/211 + security { + # whether enable the security for vhost. 
+ # default: off + enabled on; + # the security list, each item format as: + # allow|deny publish|play all| + # for example: + # allow publish all; + # deny publish all; + # allow publish 127.0.0.1; + # deny publish 127.0.0.1; + # allow publish 10.0.0.0/8; + # deny publish 10.0.0.0/8; + # allow play all; + # deny play all; + # allow play 127.0.0.1; + # deny play 127.0.0.1; + # allow play 10.0.0.0/8; + # deny play 10.0.0.0/8; + # SRS apply the following simple strategies one by one: + # 1. allow all if security disabled. + # 2. default to deny all when security enabled. + # 3. allow if matches allow strategy. + # 4. deny if matches deny strategy. + allow play all; + allow publish all; + } +} +``` + +Please see `conf/security.deny.publish.conf` for detail. + +## Kickoff Client + +SRS provides api to kickoff user, read [wiki](./http-api.md#kickoff-client). + +## Bug + +The bug about this feature, read [#211](https://github.com/ossrs/srs/issues/211) + +## Reload + +When reload the security config, it only effects the new clients. + +Winlin 2015.1 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/security) + + diff --git a/versioned_docs/version-6.0/doc/service.md b/versioned_docs/version-6.0/doc/service.md new file mode 100644 index 00000000..7727a3ab --- /dev/null +++ b/versioned_docs/version-6.0/doc/service.md @@ -0,0 +1,175 @@ +--- +title: Linux Service +sidebar_label: Linux Service +hide_title: false +hide_table_of_contents: false +--- + +# SRS Linux Service + +There are many ways to startup SRS: +* Directly run srs at the trunk/objs, and need start again when system restart. +* Linux service, the init.d scirpt at `srs/trunk/etc/init.d/srs`, and user can add to linux service when linked to the /etc/init.d/srs then add as service `/sbin/chkconfig --add srs`. + +The SRS release binary can be downloaded from release site, we can install as system service, see: [Github: release](http://ossrs.net/srs.release) or [Mirror for China: release](http://www.ossrs.net) + +## Manual + +We donot need to add to linux service to directly start SRS: + +```bash +cd srs/trunk && +./etc/init.d/srs start +``` + +or + +```bash +cd srs/trunk && +./objs/srs -c conf/srs.conf +``` + +## init.d + +Install and startup SRS as linux system service: +* Build SRS: the install script will modify the INSTALL ROOT of init.d script. +* Link to init.d: link the `trunk/etc/init.d/srs` to `/etc/init.d/srs` +* Add to linux service: use /sbin/chkconfig for Centos. + +Step1: Build and Install SRS + +Intall SRS when build ok: + +```bash +make && sudo make install +``` + +the install of make will install srs to the prefix dir, default to `/usr/local/srs`, which is specified by configure, for instance, ```./configure --prefix=`pwd`/_release``` set the install dir to _release of current dir to use `make install` without sudo. 
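+
+For example, a minimal sketch of installing into a local directory instead of `/usr/local/srs` (the `_release` path is only an example):
+
+```bash
+# Configure a local install prefix so `make install` does not need sudo.
+./configure --prefix=`pwd`/_release
+make
+make install
+```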
+ +Step2: Link script to init.d: + +```bash +sudo ln -sf \ + /usr/local/srs/etc/init.d/srs \ + /etc/init.d/srs +``` + +Step3:Add as linux service: + +```bash +#centos 6 +sudo /sbin/chkconfig --add srs +``` + +or + +```bash +#ubuntu12 +sudo update-rc.d srs defaults +``` + +Use init.d script + +Get the status of SRS: + +```bash +/etc/init.d/srs status +``` + +Start SRS: + +```bash +/etc/init.d/srs start +``` + +Stop SRS: + +```bash +/etc/init.d/srs stop +``` + +Restart SRS: + +```bash +/etc/init.d/srs restart +``` + +Reload SRS: + +```bash +/etc/init.d/srs reload +``` + +For logrotate(`SIGUSR1`): + +```bash +/etc/init.d/srs rotate +``` + +For Gracefully Quit(`SIGQUIT`): + +```bash +/etc/init.d/srs grace +``` + +## systemctl + +Ubuntu20 use systemctl to manage services, we also need to install init.d service, then add to systemctl: + +``` +./configure && make && sudo make install && +sudo ln -sf /usr/local/srs/etc/init.d/srs /etc/init.d/srs && +sudo cp -f /usr/local/srs/usr/lib/systemd/system/srs.service /usr/lib/systemd/system/srs.service && +sudo systemctl daemon-reload && sudo systemctl enable srs +``` + +> Remark: We MUST copy the srs.service, or we couldn't enable the service by systemctl. + +Use systemctl to start SRS: + +``` +sudo systemctl start srs +``` + +## Gracefully Upgrade + +Gracefully Upgrade allows upgrade with zero downtime, it can be done by: + +* New SRS and old SRS should be able to listen at the same ports. They provide services in the same ports simultaneously. +* The old SRS then closes listeners, and quit util all connections closed, this is Gracefully Quit. + +> Note: About more informations, please see [#1579](https://github.com/ossrs/srs/issues/1579#issuecomment-587233844). + +SRS3 supports Gracefully Quit: + +* Use signal `SIGQUIT`, or command `/etc/init.d/srs grace` +* A new config `grace_start_wait` to wait for a while then start gracefully quit, default 2.3s +* A new config `grace_final_wait` allows wait for a few minutes finally, default 3.2s +* A new config `force_grace_quit` to force gracefully quit, see [#1579](https://github.com/ossrs/srs/issues/1579#issuecomment-587475077). + +```bash +# For gracefully quit, wait for a while then close listeners, +# because K8S notify SRS with SIGQUIT and update Service simultaneously, +# maybe there is some new connections incoming before Service updated. +# @see https://github.com/ossrs/srs/issues/1595#issuecomment-587516567 +# default: 2300 +grace_start_wait 2300; +# For gracefully quit, final wait for cleanup in milliseconds. +# @see https://github.com/ossrs/srs/issues/1579#issuecomment-587414898 +# default: 3200 +grace_final_wait 3200; +# Whether force gracefully quit, never fast quit. +# By default, SIGTERM which means fast quit, is sent by K8S, so we need to +# force SRS to treat SIGTERM as gracefully quit for gray release or canary. 
+# @see https://github.com/ossrs/srs/issues/1579#issuecomment-587475077 +# default: off +force_grace_quit off; +``` + +> Note: There is a example for Gracefully Quit, see [#1579](https://github.com/ossrs/srs/issues/1579#issuecomment-587414898) + +Winlin 2019.10 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/service) + + diff --git a/versioned_docs/version-6.0/doc/snapshot.md b/versioned_docs/version-6.0/doc/snapshot.md new file mode 100644 index 00000000..bfd68d56 --- /dev/null +++ b/versioned_docs/version-6.0/doc/snapshot.md @@ -0,0 +1,133 @@ +--- +title: Snapshot +sidebar_label: Snapshot +hide_title: false +hide_table_of_contents: false +--- + +# Snapshot + +SRS provides workaround for snapshots: + +1. HttpCallback: Use http callbacks to handle `on_publish` event to snapshot by FFMPEG, and to stop FFMPEG when got `on_unpublish` event. +1. Transcoder: Use transcoder to snapshot. + +## HttpCallback + +This section describes how to use http callbacks to snapshot. + +First, start the sample api server: +``` +cd research/api-server && go run server.go 8085 +``` + +Second, write the config for SRS to snapshot: +``` +# snapshot.conf +listen 1935; +max_connections 1000; +daemon off; +srs_log_tank console; +vhost __defaultVhost__ { + http_hooks { + enabled on; + on_publish http://127.0.0.1:8085/api/v1/snapshots; + on_unpublish http://127.0.0.1:8085/api/v1/snapshots; + } + ingest { + enabled on; + input { + type file; + url ./doc/source.flv; + } + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine { + enabled off; + output rtmp://127.0.0.1:[port]/live?vhost=[vhost]/livestream; + } + } +} +``` + +Thrird, start SRS and the ingest will publish RTMP stream, which will trigger the `on_publish` event, then api will snapshot: +``` +./objs/srs -c snapshot.conf +``` + +The snapshot generate thumbnails to directory: +``` +winlin:srs winlin$ ls -lh research/api-server/static-dir/live/*.png +-rw-r--r-- 1 winlin staff 73K Oct 20 13:35 livestream-001.png +-rw-r--r-- 1 winlin staff 91K Oct 20 13:35 livestream-002.png +-rw-r--r-- 1 winlin staff 11K Oct 20 13:35 livestream-003.png +-rw-r--r-- 1 winlin staff 167K Oct 20 13:35 livestream-004.png +-rw-r--r-- 1 winlin staff 172K Oct 20 13:35 livestream-005.png +-rw-r--r-- 1 winlin staff 264K Oct 20 13:35 livestream-006.png +lrwxr-xr-x 1 winlin staff 105B Oct 20 13:35 livestream-best.png -> livestream-006.png +``` + +The thumbnail `live-livestream-best.png` will link to the big one to avoid blank image. 
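+
+For reference, the snapshot is essentially an FFmpeg capture of the published RTMP stream, started by the example API server; a rough sketch (the exact flags and output path are assumptions, see `research/api-server/server.go` for the real command):
+
+```bash
+# Sketch of the snapshot command (assumed flags and output path).
+ffmpeg -i rtmp://127.0.0.1/live/livestream \
+    -vf fps=1 -vcodec png -f image2 -an -y -vframes 6 \
+    ./research/api-server/static-dir/live/livestream-%03d.png
+```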
+ +User can access it by http server: [http://localhost:8085/live/livestream-best.png](http://localhost:8085/live/livestream-best.png) + +## Transcoder + +The transcoder can used for snapshot: + +``` +listen 1935; +max_connections 1000; +daemon off; +srs_log_tank console; +vhost __defaultVhost__ { + transcode { + enabled on; + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine snapshot { + enabled on; + iformat flv; + vfilter { + vf fps=1; + } + vcodec png; + vparams { + vframes 6; + } + acodec an; + oformat image2; + output ./objs/nginx/html/[app]/[stream]-%03d.png; + } + } + ingest { + enabled on; + input { + type file; + url ./doc/source.flv; + } + ffmpeg ./objs/ffmpeg/bin/ffmpeg; + engine { + enabled off; + output rtmp://127.0.0.1:[port]/live?vhost=[vhost]/livestream; + } + } +} +``` + +The thumbnails: +``` +winlin:srs winlin$ ls -lh objs/nginx/html/live/*.png +-rw-r--r-- 1 winlin staff 265K Oct 20 14:52 livestream-001.png +-rw-r--r-- 1 winlin staff 265K Oct 20 14:52 livestream-002.png +-rw-r--r-- 1 winlin staff 287K Oct 20 14:52 livestream-003.png +-rw-r--r-- 1 winlin staff 235K Oct 20 14:52 livestream-004.png +-rw-r--r-- 1 winlin staff 315K Oct 20 14:52 livestream-005.png +-rw-r--r-- 1 winlin staff 405K Oct 20 14:52 livestream-006.png +``` + +Note: SRS never choose the best thumbnail. + +Winlin 2015.10 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/snapshot) + + diff --git a/versioned_docs/version-6.0/doc/special-control.md b/versioned_docs/version-6.0/doc/special-control.md new file mode 100644 index 00000000..42f9005a --- /dev/null +++ b/versioned_docs/version-6.0/doc/special-control.md @@ -0,0 +1,176 @@ +--- +title: Special Control +sidebar_label: Special Control +hide_title: false +hide_table_of_contents: false +--- + +# SpecialControl + +SRS provides a set of config to ingerate with other systems. + +## Send Minimal Interval + +``` +vhost __defaultVhost__ { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # the minimal packets send interval in ms, + # used to control the ndiff of stream by srs_rtmp_dump, + # for example, some device can only accept some stream which + # delivery packets in constant interval(not cbr). + # @remark 0 to disable the minimal interval. + # @remark >0 to make the srs to send message one by one. + # @remark user can get the right packets interval in ms by srs_rtmp_dump. + # default: 0 + send_min_interval 10.0; + } +} +``` + +## Reduce Sequence Header + +``` +vhost __defaultVhost__ { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # whether reduce the sequence header, + # for some client which cannot got duplicated sequence header, + # while the sequence header is not changed yet. + # default: off + reduce_sequence_header on; + } +} +``` + +## Publish 1st Packet Timeout + +``` +vhost __defaultVhost__ { + # the config for FMLE/Flash publisher, which push RTMP to SRS. + publish { + # the 1st packet timeout in ms for encoder. + # default: 20000 + firstpkt_timeout 20000; + } +} +``` + +## Publish Normal Timeout + +``` +vhost __defaultVhost__ { + # the config for FMLE/Flash publisher, which push RTMP to SRS. + publish { + # the normal packet timeout in ms for encoder. 
+ # default: 5000 + normal_timeout 7000; + } +} +``` + +## Debug SRS Upnode + +``` +vhost __defaultVhost__ { + # when upnode(forward to, edge push to, edge pull from) is srs, + # it's strongly recommend to open the debug_srs_upnode, + # when connect to upnode, it will take the debug info, + # for example, the id, source id, pid. + # please see https://ossrs.io/lts/en-us/docs/v4/doc/log + # default: on + debug_srs_upnode on; +} +``` + +## UTC Time + +``` +# whether use utc_time to generate the time struct, +# if off, use localtime() to generate it, +# if on, use gmtime() instead, which use UTC time. +# default: off +utc_time off; +``` + +## HLS TS Floor + +``` +vhost __defaultVhost__ { + hls { + # whether use floor for the hls_ts_file path generation. + # if on, use floor(timestamp/hls_fragment) as the variable [timestamp], + # and use enahanced algorithm to calc deviation for segment. + # @remark when floor on, recommend the hls_segment>=2*gop. + # default: off + hls_ts_floor off; + } +} +``` + +## HLS Wait Keyframe + +``` +vhost __defaultVhost__ { + hls { + # whether wait keyframe to reap segment, + # if off, reap segment when duration exceed the fragment, + # if on, reap segment when duration exceed and got keyframe. + # default: on + hls_wait_keyframe on; + } +} +``` + +## HttpHooks On HLS Notify + +``` +vhost __defaultVhost__ { + http_hooks { + # when srs reap a ts file of hls, call this hook, + # used to push file to cdn network, by get the ts file from cdn network. + # so we use HTTP GET and use the variable following: + # [app], replace with the app. + # [stream], replace with the stream. + # [ts_url], replace with the ts url. + # ignore any return data of server. + # @remark random select a url to report, not report all. + on_hls_notify http://127.0.0.1:8085/api/v1/hls/[app]/[stream][ts_url]; + } +} +``` + +## TCP NoDelay + +``` +vhost __defaultVhost__ { + # whether enable the TCP_NODELAY + # if on, set the nodelay of fd by setsockopt + # default: off + tcp_nodelay on; +} +``` + +## ATC Auto + +``` +vhost __defaultVhost__ { + # for play client, both RTMP and other stream clients, + # for instance, the HTTP FLV stream clients. + play { + # whether enable the auto atc, + # if enabled, detect the bravo_atc="true" in onMetaData packet, + # set atc to on if matched. + # always ignore the onMetaData if atc_auto is off. + # default: off + atc_auto off; + } +} + +Winlin 2015.8 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/special-control) + + diff --git a/versioned_docs/version-6.0/doc/srs-lib-rtmp.md b/versioned_docs/version-6.0/doc/srs-lib-rtmp.md new file mode 100644 index 00000000..bdb37653 --- /dev/null +++ b/versioned_docs/version-6.0/doc/srs-lib-rtmp.md @@ -0,0 +1,18 @@ +--- +title: Librtmp +sidebar_label: Librtmp +hide_title: false +hide_table_of_contents: false +--- + +# SRS librtmp + +[SRS](https://github.com/ossrs/srs) is a dedicated server project, +please use [librtmp](https://github.com/ossrs/librtmp) instead, +please read [#32](https://github.com/ossrs/srs-librtmp/issues/32). 
+ +Winlin 2014.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/srs-lib-rtmp) + + diff --git a/versioned_docs/version-6.0/doc/srt-codec.md b/versioned_docs/version-6.0/doc/srt-codec.md new file mode 100644 index 00000000..5819f3c3 --- /dev/null +++ b/versioned_docs/version-6.0/doc/srt-codec.md @@ -0,0 +1,14 @@ +--- +title: SRT Codec +sidebar_label: SRT Codec +hide_title: false +hide_table_of_contents: false +--- + +# SRT codec support + +Migrated to [SRT](./srt.md). + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/srt-codec) + + diff --git a/versioned_docs/version-6.0/doc/srt-params.md b/versioned_docs/version-6.0/doc/srt-params.md new file mode 100644 index 00000000..716a32d2 --- /dev/null +++ b/versioned_docs/version-6.0/doc/srt-params.md @@ -0,0 +1,14 @@ +--- +title: SRT Params +sidebar_label: SRT Params +hide_title: false +hide_table_of_contents: false +--- + +# SRT Config + +Migrated to [SRT](./srt.md). + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/srt-params) + + diff --git a/versioned_docs/version-6.0/doc/srt-url.md b/versioned_docs/version-6.0/doc/srt-url.md new file mode 100644 index 00000000..140b30d9 --- /dev/null +++ b/versioned_docs/version-6.0/doc/srt-url.md @@ -0,0 +1,14 @@ +--- +title: SRT URL +sidebar_label: SRT URL +hide_title: false +hide_table_of_contents: false +--- + +# SRT URL Specification + +Migrated to [SRT](./srt.md). + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/srt-url) + + diff --git a/versioned_docs/version-6.0/doc/srt.md b/versioned_docs/version-6.0/doc/srt.md new file mode 100644 index 00000000..85703f4e --- /dev/null +++ b/versioned_docs/version-6.0/doc/srt.md @@ -0,0 +1,391 @@ +--- +title: SRT +sidebar_label: SRT +hide_title: false +hide_table_of_contents: false +--- + +# SRT + +SRT (Secure Reliable Transport) is a broadcasting protocol created by Haivision to replace RTMP. Many live streaming +encoders like OBS, vMix, and FFmpeg already support SRT, and many users prefer it for streaming. + +Adobe hasn't been updating the RTMP protocol or submitting it to standard organizations like RFC, so it doesn't support +many new features like HEVC or Opus. In March 2023, the Enhanced RTMP project was created, which now supports HEVC and +AV1. SRS and OBS also support HEVC encoding based on Enhanced RTMP. + +Since SRT uses TS encapsulation, it naturally supports new codecs. SRT is based on the UDP protocol, so it has lower +latency and better performance on weak networks than RTMP. RTMP latency is usually 1-3 seconds, while SRT latency is +300-500 milliseconds. SRT is more stable on weak networks, making it better for long-distance and outdoor broadcasting. + +SRT is a core protocol of SRS. SRS has supported SRT since 2020 and improved its consistency with other core protocols +in 2022. SRT and RTMP have very high consistency in terms of callbacks and API support. + +Please refer to [#1147](https://github.com/ossrs/srs/issues/1147) for the detailed research and development process. 
+ +## Usage + +SRS has built-in support for SRT and can be used with [docker](./getting-started.md) or [compiled from source](./getting-started-build.md): + +```bash +docker run --rm -it -p 1935:1935 -p 8080:8080 -p 10080:10080/udp ossrs/srs:5 \ + ./objs/srs -c conf/srt.conf +``` + +Use [FFmpeg(click to download)](https://ffmpeg.org/download.html) or [OBS(click to download)](https://obsproject.com/download) to push the stream: + +```bash +ffmpeg -re -i ./doc/source.flv -c copy -pes_payload_size 0 -f mpegts \ + 'srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=publish' +``` + +Open the following page to play the stream (if SRS is not on your local machine, replace localhost with the server IP): + +* RTMP(VLC/ffplay): `rtmp://localhost/live/livestream` +* HLS by SRS player: [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html) +* SRT(VLC/ffplay): `srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=request` + +SRS supports converting SRT to other protocols, which will be described in detail below. + +## Config + +The configuration for SRT is as follows: + +```bash +srt_server { + # whether SRT server is enabled. + # Overwrite by env SRS_SRT_SERVER_ENABLED + # default: off + enabled on; + # The UDP listen port for SRT. + # Overwrite by env SRS_SRT_SERVER_LISTEN + listen 10080; + # For detail parameters, please read wiki: + # @see https://ossrs.net/lts/zh-cn/docs/v5/doc/srt-params + # @see https://ossrs.io/lts/en-us/docs/v5/doc/srt-params + # The maxbw is the max bandwidth of the sender side. + # -1: Means the biggest bandwidth is infinity. + # 0: Means the bandwidth is determined by SRTO_INPUTBW. + # >0: Means the bandwidth is the configuration value. + # Overwrite by env SRS_SRT_SERVER_MAXBW + # default: -1 + maxbw 1000000000; + # Maximum Segment Size. Used for buffer allocation and rate calculation using packet counter assuming fully + # filled packets. Each party can set its own MSS value independently. During a handshake the parties exchange + # MSS values, and the lowest is used. + # Overwrite by env SRS_SRT_SERVER_MSS + # default: 1500 + mss 1500; + # The timeout time of the SRT connection on the sender side in ms. When SRT connects to a peer costs time + # more than this config, it will be close. + # Overwrite by env SRS_SRT_SERVER_CONNECT_TIMEOUT + # default: 3000 + connect_timeout 4000; + # The timeout time of SRT connection on the receiver side in ms. When the SRT connection is idle + # more than this config, it will be close. + # Overwrite by env SRS_SRT_SERVER_PEER_IDLE_TIMEOUT + # default: 10000 + peer_idle_timeout 8000; + # Default app for vmix, see https://github.com/ossrs/srs/pull/1615 + # Overwrite by env SRS_SRT_SERVER_DEFAULT_APP + # default: live + default_app live; + # The peerlatency is set by the sender side and will notify the receiver side. + # Overwrite by env SRS_SRT_SERVER_PEERLATENCY + # default: 0 + peerlatency 0; + # The recvlatency means latency from sender to receiver. + # Overwrite by env SRS_SRT_SERVER_RECVLATENCY + # default: 120 + recvlatency 0; + # This latency configuration configures both recvlatency and peerlatency to the same value. + # Overwrite by env SRS_SRT_SERVER_LATENCY + # default: 120 + latency 0; + # The tsbpd mode means timestamp based packet delivery. + # SRT sender side will pack timestamp in each packet. If this config is true, + # the receiver will read the packet according to the timestamp in the head of the packet. 
+ # Overwrite by env SRS_SRT_SERVER_TSBPDMODE + # default: on + tsbpdmode off; + # The tlpkdrop means too-late Packet Drop + # SRT sender side will pack timestamp in each packet, When the network is congested, + # the packet will drop if latency is bigger than the configuration in both sender side and receiver side. + # And on the sender side, it also will be dropped because latency is bigger than configuration. + # Overwrite by env SRS_SRT_SERVER_TLPKTDROP + # default: on + tlpktdrop off; + # The send buffer size of SRT. + # Overwrite by env SRS_SRT_SERVER_SENDBUF + # default: 8192 * (1500-28) + sendbuf 2000000; + # The recv buffer size of SRT. + # Overwrite by env SRS_SRT_SERVER_RECVBUF + # default: 8192 * (1500-28) + recvbuf 2000000; + # The passphrase of SRT. + # If passphrase is no empty, all the srt client must be using the correct passphrase to publish or play, + # or the srt connection will reject. The length of passphrase must be in range 10~79. + # @see https://github.com/Haivision/srt/blob/master/docs/API/API-socket-options.md#srto_passphrase. + # Overwrite by env SRS_SRT_SERVER_PASSPHRASE + # default: "" + passphrase xxxxxxxxxxxx; + # The pbkeylen of SRT. + # The pbkeylen determined the AES encrypt algorithm, this option only allow 4 values which is 0, 16, 24, 32 + # @see https://github.com/Haivision/srt/blob/master/docs/API/API-socket-options.md#srto_pbkeylen. + # Overwrite by env SRS_SRT_SERVER_PBKEYLEN + # default: 0 + pbkeylen 16; +} +vhost __defaultVhost__ { + srt { + # Whether enable SRT on this vhost. + # Overwrite by env SRS_VHOST_SRT_ENABLED for all vhosts. + # Default: off + enabled on; + # Whether covert SRT to RTMP stream. + # Overwrite by env SRS_VHOST_SRT_TO_RTMP for all vhosts. + # Default: on + srt_to_rtmp on; + } +} +``` + +> Note: These configurations are for publish and play. Note that there are some other configurations in other sections, +for example, converting RTMP to [HTTP-FLV](./flv.md#config) or HTTP-TS. + +All SRT configuration parameters can be found in the [libsrt](https://github.com/Haivision/srt/blob/master/docs/API/API-socket-options.md#list-of-options) documentation. Below are the important parameters supported by SRS: + +* `tsbpdmode`: Timestamp-based packet delivery mode. Each packet gets a timestamp, and the application reads them at the interval specified by the timestamps. +* `latency`: In milliseconds (ms). This configures both recvlatency and peerlatency to the same value. If recvlatency is set, it will be used; if peerlatency is set, it will be used. +* `recvlatency`: In milliseconds (ms). This is the receiver's buffer time length, including the time it takes for a packet to travel from the sender, through the network, to the receiver, and finally to the media application. This buffer time should be greater than RTT and prepared for multiple packet retransmissions. + * Low-latency networks: If the application requires low latency, consider setting the parameter to less than 250ms (human perception is not affected by audio/video latency below 250ms). + * Long-distance, high RTT: If the transmission distance is long and RTT is high, a small latency cannot be set. For important live broadcasts that don't require low latency but need smooth playback without jitter, set latency >= 3*RTT, as this includes packet retransmission and ack/nack cycles. +* `peerlatency`: In milliseconds (ms). This is the sender's setting for peerlatency, telling the receiver how long the latency buffer should be. 
If the receiver also sets recvlatency, the receiver will use the larger of the two values as the latency buffer length. + * Low-latency networks: Same recommendations as for `recvlatency`. + * Long-distance, high RTT: Same recommendations as for `recvlatency`. +* `tlpkdrop`: Whether to drop too-late packets. Since SRT is designed for audio/video transmission, the receiver sends packets to the application based on timestamps or encoding bitrate. If a packet arrives too late at the receiver (after latency timeout), it will be dropped. In live mode, tlpkdrop is true by default, as live broadcasts require low latency. +* `maxbw`: In bytes/s, the maximum sending bandwidth. `-1`: Maximum bandwidth is 1Gbps; `0`: Determined by SRTO_INPUTBW calculation (not recommended for live mode); `>0`: Bandwidth in bytes/s. +* `mss`: In bytes, the maximum size of a single sent packet. This refers to the size of IP packets, including UDP and SRT protocol packets. +* `connect_timeout`: In milliseconds (ms), the SRT connection timeout. +* `peer_idle_timeout`: In milliseconds (ms), the SRT peer timeout. +* `sendbuf`: In bytes, the SRT send buffer size. +* `recvbuf`: In bytes, the SRT receive buffer size. +* `payloadsize`: In bytes, the payload size is a multiple of 188 (the minimum size of an MPEG-TS packet), defaulting to 1316 bytes (188x7). +* `passphrase`: The SRT connection password, default is empty (no encryption). The password must be between 10-79 characters long, and the client must enter the correct password to connect successfully, or the connection will be rejected. +* `pbkeylen`: The SRT encryption key length, default is 0. The stream encryption key length can be 0/16/24/32, corresponding to different AES encryption key lengths. This parameter needs to be set when the `passphrase` option is set. +* `srt_to_rtmp`: Whether to enable SRT to RTMP conversion. After converting to RTMP, it can be played using RTMP, HTTP-FLV, and HLS protocols. + +## Low Latency Mode + +If you want the lowest latency and can tolerate occasional packet loss, consider this setting. + +> Note: Keep in mind that SRT will retransmit lost packets. Only when the network is very bad, and packets arrive very late or not at all, will they be discarded with `tlpktdrop` enabled, causing screen glitches. + +For events, activities, and TV production with long-distance streaming, the link is usually prepared in advance and is stable and dedicated. In these scenarios, a fixed latency is required, allowing a certain degree of packet loss (very low probability). Generally, the RTT of the link is detected before the stream starts and is used as a basis for configuring SRT streaming parameters. + +The recommended configuration is as follows, assuming an RTT of 100ms: + +```bash +srt_server { + enabled on; + listen 10080; + connect_timeout 4000; + peerlatency 300; # RTT * 3 + recvlatency 300; # RTT * 3 + latency 300; # RTT * 3 + tlpktdrop on; + tsbpdmode on; +} +``` + +This section describes how to reduce the latency of SRT, which is relevant to each link. The summary is as follows: + +* Pay attention to the client's Ping and CPU, which are easily overlooked but can affect latency. +* Please use Oryx as the server, as it has been adjusted and will not cause additional latency. +* An increase in RTT will affect latency. Generally, with an RTT of below 60ms, it can be stable at the expected latency. +* With an RTT of 100ms, latency is approximately 300ms, and with an RTT of 150ms, latency increases to around 430ms. 
+* Packet loss will affect quality. With a packet loss rate of over 10%, there will be screen flickering and dropped frames, but it does not affect latency significantly, particularly for audio. +* Currently, the lowest latency can be achieved by using vmix or Xinxiang to stream SRT and playing it with ffplay, resulting in a latency of around 200ms. +* When streaming SRT with OBS and playing it with ffplay, the latency is around 350ms. + +> Special Note: Based on current tests, the latency ceiling for SRT is 300ms. Although vmix can be set to a 1ms latency, it does not work and the actual latency will only be worse, not better. However, if the network is well maintained, a latency of 300ms is sufficient. + +Recommended solution for ultra high-definition, ultra low-latency, SRT live streaming: + +* Streaming: Xinxiang (230ms), vMix (200ms), OBS (300ms). +* Playback: ffplay (200ms), vMix (230ms), Xinxiang (400ms). + +| - | ffplay | vMix Playback | Xinxiang Playback | +| --- | ---- | --- | --- | +| vMix Push | 200ms | 300ms | - | +| OBS Push | 300ms | - | - | +| Xinxiang Push (http://www.sinsam.com/) | 230ms | - | 400ms | + +Latency involves each link, below are the detailed configurations for each link. The directory is as follows: + +* [CPU](https://github.com/ossrs/srs/issues/3464#lagging-cpu) Client CPU can cause latency. +* [Ping](https://github.com/ossrs/srs/issues/3464#lagging-ping) Client network RTT affects latency. +* [Encoder](https://github.com/ossrs/srs/issues/3464#lagging-encoder) Configuring encoder for low latency mode. +* [Server](https://github.com/ossrs/srs/issues/3464#lagging-server) Configuring the server for low latency. +* [SRT](https://github.com/ossrs/srs/issues/3464#lagging-srt) Special configuration for SRT servers. +* [Player](https://github.com/ossrs/srs/issues/3464#lagging-player) Configuring the player for low latency. +* [Benchmark](https://github.com/ossrs/srs/issues/3464#lagging-benchmark) Accurately measuring latency. +* [Bitrate](https://github.com/ossrs/srs/issues/3464#lagging-bitrate) Impact of different bitrates (0.5 to 6Mbps) on latency. +* [Network Jitter](https://github.com/ossrs/srs/issues/3464#lagging-jitter) Impact of packet loss and different RTT on latency. +* [Report](https://github.com/ossrs/srs/issues/3464#lagging-report) Test report. + +## High Quality Mode + +If you want the highest quality and can't tolerate even a small chance of screen glitches, but can accept increased latency, consider this configuration. + +When using SRT on public networks, the connection can be unstable, and RTT (Round Trip Time) may change dynamically. For low-latency live streaming, you need adaptive latency and must not lose packets. + +Recommended settings are as follows: + +``` +srt_server { + enabled on; + listen 10080; + connect_timeout 4000; + peerlatency 0; + recvlatency 0; + latency 0; + tlpktdrop off; + tsbpdmode off; +} +``` + +> Note: If you still experience screen glitches with the above settings, please refer to the [FFmpeg patch](https://github.com/FFmpeg/FFmpeg/commit/9099046cc76c9e3bf02f62a237b4d444cdaf5b20). + +## Video codec + +Currently, H264 and HEVC encoding are supported. Since SRT protocol transfers media in MPEG-TS format, which already supports HEVC encoding (streamtype 0x24), SRT can naturally transmit HEVC encoded video without any modifications. 
+ +To stream with HEVC encoding, use the following command: +```bash +ffmpeg -re -i source.mp4 -c:v libx265 -c:a copy -pes_payload_size 0 -f mpegts \ + 'srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=publish' +``` + +To play HEVC encoded video, use the following command: +```bash +ffplay 'srt://127.0.0.1:10080?streamid=#!::h=live/livestream,m=request' +``` + +## Audio codec + +Currently supported encoding formats: +* AAC, with sample rates of 44100, 22050, 11025, and 5512. + +## FFmpeg push SRT stream + +When using FFmpeg to push AAC audio format SRT stream, it is recommended to add the `-pes_payload_size 0` parameter in the command line. This parameter prevents multiple AAC audio frames from being combined into one PES package, reducing latency and audio-video synchronization issues. + +FFmpeg command line example: + +```bash +ffmpeg -re -i source.mp4 -c copy -pes_payload_size 0 -f mpegts \ + 'srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=publish' +``` + +## SRT URL + +SRT URL uses YAML format, which is different from the common URL definition. + +Consider the SRS definition for RTMP address, please refer to [RTMP URL](./rtmp-url-vhost.md) definition: + +* Regular RTMP format (without vhost) + - `rtmp://hostip:port/app/stream` + - Example: `rtmp://10.111.1.100:1935/live/livestream` + - In this example, app="live", stream="livestream" +* Complex RTMP format (with vhost) + - `rtmp://hostip:port/app/stream?vhost=xxx` + - Example: `rtmp://10.111.1.100:1935/live/livestream?vhost=srs.com.cn` + - In this example, vhost="srs.com.cn", app="live", stream="livestream" + +Whether it is streaming or playing, the RTMP address is a single address, and RTMP uses protocol layer messages to determine it. `publish message` means streaming to the URL, and `play message` means playing the URL. + +SRT is a transport layer protocol, so it cannot determine whether the operation on an SRT URL is streaming or playing. The SRT documentation has recommendations for streaming/playing: [AccessControl.md](https://github.com/Haivision/srt/blob/master/docs/features/access-control.md) +The key method is to use the streamid parameter to clarify the purpose of the URL, and the streamid format complies with the YAML format. + +Here is an SRT URL without vhost: +* Streaming address: `srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=publish` +* Playing address: `srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=request` +* Corresponding RTMP playing address: `rtmp://127.0.0.1/live/livestream` + +Where: +* `#!::`, is the beginning, in line with the YAML format standard. +* `r`, maps to the `app/stream` in the RTMP address. +* `m`, `publish` means streaming, `request` means playing. + +Here is an SRT URL with vhost support: +* Streaming address: `srt://127.0.0.1:10080?streamid=#!::h=srs.srt.com.cn,r=live/livestream,m=publish` +* Playing address: `srt://127.0.0.1:10080?streamid=#!::h=srs.srt.com.cn,r=live/livestream,m=request` +* Corresponding RTMP address: `rtmp://127.0.0.1/live/livestream?vhost=srs.srt.com.cn` + +Where: +* `h`, maps to the vhost in the RTMP address + +## SRT URL without streamid + +Some devices do not support streamid input or do not support some special characters in streamid, such as `!`, `#`, `,`, etc. In this case, you can use only `ip:port` for streaming, such as `srt://127.0.0.1:10080`. For this URL, SRS will set the streamid to `#!::r=live/livestream,m=publish` by default. 
+ +In other words, the following two addresses are equivalent: +* `srt://127.0.0.1:10080` +* `srt://127.0.0.1:10080?streamid=#!::r=live/livestream,m=publish` + +## Authentication + +For the definition of SRT URLs, please refer to [SRT URL Schema](#srt-url). + +Here is a special note on how to include authentication information, see [SRS URL: Token](./rtmp-url-vhost.md#parameters-in-url). +If you need to include authentication information such as the secret parameter, you can specify it in the streamid, for example: + +``` +streamid=#!::r=live/livestream,secret=xxx +``` + +Here is a specific example: + +``` +ffmpeg -re -i doc/source.flv -c copy -f mpegts \ + 'srt://127.0.0.1:10080?streamid=#!::r=live/livestream,secret=xxx,m=publish' +``` + +The address for forwarding to SRS would be like this: + +``` +rtmp://127.0.0.1:1935/live/livestream?secret=xxx +``` + +## SRT Encoder + +SRT Encoder is an encoder based on the SRT adaptive bitrate. It predicts low-latency outbound bandwidth based on information such as RTT, maxBw, and inflight in the SRT protocol, dynamically adjusting the encoding bitrate to be based on the network outbound bandwidth. + +GitHub address: [runner365/srt_encoder](https://github.com/runner365/srt_encoder) + +Based on the basic congestion control algorithm of BBR, the encoder predicts the state machine of the encoding bitrate (keep, increase, decrease) based on the minRTT, maxBw, and current inflight within one cycle (1~2 seconds). + +Note: +1) This example is just a basic BBR algorithm example, and users can implement the interfaces in the CongestionCtrlI class to improve the BBR algorithm. +2) SRT is still an evolving protocol, and the accuracy of its congestion control and external parameter updates is also improving. + +Easy to use, after compiling, you can directly use the ffmpeg command line. + +## Coroutine Native SRT + +How does SRS implement SRT? Based on coroutine-based SRT architecture, we need to adapt it to ST as SRT has its own IO scheduling, so that we can achieve the best maintainability. + +* For the specific code submission, please refer to [#3010](https://github.com/ossrs/srs/pull/3010) or [1af30dea](https://github.com/ossrs/srs/commit/1af30dea324d0f1729aabd22536ea62e03497d7d) + +> Note: Please note that the SRT in SRS 4.0 is a non-ST architecture, and it is implemented by launching a separate thread, which may not have the same level of maintainability as the native ST coroutine architecture. + +## Q&A + +1. Does SRS support forwarding SRT streams to Nginx? + +> Yes, it is supported. You can use OBS/FFmpeg to push SRT streams to SRS, and SRS will convert the SRT stream into the RTMP protocol. Then, you can convert RTMP to HLS, FLV, WebRTC, and also forward the RTMP stream to Nginx. + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.net&path=/lts/doc/en/v6/srt) + diff --git a/versioned_docs/version-6.0/doc/streamer.md b/versioned_docs/version-6.0/doc/streamer.md new file mode 100644 index 00000000..76a5a72f --- /dev/null +++ b/versioned_docs/version-6.0/doc/streamer.md @@ -0,0 +1,151 @@ +--- +title: Stream Converter +sidebar_label: Caster +hide_title: false +hide_table_of_contents: false +--- + +# Stream Caster + +Stream Converters listen at special TCP/UDP ports, accept new connections and receive packets, then convert to and push +RTMP stream to SRS server like a RTMP client. 
In short, it converts other protocols to RTMP, and works like this:

```text
Client ---PUSH--> Stream Converter --RTMP--> SRS --RTMP/FLV/HLS/WebRTC--> Clients
```

> Note: Some stream protocols contain more than one stream, or even multiple transport connections.

## Use Scenario

There are some use scenarios for the stream converter, for example:

* Push MPEG-TS over UDP, by some encoder device.
* Push FLV by HTTP POST, by some mobile device.

> Note: FFmpeg supports pushing MPEG-TS over UDP and FLV by HTTP POST to SRS.

## Build

Stream Converter is always enabled in SRS, but some protocols might need special configuration parameters; please read the
instructions for each protocol.

## Protocols

The protocols supported by Stream Converter:

* MPEG-TS over UDP: MPEG-TS stream over UDP protocol.
* FLV by HTTP POST: FLV stream over HTTP protocol.

## Config

The configuration for stream converter:

```
# Push MPEGTS over UDP to SRS.
stream_caster {
    # Whether stream converter is enabled.
    # Default: off
    enabled on;
    # The type of stream converter, could be:
    #       mpegts_over_udp, push MPEG-TS over UDP and convert to RTMP.
    caster mpegts_over_udp;
    # The output rtmp url.
    # For mpegts_over_udp converter, the typical output url:
    #       rtmp://127.0.0.1/live/livestream
    output rtmp://127.0.0.1/live/livestream;
    # The listen port for stream converter.
    # For mpegts_over_udp converter, listen at a UDP port, for example, 8935.
    listen 8935;
}

# Push FLV by HTTP POST to SRS.
stream_caster {
    # Whether stream converter is enabled.
    # Default: off
    enabled on;
    # The type of stream converter, could be:
    #       flv, push FLV by HTTP POST and convert to RTMP.
    caster flv;
    # The output rtmp url.
    # For flv converter, the typical output url:
    #       rtmp://127.0.0.1/[app]/[stream]
    # For example, POST to url:
    #       http://127.0.0.1:8936/live/livestream.flv
    # Where the [app] is "live" and [stream] is "livestream", output is:
    #       rtmp://127.0.0.1/live/livestream
    output rtmp://127.0.0.1/[app]/[stream];
    # The listen port for stream converter.
    # For flv converter, listen at a TCP port, for example, 8936.
    listen 8936;
}
```

Please follow the instructions for each protocol below.

## Push MPEG-TS over UDP

You're able to push MPEG-TS over UDP to SRS, then convert it to RTMP and other protocols.

First, start SRS with configuration for MPEGTS:

```bash
./objs/srs -c conf/push.mpegts.over.udp.conf
```

> Note: About the detailed configuration, please read the `mpegts_over_udp` section of [config](#config).

Then, start to push the stream, for example, by FFmpeg:

```bash
ffmpeg -re -f flv -i doc/source.flv -c copy -f mpegts udp://127.0.0.1:8935
```

Finally, play the stream:

* [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?stream=livestream.flv)
* [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?stream=livestream.m3u8)
* [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html?autostart=true)

Please note that each UDP port is bound to one RTMP stream.

> Note: About the development notes, please see [#250](https://github.com/ossrs/srs/issues/250).

## Push HTTP FLV to SRS

You're also able to push HTTP FLV by HTTP POST, which makes it very simple for mobile devices to send an HTTP stream.
First, start SRS with configuration for FLV:

```bash
./objs/srs -c conf/push.flv.conf
```

> Note: About the detailed configuration, please read the `flv` section of [config](#config).

Then, start to push the stream, for example, by FFmpeg:

```bash
ffmpeg -re -f flv -i doc/source.flv -c copy \
    -f flv http://127.0.0.1:8936/live/livestream.flv
```

Finally, play the stream:

* [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?stream=livestream.flv)
* [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?stream=livestream.m3u8)
* [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html?autostart=true)

> Note: About the development notes, please see [#2611](https://github.com/ossrs/srs/issues/2611).

## Push RTSP to SRS

It has been eliminated, see [#2304](https://github.com/ossrs/srs/issues/2304#issuecomment-826009290).

2015.1

![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/streamer)

diff --git a/versioned_docs/version-6.0/doc/time-jitter.md b/versioned_docs/version-6.0/doc/time-jitter.md
new file mode 100644
index 00000000..f729cdba
--- /dev/null
+++ b/versioned_docs/version-6.0/doc/time-jitter.md
@@ -0,0 +1,104 @@
---
title: Time Jitter
sidebar_label: Time Jitter
hide_title: false
hide_table_of_contents: false
---

# TimeJitter

This article describes timestamp correction in SRS.

## RTMP Monotonically Increase Timestamp

RTMP requires the timestamp to be mono-inc (monotonically increasing), that is, the timestamp of each packet
is always larger than the previous one.

RTMP requires the whole stream to be mono-inc: the audio is mono-inc,
the video is mono-inc, and the stream mixing audio with video is mono-inc.

What happens when it is not mono-inc? Some servers will disconnect the connection, and Flash may
fail to play the stream.

## Timestamp Jitter

SRS will ensure the stream timestamp is mono-inc. When the delta between packets is too large, SRS resets it to 40ms (25fps).

Some components use the timestamp jitter:
* RTMP delivery: The timestamp jitter algorithm can be set by the vhost `time_jitter`.
* DVR: The timestamp jitter algorithm can be set by the dvr `time_jitter`.
* HLS: Always ensures the timestamp is mono-inc, using the `full` timestamp jitter algorithm.
* Forward: Always ensures the timestamp is mono-inc, using the `full` timestamp jitter algorithm.
* HTTP Audio Stream Fast Cache: Equal to the RTMP time jitter vhost config, @see `fast_cache`.

You can disable the timestamp jitter algorithm when your encoder cannot ensure that the mixed
video+audio stream is mono-inc; some encoders can only ensure that video is mono-inc and audio is mono-inc separately.

## Config

Configure the timestamp jitter in the vhost for RTMP delivery:

```bash
vhost jitter.srs.com {
    # for play client, both RTMP and other stream clients,
    # for instance, the HTTP FLV stream clients.
    play {
        # about the stream monotonically increasing:
        #   1. video timestamp is monotonically increasing,
        #   2. audio timestamp is monotonically increasing,
        #   3. video and audio timestamp is interleaved/mixed monotonically increasing.
        # it's specified by RTMP specification, @see 3. Byte Order, Alignment, and Time Format
        # however, some encoders cannot provide this feature, please set this to off to ignore time jitter.
        # the time jitter algorithm:
        #   1. full, to ensure stream start at zero, and ensure stream monotonically increasing.
        #   2. zero, only ensure stream start at zero, ignore timestamp jitter.
        #   3. off, disable the time jitter algorithm, like atc.
        # default: full
        time_jitter full;
        # whether use the interleaved/mixed algorithm to correct the timestamp.
        # if on, always ensure the timestamp of audio+video is interleaved/mixed monotonically increase.
        # if off, use time_jitter to correct the timestamp if required.
        # default: off
        mix_correct off;
    }
}
```

The vhost `mix_correct` option corrects the audio+video stream so that the mixed timestamps are monotonically increasing.

Configure the timestamp jitter for DVR:

```
vhost dvr.srs.com {
    # dvr RTMP stream to file,
    # start to record to file when encoder publish,
    # reap flv according to the specified dvr_plan.
    # http callbacks:
    # @see http callback on_dvr_hss_reap_flv on http_hooks section.
    dvr {
        # about the stream monotonically increasing:
        #   1. video timestamp is monotonically increasing,
        #   2. audio timestamp is monotonically increasing,
        #   3. video and audio timestamp is interleaved monotonically increasing.
        # it's specified by RTMP specification, @see 3. Byte Order, Alignment, and Time Format
        # however, some encoders cannot provide this feature, please set this to off to ignore time jitter.
        # the time jitter algorithm:
        #   1. full, to ensure stream start at zero, and ensure stream monotonically increasing.
        #   2. zero, only ensure stream start at zero, ignore timestamp jitter.
        #   3. off, disable the time jitter algorithm, like atc.
        # default: full
        time_jitter full;
    }
}
```

## ATC

When [RTMP ATC](./rtmp-atc.md) is on,
the time_jitter is always disabled.

Winlin 2015.4

![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/time-jitter)

diff --git a/versioned_docs/version-6.0/doc/webrtc.md b/versioned_docs/version-6.0/doc/webrtc.md
new file mode 100644
index 00000000..bceae31e
--- /dev/null
+++ b/versioned_docs/version-6.0/doc/webrtc.md
@@ -0,0 +1,521 @@
---
title: WebRTC
sidebar_label: WebRTC
hide_title: false
hide_table_of_contents: false
---

# WebRTC

WebRTC is an online real-time communication solution open-sourced by Google. In simple terms, it is an
internet audio and video conference system. As it follows the RFC standard protocol and is supported
by browsers, its boundaries are constantly expanding. It is used in low-latency audio and video scenarios,
such as online meetings, live streaming video chat with guests, low-latency live broadcasts, remote
robot control, remote desktop, cloud video games, smart doorbells, and live web page streaming.

WebRTC is essentially a standard for direct communication between two web browsers, mainly consisting
of signaling and media protocols. Signaling deals with the negotiation of capabilities between two
devices, such as supported encoding and decoding abilities. Media handles the encryption and low-latency
transmission of media packets between devices. In addition, WebRTC itself also implements audio processing
technologies like 3A, network congestion control such as NACK, FEC, and GCC, audio and video encoding
and decoding, as well as smooth and low-latency playback technologies.

```bash
+----------------+                          +----------------+
+    Browser     +----<--Signaling----->----+    Browser     +
+ (like Chrome)  +----<----Media------->----+ (like Chrome)  +
+----------------+                          +----------------+
```

> Note: WebRTC is now an official RFC standard, so it is supported by various browsers. There are many
> open-source implementations, making it available not only in browsers but also in mobile browsers and
> native libraries.
For simplicity, in this post, the term "browser" refers to any client or device that +> supports the WebRTC protocol. + +In reality, on the internet, it's almost impossible for two browsers to communicate directly, especially +when they're not on the same local network and are located far apart, like in different cities or countries. +The data transfer between the two browsers goes through many network routers and firewalls, making it hard +to ensure good transmission quality. Therefore, in practical applications, data needs to be relayed through +servers. There are several types of WebRTC servers to help with this process: + +* Signaling Server: This is a service that helps two browsers exchange SDP (Session Description Protocol) information. For multi-person conferences, room services are needed, but the main purpose is still to exchange SDP between browsers. In the streaming media field, to enable WebRTC for streaming and playback, similar to RTMP/SRT/HLS streaming, the WHIP/WHEP protocols have been designed. +* TURN Server: Relay service that helps two browsers forward media data between them. This is a transparent forwarding service without data caching, so during multi-person meetings, browsers need to transfer `N*N + N*(N-2)` copies of data. It is generally used in very few communication scenarios, such as one-on-one. +* SFU Server: Selective forwarding service with cached data on the server, allowing browsers to upload only one copy of data, which the server then replicates to other participants. SRS is an example of an SFU. For more information on SFU's role, refer to [this link](https://stackoverflow.com/a/75491178/17679565). Most current WebRTC servers are SFU servers, with `N*N` streams being transferred, reducing the amount of data transfer by `N*(N-2)` compared to TURN servers. This helps solve most transmission issues. +* MCU Server: Multipoint Control Unit Server, the server merges the streams in a conference into one, so the browser only needs to transfer `N*2` sets of data, uploading one and downloading one. However, due to the need for encoding and decoding, the number of streams supported by the server is an order of magnitude less than SFU, and it is only used in certain specific scenarios. For more details, refer to [#3625](https://github.com/ossrs/srs/discussions/3625). + +We primarily focus on explaining the SFU (Selective Forwarding Unit) workflow, as it is widely used in +WebRTC servers, and it essentially functions like a browser: + +```bash ++----------------+ +---------+ ++ Browser +----<--Signaling----->--+ SFU + ++ (like Chrome) +----<----Media----->----+ Server + ++----------------+ +---------+ +``` + +> Note: Generally, SFUs have Signaling capabilities. In fact, RTMP addresses can be considered as a very +> simplified signaling protocol. However, WebRTC signaling requires more complex negotiation of media and +> transport capabilities. In complex WebRTC systems, there might be separate Signaling and Room clusters, +> but SFUs also have simplified Signaling capabilities, which may be used for communication with other +> services. + +SRS is a media server that provides Signaling and SFU Server capabilities. Unlike other SFUs like Janus, +SRS is based on streams. Even though there can be multiple participants in a room, essentially, someone is +pushing a stream, and others are subscribing to it. 
This way, it avoids coupling all the streams in a room to +a single SFU transmission and can distribute them across multiple SFU transmissions, allowing for larger +conferences with more participants. + +SRS supports signaling protocols WHIP and WHEP. For more details, please refer to the [HTTP API](#http-api) +section. Unlike live streaming, signaling and media are separated, so you need to set up Candidates, see +[Candidate](#config-candidate). Media uses UDP by default, but if UDP is unavailable, you can use TCP as described in +[TCP](#webrtc-over-tcp). If you encounter issues, it could be due to incorrect Candidate settings or firewall/port +restrictions, refer to [Connectivity](#connection-failures) and use the provided tools to check. SRS also supports +converting between different protocols, such as streaming RTMP and viewing with WebRTC, as explained in +[RTMP to WebRTC](#rtmp-to-rtc), or streaming with WebRTC and viewing with HLS, as described in +[RTC to RTMP](#rtc-to-rtmp). + +SRS supported the WebRTC protocol in 2020. For more information on the development process, please refer +to [#307](https://github.com/ossrs/srs/issues/307). + +## Config + +There are some config for WebRTC, please see `full.conf` for more: + +```bash +rtc_server { + # Whether enable WebRTC server. + # Overwrite by env SRS_RTC_SERVER_ENABLED + # default: off + enabled on; + # The udp listen port, we will reuse it for connections. + # Overwrite by env SRS_RTC_SERVER_LISTEN + # default: 8000 + listen 8000; + # For WebRTC over TCP directly, not TURN, see https://github.com/ossrs/srs/issues/2852 + # Some network does not support UDP, or not very well, so we use TCP like HTTP/80 port for firewall traversing. + tcp { + # Whether enable WebRTC over TCP. + # Overwrite by env SRS_RTC_SERVER_TCP_ENABLED + # Default: off + enabled off; + # The TCP listen port for WebRTC. Highly recommend is some normally used ports, such as TCP/80, TCP/443, + # TCP/8000, TCP/8080 etc. However SRS default to TCP/8000 corresponding to UDP/8000. + # Overwrite by env SRS_RTC_SERVER_TCP_LISTEN + # Default: 8000 + listen 8000; + } + # The protocol for candidate to use, it can be: + # udp Generate UDP candidates. Note that UDP server is always enabled for WebRTC. + # tcp Generate TCP candidates. Fail if rtc_server.tcp(WebRTC over TCP) is disabled. + # all Generate UDP+TCP candidates. Ignore if rtc_server.tcp(WebRTC over TCP) is disabled. + # Note that if both are connected, we will use the first connected(DTLS done) one. + # Overwrite by env SRS_RTC_SERVER_PROTOCOL + # Default: udp + protocol udp; + # The exposed candidate IPs, response in SDP candidate line. It can be: + # * Retrieve server IP automatically, from all network interfaces. + # $CANDIDATE Read the IP from ENV variable, use * if not set. + # x.x.x.x A specified IP address or DNS name, use * if 0.0.0.0. + # @remark For Firefox, the candidate MUST be IP, MUST NOT be DNS name, see https://bugzilla.mozilla.org/show_bug.cgi?id=1239006 + # @see https://ossrs.net/lts/zh-cn/docs/v4/doc/webrtc#config-candidate + # Overwrite by env SRS_RTC_SERVER_CANDIDATE + # default: * + candidate *; +} + +vhost rtc.vhost.srs.com { + rtc { + # Whether enable WebRTC server. + # Overwrite by env SRS_VHOST_RTC_ENABLED for all vhosts. + # default: off + enabled on; + # Whether support NACK. + # default: on + nack on; + # Whether support TWCC. + # default: on + twcc on; + # Whether enable transmuxing RTMP to RTC. + # If enabled, transcode aac to opus. 
        # Overwrite by env SRS_VHOST_RTC_RTMP_TO_RTC for all vhosts.
        # default: off
        rtmp_to_rtc off;
        # Whether enable transmuxing RTC to RTMP.
        # Overwrite by env SRS_VHOST_RTC_RTC_TO_RTMP for all vhosts.
        # Default: off
        rtc_to_rtmp off;
    }
}
```

The config `rtc_server` is the global configuration for RTC, for example:
* `enabled`: Whether to enable the WebRTC server.
* `listen`: The UDP listen port, which is reused for connections.
* `candidate`: The exposed candidate IPs, returned in the SDP candidate line. Please read [Config: Candidate](./webrtc.md#config-candidate) for details.
* `tcp.listen`: Whether to enable WebRTC over TCP. Please read [WebRTC over TCP](./webrtc.md#webrtc-over-tcp) for details.

For each vhost, the configuration is the `rtc` section, for example:
* `rtc.enabled`: Whether to enable the WebRTC server for this vhost.
* `rtc.rtmp_to_rtc`: Whether to enable transmuxing RTMP to RTC.
* `rtc.rtc_to_rtmp`: Whether to enable transmuxing RTC to RTMP.
* `rtc.stun_timeout`: The timeout in seconds for the session.
* `rtc.nack`: Whether to support NACK for ARQ.
* `rtc.twcc`: Whether to support TWCC for congestion feedback.
* `rtc.dtls_role`: The role of DTLS when the peer is actpass: passive or active.

## Config: Candidate

Please note that `candidate` is critically important; most failures are caused by a wrong `candidate`, so be careful.

The easiest method to modify the `candidate` involves indicating the `eip` in the URL. For instance, if your server
is `192.168.3.10`, utilize this URL:

* [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream&eip=192.168.3.10](http://localhost:8080/players/whip.html?eip=192.168.3.10)

Moreover, the easiest and most direct method to modify the default UDP port `8000`, particularly when it is
behind a load balancer or proxy, involves utilizing the `eip`. For example, if you employ UDP `18000` as the port,
consider using this URL:

* [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream&eip=192.168.3.10:18000](http://localhost:8080/players/whip.html?eip=192.168.3.10:18000)

As shown, `candidate` is the server IP to connect to, and SRS returns it in the SDP answer as `candidate`, like this one:

```bash
type: answer, sdp: v=0
a=candidate:0 1 udp 2130706431 192.168.3.6 8000 typ host generation 0
```

So `192.168.3.6 8000` is an endpoint that the client can access. There are several ways to set the IP:
* Config a fixed IP, such as `candidate 192.168.3.6;`
* Use `ifconfig` to get the server IP and pass it by environment variable, such as `candidate $CANDIDATE;`
* Detect automatically, first by environment, then by the server network interface IP, such as `candidate *;`, as explained below.
* Specify `?eip=x` in the URL, such as: `webrtc://192.168.3.6/live/livestream?eip=192.168.3.6`
* Normally the API is provided by SRS, so you're able to use the hostname of the HTTP-API as `candidate`, as explained below.

Configurations for automatically detecting the IP for `candidate`:
* `candidate *;` or `candidate 0.0.0.0;` means detect the network interface IP.
* `use_auto_detect_network_ip on;` If disabled, never detect the IP automatically.
* `ip_family ipv4;` To filter the IP when detecting automatically.

Configurations for using the HTTP-API hostname as `candidate`:
* `api_as_candidates on;` If disabled, never use the HTTP API hostname as candidate.
* `resolve_api_domain on;` If the hostname is a domain name, resolve it to an IP address. Note that Firefox does not support domain names.
* `keep_api_domain on;` Whether to keep the domain name so the client resolves it.
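
Putting the directives above together, a minimal `rtc_server` sketch that relies on automatic detection and the HTTP-API hostname might look like this (the values are only illustrative):

```bash
rtc_server {
    enabled on;
    listen 8000;
    # Detect the candidate IP automatically from the network interfaces.
    candidate *;
    use_auto_detect_network_ip on;
    ip_family ipv4;
    # Also allow the HTTP-API hostname to be used as a candidate.
    api_as_candidates on;
    resolve_api_domain on;
}
```
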
+ +> Note: Please note that if no `candidate` specified, SRS will use one automatically detected IP. + +In short, the `candidate` must be a IP address that client could connect to. + +Use command `ifconfig` to retrieve the IP: + +```bash +# For macOS +CANDIDATE=$(ifconfig en0 inet| grep 'inet '|awk '{print $2}') + +# For CentOS +CANDIDATE=$(ifconfig eth0|grep 'inet '|awk '{print $2}') + +# Directly set ip. +CANDIDATE="192.168.3.10" +``` + +Pass it to SRS by ENV: + +```bash +env CANDIDATE="192.168.3.10" \ + ./objs/srs -c conf/rtc.conf +``` + +For example, to run SRS in docker, and setup the CANDIDATE: + +```bash +export CANDIDATE="192.168.3.10" +docker run --rm --env CANDIDATE=$CANDIDATE \ + -p 1935:1935 -p 8080:8080 -p 1985:1985 -p 8000:8000/udp \ + ossrs/srs:5 \ + objs/srs -c conf/rtc.conf +``` + +> Note:About the usage of srs-docker, please read [srs-docker](https://github.com/ossrs/dev-docker/tree/v4#usage). + +## Stream URL + +In SRS, both live streaming and WebRTC are based on the concept of `streams`. So, the URL definition for +streams is very consistent. Here are some different stream addresses for various protocols in SRS, which +you can access after installing SRS: + +* Publish or play stream over RTMP: `rtmp://localhost/live/livestream` +* Play stream over HTTP-FLV: [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html) +* Play stream over HLS: [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?stream=livestream.m3u8) +* Publish stream over WHIP: [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream](http://localhost:8080/players/whip.html) +* Play stream over WHEP: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html) + +> Remark: Since Flash is disabled, RTMP streams cannot be played in Chrome. Please use VLC to play them. + +Before WHIP and WHEP were introduced, SRS supported another format with a different HTTP API format, but it +still exchanged SDP. It is no longer recommended: + +* Publish: [webrtc://localhost/live/livestream](http://localhost:8080/players/rtc_publisher.html) +* Play: [webrtc://localhost/live/livestream](http://localhost:8080/players/rtc_player.html) + +> Note: SRT addresses are not provided here because their design is not in a common URL format. + +## WebRTC over TCP + +In many networks, UDP is not available for WebRTC, so TCP is very important to make it highly reliable. SRS supports directly TCP transport for WebRTC, not TURN, which introduce a complex network layer and system. It also makes the LoadBalancer possible to forward TCP packets, because TCP is more stable than UDP for LoadBalancer. + +* All HTTP API, HTTP Stream and WebRTC over TCP reuses one TCP port, such as TCP(443) for HTTPS. +* Support directly transport over UDP or TCP, no dependency of TURN, no extra system and resource cost. +* Works very well with [Proxy(Not Implemented)](https://github.com/ossrs/srs/issues/3138) and [Cluster(Not Implemented)](https://github.com/ossrs/srs/issues/2091), for load balancing and system capacity. 
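
If you prefer a config file to environment variables, the TCP transport is controlled by the `rtc_server.tcp` section shown in [Config](#config) above; a minimal sketch using the default ports looks like this:

```bash
rtc_server {
    enabled on;
    listen 8000;
    tcp {
        # Enable WebRTC over TCP, on the same port number as UDP by default.
        enabled on;
        listen 8000;
    }
    # Offer TCP candidates; use "all" to offer both UDP and TCP.
    protocol tcp;
}
```
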
Run SRS with WebRTC over TCP, by default on port 8000:

```bash
docker run --rm -it -p 8080:8080 -p 1985:1985 -p 8000:8000 \
    -e CANDIDATE="192.168.3.82" \
    -e SRS_RTC_SERVER_TCP_ENABLED=on \
    -e SRS_RTC_SERVER_PROTOCOL=tcp \
    -e SRS_RTC_SERVER_TCP_LISTEN=8000 \
    ossrs/srs:v5
```

Please use [FFmpeg](https://ffmpeg.org/download.html) or [OBS](https://obsproject.com/download) to publish a stream:

```bash
ffmpeg -re -i ./doc/source.flv -c copy -f flv rtmp://localhost/live/livestream
```

* Play WebRTC over TCP: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html?autostart=true)
* Play HTTP FLV: [http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true)
* Play HLS: [http://localhost:8080/live/livestream.m3u8](http://localhost:8080/players/srs_player.html?stream=livestream.m3u8&autostart=true)

> Note: We configure SRS by environment variables here; you can also use a config file.

> Note: We use dedicated TCP ports, for example, HTTP API(1985), HTTP Stream(8080) and WebRTC over TCP(8000); you're also able to reuse one TCP port at HTTP Stream(8080).

## HTTP API

SRS supports the WHIP and WHEP protocols. After installing SRS, you can test it with the following links:

* To use WHIP for streaming: [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream](http://localhost:8080/players/whip.html)
* To use WHEP for playback: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html)

For details on the protocols, refer to [WHIP](./http-api.md#webrtc-publish) and [WHEP](./http-api.md#webrtc-play).
Below is the workflow:

[![](/img/doc-whip-whep-workflow.png)](https://www.figma.com/file/fA75Nl6Fr6v8hsrJba5Xrn/How-Does-WHIP%2FWHEP-Work%3F?type=whiteboard&node-id=0-1)

If you install SRS on a Mac or Linux, you can test the local SRS service with localhost. However, if you're using
Windows, a remote Linux server, or need to test on other devices, you must use HTTPS for WHIP streaming, while
WHEP can still use HTTP. To enable SRS HTTPS, refer to [HTTPS API](./http-api.md#https-api), or use a web server
proxy like Nginx by referring to [HTTPS Proxy](./http-api.md#http-and-https-proxy).

If you need to test whether the HTTP API is working properly, you can use the `curl` tool. For more details, please
refer to [Connectivity Check](#connection-failures).

## Connection Failures

Some developers come to the SRS community for help because they get errors when using OBS WHIP to connect to an online WHIP
server: the online server must use HTTPS, the UDP port might not be available, and it's hard to debug or
log in to the online server due to privacy or network issues.

Here are some ways to troubleshoot connection failures in OBS WHIP; generally they are caused by HTTPS API setup
or UDP port availability issues.
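
Before testing the full WHIP handshake, it often helps to first confirm that the HTTP(S) API itself is reachable. A minimal check, assuming the default API port 1985 (`-k` accepts a self-signed certificate):

```bash
# Should return a JSON object with the SRS version if the API is reachable.
curl -v -k https://yourdomain.com:1985/api/v1/versions
```
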
+ +Use curl to test WHIP HTTP or HTTPS API: + +```bash +curl "http://localhost:1985/rtc/v1/whip/?ice-ufrag=6pk11386&ice-pwd=l91z529147ri9163933p51c4&app=live&stream=livestream-$(date +%s)" \ + -H 'Origin: http://localhost' -H 'Referer: http://localhost' \ + -H 'Accept: */*' -H 'Content-type: application/sdp' \ + -H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)' \ + --data-raw $'v=0\r\na=group:BUNDLE 0 1\r\nm=audio 9 UDP/TLS/RTP/SAVPF 111\r\nc=IN IP4 0.0.0.0\r\na=rtcp:9 IN IP4 0.0.0.0\r\na=ice-ufrag:J8X7\r\na=ice-pwd:Dpq7/fW/osYcPeLsCW2Ek1JH\r\na=setup:actpass\r\na=mid:0\r\na=sendonly\r\na=msid:- audio\r\na=rtcp-mux\r\na=rtpmap:111 opus/48000/2\r\na=ssrc:3184534672 cname:stream\r\nm=video 9 UDP/TLS/RTP/SAVPF 106\r\nc=IN IP4 0.0.0.0\r\na=rtcp:9 IN IP4 0.0.0.0\r\na=ice-ufrag:J8X7\r\na=ice-pwd:Dpq7/fW/osYcPeLsCW2Ek1JH\r\na=setup:actpass\r\na=mid:1\r\na=sendonly\r\na=msid:- video\r\na=rtcp-mux\r\na=rtpmap:106 H264/90000\r\na=ssrc:512761356 cname:stream' \ + -v -k +``` + +> Note: You can replace `http://localhost` with `https://yourdomain.com` to test HTTPS API. + +> Note: For Oryx, you should specify the secret, so please change the `/rtc/v1/whip?ice-ufrag=` to `/rtc/v1/whip?secret=xxx&ice-ufrag=` as such. + +> Note: You can also use `eip=ip` or `eip=ip:port` to force SRS to use it as the candidate. Please see [CANDIDATE](#config-candidate) for details. + +The answer contains the candidate, the UDP server IP, such as `127.0.0.1`: + +``` +a=candidate:0 1 udp 2130706431 127.0.0.1 8000 typ host generation 0 +``` + +Use `nc` to send UDP packet to SRS WHIP server: + +```bash +echo -en "\x00\x01\x00\x50\x21\x12\xa4\x42\x74\x79\x6d\x7a\x41\x51\x2b\x2f\x4a\x4b\x77\x52\x00\x06\x00\x0d\x36\x70\x6b\x31\x31\x33\x38\x36\x3a\x4a\x38\x58\x37\x00\x00\x00\xc0\x57\x00\x04\x00\x01\x00\x0a\x80\x2a\x00\x08\xda\xad\x1d\xce\xe8\x95\x5a\x83\x00\x24\x00\x04\x6e\x7f\x1e\xff\x00\x08\x00\x14\x56\x8f\x1e\x1e\x4f\x5f\x17\xf9\x2e\xa1\xec\xbd\x51\xd9\xa2\x27\xe4\xfd\xda\xb1\x80\x28\x00\x04\x84\xd3\x5a\x79" \ + |nc -w 3 -u 127.0.0.1 8000 |od -Ax -c -t x1 |grep '000' && \ + echo "Success" || echo "Failed" +``` + +> Note: You also can use `nc` or [server.go](https://github.com/ossrs/srs/pull/3837) as the UDP server for test. + +If use SRS as WHIP server, should response with: + +``` +0000000 001 001 \0 @ ! 022 244 B t y m z A Q + / +0000010 J K w R \0 006 \0 \r 6 p k 1 1 3 8 6 +0000020 : J 8 X 7 \0 \0 \0 \0 \0 \b \0 001 376 ` +0000030 ầ ** ** 027 \0 \b \0 024 206 263 + ʼn ** 025 G 215 +0000040 I 335 P ^ " 7 } N ? 017 037 224 200 ( \0 004 +0000050 303 < 250 272 +0000054 +Success +``` + +> Note: Should be SRS 5.0.191+, see [#3837](https://github.com/ossrs/srs/pull/3837), you can also use +> [server.go](https://github.com/ossrs/srs/issues/2843) as the UDP server for test. + +## RTMP to RTC + +Please use `conf/rtmp2rtc.conf` as config. + +```bash +export CANDIDATE="192.168.1.10" +docker run --rm --env CANDIDATE=$CANDIDATE \ + -p 1935:1935 -p 8080:8080 -p 1985:1985 -p 8000:8000/udp \ + ossrs/srs:5 \ + objs/srs -c conf/rtmp2rtc.conf +``` + +> Note: Please set CANDIDATE as the ip of server, please read [CANDIDATE](./webrtc.md#config-candidate). 
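
The key difference from the plain RTC config is that `conf/rtmp2rtc.conf` turns on RTMP-to-RTC transmuxing. A minimal sketch of that vhost setting, using only directives from the [Config](#config) section above (the vhost name is the SRS default):

```bash
vhost __defaultVhost__ {
    rtc {
        enabled on;
        # Transcode AAC to Opus so an RTMP input can be played over WebRTC.
        rtmp_to_rtc on;
    }
}
```
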
+ +Use FFmpeg docker to push to localhost: + +```bash +docker run --rm -it ossrs/srs:encoder ffmpeg -stream_loop -1 -re -i doc/source.flv \ + -c copy -f flv rtmp://host.docker.internal/live/livestream +``` + +Play the stream in browser: + +* WebRTC:[http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html?autostart=true) +* HTTP-FLV:[http://localhost:8080/live/livestream.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=livestream.flv&port=8080&schema=http) + +## RTC to RTC + +Please use `conf/rtc.conf` as config. + +```bash +export CANDIDATE="192.168.1.10" +docker run --rm --env CANDIDATE=$CANDIDATE \ + -p 1935:1935 -p 8080:8080 -p 1985:1985 -p 8000:8000/udp \ + ossrs/srs:5 \ + objs/srs -c conf/rtc.conf +``` + +> Note: Please set CANDIDATE as the ip of server, please read [CANDIDATE](./webrtc.md#config-candidate). + +Play the stream in browser: + +* Publish stream over WHIP: [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream](http://localhost:8080/players/whip.html) +* Play stream over WHEP: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html) + +> Remark: Note that if not localhost, the WebRTC publisher should be HTTPS page. + +## RTC to RTMP + +Please use `conf/rtc2rtmp.conf` as config. + +```bash +export CANDIDATE="192.168.1.10" +docker run --rm --env CANDIDATE=$CANDIDATE \ + -p 1935:1935 -p 8080:8080 -p 1985:1985 -p 8000:8000/udp \ + ossrs/srs:5 \ + objs/srs -c conf/rtc2rtmp.conf +``` + +> Note: Please set CANDIDATE as the ip of server, please read [CANDIDATE](./webrtc.md#config-candidate). + +The streams: + +* Publish stream over WHIP: [http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream](http://localhost:8080/players/whip.html) +* Play stream over WHEP: [http://localhost:1985/rtc/v1/whep/?app=live&stream=livestream](http://localhost:8080/players/whep.html) +* HTTP-FLV:[http://localhost:8080/live/show.flv](http://localhost:8080/players/srs_player.html?autostart=true&stream=show.flv) +* RTMP by VLC:rtmp://localhost/live/show + +## SFU: One to One + +Please use `conf/rtc.conf` as config. + +```bash +export CANDIDATE="192.168.1.10" +docker run --rm --env CANDIDATE=$CANDIDATE \ + -p 1935:1935 -p 8080:8080 -p 1985:1985 -p 8000:8000/udp \ + ossrs/srs:5 \ + objs/srs -c conf/rtc.conf +``` + +> Note: Please set CANDIDATE as the ip of server, please read [CANDIDATE](./webrtc.md#config-candidate). + +Then startup the signaling, please read [usage](http://ossrs.net/srs.release/wiki/https://github.com/ossrs/signaling#usage): + +```bash +docker run --rm -p 1989:1989 ossrs/signaling:1 +``` + +Use HTTPS proxy [httpx-static](https://github.com/ossrs/go-oryx/tree/develop/httpx-static#usage) as api gateway: + +```bash +export CANDIDATE="192.168.1.10" +docker run --rm -p 80:80 -p 443:443 ossrs/httpx:1 \ + ./bin/httpx-static -http 80 -https 443 -ssk ./etc/server.key -ssc ./etc/server.crt \ + -proxy http://$CANDIDATE:1989/sig -proxy http://$CANDIDATE:1985/rtc \ + -proxy http://$CANDIDATE:8080/ +``` + +To open [http://localhost/demos/one2one.html?autostart=true](http://localhost/demos/one2one.html?autostart=true) + +Or by the IP [https://192.168.3.6/demos/one2one.html?autostart=true](https://192.168.3.6/demos/one2one.html?autostart=true) + +> Note: For self-sign certificate, please type `thisisunsafe` to accept it. + +## SFU: Video Room + +Please follow [SFU: One to One](./webrtc.md#sfu-one-to-one), and open the bellow demo pages. 
To open [http://localhost/demos/room.html?autostart=true](http://localhost/demos/room.html?autostart=true)

Or by the IP [https://192.168.3.6/demos/room.html?autostart=true](https://192.168.3.6/demos/room.html?autostart=true)

> Note: For a self-signed certificate, please type `thisisunsafe` to accept it.

## Room to Live

Please follow [SFU: One to One](./webrtc.md#sfu-one-to-one), and convert RTC to RTMP so that FFmpeg can mix the streams.

```bash
export CANDIDATE="192.168.1.10"
docker run --rm --env CANDIDATE=$CANDIDATE \
    -p 1935:1935 -p 8080:8080 -p 1985:1985 -p 8000:8000/udp \
    ossrs/srs:5 \
    objs/srs -c conf/rtc2rtmp.conf
```

If you use FFmpeg to mix streams, there is an FFmpeg CLI on the demo page, for example:

```bash
ffmpeg -f flv -i rtmp://192.168.3.6/live/alice -f flv -i rtmp://192.168.3.6/live/314d0336 \
    -filter_complex "[1:v]scale=w=96:h=72[ckout];[0:v][ckout]overlay=x=W-w-10:y=H-h-10[out]" -map "[out]" \
    -c:v libx264 -profile:v high -preset medium \
    -filter_complex amix -c:a aac \
    -f flv rtmp://192.168.3.6/live/merge
```

Input:
* rtmp://192.168.3.6/live/alice
* rtmp://192.168.3.6/live/314d0336

Output:
* rtmp://192.168.3.6/live/merge

Winlin 2020.02

![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/webrtc)

diff --git a/versioned_docs/version-6.0/doc/windows.md b/versioned_docs/version-6.0/doc/windows.md
new file mode 100644
index 00000000..2110c50d
--- /dev/null
+++ b/versioned_docs/version-6.0/doc/windows.md
@@ -0,0 +1,89 @@
---
title: Windows
sidebar_label: Windows
hide_title: false
hide_table_of_contents: false
---

# SRS for Windows

SRS 5.0.89+ supports Windows(Cygwin64).

## Build from code

Please install [Cygwin64](https://cygwin.com/install.html).

Install the packages `gcc-g++` `make` `automake` `patch` `pkg-config` `tcl` `cmake`, please see [packages](https://github.com/cygwin/cygwin-install-action#parameters).

Build SRS in a cygwin terminal:

```bash
git checkout develop
./configure
make
```

If successful, there should be a `./objs/srs.exe`; please follow [Getting Started](./getting-started.md) to use it.

## Install from binary

For each [release](https://github.com/ossrs/srs/releases) of SRS, from SRS 5.0.89, there is always a binary installer of SRS for Windows, normally as the artifact of the release, which allows you to install and run SRS very easily.

Below are some examples; note that you should always use the latest [release](https://github.com/ossrs/srs/releases), not a fixed one:

* [Latest release](https://github.com/ossrs/srs/releases)
* [SRS-Windows-x86_64-5.0.89-setup.exe](https://github.com/ossrs/srs/releases/tag/v5.0.89)
* [SRS-Windows-x86_64-5.0.19-setup.exe](https://github.com/ossrs/srs/releases/tag/v5.0.19)

> Note: SRS 5.0.89+ supports the cygwin pipeline, to build and package automatically by GitHub Actions.

![](/img/windows-2022-11-20-001.png)

Run SRS as administrator:

![](/img/windows-2022-11-20-002.png)

Publish to SRS Windows by FFmpeg:

```bash
ffmpeg -re -i ~/srs/doc/source.flv -c copy -f flv rtmp://win11/live/livestream
```

Play by VLC or [srs-player](http://win11:8080/)

![](/img/windows-2022-11-20-003.png)

Most SRS features are available on Windows, for example, RTMP, HTTP-FLV, HLS, WebRTC, HTTP-API, Prometheus Exporter, etc.
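
To quickly confirm the Windows build is healthy, you can also query the HTTP API from a terminal; a minimal sketch, assuming the default API port 1985 and the `win11` hostname used above:

```bash
# Returns the SRS version as JSON if the server is running.
curl http://win11:1985/api/v1/versions
```
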
+ +## Package by NSIS + +If want to package by [NSIS](https://nsis.sourceforge.io/Download), please run in cygwin terminal: + +```bash +"/cygdrive/c/Program Files (x86)/NSIS/makensis.exe" \ + /DSRS_VERSION=$(./objs/srs -v 2>&1) \ + /DCYGWIN_DIR="C:\cygwin64" \ + packaging/nsis/srs.nsi +``` + +## Known Issues + +* [Cygwin: Build with SRT is ok, but crash when running. #3251](https://github.com/ossrs/srs/issues/3251) +* [Cygwin: Support address sanitizer for windows. #3252](https://github.com/ossrs/srs/issues/3252) +* [Cygwin: ST stuck when working in multiple threads mode. #3253](https://github.com/ossrs/srs/issues/3253) +* [Cygwin: Support iocp and windows native build. #3256](https://github.com/ossrs/srs/issues/3256) +* [Cygwin: Build srtp with openssl fail for no srtp_aes_icm_ctx_t #3254](https://github.com/ossrs/srs/issues/3254) + +## Links + +ST supports windows: https://github.com/ossrs/state-threads/issues/20 + +Commits about SRS Windows: https://github.com/ossrs/srs-windows/issues/2 + +Windows docker also works for SRS, however, `srs.exe` is more popular for windows developers. + +Winlin 2022.11 + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/doc/en/v6/windows) + + diff --git a/versioned_docs/version-6.0/tools/demo.md b/versioned_docs/version-6.0/tools/demo.md new file mode 100644 index 00000000..02371a31 --- /dev/null +++ b/versioned_docs/version-6.0/tools/demo.md @@ -0,0 +1,23 @@ +--- +title: Demo +sidebar_label: Demo +hide_title: false +hide_table_of_contents: false +--- + +# Demo + +### FLV +* [HTTP-FLV](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.flv&server=d.ossrs.net&port=80&autostart=true&vhost=d.ossrs.net&schema=http) +* [HTTPS-FLV](https://ossrs.net/players/srs_player.html?app=live&stream=livestream.flv&server=d.ossrs.net&port=443&autostart=true&vhost=d.ossrs.net&schema=https) + +### HLS +* [HLS](http://ossrs.net/players/srs_player.html?app=live&stream=livestream.m3u8&server=d.ossrs.net&port=80&autostart=true&vhost=d.ossrs.net&schema=http) +* [HTTPS HLS](https://ossrs.net/players/srs_player.html?app=live&stream=livestream.m3u8&server=d.ossrs.net&port=443&autostart=true&vhost=d.ossrs.net&schema=https) + +### WebRTC +* [WebRTC](http://ossrs.net/players/rtc_player.html?vhost=d.ossrs.net&server=d.ossrs.net&port=1985&autostart=true) + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/tools/en/v6/demo) + + diff --git a/versioned_docs/version-6.0/tools/specs.md b/versioned_docs/version-6.0/tools/specs.md new file mode 100644 index 00000000..deed2e49 --- /dev/null +++ b/versioned_docs/version-6.0/tools/specs.md @@ -0,0 +1,62 @@ +--- +title: Specifications +sidebar_label: Specifications +hide_title: false +hide_table_of_contents: false +--- + +# Specifications + +## Live Streaming + +1. [amf0_spec_121207.pdf](/files/amf0_spec_121207.pdf), adobe amf0 +1. [amf3_spec_121207.pdf](/files/amf3_spec_121207.pdf), adobe amf3 +1. [hls-m3u8-draft-pantos-http-live-streaming-12.txt](/files/hls-m3u8-draft-pantos-http-live-streaming-12.txt), [hls-m3u8-draft-pantos-http-live-streaming-12.pdf](/files/hls-m3u8-draft-pantos-http-live-streaming-12.pdf), m3u8 +1. [hls-mpeg-ts-VB_WhitePaper_TransportStreamVSProgramStream_rd2.pdf](/files/hls-mpeg-ts-VB_WhitePaper_TransportStreamVSProgramStream_rd2.pdf), ts +1. 
[rtmp.part1.Chunk-Stream.pdf](/files/rtmp.part1.Chunk-Stream.pdf), [rtmp.part2.Message-Formats.pdf](/files/rtmp.part2.Message-Formats.pdf), [rtmp.part3.Commands-Messages.pdf](/files/rtmp.part3.Commands-Messages.pdf), [rtmp_specification_1.0.pdf](/files/rtmp_specification_1.0.pdf), adobe rtmp +1. [flv_v10_1.pdf](/files/flv_v10_1.pdf), adobe flv +1. [video_file_format_spec_v10_1.pdf](/files/video_file_format_spec_v10_1.pdf) flv/f4v + +## Codec + +1. [mp3.id3v2.3.0.pdf](/files/mp3.id3v2.3.0.pdf), http://id3.org/id3v2.3.0 +1. [H.264_MPEG-4-Part-10-White-Paper.pdf](/files/H.264_MPEG-4-Part-10-White-Paper.pdf), h264 + +## HTTP + +1. [http1.0-rfc1945.txt, rfc1945-1996-http1.0.txt](/files/rfc1945-1996-http1.0.txt), http://www.rfc-editor.org/rfc/rfc1945.txt +1. [http1.1-rfc2616.txt, rfc2616-1999-http1.1.txt](/files/rfc2616-1999-http1.1.txt), http://www.rfc-editor.org/rfc/rfc2616.txt +1. [arpa-internet-text-messages-rfc822.txt, rfc822-1982-arpa-internet-text-messages.txt](/files/rfc822-1982-arpa-internet-text-messages.txt), http://www.rfc-editor.org/rfc/rfc822.txt + +## RTC + +1. [STUN, rfc5389-2008-stun.pdf](/files/rfc5389-2008-stun.pdf): https://tools.ietf.org/html/rfc5389 +1. [TURN, rfc5766-2010-turn.pdf](/files/rfc5766-2010-turn.pdf): https://tools.ietf.org/html/rfc5766 +1. [ICE, rfc5245-2010-ice.pdf](/files/rfc5245-2010-ice.pdf): https://tools.ietf.org/html/rfc5245 +1. [SIP, rfc3261-2002-sip.pdf](/files/rfc3261-2002-sip.pdf): https://tools.ietf.org/html/rfc3261 + +## SRT + +1. [Haivision_SRT_Open_Source_White_Paper.pdf](/files/Haivision_SRT_Open_Source_White_Paper.pdf) +1. [SRT_Alliance_Deployment_Guide.pdf](/files/SRT_Alliance_Deployment_Guide.pdf) +1. [SRT_Protocol_TechnicalOverview_DRAFT_2018-10-17.pdf](/files/SRT_Protocol_TechnicalOverview_DRAFT_2018-10-17.pdf) + +## Others + +1. [kafka-160915-0553-82964.pdf](/files/kafka-160915-0553-82964.pdf), https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol +1. [rtmfp-rfc7016.pdf, rfc7016-2013-rtmfp.pdf](/files/rfc7016-2013-rtmfp.pdf), adobe RTMFP, http://tools.ietf.org/html/rfc7016. +1. [rtmfp-tsvarea-1.pdf](/files/rtmfp-tsvarea-1.pdf), http://www.ietf.org/proceedings/10mar/slides/tsvarea-1.pdf +1. [rfc2326-1998-rtsp.pdf](/files/rfc2326-1998-rtsp.pdf) +1. [rfc3550-2003-rtp.pdf](/files/rfc3550-2003-rtp.pdf) +1. [adobe-hds-specification.pdf](/files/adobe-hds-specification.pdf) +1. [adobe-media-manifest-specification.pdf](/files/adobe-media-manifest-specification.pdf) +1. [HTTPDynamicStreamingSpecificationErrataMay2014.pdf](/files/HTTPDynamicStreamingSpecificationErrataMay2014.pdf) +1. [FlashMediaManifestFormatSpecificationErrataMay2014.pdf](/files/FlashMediaManifestFormatSpecificationErrataMay2014.pdf) + +## Files + +1. [ffmpeg-logo.png](/files/ffmpeg-logo.png), [ffmpeg-min.png](/files/ffmpeg-min.png), ffmpeg logo +1. 
[source.flv](/files/source.flv), [source.200kbps.768x320.flv](/files/source.200kbps.768x320.flv), avatar, 400kbps + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/tools/en/v6/specs) + diff --git a/versioned_docs/version-6.0/tools/utility.md b/versioned_docs/version-6.0/tools/utility.md new file mode 100644 index 00000000..25b9b8cc --- /dev/null +++ b/versioned_docs/version-6.0/tools/utility.md @@ -0,0 +1,19 @@ +--- +title: Utility +sidebar_label: Utility +hide_title: false +hide_table_of_contents: false +--- + +# Utility + +* [Console](https://ossrs.net/console/en_index.html) +* [HTTP-FLV Player](https://ossrs.net/players/srs_player.html) +* [WebRTC WHIP Publisher](https://ossrs.net/players/whip.html) +* [WebRTC WHEP Player](https://ossrs.net/players/whep.html) +* [App Download](https://ossrs.net/releases/app.html) +* [HTTP-REST](https://ossrs.net/http-rest/) + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/tools/en/v6/utility) + + diff --git a/versioned_docs/version-6.0/tutorial/oryx.md b/versioned_docs/version-6.0/tutorial/oryx.md new file mode 100644 index 00000000..c15ddac1 --- /dev/null +++ b/versioned_docs/version-6.0/tutorial/oryx.md @@ -0,0 +1,31 @@ +--- +title: Oryx +sidebar_label: Oryx +hide_title: false +hide_table_of_contents: false +--- + +# Oryx + +Oryx(SRS Stack) is an open source, media streaming service solution, which is easy to use and can be used in multiple media +scenarios. It combines the SRS, FFmpeg, WebRTC and other projects, and provides a one-stop solution for small and +medium-sized enterprises to easily obtain digital capabilities. + +## 24/7 Live Stream: Easy Stream Your Camera to YouTube with DDNS & VPS - No PC or OBS Required! + +https://youtu.be/nNOBFRshO6Q + +Discover how to effortlessly stream your camera to YouTube 24/7 using DDNS and VPS in this step-by-step +tutorial. Say goodbye to the need for a PC or OBS, and enjoy a more stable and seamless live streaming +experience. + +## Ultimate Unmanned Live Streaming Solution: Easy, Affordable & No PC Required! Perfect for Slow Media, Sleep Music, ASMR, Movie Streaming & More! + +https://youtu.be/68PIGFDGihU + +How to push a MP4 file, or DVR content to a live room? That's virtual live streaming. +This video introduce how to do virtual live by Oryx. + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/tutorial/en/v6/oryx) + + diff --git a/versioned_docs/version-6.0/tutorial/srs-books.md b/versioned_docs/version-6.0/tutorial/srs-books.md new file mode 100644 index 00000000..b62d9c49 --- /dev/null +++ b/versioned_docs/version-6.0/tutorial/srs-books.md @@ -0,0 +1,12 @@ +--- +title: SRS Books +sidebar_label: SRS Books +hide_title: false +hide_table_of_contents: false +--- + +# SRS Books + +On the way... + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/tutorial/en/v6/srs-books) diff --git a/versioned_docs/version-6.0/tutorial/srs-faq.md b/versioned_docs/version-6.0/tutorial/srs-faq.md new file mode 100644 index 00000000..58a7aa8f --- /dev/null +++ b/versioned_docs/version-6.0/tutorial/srs-faq.md @@ -0,0 +1,14 @@ +--- +title: SRS FAQ +sidebar_label: Live Stream FAQ +hide_title: false +hide_table_of_contents: false +--- + +# FAQ + +On the way... 
+ +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/tutorial/en/v6/srs-faq) + + diff --git a/versioned_docs/version-6.0/tutorial/srs-other.md b/versioned_docs/version-6.0/tutorial/srs-other.md new file mode 100644 index 00000000..8400aa97 --- /dev/null +++ b/versioned_docs/version-6.0/tutorial/srs-other.md @@ -0,0 +1,14 @@ +--- +title: SRS Other +sidebar_label: SRS Other +hide_title: false +hide_table_of_contents: false +--- + +# SRS Other + +On the way... + +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/tutorial/en/v6/srs-other) + + diff --git a/versioned_docs/version-6.0/tutorial/srs-server.md b/versioned_docs/version-6.0/tutorial/srs-server.md new file mode 100644 index 00000000..e23787b5 --- /dev/null +++ b/versioned_docs/version-6.0/tutorial/srs-server.md @@ -0,0 +1,29 @@ +--- +title: SRS Server +sidebar_label: SRS Server +hide_title: false +hide_table_of_contents: false +--- + +# SRS Server + +## Unlock the Power of SRS: Real-World Use Cases and Boosting Your Business with Simple Realtime Server. + +[https://youtu.be/WChYr6z7EpY](https://youtu.be/WChYr6z7EpY) + +Simple Realtime Server (SRS) is a real-time video server that supports various protocols, such as RTMP, WebRTC, HLS, +HTTP-FLV, SRT, and MPEG-DASH. It acts as a media gateway, converting between RTMP, SRT, and WebRTC. SRS has several +use cases, including as an origin cluster, for virtual live-streaming with Oryx, with the SRS Player WordPress +plugin, for Unity developers, in the broadcast industry, for AI video and audio processing, and for cutting +live-streaming expenses. SRS is compatible with various open-source technologies and has a global community on +Discord. + +## Ultra Low Latency Streaming with OBS WHIP + +[https://youtu.be/SqrazCPWcV0](https://youtu.be/SqrazCPWcV0) + +Discover the power of OBS WHIP for low-latency live streaming, its benefits for content creators and broadcasters, +and how to set it up with SRS in just three simple steps. 
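
As a rough sketch of that setup (the exact steps are shown in the video; the config file and URLs below are the defaults used elsewhere in these docs, with `your-server-ip` as a placeholder):

```bash
# 1. Start SRS with WebRTC enabled, converting the WHIP publish to RTMP/HLS.
./objs/srs -c conf/rtc2rtmp.conf

# 2. In OBS with WHIP support, set Settings -> Stream -> Service to WHIP,
#    with the server URL:
#      http://your-server-ip:1985/rtc/v1/whip/?app=live&stream=livestream
# 3. Start streaming in OBS, then play the stream, for example over HLS:
#      http://your-server-ip:8080/live/livestream.m3u8
```
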
+ +![](https://ossrs.io/gif/v1/sls.gif?site=ossrs.io&path=/lts/tutorial/en/v6/srs-server) + diff --git a/versioned_sidebars/version-6.0-sidebars.json b/versioned_sidebars/version-6.0-sidebars.json new file mode 100644 index 00000000..39f0685c --- /dev/null +++ b/versioned_sidebars/version-6.0-sidebars.json @@ -0,0 +1,147 @@ +{ + "tutorialSidebar": [ + "tutorial/srs-server", + "tutorial/oryx", + "tutorial/srs-faq", + "tutorial/srs-books", + "tutorial/srs-other" + ], + "toolsSidebar": [ + "tools/utility", + "tools/specs" + ], + "docsSidebar": [ + "doc/introduction", + { + "type": "category", + "label": "Getting Started", + "link": { + "type": "generated-index" + }, + "collapsed": true, + "items": [ + "doc/getting-started", + "doc/getting-started-build", + "doc/getting-started-oryx", + "doc/getting-started-k8s" + ] + }, + { + "type": "category", + "label": "Main Protocols", + "link": { + "type": "generated-index" + }, + "collapsed": true, + "items": [ + "doc/rtmp", + "doc/hls", + "doc/webrtc", + "doc/flv", + "doc/srt", + "doc/gb28181" + ] + }, + { + "type": "category", + "label": "Main Features", + "link": { + "type": "generated-index" + }, + "collapsed": true, + "items": [ + "doc/http-server", + "doc/hevc", + "doc/dvr", + "doc/streamer", + "doc/ingest", + "doc/forward", + "doc/security", + "doc/snapshot" + ] + }, + { + "type": "category", + "label": "OpenAPI", + "link": { + "type": "generated-index" + }, + "collapsed": true, + "items": [ + "doc/http-api", + "doc/http-callback", + "doc/exporter" + ] + }, + { + "type": "category", + "label": "Clusters", + "link": { + "type": "generated-index" + }, + "collapsed": true, + "items": [ + "doc/origin-cluster", + "doc/edge", + "doc/nginx-for-hls", + "doc/reuse-port" + ] + }, + { + "type": "category", + "label": "DevOps", + "link": { + "type": "generated-index" + }, + "collapsed": true, + "items": [ + "doc/log", + "doc/log-rotate", + "doc/service", + "doc/windows", + "doc/reload", + "doc/resource" + ] + }, + { + "type": "category", + "label": "Advanced Guides", + "link": { + "type": "generated-index" + }, + "collapsed": true, + "items": [ + "doc/low-latency", + "doc/arm", + "doc/drm", + "doc/k8s", + "doc/performance", + "doc/rtmp-url-vhost" + ] + }, + { + "type": "category", + "label": "Others", + "link": { + "type": "generated-index" + }, + "collapsed": true, + "items": [ + "doc/ide", + "doc/git", + "doc/sample-dash", + "doc/ffmpeg", + "doc/nginx-exec", + "doc/client-sdk", + "doc/raspberrypi", + "doc/time-jitter", + "doc/rtmp-pk-http", + "doc/rtmp-handshake", + "doc/rtmp-atc", + "doc/flv-vod-stream", + "doc/delivery-hds", + "doc/srs-lib-rtmp" + ] + } + ] +} diff --git a/versions.json b/versions.json index f5909c53..ced5501f 100644 --- a/versions.json +++ b/versions.json @@ -1,4 +1,5 @@ [ + "6.0", "5.0", "4.0" ]