init commit

This commit is contained in:
qiurui 2024-05-09 09:56:06 +08:00
parent 9bbed399ed
commit f4b8f96cfc
53 changed files with 2229 additions and 3226 deletions

View File

@@ -1,94 +1,33 @@
# NOTE(review): this region was a rendered commit diff that interleaved the
# pre- and post-commit versions of the Makefile (duplicate CXX/CXXFLAGS
# assignments and duplicate recipes for each target made it invalid).
# Reconstructed below is the post-commit version indicated by the hunk
# header ("-1,94 +1,33"): the Luckfox cross toolchain, local include/lib
# paths, uvgRTP-only recipes, and no -lcryptopp. An `all` rule is added
# because .PHONY already declared it but no such target existed.

.PHONY: all clean

# Cross compiler for the Luckfox Pico (arm-rockchip830, uclibc).
CXX = /home/qiurui/Documents/docker-volume/rkpro/sdk-official/luckfox-pico/tools/linux/toolchain/arm-rockchip830-linux-uclibcgnueabihf/bin/arm-rockchip830-linux-uclibcgnueabihf-g++
CXXFLAGS = -I./include -L./lib -Wall -Wextra -O2 -std=c++17 -g

# Sources and libraries shared by every benchmark binary.
COMMON = util/util.cc uvgrtp/v3c_util.cc
LDLIBS = -luvgrtp -lpthread

all: uvgrtp_sender uvgrtp_receiver uvgrtp_latency_sender uvgrtp_latency_receiver \
	uvgrtp_vpcc_latency_sender uvgrtp_vpcc_latency_receiver \
	uvgrtp_vpcc_sender uvgrtp_vpcc_receiver

uvgrtp_sender: uvgrtp/sender.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/sender uvgrtp/sender.cc $(COMMON) $(LDLIBS)

uvgrtp_receiver: uvgrtp/receiver.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/receiver uvgrtp/receiver.cc $(COMMON) $(LDLIBS)

uvgrtp_latency_sender: uvgrtp/latency_sender.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/latency_sender uvgrtp/latency_sender.cc $(COMMON) $(LDLIBS)

uvgrtp_latency_receiver: uvgrtp/latency_receiver.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/latency_receiver uvgrtp/latency_receiver.cc $(COMMON) $(LDLIBS)

uvgrtp_vpcc_latency_sender: uvgrtp/vpcc_latency_sender.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/vpcc_latency_sender uvgrtp/vpcc_latency_sender.cc $(COMMON) $(LDLIBS)

uvgrtp_vpcc_latency_receiver: uvgrtp/vpcc_latency_receiver.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/vpcc_latency_receiver uvgrtp/vpcc_latency_receiver.cc $(COMMON) $(LDLIBS)

uvgrtp_vpcc_sender: uvgrtp/vpcc_sender.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/vpcc_sender uvgrtp/vpcc_sender.cc $(COMMON) $(LDLIBS)

uvgrtp_vpcc_receiver: uvgrtp/vpcc_receiver.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/vpcc_receiver uvgrtp/vpcc_receiver.cc $(COMMON) $(LDLIBS)

clean:
	rm -f uvgrtp/receiver uvgrtp/sender uvgrtp/latency_sender uvgrtp/latency_receiver \
		uvgrtp/vpcc_latency_sender uvgrtp/vpcc_latency_receiver \
		uvgrtp/vpcc_sender uvgrtp/vpcc_receiver

33
Makefile.bk Normal file
View File

@@ -0,0 +1,33 @@
# Backup Makefile (host-side variant with -lcryptopp).
.PHONY: all clean

# Cross compiler for the Luckfox Pico (arm-rockchip830, uclibc).
CXX = /home/qiurui/Documents/docker-volume/rkpro/sdk-official/luckfox-pico/tools/linux/toolchain/arm-rockchip830-linux-uclibcgnueabihf/bin/arm-rockchip830-linux-uclibcgnueabihf-g++
CXXFLAGS = -I./include -L./lib -Wall -Wextra -O2 -std=c++17 -g

# Sources and libraries shared by every benchmark binary.
COMMON = util/util.cc uvgrtp/v3c_util.cc
LDLIBS = -luvgrtp -lpthread -lcryptopp

# `.PHONY` already declared `all`; give it a rule so a bare `make`
# builds every benchmark instead of only the first target.
all: uvgrtp_sender uvgrtp_receiver uvgrtp_latency_sender uvgrtp_latency_receiver \
	uvgrtp_vpcc_latency_sender uvgrtp_vpcc_latency_receiver \
	uvgrtp_vpcc_sender uvgrtp_vpcc_receiver

uvgrtp_sender: uvgrtp/sender.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/sender uvgrtp/sender.cc $(COMMON) $(LDLIBS)

uvgrtp_receiver: uvgrtp/receiver.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/receiver uvgrtp/receiver.cc $(COMMON) $(LDLIBS)

uvgrtp_latency_sender: uvgrtp/latency_sender.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/latency_sender uvgrtp/latency_sender.cc $(COMMON) $(LDLIBS)

uvgrtp_latency_receiver: uvgrtp/latency_receiver.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/latency_receiver uvgrtp/latency_receiver.cc $(COMMON) $(LDLIBS)

uvgrtp_vpcc_latency_sender: uvgrtp/vpcc_latency_sender.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/vpcc_latency_sender uvgrtp/vpcc_latency_sender.cc $(COMMON) $(LDLIBS)

uvgrtp_vpcc_latency_receiver: uvgrtp/vpcc_latency_receiver.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/vpcc_latency_receiver uvgrtp/vpcc_latency_receiver.cc $(COMMON) $(LDLIBS)

uvgrtp_vpcc_sender: uvgrtp/vpcc_sender.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/vpcc_sender uvgrtp/vpcc_sender.cc $(COMMON) $(LDLIBS)

uvgrtp_vpcc_receiver: uvgrtp/vpcc_receiver.cc util/util.cc
	$(CXX) $(CXXFLAGS) -o uvgrtp/vpcc_receiver uvgrtp/vpcc_receiver.cc $(COMMON) $(LDLIBS)

clean:
	rm -f uvgrtp/receiver uvgrtp/sender uvgrtp/latency_sender uvgrtp/latency_receiver \
		uvgrtp/vpcc_latency_sender uvgrtp/vpcc_latency_receiver \
		uvgrtp/vpcc_sender uvgrtp/vpcc_receiver

View File

@@ -1,220 +0,0 @@
#include "../util/util.hh"
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <stdbool.h>
}
#include <chrono>
#include <thread>
#include <string>
#include <atomic>
/* Declared here, defined elsewhere (presumably in util/) -- maps input
 * described by argv into memory and returns its length via `len`. */
extern void *get_mem(int argc, char **argv, size_t& len);
/* Nominal stream geometry and rate used to configure the encoder context.
 * NOTE(review): FPS is 200 here but 30 in the sender counterpart -- confirm
 * which value the latency benchmark is meant to use. */
#define WIDTH 3840
#define HEIGHT 2160
#define FPS 200
/* Frame-timing probes and a completion flag.
 * NOTE(review): none of these three globals are referenced anywhere in the
 * visible code of this file. */
std::chrono::high_resolution_clock::time_point fs, fe;
std::atomic<bool> received(false);
/* Pair of libavformat contexts: `sender` muxes RTP out, `receiver` demuxes
 * the incoming stream described by an SDP file. */
struct ffmpeg_ctx {
AVFormatContext *sender;
AVFormatContext *receiver;
};
/* Build the ffmpeg sender (RTP output muxer) and receiver (SDP-described
 * demuxer) pair used by the latency benchmark.
 *
 * remote_address/remote_port: peer the RTP output is sent to; also selects
 * which SDP file (localhost vs lan) the receiver opens.
 * Returns a heap-allocated ffmpeg_ctx, or NULL if the receiver side cannot
 * be opened or probed. */
static ffmpeg_ctx *init_ffmpeg(std::string remote_address, int remote_port)
{
/* legacy (pre-ffmpeg-4.0) global registration; deprecated in newer ffmpeg */
avcodec_register_all();
av_register_all();
avformat_network_init();
av_log_set_level(AV_LOG_PANIC);
ffmpeg_ctx *ctx = new ffmpeg_ctx;
enum AVCodecID codec_id = AV_CODEC_ID_H265;
/* NOTE(review): i, x, y and got_output are never used in this function */
int i, ret, x, y, got_output;
AVCodecContext *c = NULL;
AVCodec *codec;
AVFrame *frame;
AVPacket pkt;
codec = avcodec_find_encoder(codec_id);
c = avcodec_alloc_context3(codec);
/* NOTE(review): width/height appear swapped -- c->width receives HEIGHT
 * (2160) and c->height receives WIDTH (3840); confirm intent */
c->width = HEIGHT;
c->height = WIDTH;
c->time_base.num = 1;
c->time_base.den = FPS;
c->pix_fmt = AV_PIX_FMT_YUV420P;
c->codec_type = AVMEDIA_TYPE_VIDEO;
c->flags = AV_CODEC_FLAG_GLOBAL_HEADER;
avcodec_open2(c, codec, NULL);
frame = av_frame_alloc();
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
c->pix_fmt, 32);
/* --- sender: RTP output muxer --- */
AVOutputFormat *fmt = av_guess_format("rtp", NULL, NULL);
char addr[64] = { 0 };
/* NOTE(review): the format string has a space after the colon
 * ("rtp://%s: %d") -- verify ffmpeg accepts this URL form */
snprintf(addr, 64, "rtp://%s: %d", remote_address.c_str(), remote_port);
ret = avformat_alloc_output_context2(&ctx->sender, fmt, fmt->name, addr);
avio_open(&ctx->sender->pb, ctx->sender->filename, AVIO_FLAG_WRITE);
struct AVStream* stream = avformat_new_stream(ctx->sender, codec);
stream->codecpar->width = WIDTH;
stream->codecpar->height = HEIGHT;
stream->codecpar->codec_id = AV_CODEC_ID_HEVC;
stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
stream->time_base.num = 1;
stream->time_base.den = FPS;
char buf[256];
AVDictionary *d_s = NULL;
AVDictionary *d_r = NULL;
/* 40 MB output buffer */
snprintf(buf, sizeof(buf), "%d", 40 * 1000 * 1000);
/* NOTE(review): the flags argument to av_dict_set is 32 here and in every
 * call below; flags should normally be 0 or AV_DICT_* constants -- confirm
 * 32 is intentional and not a copy of the snprintf size */
av_dict_set(&d_s, "buffer_size", buf, 32);
/* Flush the underlying I/O stream after each packet.
 *
 * Default is -1 (auto), which means that the underlying protocol will decide,
 * 1 enables it, and has the effect of reducing the latency,
 * 0 disables it and may increase IO throughput in some cases. */
snprintf(buf, sizeof(buf), "%d", 1);
/* NOTE(review): the value passed is NULL, so "flush_packets" is effectively
 * not set even though buf was just formatted -- likely meant to pass buf */
av_dict_set(&d_s, "flush_packets", NULL, 32);
/* Set maximum buffering duration for interleaving. The duration is expressed in microseconds,
 * and defaults to 10000000 (10 seconds).
 *
 * To ensure all the streams are interleaved correctly, libavformat will wait until it has
 * at least one packet for each stream before actually writing any packets to the output file.
 * When some streams are "sparse" (i.e. there are large gaps between successive packets),
 * this can result in excessive buffering.
 *
 * This field specifies the maximum difference between the timestamps of the first and
 * the last packet in the muxing queue, above which libavformat will output a packet regardless of
 * whether it has queued a packet for all the streams.
 *
 * If set to 0, libavformat will continue buffering packets until it has a packet for each stream,
 * regardless of the maximum timestamp difference between the buffered packets. */
snprintf(buf, sizeof(buf), "%d", 1000);
av_dict_set(&d_s, "max_interleave_delta", buf, 32);
/* avioflags flags (input/output)
 *
 * Possible values:
 * direct
 * Reduce buffering. */
snprintf(buf, sizeof(buf), "direct");
av_dict_set(&d_s, "avioflags", buf, 32);
(void)avformat_write_header(ctx->sender, &d_s);
/* When sender has been initialized, initialize receiver */
ctx->receiver = avformat_alloc_context();
/* NOTE(review): assigned in the loop at the bottom but never read, and
 * uninitialized if no video stream is found */
int video_stream_index;
av_dict_set(&d_r, "protocol_whitelist", "file,udp,rtp", 0);
/* input buffer size */
snprintf(buf, sizeof(buf), "%d", 40 * 1000 * 1000);
av_dict_set(&d_r, "buffer_size", buf, 32);
/* avioflags flags (input/output)
 *
 * Possible values:
 * direct
 * Reduce buffering. */
snprintf(buf, sizeof(buf), "direct");
av_dict_set(&d_r, "avioflags", buf, 32);
/* Reduce the latency introduced by buffering during initial input streams analysis. */
av_dict_set(&d_r, "nobuffer", NULL, 32);
/* Set probing size in bytes, i.e. the size of the data to analyze to get stream information.
 *
 * A higher value will enable detecting more information in case it is dispersed into the stream,
 * but will increase latency. Must be an integer not lesser than 32. It is 5000000 by default. */
snprintf(buf, sizeof(buf), "%d", 32);
av_dict_set(&d_r, "probesize", buf, 32);
/* Set number of frames used to probe fps. */
snprintf(buf, sizeof(buf), "%d", 2);
av_dict_set(&d_r, "fpsprobesize", buf, 32);
ctx->receiver->flags = AVFMT_FLAG_NONBLOCK;
/* The receiver gets its stream description from an SDP file; use the
 * localhost variant when looping back to 127.0.0.1 */
if (!strcmp(remote_address.c_str(), "127.0.0.1"))
snprintf(buf, sizeof(buf), "ffmpeg/sdp/localhost/lat_hevc.sdp");
else
snprintf(buf, sizeof(buf), "ffmpeg/sdp/lan/lat_hevc.sdp");
if (avformat_open_input(&ctx->receiver, buf, NULL, &d_r) != 0) {
fprintf(stderr, "nothing found!\n");
/* NOTE(review): ctx and the sender-side resources leak on this path */
return NULL;
}
if (avformat_find_stream_info(ctx->receiver, NULL) < 0) {
fprintf(stderr, "stream info not found!\n");
return NULL;
}
/* search video stream */
for (size_t i = 0; i < ctx->receiver->nb_streams; i++) {
/* NOTE(review): AVStream::codec is deprecated; modern ffmpeg uses codecpar */
if (ctx->receiver->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
video_stream_index = i;
}
return ctx;
}
/* Latency forwarder: demux RTP packets from ctx->receiver and immediately
 * remux each one to ctx->sender, echoing the stream back to the peer so it
 * can measure round-trip time.
 * Returns EXIT_FAILURE if ffmpeg initialization fails, EXIT_SUCCESS once
 * the incoming stream ends. */
static int receiver(std::string remote_address, int remote_port)
{
AVPacket pkt;
ffmpeg_ctx *ctx;
if (!(ctx = init_ffmpeg(remote_address.c_str(), remote_port)))
return EXIT_FAILURE;
av_init_packet(&pkt);
/* start demuxing the incoming stream */
av_read_play(ctx->receiver);
while (av_read_frame(ctx->receiver, &pkt) >= 0) {
/* NOTE(review): pkt is never unreferenced (no av_free_packet /
 * av_packet_unref), so every demuxed packet's buffer leaks -- confirm */
av_write_frame(ctx->sender, &pkt);
}
return EXIT_SUCCESS;
}
/* Entry point for the ffmpeg latency forwarder.
 *
 * Arguments: <local address> <local port> <remote address> <remote port>
 * <format> <srtp>. Only the remote endpoint is consumed by receiver(); the
 * remaining arguments are parsed to keep the CLI uniform with the other
 * benchmark drivers. */
int main(int argc, char **argv)
{
    if (argc != 7) {
        fprintf(stderr, "usage: ./%s <local address> <local port> <remote address> <remote port> \
<format> <srtp>\n", __FILE__);
        return EXIT_FAILURE;
    }

    /* positional arguments */
    const std::string local_address = argv[1];
    const int local_port = atoi(argv[2]);
    const std::string remote_address = argv[3];
    const int remote_port = atoi(argv[4]);
    const bool vvc_enabled = get_vvc_state(argv[5]);
    const bool srtp_enabled = get_srtp_state(argv[6]);

    /* silence unused-variable warnings for arguments this binary ignores */
    (void)local_address;
    (void)local_port;
    (void)vvc_enabled;
    (void)srtp_enabled;

    return receiver(remote_address, remote_port);
}

View File

@@ -1,338 +0,0 @@
#include "../util/util.hh"
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <stdbool.h>
}
#include <atomic>
#include <deque>
#include <chrono>
#include <string>
#include <thread>
#include <unordered_map>
using namespace std::chrono;
/* Declared here, defined elsewhere (presumably in util/) -- maps the file
 * named by `filename` into memory and returns its length via `len`. */
extern void* get_mem(std::string filename, size_t& len);
/* Nominal stream geometry and pacing rate for the sender.
 * NOTE(review): FPS is 30 here but 200 in the latency-receiver file --
 * confirm which is intended. */
#define WIDTH 3840
#define HEIGHT 2160
#define FPS 30
/* NOTE(review): fs, fe and start2 are not referenced in the visible code. */
std::chrono::high_resolution_clock::time_point fs, fe;
/* set by receiver() once the measurement run completes; sender() spins on it */
std::atomic<bool> ready(false);
/* key used for the very first frame (its received size differs from chunk_size) */
uint64_t ff_key = 0;
/* send timestamps keyed by chunk size (used for key-collision detection) */
static std::unordered_map<uint64_t, high_resolution_clock::time_point> timestamps;
/* send timestamps in transmission order; receiver() pops from the front */
static std::deque<high_resolution_clock::time_point> timestamps2;
high_resolution_clock::time_point start2;
/* Pair of libavformat contexts: `sender` muxes RTP out, `receiver` demuxes
 * the echoed stream described by an SDP file. */
struct ffmpeg_ctx {
AVFormatContext *sender;
AVFormatContext *receiver;
};
/* Build the ffmpeg sender (RTP output muxer) and receiver (SDP-described
 * demuxer) pair used by the latency sender.
 *
 * remote_address/remote_port: peer the RTP output is sent to; also selects
 * which SDP file (localhost vs lan) the receiver opens.
 * Returns a heap-allocated ffmpeg_ctx, or NULL if the receiver side cannot
 * be opened. */
static ffmpeg_ctx *init_ffmpeg(std::string remote_address, int remote_port)
{
/* legacy (pre-ffmpeg-4.0) global registration; deprecated in newer ffmpeg */
avcodec_register_all();
av_register_all();
avformat_network_init();
av_log_set_level(AV_LOG_PANIC);
ffmpeg_ctx *ctx = new ffmpeg_ctx;
enum AVCodecID codec_id = AV_CODEC_ID_H265;
/* NOTE(review): i, x, y and got_output are never used in this function */
int i, ret, x, y, got_output;
AVCodecContext *c = NULL;
AVCodec *codec;
AVFrame *frame;
AVPacket pkt;
codec = avcodec_find_encoder(codec_id);
c = avcodec_alloc_context3(codec);
/* NOTE(review): width/height appear swapped -- c->width receives HEIGHT
 * (2160) and c->height receives WIDTH (3840); confirm intent */
c->width = HEIGHT;
c->height = WIDTH;
c->time_base.num = 1;
c->time_base.den = FPS;
c->pix_fmt = AV_PIX_FMT_YUV420P;
c->codec_type = AVMEDIA_TYPE_VIDEO;
c->flags = AV_CODEC_FLAG_GLOBAL_HEADER;
avcodec_open2(c, codec, NULL);
frame = av_frame_alloc();
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
c->pix_fmt, 32);
/* --- sender: RTP output muxer --- */
AVOutputFormat *fmt = av_guess_format("rtp", NULL, NULL);
char addr[64] = { 0 };
/* NOTE(review): the format string has a space after the colon
 * ("rtp://%s: %d") -- verify ffmpeg accepts this URL form */
snprintf(addr, 64, "rtp://%s: %d", remote_address.c_str(), remote_port);
ret = avformat_alloc_output_context2(&ctx->sender, fmt, fmt->name, addr);
avio_open(&ctx->sender->pb, ctx->sender->filename, AVIO_FLAG_WRITE);
struct AVStream* stream = avformat_new_stream(ctx->sender, codec);
stream->codecpar->width = WIDTH;
stream->codecpar->height = HEIGHT;
stream->codecpar->codec_id = AV_CODEC_ID_HEVC;
stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
stream->time_base.num = 1;
stream->time_base.den = FPS;
char buf[256];
AVDictionary *d_s = NULL;
AVDictionary *d_r = NULL;
/* 40 MB output buffer */
snprintf(buf, sizeof(buf), "%d", 40 * 1000 * 1000);
/* NOTE(review): the flags argument to av_dict_set is 32 here and in every
 * call below; flags should normally be 0 or AV_DICT_* constants -- confirm
 * 32 is intentional and not a copy of the snprintf size */
av_dict_set(&d_s, "buffer_size", buf, 32);
#if 1
/* Flush the underlying I/O stream after each packet.
 *
 * Default is -1 (auto), which means that the underlying protocol will decide,
 * 1 enables it, and has the effect of reducing the latency,
 * 0 disables it and may increase IO throughput in some cases. */
snprintf(buf, sizeof(buf), "%d", 1);
/* NOTE(review): the value passed is NULL, so "flush_packets" is effectively
 * not set even though buf was just formatted -- likely meant to pass buf */
av_dict_set(&d_s, "flush_packets", NULL, 32);
/* Set maximum buffering duration for interleaving. The duration is expressed in microseconds,
 * and defaults to 10000000 (10 seconds).
 *
 * To ensure all the streams are interleaved correctly, libavformat will wait until it has
 * at least one packet for each stream before actually writing any packets to the output file.
 * When some streams are "sparse" (i.e. there are large gaps between successive packets),
 * this can result in excessive buffering.
 *
 * This field specifies the maximum difference between the timestamps of the first and
 * the last packet in the muxing queue, above which libavformat will output a packet regardless of
 * whether it has queued a packet for all the streams.
 *
 * If set to 0, libavformat will continue buffering packets until it has a packet for each stream,
 * regardless of the maximum timestamp difference between the buffered packets. */
snprintf(buf, sizeof(buf), "%d", 1000);
av_dict_set(&d_s, "max_interleave_delta", buf, 32);
/* avioflags flags (input/output)
 *
 * Possible values:
 * direct
 * Reduce buffering. */
snprintf(buf, sizeof(buf), "direct");
av_dict_set(&d_s, "avioflags", buf, 32);
#endif
(void)avformat_write_header(ctx->sender, &d_s);
/* When sender has been initialized, initialize receiver */
ctx->receiver = avformat_alloc_context();
/* NOTE(review): assigned in the loop at the bottom but never read, and
 * uninitialized if no video stream is found */
int video_stream_index;
av_dict_set(&d_r, "protocol_whitelist", "file,udp,rtp", 0);
/* input buffer size */
snprintf(buf, sizeof(buf), "%d", 40 * 1000 * 1000);
av_dict_set(&d_r, "buffer_size", buf, 32);
#if 1
/* avioflags flags (input/output)
 *
 * Possible values:
 * direct
 * Reduce buffering. */
snprintf(buf, sizeof(buf), "direct");
av_dict_set(&d_r, "avioflags", buf, 32);
/* Reduce the latency introduced by buffering during initial input streams analysis. */
av_dict_set(&d_r, "nobuffer", NULL, 32);
/* Set probing size in bytes, i.e. the size of the data to analyze to get stream information.
 *
 * A higher value will enable detecting more information in case it is dispersed into the stream,
 * but will increase latency. Must be an integer not lesser than 32. It is 5000000 by default. */
snprintf(buf, sizeof(buf), "%d", 32);
av_dict_set(&d_r, "probesize", buf, 32);
/* Set number of frames used to probe fps. */
snprintf(buf, sizeof(buf), "%d", 2);
av_dict_set(&d_r, "fpsprobesize", buf, 32);
#endif
ctx->receiver->flags = AVFMT_FLAG_NONBLOCK;
/* The receiver gets its stream description from an SDP file; use the
 * localhost variant when looping back to 127.0.0.1 */
if (!strcmp(remote_address.c_str(), "127.0.0.1"))
snprintf(buf, sizeof(buf), "ffmpeg/sdp/localhost/lat_hevc.sdp");
else
snprintf(buf, sizeof(buf), "ffmpeg/sdp/lan/lat_hevc.sdp");
if (avformat_open_input(&ctx->receiver, buf, NULL, &d_r) != 0) {
fprintf(stderr, "nothing found!\n");
/* NOTE(review): ctx and the sender-side resources leak on this path */
return NULL;
}
/* locate the video stream index (not used afterwards) */
for (size_t i = 0; i < ctx->receiver->nb_streams; i++) {
/* NOTE(review): AVStream::codec is deprecated; modern ffmpeg uses codecpar */
if (ctx->receiver->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
video_stream_index = i;
}
return ctx;
}
/* Measurement thread: reads the echoed packets back from ctx->receiver,
 * matches each against the send timestamp queued by sender() in timestamps2,
 * and accumulates per-frame latency, split into intra and inter frames.
 * Prints the averages (ms) to stderr and sets `ready` when done. */
static void receiver(ffmpeg_ctx *ctx)
{
uint64_t key = 0;
/* NOTE(review): this `diff` is shadowed by the `auto diff` declared inside
 * the loop below and is never used */
uint64_t diff = 0;
uint64_t frame_total = 0;
uint64_t intra_total = 0;
uint64_t inter_total = 0;
uint64_t frames = 0;
uint64_t intras = 0;
uint64_t inters = 0;
AVPacket packet;
av_init_packet(&packet);
std::chrono::high_resolution_clock::time_point start;
start = std::chrono::high_resolution_clock::now();
/* start reading packets from stream */
av_read_play(ctx->receiver);
while (av_read_frame(ctx->receiver, &packet) >= 0) {
/* derive a lookup key from the received size; the first frame's wire
 * size differs from its chunk size, so use the saved ff_key instead.
 * NOTE(review): key is size-1 while sender keyed timestamps by
 * chunk_size -- confirm the offset is intended */
key = packet.size - 1;
if (!frames)
key = ff_key;
/* elapsed time since this frame was sent (microseconds).
 * NOTE(review): timestamps2.front() is called without an emptiness
 * check -- undefined behavior if an unexpected packet arrives */
auto diff = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::high_resolution_clock::now() - timestamps2.front()
).count();
timestamps2.pop_front();
/* classify by what appears to be the HEVC nal_unit_type in byte 3
 * (19 = IDR intra, 1 = trailing inter) -- TODO confirm payload layout */
if (((packet.data[3] >> 1) & 0x3f) == 19)
intra_total += (diff / 1000), intras++;
else if (((packet.data[3] >> 1) & 0x3f) == 1)
inter_total += (diff / 1000), inters++;
/* stop after 596 frames (hard-coded test-sequence length) */
if (++frames < 596)
frame_total += (diff / 1000);
else
break;
timestamps.erase(key);
av_free_packet(&packet);
av_init_packet(&packet);
}
/* NOTE(review): divides by intras/inters which may be zero (yields inf/nan) */
fprintf(stderr, "%zu: intra %lf, inter %lf, avg %lf\n",
frames,
intra_total / (float)intras,
inter_total / (float)inters,
frame_total / (float)frames
);
ready = true;
}
/* Stream the pre-encoded HEVC chunks of `input_file` over RTP to the remote
 * peer at FPS pacing, recording a send timestamp per chunk, and wait for
 * the receiver thread to finish its latency measurement.
 * Returns EXIT_SUCCESS after the receiver signals `ready`. */
static int sender(std::string input_file, std::string remote_address, int remote_port)
{
size_t len = 0;
/* map the encoded bitstream into memory; chunk sizes come from a sidecar file */
void *mem = get_mem(input_file, len);
std::vector<uint64_t> chunk_sizes;
get_chunk_sizes(get_chunk_filename(input_file), chunk_sizes);
/* NOTE(review): init_ffmpeg may return NULL; ctx is dereferenced below
 * without a check */
ffmpeg_ctx *ctx = init_ffmpeg(remote_address, remote_port);
/* detached-by-leak measurement thread; joined indirectly via `ready` below */
(void)new std::thread(receiver, ctx);
uint64_t current_frame = 0;
/* NOTE(review): `key` is never used */
uint64_t key = 0;
/* inter-frame period in microseconds (1e6 / FPS) */
uint64_t period = (uint64_t)((1000 / (float)FPS) * 1000);
uint64_t offset = 0;
AVPacket pkt;
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
for (auto& chunk_size : chunk_sizes)
{
/* Start code lookup/merging of small packets causes the incoming frame size
 * to differ quite significantly from "chunk_size" */
if (!offset)
{
ff_key = chunk_size;
}
/* chunk sizes double as timestamp keys, so collisions must be skipped.
 * NOTE(review): %zu with a uint64_t argument -- should be PRIu64 on
 * platforms where size_t is not 64-bit */
if (timestamps.find(chunk_size) != timestamps.end()) {
fprintf(stderr, "cannot use %zu for key!\n", chunk_size);
continue;
}
/* record the send time both keyed (map) and in order (deque) */
timestamps[chunk_size] = std::chrono::high_resolution_clock::now();
timestamps2.push_back(std::chrono::high_resolution_clock::now());
av_init_packet(&pkt);
pkt.data = (uint8_t*)mem + offset;
pkt.size = chunk_size;
av_interleaved_write_frame(ctx->sender, &pkt);
av_packet_unref(&pkt);
current_frame++;
offset += chunk_size;
/* sleep to hold the FPS pacing: only if we are ahead of schedule */
auto runtime = (uint64_t)std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::high_resolution_clock::now() - start
).count();
if (runtime < current_frame * period)
std::this_thread::sleep_for(std::chrono::microseconds(current_frame * period - runtime));
}
/* wait for the receiver thread to finish measuring */
while (!ready.load())
{
std::this_thread::sleep_for(std::chrono::milliseconds(50));
}
return EXIT_SUCCESS;
}
/* Entry point for the ffmpeg latency sender.
 *
 * Arguments: <input file> <local address> <local port> <remote address>
 * <remote port> <fps> <format> <srtp>. Only the input file and remote
 * endpoint are consumed by sender(); the rest are parsed to keep the CLI
 * uniform with the other benchmark drivers. */
int main(int argc, char **argv)
{
    if (argc != 9) {
        fprintf(stderr, "usage: ./%s <input file> <local address> <local port> <remote address> <remote port> <fps> <format> <srtp> \n", __FILE__);
        return EXIT_FAILURE;
    }

    /* positional arguments */
    const std::string input_file = argv[1];
    const std::string local_address = argv[2];
    const int local_port = atoi(argv[3]);
    const std::string remote_address = argv[4];
    const int remote_port = atoi(argv[5]);
    const float fps = atof(argv[6]);
    const bool vvc_enabled = get_vvc_state(argv[7]);
    const bool srtp_enabled = get_srtp_state(argv[8]);

    /* silence unused-variable warnings for arguments this binary ignores */
    (void)local_address;
    (void)local_port;
    (void)fps;
    (void)vvc_enabled;
    (void)srtp_enabled;

    return sender(input_file, remote_address, remote_port);
}

View File

@@ -1,217 +0,0 @@
#include "../util/util.hh"
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libswscale/swscale.h>
}
#include <atomic>
#include <cstdio>
#include <chrono>
#include <thread>
#include <cstdlib>
#include <iostream>
/* When defined, the aggressive low-latency/timeout ffmpeg options inside
 * thread_func()'s #ifdef block are applied. */
#define SETUP_FFMPEG_PARAMETERS
/* Per-thread statistics block.
 * NOTE(review): allocated in main() but never written by thread_func(). */
struct thread_info {
size_t pkts;
size_t bytes;
std::chrono::high_resolution_clock::time_point start;
} *thread_info;
/* number of worker threads that have finished (main's commented-out wait loop) */
std::atomic<int> nready(0);
/* time of the most recently received packet, polled by the interrupt callback.
 * NOTE(review): `last` and `pkts` are plain globals shared by all worker
 * threads -- a data race when nthreads > 1; confirm single-thread use. */
std::chrono::high_resolution_clock::time_point last;
size_t pkts = 0;
/* ffmpeg interrupt callback: aborts blocking reads once the stream has gone
 * quiet. Returns non-zero to interrupt the demuxer, zero to keep waiting.
 *
 * NOTE(review): the original comment claimed a 300 ms timeout, but the code
 * aborts after 10 ms without a packet -- confirm the intended threshold. */
static int cb(void *ctx)
{
    (void)ctx;

    /* nothing received yet: keep blocking until the first packet arrives */
    if (!pkts)
        return 0;

    const auto idle = std::chrono::high_resolution_clock::now() - last;
    const uint64_t idle_ms =
        std::chrono::duration_cast<std::chrono::milliseconds>(idle).count();

    /* no frame within the last 10 ms -> stop the receiver */
    return idle_ms >= 10 ? 1 : 0;
}
static const AVIOInterruptCB int_cb = { cb, NULL };
/* Worker: open the SDP-described RTP input for this run, count received
 * video bytes/packets until the stream goes quiet (see cb()), then print
 * "<bytes> <packets> <milliseconds>" to stderr ("discard ..." if the
 * expected 598 packets did not all arrive) and bump `nready`.
 * NOTE(review): thread_num, remote_*, vvc and srtp are accepted but unused;
 * the SDP filename is selected by `nthreads`, not `thread_num` -- confirm. */
void thread_func(int thread_num, int nthreads, std::string local_address, int local_port,
std::string remote_address, int remote_port, bool vvc, bool srtp)
{
AVFormatContext *format_ctx = avformat_alloc_context();
/* NOTE(review): codec_ctx is never used */
AVCodecContext *codec_ctx = NULL;
int video_stream_index = 0;
/* register everything */
av_register_all();
avformat_network_init();
/* abort blocking reads via cb() once packets stop arriving */
format_ctx->interrupt_callback = int_cb;
av_log_set_level(AV_LOG_PANIC);
/* open rtsp */
AVDictionary *d = NULL;
av_dict_set(&d, "protocol_whitelist", "file,udp,rtp", 0);
char buf[256];
/* input buffer size */
snprintf(buf, sizeof(buf), "%d", 40 * 1000 * 1000);
/* NOTE(review): the flags argument 32 here and below looks like a copy of
 * the snprintf size; av_dict_set flags are normally 0 or AV_DICT_* */
av_dict_set(&d, "buffer_size", buf, 32);
#ifdef SETUP_FFMPEG_PARAMETERS
snprintf(buf, sizeof(buf), "%d", 10000000);
av_dict_set(&d, "max_delay", buf, 32);
snprintf(buf, sizeof(buf), "%d", 40 * 1000 * 1000);
av_dict_set(&d, "recv_buffer_size", buf, 32);
snprintf(buf, sizeof(buf), "%d", 40 * 1000 * 1000);
av_dict_set(&d, "rcvbuf", buf, 32);
/* avioflags flags (input/output)
 *
 * Possible values:
 * direct
 * Reduce buffering. */
snprintf(buf, sizeof(buf), "direct");
av_dict_set(&d, "avioflags", buf, 32);
/* Reduce the latency introduced by buffering during initial input streams analysis. */
av_dict_set(&d, "nobuffer", NULL, 32);
/* Set probing size in bytes, i.e. the size of the data to analyze to get stream information.
 *
 * A higher value will enable detecting more information in case it is dispersed into the stream,
 * but will increase latency. Must be an integer not lesser than 32. It is 5000000 by default. */
snprintf(buf, sizeof(buf), "%d", 32);
av_dict_set(&d, "probesize", buf, 32);
/* Set number of frames used to probe fps. */
snprintf(buf, sizeof(buf), "%d", 2);
av_dict_set(&d, "fpsprobesize", buf, 32);
snprintf(buf, sizeof(buf), "%d", 1);
av_dict_set(&d, "stimeout", buf, 32);
snprintf(buf, sizeof(buf), "%d", 1);
av_dict_set(&d, "timeout", buf, 32);
snprintf(buf, sizeof(buf), "%d", 1);
av_dict_set(&d, "rw_timeout", buf, 32);
#endif
/* pick the localhost or lan SDP variant describing the incoming stream */
if (!strcmp(local_address.c_str(), "127.0.0.1"))
snprintf(buf, sizeof(buf), "ffmpeg/sdp/localhost/hevc_%d.sdp", nthreads);
else
snprintf(buf, sizeof(buf), "ffmpeg/sdp/lan/hevc_%d.sdp", nthreads);
if (avformat_open_input(&format_ctx, buf, NULL, &d)) {
fprintf(stderr, "failed to open input file\n");
/* NOTE(review): format_ctx leaks on this and the next early return */
nready++;
return;
}
if (avformat_find_stream_info(format_ctx, NULL) < 0) {
fprintf(stderr, "failed to find stream info!\n");
nready++;
return;
}
/* locate the video stream so only its packets are counted below */
for (size_t i = 0; i < format_ctx->nb_streams; i++) {
/* NOTE(review): AVStream::codec is deprecated; modern ffmpeg uses codecpar */
if (format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
video_stream_index = i;
}
size_t size = 0;
AVPacket packet;
av_init_packet(&packet);
std::chrono::high_resolution_clock::time_point start;
start = std::chrono::high_resolution_clock::now();
/* start reading packets from stream */
av_read_play(format_ctx);
while (av_read_frame(format_ctx, &packet) >= 0) {
if (packet.stream_index == video_stream_index)
size += packet.size;
av_free_packet(&packet);
av_init_packet(&packet);
/* update the liveness globals polled by cb() */
pkts += 1;
last = std::chrono::high_resolution_clock::now();
}
/* 598 is the hard-coded expected packet count for a complete run.
 * NOTE(review): the duration_cast .count() is a (long long) printed with
 * %zu -- mismatched on platforms where size_t differs; confirm */
if (pkts == 598) {
fprintf(stderr, "%zu %zu %zu\n", size, pkts,
std::chrono::duration_cast<std::chrono::milliseconds>(last - start).count()
);
} else {
fprintf(stderr, "discard %zu %zu %zu\n", size, pkts,
std::chrono::duration_cast<std::chrono::milliseconds>(last - start).count()
);
}
av_read_pause(format_ctx);
nready++;
}
/* Entry point for the ffmpeg receiver benchmark.
 *
 * Arguments: <result file> <local address> <local port> <remote address>
 * <remote port> <number of threads> <format> <srtp>. Spawns one receiver
 * worker per requested thread, then joins them all. */
int main(int argc, char **argv)
{
    if (argc != 9) {
        fprintf(stderr, "usage: ./%s <result file> <local address> <local port> <remote address> <remote port> \
<number of threads> <format> <srtp>\n", __FILE__);
        return EXIT_FAILURE;
    }

    /* positional arguments */
    const std::string result_filename = argv[1];
    const std::string local_address = argv[2];
    const int local_port = atoi(argv[3]);
    const std::string remote_address = argv[4];
    const int remote_port = atoi(argv[5]);
    const int nthreads = atoi(argv[6]);
    const bool vvc_enabled = get_vvc_state(argv[7]);
    const bool srtp_enabled = get_srtp_state(argv[8]);
    (void)result_filename;

    /* per-thread bookkeeping (see the thread_info global) */
    thread_info = (struct thread_info *)calloc(nthreads, sizeof(*thread_info));

    /* launch one receiver worker per requested thread */
    std::vector<std::thread*> workers;
    for (int i = 0; i < nthreads; ++i) {
        workers.push_back(new std::thread(thread_func, i, nthreads, local_address, local_port,
                                          remote_address, remote_port, vvc_enabled, srtp_enabled));
    }

    /* wait for every worker to finish, then release it */
    for (std::thread *worker : workers) {
        if (worker->joinable())
            worker->join();
        delete worker;
    }

    return EXIT_SUCCESS;
}

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 <your ip>
s=No Name
c=IN IP4 <your ip>
t=0 0
m=video 8888 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 <your ip>
s=No Name
c=IN IP4 <your ip>
t=0 0
m=video 8890 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 <your ip>
s=No Name
c=IN IP4 <your ip>
t=0 0
m=video 8892 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 <your ip>
s=No Name
c=IN IP4 <your ip>
t=0 0
m=video 8894 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 <your ip>
s=No Name
c=IN IP4 <your ip>
t=0 0
m=video 8896 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 <your ip>
s=No Name
c=IN IP4 <your ip>
t=0 0
m=video 8898 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 <your ip>
s=No Name
c=IN IP4 <your ip>
t=0 0
m=video 8900 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 <your ip>
s=No Name
c=IN IP4 <your ip>
t=0 0
m=video 8902 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 <your ip>
s=No Name
c=IN IP4 <your ip>
t=0 0
m=video 8889 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 127.0.0.1
s=No Name
c=IN IP4 127.0.0.1
t=0 0
m=video 8888 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 127.0.0.1
s=No Name
c=IN IP4 127.0.0.1
t=0 0
m=video 8890 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 127.0.0.1
s=No Name
c=IN IP4 127.0.0.1
t=0 0
m=video 8892 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 127.0.0.1
s=No Name
c=IN IP4 127.0.0.1
t=0 0
m=video 8894 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 127.0.0.1
s=No Name
c=IN IP4 127.0.0.1
t=0 0
m=video 8896 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 127.0.0.1
s=No Name
c=IN IP4 127.0.0.1
t=0 0
m=video 8898 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 127.0.0.1
s=No Name
c=IN IP4 127.0.0.1
t=0 0
m=video 8900 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 127.0.0.1
s=No Name
c=IN IP4 127.0.0.1
t=0 0
m=video 8902 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@ -1,8 +0,0 @@
v=0
o=user 0 0 IN IP4 127.0.0.1
s=No Name
c=IN IP4 127.0.0.1
t=0 0
m=video 8889 RTP/AVP 96
a=rtpmap:96 H265/90000
a=recvonly

View File

@@ -1,187 +0,0 @@
#include "../util/util.hh"
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <stdbool.h>
}
#include <atomic>
#include <chrono>
#include <thread>
#include <cstdlib>
#include <string>
#include <iostream>
/* Test video dimensions (4K UHD). The sender transmits pre-encoded chunks,
 * so these only describe the stream in the codec/stream parameters. */
#define WIDTH 3840
#define HEIGHT 2160
/* Number of sender threads that have finished their send loop
 * (each thread_func increments this once when done). */
std::atomic<int> nready(0);
/* Send pre-encoded HEVC chunks over RTP using FFmpeg's "rtp" muxer.
 *
 * \param mem            Pointer to the memory-mapped input file (pre-encoded bitstream)
 * \param local_address  Unused; kept for interface parity with other senders
 * \param local_port     Unused; kept for interface parity with other senders
 * \param remote_address Destination IP for the RTP stream
 * \param remote_port    Base destination port; each thread uses port + thread_num * 2
 *                       (RTP ports are even, the odd port is reserved for RTCP)
 * \param thread_num     Index of this sender thread
 * \param fps            Target send rate in frames per second
 * \param vvc            Unused by the FFmpeg path (no VVC support here)
 * \param srtp           Unused by the FFmpeg path (no SRTP support here)
 * \param result_file    File the send statistics are appended to
 * \param chunk_sizes    Byte size of each frame/chunk inside "mem", in send order */
void thread_func(void* mem, std::string local_address, uint16_t local_port,
    std::string remote_address, uint16_t remote_port, int thread_num, double fps, bool vvc, bool srtp,
    const std::string result_file, std::vector<uint64_t> chunk_sizes)
{
    /* These parameters exist only so all sender variants share one signature */
    (void)local_address;
    (void)local_port;
    (void)vvc;
    (void)srtp;

    /* Build a minimal HEVC encoder context; it only describes the stream,
     * the chunks in "mem" are already encoded and are muxed verbatim. */
    enum AVCodecID codec_id = AV_CODEC_ID_H265;
    AVCodec *codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        std::cerr << "HEVC encoder not found" << std::endl;
        return;
    }
    AVCodecContext *c = avcodec_alloc_context3(codec);
    if (!c) {
        std::cerr << "failed to allocate codec context" << std::endl;
        return;
    }
    av_log_set_level(AV_LOG_PANIC);

    /* BUG FIX: width and height were swapped in the original code */
    c->width = WIDTH;
    c->height = HEIGHT;
    c->time_base.num = 1;
    c->time_base.den = (int)fps; /* AVRational holds ints; fractional fps is truncated */
    c->pix_fmt = AV_PIX_FMT_YUV420P;
    c->codec_type = AVMEDIA_TYPE_VIDEO;
    c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    avcodec_open2(c, codec, NULL);

    AVFrame *frame = av_frame_alloc();
    frame->format = c->pix_fmt;
    frame->width = c->width;
    frame->height = c->height;
    int ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
        c->pix_fmt, 32);
    (void)ret;

    /* Each thread streams to its own even port; port + 1 is left for RTCP */
    AVFormatContext *avfctx = NULL;
    AVOutputFormat *fmt = av_guess_format("rtp", NULL, NULL);
    char addr[64] = { 0 };
    /* BUG FIX: the original format string was "rtp://%s: %d" — the stray
     * space after the colon produced an invalid URL */
    snprintf(addr, sizeof(addr), "rtp://%s:%d", remote_address.c_str(),
        remote_port + thread_num * 2);
    ret = avformat_alloc_output_context2(&avfctx, fmt, fmt->name, addr);
    avio_open(&avfctx->pb, avfctx->filename, AVIO_FLAG_WRITE);

    struct AVStream* stream = avformat_new_stream(avfctx, codec);
    /* stream->codecpar->bit_rate = 400000; */
    stream->codecpar->width = WIDTH;
    stream->codecpar->height = HEIGHT;
    stream->codecpar->codec_id = AV_CODEC_ID_HEVC;
    stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    stream->time_base.num = 1;
    stream->time_base.den = (int)fps;
    (void)avformat_write_header(avfctx, NULL);

    /* Pace the pre-encoded chunks out at "fps" frames per second.
     * (The original also declared an outer "chunk_size" variable that was
     * shadowed by the loop variable; it has been removed.) */
    uint64_t current_frame = 0;
    uint64_t period = (uint64_t)((1000 / (float)fps) * 1000); /* frame period, microseconds */
    size_t bytes_sent = 0;
    AVPacket pkt;
    std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
    for (auto& chunk_size : chunk_sizes)
    {
        av_init_packet(&pkt);
        pkt.data = (uint8_t*)mem + bytes_sent;
        pkt.size = (int)chunk_size; /* explicit narrowing; chunks fit in int */
        av_interleaved_write_frame(avfctx, &pkt);
        av_packet_unref(&pkt);
        ++current_frame;
        bytes_sent += chunk_size;
        /* sleep only if we are ahead of schedule so the average rate stays at fps */
        auto runtime = (uint64_t)std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::high_resolution_clock::now() - start
        ).count();
        if (runtime < current_frame * period)
            std::this_thread::sleep_for(std::chrono::microseconds(current_frame * period - runtime));
    }
    auto end = std::chrono::high_resolution_clock::now();
    uint64_t diff = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
    write_send_results_to_file(result_file, bytes_sent, diff);
    nready++;

    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    av_frame_free(&frame);
}
/* Entry point of the FFmpeg sender benchmark: parses arguments, maps the
 * input file into memory, then launches one sender thread per stream and
 * waits for all of them to finish. */
int main(int argc, char **argv)
{
    if (argc != 11) {
        /* BUG FIX: the original printed __FILE__ (the source path) in the
         * usage text; argv[0] is the actual program name */
        fprintf(stderr, "usage: %s <input file> <result file> <local address> <local port> "
                "<remote address> <remote port> <number of threads> <fps> <format> <srtp>\n",
                argv[0]);
        return EXIT_FAILURE;
    }

    std::string input_file     = argv[1];
    std::string result_file    = argv[2];
    std::string local_address  = argv[3];
    int local_port             = atoi(argv[4]);
    std::string remote_address = argv[5];
    int remote_port            = atoi(argv[6]);
    int nthreads               = atoi(argv[7]);
    int fps                    = atoi(argv[8]);
    bool vvc_enabled           = get_vvc_state(argv[9]);
    bool srtp_enabled          = get_srtp_state(argv[10]);

    std::cout << "Starting FFMpeg sender tests. " << local_address << ":" << local_port
        << "->" << remote_address << ":" << remote_port << std::endl;

    /* legacy (pre-4.0) FFmpeg global registration */
    avcodec_register_all();
    av_register_all();
    avformat_network_init();

    /* Map the input bitstream and read the per-frame chunk sizes that
     * describe how it is partitioned */
    size_t len = 0;
    void *mem = get_mem(input_file, len);
    std::vector<uint64_t> chunk_sizes;
    get_chunk_sizes(get_chunk_filename(input_file), chunk_sizes);
    if (mem == nullptr || chunk_sizes.empty())
    {
        std::cerr << "Failed to get file: " << input_file << std::endl;
        std::cerr << "or chunk location file: " << get_chunk_filename(input_file) << std::endl;
        return EXIT_FAILURE;
    }

    /* FIX: value-type std::thread instead of raw new/delete — the original
     * leaked the thread objects on any early-exit path */
    std::vector<std::thread> threads;
    threads.reserve(nthreads);
    for (int i = 0; i < nthreads; ++i) {
        threads.emplace_back(thread_func, mem, local_address, local_port, remote_address,
            remote_port, i, fps, vvc_enabled, srtp_enabled, result_file, chunk_sizes);
    }
    for (auto& t : threads) {
        if (t.joinable())
            t.join();
    }
    return EXIT_SUCCESS;
}

76
include/uvgrtp/clock.hh Normal file
View File

@ -0,0 +1,76 @@
#pragma once
#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#else
#include <sys/time.h>
#endif
#include <chrono>
namespace uvgrtp {
namespace clock {
/* network time protocol */
namespace ntp {
/**
* \brief Get current time in NTP units
*
* \return NTP timestamp
*/
uint64_t now();
/**
* \brief Calculate the time difference of two NTP times
*
* The second timestamp is subtracted from the first one
*
* \param ntp1 First NTP timestamp
* \param ntp2 Second NTP timestamp
*
* \return Difference of the timestamps in milliseconds
*/
uint64_t diff(uint64_t ntp1, uint64_t ntp2);
/**
* \brief Calculate the time difference of two NTP times
*
* \details This function calls uvgrtp::clock::ntp::now()
* and then subtracts the input parameter from that timestamp value.
*
* \param then NTP timestamp
*
* \return Difference of the timestamps in milliseconds
*/
uint64_t diff_now(uint64_t then);
}
/// \cond DO_NOT_DOCUMENT
/* high-resolution clock */
namespace hrc {
typedef std::chrono::high_resolution_clock::time_point hrc_t;
hrc_t now();
/* the result is in milliseconds */
uint64_t diff(hrc_t hrc1, hrc_t hrc2);
/* the result is in milliseconds */
uint64_t diff_now(hrc_t then);
/* like diff_now() but the result is in microseconds (per the _us suffix) */
uint64_t diff_now_us(hrc_t& then);
}
/* conversions between milliseconds and jiffies */
uint64_t ms_to_jiffies(uint64_t ms);
uint64_t jiffies_to_ms(uint64_t jiffies);
#ifdef _WIN32
/* POSIX gettimeofday() replacement for Windows builds */
int gettimeofday(struct timeval *tp, struct timezone *tzp);
#endif
/// \endcond
}
}
namespace uvg_rtp = uvgrtp;

99
include/uvgrtp/context.hh Normal file
View File

@ -0,0 +1,99 @@
#pragma once
#include "util.hh"
#include <map>
#include <string>
#include <memory>
namespace uvgrtp {
/* forward declarations */
class session;
class socketfactory;
/**
* \brief Provides CNAME isolation and can be used to create uvgrtp::session objects
*/
class context {
public:
/**
* \brief RTP context constructor
*
* \details Most of the time one RTP context per application is enough.
* If CNAME namespace isolation is required, multiple context objects can be created.
*/
context();
/**
* \brief RTP context destructor
*
* \details This does not destroy active sessions. They must be destroyed manually
* by calling uvgrtp::context::destroy_session()
*/
~context();
/**
* \brief Create a new RTP session between two IP addresses
*
* \param addresses Local and remote IP address for session as a pair
*
* \return RTP session object
*
* \retval uvgrtp::session On success
* \retval nullptr If memory allocation failed
*/
uvgrtp::session* create_session(std::pair<std::string, std::string> addresses);
/**
* \brief Create a new RTP session
*
* \param address IP address of the remote participant
*
* \return RTP session object
*
* \retval uvgrtp::session On success
* \retval nullptr If "address" is empty or memory allocation failed
*/
uvgrtp::session *create_session(std::string address);
/// \cond DO_NOT_DOCUMENT
// Obsolete method, replaced by create_session(std::pair<std::string, std::string> addresses);
uvgrtp::session *create_session(std::string remote_addr, std::string local_addr);
/// \endcond
/**
* \brief Destroy RTP session and all of its media streams
*
* \param session Pointer to the session object that should be destroyed
*
* \return RTP error code
*
* \retval RTP_OK On success
* \retval RTP_INVALID_VALUE If session is nullptr
*/
rtp_error_t destroy_session(uvgrtp::session *session);
/// \cond DO_NOT_DOCUMENT
std::string& get_cname();
/// \endcond
/**
* \brief Has Crypto++ been included in uvgRTP library
*
* \retval true Crypto++ has been included, using SRTP is possible
* \retval false Crypto++ has not been included, using SRTP is not possible
*/
bool crypto_enabled() const;
private:
/* Generate CNAME for participant using host and login names */
std::string generate_cname() const;
/* CNAME is the same for all connections */
std::string cname_;
/* Socket factory handed to sessions created by this context
 * NOTE(review): exact sharing semantics not visible in this header — confirm in context.cc */
std::shared_ptr<uvgrtp::socketfactory> sfp_;
};
}
namespace uvg_rtp = uvgrtp;

266
include/uvgrtp/frame.hh Normal file
View File

@ -0,0 +1,266 @@
#pragma once
#include "util.hh"
#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#include <ws2def.h>
#else
#include <netinet/in.h>
#endif
#include <string>
#include <vector>
/* https://stackoverflow.com/questions/1537964/visual-c-equivalent-of-gccs-attribute-packed */
#if defined(__MINGW32__) || defined(__MINGW64__) || defined(__GNUC__) || defined(__linux__)
#define PACK(__Declaration__) __Declaration__ __attribute__((__packed__))
#else
#define PACK(__Declaration__) __pragma(pack(push, 1)) __Declaration__ __pragma(pack(pop))
#endif
namespace uvgrtp {
namespace frame {
/* RTCP packet types, see RFC 3550 section 12.1 and RFC 4585/5104 */
enum RTCP_FRAME_TYPE {
RTCP_FT_SR = 200, /* Sender report */
RTCP_FT_RR = 201, /* Receiver report */
RTCP_FT_SDES = 202, /* Source description */
RTCP_FT_BYE = 203, /* Goodbye */
RTCP_FT_APP = 204, /* Application-specific message */
RTCP_FT_RTPFB = 205, /* Transport layer FB message */
RTCP_FT_PSFB = 206 /* Payload-specific FB message */
};
/* Feedback message types (FMT) for payload-specific feedback packets */
enum RTCP_PSFB_FMT {
RTCP_PSFB_PLI = 1, /* Picture Loss Indication (PLI), defined in RFC 4585 */
RTCP_PSFB_SLI = 2, /* Slice Loss Indication (SLI), defined in RFC 4585 */
RTCP_PSFB_RPSI = 3, /* Reference Picture Selection Indication (RPSI), defined in RFC 4585 */
RTCP_PSFB_FIR = 4, /* Full Intra Request (FIR), defined in RFC 5154 */
RTCP_PSFB_TSTR = 5, /* Temporal-Spatial Trade-off Request (TSTR), defined in RFC 5154 */
RTCP_PSFB_AFB = 15 /* Application Layer FB (AFB), defined in RFC 4585 */
};
/* Feedback message types (FMT) for transport-layer feedback packets */
enum RTCP_RTPFB_FMT {
RTCP_RTPFB_NACK = 1 /* Generic NACK, defined in RFC 4585 section 6.2 */
};
/* Fixed RTP header in wire layout (packed), see RFC 3550 section 5.1 */
PACK(struct rtp_header {
uint8_t version:2; /* protocol version */
uint8_t padding:1; /* padding flag */
uint8_t ext:1; /* extension header present */
uint8_t cc:4; /* CSRC count */
uint8_t marker:1; /* marker bit */
uint8_t payload:7; /* payload type */
uint16_t seq = 0; /* sequence number */
uint32_t timestamp = 0; /* RTP timestamp */
uint32_t ssrc = 0; /* synchronization source identifier */
});
/* RTP header extension, see RFC 3550 section 5.3.1 */
PACK(struct ext_header {
uint16_t type = 0; /* profile-defined extension type */
uint16_t len = 0; /* extension length */
uint8_t *data = nullptr; /* extension payload */
});
/** \brief See <a href="https://www.rfc-editor.org/rfc/rfc3550#section-5" target="_blank">RFC 3550 section 5</a> */
struct rtp_frame {
struct rtp_header header;
uint32_t *csrc = nullptr;
struct ext_header *ext = nullptr;
size_t padding_len = 0; /* non-zero if frame is padded */
/** \brief Length of the packet payload in bytes added by uvgRTP to help process the frame
*
* \details payload_len = total length - header length - padding length (if padded)
*/
size_t payload_len = 0;
uint8_t* payload = nullptr;
/// \cond DO_NOT_DOCUMENT
uint8_t *dgram = nullptr; /* pointer to the UDP datagram (for internal use only) */
size_t dgram_size = 0; /* size of the UDP datagram */
/// \endcond
};
/** \brief Header of for all RTCP packets defined in <a href="https://www.rfc-editor.org/rfc/rfc3550#section-6" target="_blank">RFC 3550 section 6</a> */
struct rtcp_header {
/** \brief This field identifies the version of RTP. The version defined by
* RFC 3550 is two (2). */
uint8_t version = 0;
/** \brief Does this packet contain padding at the end */
uint8_t padding = 0;
union {
/** \brief Source count or report count. Alternative to pkt_subtype. */
uint8_t count = 0;
/** \brief Subtype in APP packets. Alternative to count */
uint8_t pkt_subtype;
/** \brief Feedback message type (FMT), specified in RFC 5104 section 4.3. Alternative to count and pkt_subtype */
uint8_t fmt;
};
/** \brief Identifies the RTCP packet type */
uint8_t pkt_type = 0;
/** \brief Length of the whole message measured in 32-bit words */
uint16_t length = 0;
};
/** \brief See <a href="https://www.rfc-editor.org/rfc/rfc3550#section-6.4.1" target="_blank">RFC 3550 section 6.4.1</a> */
struct rtcp_sender_info {
/** \brief NTP timestamp, most significant word */
uint32_t ntp_msw = 0;
/** \brief NTP timestamp, least significant word */
uint32_t ntp_lsw = 0;
/** \brief RTP timestamp corresponding to this NTP timestamp */
uint32_t rtp_ts = 0;
uint32_t pkt_cnt = 0;
/** \brief Also known as octet count*/
uint32_t byte_cnt = 0;
};
/** \brief See <a href="https://www.rfc-editor.org/rfc/rfc3550#section-6.4.1" target="_blank">RFC 3550 section 6.4.1</a> */
struct rtcp_report_block {
uint32_t ssrc = 0;
uint8_t fraction = 0; /* fraction of packets lost */
int32_t lost = 0; /* cumulative number of packets lost */
uint32_t last_seq = 0; /* extended highest sequence number received */
uint32_t jitter = 0; /* interarrival jitter */
uint32_t lsr = 0; /* last Sender Report */
uint32_t dlsr = 0; /* delay since last Sender Report */
};
/** \brief See <a href="https://www.rfc-editor.org/rfc/rfc3550#section-6.4.2" target="_blank">RFC 3550 section 6.4.2</a> */
struct rtcp_receiver_report {
struct rtcp_header header;
uint32_t ssrc = 0;
std::vector<rtcp_report_block> report_blocks;
};
/** \brief See <a href="https://www.rfc-editor.org/rfc/rfc3550#section-6.4.1" target="_blank">RFC 3550 section 6.4.1</a> */
struct rtcp_sender_report {
struct rtcp_header header;
uint32_t ssrc = 0;
struct rtcp_sender_info sender_info;
std::vector<rtcp_report_block> report_blocks;
};
/** \brief See <a href="https://www.rfc-editor.org/rfc/rfc3550#section-6.5" target="_blank">RFC 3550 section 6.5</a> */
struct rtcp_sdes_item {
uint8_t type = 0;
uint8_t length = 0;
uint8_t *data = nullptr;
};
/** \brief See <a href="https://www.rfc-editor.org/rfc/rfc3550#section-6.5" target="_blank">RFC 3550 section 6.5</a> */
struct rtcp_sdes_chunk {
uint32_t ssrc = 0;
std::vector<rtcp_sdes_item> items;
};
/** \brief See <a href="https://www.rfc-editor.org/rfc/rfc3550#section-6.5" target="_blank">RFC 3550 section 6.5</a> */
struct rtcp_sdes_packet {
struct rtcp_header header;
std::vector<rtcp_sdes_chunk> chunks;
};
/** \brief See <a href="https://www.rfc-editor.org/rfc/rfc3550#section-6.7" target="_blank">RFC 3550 section 6.7</a> */
struct rtcp_app_packet {
struct rtcp_header header;
uint32_t ssrc = 0;
uint8_t name[4] = {0};
uint8_t *payload = nullptr;
/** \brief Size of the payload in bytes. Added by uvgRTP to help process the payload. */
size_t payload_len = 0;
};
/** \brief Full Intra Request, See RFC 5104 section 4.3.1 */
struct rtcp_fir {
uint32_t ssrc = 0;
uint8_t seq = 0;
};
/** \brief Slice Loss Indication, See RFC 4585 section 6.3.2 */
struct rtcp_sli {
uint16_t first = 0;
uint16_t num = 0;
uint8_t picture_id = 0;
};
/** \brief Reference Picture Selection Indication, See RFC 4585 section 6.3.3 */
struct rtcp_rpsi {
uint8_t pb = 0;
uint8_t pt = 0;
uint8_t* str = nullptr;
};
/** \brief RTCP Feedback Control Information, See RFC 4585 section 6.1 */
struct rtcp_fb_fci {
union {
rtcp_fir fir;
rtcp_sli sli;
rtcp_rpsi rpsi;
};
};
/** \brief Feedback message. See RFC 4585 section 6.1 */
struct rtcp_fb_packet {
struct rtcp_header header;
uint32_t sender_ssrc = 0;
uint32_t media_ssrc = 0;
std::vector<rtcp_fb_fci> items;
/** \brief Size of the payload in bytes. Added by uvgRTP to help process the payload. */
size_t payload_len = 0;
};
/* ZRTP packet in wire layout (packed) */
PACK(struct zrtp_frame {
uint8_t version:4;
uint16_t unused:12;
uint16_t seq = 0; /* sequence number */
uint32_t magic = 0;
uint32_t ssrc = 0;
uint8_t payload[1]; /* variable-length payload starts here */
});
/* Allocate an RTP frame
*
* First function allocates an empty RTP frame (no payload)
*
* Second allocates an RTP frame with payload of size "payload_len",
*
* Third allocate an RTP frame with payload of size "payload_len"
* + probation zone of size "pz_size" * MAX_PAYLOAD
*
* Return pointer to frame on success
* Return nullptr on error and set rtp_errno to:
* RTP_MEMORY_ERROR if allocation of memory failed */
rtp_frame *alloc_rtp_frame();
rtp_frame *alloc_rtp_frame(size_t payload_len);
/* Deallocate RTP frame
*
* Return RTP_OK on successs
* Return RTP_INVALID_VALUE if "frame" is nullptr */
rtp_error_t dealloc_frame(uvgrtp::frame::rtp_frame *frame);
/* Allocate ZRTP frame
* Parameter "payload_size" defines the length of the frame
*
* Return pointer to frame on success
* Return nullptr on error and set rtp_errno to:
* RTP_MEMORY_ERROR if allocation of memory failed
* RTP_INVALID_VALUE if "payload_size" is 0 */
void* alloc_zrtp_frame(size_t payload_size);
/* Deallocate ZRTP frame
*
* Return RTP_OK on successs
* Return RTP_INVALID_VALUE if "frame" is nullptr */
rtp_error_t dealloc_frame(uvgrtp::frame::zrtp_frame* frame);
}
}
namespace uvg_rtp = uvgrtp;

15
include/uvgrtp/lib.hh Normal file
View File

@ -0,0 +1,15 @@
#pragma once
/* Including this header will include all the necessary headers for using uvgRTP, but
* you can also include the headers individually instead of this header. */
#include "media_stream.hh" // media streamer class
#include "session.hh" // session class
#include "context.hh" // context class
#include "rtcp.hh" // RTCP
#include "clock.hh" // time related functions
#include "frame.hh" // frame related functions
#include "util.hh" // types
#include "version.hh" // version

View File

@ -0,0 +1,467 @@
#pragma once
#include "util.hh"
#include <unordered_map>
#include <memory>
#include <string>
#include <atomic>
#include <cstdint>
#ifndef _WIN32
#include <sys/socket.h>
#include <netinet/in.h>
#else
#include <ws2ipdef.h>
#endif
namespace uvgrtp {
// forward declarations
class rtp;
class rtcp;
class zrtp;
class base_srtp;
class srtp;
class srtcp;
class reception_flow;
class holepuncher;
class socket;
class socketfactory;
class rtcp_reader;
namespace frame {
struct rtp_frame;
}
namespace formats {
class media;
}
/**
* \brief The media_stream is an entity which represents one RTP stream.
*
* \details media_stream is defined by the ports which are used for sending and/or receiving media.
* It is possible for media_stream to be bi- or unidirectional. The unidirectionality
* is achieved by specifying RCE_SEND_ONLY or RCE_RECEIVE_ONLY flag when creating media_stream.
*
* If RCE_RTCP was given when creating media_stream, you can get the uvgrtp::rtcp object with get_rtcp()-function.
*
* media_stream corresponds to one RTP session in <a href="https://www.rfc-editor.org/rfc/rfc3550">RFC 3550</a>.
*/
class media_stream {
public:
/// \cond DO_NOT_DOCUMENT
media_stream(std::string cname, std::string remote_addr, std::string local_addr, uint16_t src_port, uint16_t dst_port,
rtp_format_t fmt, std::shared_ptr<uvgrtp::socketfactory> sfp, int rce_flags);
~media_stream();
/* Initialize traditional RTP session.
* Allocate Connection/Reader/Writer objects and initialize them
*
* Return RTP_OK on success
* Return RTP_MEMORY_ERROR if allocation failed
*
* Other error return codes are defined in {conn,writer,reader}.hh */
rtp_error_t init(std::shared_ptr<uvgrtp::zrtp> zrtp);
/* Initialize Secure RTP session with automatic ZRTP negotiation
* Allocate Connection/Reader/Writer objects and initialize them
*
* Return RTP_OK on success
* Return RTP_MEMORY_ERROR if allocation failed
*
* TODO document all error codes!
*
* Other error return codes are defined in {conn,writer,reader,srtp}.hh */
rtp_error_t init_auto_zrtp(std::shared_ptr<uvgrtp::zrtp> zrtp);
/// \endcond
/**
*
* \brief Start the ZRTP negotiation manually.
*
* \details There is two ways to use ZRTP.
* 1. Use flags RCE_SRTP + RCE_SRTP_KMNGMNT_ZRTP + (RCE_ZRTP_DIFFIE_HELLMAN_MODE/RCE_ZRTP_MULTISTREAM_MODE)
* to automatically start ZRTP negotiation when creating a media stream.
* 2. Use flags RCE_SRTP + (RCE_ZRTP_DIFFIE_HELLMAN_MODE/RCE_ZRTP_MULTISTREAM_MODE) and after creating
* the media stream, call start_zrtp() to manually start the ZRTP negotiation
*
* \return RTP error code
*
* \retval RTP_OK On success
* \retval RTP_TIMEOUT if ZRTP timed out
* \retval RTP_GENERIC_ERROR on other errors */
rtp_error_t start_zrtp();
/**
*
* \brief Add keying information for user-managed SRTP session
*
* \details For user-managed SRTP session (flag RCE_SRTP_KMNGMNT_USER),
* the media stream is not started until SRTP key has been added and all calls
* to push_frame() will fail.
*
* \param key SRTP master key, default is 128-bit long
* \param salt 112-bit long salt
*
* \return RTP error code
*
* \retval RTP_OK On success
* \retval RTP_INVALID_VALUE If key or salt is invalid
* \retval RTP_NOT_SUPPORTED If user-managed SRTP was not specified in create_stream() */
rtp_error_t add_srtp_ctx(uint8_t *key, uint8_t *salt);
/**
* \brief Send data to remote participant with a custom timestamp
*
* \details If so specified either by the selected media format and/or given
* ::RTP_CTX_ENABLE_FLAGS, uvgRTP fragments the input data into RTP packets of 1492 bytes,
* or to any other size defined by the application using ::RCC_MTU_SIZE
*
* The frame is automatically reconstructed by the receiver if all fragments have been
* received successfully.
*
* \param data Pointer to data the that should be sent, uvgRTP does not take ownership of the memory
* \param data_len Length of data
* \param rtp_flags Optional flags, see ::RTP_FLAGS for more details
*
* \return RTP error code
*
* \retval RTP_OK On success
* \retval RTP_INVALID_VALUE If one of the parameters are invalid
* \retval RTP_MEMORY_ERROR If the data chunk is too large to be processed
* \retval RTP_SEND_ERROR If uvgRTP failed to send the data to remote
* \retval RTP_GENERIC_ERROR If an unspecified error occurred
*/
rtp_error_t push_frame(uint8_t *data, size_t data_len, int rtp_flags);
/**
* \brief Send data to remote participant with a custom timestamp
*
* \details If so specified either by the selected media format and/or given
* ::RTP_CTX_ENABLE_FLAGS, uvgRTP fragments the input data into RTP packets of 1492 bytes,
* or to any other size defined by the application using ::RCC_MTU_SIZE
*
* The frame is automatically reconstructed by the receiver if all fragments have been
* received successfully.
*
* \param data Smart pointer to data the that should be sent
* \param data_len Length of data
* \param rtp_flags Optional flags, see ::RTP_FLAGS for more details
*
* \return RTP error code
*
* \retval RTP_OK On success
* \retval RTP_INVALID_VALUE If one of the parameters are invalid
* \retval RTP_MEMORY_ERROR If the data chunk is too large to be processed
* \retval RTP_SEND_ERROR If uvgRTP failed to send the data to remote
* \retval RTP_GENERIC_ERROR If an unspecified error occurred
*/
rtp_error_t push_frame(std::unique_ptr<uint8_t[]> data, size_t data_len, int rtp_flags);
/**
* \brief Send data to remote participant with a custom timestamp
*
* \details If so specified either by the selected media format and/or given
* ::RTP_CTX_ENABLE_FLAGS, uvgRTP fragments the input data into RTP packets of 1492 bytes,
* or to any other size defined by the application using ::RCC_MTU_SIZE
*
* The frame is automatically reconstructed by the receiver if all fragments have been
* received successfully.
*
* If application so wishes, it may override uvgRTP's own timestamp
* calculations and provide timestamping information for the stream itself.
* This requires that the application provides a sensible value for the ts
* parameter. If RTCP has been enabled, uvgrtp::rtcp::set_ts_info() should have
* been called.
*
* \param data Pointer to data the that should be sent, uvgRTP does not take ownership of the memory
* \param data_len Length of data
* \param ts 32-bit timestamp value for the data
* \param rtp_flags Optional flags, see ::RTP_FLAGS for more details
*
* \return RTP error code
*
* \retval RTP_OK On success
* \retval RTP_INVALID_VALUE If one of the parameters are invalid
* \retval RTP_MEMORY_ERROR If the data chunk is too large to be processed
* \retval RTP_SEND_ERROR If uvgRTP failed to send the data to remote
* \retval RTP_GENERIC_ERROR If an unspecified error occurred
*/
rtp_error_t push_frame(uint8_t *data, size_t data_len, uint32_t ts, int rtp_flags);
/**
* \brief Send data to remote participant with custom RTP and NTP timestamps
*
* \details If so specified either by the selected media format and/or given
* ::RTP_CTX_ENABLE_FLAGS, uvgRTP fragments the input data into RTP packets of 1492 bytes,
* or to any other size defined by the application using ::RCC_MTU_SIZE
*
* The frame is automatically reconstructed by the receiver if all fragments have been
* received successfully.
*
* If application so wishes, it may override uvgRTP's own timestamp
* calculations and provide timestamping information for the stream itself.
* This requires that the application provides a sensible value for the ts
* parameter. If RTCP has been enabled, uvgrtp::rtcp::set_ts_info() should have
* been called.
*
* \param data Pointer to data the that should be sent, uvgRTP does not take ownership of the memory
* \param data_len Length of data
* \param ts 32-bit RTP timestamp for the packet
* \param ntp_ts 64-bit NTP timestamp value of when the packets data was sampled. NTP timestamp is a
* 64-bit unsigned fixed-point number with the integer part (seconds) in the first 32 bits and the
* fractional part (fractional seconds) in the last 32 bits. Used for synchronizing multiple streams.
* \param rtp_flags Optional flags, see ::RTP_FLAGS for more details
*
* \return RTP error code
*
* \retval RTP_OK On success
* \retval RTP_INVALID_VALUE If one of the parameters are invalid
* \retval RTP_MEMORY_ERROR If the data chunk is too large to be processed
* \retval RTP_SEND_ERROR If uvgRTP failed to send the data to remote
* \retval RTP_GENERIC_ERROR If an unspecified error occurred
*/
rtp_error_t push_frame(uint8_t* data, size_t data_len, uint32_t ts, uint64_t ntp_ts, int rtp_flags);
/**
* \brief Send data to remote participant with a custom timestamp
*
* \details If so specified either by the selected media format and/or given
* ::RTP_CTX_ENABLE_FLAGS, uvgRTP fragments the input data into RTP packets of 1492 bytes,
* or to any other size defined by the application using ::RCC_MTU_SIZE
*
* The frame is automatically reconstructed by the receiver if all fragments have been
* received successfully.
*
* If application so wishes, it may override uvgRTP's own timestamp
* calculations and provide timestamping information for the stream itself.
* This requires that the application provides a sensible value for the ts
* parameter. If RTCP has been enabled, uvgrtp::rtcp::set_ts_info() should have
* been called.
*
* \param data Smart pointer to data the that should be sent
* \param data_len Length of data
* \param ts 32-bit timestamp value for the data
* \param rtp_flags Optional flags, see ::RTP_FLAGS for more details
*
* \return RTP error code
*
* \retval RTP_OK On success
* \retval RTP_INVALID_VALUE If one of the parameters are invalid
* \retval RTP_MEMORY_ERROR If the data chunk is too large to be processed
* \retval RTP_SEND_ERROR If uvgRTP failed to send the data to remote
* \retval RTP_GENERIC_ERROR If an unspecified error occurred
*/
rtp_error_t push_frame(std::unique_ptr<uint8_t[]> data, size_t data_len, uint32_t ts, int rtp_flags);
/**
* \brief Send data to remote participant with custom RTP and NTP timestamps
*
* \details If so specified either by the selected media format and/or given
* ::RTP_CTX_ENABLE_FLAGS, uvgRTP fragments the input data into RTP packets of 1492 bytes,
* or to any other size defined by the application using ::RCC_MTU_SIZE
*
* The frame is automatically reconstructed by the receiver if all fragments have been
* received successfully.
*
* If application so wishes, it may override uvgRTP's own timestamp
* calculations and provide timestamping information for the stream itself.
* This requires that the application provides a sensible value for the ts
* parameter. If RTCP has been enabled, uvgrtp::rtcp::set_ts_info() should have
* been called.
*
* \param data Smart pointer to data the that should be sent
* \param data_len Length of data
* \param ts 32-bit RTP timestamp for the packet
* \param ntp_ts 64-bit NTP timestamp value of when the packets data was sampled. NTP timestamp is a
* 64-bit unsigned fixed-point number with the integer part (seconds) in the first 32 bits and the
* fractional part (fractional seconds) in the last 32 bits. Used for synchronizing multiple streams.
* \param rtp_flags Optional flags, see ::RTP_FLAGS for more details
*
* \return RTP error code
*
* \retval RTP_OK On success
* \retval RTP_INVALID_VALUE If one of the parameters are invalid
* \retval RTP_MEMORY_ERROR If the data chunk is too large to be processed
* \retval RTP_SEND_ERROR If uvgRTP failed to send the data to remote
* \retval RTP_GENERIC_ERROR If an unspecified error occurred
*/
rtp_error_t push_frame(std::unique_ptr<uint8_t[]> data, size_t data_len, uint32_t ts, uint64_t ntp_ts, int rtp_flags);
// Disabled for now
//rtp_error_t push_user_packet(uint8_t* data, uint32_t len);
//rtp_error_t install_user_receive_hook(void* arg, void (*hook)(void*, uint8_t* data, uint32_t len));
/**
* \brief Poll a frame indefinitely from the media stream object
*
* \return RTP frame
*
* \retval uvgrtp::frame::rtp_frame* On success
* \retval nullptr If an unrecoverable error happened
*/
uvgrtp::frame::rtp_frame *pull_frame();
/**
* \brief Poll a frame for a specified time from the media stream object
*
* \param timeout_ms How long is a frame waited, in milliseconds
*
* \return RTP frame
*
* \retval uvgrtp::frame::rtp_frame* On success
* \retval nullptr If a frame was not received within the specified time limit or in case of an error
*/
uvgrtp::frame::rtp_frame *pull_frame(size_t timeout_ms);
/**
* \brief Asynchronous way of getting frames
*
* \details Receive hook is an alternative to polling frames using uvgrtp::media_stream::pull_frame().
* Instead of application asking from uvgRTP if there are any new frames available, uvgRTP will notify
* the application when a frame has been received
*
* The hook should not be used for media processing as it will block the receiver from
* reading more frames. Instead, it should only be used as an interface between uvgRTP and
* the calling application where the frame hand-off happens.
*
* \param arg Optional argument that is passed to the hook when it is called, can be set to nullptr
* \param hook Function pointer to the receive hook that uvgRTP should call
*
* \return RTP error code
*
* \retval RTP_OK On success
* \retval RTP_INVALID_VALUE If hook is nullptr */
rtp_error_t install_receive_hook(void *arg, void (*hook)(void *, uvgrtp::frame::rtp_frame *));
/**
* \brief Configure the media stream, see ::RTP_CTX_CONFIGURATION_FLAGS for more details
*
* \return RTP error code
*
* \retval RTP_OK On success
* \retval RTP_INVALID_VALUE If the provided value is not valid for a given configuration flag
* \retval RTP_GENERIC_ERROR If setsockopt(2) failed
*/
rtp_error_t configure_ctx(int rcc_flag, ssize_t value);
/**
* \brief Get the values associated with configuration flags, see ::RTP_CTX_CONFIGURATION_FLAGS for more details
*
* \return Value of the configuration flag
*
* \retval int value on success
* \retval -1 on error
*/
int get_configuration_value(int rcc_flag);
/// \cond DO_NOT_DOCUMENT
/* Get unique key of the media stream
* Used by session to index media streams */
uint32_t get_key() const;
/// \endcond
/**
* \brief Get pointer to the RTCP object of the media stream
*
* \details This object is used to control all RTCP-related functionality
* and RTCP documentation can be found from \ref uvgrtp::rtcp
*
* \return Pointer to RTCP object
*
* \retval uvgrtp::rtcp* If RTCP has been enabled (RCE_RTCP has been given to uvgrtp::session::create_stream())
* \retval nullptr If RTCP has not been enabled
*/
uvgrtp::rtcp *get_rtcp();
/**
* \brief Get SSRC identifier. You can use the SSRC value for example to find the report
* block belonging to this media_stream in RTCP sender/receiver report.
*
* \return SSRC value
*/
uint32_t get_ssrc() const;
private:
/* Initialize the connection by initializing the socket
* and binding ourselves to specified interface and creating
* an outgoing address */
rtp_error_t init_connection();
/* Create the media object for the stream */
rtp_error_t create_media(rtp_format_t fmt);
/* free all allocated resources */
rtp_error_t free_resources(rtp_error_t ret);
rtp_error_t init_srtp_with_zrtp(int rce_flags, int type, std::shared_ptr<uvgrtp::base_srtp> srtp,
std::shared_ptr<uvgrtp::zrtp> zrtp);
rtp_error_t start_components();
rtp_error_t install_packet_handlers();
uint32_t get_default_bandwidth_kbps(rtp_format_t fmt);
bool check_pull_preconditions();
rtp_error_t check_push_preconditions(int rtp_flags, bool smart_pointer);
inline uint8_t* copy_frame(uint8_t* original, size_t data_len);
/* Unique key of this media stream (see get_key()) */
uint32_t key_;
/* Protocol/crypto components owned or shared by this stream */
std::shared_ptr<uvgrtp::srtp> srtp_;
std::shared_ptr<uvgrtp::srtcp> srtcp_;
std::shared_ptr<uvgrtp::socket> socket_;
std::shared_ptr<uvgrtp::rtp> rtp_;
std::shared_ptr<uvgrtp::rtcp> rtcp_;
std::shared_ptr<uvgrtp::zrtp> zrtp_;
std::shared_ptr<uvgrtp::socketfactory> sfp_;
/* Remote endpoint; presumably only one of the two sockaddr variants is
 * used depending on ipv6_ — confirm in media_stream.cc */
sockaddr_in remote_sockaddr_;
sockaddr_in6 remote_sockaddr_ip6_;
std::string remote_address_;
std::string local_address_;
uint16_t src_port_;
uint16_t dst_port_;
bool ipv6_;
rtp_format_t fmt_;
bool new_socket_;
/* Media context config */
int rce_flags_ = 0;
/* Has the media stream been initialized */
bool initialized_;
/* RTP packet reception flow. Dispatches packets to other components */
std::shared_ptr<uvgrtp::reception_flow> reception_flow_;
/* Media object associated with this media stream. */
std::unique_ptr<uvgrtp::formats::media> media_;
/* Thread that keeps the holepunched connection open for unidirectional streams */
std::unique_ptr<uvgrtp::holepuncher> holepuncher_;
/* CNAME of this participant (passed in via the constructor) */
std::string cname_;
/* Frame rate expressed as a fraction, defaults to 30/1 */
ssize_t fps_numerator_ = 30;
ssize_t fps_denominator_ = 1;
uint32_t bandwidth_ = 0;
/* Local and remote SSRC identifiers (see get_ssrc()) */
std::shared_ptr<std::atomic<std::uint32_t>> ssrc_;
std::shared_ptr<std::atomic<std::uint32_t>> remote_ssrc_;
// Save values associated with context flags, to be returned with get_configuration_value
// Values are initialized to -2, which means value not set
int snd_buf_size_;
int rcv_buf_size_;
};
}
namespace uvg_rtp = uvgrtp;

695
include/uvgrtp/rtcp.hh Normal file
View File

@ -0,0 +1,695 @@
#pragma once
#include "clock.hh"
#include "util.hh"
#include "frame.hh"
#ifdef _WIN32
#include <ws2ipdef.h>
#else
#include <sys/socket.h>
#include <netinet/in.h>
#endif
#include <bitset>
#include <map>
#include <thread>
#include <vector>
#include <functional>
#include <memory>
#include <mutex>
#include <deque>
#include <atomic>
namespace uvgrtp {
class rtp;
class srtcp;
class socket;
class socketfactory;
class rtcp_reader;
typedef std::vector<std::pair<size_t, uint8_t*>> buf_vec; // also defined in socket.hh
/// \cond DO_NOT_DOCUMENT
/* Role of the local participant in the RTCP session (see our_role_ in uvgrtp::rtcp) */
enum RTCP_ROLE {
RECEIVER,
SENDER
};
/* Statistics tracked for the local sender, used when generating RTCP Sender Reports */
struct sender_statistics {
/* sender stats */
uint32_t sent_pkts = 0; /* Number of sent RTP packets */
uint32_t sent_bytes = 0; /* Number of sent bytes excluding RTP Header */
bool sent_rtp_packet = false; // since last report
};
/* Per-source reception statistics, used to fill RTCP report blocks (see RFC 3550 A.1/A.8) */
struct receiver_statistics {
/* receiver stats */
uint32_t received_pkts = 0; /* Number of packets received */
uint32_t lost_pkts = 0; /* Number of dropped RTP packets */
uint32_t received_bytes = 0; /* Number of bytes received excluding RTP Header */
bool received_rtp_packet = false; // since last report
uint32_t expected_pkts = 0; /* Number of expected packets */
uint32_t received_prior = 0; /* Number of received packets in last report */
uint32_t expected_prior = 0; /* Number of expected packets in last report */
double jitter = 0; /* The estimation of jitter (see RFC 3550 A.8) */
uint32_t transit = 0; /* Relative transit time of the previous packet (RFC 3550 A.8) — TODO confirm */
/* Receiver clock related stuff */
uint64_t initial_ntp = 0; /* Wallclock reading when the first RTP packet was received */
uint32_t initial_rtp = 0; /* RTP timestamp of the first RTP packet received */
uint32_t clock_rate = 0; /* Rate of the clock (used for jitter calculations) */
uint32_t lsr = 0; /* Middle 32 bits of the 64-bit NTP timestamp of previous SR */
uvgrtp::clock::hrc::hrc_t sr_ts; /* When the last SR was received (used to calculate delay) */
uint16_t max_seq = 0; /* Highest sequence number received */
uint32_t base_seq = 0; /* First sequence number received */
uint32_t bad_seq = 0; /* Last 'bad' sequence number + 1 (RFC 3550 A.1) — TODO confirm */
uint16_t cycles = 0; /* Number of sequence cycles */
};
/* State kept per RTCP session participant, indexed by SSRC in uvgrtp::rtcp */
struct rtcp_participant {
struct receiver_statistics stats; /* RTCP session statistics of the participant */
uint32_t probation = 0; /* has the participant been fully accepted to the session */
int role = 0; /* is the participant a sender or a receiver */
/* Save the latest RTCP packets received from this participant
 * Users can query these packets using the SSRC of participant */
uvgrtp::frame::rtcp_sender_report *sr_frame = nullptr;
uvgrtp::frame::rtcp_receiver_report *rr_frame = nullptr;
uvgrtp::frame::rtcp_sdes_packet *sdes_frame = nullptr;
uvgrtp::frame::rtcp_app_packet *app_frame = nullptr;
};
/* An outgoing APP packet queued for transmission in a compound RTCP packet.
 * Copying is disabled because the payload is held by unique_ptr (sole ownership). */
struct rtcp_app_packet {
rtcp_app_packet(const rtcp_app_packet& orig_packet) = delete;
rtcp_app_packet(const char* name, uint8_t subtype, uint32_t payload_len, std::unique_ptr<uint8_t[]> payload);
~rtcp_app_packet();
const char* name;       // 4-character APP name (see send_app_packet())
uint8_t subtype;        // APP subtype field
uint32_t payload_len;   // Payload length in bytes
std::unique_ptr<uint8_t[]> payload;
};
/// \endcond
/**
 * \brief RTCP instance handles all incoming and outgoing RTCP traffic, including report generation
 *
 * \details If media_stream was created with RCE_RTCP flag, RTCP is enabled. RTCP periodically sends compound RTCP packets.
 * The bit rate of RTP session influences the reporting interval, but changing this has not yet been implemented.
 *
 * The compound RTCP packet begins with either Sender Reports if we sent RTP packets recently or Receiver Report if we didn't
 * send RTP packets recently. Both of these report types include report blocks for all the RTP sources we have received packets
 * from during reporting period. The compound packets also always have an SDES packet and calling send_sdes_packet()-function will
 * modify the contents of this SDES packet.
 *
 * You can use the APP packet to test new RTCP packet types using the send_app_packet()-function.
 * The APP packets are added to these periodically sent compound packets.
 *
 *
 * See <a href="https://www.rfc-editor.org/rfc/rfc3550#section-6" target="_blank">RFC 3550 section 6</a> for more details.
 */
class rtcp {
public:
/// \cond DO_NOT_DOCUMENT
rtcp(std::shared_ptr<uvgrtp::rtp> rtp, std::shared_ptr<std::atomic<std::uint32_t>> ssrc, std::shared_ptr<std::atomic<uint32_t>> remote_ssrc,
std::string cname, std::shared_ptr<uvgrtp::socketfactory> sfp, int rce_flags);
rtcp(std::shared_ptr<uvgrtp::rtp> rtp, std::shared_ptr<std::atomic<std::uint32_t>> ssrc, std::shared_ptr<std::atomic<uint32_t>> remote_ssrc,
std::string cname, std::shared_ptr<uvgrtp::socketfactory> sfp, std::shared_ptr<uvgrtp::srtcp> srtcp, int rce_flags);
~rtcp();
/* start the RTCP runner thread
 *
 * return RTP_OK on success and RTP_MEMORY_ERROR if the allocation fails */
rtp_error_t start();
/* End the RTCP session and send RTCP BYE to all participants
 *
 * return RTP_OK on success */
rtp_error_t stop();
/* Generate either RTCP Sender or Receiver report and send it to all participants
 * Return RTP_OK on success and RTP_ERROR on error */
rtp_error_t generate_report();
/* Handle incoming RTCP packet (first make sure it's a valid RTCP packet)
 * This function will call one of the above functions internally
 *
 * Return RTP_OK on success and RTP_ERROR on error */
rtp_error_t handle_incoming_packet(void* args, int rce_flags, uint8_t* buffer, size_t size, frame::rtp_frame** out);
/// \endcond
/* Send "frame" to all participants
 *
 * These routines will convert all necessary fields to network byte order
 *
 * Return RTP_OK on success
 * Return RTP_INVALID_VALUE if "frame" is in some way invalid
 * Return RTP_SEND_ERROR if sending "frame" did not succeed (see socket.hh for details) */
/**
 * \brief Send an RTCP SDES packet
 *
 * \param items Vector of SDES items
 *
 * \retval RTP_OK On success
 * \retval RTP_MEMORY_ERROR If allocation fails
 * \retval RTP_GENERIC_ERROR If sending fails
 */
rtp_error_t send_sdes_packet(const std::vector<uvgrtp::frame::rtcp_sdes_item>& items);
/**
 * \brief Send an RTCP APP packet
 *
 * \param name Name of the APP item, e.g., STAT, must have a length of four ASCII characters
 * \param subtype Subtype of the APP item
 * \param payload_len Length of the payload
 * \param payload Payload
 *
 * \retval RTP_OK On success
 * \retval RTP_MEMORY_ERROR If allocation fails
 * \retval RTP_GENERIC_ERROR If sending fails
 */
rtp_error_t send_app_packet(const char *name, uint8_t subtype, uint32_t payload_len, const uint8_t *payload);
/**
 * \brief Send an RTCP BYE packet
 *
 * \details In case the quitting participant is a mixer and is serving multiple
 * participants, the input vector contains the SSRCs of all those participants. If the
 * participant is a regular member of the session, the vector only contains the SSRC
 * of the participant.
 *
 * \param ssrcs Vector of SSRCs of those participants who are quitting
 *
 * \retval RTP_OK On success
 * \retval RTP_MEMORY_ERROR If allocation fails
 * \retval RTP_GENERIC_ERROR If sending fails
 */
rtp_error_t send_bye_packet(std::vector<uint32_t> ssrcs);
/// \cond DO_NOT_DOCUMENT
/* Return the latest RTCP packet received from participant of "ssrc"
 * Return nullptr if we haven't received this kind of packet or if "ssrc" doesn't exist
 *
 * NOTE: Caller is responsible for deallocating the memory */
uvgrtp::frame::rtcp_sender_report *get_sender_packet(uint32_t ssrc);
uvgrtp::frame::rtcp_receiver_report *get_receiver_packet(uint32_t ssrc);
uvgrtp::frame::rtcp_sdes_packet *get_sdes_packet(uint32_t ssrc);
uvgrtp::frame::rtcp_app_packet *get_app_packet(uint32_t ssrc);
/* Somebody joined the multicast group the owner of this RTCP instance is part of
 * Add it to RTCP participant list so we can start listening for reports
 *
 * "clock_rate" tells how much the RTP timestamp advances, this information is needed
 * to calculate the interarrival jitter correctly. It has nothing to do with our clock rate,
 * (or whether we're even sending anything)
 *
 * Return RTP_OK on success and RTP_ERROR on error */
rtp_error_t add_initial_participant(uint32_t clock_rate);
/* Functions for updating various RTP sender statistics */
void sender_update_stats(const uvgrtp::frame::rtp_frame *frame);
/* If we've detected that our SSRC has collided with someone else's SSRC, we need to
 * generate new random SSRC and reinitialize our own RTCP state.
 * RTCP object still has the participants of "last session", we can use their SSRCs
 * to detect a new collision
 *
 * Return RTP_OK if reinitialization succeeded
 * Return RTP_SSRC_COLLISION if our new SSRC has collided and we need to generate new SSRC */
rtp_error_t reset_rtcp_state(uint32_t ssrc);
/* Update various session statistics */
void update_session_statistics(const uvgrtp::frame::rtp_frame *frame);
/* Getter for interval_ms_, which is calculated by set_session_bandwidth
 * Be aware that this interval is frequently re-calculated in rtcp_runner() */
uint32_t get_rtcp_interval_ms() const;
/* Set total bandwidth for this session, called at the start
 * This affects the RTCP packet transmission interval */
void set_session_bandwidth(uint32_t kbps);
std::shared_ptr<uvgrtp::socket> get_socket() const;
/* Store the following info in RTCP
 * Local IP address
 * Remote IP address
 * Local port number for RTCP
 * Destination port number for RTCP
 * These are used when adding new participants and creating sockets for them */
rtp_error_t set_network_addresses(std::string local_addr, std::string remote_addr,
uint16_t local_port, uint16_t dst_port, bool ipv6);
/* Return SSRCs of all participants */
std::vector<uint32_t> get_participants() const;
/// \endcond
/**
 * \brief Provide timestamping information for RTCP
 *
 * \details If the application wishes to timestamp the stream itself AND it has
 * enabled RTCP by using ::RCE_RTCP, it must provide timestamping information for
 * RTCP so sensible synchronization values can be calculated for Sender Reports
 *
 * The application can call uvgrtp::clock::ntp::now() to get the current wall clock
 * reading as an NTP timestamp value
 *
 * \param clock_start NTP timestamp for t = 0
 * \param clock_rate Clock rate of the stream
 * \param rtp_ts_start RTP timestamp for t = 0
 */
void set_ts_info(uint64_t clock_start, uint32_t clock_rate, uint32_t rtp_ts_start);
/* Alternate way to get RTCP packets is to install a hook for them. So instead of
 * polling an RTCP packet, user can install a function that is called when
 * a specific RTCP packet is received. */
/**
 * \brief Install an RTCP Sender Report hook
 *
 * \details This function is called when an RTCP Sender Report is received
 *
 * \param hook Function pointer to the hook
 *
 * \retval RTP_OK on success
 * \retval RTP_INVALID_VALUE If hook is nullptr
 */
rtp_error_t install_sender_hook(void (*hook)(uvgrtp::frame::rtcp_sender_report *));
/**
 * \brief Install an RTCP Sender Report hook
 *
 * \details This function is called when an RTCP Sender Report is received
 *
 * \param sr_handler C++ function pointer to the hook
 *
 * \retval RTP_OK on success
 * \retval RTP_INVALID_VALUE If hook is nullptr
 */
rtp_error_t install_sender_hook(std::function<void(std::unique_ptr<uvgrtp::frame::rtcp_sender_report>)> sr_handler);
/**
 * \brief Install an RTCP Receiver Report hook
 *
 * \details This function is called when an RTCP Receiver Report is received
 *
 * \param hook Function pointer to the hook
 *
 * \retval RTP_OK on success
 * \retval RTP_INVALID_VALUE If hook is nullptr
 */
rtp_error_t install_receiver_hook(void (*hook)(uvgrtp::frame::rtcp_receiver_report *));
/**
 * \brief Install an RTCP Receiver Report hook
 *
 * \details This function is called when an RTCP Receiver Report is received
 *
 * \param rr_handler C++ function pointer to the hook
 *
 * \retval RTP_OK on success
 * \retval RTP_INVALID_VALUE If hook is nullptr
 */
rtp_error_t install_receiver_hook(std::function<void(std::unique_ptr<uvgrtp::frame::rtcp_receiver_report>)> rr_handler);
/**
 * \brief Install an RTCP SDES packet hook
 *
 * \details This function is called when an RTCP SDES packet is received
 *
 * \param hook Function pointer to the hook
 *
 * \retval RTP_OK on success
 * \retval RTP_INVALID_VALUE If hook is nullptr
 */
rtp_error_t install_sdes_hook(void (*hook)(uvgrtp::frame::rtcp_sdes_packet *));
/**
 * \brief Install an RTCP SDES packet hook
 *
 * \details This function is called when an RTCP SDES packet is received
 *
 * \param sdes_handler C++ function pointer to the hook
 *
 * \retval RTP_OK on success
 * \retval RTP_INVALID_VALUE If hook is nullptr
 */
rtp_error_t install_sdes_hook(std::function<void(std::unique_ptr<uvgrtp::frame::rtcp_sdes_packet>)> sdes_handler);
/**
 * \brief Install an RTCP APP packet hook
 *
 * \details This function is called when an RTCP APP packet is received
 *
 * \param hook Function pointer to the hook
 *
 * \retval RTP_OK on success
 * \retval RTP_INVALID_VALUE If hook is nullptr
 */
rtp_error_t install_app_hook(void (*hook)(uvgrtp::frame::rtcp_app_packet *));
/**
 * \brief Install an RTCP APP packet hook
 *
 * \details This function is called when an RTCP APP packet is received
 *
 * \param app_handler C++ function pointer to the hook
 *
 * \retval RTP_OK on success
 * \retval RTP_INVALID_VALUE If hook is nullptr
 */
rtp_error_t install_app_hook(std::function<void(std::unique_ptr<uvgrtp::frame::rtcp_app_packet>)> app_handler);
/// \cond DO_NOT_DOCUMENT
// These have been replaced by functions with unique_ptr in them
rtp_error_t install_sender_hook(std::function<void(std::shared_ptr<uvgrtp::frame::rtcp_sender_report>)> sr_handler);
rtp_error_t install_receiver_hook(std::function<void(std::shared_ptr<uvgrtp::frame::rtcp_receiver_report>)> rr_handler);
rtp_error_t install_sdes_hook(std::function<void(std::shared_ptr<uvgrtp::frame::rtcp_sdes_packet>)> sdes_handler);
rtp_error_t install_app_hook(std::function<void(std::shared_ptr<uvgrtp::frame::rtcp_app_packet>)> app_handler);
/// \endcond
/**
 * \brief Install hook for one type of APP packets
 *
 * \details Each time the RR/SR is sent, all APP sending hooks call their respective functions to get the data
 *
 * \param app_name name of the APP packet. Max 4 chars
 * \param app_sending the function to be called when hook fires
 * \retval RTP_OK on success
 * \retval RTP_INVALID_VALUE If app_name is empty or longer than 4 characters or function pointer is empty
 */
rtp_error_t install_send_app_hook(std::string app_name, std::function<std::unique_ptr<uint8_t[]>(uint8_t& subtype, uint32_t& payload_len)> app_sending_func);
/**
 * \brief Remove all installed hooks for RTCP
 *
 * \details Removes all installed hooks so they can be re-added in case of changes
 *
 * \retval RTP_OK on success
 */
rtp_error_t remove_all_hooks();
/**
 * \brief Remove a hook for sending APP packets
 *
 * \param app_name name of the APP packet hook. Max 4 chars
 * \retval RTP_OK on success
 * \retval RTP_INVALID_VALUE if hook with given app_name was not found
 */
rtp_error_t remove_send_app_hook(std::string app_name);
/// \cond DO_NOT_DOCUMENT
/* Update RTCP-related sender statistics */
rtp_error_t update_sender_stats(size_t pkt_size);
void set_socket(std::shared_ptr<uvgrtp::socket> socket);
/* Update RTCP-related receiver statistics from RTP packets */
rtp_error_t recv_packet_handler_common(void *arg, int rce_flags, uint8_t* read_ptr, size_t size, frame::rtp_frame **out);
/* Update RTCP-related sender statistics */
static rtp_error_t send_packet_handler_vec(void *arg, uvgrtp::buf_vec& buffers);
// the length field is the rtcp packet size measured in 32-bit words - 1
size_t rtcp_length_in_bytes(uint16_t length);
void set_payload_size(size_t mtu_size);
/// \endcond
private:
rtp_error_t set_sdes_items(const std::vector<uvgrtp::frame::rtcp_sdes_item>& items);
uint32_t size_of_ready_app_packets() const;
uint32_t size_of_apps_from_hook(std::vector< std::shared_ptr<rtcp_app_packet>> packets) const;
uint32_t size_of_compound_packet(uint16_t reports,
bool sr_packet, bool rr_packet, bool sdes_packet, uint32_t app_size, bool bye_packet) const;
/* read the header values from rtcp packet */
void read_rtcp_header(const uint8_t* buffer, size_t& read_ptr,
uvgrtp::frame::rtcp_header& header);
void read_reports(const uint8_t* buffer, size_t& read_ptr, size_t packet_end, uint8_t count,
std::vector<uvgrtp::frame::rtcp_report_block>& reports);
void read_ssrc(const uint8_t* buffer, size_t& read_ptr, uint32_t& out_ssrc);
/* Handle different kinds of incoming rtcp packets. The read header is passed to functions
   which read rest of the frame type specific data.
 * Return RTP_OK on success and RTP_ERROR on error */
rtp_error_t handle_sender_report_packet(uint8_t* buffer, size_t& read_ptr, size_t packet_end,
uvgrtp::frame::rtcp_header& header);
rtp_error_t handle_receiver_report_packet(uint8_t* buffer, size_t& read_ptr, size_t packet_end,
uvgrtp::frame::rtcp_header& header);
rtp_error_t handle_sdes_packet(uint8_t* buffer, size_t& read_ptr, size_t packet_end,
uvgrtp::frame::rtcp_header& header, uint32_t sender_ssrc);
rtp_error_t handle_bye_packet(uint8_t* buffer, size_t& read_ptr,
uvgrtp::frame::rtcp_header& header);
rtp_error_t handle_app_packet(uint8_t* buffer, size_t& read_ptr, size_t packet_end,
uvgrtp::frame::rtcp_header& header);
rtp_error_t handle_fb_packet(uint8_t* buffer, size_t& read_ptr, size_t packet_end,
uvgrtp::frame::rtcp_header& header);
/* Entry point of the periodic report-generation thread (report_generator_) */
static void rtcp_runner(rtcp *rtcp);
/* when we start the RTCP instance, we don't know what the SSRC of the remote is
 * when an RTP packet is received, we must check if we've already received a packet
 * from this sender and if not, create new entry to receiver_stats_ map */
bool is_participant(uint32_t ssrc) const;
//TODO: Resolve collision??
/* When we receive an RTP or RTCP packet, we need to check the source address and see if it's
 * the same address where we've received packets before.
 *
 * If the address is new, it means we have detected an SSRC collision and the packet should
 * be dropped. We also need to check whether this SSRC matches with our own SSRC and if it does
 * we need to send RTCP BYE and rejoin to the session */
bool collision_detected(uint32_t ssrc) const;
/* Move participant from initial_peers_ to participants_ */
rtp_error_t add_participant(uint32_t ssrc);
/* We've got a message from new source (the SSRC of the frame is not known to us)
 * Initialize statistics for the peer and move it to participants_ */
rtp_error_t init_new_participant(const uvgrtp::frame::rtp_frame *frame);
/* Initialize the RTP Sequence related stuff of peer
 * This function assumes that the peer already exists in the participants_ map */
rtp_error_t init_participant_seq(uint32_t ssrc, uint16_t base_seq);
/* Update the SSRC's sequence related data in participants_ map
 *
 * Return RTP_OK if the received packet was OK
 * Return RTP_GENERIC_ERROR if it wasn't and
 * packet-related statistics should not be updated */
rtp_error_t update_participant_seq(uint32_t ssrc, uint16_t seq);
/* Update the RTCP bandwidth variables
 *
 * "pkt_size" tells how much rtcp_byte_count_
 * should be increased before calculating the new average */
void update_rtcp_bandwidth(size_t pkt_size);
/* Update average RTCP packet size variable
 * packet_size is the size of received RTCP packet in octets */
void update_avg_rtcp_size(uint64_t packet_size);
/* Calculate the RTCP report interval in seconds
 * rtcp_bw is given in kbps
 * Defined in RFC3550 Appendix A.7 */
double rtcp_interval(int members, int senders,
double rtcp_bw, bool we_sent, double avg_rtcp_size, bool red_min, bool randomisation);
/* RTCP runner keeps track of ssrcs and how long they have been silent.
 * By default a source gets timed out if it has been silent for 25 seconds
 * If an ssrc is timed out, this function removes it from participants_ map and
 * updates other related info */
rtp_error_t remove_timeout_ssrc(uint32_t ssrc);
/* Because struct statistics contains uvgRTP clock object we cannot
 * zero it out without compiler complaining about it so all the fields
 * must be set to zero manually */
void zero_stats(uvgrtp::sender_statistics *stats);
void zero_stats(uvgrtp::receiver_statistics *stats);
/* Takes ownership of the frame */
rtp_error_t send_rtcp_packet_to_participants(uint8_t* frame, uint32_t frame_size, bool encrypt);
void free_participant(std::unique_ptr<rtcp_participant> participant);
void cleanup_participants();
/* Secure RTCP context */
std::shared_ptr<uvgrtp::srtcp> srtcp_;
/* RTP context flags */
int rce_flags_;
/* are we a sender (and possibly a receiver) or just a receiver */
int our_role_;
/* TODO: time_t?? */
// TODO: Check these, they don't seem to be used
size_t tp_; /* the last time an RTCP packet was transmitted */
size_t tc_; /* the current time */
size_t tn_; /* the next scheduled transmission time of an RTCP packet */
size_t pmembers_; /* the estimated number of session members at the time tn was last recomputed */
size_t members_; /* the most current estimate for the number of session members */
size_t senders_; /* the most current estimate for the number of senders in the session */
/* Total session bandwidth. RTCP bandwidth will be set to 5 % of this */
uint32_t total_bandwidth_;
/* The target RTCP bandwidth, i.e., the total bandwidth
 * that will be used for RTCP packets by all members of this session,
 * in octets per second. This will be a specified fraction of the
 * "session bandwidth" parameter supplied to the application at startup. */
double rtcp_bandwidth_;
/* "Minimum" value for RTCP transmission interval, depends on the session bandwidth
 * Actual interval can be 50 % smaller due to randomisation */
uint32_t reduced_minimum_;
/* Flag that is true if the application has sent data since
 * the 2nd previous RTCP report was transmitted. */
// TODO: Only set, never read
bool we_sent_;
/* Store sender and receiver info, this is needed when calling
 * add_participant dynamically (i.e. after initializing the stream) */
std::string local_addr_;
std::string remote_addr_;
uint16_t local_port_;
uint16_t dst_port_;
/* The average compound RTCP packet size, in octets,
 * over all RTCP packets sent and received by this participant. The
 * size includes lower-layer transport and network protocol headers
 * (e.g., UDP and IP) as explained in Section 6.2 */
// TODO: Only set, never read
// NOTE(review): identifier has a typo ("pize" should be "size"); renaming would touch other files
size_t avg_rtcp_pkt_pize_;
/* Average RTCP packet size in octets.
 * Initialized to 64 */
uint64_t avg_rtcp_size_;
/* Number of RTCP packets and bytes sent and received by this participant */
// TODO: Only set, never read
size_t rtcp_pkt_count_;
size_t rtcp_byte_count_;
/* Number of RTCP packets sent */
uint32_t rtcp_pkt_sent_count_;
/* Flag that is true if the application has not yet sent an RTCP packet. */
// TODO: Only set, never read
bool initial_;
/* Copy of our own current SSRC */
std::shared_ptr<std::atomic_uint> ssrc_;
/* Copy of the remote streams SSRC */
std::shared_ptr<std::atomic<uint32_t>> remote_ssrc_;
/* NTP timestamp associated with initial RTP timestamp (aka t = 0) */
uint64_t clock_start_;
/* Clock rate of the media ie. how fast does the time increase */
uint32_t clock_rate_;
/* The first value of RTP timestamp (aka t = 0) */
uint32_t rtp_ts_start_;
std::map<uint32_t, std::unique_ptr<rtcp_participant>> participants_;
uint8_t num_receivers_; // maximum is 32 at the moment (5 bits)
bool ipv6_;
/* Address of the socket that we are sending data to */
sockaddr_in socket_address_;
sockaddr_in6 socket_address_ipv6_;
/* Map for keeping track of sources for timeouts
 * First number is the sources ssrc
 * Second number is how many milliseconds it has been silent*/
std::map<uint32_t, uint32_t> ms_since_last_rep_;
/* statistics for RTCP Sender and Receiver Reports */
struct sender_statistics our_stats;
/* If we expect frames from remote but haven't received anything from remote yet,
 * the participant resides in this vector until he's moved to participants_ */
std::vector<std::unique_ptr<rtcp_participant>> initial_participants_;
/* C-style and std::function hooks invoked for received RTCP packet types
 * (see the install_*_hook() functions above) */
void (*sender_hook_)(uvgrtp::frame::rtcp_sender_report *);
void (*receiver_hook_)(uvgrtp::frame::rtcp_receiver_report *);
void (*sdes_hook_)(uvgrtp::frame::rtcp_sdes_packet *);
void (*app_hook_)(uvgrtp::frame::rtcp_app_packet *);
std::function<void(std::shared_ptr<uvgrtp::frame::rtcp_sender_report>)> sr_hook_f_;
std::function<void(std::unique_ptr<uvgrtp::frame::rtcp_sender_report>)> sr_hook_u_;
std::function<void(std::shared_ptr<uvgrtp::frame::rtcp_receiver_report>)> rr_hook_f_;
std::function<void(std::unique_ptr<uvgrtp::frame::rtcp_receiver_report>)> rr_hook_u_;
std::function<void(std::shared_ptr<uvgrtp::frame::rtcp_sdes_packet>)> sdes_hook_f_;
std::function<void(std::unique_ptr<uvgrtp::frame::rtcp_sdes_packet>)> sdes_hook_u_;
std::function<void(std::shared_ptr<uvgrtp::frame::rtcp_app_packet>)> app_hook_f_;
std::function<void(std::unique_ptr<uvgrtp::frame::rtcp_app_packet>)> app_hook_u_;
std::function<void(std::unique_ptr<uvgrtp::frame::rtcp_fb_packet>)> fb_hook_u_;
/* One mutex per hook family guards installation/invocation of that hook */
std::mutex sr_mutex_;
std::mutex rr_mutex_;
std::mutex sdes_mutex_;
std::mutex app_mutex_;
std::mutex fb_mutex_;
mutable std::mutex participants_mutex_;
std::mutex send_app_mutex_;
/* Thread running rtcp_runner() */
std::unique_ptr<std::thread> report_generator_;
std::shared_ptr<uvgrtp::socket> rtcp_socket_;
std::shared_ptr<uvgrtp::socketfactory> sfp_;
std::shared_ptr<uvgrtp::rtcp_reader> rtcp_reader_;
bool is_active() const
{
return active_;
}
// Flag polled via is_active(); presumably cleared by stop() — NOTE(review): verify
bool active_;
std::atomic<uint32_t> interval_ms_;
std::shared_ptr<uvgrtp::rtp> rtp_ptr_;
std::mutex packet_mutex_;
// messages waiting to be sent
std::vector<uvgrtp::frame::rtcp_sdes_item> ourItems_; // always sent
std::vector<uint32_t> bye_ssrcs_; // sent once
std::map<std::string, std::deque<rtcp_app_packet>> app_packets_; // sent one at a time per name
// APPs for hook
std::multimap<std::string, std::function <std::unique_ptr<uint8_t[]>(uint8_t& subtype, uint32_t& payload_len)>> outgoing_app_hooks_;
bool hooked_app_;
uvgrtp::frame::rtcp_sdes_item cnameItem_;
char cname_[255];
size_t mtu_size_;
};
}
namespace uvg_rtp = uvgrtp;

131
include/uvgrtp/session.hh Normal file
View File

@ -0,0 +1,131 @@
#pragma once
#include "util.hh"
#include <mutex>
#include <string>
#include <vector>
#include <unordered_map>
#include <memory>
namespace uvgrtp {
class media_stream;
class zrtp;
class socketfactory;
/** \brief Provides ZRTP synchronization and can be used to create uvgrtp::media_stream objects
 *
 * \details
 *
 * By itself session does not do anything. The actual RTP streaming is done by media_stream objects,
 * which can be created by session. media_stream corresponds to an RTP session in RFC 3550.
 */
class session {
public:
/// \cond DO_NOT_DOCUMENT
session(std::string cname, std::string addr, std::shared_ptr<uvgrtp::socketfactory> sfp);
session(std::string cname, std::string remote_addr, std::string local_addr, std::shared_ptr<uvgrtp::socketfactory> sfp);
~session();
/// \endcond
/**
 * \brief Create a uni- or bidirectional media stream
 *
 * \details
 *
 * The created object is used for sending and/or receiving media, see documentation
 * for uvgrtp::media_stream for more details.
 *
 * If both addresses were provided when uvgrtp::session was created, by default
 * uvgRTP binds itself to local_addr:src_port and sends packets to remote_addr:dst_port.
 *
 * User can enable and disable functionality of media_stream by OR'ing (using |) RCE_* flags
 * together and passing them using the rce_flags parameter. In rce_flags, the RCE_SEND_ONLY flag
 * can be used to avoid binding and src_port is thus ignored. Correspondingly RCE_RECEIVE_ONLY flag
 * means dst_port is ignored. Without either RCE_SEND_ONLY or RCE_RECEIVE_ONLY,
 * and if only one address was provided for session that address is interpreted as remote_addr and
 * binding happens to ANY:src_port.
 *
 * \param src_port Local port that uvgRTP listens to for incoming RTP packets
 * \param dst_port Remote port where uvgRTP sends RTP packets
 * \param fmt Format of the media stream. see ::RTP_FORMAT for more details
 * \param rce_flags RTP context enable flags, see ::RTP_CTX_ENABLE_FLAGS for more details
 *
 * \return RTP media stream object
 *
 * \retval uvgrtp::media_stream* On success
 * \retval nullptr On failure, see the printed error message for details
 */
uvgrtp::media_stream *create_stream(uint16_t src_port, uint16_t dst_port, rtp_format_t fmt, int rce_flags);
/**
 * \brief Create a unidirectional media_stream for an RTP session
 *
 * \details
 *
 * The created object is used for sending or receiving media, see documentation
 * for uvgrtp::media_stream for more details.
 * User can enable and disable functionality of uvgRTP by OR'ing (using |) RCE_* flags
 * together and passing them using the rce_flags parameter. The RCE_SEND_ONLY flag in
 * rce_flags means the port is interpreted as a remote port. The RCE_RECEIVE_ONLY means
 * the port is used for binding to a local interface. Without either flag,
 * this function defaults to RCE_SEND_ONLY.
 *
 * \param port Either local or remote port depending on rce_flags
 * \param fmt Format of the media stream. see ::RTP_FORMAT for more details
 * \param rce_flags RTP context enable flags, see ::RTP_CTX_ENABLE_FLAGS for more details
 *
 * \return RTP media stream object
 *
 * \retval uvgrtp::media_stream* On success
 * \retval nullptr On failure, see the printed error message for details
 */
uvgrtp::media_stream *create_stream(uint16_t port, rtp_format_t fmt, int rce_flags);
/**
 * \brief Destroy a media stream
 *
 * \param stream Pointer to the media stream that should be destroyed
 *
 * \return RTP error code
 *
 * \retval RTP_OK On success
 * \retval RTP_INVALID_VALUE If stream is nullptr
 * \retval RTP_NOT_FOUND If stream does not belong to this session
 */
rtp_error_t destroy_stream(uvgrtp::media_stream *stream);
/// \cond DO_NOT_DOCUMENT
/* Get unique key of the session
 * Used by context to index sessions */
std::string& get_key();
/// \endcond
private:
/* Each RTP multimedia session shall have one ZRTP session from which all session are derived */
std::shared_ptr<uvgrtp::zrtp> zrtp_;
/* Address given via the single-address constructor; interpreted per create_stream() docs */
std::string generic_address_;
/* Each RTP multimedia session is always IP-specific */
std::string remote_address_;
/* If user so wishes, the session can be bound to a certain interface */
std::string local_address_;
/* All media streams of this session, indexed by their unique key (see media_stream::get_key()) */
std::unordered_map<uint32_t, uvgrtp::media_stream *> streams_;
/* Guards access to streams_ */
std::mutex session_mtx_;
/* CNAME passed on to created media streams */
std::string cname_;
std::shared_ptr<uvgrtp::socketfactory> sf_;
};
}
namespace uvg_rtp = uvgrtp;

397
include/uvgrtp/util.hh Normal file
View File

@ -0,0 +1,397 @@
/// \file util.hh
#pragma once
/// \cond DO_NOT_DOCUMENT
#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#else
#include <sys/time.h>
#endif
// ssize_t definition for all systems
#if defined(_MSC_VER)
typedef SSIZE_T ssize_t;
#else
#include <sys/types.h>
#endif
#include <stdint.h>
/// \endcond
/**
 * \enum RTP_ERROR
 *
 * \brief RTP error codes
 *
 * \details These error values are returned from various uvgRTP functions. Functions that return a pointer set the rtp_errno global value that should be checked if a function call failed
 */
typedef enum RTP_ERROR {
/// \cond DO_NOT_DOCUMENT
/* Positive values are internal status codes, not user-facing errors. */
RTP_MULTIPLE_PKTS_READY = 6,
RTP_PKT_READY = 5,
RTP_PKT_MODIFIED = 4,
RTP_PKT_NOT_HANDLED = 3,
RTP_INTERRUPTED = 2,
RTP_NOT_READY = 1,
/// \endcond
RTP_OK = 0, ///< Success
RTP_GENERIC_ERROR = -1, ///< Generic error condition
RTP_SOCKET_ERROR = -2, ///< Failed to create socket
RTP_BIND_ERROR = -3, ///< Failed to bind to interface
RTP_INVALID_VALUE = -4, ///< Invalid value
RTP_SEND_ERROR = -5, ///< System call send(2) or one of its derivatives failed
RTP_MEMORY_ERROR = -6, ///< Memory allocation failed
RTP_SSRC_COLLISION = -7, ///< SSRC collision detected
RTP_INITIALIZED = -8, ///< Object already initialized
RTP_NOT_INITIALIZED = -9, ///< Object has not been initialized
RTP_NOT_SUPPORTED = -10, ///< Method/version/extension not supported
RTP_RECV_ERROR = -11, ///< System call recv(2) or one of its derivatives failed
RTP_TIMEOUT = -12, ///< Operation timed out
RTP_NOT_FOUND = -13, ///< Object not found
RTP_AUTH_TAG_MISMATCH = -14, ///< Authentication tag does not match the RTP packet contents
} rtp_error_t;
/**
 * \enum RTP_FORMAT
 *
 * \brief Media/payload formats. These flags are given to uvgrtp::session::create_stream()
 */
typedef enum RTP_FORMAT {
// See RFC 3551 for more details
// static audio profiles
RTP_FORMAT_GENERIC = 0, ///< Same as PCMU
RTP_FORMAT_PCMU = 0, ///< PCMU, ITU-T G.711
// 1 is reserved in RFC 3551
// 2 is reserved in RFC 3551
RTP_FORMAT_GSM = 3, ///< GSM (Group Speciale Mobile)
RTP_FORMAT_G723 = 4, ///< G723
RTP_FORMAT_DVI4_32 = 5, ///< DVI 32 kbit/s
RTP_FORMAT_DVI4_64 = 6, ///< DVI 64 kbit/s
RTP_FORMAT_LPC = 7, ///< LPC
RTP_FORMAT_PCMA = 8, ///< PCMA
RTP_FORMAT_G722 = 9, ///< G722
RTP_FORMAT_L16_STEREO = 10, ///< L16 Stereo
RTP_FORMAT_L16_MONO = 11, ///< L16 Mono
// 12 QCELP is unsupported in uvgRTP
// 13 CN is unsupported in uvgRTP
// 14 MPA is unsupported in uvgRTP
RTP_FORMAT_G728 = 15, ///< G728
RTP_FORMAT_DVI4_441 = 16, ///< DVI 44.1 kbit/s
RTP_FORMAT_DVI4_882 = 17, ///< DVI 88.2 kbit/s
RTP_FORMAT_G729 = 18, ///< G729, 8 kbit/s
// 19 is reserved in RFC 3551
// 20 - 23 are unassigned in RFC 3551
/* static video profiles, unsupported in uvgRTP
 * 24 is unassigned
 * 25 is CelB,
 * 26 is JPEG
 * 27 is unassigned
 * 28 is nv
 * 29 is unassigned
 * 30 is unassigned
 * 31 is H261
 * 32 is MPV
 * 33 is MP2T
 * 34 is H263
 */
/* Rest of static numbers
 * 35 - 71 are unassigned
 * 72 - 76 are reserved
 * 77 - 95 are unassigned
 */
/* Formats with dynamic payload numbers 96 - 127, including default values.
 * Use RCC_DYN_PAYLOAD_TYPE flag to change the number if desired. */
RTP_FORMAT_G726_40 = 96, ///< G726, 40 kbit/s
RTP_FORMAT_G726_32 = 97, ///< G726, 32 kbit/s
RTP_FORMAT_G726_24 = 98, ///< G726, 24 kbit/s
RTP_FORMAT_G726_16 = 99, ///< G726, 16 kbit/s
RTP_FORMAT_G729D = 100, ///< G729D, 6.4 kbit/s
RTP_FORMAT_G729E = 101, ///< G729E, 11.8 kbit/s
RTP_FORMAT_GSM_EFR = 102, ///< GSM enhanced full rate speech transcoding
RTP_FORMAT_L8 = 103, ///< L8, linear audio data samples
// RED is unsupported in uvgRTP
RTP_FORMAT_VDVI = 104, ///< VDVI, variable-rate DVI4
RTP_FORMAT_OPUS = 105, ///< Opus, see RFC 7587
// H263-1998 is unsupported in uvgRTP
RTP_FORMAT_H264 = 106, ///< H.264/AVC, see RFC 6184
RTP_FORMAT_H265 = 107, ///< H.265/HEVC, see RFC 7798
RTP_FORMAT_H266 = 108, ///< H.266/VVC
RTP_FORMAT_ATLAS = 109 ///< V3C atlas data
} rtp_format_t;
/**
 * \enum RTP_FLAGS
 *
 * \brief These flags are given to uvgrtp::media_stream::push_frame()
 * and they can be OR'ed together
 */
typedef enum RTP_FLAGS {
RTP_NO_FLAGS = 0, ///< Use this if you have no RTP flags
/// \cond DO_NOT_DOCUMENT
/** Obsolete flags, kept so old values are not reused for new features. */
RTP_OBSOLETE = 1,
RTP_SLICE = 1, // used to do what RTP_NO_H26X_SCL does, may do something different in the future
/// \endcond
/** Make a copy of the frame and perform operation on the copy. Cannot be used with unique_ptr. */
RTP_COPY = 1 << 1,
/** By default, uvgRTP searches for start code prefixes (0x000001 or 0x00000001)
 * from the frame to divide NAL units and remove the prefix. If you instead
 * want to provide the NAL units without the start code prefix yourself,
 * you may use this flag to disable Start Code Lookup (SCL) and the frames
 * will be treated as send-ready NAL units. */
RTP_NO_H26X_SCL = 1 << 2,
/** Disable the use of Aggregation Packets in H26x formats **/
RTP_H26X_DO_NOT_AGGR = 1 << 3
} rtp_flags_t;
/**
 * \enum RTP_CTX_ENABLE_FLAGS
 *
 * \brief RTP context enable flags
 *
 * \details These flags are passed to uvgrtp::session::create_stream and can be OR'ed together
 */
enum RTP_CTX_ENABLE_FLAGS {
RCE_NO_FLAGS = 0, ///< Use this if you have no RCE flags
/// \cond DO_NOT_DOCUMENT
RCE_OBSOLETE = 1, ///< for checking if user inputs obsolete flags
/// \endcond
// These can be used to specify what the address does for one address create session
RCE_SEND_ONLY = 1 << 1, ///< address/port interpreted as remote, no binding to local socket
RCE_RECEIVE_ONLY = 1 << 2, ///< address/port interpreted as local, sending not possible
/** Use SRTP for this connection */
RCE_SRTP = 1 << 3,
/** Use ZRTP for key management
 *
 * If this flag is provided, before the session starts,
 * ZRTP will negotiate keys with the remote participants
 * and these keys are used as salting/keying material for the session.
 *
 * This flag must be coupled with RCE_SRTP and is mutually exclusive
 * with RCE_SRTP_KMNGMNT_USER. */
RCE_SRTP_KMNGMNT_ZRTP = 1 << 4,
/** Use user-defined way to manage keys
 *
 * If this flag is provided, before the media transportation starts,
 * user must provide a master key and salt from which SRTP session
 * keys are derived
 *
 * This flag must be coupled with RCE_SRTP and is mutually exclusive
 * with RCE_SRTP_KMNGMNT_ZRTP */
RCE_SRTP_KMNGMNT_USER = 1 << 5,
/** By default, uvgRTP restores the stream by prepending 3 or 4 byte start code to each received
 * H26x frame, so there is no difference with sender input. You can remove start code prefix with
 * this flag */
RCE_NO_H26X_PREPEND_SC = 1 << 6,
/** Use this flag to discard inter frames that don't have their previous dependencies
 * arrived. Does not work if the dependencies are not in monotonic order. */
RCE_H26X_DEPENDENCY_ENFORCEMENT = 1 << 7,
/** Fragment frames into RTP packets of MTU size (1492 bytes).
 *
 * Some RTP profiles define fragmentation by setting the marker bit indicating the
 * last fragment of the frame. You can enable this functionality using this flag at
 * both sender and receiver.
 */
RCE_FRAGMENT_GENERIC = 1 << 8,
/** Enable System Call Clustering (SCC). Sender side flag.
 * The benefit of SCC is reduced CPU usage at the sender, but its cost is increased chance of
 * losing frames at the receiving end due to too many packets arriving at once.*/
RCE_SYSTEM_CALL_CLUSTERING = 1 << 9,
/** Disable RTP payload encryption */
RCE_SRTP_NULL_CIPHER = 1 << 10,
/** Enable RTP packet authentication
 *
 * This flag forces the security layer to add authentication tag
 * to each outgoing RTP packet for all streams that have SRTP enabled.
 *
 * NOTE: this flag must be coupled with at least RCE_SRTP */
RCE_SRTP_AUTHENTICATE_RTP = 1 << 11,
/** Enable packet replay protection */
RCE_SRTP_REPLAY_PROTECTION = 1 << 12,
/** Enable RTCP for the media stream.
 * If SRTP is enabled, SRTCP is used instead */
RCE_RTCP = 1 << 13,
/** If the Mediastream object is used as a unidirectional stream
 * but holepunching has been enabled, this flag can be used to make
 * uvgRTP periodically send a short UDP datagram to keep the hole
 * in the firewall open */
RCE_HOLEPUNCH_KEEPALIVE = 1 << 14,
/** Use 192-bit keys with SRTP, only user key management is supported */
RCE_SRTP_KEYSIZE_192 = 1 << 15,
/** Use 256-bit keys with SRTP, only user key management is supported */
RCE_SRTP_KEYSIZE_256 = 1 << 16,
/** Select which ZRTP stream performs the Diffie-Hellman exchange (default) */
RCE_ZRTP_DIFFIE_HELLMAN_MODE = 1 << 17,
/** Select which ZRTP stream does not perform Diffie-Hellman exchange */
RCE_ZRTP_MULTISTREAM_MODE = 1 << 18,
/** Force uvgRTP to send packets at certain framerate (default 30 fps) */
RCE_FRAME_RATE = 1 << 19,
/** Paces the sending of frame fragments within frame interval (default 1/30 s) */
RCE_PACE_FRAGMENT_SENDING = 1 << 20,
/** Use a single UDP port for both RTP and RTCP transmission (default RTCP port is +1) **/
RCE_RTCP_MUX = 1 << 21,
/// \cond DO_NOT_DOCUMENT
RCE_LAST = 1 << 22
/// \endcond
}; // maximum is 1 << 30 for int
/**
 * \enum RTP_CTX_CONFIGURATION_FLAGS
 *
 * \brief RTP context configuration flags
 *
 * \details These flags are given to uvgrtp::media_stream::configure_ctx
 */
enum RTP_CTX_CONFIGURATION_FLAGS {
/// \cond DO_NOT_DOCUMENT
RCC_NO_FLAGS = 0, // This flag has no purpose
/// \endcond
/** How large is the receiver UDP buffer size
 *
 * Default value is 4 MB
 *
 * For video with high bitrate (100+ fps 4K), it is advisable to set this
 * to a high number to prevent OS from dropping packets */
RCC_UDP_RCV_BUF_SIZE = 1,
/** How large is the sender UDP buffer size
 *
 * Default value is 4 MB
 *
 * For video with high bitrate (100+ fps 4K), it is advisable to set this
 * to a high number to prevent OS from dropping packets */
RCC_UDP_SND_BUF_SIZE = 2,
/** How large is the uvgRTP receiver ring buffer
 *
 * Default value is 4 MB
 *
 * For video with high bitrate (100+ fps 4K), it is advisable to set this
 * to a high number to prevent uvgRTP from overwriting previous packets */
RCC_RING_BUFFER_SIZE = 3,
/** How many milliseconds is each frame waited for until it is considered lost.
 *
 * Default is 500 milliseconds
 *
 * This is valid only for fragmented frames,
 * i.e. RTP_FORMAT_H26X and RTP_FORMAT_GENERIC with RCE_FRAGMENT_GENERIC (TODO) */
RCC_PKT_MAX_DELAY = 4,
/** Change uvgRTP's default payload number in RTP header */
RCC_DYN_PAYLOAD_TYPE = 5,
/** Change uvgRTP's clock rate in RTP header and RTP timestamp calculations */
RCC_CLOCK_RATE = 6,
/** Set a maximum value for the Ethernet frame size assumed by uvgRTP.
 *
 * Default is 1492; after subtracting the IP, UDP, and RTP headers
 * this gives a payload size of 1452 bytes.
 *
 * If application wishes to use small UDP datagram,
 * it can set MTU size to, for example, 500 bytes or if it wishes
 * to use jumbo frames, it can set the MTU size to 9000 bytes */
RCC_MTU_SIZE = 7,
/** Set the numerator of frame rate used by uvgRTP.
 *
 * Default is 30.
 *
 * Setting the fps for uvgRTP serves two possible functions:
 * 1) if RCE_FRAME_RATE has been set, the fps is enforced and
 * uvgRTP tries to send frames at this exact frame rate,
 * 2) if RCE_PACE_FRAGMENT_SENDING has been set, the fragments are sent at a constant pace
 * spaced out evenly within frame interval */
RCC_FPS_NUMERATOR = 8,
/** Set the denominator of frame rate used by uvgRTP.
 *
 * Default is 1
 *
 * See RCC_FPS_NUMERATOR for more info.
 */
RCC_FPS_DENOMINATOR = 9,
/** Set the local SSRC of the stream manually
 *
 * By default local SSRC is generated randomly
 */
RCC_SSRC = 10,
/** Set the remote SSRC of the stream manually
 *
 * By default remote SSRC is generated randomly
 */
RCC_REMOTE_SSRC = 11,
/** Set bandwidth for the session
 *
 * uvgRTP chooses this automatically depending on the format of the data being transferred.
 * Use this flag to manually set the session bandwidth in kbps.
 * RTCP reporting interval depends on this session bandwidth. The interval is calculated with the
 * following formula:
 *
 * RTCP interval = 1000 * 360 / session_bandwidth_kbps
 *
 * Larger bandwidth values result in shorter RTCP intervals, and vice versa.
 * See RFC 3550 Appendix A.7 for further information on RTCP interval
 */
RCC_SESSION_BANDWIDTH = 12,
/** Set the timeout value for socket polling
 *
 * Default value is 100 ms. If you are experiencing packet loss when receiving, you can try
 * lowering this value down to 0. This will, however cause increased CPU usage in the receiver, so
 * use with caution.
 */
RCC_POLL_TIMEOUT = 13,
/// \cond DO_NOT_DOCUMENT
RCC_LAST
/// \endcond
};
extern thread_local rtp_error_t rtp_errno;

12
include/uvgrtp/version.hh Normal file
View File

@ -0,0 +1,12 @@
#pragma once
#include <cstdint>
#include <string>
namespace uvgrtp {
// Full version string of the library ("major.minor.patch" style; values
// presumably injected by the build system -- verify against CMake config).
std::string get_version();
// Individual numeric components of the library version.
uint16_t get_version_major();
uint16_t get_version_minor();
uint16_t get_version_patch();
// Git commit hash the library was built from.
std::string get_git_hash();
} // namespace uvgrtp

View File

@ -0,0 +1,27 @@
#ifndef UVGRTP_H
#define UVGRTP_H

/* The declarations below use uint8_t/uint16_t and size_t; without these
 * includes the header does not compile on its own when it is the first
 * include of a translation unit. */
#include <stdint.h>
#include <stddef.h>

#ifdef __cplusplus
extern "C"
{
#endif

/* Plain-C wrapper API around the C++ uvgRTP objects. Every handle is an
 * opaque pointer; out-parameters are written through the double pointer. */

/* Allocate a uvgRTP context and return it through uvgrtp_context. */
void uvgrtp_create_ctx(void** uvgrtp_context);
/* Create a session towards remote_address within an existing context. */
void uvgrtp_create_session(void* uvgrtp_context, void** uvgrtp_session, char* remote_address);
/* Create a media stream bound to local_port, sending to remote_port.
 * rce_flags takes RTP_CTX_ENABLE_FLAGS values. */
void uvgrtp_create_stream(void* uvgrtp_session, void** uvgrtp_stream, uint16_t local_port, uint16_t remote_port, int rce_flags);
/* Teardown; each destroy releases the object created by the matching create. */
void uvgrtp_destroy_ctx(void* uvgrtp_context);
void uvgrtp_destroy_session(void* uvgrtp_context, void* uvgrtp_session);
void uvgrtp_destroy_stream(void* uvgrtp_session, void* uvgrtp_stream);
/* Send one media frame of data_len bytes; rtp_flags takes RTP_FLAGS values. */
void uvgrtp_push_frame(void* uvgrtp_stream, uint8_t* data, size_t data_len, int rtp_flags);

#ifdef __cplusplus
}
#endif

#endif //UVGRTP_H

BIN
lib/libuvgrtp.a Normal file

Binary file not shown.

View File

@ -1,267 +0,0 @@
#include <BasicUsageEnvironment.hh>
#include <FramedSource.hh>
#include <GroupsockHelper.hh>
#include <liveMedia/liveMedia.hh>
#include <RTPInterface.hh>
#include <chrono>
#include <climits>
#include <mutex>
#include <thread>
#include "latsink.hh"
#include "source.hh"
#define BUFFER_SIZE 40 * 1000 * 1000
/* Static members of H265FramedSource (shared across all instances). */
EventTriggerId H265FramedSource::eventTriggerId = 0;
unsigned H265FramedSource::referenceCount = 0;
/* Global source instance; the sink forwards received NALs to it. */
H265FramedSource *framedSource;
/* Counters updated as frames are looped through the sink/source pair. */
static size_t frames = 0;
static size_t bytes = 0;
/* Most recently received NAL unit (set by the sink, read by the source). */
static uint8_t *nal_ptr = nullptr;
static size_t nal_size = 0;
/* Unlocked by the sink when a new NAL is available for the source. */
static std::mutex lat_mtx;
static std::chrono::high_resolution_clock::time_point start;
static std::chrono::high_resolution_clock::time_point last;
static uint8_t *buf;
static size_t offset = 0;
/* static size_t bytes = 0; */
static uint64_t current = 0;
static uint64_t period = 0;
std::chrono::high_resolution_clock::time_point s_tmr, e_tmr;
/* Watchdog thread: polls the global frame counter every 2 s and, once no
 * new frame has arrived within a full interval, prints the final
 * "<bytes> <frames> <runtime ms>" line and exits with failure status. */
static void thread_func(void)
{
unsigned prev_frames = UINT_MAX;

while (true) {
std::this_thread::sleep_for(std::chrono::milliseconds(2000));

if (prev_frames == frames)
break;
prev_frames = frames;
}

/* duration::count() returns long long on some ABIs (e.g. Windows), so
 * "%lu" was not a portable match; cast explicitly and use %llu. */
unsigned long long runtime_ms = (unsigned long long)
std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - start
).count();

fprintf(stderr, "%zu %zu %llu\n", bytes, frames, runtime_ms);
exit(EXIT_FAILURE);
}
/* Construct the sink and allocate the fixed-size receive buffer once. */
RTPLatencySink::RTPLatencySink(UsageEnvironment& env):
MediaSink(env),
fReceiveBuffer(new uint8_t[BUFFER_SIZE])
{
}
/* Intentionally empty: the receive buffer is released by uninit(), not here.
 * NOTE(review): if uninit() is never called the buffer leaks -- verify callers. */
RTPLatencySink::~RTPLatencySink()
{
}
/* Stop consuming from the source and release the receive buffer.
 * Safe to call once; the destructor does not free the buffer itself. */
void RTPLatencySink::uninit()
{
stopPlaying();
/* The buffer was allocated with new[], so it must be released with
 * delete[] -- plain delete on an array is undefined behaviour. */
delete[] fReceiveBuffer;
fReceiveBuffer = nullptr; // guard against accidental reuse/double-free
}
void RTPLatencySink::afterGettingFrame(
void *clientData,
unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds
)
{
((RTPLatencySink *)clientData)->afterGettingFrame(
frameSize,
numTruncatedBytes,
presentationTime,
durationInMicroseconds
);
}
/* Per-frame callback: records the received NAL, wakes the loopback source,
 * and terminates the process with final statistics after 602 frames. */
void RTPLatencySink::afterGettingFrame(
unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds
)
{
(void)frameSize, (void)numTruncatedBytes;
(void)presentationTime, (void)durationInMicroseconds;
/* start loop that monitors activity and if there has been
 * no activity for 2s (same as uvgRTP) the receiver is stopped) */
if (!frames)
(void)new std::thread(thread_func);
//fprintf(stderr, "got frame %zu %zu\n", frames + 1, frameSize);
/* Publish the received NAL to the loopback source via the shared globals,
 * then release lat_mtx so the source's deliverFrame() can pick it up. */
nal_ptr = fReceiveBuffer;
nal_size = frameSize;
lat_mtx.unlock();
framedSource->deliver_frame();
/* 602 = expected frame count for the test sequence; print totals and quit.
 * NOTE(review): "%lu" assumes count() is long -- not portable to Windows. */
if (++frames == 602) {
fprintf(stderr, "%zu %zu %lu\n", bytes, frames,
std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - start
).count()
);
exit(EXIT_SUCCESS);
}
continuePlaying();
}
/* No-op: all work happens in afterGettingFrame(); kept to satisfy the interface. */
void RTPLatencySink::process()
{
}
/* Request the next frame from the upstream source into fReceiveBuffer.
 * Returns False when no source is attached (stream stopped). */
Boolean RTPLatencySink::continuePlaying()
{
if (!fSource)
return False;
fSource->getNextFrame(
fReceiveBuffer,
BUFFER_SIZE,
afterGettingFrame, // static trampoline above
this,
onSourceClosure,
this
);
return True;
}
/* Factory following the live555 createNew() convention. */
H265FramedSource *H265FramedSource::createNew(UsageEnvironment& env, unsigned fps)
{
auto *source = new H265FramedSource(env, fps);
return source;
}
/* Construct the loopback source and set the global frame period.
 * env: live555 usage environment; fps: target frame rate. */
H265FramedSource::H265FramedSource(UsageEnvironment& env, unsigned fps):
FramedSource(env),
fps_(fps)
{
/* Frame period in microseconds. Floating-point division avoids the old
 * integer truncation: (1000 / 30) * 1000 gave 33000 us instead of 33333. */
period = (uint64_t)(1000000.0 / fps);

/* Count live instances so the destructor's --referenceCount only tears
 * down the event trigger for the last instance. Without this increment
 * the unsigned counter wraps below zero and the trigger never gets freed. */
++referenceCount;

if (!eventTriggerId)
eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}
/* Public wrapper so the RTP sink can push the newly received NAL unit
 * into this source without touching the protected live555 API. */
void H265FramedSource::deliver_frame()
{
deliverFrame();
}
/* Release the shared event trigger when the last instance goes away.
 * NOTE(review): assumes each constructor incremented referenceCount; if it
 * did not, this pre-decrement of 0 wraps and the trigger is never deleted. */
H265FramedSource::~H265FramedSource()
{
if (!--referenceCount) {
envir().taskScheduler().deleteEventTrigger(eventTriggerId);
eventTriggerId = 0;
}
}
/* live555 entry point: attempt an immediate delivery of the pending NAL. */
void H265FramedSource::doGetNextFrame()
{
deliverFrame();
}
void H265FramedSource::deliverFrame0(void *clientData)
{
((H265FramedSource *)clientData)->deliverFrame();
}
/* Copy the most recently received NAL (nal_ptr/nal_size globals) into the
 * buffer live555 provided (fTo) and complete the delivery. */
void H265FramedSource::deliverFrame()
{
/* The framework may trigger us before it wants data; bail out quietly. */
if (!isCurrentlyAwaitingData())
return;
/* lat_mtx is unlocked by the sink when a fresh NAL is ready; a failed
 * try_lock means nothing new has arrived yet. */
if (!lat_mtx.try_lock())
return;
uint8_t *newFrameDataStart = nal_ptr;
unsigned newFrameSize = nal_size;
bytes += newFrameSize;
/* Truncate if the NAL does not fit in the buffer live555 gave us. */
if (newFrameSize > fMaxSize) {
fFrameSize = fMaxSize;
fNumTruncatedBytes = newFrameSize - fMaxSize;
} else {
fFrameSize = newFrameSize;
}
fDurationInMicroseconds = 0;
memmove(fTo, newFrameDataStart, fFrameSize);
/* Hand the frame back to the framework (schedules the next request). */
FramedSource::afterGetting(this);
}
/* Build the loopback pipeline: receive H.265 RTP on the local address and
 * immediately re-send each frame to the remote address (latency echo).
 * Blocks forever in the live555 event loop; termination happens via exit()
 * in the sink callbacks. */
static int receiver(std::string local_address, int local_port,
std::string remote_address, int remote_port)
{
H265VideoStreamDiscreteFramer *framer;
TaskScheduler *scheduler;
UsageEnvironment *env;
RTPLatencySink *sink;
struct in_addr addr;
RTPSink *videoSink;
RTPSource *source;
scheduler = BasicTaskScheduler::createNew();
env = BasicUsageEnvironment::createNew(*scheduler);
OutPacketBuffer::maxSize = 40 * 1000 * 1000;
/* Locked until the sink publishes the first NAL for the sender side. */
lat_mtx.lock();
/* receiver */
addr.s_addr = our_inet_addr(local_address.c_str());
Groupsock recv_sock(*env, addr, Port(local_port), 255);
source = H265VideoRTPSource::createNew(*env, &recv_sock, 96); // payload type 96
sink = new RTPLatencySink(*env);
/* sender */
addr.s_addr = our_inet_addr(remote_address.c_str());
Groupsock send_socket(*env, addr, Port(remote_port), 255);
framedSource = H265FramedSource::createNew(*env, 30);
framer = H265VideoStreamDiscreteFramer::createNew(*env, framedSource);
videoSink = H265VideoRTPSink::createNew(*env, &send_socket, 96);
videoSink->startPlaying(*framer, NULL, videoSink);
sink->startPlaying(*source, nullptr, nullptr);
env->taskScheduler().doEventLoop();
return 0;
}
/* CLI entry point: <local address> <local port> <remote address>
 * <remote port> <format> <srtp>. Format/srtp are parsed for interface
 * compatibility with the other benchmarks but unused by this variant. */
int main(int argc, char **argv)
{
if (argc != 7) {
/* argv[0] is the executable name; __FILE__ would print the source path. */
fprintf(stderr, "usage: %s <local address> <local port> <remote address> <remote port> \
<format> <srtp>\n", argv[0]);
return EXIT_FAILURE;
}
std::string local_address = argv[1];
int local_port = atoi(argv[2]);
std::string remote_address = argv[3];
int remote_port = atoi(argv[4]);
bool vvc_enabled = get_vvc_state(argv[5]);
bool srtp_enabled = get_srtp_state(argv[6]);
/* Explicitly discard the unused options to silence -Wunused warnings. */
(void)vvc_enabled;
(void)srtp_enabled;
return receiver(local_address, local_port, remote_address, remote_port);
}

View File

@ -1,408 +0,0 @@
#include "../util/util.hh"
#include <BasicUsageEnvironment.hh>
#include <FramedSource.hh>
#include <GroupsockHelper.hh>
#include <liveMedia/liveMedia.hh>
#include <RTPInterface.hh>
#include <chrono>
#include <climits>
#include <mutex>
#include <queue>
#include <thread>
#include <unordered_map>
#include <string>
#include "latsource.hh"
#include "sink.hh"
using namespace std::chrono;
#define BUFFER_SIZE 40 * 1000 * 1000
/* Packs a frame number (mod 64) and a size into one 32-bit-ish key. */
#define KEY(frame, size) ((((frame) % 64) << 26) + (size))
/* Static members of H265LatencyFramedSource (shared across instances). */
EventTriggerId H265LatencyFramedSource::eventTriggerId = 0;
unsigned H265LatencyFramedSource::referenceCount = 0;
/* Pacing state: frame index and per-frame period in microseconds. */
static uint64_t current = 0;
static uint64_t period = 0;
static bool initialized = false;
/* Latency accumulators, split into intra and inter frame totals (ms). */
static size_t frames = 0;
static size_t nintras = 0;
static size_t ninters = 0;
static size_t intra_total = 0;
static size_t inter_total = 0;
static size_t frame_total = 0;
static std::mutex lat_mtx;
static std::queue<std::pair<size_t, uint8_t *>> nals;
static high_resolution_clock::time_point s_tmr, start;
/* Maps a frame-size key to (send timestamp, frame size). */
typedef std::pair<high_resolution_clock::time_point, size_t> finfo;
static std::unordered_map<uint64_t, finfo> timestamps;
/* FFmpeg-derived Annex B start-code scanner: returns a pointer to the first
 * 00 00 01 sequence in [p, end), or end+3 (adjusted sentinel) if none.
 * The middle loop reads the buffer one aligned 32-bit word at a time and
 * uses a bit trick to test all four bytes for zero at once. */
static const uint8_t *ff_avc_find_startcode_internal(const uint8_t *p, const uint8_t *end)
{
/* First aligned address at or after p (word-at-a-time loop needs alignment). */
const uint8_t *a = p + 4 - ((intptr_t)p & 3);
/* Byte-wise scan up to the alignment boundary. */
for (end -= 3; p < a && p < end; p++) {
if (p[0] == 0 && p[1] == 0 && p[2] == 1)
return p;
}
for (end -= 3; p < end; p += 4) {
uint32_t x = *(const uint32_t*)p;
//      if ((x - 0x01000100) & (~x) & 0x80008000) // little endian
//      if ((x - 0x00010001) & (~x) & 0x00800080) // big endian
if ((x - 0x01010101) & (~x) & 0x80808080) { // generic: some byte of x is zero
if (p[1] == 0) {
if (p[0] == 0 && p[2] == 1)
return p;
if (p[2] == 0 && p[3] == 1)
return p+1;
}
if (p[3] == 0) {
if (p[2] == 0 && p[4] == 1)
return p+2;
if (p[4] == 0 && p[5] == 1)
return p+3;
}
}
}
/* Tail: scan the last few bytes byte-wise (end was reduced by 3 twice). */
for (end += 3; p < end; p++) {
if (p[0] == 0 && p[1] == 0 && p[2] == 1)
return p;
}
return end + 3;
}
/* Wrapper around ff_avc_find_startcode_internal() that widens a three-byte
 * match (00 00 01) to include a preceding zero byte (00 00 00 01). */
const uint8_t *ff_avc_find_startcode(const uint8_t *p, const uint8_t *end)
{
const uint8_t *found = ff_avc_find_startcode_internal(p, end);

/* A zero byte directly before the match means the start code is the
 * four-byte form; step back one byte to include it. */
if (p < found && found < end && found[-1] == 0)
--found;

return found;
}
/* Iterates over the Annex B bitstream in input_file, returning one NAL unit
 * (size, pointer) per call, or (0, nullptr) at end of stream. All cursor
 * state is static, so this walks a single file per process and is not
 * reentrant; input_file is only read on the first call. */
static std::pair<size_t, uint8_t *> find_next_nal(std::string input_file)
{
static size_t len = 0;
static uint8_t *p = NULL;
static uint8_t *end = NULL;
static uint8_t *nal_start = NULL;
static uint8_t *nal_end = NULL;
/* First call: map the whole file into memory and find the first start code. */
if (!p) {
p = (uint8_t *)get_mem(input_file, len);
end = p + len;
len = 0;
nal_start = (uint8_t *)ff_avc_find_startcode(p, end);
}
/* Skip the zero bytes of the start code itself. */
while (nal_start < end && !*(nal_start++))
;
if (nal_start == end)
return std::make_pair(0, nullptr);
nal_end = (uint8_t *)ff_avc_find_startcode(nal_start, end);
auto ret = std::make_pair((size_t)(nal_end - nal_start), (uint8_t *)nal_start);
/* NOTE(review): len is reused as a running byte counter after the first
 * call; the +4 presumably accounts for the start code -- verify. */
len += 4 + nal_end - nal_start;
nal_start = nal_end;
return ret;
}
/* Factory following the live555 createNew() convention. */
H265LatencyFramedSource *H265LatencyFramedSource::createNew(UsageEnvironment& env, std::string input_file)
{
auto *source = new H265LatencyFramedSource(env, input_file);
return source;
}
/* Construct the file-backed source; frames are paced at a fixed 30 fps. */
H265LatencyFramedSource::H265LatencyFramedSource(UsageEnvironment& env, std::string input_file):
FramedSource(env),
input_file_(input_file)
{
/* Frame period in microseconds (1/30 s). */
period = (uint64_t)((1000 / (float)30) * 1000);

/* Balance the --referenceCount in the destructor; without this increment
 * the unsigned counter wraps and the event trigger is never deleted. */
++referenceCount;

if (!eventTriggerId)
eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}
/* Release the shared event trigger when the last instance goes away.
 * NOTE(review): assumes each constructor incremented referenceCount; if it
 * did not, this pre-decrement of 0 wraps and the trigger is never deleted. */
H265LatencyFramedSource::~H265LatencyFramedSource()
{
if (!--referenceCount) {
envir().taskScheduler().deleteEventTrigger(eventTriggerId);
eventTriggerId = 0;
}
}
/* live555 entry point: start the pacing clock on the first call, then
 * deliver the next NAL unit from the input file. */
void H265LatencyFramedSource::doGetNextFrame()
{
if (!initialized) {
s_tmr = std::chrono::high_resolution_clock::now();
initialized = true;
}
deliverFrame();
}
void H265LatencyFramedSource::deliverFrame0(void *clientData)
{
((H265LatencyFramedSource *)clientData)->deliverFrame();
}
/* Deliver the next NAL unit to live555, pacing full frames to the target
 * rate and recording a send timestamp keyed by NAL size so the receive
 * path can compute round-trip latency. */
void H265LatencyFramedSource::deliverFrame()
{
if (!isCurrentlyAwaitingData())
return;
auto nal = find_next_nal(input_file_);
/* End of stream: stop delivering. */
if (!nal.first || !nal.second)
return;
/* Sleep until this frame's scheduled send time (current * period). */
uint64_t runtime = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::high_resolution_clock::now() - s_tmr
).count();
if (runtime < current * period)
std::this_thread::sleep_for(std::chrono::microseconds(current * period - runtime));
/* try to hold fps for intra/inter frames only */
/* NOTE(review): >1500 bytes is used as a heuristic for "real" video
 * frames vs. small parameter-set NALs -- verify threshold. */
if (nal.first > 1500)
++current;
/* Start timer for the frame
 * RTP sink will calculate the time difference once the frame is received */
/* printf("send frame %u, size %u\n", current, nal.first); */
/* The NAL size doubles as the lookup key, so sizes must be unique. */
uint64_t key = nal.first;
if (timestamps.find(key) != timestamps.end()) {
fprintf(stderr, "cannot use size as timestamp for this frame!\n");
exit(EXIT_FAILURE);
}
timestamps[key].first = std::chrono::high_resolution_clock::now();
timestamps[key].second = nal.first;
uint8_t *newFrameDataStart = nal.second;
unsigned newFrameSize = nal.first;
/* Truncate if the NAL does not fit in the buffer live555 gave us. */
if (newFrameSize > fMaxSize) {
fFrameSize = fMaxSize;
fNumTruncatedBytes = newFrameSize - fMaxSize;
} else {
fFrameSize = newFrameSize;
}
fDurationInMicroseconds = 0;
memmove(fTo, newFrameDataStart, fFrameSize);
FramedSource::afterGetting(this);
}
/* Watchdog thread: polls the frame counter every 5 s; when no new frame
 * arrived within a full interval, prints the latency averages gathered so
 * far and exits with failure status (frames were lost). */
static void thread_func(void)
{
unsigned prev_frames = UINT_MAX;
while (true) {
std::this_thread::sleep_for(std::chrono::milliseconds(5000));
if (prev_frames == frames) {
/* fprintf(stderr, "frame lost\n"); */
break;
}
prev_frames = frames;
}
/* Totals are in milliseconds; averages split by intra/inter NAL type. */
fprintf(stderr, "%zu: intra %lf, inter %lf, avg %lf\n",
frames,
intra_total / (float)nintras,
inter_total / (float)ninters,
frame_total / (float)frames
);
exit(EXIT_FAILURE);
}
/* Construct the measurement sink; the receive buffer is allocated once. */
RTPSink_::RTPSink_(UsageEnvironment& env):
MediaSink(env),
fReceiveBuffer(new uint8_t[BUFFER_SIZE])
{
}
/* Intentionally empty: the receive buffer is released by uninit(), not here.
 * NOTE(review): if uninit() is never called the buffer leaks -- verify callers. */
RTPSink_::~RTPSink_()
{
}
/* Stop consuming from the source and release the receive buffer. */
void RTPSink_::uninit()
{
stopPlaying();
/* Allocated with new[] in the constructor, so delete[] is required --
 * plain delete on an array is undefined behaviour. */
delete[] fReceiveBuffer;
fReceiveBuffer = nullptr; // guard against accidental reuse/double-free
}
void RTPSink_::afterGettingFrame(
void *clientData,
unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds
)
{
((RTPSink_ *)clientData)->afterGettingFrame(
frameSize,
numTruncatedBytes,
presentationTime,
durationInMicroseconds
);
}
/* Per-frame callback on the loopback path: finds the send timestamp by the
 * frame-size key, accumulates intra/inter latency totals, and prints the
 * final averages after 601 frames. Only the printf format specifiers were
 * corrected; the measurement logic is unchanged. */
void RTPSink_::afterGettingFrame(
unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds
)
{
(void)frameSize, (void)numTruncatedBytes;
(void)presentationTime, (void)durationInMicroseconds;
/* start loop that monitors activity and if there has been
 * no activity for 2s (same as uvgRTP) the receiver is stopped) */
if (!frames)
(void)new std::thread(thread_func);
uint64_t diff;
uint8_t nal_type;
uint64_t key = frameSize;
/* printf("recv frame %zu, size %u\n", frames + 1, frameSize); */
if (timestamps.find(key) == timestamps.end()) {
/* key is uint64_t: %zu only matches size_t and breaks on 32-bit
 * targets, so cast explicitly and use %llu. */
printf("frame %zu,%llu not found from set!\n", frames + 1,
(unsigned long long)key);
exit(EXIT_FAILURE);
}
if (timestamps[key].second != frameSize) {
/* frameSize is unsigned, not size_t: %u is the matching specifier. */
printf("frame size mismatch (%zu vs %u)\n", timestamps[key].second, frameSize);
exit(EXIT_FAILURE);
}
diff = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::high_resolution_clock::now() - timestamps[key].first
).count();
timestamps.erase(key);
/* HEVC NAL header: type is bits 1..6 of the first byte.
 * 19 = IDR_W_RADL (intra), 1 = TRAIL_R (inter). */
nal_type = (fReceiveBuffer[0] >> 1) & 0x3f;
if (nal_type == 19 || nal_type == 1) {
if (nal_type == 19)
nintras++, intra_total += (diff / 1000);
else
ninters++, inter_total += (diff / 1000);
frame_total += (diff / 1000);
}
/* 601 = expected frame count for the test sequence. */
if (++frames == 601) {
fprintf(stderr, "%zu: intra %lf, inter %lf, avg %lf\n",
frames,
intra_total / (float)nintras,
inter_total / (float)ninters,
frame_total / (float)frames
);
exit(EXIT_SUCCESS);
}
continuePlaying();
}
/* Request the next frame from the upstream source into fReceiveBuffer.
 * Returns False when no source is attached (stream stopped). */
Boolean RTPSink_::continuePlaying()
{
if (!fSource)
return False;
fSource->getNextFrame(
fReceiveBuffer,
BUFFER_SIZE,
afterGettingFrame, // static trampoline above
this,
onSourceClosure,
this
);
return True;
}
/* Build the latency-measurement pipeline: stream input_file as H.265 RTP to
 * the remote address and receive the echoed frames on the local address.
 * Blocks forever in the live555 event loop; termination happens via exit()
 * in the sink callbacks.
 * Note: the stray "(void)addr;" was removed -- no variable named addr is
 * declared in this function, so the statement did not compile. */
static int sender(std::string input_file, std::string local_address, int local_port,
std::string remote_address, int remote_port)
{
H265VideoStreamDiscreteFramer *framer;
H265LatencyFramedSource *framedSource;
TaskScheduler *scheduler;
UsageEnvironment *env;
RTPSink *videoSink;
RTPSource *source;
RTPSink_ *sink;
scheduler = BasicTaskScheduler::createNew();
env = BasicUsageEnvironment::createNew(*scheduler);
OutPacketBuffer::maxSize = 40 * 1000 * 1000;
Port send_port(remote_port);
struct in_addr dst_addr;
dst_addr.s_addr = our_inet_addr(remote_address.c_str());
Port recv_port(local_port);
struct in_addr src_addr;
src_addr.s_addr = our_inet_addr(local_address.c_str());
Groupsock send_socket(*env, dst_addr, send_port, 255);
Groupsock recv_socket(*env, src_addr, recv_port, 255);
/* sender */
videoSink = H265VideoRTPSink::createNew(*env, &send_socket, 96); // payload type 96
framedSource = H265LatencyFramedSource::createNew(*env, input_file);
framer = H265VideoStreamDiscreteFramer::createNew(*env, framedSource);
/* receiver */
source = H265VideoRTPSource::createNew(*env, &recv_socket, 96);
sink = new RTPSink_(*env);
/* Give the peer a moment to come up before streaming starts. */
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
videoSink->startPlaying(*framer, NULL, videoSink);
sink->startPlaying(*source, nullptr, nullptr);
env->taskScheduler().doEventLoop();
return 0;
}
/* CLI entry point: <input file> <local address> <local port>
 * <remote address> <remote port> <fps> <format> <srtp>. The fps/format/srtp
 * options are parsed for interface compatibility but unused here (the
 * source paces itself at a fixed 30 fps). */
int main(int argc, char **argv)
{
if (argc != 9) {
/* argv[0] is the executable name; __FILE__ would print the source path. */
fprintf(stderr, "usage: %s <input file> <local address> <local port> <remote address> <remote port> <fps> <format> <srtp> \n", argv[0]);
return EXIT_FAILURE;
}
std::string input_file = argv[1];
std::string local_address = argv[2];
int local_port = atoi(argv[3]);
std::string remote_address = argv[4];
int remote_port = atoi(argv[5]);
float fps = atof(argv[6]);
bool vvc_enabled = get_vvc_state(argv[7]);
bool srtp_enabled = get_srtp_state(argv[8]);
/* Explicitly discard the unused options to silence -Wunused warnings. */
(void)fps;
(void)vvc_enabled;
(void)srtp_enabled;
return sender(input_file, local_address, local_port, remote_address, remote_port);
}

View File

@ -1,116 +0,0 @@
#include <chrono>
#include <climits>
#include <thread>
#include <RTPInterface.hh>
#include "latsink.hh"
#define BUFFER_SIZE 40 * 1000 * 1000
/* Frame/byte counters and timing state for the throughput report.
 * NOTE(review): bytes is never incremented in this translation unit as
 * shown, so the report's first column stays 0 -- verify. */
static size_t frames = 0;
static size_t bytes = 0;
static std::chrono::high_resolution_clock::time_point start;
static std::chrono::high_resolution_clock::time_point last;
/* Watchdog thread: polls the frame counter every 2 s and, once no new frame
 * has arrived within a full interval, prints "<bytes> <frames> <runtime ms>"
 * and exits with failure status. */
static void thread_func(void)
{
unsigned prev_frames = UINT_MAX;

while (true) {
std::this_thread::sleep_for(std::chrono::milliseconds(2000));

if (prev_frames == frames)
break;
prev_frames = frames;
}

/* duration::count() returns long long on some ABIs (e.g. Windows), so
 * "%lu" was not a portable match; cast explicitly and use %llu. */
unsigned long long runtime_ms = (unsigned long long)
std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - start
).count();

fprintf(stderr, "%zu %zu %llu\n", bytes, frames, runtime_ms);
exit(EXIT_FAILURE);
}
/* Construct the sink; the fixed-size receive buffer is allocated up front. */
RTPLatencySink::RTPLatencySink(UsageEnvironment& env):
MediaSink(env),
fReceiveBuffer(new uint8_t[BUFFER_SIZE])
{
}
/* Intentionally empty: the receive buffer is released by uninit(), not here.
 * NOTE(review): if uninit() is never called the buffer leaks -- verify callers. */
RTPLatencySink::~RTPLatencySink()
{
}
/* Stop consuming from the source and release the receive buffer. */
void RTPLatencySink::uninit()
{
stopPlaying();
/* Allocated with new[] in the constructor, so delete[] is required --
 * plain delete on an array is undefined behaviour. */
delete[] fReceiveBuffer;
fReceiveBuffer = nullptr; // guard against accidental reuse/double-free
}
void RTPLatencySink::afterGettingFrame(
void *clientData,
unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds
)
{
((RTPLatencySink *)clientData)->afterGettingFrame(
frameSize,
numTruncatedBytes,
presentationTime,
durationInMicroseconds
);
}
/* Per-frame callback: counts received frames and terminates with the final
 * "<bytes> <frames> <runtime ms>" line after 601 frames. */
void RTPLatencySink::afterGettingFrame(
unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds
)
{
(void)frameSize, (void)numTruncatedBytes;
(void)presentationTime, (void)durationInMicroseconds;
/* start loop that monitors activity and if there has been
 * no activity for 2s (same as uvgRTP) the receiver is stopped) */
if (!frames)
(void)new std::thread(thread_func);
fprintf(stderr, "got frame\n");
/* 601 = expected frame count for the test sequence.
 * NOTE(review): bytes is never updated in this file, so it prints 0;
 * "%lu" also assumes count() is long -- not portable to Windows. */
if (++frames == 601) {
fprintf(stderr, "%zu %zu %lu\n", bytes, frames,
std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - start
).count()
);
exit(EXIT_SUCCESS);
}
continuePlaying();
}
/* No-op: all work happens in afterGettingFrame(); kept to satisfy the interface. */
void RTPLatencySink::process()
{
}
/* Request the next frame from the upstream source into fReceiveBuffer.
 * Returns False when no source is attached (stream stopped). */
Boolean RTPLatencySink::continuePlaying()
{
if (!fSource)
return False;
fSource->getNextFrame(
fReceiveBuffer,
BUFFER_SIZE,
afterGettingFrame, // static trampoline above
this,
onSourceClosure,
this
);
return True;
}

View File

@ -1,32 +0,0 @@
/* Guard against multiple inclusion -- the header previously had no include
 * guard, so including it twice redefined the class. */
#pragma once

#include <H265VideoRTPSink.hh>

/* live555 MediaSink used by the latency benchmarks: receives H.265 frames
 * into a large preallocated buffer and counts/times them. The buffer is
 * allocated in the constructor and released by uninit(), not the destructor. */
class RTPLatencySink : public MediaSink
{
public:
RTPLatencySink(UsageEnvironment& env);
virtual ~RTPLatencySink();
/* Stop playing and release the receive buffer. */
void uninit();
/* Static trampoline passed to getNextFrame(); forwards to the member overload. */
static void afterGettingFrame(
void *clientData,
unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds
);
/* Per-frame bookkeeping; defined in the corresponding .cc file. */
void afterGettingFrame(
unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds
);
protected:
void process();
private:
virtual Boolean continuePlaying();
uint8_t *fReceiveBuffer;
};

View File

@ -1,187 +0,0 @@
#include "latsource.hh"
#include "../util/util.hh"
#include <GroupsockHelper.hh>
#include <FramedSource.hh>
#include <chrono>
#include <mutex>
#include <queue>
#include <thread>
/* Static members of H265LatencyFramedSource (shared across instances). */
EventTriggerId H265LatencyFramedSource::eventTriggerId = 0;
unsigned H265LatencyFramedSource::referenceCount = 0;
/* Shared bitstream/pacing state for the source callbacks below. */
static uint8_t *buf;
static size_t offset = 0;
static size_t bytes = 0;
static uint64_t current = 0;
static uint64_t period = 0;
static bool initialized = false;
std::mutex delivery_mtx;
std::queue<std::pair<size_t, uint8_t *>> nals;
std::chrono::high_resolution_clock::time_point s_tmr, e_tmr;
/* FFmpeg-derived Annex B start-code scanner: returns a pointer to the first
 * 00 00 01 sequence in [p, end), or end+3 (adjusted sentinel) if none.
 * The middle loop reads the buffer one aligned 32-bit word at a time and
 * uses a bit trick to test all four bytes for zero at once. */
static const uint8_t *ff_avc_find_startcode_internal(const uint8_t *p, const uint8_t *end)
{
/* First aligned address at or after p (word-at-a-time loop needs alignment). */
const uint8_t *a = p + 4 - ((intptr_t)p & 3);
/* Byte-wise scan up to the alignment boundary. */
for (end -= 3; p < a && p < end; p++) {
if (p[0] == 0 && p[1] == 0 && p[2] == 1)
return p;
}
for (end -= 3; p < end; p += 4) {
uint32_t x = *(const uint32_t*)p;
//      if ((x - 0x01000100) & (~x) & 0x80008000) // little endian
//      if ((x - 0x00010001) & (~x) & 0x00800080) // big endian
if ((x - 0x01010101) & (~x) & 0x80808080) { // generic: some byte of x is zero
if (p[1] == 0) {
if (p[0] == 0 && p[2] == 1)
return p;
if (p[2] == 0 && p[3] == 1)
return p+1;
}
if (p[3] == 0) {
if (p[2] == 0 && p[4] == 1)
return p+2;
if (p[4] == 0 && p[5] == 1)
return p+3;
}
}
}
/* Tail: scan the last few bytes byte-wise (end was reduced by 3 twice). */
for (end += 3; p < end; p++) {
if (p[0] == 0 && p[1] == 0 && p[2] == 1)
return p;
}
return end + 3;
}
/* Public wrapper: if the byte immediately before the reported 3-byte start
 * code is zero, step back one byte so 4-byte codes (00 00 00 01) are reported
 * from their first zero. */
const uint8_t *ff_avc_find_startcode(const uint8_t *p, const uint8_t *end)
{
    const uint8_t *sc = ff_avc_find_startcode_internal(p, end);
    if (p < sc && sc < end && sc[-1] == 0)
        --sc;
    return sc;
}
/* Return the next NAL unit as (length, pointer), or (0, nullptr) at end of
 * stream.  The entire input file is loaded into memory on the first call and
 * scanned incrementally via function-static state, so this handles exactly
 * one file per process and is not reentrant.
 * NOTE(review): "input_file_" is not a parameter or visible file-scope name —
 * presumably a global elsewhere; confirm this compiles (the sibling copy in
 * source.cc takes the file name as a parameter instead). */
static std::pair<size_t, uint8_t *> find_next_nal(void)
{
    static size_t len = 0;
    static uint8_t *p = NULL;
    static uint8_t *end = NULL;
    static uint8_t *nal_start = NULL;
    static uint8_t *nal_end = NULL;

    /* First call: map the input file into memory and find the first start code. */
    if (!p) {
        p = (uint8_t *)get_mem(input_file_, len);
        end = p + len;
        len = 0;
        nal_start = (uint8_t *)ff_avc_find_startcode(p, end);
    }

    /* Advance past the start code: skips the zero bytes and also consumes
     * the first non-zero byte (the 0x01 terminator). */
    while (nal_start < end && !*(nal_start++))
        ;
    if (nal_start == end)
        return std::make_pair(0, nullptr);

    nal_end = (uint8_t *)ff_avc_find_startcode(nal_start, end);
    auto ret = std::make_pair((size_t)(nal_end - nal_start), (uint8_t *)nal_start);
    /* NOTE(review): "len" is write-only from here on, and the "+ 4" differs
     * from the otherwise identical copy in source.cc — dead accumulation. */
    len += 4 + nal_end - nal_start;
    nal_start = nal_end;
    return ret;
}
/* Factory method (Live555 convention): instances are heap-allocated. */
H265LatencyFramedSource *H265LatencyFramedSource::createNew(UsageEnvironment& env, std::string input_file)
{
    auto *source = new H265LatencyFramedSource(env, input_file);
    return source;
}
/* Construct a framed source that replays H.265 NAL units from input_file
 * at a fixed 30 fps target. */
H265LatencyFramedSource::H265LatencyFramedSource(UsageEnvironment& env, std::string input_file):
    FramedSource(env),
    input_file_(input_file)
{
    /* BUGFIX: track live instances.  The destructor executes
     * "if (!--referenceCount)", but nothing incremented the counter, so the
     * first destruction underflowed it and the shared event trigger was
     * never released (Live555 DeviceSource reference-counting pattern). */
    ++referenceCount;

    /* 1000/fps milliseconds per frame, scaled to microseconds. */
    period = (uint64_t)((1000 / (float)30) * 1000);

    /* Create the event trigger shared by all instances on first construction. */
    if (!eventTriggerId)
        eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}
/* Destructor: the last live instance releases the shared event trigger. */
H265LatencyFramedSource::~H265LatencyFramedSource()
{
    if (!--referenceCount) {
        envir().taskScheduler().deleteEventTrigger(eventTriggerId);
        eventTriggerId = 0;
    }
}
/* Live555 callback: the downstream sink wants the next frame. */
void H265LatencyFramedSource::doGetNextFrame()
{
    /* Take the wall-clock start timestamp exactly once, on the first request. */
    if (!initialized) {
        initialized = true;
        s_tmr = std::chrono::high_resolution_clock::now();
    }
    deliverFrame();
}
void H265LatencyFramedSource::deliverFrame0(void *clientData)
{
((H265LatencyFramedSource *)clientData)->deliverFrame();
}
/* Deliver one NAL unit into the buffer Live555 provided (fTo/fMaxSize),
 * pacing delivery to the configured frame period.  When the input is
 * exhausted, prints the totals and terminates the process. */
void H265LatencyFramedSource::deliverFrame()
{
    if (!isCurrentlyAwaitingData())
        return;

    /* NOTE(review): mtx_ does not appear in the class declaration in this
     * repo; the sibling implementation in source.cc uses the file-scope
     * delivery_mtx instead — confirm which is intended. */
    mtx_.lock();
    fprintf(stderr, "send frame\n");

    auto nal = find_next_nal();
    if (!nal.first || !nal.second) {
        /* End of stream: report throughput and quit (exit() ends the
         * process, so the held lock is irrelevant here). */
        e_tmr = std::chrono::high_resolution_clock::now();
        uint64_t diff = (uint64_t)std::chrono::duration_cast<std::chrono::milliseconds>(e_tmr - s_tmr).count();
        fprintf(stderr, "%lu bytes, %lu kB, %lu MB took %lu ms %lu s\n",
            bytes, bytes / 1000, bytes / 1000 / 1000,
            diff, diff / 1000
        );
        exit(EXIT_SUCCESS);
    }

    /* Sleep until this frame's deadline if we are running ahead of schedule. */
    uint64_t runtime = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::high_resolution_clock::now() - s_tmr
    ).count();
    if (runtime < current * period)
        std::this_thread::sleep_for(std::chrono::microseconds(current * period - runtime));

    /* try to hold fps for intra/inter frames only */
    if (nal.first > 1500)
        ++current;

    uint8_t *newFrameDataStart = nal.second;
    unsigned newFrameSize = nal.first;
    bytes += newFrameSize;

    /* Truncate if the NAL does not fit into the buffer Live555 gave us. */
    if (newFrameSize > fMaxSize) {
        fFrameSize = fMaxSize;
        fNumTruncatedBytes = newFrameSize - fMaxSize;
    } else {
        fFrameSize = newFrameSize;
    }
    fDurationInMicroseconds = 0;
    memmove(fTo, newFrameDataStart, fFrameSize);

    /* BUGFIX: the lock was never released on this path, deadlocking the
     * next delivery.  Release it before notifying Live555, matching the
     * sender implementation in source.cc. */
    mtx_.unlock();
    FramedSource::afterGetting(this);
}

View File

@ -1,22 +0,0 @@
#pragma once
#include <FramedSource.hh>
/* FramedSource that replays H.265 NAL units from a file for latency testing. */
class H265LatencyFramedSource : public FramedSource {
public:
    /* Factory method (Live555 convention). */
    static H265LatencyFramedSource *createNew(UsageEnvironment& env, std::string input_file);
    /* Event trigger shared by all instances. */
    static EventTriggerId eventTriggerId;
protected:
    H265LatencyFramedSource(UsageEnvironment& env, std::string input_file);
    virtual ~H265LatencyFramedSource();
private:
    /* Deliver one NAL unit to the downstream reader. */
    void deliverFrame();
    virtual void doGetNextFrame();
    /* Static trampoline used by the event trigger. */
    static void deliverFrame0(void *clientData);
    /* Number of live instances; guards the shared trigger's lifetime. */
    static unsigned referenceCount;
    std::string input_file_;
};

View File

@ -1,51 +0,0 @@
#include <liveMedia/liveMedia.hh>
#include <BasicUsageEnvironment.hh>
#include <GroupsockHelper.hh>
#include "sink.hh"
#include <cstdlib>
/* Live555 RTP receiver tester: listens on <local address>:<local port> for
 * H.265 RTP and counts received frames until the stream ends (see sink.cc). */
int main(int argc, char **argv)
{
    if (argc != 9) {
        fprintf(stderr, "usage: ./%s <result file> <local address> <local port> <remote address> <remote port> \
<number of threads> <format> <srtp>\n", __FILE__);
        return EXIT_FAILURE;
    }

    result_filename = argv[1];
    std::string local_address = argv[2];
    int local_port = atoi(argv[3]);
    std::string remote_address = argv[4];
    int remote_port = atoi(argv[5]);
    int nthreads = atoi(argv[6]);
    bool vvc_enabled = get_vvc_state(argv[7]);
    bool srtp_enabled = get_srtp_state(argv[8]);

    /* Accepted for CLI compatibility with the other testers; the Live555
     * receiver does not use them (silences -Wall -Wextra warnings). */
    (void)remote_address; (void)remote_port; (void)nthreads;

    /* VVC and SRTP are not implemented for the Live555 tester. */
    if (vvc_enabled || srtp_enabled)
    {
        std::cerr << "Unsupported option for Live555 tester" << std::endl;
        return EXIT_FAILURE;
    }

    TaskScheduler *scheduler = BasicTaskScheduler::createNew();
    UsageEnvironment *env = BasicUsageEnvironment::createNew(*scheduler);

    Port rtpPort(local_port);
    struct in_addr dst_addr;
    dst_addr.s_addr = our_inet_addr(local_address.c_str());
    Groupsock rtpGroupsock(*env, dst_addr, rtpPort, 255);

    /* Large enough for complete H.265 access units. */
    OutPacketBuffer::maxSize = 40 * 1000 * 1000;

    RTPSource *source = H265VideoRTPSource::createNew(*env, &rtpGroupsock, 96);
    RTPSink_ *sink = new RTPSink_(*env);

    sink->startPlaying(*source, nullptr, nullptr);
    /* Does not return under normal operation: the sink calls exit() when done. */
    env->taskScheduler().doEventLoop();
    return EXIT_SUCCESS;
}

View File

@ -1,55 +0,0 @@
#include <liveMedia/liveMedia.hh>
#include <BasicUsageEnvironment.hh>
#include <GroupsockHelper.hh>
#include "source.hh"
#include "H265VideoStreamDiscreteFramer.hh"
#include <cstdlib>
#include <iostream>
/* Live555 RTP sender tester: streams H.265 NAL units from <input file> to
 * <remote address>:<remote port> at <fps>, writing results to <result file>. */
int main(int argc, char **argv)
{
    if (argc != 11) {
        fprintf(stderr, "usage: ./%s <input file> <result file> <local address> <local port> <remote address> <remote port> \
<number of threads> <fps> <format> <srtp> \n", __FILE__);
        return EXIT_FAILURE;
    }

    std::string input_file = argv[1];
    std::string result_file = argv[2];
    std::string local_address = argv[3];
    int local_port = atoi(argv[4]);
    std::string remote_address = argv[5];
    int remote_port = atoi(argv[6]);
    int nthreads = atoi(argv[7]);
    int fps = atoi(argv[8]);
    bool vvc_enabled = get_vvc_state(argv[9]);
    bool srtp_enabled = get_srtp_state(argv[10]);

    /* Accepted for CLI compatibility with the other testers; the Live555
     * sender does not use them (silences -Wall -Wextra warnings). */
    (void)local_address; (void)local_port; (void)nthreads;

    /* VVC and SRTP are not implemented for the Live555 tester. */
    if (vvc_enabled || srtp_enabled)
    {
        std::cerr << "Unsupported option for Live555 tester" << std::endl;
        return EXIT_FAILURE;
    }

    TaskScheduler *scheduler = BasicTaskScheduler::createNew();
    UsageEnvironment *env = BasicUsageEnvironment::createNew(*scheduler);

    H265FramedSource* framedSource = H265FramedSource::createNew(*env, fps, input_file, result_file);
    H265VideoStreamDiscreteFramer* framer = H265VideoStreamDiscreteFramer::createNew(*env, framedSource);

    Port rtpPort(remote_port);
    struct in_addr dst_addr;
    dst_addr.s_addr = our_inet_addr(remote_address.c_str());
    Groupsock rtpGroupsock(*env, dst_addr, rtpPort, 255);

    /* Large enough for complete H.265 access units. */
    OutPacketBuffer::maxSize = 40 * 1000 * 1000;

    RTPSink* videoSink = H265VideoRTPSink::createNew(*env, &rtpGroupsock, 96);
    videoSink->startPlaying(*framer, NULL, videoSink);
    /* Does not return under normal operation: the source calls exit() when done. */
    env->taskScheduler().doEventLoop();
    return EXIT_SUCCESS;
}

View File

@ -1,117 +0,0 @@
#include <RTPInterface.hh>
#include "sink.hh"
#include <chrono>
#include <climits>
#include <thread>
/* Receive buffer size handed to getNextFrame(); must hold a full frame. */
#define BUFFER_SIZE 1600000

size_t frames = 0;  /* frames received so far */
size_t bytes = 0;   /* payload bytes received */
std::chrono::high_resolution_clock::time_point start;  /* reception start time */
std::chrono::high_resolution_clock::time_point last;   /* not referenced in the code visible here */
/* Inactivity watchdog: wakes every two seconds and, once the frame counter
 * stops advancing (no frames for ~2 s, matching uvgRTP's timeout), prints
 * the totals to stderr and terminates the process with a failure status. */
static void thread_func(void)
{
    unsigned prev_frames = UINT_MAX;

    for (;;) {
        std::this_thread::sleep_for(std::chrono::milliseconds(2000));
        if (prev_frames == frames)
            break;
        prev_frames = frames;
    }

    fprintf(stderr, "%zu %zu %lu\n", bytes, frames,
        std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::high_resolution_clock::now() - start
        ).count()
    );
    exit(EXIT_FAILURE);
}
/* Allocate the frame receive buffer up front; released in uninit(). */
RTPSink_::RTPSink_(UsageEnvironment& env):
    MediaSink(env)
{
    fReceiveBuffer = new uint8_t[BUFFER_SIZE];
}
/* No-op: buffer cleanup is performed explicitly via uninit(). */
RTPSink_::~RTPSink_()
{
}
/* Stop playing and release the receive buffer.
 * BUGFIX: fReceiveBuffer is allocated with new[], so it must be released
 * with delete[]; plain delete on an array is undefined behaviour. */
void RTPSink_::uninit()
{
    stopPlaying();
    delete[] fReceiveBuffer;
    fReceiveBuffer = nullptr;  /* guard against dangling use / double free */
}
void RTPSink_::afterGettingFrame(
void *clientData,
unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds
)
{
((RTPSink_ *)clientData)->afterGettingFrame(
frameSize,
numTruncatedBytes,
presentationTime,
durationInMicroseconds
);
}
/* Per-frame bookkeeping: counts frames and bytes, starts the inactivity
 * watchdog on the first frame, and stops after a fixed 601 frames. */
void RTPSink_::afterGettingFrame(
    unsigned frameSize,
    unsigned numTruncatedBytes,
    struct timeval presentationTime,
    unsigned durationInMicroseconds
)
{
    (void)numTruncatedBytes;
    (void)presentationTime, (void)durationInMicroseconds;

    /* start loop that monitors activity and if there has been
     * no activity for 2s (same as uvgRTP) the receiver is stopped) */
    if (!frames) {
        /* BUGFIX: "start" was never assigned, so the reported elapsed time
         * was measured from the clock's epoch; anchor it at the first frame
         * (before spawning the watchdog, which reads it). */
        start = std::chrono::high_resolution_clock::now();
        (void)new std::thread(thread_func);
    }

    /* BUGFIX: "bytes" was printed in the results but never accumulated. */
    bytes += frameSize;

    if (++frames == 601) {
        fprintf(stderr, "%zu %zu %lu\n", bytes, frames,
            std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::high_resolution_clock::now() - start
            ).count()
        );
        exit(EXIT_SUCCESS);
    }
    continuePlaying();
}
/* No-op: all per-frame work happens in afterGettingFrame(). */
void RTPSink_::process()
{
}
/* Schedule reception of the next frame from the upstream source.
 * Returns False when no source is attached, True otherwise. */
Boolean RTPSink_::continuePlaying()
{
    if (fSource == NULL)
        return False;

    /* afterGettingFrame() fires once the source has written a complete
     * frame into fReceiveBuffer. */
    fSource->getNextFrame(fReceiveBuffer, BUFFER_SIZE,
                          afterGettingFrame, this,
                          onSourceClosure, this);
    return True;
}

View File

@ -1,32 +0,0 @@
#include <H265VideoRTPSink.hh>
/* Media sink for the Live555 throughput tester: receives H.265 RTP frames
 * and tracks reception statistics (implementation in the matching .cc). */
class RTPSink_ : public MediaSink
{
    public:
        RTPSink_(UsageEnvironment& env);
        virtual ~RTPSink_();

        /* Stop playing and release the receive buffer. */
        void uninit();

        /* Static trampoline: clientData is the instance, forwards to the
         * member overload below. */
        static void afterGettingFrame(
            void *clientData,
            unsigned frameSize,
            unsigned numTruncatedBytes,
            struct timeval presentationTime,
            unsigned durationInMicroseconds
        );

        /* Per-frame handler invoked for every received frame. */
        void afterGettingFrame(
            unsigned frameSize,
            unsigned numTruncatedBytes,
            struct timeval presentationTime,
            unsigned durationInMicroseconds
        );

    protected:
        void process();

    private:
        /* Requests the next frame from the source (MediaSink override). */
        virtual Boolean continuePlaying();

        /* Destination buffer handed to the source for each frame. */
        uint8_t *fReceiveBuffer;
};

View File

@ -1,187 +0,0 @@
#include "../util/util.hh"
#include <GroupsockHelper.hh>
#include <FramedSource.hh>
#include "source.hh"
#include <chrono>
#include <mutex>
#include <queue>
#include <thread>
#include <string>
/* Event trigger shared by all instances (Live555 DeviceSource pattern). */
EventTriggerId H265FramedSource::eventTriggerId = 0;
/* Number of live instances; guards the shared trigger's lifetime. */
unsigned H265FramedSource::referenceCount = 0;

uint8_t *buf;            /* not referenced in the code visible here */
size_t offset = 0;       /* not referenced in the code visible here */
size_t bytes = 0;        /* total payload bytes handed to Live555 */
uint64_t current = 0;    /* frames delivered so far (used for pacing) */
uint64_t period = 0;     /* target inter-frame period, microseconds */
bool initialized = false; /* set once the start timestamp is taken */

std::mutex delivery_mtx; /* serializes deliverFrame() */
std::queue<std::pair<size_t, uint8_t *>> nals;  /* not referenced here */
/* Wall-clock timestamps for the start/end of the transmission. */
std::chrono::high_resolution_clock::time_point s_tmr, e_tmr;
/* Scan [p, end) for an Annex-B start code (00 00 01); returns a pointer to
 * its first byte, or a position past the data when none exists (the wrapper
 * treats that as end of stream).  Ported from FFmpeg's avc_find_startcode():
 * the middle loop tests one aligned 32-bit word per iteration and only falls
 * back to byte comparisons when the word may contain a zero byte. */
static const uint8_t *ff_avc_find_startcode_internal(const uint8_t *p, const uint8_t *end)
{
    /* First 4-byte alignment boundary at or after p. */
    const uint8_t *a = p + 4 - ((intptr_t)p & 3);

    /* Byte-wise scan up to the alignment boundary. */
    for (end -= 3; p < a && p < end; p++) {
        if (p[0] == 0 && p[1] == 0 && p[2] == 1)
            return p;
    }

    /* Word-wise scan; end is pulled in 3 more bytes so the p[5] lookahead
     * below stays inside the buffer. */
    for (end -= 3; p < end; p += 4) {
        uint32_t x = *(const uint32_t*)p;
        // if ((x - 0x01000100) & (~x) & 0x80008000) // little endian
        // if ((x - 0x00010001) & (~x) & 0x00800080) // big endian
        if ((x - 0x01010101) & (~x) & 0x80808080) { // generic
            if (p[1] == 0) {
                if (p[0] == 0 && p[2] == 1)
                    return p;
                if (p[2] == 0 && p[3] == 1)
                    return p+1;
            }
            if (p[3] == 0) {
                if (p[2] == 0 && p[4] == 1)
                    return p+2;
                if (p[4] == 0 && p[5] == 1)
                    return p+3;
            }
        }
    }

    /* Byte-wise scan of the remaining tail. */
    for (end += 3; p < end; p++) {
        if (p[0] == 0 && p[1] == 0 && p[2] == 1)
            return p;
    }

    /* No start code found: end is original_end - 3 here, so this returns
     * the original end pointer. */
    return end + 3;
}
/* Public wrapper: if the byte just before the reported 3-byte start code is
 * zero, step back one byte so 4-byte codes (00 00 00 01) are reported from
 * their first zero. */
const uint8_t *ff_avc_find_startcode(const uint8_t *p, const uint8_t *end)
{
    const uint8_t *sc = ff_avc_find_startcode_internal(p, end);
    if (p < sc && sc < end && sc[-1] == 0)
        --sc;
    return sc;
}
/* Return the next NAL unit as (length, pointer), or (0, nullptr) at end of
 * stream.  The file is loaded into memory once on the first call; because
 * all state is function-static, the input_file argument is only honoured on
 * that first call and the function handles one file per process.
 * NOTE(review): the buffer from get_mem() is never freed (reclaimed at exit). */
static std::pair<size_t, uint8_t *> find_next_nal(const std::string& input_file)
{
    static size_t len = 0;
    static uint8_t *p = NULL;
    static uint8_t *end = NULL;
    static uint8_t *nal_start = NULL;
    static uint8_t *nal_end = NULL;

    /* First call: map the input file and locate the first start code. */
    if (!p) {
        p = (uint8_t *)get_mem(input_file, len);
        end = p + len;
        len = 0;
        nal_start = (uint8_t *)ff_avc_find_startcode(p, end);
    }

    /* Advance past the start code: skips the zero bytes and also consumes
     * the first non-zero byte (the 0x01 terminator). */
    while (nal_start < end && !*(nal_start++))
        ;
    if (nal_start == end)
        return std::make_pair(0, nullptr);

    nal_end = (uint8_t *)ff_avc_find_startcode(nal_start, end);
    auto ret = std::make_pair((size_t)(nal_end - nal_start), (uint8_t *)nal_start);
    /* NOTE(review): "len" is write-only after this point (dead accumulation). */
    len += nal_end - nal_start;
    nal_start = nal_end;
    return ret;
}
/* Factory method (Live555 convention): instances are heap-allocated. */
H265FramedSource *H265FramedSource::createNew(UsageEnvironment& env, unsigned fps,
    std::string input_file, std::string result_file)
{
    auto *source = new H265FramedSource(env, fps, input_file, result_file);
    return source;
}
/* Construct a framed source that streams H.265 NAL units from input_file
 * at the requested fps, writing results to result_file on completion. */
H265FramedSource::H265FramedSource(UsageEnvironment& env, unsigned fps,
    std::string input_file, std::string result_file):
    FramedSource(env),
    fps_(fps),
    input_file_(input_file),
    result_file_(result_file)
{
    /* BUGFIX: track live instances.  The destructor executes
     * "if (!--referenceCount)", but nothing incremented the counter, so the
     * first destruction underflowed it and the shared event trigger was
     * never released (Live555 DeviceSource reference-counting pattern). */
    ++referenceCount;

    /* 1000/fps milliseconds per frame, scaled to microseconds. */
    period = (uint64_t)((1000 / (float)fps) * 1000);

    /* Create the event trigger shared by all instances on first construction. */
    if (!eventTriggerId)
        eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}
/* Destructor: the last live instance releases the shared event trigger. */
H265FramedSource::~H265FramedSource()
{
    if (!--referenceCount) {
        envir().taskScheduler().deleteEventTrigger(eventTriggerId);
        eventTriggerId = 0;
    }
}
/* Live555 callback: the downstream sink wants the next frame. */
void H265FramedSource::doGetNextFrame()
{
    /* Take the wall-clock start timestamp exactly once, on the first request. */
    if (!initialized) {
        initialized = true;
        s_tmr = std::chrono::high_resolution_clock::now();
    }
    deliverFrame();
}
void H265FramedSource::deliverFrame0(void *clientData)
{
((H265FramedSource *)clientData)->deliverFrame();
}
/* Deliver one NAL unit into the buffer Live555 provided (fTo/fMaxSize),
 * pacing delivery to the configured frame period.  On end of stream, the
 * send results are written to result_file_ and the process terminates
 * (exit() ends the process, so the still-held mutex is moot there). */
void H265FramedSource::deliverFrame()
{
    if (!isCurrentlyAwaitingData())
        return;
    delivery_mtx.lock();

    auto nal = find_next_nal(input_file_);
    if (!nal.first || !nal.second) {
        /* e_tmr was last updated right after the previous frame's delivery. */
        uint64_t diff = (uint64_t)std::chrono::duration_cast<std::chrono::milliseconds>(e_tmr - s_tmr).count();
        write_send_results_to_file(result_file_, bytes, diff);
        exit(EXIT_SUCCESS);
    }

    /* Sleep until this frame's deadline if we are ahead of schedule. */
    uint64_t runtime = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::high_resolution_clock::now() - s_tmr
    ).count();
    if (runtime < current * period)
        std::this_thread::sleep_for(std::chrono::microseconds(current * period - runtime));

    /* try to hold fps for intra/inter frames only */
    if (nal.first > 1500)
        ++current;

    uint8_t *newFrameDataStart = nal.second;
    unsigned newFrameSize = nal.first;
    bytes += newFrameSize;

    /* Truncate if the NAL does not fit into the buffer Live555 gave us. */
    if (newFrameSize > fMaxSize) {
        fFrameSize = fMaxSize;
        fNumTruncatedBytes = newFrameSize - fMaxSize;
    } else {
        fFrameSize = newFrameSize;
    }
    fDurationInMicroseconds = 0;
    memmove(fTo, newFrameDataStart, fFrameSize);

    /* Release before notifying Live555; afterGetting() may re-enter delivery. */
    delivery_mtx.unlock();
    FramedSource::afterGetting(this);
    e_tmr = std::chrono::high_resolution_clock::now();
}

View File

@ -1,39 +0,0 @@
#pragma once
#include <FramedSource.hh>
#include <string>
/* FramedSource that streams H.265 NAL units from a file at a fixed fps. */
class H265FramedSource: public FramedSource {
public:
    /* Factory method (Live555 convention). */
    static H265FramedSource *createNew(UsageEnvironment& env, unsigned fps,
        std::string input_file, std::string result_file);
public:
    static EventTriggerId eventTriggerId;
    // Note that this is defined here to be a static class variable, because this code is intended to illustrate how to
    // encapsulate a *single* device - not a set of devices.
    // You can, however, redefine this to be a non-static member variable.

    /* NOTE(review): declared but not defined in the sources visible here
     * (the .cc defines deliverFrame instead) — possibly dead. */
    void deliver_frame();
protected:
    H265FramedSource(UsageEnvironment& env, unsigned fps, std::string input_file, std::string result_file);
    // called only by createNew(), or by subclass constructors
    virtual ~H265FramedSource();
private:
    // redefined virtual functions:
    virtual void doGetNextFrame();
    //virtual void doStopGettingFrames(); // optional
private:
    /* Static trampoline used by the event trigger. */
    static void deliverFrame0(void* clientData);
    /* Deliver one NAL unit to the downstream reader. */
    void deliverFrame();
private:
    static unsigned referenceCount; // used to count how many instances of this class currently exist
    unsigned fps_;
    std::string input_file_;
    std::string result_file_;
};

View File

@ -1,199 +0,0 @@
#include <chrono>
#include <mutex>
#include <thread>
#include "live555_util.hh"
/* Maximum number of bytes handed to Live555 per write. */
#define MAX_WRITE_SIZE 1444

/* Start-code scanner implemented elsewhere in the tester.  NOTE(review):
 * semantics inferred from use below — appears to return the offset of the
 * next frame start after "offset" (or -1) and report the start-code length
 * via start_len; confirm against its definition. */
extern int get_next_frame_start(uint8_t *data, uint32_t offset, uint32_t data_len, uint8_t& start_len);
/* Initialize cursor state and create the event trigger used to signal
 * Live555 that a frame is ready (FramedSource::afterGetting). */
FramedSourceCustom::FramedSourceCustom(UsageEnvironment *env)
    :FramedSource(*env)
{
    len_ = 0;
    off_ = 0;
    chunk_ptr_ = 0;
    afterEvent_ = envir().taskScheduler().createEventTrigger((TaskFunc*)FramedSource::afterGetting);
}
/* No-op destructor.  NOTE(review): afterEvent_ is never deleted via
 * deleteEventTrigger() — confirm whether that is intentional. */
FramedSourceCustom::~FramedSourceCustom()
{
}
/* Live555 callback: deliver the next slice of data if one is awaited. */
void FramedSourceCustom::doGetNextFrame()
{
    if (!isCurrentlyAwaitingData())
        return;
    sendFrame();
}
/* Live555 stop hook: record that no further deliveries should be scheduled. */
void FramedSourceCustom::doStopGettingFrames()
{
    noMoreTasks_ = true;
}
/* Split the current chunk (c_chunk_, c_chunk_len_) into NAL units by
 * scanning for frame start codes, queueing (length, pointer) pairs into
 * nals_.  The final NAL (last start code to end of chunk) is pushed after
 * the loop.  NOTE(review): depends on get_next_frame_start() semantics and
 * on the "offset > 4" filter — confirm against that helper's contract. */
void FramedSourceCustom::splitIntoNals()
{
    uint8_t start_len;
    int32_t prev_offset = 0;
    /* Offset of the first start code in the chunk, or -1 if none. */
    int offset = get_next_frame_start((uint8_t *)c_chunk_, 0, c_chunk_len_, start_len);
    prev_offset = offset;
    while (offset != -1) {
        offset = get_next_frame_start((uint8_t *)c_chunk_, offset, c_chunk_len_, start_len);
        if (offset > 4 && offset != -1) {
            /* NAL spans [prev_offset, offset), minus the start code bytes. */
            nals_.push(std::make_pair(offset - prev_offset - start_len, &c_chunk_[prev_offset]));
            prev_offset = offset;
        }
    }
    /* No start code at all: treat the whole chunk as one NAL. */
    if (prev_offset == -1)
        prev_offset = 0;
    nals_.push(std::make_pair(c_chunk_len_ - prev_offset, &c_chunk_[prev_offset]));
}
/* Hand at most MAX_WRITE_SIZE bytes of the current NAL unit to Live555,
 * maintaining cursors over the queued chunks (chunks_) and the NAL units of
 * the chunk currently in flight (nals_). */
void FramedSourceCustom::sendFrame()
{
    /* initialization is not ready but scheduler
     * is already asking for frames, send empty frame */
    if (len_ == 0 && off_ == 0) {
        fFrameSize = 0;
        envir().taskScheduler().triggerEvent(afterEvent_, this);
        return;
    }

    /* No chunk in flight: pop the next one and split it into NAL units. */
    if (c_chunk_ == nullptr) {
        fpt_start_ = std::chrono::high_resolution_clock::now();
        if (chunks_.empty()) {
            /* NOTE(review): printStats() only sets the stop flag and
             * returns; execution then falls through to chunks_.front() on an
             * empty queue, which is undefined behaviour — confirm the
             * intended shutdown flow. */
            printStats();
        }
        /* TODO: framer */
        auto cinfo = chunks_.front();
        chunks_.pop();
        c_chunk_ = cinfo.second;
        c_chunk_len_ = cinfo.first;
        c_chunk_off_ = 0;
        splitIntoNals();
    }
    /* No NAL in flight: pop the next one from the current chunk. */
    if (c_nal_ == nullptr) {
        auto ninfo = nals_.front();
        nals_.pop();
        c_nal_ = ninfo.second;
        c_nal_len_ = ninfo.first;
        c_nal_off_ = 0;
    }

    /* Select the slice of the current NAL to write on this call: either
     * the whole NAL (small) or the next MAX_WRITE_SIZE-byte window. */
    void *send_ptr = nullptr;
    size_t send_len = 0;
    size_t send_off = 0;
    if (c_nal_len_ < MAX_WRITE_SIZE) {
        send_len = c_nal_len_;
        send_ptr = c_nal_;
        send_off = 0;
    } else {
        int left = c_nal_len_ - c_nal_off_;
        if (left < MAX_WRITE_SIZE)
            send_len = left;
        else
            send_len = MAX_WRITE_SIZE;
        send_ptr = c_nal_;
        send_off = c_nal_off_;
    }
    memcpy(fTo, (uint8_t *)send_ptr + send_off, send_len);
    fFrameSize = send_len;
    afterGetting(this);

    /* check if we need to change chunk or nal unit */
    bool nal_written_fully = (c_nal_len_ <= c_nal_off_ + send_len);
    if (nal_written_fully && nals_.empty()) {
        /* Chunk finished: record its per-chunk processing time. */
        c_chunk_ = nullptr;
        n_calls_++;
        fpt_end_ = std::chrono::high_resolution_clock::now();
        diff_total_ += std::chrono::duration_cast<std::chrono::microseconds>(fpt_end_ - fpt_start_).count();
    } else {
        if (!nal_written_fully) {
            c_nal_off_ += send_len;
        } else {
            c_nal_ = nullptr;
        }
    }
}
/* Signal the RTP loop to stop (via the shared stop flag) and print the
 * transfer statistics collected during streaming to stderr. */
void FramedSourceCustom::printStats()
{
    *stop_ = 1;
    end_ = std::chrono::high_resolution_clock::now();

    uint64_t elapsed_ms = (uint64_t)std::chrono::duration_cast<std::chrono::milliseconds>(end_ - start_).count();
    fprintf(stderr, "%lu bytes, %lu kB, %lu MB took %lu ms %lu s\n",
        total_size_, total_size_ / 1000, total_size_ / 1000 / 1000,
        elapsed_ms, elapsed_ms / 1000
    );
    fprintf(stderr, "n calls %u\n", n_calls_);
    fprintf(stderr, "avg processing time of frame: %lu\n", diff_total_ / n_calls_);
}
/* Prime the source with an in-memory stream.  "mem" contains consecutive
 * chunks, each prefixed by its byte size as a raw uint64_t; up to 3000
 * chunks are indexed into chunks_.  stop_rtp is the shared flag that
 * printStats() sets when transmission finishes. */
void FramedSourceCustom::startFramedSource(void *mem, size_t len, char *stop_rtp)
{
    mem_ = mem;
    len_ = len;
    off_ = 0;
    n_calls_ = 0;
    diff_total_ = 0;
    stop_ = stop_rtp;
    c_chunk_ = nullptr;
    c_chunk_len_ = 0;
    c_chunk_off_ = 0;
    total_size_ = 0;

    /* Walk the size-prefixed chunk list and queue (size, pointer) pairs.
     * NOTE(review): assumes well-formed input — a size prefix running past
     * "len" is not guarded against. */
    uint64_t chunk_size = 0;
    for (size_t i = 0, k = 0; i < len && k < 3000; k++) {
        memcpy(&chunk_size, (uint8_t *)mem + i, sizeof(uint64_t));
        i += sizeof(uint64_t);
        chunks_.push(std::make_pair(chunk_size, (uint8_t *)mem_ + i));
        i += chunk_size;
        total_size_ += chunk_size;
    }
    start_ = std::chrono::high_resolution_clock::now();
}
/* Create an RTP groupsock in "connection", bound to an ephemeral local port
 * (Port(0)), and point its destination at ip_addr:portNum with TTL 255.
 * NOTE(review): only sin_addr of the two sockaddr_in structs is initialized;
 * the remaining fields are never read by the calls below. */
void createConnection(
    UsageEnvironment *env,
    Connection& connection,
    std::string sess_addr,
    std::string ip_addr,
    uint16_t portNum
)
{
    sockaddr_in addr, sess;
    inet_pton(AF_INET, ip_addr.c_str(), &addr.sin_addr);
    inet_pton(AF_INET, sess_addr.c_str(), &sess.sin_addr);

    connection.rtpPort = new Port(0);
    connection.rtpGroupsock = new Groupsock(*env, sess.sin_addr, addr.sin_addr, *connection.rtpPort);
    connection.rtpGroupsock->changeDestinationParameters(addr.sin_addr, portNum, 255);
}

View File

@ -1,78 +0,0 @@
#pragma once
#include <liveMedia.hh>
#include <FramedSource.hh>
#include <UsageEnvironment.hh>
#include <GroupsockHelper.hh>
#include <BasicUsageEnvironment.hh>
#include <Groupsock.hh>
#include <GroupsockHelper.hh>
#include <cstring>
#include <string>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <queue>
#include <chrono>
#include <mutex>
/* FramedSource that feeds a pre-loaded, size-prefixed chunk stream to
 * Live555, splitting each chunk into NAL units and writing at most
 * MAX_WRITE_SIZE bytes per delivery. */
class FramedSourceCustom : public FramedSource
{
public:
    FramedSourceCustom(UsageEnvironment *env);
    ~FramedSourceCustom();

    /* Prime the source with the in-memory stream and the shared stop flag. */
    void startFramedSource(void *mem, size_t len, char *stop_rtp);
    virtual void doGetNextFrame();

protected:
    virtual void doStopGettingFrames();

private:
    /* Deliver the next slice of the current NAL unit. */
    void sendFrame();
    /* Print transfer statistics and raise the stop flag. */
    void printStats();
    /* Split the current chunk into NAL units (fills nals_). */
    void splitIntoNals();

    /* Trigger used to report a (possibly empty) frame back to Live555. */
    EventTriggerId afterEvent_;

    bool separateInput_;      /* not referenced in the code visible here */
    bool ending_;             /* not referenced in the code visible here */
    bool removeStartCodes_;   /* not referenced in the code visible here */

    char *stop_;              /* shared stop flag set by printStats() */
    int n_calls_;             /* number of completed chunks */
    uint64_t diff_total_;     /* accumulated per-chunk processing time, us */
    uint64_t total_size_;     /* total payload size of all chunks */

    void *mem_;               /* backing memory of the chunk stream */
    int len_;                 /* total stream length */
    int off_;                 /* stream offset (unused after init) */

    /* Cursor over the NAL unit currently being written. */
    uint8_t *c_nal_;
    size_t c_nal_len_;
    size_t c_nal_off_;
    std::queue<std::pair<size_t, uint8_t *>> nals_;

    /* Cursor over the chunk currently being split/sent. */
    uint8_t *c_chunk_;
    size_t c_chunk_len_;
    size_t c_chunk_off_;
    int chunk_ptr_;
    std::queue<std::pair<size_t, uint8_t *>> chunks_;

    std::mutex mutex_;
    TaskToken currentTask_;
    /* Overall and per-chunk timing instrumentation. */
    std::chrono::high_resolution_clock::time_point start_, end_, fpt_end_, fpt_start_;
    bool noMoreTasks_;
};
/* RTP/RTCP socket bundle for one Live555 session. */
struct Connection {
    Port *rtpPort;
    Port *rtcpPort;            /* not referenced in the code visible here */
    Groupsock *rtpGroupsock;
    Groupsock *rtcpGroupsock;  /* not referenced in the code visible here */
};

/* Create conn's RTP port/groupsock and aim the destination at ip:port. */
void createConnection(UsageEnvironment *env, Connection& conn, std::string sess, std::string ip, uint16_t port);

View File

@ -1,258 +0,0 @@
#include <kvazaar.h>
#include "util.hh"
#include <iostream>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <stdlib.h>
/* Encode a raw YUV420 input file with kvazaar (definition below). */
int kvazaar_encode(const std::string& input, const std::string& output, const std::string& memory_filename,
    int width, int height, int qp, int fps, int period, std::string& preset);
/* Encode (or, with input == nullptr, flush) one frame; returns false when
 * finished or on error, with the exit status in rvalue. */
bool encode_frame(kvz_picture* input, int& rvalue, std::ofstream& outputFile, std::ofstream& memoryFile,
    const kvz_api* api, kvz_encoder* enc);
/* Release kvazaar encoder/config/picture resources. */
void cleanup_kvazaar(kvz_picture* input, const kvz_api* api, kvz_encoder* enc, kvz_config* config);
/* CLI entry point: ./prog <filename> <width> <height> <qp> <fps> <period> <preset>.
 * Derives the .hevc output name and the chunk-size side file from the input. */
int main(int argc, char** argv)
{
    if (argc != 8) {
        fprintf(stderr, "usage: ./%s <filename> <width> <height> <qp> <fps> <period> <preset>\n", __FILE__);
        return EXIT_FAILURE;
    }
    std::string input_file = argv[1];
    std::string output_file = argv[1];

    // remove any possible file extensions and add hevc
    size_t lastindex = input_file.find_last_of(".");
    if (lastindex != std::string::npos)
    {
        output_file = input_file.substr(0, lastindex);
    }

    // add hevc file ending
    output_file = output_file + ".hevc";

    /* Avoid overwriting the input when it already ends in ".hevc". */
    if (input_file == output_file)
    {
        output_file = "out_" + output_file;
    }

    std::string mem_file = get_chunk_filename(output_file);

    int width = atoi(argv[2]);
    int height = atoi(argv[3]);
    int qp = atoi(argv[4]);
    int fps = atoi(argv[5]);
    int intra_period = atoi(argv[6]);
    std::string preset = argv[7];

    /* NOTE(review): atoi() returns 0 on garbage, so 0 doubles as the error
     * value here — this also rejects a legitimate QP of 0. */
    if (!width || !height || !qp || !fps || !intra_period || preset == "")
    {
        std::cerr << "Invalid command line arguments" << std::endl;
        return EXIT_FAILURE;
    }

    return kvazaar_encode(input_file, output_file, mem_file, width, height, qp, fps, intra_period, preset);
}
/* Encode a raw 8-bit YUV420 file with kvazaar.
 * input/output: file paths; memory_filename receives each encoded chunk's
 * byte size as a raw uint64_t (see encode_frame()).  Returns EXIT_SUCCESS
 * or EXIT_FAILURE. */
int kvazaar_encode(const std::string& input, const std::string& output, const std::string& memory_filename,
    int width, int height, int qp, int fps, int period, std::string& preset)
{
    std::cout << "Opening files. Input: " << input << " Output: " << output << std::endl;
    std::cout << "Parameters. Res: " << width << "x" << height << " fps: " << fps << " qp: " << qp << std::endl;

    std::ifstream inputFile (input, std::ios::in | std::ios::binary);
    std::ofstream outputFile(output, std::ios::out | std::ios::binary);
    std::ofstream memoryFile(memory_filename, std::ios::out | std::ios::binary);

    /* Report the precise stream state on open failure. */
    if (!inputFile.good())
    {
        if (inputFile.eof())
        {
            std::cerr << "Input eof before starting" << std::endl;
        }
        else if (inputFile.bad())
        {
            std::cerr << "Input bad before starting" << std::endl;
        }
        else if (inputFile.fail())
        {
            std::cerr << "Input fail before starting" << std::endl;
        }
        return EXIT_FAILURE;
    }
    if (!outputFile.good() || !memoryFile.good())
    {
        if (outputFile.eof() || memoryFile.eof())
        {
            std::cerr << "Output eof before starting" << std::endl;
        }
        else if (outputFile.bad() || memoryFile.bad())
        {
            std::cerr << "Output bad before starting" << std::endl;
        }
        else if (outputFile.fail() || memoryFile.fail())
        {
            std::cerr << "Output fail before starting" << std::endl;
        }
        return EXIT_FAILURE;
    }

    /* Configure an 8-bit encoder with the requested resolution/QP/rate. */
    kvz_encoder* enc = NULL;
    const kvz_api* api = kvz_api_get(8);
    kvz_config* config = api->config_alloc();
    api->config_init(config);
    api->config_parse(config, "preset", preset.c_str());
    config->width = width;
    config->height = height;
    config->hash = kvz_hash::KVZ_HASH_NONE;
    config->intra_period = period;
    config->qp = qp;
    config->framerate_num = fps;
    config->framerate_denom = 1;

    enc = api->encoder_open(config);
    kvz_picture* img_in = api->picture_alloc(width, height);
    if (!enc || !img_in) {
        std::cerr << "Failed to open kvazaar encoder!" << std::endl;
        inputFile.close();
        outputFile.close();
        memoryFile.close();
        cleanup_kvazaar(img_in, api, enc, config);
        return EXIT_FAILURE;
    }

    int frame_count = 1;
    bool input_has_been_read = false;
    while (!input_has_been_read) {
        /* Read one YUV420 frame: full-size Y plane, quarter-size U and V.
         * NOTE(review): the same kvz_picture is reused for every frame. */
        if (!inputFile.read((char*)img_in->y, width * height) ||
            !inputFile.read((char*)img_in->u, width * height/4) ||
            !inputFile.read((char*)img_in->v, width * height/4)) {
            if (inputFile.eof())
            {
                std::cout << "End of input file reached" << std::endl;
            }
            else if (inputFile.bad())
            {
                std::cerr << "Input bad" << std::endl;
            }
            else if (inputFile.fail())
            {
                std::cerr << "Input fails" << std::endl;
            }
            else
            {
                std::cerr << "Unknown Input error" << std::endl;
            }
            input_has_been_read = true;
            inputFile.close();
            continue;
        }
        std::cout << "Start encoding frame " << frame_count << std::endl;
        ++frame_count;

        // feed input to kvazaar and write output to file
        int rvalue = EXIT_FAILURE;
        if (!encode_frame(img_in, rvalue, outputFile, memoryFile, api, enc))
        {
            // if encoding fails
            outputFile.close();
            memoryFile.close();
            cleanup_kvazaar(img_in, api, enc, config);
            return rvalue;
        }
    }

    // write the rest of the frames that are being encoded to file
    int rvalue = EXIT_FAILURE;
    while (encode_frame(nullptr, rvalue, outputFile, memoryFile, api, enc));

    memoryFile.close();
    outputFile.close();
    cleanup_kvazaar(img_in, api, enc, config);
    return rvalue;
}
/* Release kvazaar resources; any of the pointers may be null.  All frees go
 * through the API object, so nothing can be released without it. */
void cleanup_kvazaar(kvz_picture* input, const kvz_api* api, kvz_encoder* enc, kvz_config* config)
{
    if (!api)
        return;

    if (enc)
        api->encoder_close(enc);
    if (config)
        api->config_destroy(config);
    if (input)
        api->picture_free(input);
}
/* Feed one picture to kvazaar (input == nullptr flushes the encoder) and
 * append any produced bitstream chunks to outputFile, recording the total
 * chunk size in memoryFile.  Returns false when encoding is finished or has
 * failed; rvalue then holds the exit status. */
bool encode_frame(kvz_picture* input, int& rvalue, std::ofstream& outputFile, std::ofstream& memoryFile,
    const kvz_api* api, kvz_encoder* enc)
{
    kvz_picture* img_rec = nullptr;
    kvz_picture* img_src = nullptr;
    uint32_t len_out = 0;
    kvz_frame_info info_out;
    kvz_data_chunk* chunks_out = nullptr;

    if (!api->encoder_encode(enc, input, &chunks_out, &len_out, &img_rec, &img_src, &info_out))
    {
        fprintf(stderr, "Failed to encode image.\n");
        /* BUGFIX: the input picture was freed 16 times in a loop here — a
         * double free — and the caller's cleanup_kvazaar() frees it again.
         * Ownership stays with the caller, so do not free it here at all. */
        rvalue = EXIT_FAILURE;
        return false;
    }

    /* Release the reconstructed/source pictures the encoder hands back;
     * they were previously leaked once per frame (kvazaar API: caller owns
     * the returned pictures). */
    if (img_rec)
        api->picture_free(img_rec);
    if (img_src)
        api->picture_free(img_src);

    if (chunks_out == nullptr && input == nullptr) {
        // We are done since there is no more input or output left.
        rvalue = EXIT_SUCCESS;
        return false;
    }

    if (chunks_out != NULL) {
        uint64_t written = 0;
        // Calculate the total size of the chunks
        for (kvz_data_chunk* chunk = chunks_out; chunk != nullptr; chunk = chunk->next) {
            written += chunk->len;
        }
        std::cout << "Write the size of the chunk: " << written << std::endl;
        memoryFile.write((char*)(&written), sizeof(uint64_t));

        // write the chunks into the file
        for (kvz_data_chunk* chunk = chunks_out; chunk != nullptr; chunk = chunk->next) {
            outputFile.write((char*)(chunk->data), chunk->len);
        }
        /* Release the chunk list once it has been written (previously leaked). */
        api->chunk_free(chunks_out);
    }
    return true;
}

BIN
uvgrtp/latency_sender Executable file

Binary file not shown.

BIN
uvgrtp/receiver Executable file

Binary file not shown.

BIN
uvgrtp/sender Executable file

Binary file not shown.