#include <iostream>
#include <unistd.h>
#include <cmath>
#include "timing.h"
#include "log/logger.h"
#include "common.h"

#include <modules/audio_processing/include/audio_processing.h>
#include <modules/audio_processing/include/config.h>
#include "alsa_dev.h"
#include "rnnoise_plugin.h"

using namespace toolkit;

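// NOTE: the FFmpeg types used below (AVFormatContext, AVCodecContext, SwrContext,
// AVFrame, ...) are presumably pulled in through "common.h". If they are not, the
// headers would have to be included as C headers, e.g.:
//   extern "C" {
//   #include <libavformat/avformat.h>
//   #include <libavcodec/avcodec.h>
//   #include <libswresample/swresample.h>
//   #include <libavutil/samplefmt.h>
//   }
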
#define MIX_AUDIO_CHANNELS 1
#define MIX_AUDIO_RATE     32000
#define MIX_OUTPUT_RATE    48000
#define MIX_AUDIO_SAMPLES  (10 * MIX_AUDIO_RATE / 1000)

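// Frame sizing: MIX_AUDIO_SAMPLES is one 10 ms capture frame at 32 kHz (320 samples).
// After resampling to MIX_OUTPUT_RATE it becomes 480 samples per 10 ms, which is the
// block size both the WebRTC AudioProcessing module (10 ms chunks) and RNNoise
// (480-sample frames at 48 kHz) expect.
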
struct audio_buf_t
{
    uint8_t* data;
    int index;
    int size;
};

struct RtmpConfig {
    char url[1024];
    AVFormatContext *formatCtx;
    AVStream *stream;
    AVCodecContext *codecCtx;
    SwrContext *swrCtx;
};

struct CallContext {
    RtmpConfig rtmp;
    std::thread *rtmp_thread;
    std::thread *alsa_thread;
    std::mutex *mutex;
    std::vector<audio_buf_t> *list;     // capture -> push queue of processed PCM buffers
    webrtc::AudioProcessing *apm;
    webrtc::AudioProcessing *apm2;
    webrtc::StreamConfig *rtc_stream_config;
    alsa::AlsaDev *alsa;
    alsa::Config alsa_config;
    // rnnoise
    RnNoiseCommonPlugin *rnnoise;
    bool rnnoise_enable;
    float vadThreshold;                 // (0, 1)
    uint32_t vadGracePeriodBlocks;      // (0, 20)
    uint32_t retroactiveVADGraceBlocks; // 0
    bool running;                       // set to false to stop both worker threads
};

webrtc::AudioProcessing::Config webrtcConfigInit()
{
    webrtc::AudioProcessing::Config apmConfig;
    apmConfig.pipeline.maximum_internal_processing_rate = MIX_OUTPUT_RATE;
    apmConfig.pipeline.multi_channel_capture = MIX_AUDIO_CHANNELS > 1;
    apmConfig.pipeline.multi_channel_render = MIX_AUDIO_CHANNELS > 1;
    // PreAmplifier
    apmConfig.pre_amplifier.enabled = false;
    apmConfig.pre_amplifier.fixed_gain_factor = 0.7f;
    // HighPassFilter
    apmConfig.high_pass_filter.enabled = true;
    apmConfig.high_pass_filter.apply_in_full_band = false;
    // EchoCanceller
    apmConfig.echo_canceller.enabled = false;
    apmConfig.echo_canceller.mobile_mode = false;
    apmConfig.echo_canceller.export_linear_aec_output = false;
    apmConfig.echo_canceller.enforce_high_pass_filtering = false;
    // NoiseSuppression
    apmConfig.noise_suppression.enabled = false;
    apmConfig.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kVeryHigh;
    apmConfig.noise_suppression.analyze_linear_aec_output_when_available = false;
    // TransientSuppression
    apmConfig.transient_suppression.enabled = false;
    // VoiceDetection
    apmConfig.voice_detection.enabled = true;
    // GainController1
    apmConfig.gain_controller1.enabled = false;
    // kAdaptiveAnalog  - adaptive analog mode
    // kAdaptiveDigital - adaptive digital gain mode
    // kFixedDigital    - fixed digital gain mode
    apmConfig.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kFixedDigital;
    apmConfig.gain_controller1.target_level_dbfs = 6;    // target level
    apmConfig.gain_controller1.compression_gain_db = 60; // maximum compression gain
    apmConfig.gain_controller1.enable_limiter = true;    // limiter on/off
    apmConfig.gain_controller1.analog_level_minimum = 0;
    apmConfig.gain_controller1.analog_level_maximum = 255;
    apmConfig.gain_controller1.analog_gain_controller.enabled = true;
    // apmConfig.gain_controller1.analog_gain_controller.startup_min_volume = webrtc::kAgcStartupMinVolume;
    apmConfig.gain_controller1.analog_gain_controller.startup_min_volume = 0;
    apmConfig.gain_controller1.analog_gain_controller.clipped_level_min = 0;
    apmConfig.gain_controller1.analog_gain_controller.enable_agc2_level_estimator = false;
    apmConfig.gain_controller1.analog_gain_controller.enable_digital_adaptive = true;
    // GainController2
    apmConfig.gain_controller2.enabled = true;
    apmConfig.gain_controller2.fixed_digital.gain_db = 20.2f;
    apmConfig.gain_controller2.adaptive_digital.enabled = true;
    apmConfig.gain_controller2.adaptive_digital.vad_probability_attack = 1.f;
    apmConfig.gain_controller2.adaptive_digital.level_estimator = webrtc::AudioProcessing::Config::GainController2::kRms;
    apmConfig.gain_controller2.adaptive_digital.level_estimator_adjacent_speech_frames_threshold = 1;
    apmConfig.gain_controller2.adaptive_digital.use_saturation_protector = true;
    apmConfig.gain_controller2.adaptive_digital.initial_saturation_margin_db = 20.f;
    apmConfig.gain_controller2.adaptive_digital.extra_saturation_margin_db = 2.f;
    apmConfig.gain_controller2.adaptive_digital.gain_applier_adjacent_speech_frames_threshold = 1;
    apmConfig.gain_controller2.adaptive_digital.max_gain_change_db_per_second = 3.f;
    apmConfig.gain_controller2.adaptive_digital.max_output_noise_level_dbfs = -50.f;
    // ResidualEchoDetector
    apmConfig.residual_echo_detector.enabled = false;
    // LevelEstimation
    apmConfig.level_estimation.enabled = false;

    return apmConfig;
}

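// NOTE: with this configuration only the high-pass filter, voice detection and the
// AGC2 adaptive digital gain are active; noise suppression, echo cancellation and
// AGC1 stay disabled. The field names above track the WebRTC revision this project
// builds against; some of them (e.g. voice_detection, vad_probability_attack,
// use_saturation_protector) have been renamed or removed in newer WebRTC branches,
// so this block may need adjusting when upgrading.
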
static int64_t t_analyze = 0;
static int64_t t_render = 0;
static int64_t t_capture = 0;
static int64_t t_process = 0;


int pushInit(RtmpConfig *config);
void pushDestroy(RtmpConfig *config);
void push_thread(CallContext *ctx);
void capture_thread(CallContext *ctx);

int main()
{
    std::string push_url = "rtmp://192.168.15.248:1935/live/1";

    // Initialize the logging system
    Logger::Instance().add(std::make_shared<ConsoleChannel>());
    Logger::Instance().add(std::make_shared<FileChannel>());
    Logger::Instance().setWriter(std::make_shared<AsyncLogWriter>());

    // WebRTC initialization
    webrtc::AudioProcessing *apm = webrtc::AudioProcessingBuilder().Create();
    if (!apm) {
        PrintE("create apm failed.\n");
        return -1;
    }
    // apm: gain control
    webrtc::AudioProcessing::Config config = webrtcConfigInit();
    apm->ApplyConfig(config);
    apm->Initialize();

    // apm2: noise suppression
    config.gain_controller1.enabled = false;
    webrtc::AudioProcessing *apm2 = webrtc::AudioProcessingBuilder().Create();
    apm2->ApplyConfig(config);
    apm2->Initialize();

    webrtc::StreamConfig streamConfig;
    streamConfig.set_has_keyboard(false);
    streamConfig.set_num_channels(MIX_AUDIO_CHANNELS);
    streamConfig.set_sample_rate_hz(MIX_OUTPUT_RATE);
    PrintI("webrtc params: {\n%s\n}\n", config.ToString().c_str());

    // rnnoise
    float vad_threshold = 0.85f;

    // ALSA device parameters
    alsa::Config alsaConfig;
    sprintf(alsaConfig.device, "default");
    alsaConfig.period_time = MIX_AUDIO_SAMPLES * 1000000 / MIX_AUDIO_RATE;
    alsaConfig.buffer_time = 5 * alsaConfig.period_time;
    alsaConfig.channels = MIX_AUDIO_CHANNELS;
    alsaConfig.format = SND_PCM_FORMAT_S16_LE;
    alsaConfig.rate = MIX_AUDIO_RATE;
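    // The period/buffer times above are in microseconds: 320 samples at 32 kHz gives
    // a 10 000 us (10 ms) period and a 50 ms ALSA buffer (5 periods). This assumes
    // alsa::Config follows the usual snd_pcm period-time/buffer-time convention.
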
    // Push context
    CallContext pushCtx;
    memset(&pushCtx, 0, sizeof(pushCtx));
    strcpy(pushCtx.rtmp.url, push_url.data());
    pushCtx.mutex = new std::mutex;
    pushCtx.list = new std::vector<audio_buf_t>();
    pushCtx.apm = apm;
    pushCtx.apm2 = apm2;
    pushCtx.alsa_config = alsaConfig;
    pushCtx.rtc_stream_config = &streamConfig;
    pushCtx.vadThreshold = vad_threshold;
    pushCtx.vadGracePeriodBlocks = 0;
    pushCtx.retroactiveVADGraceBlocks = 0;

    char c;
    bool quit = false;
    /*
    while ((c = getchar()) != EOF && !quit)
    {
        switch (c)
        {
        case 'q': {
            InfoL << "app quit";
            quit = true;
            pushCtx.running = false;
            if (pushCtx.rtmp_thread && pushCtx.rtmp_thread->joinable())
                pushCtx.rtmp_thread->join();
            if (pushCtx.alsa_thread && pushCtx.alsa_thread->joinable())
                pushCtx.alsa_thread->join();
            break;
        }
        case 's': {
            InfoL << "start push: " << pushCtx.rtmp.url;
            pushCtx.running = true;
            pushCtx.alsa_thread = new std::thread(capture_thread, &pushCtx);
            pushCtx.rtmp_thread = new std::thread(push_thread, &pushCtx);
            break;
        }
        }
    }
    */
    std::string input_str;

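    // Interactive commands read from stdin:
    //   s        start the capture and RTMP push threads
    //   q / quit join the worker threads and exit
    //   -e <0|1> enable/disable the RNNoise stage
    //   -t <N>   VAD threshold in percent (clamped to [0, 99])
    //   -p <N>   VAD grace period in blocks (clamped to [0, 20])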
    while (getline(std::cin, input_str)) {
        if (input_str == std::string("quit") ||
            input_str == std::string("q")) {
            InfoL << "app quit";
            quit = true;
            pushCtx.running = false;
            if (pushCtx.rtmp_thread && pushCtx.rtmp_thread->joinable())
                pushCtx.rtmp_thread->join();
            if (pushCtx.alsa_thread && pushCtx.alsa_thread->joinable())
                pushCtx.alsa_thread->join();
            break;
        }
        else if (input_str == std::string("s")) {
            InfoL << "start push: " << pushCtx.rtmp.url;
            pushCtx.running = true;
            pushCtx.alsa_thread = new std::thread(capture_thread, &pushCtx);
            pushCtx.rtmp_thread = new std::thread(push_thread, &pushCtx);
        }
        else if (input_str.find("-e") == 0) {
            std::string enable_str = input_str.substr(2, input_str.size() - 2);
            trim(enable_str);
            pushCtx.rnnoise_enable = atoi(enable_str.c_str()) > 0;
            InfoL << "rnnoise enable: " << pushCtx.rnnoise_enable;
        }
        else if (input_str.find("-t") == 0) {
            std::string th_str = input_str.substr(2, input_str.size() - 2);
            trim(th_str);
            int threshold = atoi(th_str.c_str());
            pushCtx.vadThreshold = std::max(std::min(threshold / 100.f, 0.99f), 0.f);
            InfoL << "VAD Threshold: " << pushCtx.vadThreshold;
        }
        else if (input_str.find("-p") == 0) {
            std::string period_str = input_str.substr(2, input_str.size() - 2);
            trim(period_str);
            int period = atoi(period_str.c_str());
            pushCtx.vadGracePeriodBlocks = std::max(std::min(period, 20), 0);
            InfoL << "VAD Grace Period (blocks): " << pushCtx.vadGracePeriodBlocks;
        }
    }
InfoL << "push end";
|
|
if (apm) {
|
|
apm->Initialize();
|
|
delete apm;
|
|
}
|
|
for (auto buf: *pushCtx.list) {
|
|
free(buf.data);
|
|
}
|
|
pushCtx.list->clear();
|
|
delete pushCtx.list;
|
|
if (pushCtx.rtmp_thread) delete pushCtx.rtmp_thread;
|
|
if (pushCtx.alsa_thread) delete pushCtx.alsa_thread;
|
|
delete pushCtx.mutex;
|
|
return 0;
|
|
}
|
|
|
|
#include "rnnoise/rnnoise.h"
|
|
|
|
void capture_thread(CallContext *ctx)
|
|
{
|
|
// 声卡初始化
|
|
alsa::AlsaDev usbCaptureDev;
|
|
if (usbCaptureDev.applyConfig(ctx->alsa_config) < 0) {
|
|
PrintE("alsa config failed.\n");
|
|
return ;
|
|
}
|
|
if (usbCaptureDev.init(SND_PCM_STREAM_CAPTURE) < 0) {
|
|
PrintE("alsa init failed.\n");
|
|
return ;
|
|
}
|
|
PrintI("alsa init: %s\n", usbCaptureDev.configToString());
|
|
ctx->alsa = &usbCaptureDev;
|
|
|
|
uint8_t *capData = nullptr;
|
|
int buffer_size = usbCaptureDev.getFrames() * usbCaptureDev.getFrameSize();
|
|
capData = (uint8_t *)malloc(buffer_size);
|
|
assert(capData);
|
|
|
|
// rnnoise
|
|
ctx->rnnoise = new RnNoiseCommonPlugin(MIX_AUDIO_CHANNELS);
|
|
ctx->rnnoise->init();
|
|
std::vector<float *> in;
|
|
std::vector<float *> out;
|
|
for (int ch = 0; ch < MIX_AUDIO_CHANNELS; ++ch) {
|
|
in.push_back(new float[10 * MIX_OUTPUT_RATE / 1000]);
|
|
out.push_back(new float[10 * MIX_OUTPUT_RATE / 1000]);
|
|
}
|
|
auto rnn = rnnoise_create(NULL);
|
|
|
|
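    // The in/out scratch buffers hold one 10 ms block at 48 kHz (480 floats per
    // channel), which is the frame size RNNoise operates on. The raw `rnn` handle
    // from rnnoise_create() is only needed by the commented-out
    // rnnoise_process_frame() path further down; RnNoiseCommonPlugin presumably
    // manages its own DenoiseState internally.
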
    // Resampling (32 kHz capture -> 48 kHz processing rate)
    AVFrame *inputFrame = av_frame_alloc();
    {
        inputFrame->sample_rate = MIX_AUDIO_RATE;
        inputFrame->format = AV_SAMPLE_FMT_S16;
        inputFrame->channels = MIX_AUDIO_CHANNELS;
        inputFrame->nb_samples = MIX_AUDIO_SAMPLES;
        inputFrame->channel_layout = av_get_default_channel_layout(MIX_AUDIO_CHANNELS);

        int size = av_samples_get_buffer_size(nullptr,
            inputFrame->channels, inputFrame->nb_samples, (AVSampleFormat)inputFrame->format, 1);
        uint8_t *buffer = (uint8_t *)av_malloc(size);
        avcodec_fill_audio_frame(inputFrame, inputFrame->channels, (AVSampleFormat)inputFrame->format,
            (const uint8_t*)buffer, size, 1);
        InfoL << "input frame samples: " << inputFrame->nb_samples
              << ", buffer_size: " << size;
    }
    AVFrame *outputFrame = av_frame_alloc();
    {
        outputFrame->format = AV_SAMPLE_FMT_S16;
        outputFrame->channels = MIX_AUDIO_CHANNELS;
        outputFrame->channel_layout = av_get_default_channel_layout(MIX_AUDIO_CHANNELS);
        outputFrame->sample_rate = MIX_OUTPUT_RATE;
        outputFrame->nb_samples = 10 * MIX_OUTPUT_RATE / 1000;

        int output_bz = av_samples_get_buffer_size(NULL, outputFrame->channels, outputFrame->nb_samples, (AVSampleFormat)outputFrame->format, 0);
        uint8_t *samples_data = (uint8_t *)av_malloc(output_bz);
        avcodec_fill_audio_frame(outputFrame, outputFrame->channels, (AVSampleFormat)outputFrame->format, samples_data, output_bz, 0);
        InfoL << "output frame samples: " << outputFrame->nb_samples
              << ", buffer_size: " << output_bz;
    }
    SwrContext *swrCtx = swr_alloc_set_opts(nullptr,
        // output
        outputFrame->channel_layout,
        (AVSampleFormat)outputFrame->format,
        outputFrame->sample_rate,
        // input
        inputFrame->channel_layout,
        (AVSampleFormat)inputFrame->format,
        inputFrame->sample_rate,
        0, nullptr);
    if (!swrCtx) {
        PrintE("swr_alloc_set_opts failed.\n");
        return ;
    }
    swr_init(swrCtx);

    int output_size = av_samples_get_buffer_size(NULL, outputFrame->channels, outputFrame->nb_samples, (AVSampleFormat)outputFrame->format, 0);
    uint8_t *output_buf = outputFrame->data[0];

    FILE *input_fp = fopen("/root/rtmp_push_in.pcm", "wb");
    FILE *output_fp = fopen("/root/rtmp_push_out.pcm", "wb");
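    // Per-iteration flow of the capture loop below:
    //   1. read one 10 ms S16 frame (320 samples @ 32 kHz) from ALSA
    //   2. resample it to 480 samples @ 48 kHz with swr_convert
    //   3. run the WebRTC APM (gain / high-pass / VAD) in place on the 48 kHz block
    //   4. optionally run RNNoise on the same block
    //   5. copy the processed block into ctx->list for the push thread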
    while (ctx->running) {
        // Capture
        t_capture = gettimeofday();
        size_t read_size = usbCaptureDev.read(capData, buffer_size);
        // PrintI("alsa read %d\n", read_size);
        if (read_size == 0) {
            msleep(1);
            continue;
        }

        // Resample
        memcpy(inputFrame->data[0], capData, buffer_size);
        // fwrite(inputFrame->data[0], 1, buffer_size, input_fp);
        {
            const uint8_t** in = (const uint8_t**)inputFrame->data;
            uint8_t **out = outputFrame->data;

            int len2 = swr_convert(swrCtx, out, outputFrame->nb_samples, in, inputFrame->nb_samples);
            if (len2 < 0) {
                PrintE("swr_convert failed.\n");
                break;
            }
            int out_size = len2;
            // Drain any samples the resampler buffered internally.
            while (len2 > 0) {
                len2 = swr_convert(swrCtx, out, outputFrame->nb_samples, nullptr, 0);
                out_size += len2;
            }
            // InfoL << "swr convert output: " << out_size;
        }

        // Noise suppression / gain (WebRTC APM)
        {
            t_process = gettimeofday();
            ctx->apm->ProcessStream((int16_t *)output_buf, *ctx->rtc_stream_config, *ctx->rtc_stream_config, (int16_t *)output_buf);
            // ctx->apm2->ProcessStream((int16_t *)capData, *ctx->rtc_stream_config, *ctx->rtc_stream_config, (int16_t *)capData);
        }
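        // The ProcessStream() call above uses the same 48 kHz mono StreamConfig for
        // input and output, i.e. one 10 ms / 480-sample block per call, matching the
        // resampler output; the interleaved int16 overload processes the block in
        // place in outputFrame->data[0].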
        // rnnoise
        if (ctx->rnnoise_enable) {
            int16_t *ptr = (int16_t *)output_buf;
            int nb_samples = outputFrame->nb_samples;
            // De-interleave into per-channel float buffers. Note: samples are passed
            // in int16 range; if RnNoiseCommonPlugin expects normalized [-1, 1]
            // floats, a 1/32768 scale on input and 32768 on output would be needed.
            for (int i = 0; i < nb_samples; ++i) {
                for (int ch = 0; ch < MIX_AUDIO_CHANNELS; ++ch) {
                    in[ch][i] = ptr[i * MIX_AUDIO_CHANNELS + ch];
                }
            }
            const float *input[] = {in[0]};
            float *output[] = {out[0]};

            for (int i = 0; i < nb_samples; ++i)
                for (int ch = 0; ch < MIX_AUDIO_CHANNELS; ++ch)
                    output[ch][i] = input[ch][i];

            ctx->rnnoise->process(input, output, nb_samples,
                ctx->vadThreshold, ctx->vadGracePeriodBlocks, ctx->retroactiveVADGraceBlocks);
            // for (int ch = 0; ch < MIX_AUDIO_CHANNELS; ++ch)
            //     rnnoise_process_frame(rnn, output[ch], input[ch]);

            // PrintI("rnnoise process: vadThreshold=%lf, vadGracePeriodBlocks=%d, retroactiveVADGraceBlocks=%d\n",
            //     ctx->vadThreshold, ctx->vadGracePeriodBlocks, ctx->retroactiveVADGraceBlocks);

            // Re-interleave the denoised samples back into the output buffer.
            for (int i = 0; i < nb_samples; ++i)
                for (int ch = 0; ch < MIX_AUDIO_CHANNELS; ++ch)
                    ptr[i * MIX_AUDIO_CHANNELS + ch] = output[ch][i];
        }
        // fwrite(output_buf, 1, output_size, output_fp);

        // Queue the processed audio for the push thread
        {
            // uint8_t *buffer = (uint8_t *)malloc(buffer_size);
            // memcpy(buffer, capData, buffer_size);
            uint8_t *buffer = (uint8_t *)malloc(output_size);
            memcpy(buffer, output_buf, output_size);
            std::unique_lock<std::mutex> lck(*ctx->mutex);
            audio_buf_t buf;
            buf.data = buffer;
            buf.index = 0;
            buf.size = output_size;
            ctx->list->emplace_back(buf);
        }
    }
    InfoL << "capture thread end";
    usbCaptureDev.destory();
    if (capData) free(capData);
    // NOTE: swrCtx, inputFrame/outputFrame, the per-channel scratch buffers, rnn and
    // the two FILE handles opened above are not released here.
    ctx->alsa = nullptr;
}

void push_thread(CallContext *ctx)
{
    RtmpConfig rtmp;
    if (pushInit(&ctx->rtmp) < 0) {
        return ;
    }
    memcpy(&rtmp, &ctx->rtmp, sizeof(rtmp));
    AVRational av;
    int64_t pts = 0;
    AVPacket *pkt = av_packet_alloc();
    int ret;
    // Time base of 1/sample_rate, used to advance pts by nb_samples per frame.
    av.den = rtmp.codecCtx->sample_rate;
    av.num = 1;

    AVFrame *inputFrame = av_frame_alloc();
    {
        inputFrame->sample_rate = MIX_OUTPUT_RATE;
        inputFrame->format = AV_SAMPLE_FMT_S16;
        inputFrame->channels = MIX_AUDIO_CHANNELS;
        inputFrame->nb_samples = 1024 * MIX_OUTPUT_RATE / 44100;
        inputFrame->channel_layout = av_get_default_channel_layout(MIX_AUDIO_CHANNELS);

        int size = av_samples_get_buffer_size(nullptr, inputFrame->channels, inputFrame->nb_samples, (AVSampleFormat)inputFrame->format, 1);
        uint8_t *buffer = (uint8_t *)av_malloc(size);
        avcodec_fill_audio_frame(inputFrame, inputFrame->channels, (AVSampleFormat)inputFrame->format,
            (const uint8_t*)buffer, size, 1);
    }
    AVFrame *outputFrame = av_frame_alloc();
    {
        outputFrame->format = rtmp.codecCtx->sample_fmt;
        outputFrame->channel_layout = rtmp.codecCtx->channel_layout;
        outputFrame->sample_rate = rtmp.codecCtx->sample_rate;
        outputFrame->nb_samples = rtmp.codecCtx->frame_size;
        outputFrame->channels = rtmp.codecCtx->channels;

        int output_bz = av_samples_get_buffer_size(NULL, outputFrame->channels, outputFrame->nb_samples, (AVSampleFormat)outputFrame->format, 0);
        uint8_t *samples_data = (uint8_t *)av_malloc(output_bz);
        avcodec_fill_audio_frame(outputFrame, outputFrame->channels, (AVSampleFormat)outputFrame->format, samples_data, output_bz, 0);
    }
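    // inputFrame holds 1024 * 48000 / 44100 = 1114 samples of 48 kHz S16 audio, i.e.
    // roughly the amount that resamples down to one 1024-sample AAC frame at
    // 44.1 kHz (outputFrame->nb_samples == codecCtx->frame_size).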
    // Write the stream header
    ret = avformat_write_header(rtmp.formatCtx, nullptr);
    if (ret < 0) {
        PrintE("avformat_write_header failed.\n");
        return ;
    }
    int frames = 0;
    while (ctx->running) {
        // Pull PCM from the capture queue until inputFrame is full.
        if (frames <= 0) frames = inputFrame->nb_samples;
        while (frames > 0 && ctx->list->size() > 0)
        {
            std::unique_lock<std::mutex> lck(*ctx->mutex);
            auto nsData = ctx->list->begin();

            int needSize = frames * sizeof(int16_t) * inputFrame->channels;
            int readSize = (nsData->size - nsData->index) >= needSize ? needSize : (nsData->size - nsData->index);

            memcpy(inputFrame->data[0] + (inputFrame->nb_samples - frames) * sizeof(int16_t) * inputFrame->channels,
                   nsData->data + nsData->index, readSize);

            frames -= readSize / (sizeof(int16_t) * inputFrame->channels);
            nsData->index += readSize;

            if (nsData->index >= nsData->size) {
                free(nsData->data);
                ctx->list->erase(ctx->list->begin());
            }
        }
        // Not enough queued audio yet; note this busy-waits without sleeping.
        if (frames > 0) continue;
        // Resample S16 / 48 kHz to the encoder format
        {
            const uint8_t** in = (const uint8_t**)inputFrame->data;
            uint8_t **out = outputFrame->data;

            int len2 = swr_convert(rtmp.swrCtx, out, outputFrame->nb_samples, in, inputFrame->nb_samples);
            if (len2 < 0) {
                PrintE("swr_convert failed.\n");
                break;
            }

            // int out_data_size = len2 * rtmp.codecCtx->channels * av_get_bytes_per_sample(rtmp.codecCtx->sample_fmt);
            // if (ns_fp) fwrite(outputFrame->data[0], 1, out_data_size, ns_fp);
        }

        // Encode and push to the remote server
        // Reset pts well before it could overflow.
        if (pts < 0 || pts > INT64_MAX / 2) pts = 0;
        outputFrame->pts = pts;
        pts += av_rescale_q(outputFrame->nb_samples, av, rtmp.codecCtx->time_base);
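        // With av = {1, sample_rate}, av_rescale_q() converts a duration expressed in
        // samples into the encoder's time base; for an audio encoder whose time base
        // is 1/sample_rate this simply advances pts by nb_samples per encoded frame.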
        ret = avcodec_send_frame(rtmp.codecCtx, outputFrame);
        if (ret < 0) {
            PrintE("avcodec_send_frame failed: %d\n", ret);
            break;
        }
        while (ret >= 0) {
            ret = avcodec_receive_packet(rtmp.codecCtx, pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                break;
            } else if (ret < 0) {
                fprintf(stderr, "Error during encoding\n");
                break;
            }

            // Rescale packet timestamps (and duration) from the encoder time base to
            // the stream time base.
            pkt->stream_index = rtmp.stream->index;
            av_packet_rescale_ts(pkt, rtmp.codecCtx->time_base, rtmp.stream->time_base);

            // Write the packet to the output, interleaved
            ret = av_interleaved_write_frame(rtmp.formatCtx, pkt);
            if (ret < 0) {
                fprintf(stderr, "Error while writing audio frame\n");
                break;
            }

            // Release the packet
            av_packet_unref(pkt);
        }
    }
InfoL << "push thread end";
|
|
if (ctx->running)
|
|
ctx->running = false;
|
|
// 写入帧尾
|
|
av_write_trailer(rtmp.formatCtx);
|
|
// 释放线程资源
|
|
av_packet_free(&pkt);
|
|
av_frame_free(&inputFrame);
|
|
av_frame_free(&outputFrame);
|
|
pushDestory(&ctx->rtmp);
|
|
memset(&rtmp, 0, sizeof(rtmp));
|
|
}
|
|
|
|
int pushInit(RtmpConfig *config)
{
    if (nullptr == strstr(config->url, "rtmp://")) {
        PrintE("url error, url: %s\n", config->url);
        return -1;
    }
    AVCodec *codec = nullptr;
    AVCodecContext *codecCtx = nullptr;
    AVFormatContext *afctx = nullptr;
    AVCodecParameters *codecPar = nullptr;
    SwrContext *swrCtx = nullptr;
    AVStream *audio_st = nullptr;
    AVDictionary *opts = nullptr;
    int ret;

    // Open the output context (FLV over RTMP)
    ret = avformat_alloc_output_context2(&afctx, nullptr, "flv", config->url);
    if (ret < 0) {
        PrintE("open output failed.\n");
        goto fail;
    }
    if ( !(afctx->oformat->flags & AVFMT_NOFILE) ) {
        ret = avio_open(&afctx->pb, config->url, AVIO_FLAG_WRITE);
        if (ret < 0) {
            PrintE("avio_open failed.\n");
            goto fail;
        }
    }

    // Create the audio stream
    audio_st = avformat_new_stream(afctx, codec);
    if (!audio_st) {
        PrintE("alloc new audio stream failed.\n");
        goto fail;
    }
    // Set encoding parameters
    codecPar = afctx->streams[audio_st->index]->codecpar;
    codecPar->codec_id = AV_CODEC_ID_AAC;
    codecPar->codec_type = AVMEDIA_TYPE_AUDIO;
    codecPar->codec_tag = 0;
    codecPar->bit_rate = 128 * 1024;
    codecPar->sample_rate = 44100;
    codecPar->channel_layout = av_get_default_channel_layout(MIX_AUDIO_CHANNELS);
    codecPar->channels = av_get_channel_layout_nb_channels(codecPar->channel_layout);
    codecPar->format = AV_SAMPLE_FMT_FLTP;
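    // FFmpeg's native AAC encoder only accepts planar float input (AV_SAMPLE_FMT_FLTP)
    // and uses a 1024-sample frame size, which is why push_thread() resamples the
    // S16 / 48 kHz blocks to FLTP / 44.1 kHz before encoding.
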
    // Encoder initialization
    codec = avcodec_find_encoder(codecPar->codec_id);
    if (!codec) {
        PrintE("find codec aac failed.\n");
        goto fail;
    }

    codecCtx = avcodec_alloc_context3(codec);
    if (!codecCtx) {
        PrintE("alloc codec context failed.\n");
        goto fail;
    }

    ret = avcodec_parameters_to_context(codecCtx, codecPar);
    if (ret < 0) {
        PrintE("copy codec params failed.\n");
        goto fail;
    }
    // Disable buffering
    av_dict_set(&opts, "fflags", "nobuffer", AV_DICT_MATCH_CASE);
    // av_dict_set(&opts, "rtmp_live", "1", AV_DICT_MATCH_CASE);
    // Open the encoder
    ret = avcodec_open2(codecCtx, codec, &opts);
    if (ret < 0) {
        PrintE("open codec %d failed.\n", codec->id);
        goto fail;
    }
    audio_st->codecpar->codec_tag = 0;
    // Free the options dictionary
    av_dict_free(&opts);

    // Dump the output stream layout
    av_dump_format(afctx, 0, config->url, 1);

    // Resampler initialization
    swrCtx = swr_alloc_set_opts(nullptr,
        // output
        codecCtx->channel_layout,
        codecCtx->sample_fmt,
        codecCtx->sample_rate,
        // input
        av_get_default_channel_layout(MIX_AUDIO_CHANNELS),
        AV_SAMPLE_FMT_S16,
        MIX_OUTPUT_RATE,
        0, nullptr);
    if (!swrCtx) {
        PrintE("swr_alloc_set_opts failed.\n");
        goto fail;
    }
    swr_init(swrCtx);

    config->codecCtx = codecCtx;
    config->formatCtx = afctx;
    config->stream = audio_st;
    config->swrCtx = swrCtx;
    PrintI("rtmp push init ok.\n");
    return 0;
fail:
    if (afctx) {
        if (afctx->pb)
            avio_close(afctx->pb);
        avformat_free_context(afctx);
    }
    if (codecCtx) {
        avcodec_close(codecCtx);
        avcodec_free_context(&codecCtx);
    }
    if (swrCtx) {
        swr_close(swrCtx);
        swr_free(&swrCtx);
    }
    return -1;
}

void pushDestroy(RtmpConfig *config) {
    if (config->formatCtx) {
        if (config->formatCtx->pb)
            avio_close(config->formatCtx->pb);
        avformat_free_context(config->formatCtx);
    }
    if (config->codecCtx) {
        avcodec_close(config->codecCtx);
        avcodec_free_context(&config->codecCtx);
    }
    if (config->swrCtx) {
        swr_close(config->swrCtx);
        swr_free(&config->swrCtx);
    }
}