How to convert a GStreamer command line to C code? - audio

The command is:
gst-launch-1.0 filesrc location=/home/pi/Videos/watch.mp4 ! qtdemux name=demux \
demux.audio_0 ! queue ! decodebin ! audioconvert ! audioresample ! autoaudiosink \
demux.video_0 ! queue ! decodebin ! videoconvert ! videoscale ! video/x-raw,width=800,height=480 ! avenc_bmp ! fakesink
It's a little difficult for me to link them together because both 'qtdemux' and 'decodebin' are used in this example.
I have tried with 'tee', but it's obviously slow.
Could anyone give me some help? Thank you.
#include <stdio.h>
#include <gstreamer-1.0/gst/gst.h>
#include <stdbool.h>
typedef struct {
GstElement *pipeline;
GstElement *filesrc;
GstElement *qtdemux;
/* Video */
struct{
GstElement *queue;
GstElement *decode;
GstElement *convert;
GstElement *scale;
GstElement *capsfilter;
GstElement *enc_bmp;
GstElement *fakesink;
}video;
/* Audio */
struct{
GstElement *queue;
GstElement *decode;
GstElement *convert;
GstElement *resample;
GstElement *sink;
}audio;
} gstreamer_t;
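/* Callback for qtdemux's "pad-added" signal: inspect the new pad's caps and route it to the matching audio or video branch. */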
static void pad_added_handler (GstElement *src, GstPad *pad, gstreamer_t* data)
{
GstCaps *caps;
GstStructure *pad_sct;
const gchar *name;
caps = gst_pad_get_current_caps(pad);
pad_sct = gst_caps_get_structure( caps, 0 );
name = gst_structure_get_name(pad_sct);
printf( "src name = %s\r\n", gst_element_get_name(src) );
printf( "pad name = %s\r\n", name);
printf( "Received new pad '%s' from '%s'.\r\n", GST_PAD_NAME (pad), GST_ELEMENT_NAME (src));
GstPad *sinkpad = NULL;
if(g_str_has_prefix (name, "video/x-h264")) {
sinkpad = gst_element_get_static_pad(data->video.queue, "sink");
if(gst_pad_link(pad, sinkpad) != GST_PAD_LINK_OK )
printf("not link !!\n\n");
gst_object_unref (sinkpad);
}else if(g_str_has_prefix( name, "audio/mpeg")){
sinkpad = gst_element_get_static_pad ( data->audio.queue, "sink");
if(gst_pad_link( pad, sinkpad) != GST_PAD_LINK_OK )
printf("not link !!\n\n");
gst_object_unref (sinkpad);
}
else
printf("Another Pad: %s.\r\n", name);
gst_caps_unref (caps);
}
int main(int argc, char *argv[]) {
gstreamer_t gstreamer;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
gstreamer.filesrc = gst_element_factory_make ("filesrc", "filesrc");
g_object_set (gstreamer.filesrc, "location", "../../Videos/watch.mp4", NULL);
gstreamer.qtdemux = gst_element_factory_make ("qtdemux", "qtdemux");
/* Video */
gstreamer.video.queue = gst_element_factory_make("queue", "video");
gstreamer.video.decode = gst_element_factory_make ("decodebin", "decodebin");
gstreamer.video.convert = gst_element_factory_make ("videoconvert", "videoconvert");
gstreamer.video.scale = gst_element_factory_make ("videoscale", "videoscale");
gstreamer.video.capsfilter = gst_element_factory_make ("capsfilter", "capsfilter");
GstCaps *Caps = gst_caps_from_string("video/x-raw,width=800,height=480");
g_object_set(G_OBJECT(gstreamer.video.capsfilter), "caps", Caps, NULL);
gst_caps_unref(Caps);
gstreamer.video.enc_bmp = gst_element_factory_make ("avenc_bmp", "avenc_bmp");
gstreamer.video.fakesink = gst_element_factory_make ("fakesink", "fakesink");
g_object_set (gstreamer.video.fakesink, "sync", TRUE, NULL);
/* Audio */
gstreamer.audio.queue = gst_element_factory_make("queue", "queue_audio");
gstreamer.audio.decode = gst_element_factory_make("decodebin", "decodebin");
gstreamer.audio.convert = gst_element_factory_make("audioconvert", "audioconvert");
gstreamer.audio.resample = gst_element_factory_make("audioresample", "audioresample");
gstreamer.audio.sink = gst_element_factory_make("autoaudiosink", "autoaudiosink");
/* Create the empty pipeline */
gstreamer.pipeline = gst_pipeline_new ("gstreamer-pipeline");
if (!gstreamer.pipeline || !gstreamer.filesrc || !gstreamer.qtdemux ||
!gstreamer.video.queue || !gstreamer.video.decode || !gstreamer.video.convert || !gstreamer.video.scale || !gstreamer.video.capsfilter || !gstreamer.video.enc_bmp || !gstreamer.video.fakesink||
!gstreamer.audio.queue || !gstreamer.audio.decode || !gstreamer.audio.convert || !gstreamer.audio.resample ||!gstreamer.audio.sink)
{
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Build the pipeline. Note that we are NOT linking the source at this
* point. We will do it later. */
gst_bin_add_many (GST_BIN (gstreamer.pipeline), gstreamer.filesrc, gstreamer.qtdemux,
gstreamer.video.queue, gstreamer.video.decode, gstreamer.video.convert, gstreamer.video.scale, gstreamer.video.capsfilter, gstreamer.video.enc_bmp, gstreamer.video.fakesink,
gstreamer.audio.queue, gstreamer.audio.decode, gstreamer.audio.convert, gstreamer.audio.resample, gstreamer.audio.sink, NULL);
if (!gst_element_link (gstreamer.filesrc, gstreamer.qtdemux)) {
g_printerr ("Elements filesrc and qtdemux could not be linked.\n");
gst_object_unref (gstreamer.pipeline);
return -1;
}
if (!gst_element_link_many (gstreamer.video.convert, gstreamer.video.scale, gstreamer.video.capsfilter, gstreamer.video.enc_bmp, gstreamer.video.fakesink, NULL)) {
g_printerr ("Video elements could not be linked.\n");
gst_object_unref (gstreamer.pipeline);
return -1;
}
if (!gst_element_link_many (gstreamer.audio.convert, gstreamer.audio.resample, gstreamer.audio.sink, NULL)) {
g_printerr ("Audio elements could not be linked.\n");
gst_object_unref (gstreamer.pipeline);
return -1;
}
g_signal_connect (gstreamer.qtdemux, "pad-added", G_CALLBACK (pad_added_handler), &gstreamer);
/* Start playing */
ret = gst_element_set_state (gstreamer.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (gstreamer.pipeline);
return -1;
}
/* Listen to the bus */
bus = gst_element_get_bus (gstreamer.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("\nEnd-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (gstreamer.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (gstreamer.pipeline, GST_STATE_NULL);
gst_object_unref (gstreamer.pipeline);
return 0;
}
The output is:
(stream:5337): GStreamer-WARNING **: 02:28:38.279: Name 'decodebin' is not unique in bin 'gstreamer-pipeline', not adding
Pipeline state changed from NULL to READY:
src name = qtdemux
pad name = video/x-h264
Received new pad 'video_0' from 'qtdemux'.
not link !!
src name = qtdemux
pad name = audio/mpeg
Received new pad 'audio_0' from 'qtdemux'.
Error received from element qtdemux: Internal data stream error.
Debugging information: ../gst/isomp4/qtdemux.c(6545): gst_qtdemux_loop (): /GstPipeline:gstreamer-pipeline/GstQTDemux:qtdemux:
streaming stopped, reason not-linked (-1)
I don't know how to link the video and audio.

The problem you're facing is with respect to decodebin.
One single pipeline cannot contain two elements that have the same name.
In your case,
gstreamer.video.decode = gst_element_factory_make ("decodebin", "decodebin");
and
gstreamer.audio.decode = gst_element_factory_make("decodebin", "decodebin");
have the same name, "decodebin".
That's why one of the decodebins is not even added to the pipeline.
You can change it to something else. For example
gstreamer.audio.decode = gst_element_factory_make("decodebin", "decodebin-audio");
and
gstreamer.video.decode = gst_element_factory_make("decodebin", "decodebin-video");
This rule applies to any parent element that is directly added to your pipeline.
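Beyond the naming fix, note that in the code above the queues are never linked to their decodebins, and a decodebin only exposes its src pads once it has identified the stream, so each decodebin needs its own "pad-added" handler as well. A minimal sketch of the remaining wiring (the decodebin_pad_added helper is hypothetical, not part of the original code):
/* Link a decodebin's dynamic src pad to the next element's static sink pad. */
static void decodebin_pad_added (GstElement *dbin, GstPad *pad, GstElement *next)
{
GstPad *sinkpad = gst_element_get_static_pad (next, "sink");
if (!gst_pad_is_linked (sinkpad))
gst_pad_link (pad, sinkpad);
gst_object_unref (sinkpad);
}
and in main(), after building the pipeline:
/* queue -> decodebin can be linked statically; those pads always exist */
gst_element_link (gstreamer.video.queue, gstreamer.video.decode);
gst_element_link (gstreamer.audio.queue, gstreamer.audio.decode);
/* decodebin -> convert must wait for the dynamic pads */
g_signal_connect (gstreamer.video.decode, "pad-added", G_CALLBACK (decodebin_pad_added), gstreamer.video.convert);
g_signal_connect (gstreamer.audio.decode, "pad-added", G_CALLBACK (decodebin_pad_added), gstreamer.audio.convert);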

Related

Gstreamer apply audio delay to live stream

I would like to add an audio delay (up to 10/15 seconds) to a live stream with gstreamer and rtmpsink.
This is my command line:
gst-launch-1.0 -vvv flvmux streamable=true name=mux ! rtmpsink location="rtmp://localhost/live" \
souphttpsrc location="http://<url video h264>" ! tsdemux ! h264parse ! queue ! mux. \
souphttpsrc location="https://<url audio aac>" ! icydemux ! aacparse ! queue ! mux.
Acting directly on the command line, I tried to add "queue max-size-buffers=0 max-size-time=0 max-size-bytes=0 min-threshold-time=15000000000" after aacparse, but the entire stream blocks this way.
Acting in C, I tried to modify the PTS timestamps on the buffers of aacparse's src pad, but no matter what value I use there is no effect:
gst_pad_add_probe(line->aacparse_srcpad, GST_PAD_PROBE_TYPE_BUFFER, cb_have_data_audio, NULL, NULL);
..
static GstPadProbeReturn
cb_have_data_audio (GstPad *pad, GstPadProbeInfo *info, gpointer user_data)
{
GstMapInfo map;
GstBuffer *buffer;
buffer = GST_PAD_PROBE_INFO_BUFFER (info);
buffer = gst_buffer_make_writable (buffer);
if (buffer == NULL)
return GST_PAD_PROBE_OK;
GstClockTime pts = GST_BUFFER_PTS(buffer);
GST_BUFFER_PTS(buffer) = pts + 100000000000;
GST_PAD_PROBE_INFO_DATA (info) = buffer;
return GST_PAD_PROBE_OK;
}
I also tried gst_pad_set_offset(), but again there was no effect:
gst_pad_set_offset(line->aacparse_srcpad, 1000000000);
Even playing with the flvmux pads and setting "streamable=false" has no effect. What would be the right approach to add delay only to the audio?
#include <gst/gst.h>
#include<stdio.h>
static GMainLoop *loop;
static gint counter;
static GstBus *bus;
static gboolean prerolled = FALSE;
static GstPad *sinkpad,*ident_sink;
static GstClockTime ptimestamp=(guint64)1000;
static GstClockTime dtimestamp= 0;
static GstPadProbeReturn
display_data (GstPad *pad,
GstBuffer *apsInfo,
gpointer user_data)
{
// apsInfo = gst_buffer_ref(apsInfo);
// apsInfo = gst_buffer_make_writable(apsInfo);
// int fps = 30;
// ptimestamp += gst_util_uint64_scale_int (1, GST_SECOND, fps);
// int a=GST_BUFFER_PTS (apsInfo) ;
// int b=GST_BUFFER_DTS (apsInfo) ;
// GST_BUFFER_PTS (apsInfo)=ptimestamp;
// GST_BUFFER_DTS (apsInfo)=ptimestamp;;
// //printf("%d %d \n",a,b);
// GST_BUFFER_DURATION (apsInfo) = gst_util_uint64_scale_int (1, GST_SECOND, fps);
return GST_PAD_PROBE_OK;
}
/* identity "handoff" callback: its signature is (element, buffer, user_data) */
static void
change_time (GstElement *identity,
GstBuffer *apsInfo,
gpointer user_data)
{
//printf("change ing time");
//apsInfo = gst_buffer_ref(apsInfo);
//apsInfo = gst_buffer_make_writable(apsInfo);
/* When the commented lines above are included, gst_buffer_is_writable()
reports the buffer as writable but the changes have no effect on the video;
when they are removed, the buffer is reported as not writable yet the
changes do affect the video. */
GST_BUFFER_FLAG_SET (apsInfo, GST_BUFFER_FLAG_DISCONT);
int fps = 30;
dtimestamp += gst_util_uint64_scale_int (1, GST_SECOND, fps);
ptimestamp += gst_util_uint64_scale_int (1, GST_SECOND, fps);
GST_BUFFER_PTS (apsInfo) = ptimestamp;
GST_BUFFER_DTS (apsInfo) = dtimestamp;
g_print ("%" G_GUINT64_FORMAT " %" G_GUINT64_FORMAT "\n", ptimestamp, dtimestamp);
GST_BUFFER_DURATION (apsInfo) = gst_util_uint64_scale_int (1, GST_SECOND, fps);
}
static void
dec_counter (GstElement * pipeline)
{
if (prerolled)
return;
if (g_atomic_int_dec_and_test (&counter)) {
/* all probes blocked and no-more-pads signaled, post
* message on the bus. */
prerolled = TRUE;
gst_bus_post (bus, gst_message_new_application (
GST_OBJECT_CAST (pipeline),
gst_structure_new_empty ("ExPrerolled")));
}
}
/* called when a source pad of uridecodebin is blocked */
static GstPadProbeReturn
cb_blocked (GstPad *pad,
GstPadProbeInfo *info,
gpointer user_data)
{
GstElement *pipeline = GST_ELEMENT (user_data);
if (prerolled)
return GST_PAD_PROBE_REMOVE;
dec_counter (pipeline);
return GST_PAD_PROBE_OK;
}
/* called when uridecodebin has a new pad */
static void
cb_pad_added (GstElement *element,
GstPad *pad,
gpointer user_data)
{
GstElement *pipeline = GST_ELEMENT (user_data);
if (prerolled)
return;
g_atomic_int_inc (&counter);
gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM,
(GstPadProbeCallback) cb_blocked, pipeline, NULL);
/* try to link to the video pad */
gst_pad_link (pad, sinkpad);
}
/* called when uridecodebin has created all pads */
static void
cb_no_more_pads (GstElement *element,
gpointer user_data)
{
GstElement *pipeline = GST_ELEMENT (user_data);
if (prerolled)
return;
dec_counter (pipeline);
}
/* called when a new message is posted on the bus */
static void
cb_message (GstBus *bus,
GstMessage *message,
gpointer user_data)
{
GstElement *pipeline = GST_ELEMENT (user_data);
switch (GST_MESSAGE_TYPE (message)) {
case GST_MESSAGE_ERROR:
g_print ("we received an error!\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_EOS:
g_print ("we reached EOS\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_APPLICATION:
{
if (gst_message_has_name (message, "ExPrerolled")) {
/* it's our message */
g_print ("we are all prerolled, do seek\n");
gst_element_seek (pipeline, 1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
GST_SEEK_TYPE_SET, 0,
GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);
gst_element_set_state (pipeline, GST_STATE_PLAYING);
}
break;
}
default:
break;
}
}
gint
main (gint argc,
gchar *argv[])
{
GstElement *pipeline, *src, *csp, *vs, *sink ,*idelem;
/* init GStreamer */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* build */
pipeline = gst_pipeline_new ("my-pipeline");
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
gst_bus_add_signal_watch (bus);
g_signal_connect (bus, "message", (GCallback) cb_message,
pipeline);
src = gst_element_factory_make ("uridecodebin", "src");
if (src == NULL)
g_error ("Could not create 'uridecodebin' element");
g_object_set (src, "uri", "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4", NULL);
csp = gst_element_factory_make ("videoconvert", "csp");
if (csp == NULL)
g_error ("Could not create 'videoconvert' element");
vs = gst_element_factory_make ("videoscale", "vs");
if (vs == NULL)
g_error ("Could not create 'videoscale' element");
idelem = gst_element_factory_make ("identity", "identity-elem");
if (idelem == NULL)
g_error ("Could not create 'idelem' ");
sink = gst_element_factory_make ("autovideosink", "sink");
if (sink == NULL)
g_error ("Could not create 'autovideosink' element");
gst_bin_add_many (GST_BIN (pipeline), src, csp, vs, idelem, sink, NULL);
/* can't link src yet, it has no pads */
gst_element_link_many (csp, vs, idelem, sink, NULL);
sinkpad = gst_element_get_static_pad (csp, "sink");
//ident_sink = gst_element_get_static_pad (idelem, "sink");
// //
// gst_pad_add_probe (sinkpad, GST_PAD_PROBE_TYPE_BUFFER,
// (GstPadProbeCallback) display_data, NULL, NULL); //displayinfo data
// gst_object_unref (sinkpad);
/* enable handoffs on the identity element and hook up the timestamp rewriter */
g_object_set (G_OBJECT (idelem), "signal-handoffs", TRUE, NULL);
g_signal_connect (idelem, "handoff", (GCallback) change_time, pipeline);
g_atomic_int_set (&counter, 1);
g_signal_connect (src, "pad-added",
(GCallback) cb_pad_added, pipeline);
g_signal_connect (src, "no-more-pads",
(GCallback) cb_no_more_pads, pipeline);
gst_element_set_state (pipeline, GST_STATE_PAUSED);
g_main_loop_run (loop);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (sinkpad);
gst_object_unref (bus);
gst_object_unref (pipeline);
g_main_loop_unref (loop);
return 0;
}

How to use openssl to do an anonymous DH connection

Hi, I am new to working with OpenSSL. I am trying to implement an anonymous Diffie-Hellman connection using OpenSSL.
The server code looks something like this:
#include "openssl/err.h"
#include "openssl/ssl.h"
#include "openssl/dh.h"
#include <arpa/inet.h>
#include <errno.h>
#include <malloc.h>
#include <netinet/in.h>
#include <resolv.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#define FAIL -1
// Create the SSL socket and intialize the socket address structure
int OpenListener(int port) {
int sd;
struct sockaddr_in addr;
sd = socket(PF_INET, SOCK_STREAM, 0);
bzero(&addr, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_port = htons(port);
addr.sin_addr.s_addr = INADDR_ANY;
if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) != 0) {
perror("can't bind port");
abort();
}
if (listen(sd, 10) != 0) {
perror("Can't configure listening port");
abort();
}
return sd;
}
int isRoot() {
if (getuid() != 0) {
return 0;
} else {
return 1;
}
}
SSL_CTX *InitServerCTX(void) {
SSL_METHOD *method;
SSL_CTX *ctx;
DH *dh_;
int codes = 0;
OpenSSL_add_all_algorithms(); /* load & register all cryptos, etc. */
SSL_load_error_strings(); /* load all error messages */
method = TLSv1_2_server_method(); /* create new server-method instance */
ctx = SSL_CTX_new(method); /* create new context from method */
if (ctx == NULL) {
ERR_print_errors_fp(stderr);
abort();
}
if (SSL_CTX_set_cipher_list(ctx, "ADH-AES256-SHA256") != 1) {
ERR_print_errors_fp(stderr);
abort();
}
dh_ = DH_new();
if (dh_ == NULL) {
printf ("Alloc for dh failed\n");
ERR_print_errors_fp(stderr);
abort();
}
// Generate Parameters.
if (!DH_generate_parameters_ex (dh_, 1024, DH_GENERATOR_5, NULL)) {
printf ("Generate Parameters failed!!\n");
ERR_print_errors_fp(stderr);
abort();
ERR_print_errors_fp(stderr);
}
// Check if p and g parameters are safe to use.
if (!DH_check (dh_, &codes)) {
printf ("Cannot perform DH check\n");
ERR_print_errors_fp(stderr);
abort();
}
if (codes) {
printf ("DH codes: %x\n", codes);
ERR_print_errors_fp(stderr);
abort();
}
if (!DH_generate_key (dh_)) {
printf ("Failure in DH Generate key\n");
ERR_print_errors_fp(stderr);
abort();
}
SSL_CTX_set_tmp_dh(ctx, dh_);
DH_free(dh_);
return ctx;
}
void Servlet(SSL *ssl) /* Serve the connection -- threadable */
{
char buf[1024] = {0};
int sd, bytes;
const char *ServerResponse = "<\Body>\
<Name>aticleworld.com</Name>\
<year>1.5</year>\
<BlogType>Embedede and c\c++<\BlogType>\
<Author>amlendra<Author>\
<\Body>";
const char *cpValidMessage = "<Body>\
<UserName>aticle<UserName>\
<Password>123<Password>\
<\Body>";
if (SSL_accept(ssl) == FAIL) /* do SSL-protocol accept */
ERR_print_errors_fp(stderr);
else {
//ShowCerts(ssl); /* get any certificates */
bytes = SSL_read(ssl, buf, sizeof(buf)); /* get request */
buf[bytes] = '\0';
printf("Client msg: \"%s\"\n", buf);
if (bytes > 0) {
if (strcmp(cpValidMessage, buf) == 0) {
SSL_write(ssl, ServerResponse, strlen(ServerResponse)); /* send reply */
} else {
SSL_write(ssl, "Invalid Message",
strlen("Invalid Message")); /* send reply */
}
} else {
ERR_print_errors_fp(stderr);
}
}
sd = SSL_get_fd(ssl); /* get socket connection */
SSL_free(ssl); /* release SSL state */
close(sd); /* close connection */
}
int main(int count, char *Argc[]) {
SSL_CTX *ctx;
int server;
char *portnum;
// Only the root user has permission to run the server
if (!isRoot()) {
printf("This program must be run as root/sudo user!!");
exit(0);
}
if (count != 2) {
printf("Usage: %s <portnum>\n", Argc[0]);
exit(0);
}
// Initialize the SSL library
SSL_library_init();
portnum = Argc[1];
ctx = InitServerCTX(); /* initialize SSL */
server = OpenListener(atoi(portnum)); /* create server socket */
while (1) {
struct sockaddr_in addr;
socklen_t len = sizeof(addr);
SSL *ssl;
int client = accept(server, (struct sockaddr *)&addr,
&len); /* accept connection as usual */
printf("Connection: %s:%d\n", inet_ntoa(addr.sin_addr),
ntohs(addr.sin_port));
ssl = SSL_new(ctx); /* get new SSL state with context */
SSL_set_fd(ssl, client); /* set connection socket to SSL state */
Servlet(ssl); /* service connection */
}
close(server); /* close server socket */
SSL_CTX_free(ctx); /* release context */
}
The client program looks like this:
#include <errno.h>
#include <malloc.h>
#include <netdb.h>
#include <openssl/err.h>
#include <openssl/ssl.h>
#include <openssl/dh.h>
#include <resolv.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#define FAIL -1
int OpenConnection(const char *hostname, int port) {
int sd;
struct hostent *host;
struct sockaddr_in addr;
if ((host = gethostbyname(hostname)) == NULL) {
perror(hostname);
abort();
}
sd = socket(PF_INET, SOCK_STREAM, 0);
bzero(&addr, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_port = htons(port);
addr.sin_addr.s_addr = *(long *)(host->h_addr);
printf ("Connecting....\n");
if (connect(sd, (struct sockaddr *)&addr, sizeof(addr)) != 0) {
close(sd);
perror(hostname);
abort();
} else {
printf ("Connected...\n");
}
return sd;
}
SSL_CTX *InitCTX(void) {
SSL_METHOD *method;
SSL_CTX *ctx;
DH *dh_;
int codes = 0;
OpenSSL_add_all_algorithms(); /* Load cryptos, et.al. */
SSL_load_error_strings(); /* Bring in and register error messages */
method = TLSv1_2_client_method(); /* Create new client-method instance */
ctx = SSL_CTX_new(method); /* Create new context */
if (ctx == NULL) {
ERR_print_errors_fp(stderr);
abort();
}
if (SSL_CTX_set_cipher_list(ctx, "ADH-AES256-SHA256") != 1) {
printf("SSL_CTX_set_cipher_list failed\n");
SSL_CTX_free (ctx);
ctx = NULL;
ERR_print_errors_fp(stderr);
abort();
}
dh_ = DH_new();
if (dh_ == NULL) {
printf ("Alloc for dh failed\n");
}
// Generate Parameters.
if (!DH_generate_parameters_ex (dh_, 1024, DH_GENERATOR_5, NULL)) {
printf ("Generate Parameters failed!!\n");
}
// Check if p and g parameters are safe to use.
if (!DH_check (dh_, &codes)) {
printf ("Cannot perform DH check\n");
}
if (codes) {
printf ("DH codes: %x\n", codes);
}
if (!DH_generate_key (dh_)) {
printf ("Failure in DH Generate key\n");
//retVal = false;
//break;
}
SSL_CTX_set_tmp_dh(ctx, dh_);
DH_free(dh_);
return ctx;
}
void ShowCerts(SSL *ssl) {
X509 *cert;
char *line;
cert = SSL_get_peer_certificate(ssl); /* get the server's certificate */
if (cert != NULL) {
printf("Server certificates:\n");
line = X509_NAME_oneline(X509_get_subject_name(cert), 0, 0);
printf("Subject: %s\n", line);
free(line); /* free the malloc'ed string */
line = X509_NAME_oneline(X509_get_issuer_name(cert), 0, 0);
printf("Issuer: %s\n", line);
free(line); /* free the malloc'ed string */
X509_free(cert); /* free the malloc'ed certificate copy */
} else
printf("Info: No client certificates configured.\n");
}
int main(int count, char *strings[]) {
SSL_CTX *ctx;
int server;
SSL *ssl;
char buf[1024];
char acClientRequest[1024] = {0};
int bytes;
char *hostname, *portnum;
if (count != 3) {
printf("usage: %s <hostname> <portnum>\n", strings[0]);
exit(0);
}
SSL_library_init();
hostname = strings[1];
portnum = strings[2];
ctx = InitCTX();
server = OpenConnection(hostname, atoi(portnum));
ssl = SSL_new(ctx); /* create new SSL connection state */
SSL_set_fd(ssl, server); /* attach the socket descriptor */
printf ("Before connect\n");
if (SSL_connect(ssl) == FAIL) /* perform the connection */
ERR_print_errors_fp(stderr);
else {
char acUsername[16] = {0};
char acPassword[16] = {0};
const char *cpRequestMessage = "<Body>\
<UserName>%s<UserName>\
<Password>%s<Password>\
<\Body>";
printf("Enter the User Name : ");
scanf("%s", acUsername);
printf("\n\nEnter the Password : ");
scanf("%s", acPassword);
sprintf(acClientRequest, cpRequestMessage, acUsername,
acPassword); /* construct reply */
printf("\n\nConnected with %s encryption\n", SSL_get_cipher(ssl));
ShowCerts(ssl); /* get any certs */
SSL_write(ssl, acClientRequest,
strlen(acClientRequest)); /* encrypt & send message */
bytes = SSL_read(ssl, buf, sizeof(buf)); /* get reply & decrypt */
buf[bytes] = 0;
printf("Received: \"%s\"\n", buf);
SSL_free(ssl); /* release connection state */
}
close(server); /* close socket */
SSL_CTX_free(ctx); /* release context */
return 0;
}
When I compile and execute this code I get the following errors.
Server-side error:
140093403805344:error:140940E5:SSL routines:SSL3_READ_BYTES:ssl handshake failure:s3_pkt.c:996:
Client-side error:
140134052299200:error:141640B5:SSL routines:tls_construct_client_hello:no ciphers available:../ssl/statem/statem_clnt.c:800:
Can anyone help me out on how to establish a connection with anonymous DH?
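One likely explanation, assuming the client is built against OpenSSL 1.1.0 or newer (the ../ssl/statem/statem_clnt.c path in the client error suggests it is): anonymous (aNULL) cipher suites are filtered out at the default security level, so "ADH-AES256-SHA256" matches nothing and the ClientHello has no ciphers to offer. A minimal sketch of a workaround is to lower the security level in the cipher string, on both sides:
/* assumption: OpenSSL >= 1.1.0, where "@SECLEVEL=0" re-admits aNULL suites */
if (SSL_CTX_set_cipher_list(ctx, "ADH-AES256-SHA256:@SECLEVEL=0") != 1) {
ERR_print_errors_fp(stderr);
abort();
}
Keep in mind that anonymous DH performs no authentication at all, so it is only appropriate for testing.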

Output RTSP stream with ffmpeg

I'm attempting to use the ffmpeg libraries to send a video stream from my application to a media server (in this case wowza). I have been able to do the reverse and consume an RTSP stream but I'm having a few issues writing an RTSP stream.
I have found a few examples and attempted to utilise the relevant bits; the code is below, simplified as much as I can. I only want to send a single H264 bit stream to the wowza server, which it can handle.
I get an "Integer division by zero" exception in the av_interleaved_write_frame function whenever I try to send a packet. The exception looks like it's related to the packet timestamps not being set correctly. I've tried different values and can get past the exception by setting some contrived values, but then the write call fails.
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstring>
#include "stdafx.h"
#include "windows.h"
extern "C"
{
#include <libavcodec\avcodec.h>
#include <libavformat\avformat.h>
#include <libavformat\avio.h>
#include <libswscale\swscale.h>
}
using namespace std;
static int video_is_eof;
#define STREAM_DURATION 50.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
#define VIDEO_CODEC_ID CODEC_ID_H264
static int sws_flags = SWS_BICUBIC;
/* video output */
static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;
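/* Rescale a packet's pts/dts/duration from the codec time base to the stream time base, then submit it to the muxer. */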
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
/* rescale output packet timestamp values from codec to stream timebase */
pkt->pts = av_rescale_q_rnd(pkt->pts, *time_base, st->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt->dts = av_rescale_q_rnd(pkt->dts, *time_base, st->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt->duration = av_rescale_q(pkt->duration, *time_base, st->time_base);
pkt->stream_index = st->index;
// Exception occurs here.
return av_interleaved_write_frame(fmt_ctx, pkt);
}
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n", avcodec_get_name(codec_id));
exit(1);
}
st = avformat_new_stream(oc, *codec);
if (!st) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
st->id = oc->nb_streams - 1;
c = st->codec;
c->codec_id = codec_id;
c->bit_rate = 400000;
c->width = 352;
c->height = 288;
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
return st;
}
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
int ret;
AVCodecContext *c = st->codec;
/* open the codec */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open video codec: ");
exit(1);
}
/* allocate and init a re-usable frame */
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
/* Allocate the encoded raw picture. */
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
if (ret < 0) {
fprintf(stderr, "Could not allocate picture: ");
exit(1);
}
/* copy data and linesize picture pointers to frame */
*((AVPicture *)frame) = dst_picture;
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index, int width, int height)
{
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}
static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush)
{
int ret;
AVCodecContext *c = st->codec;
if (!flush) {
fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
}
AVPacket pkt = { 0 };
int got_packet;
av_init_packet(&pkt);
/* encode the image */
frame->pts = frame_count;
ret = avcodec_encode_video2(c, &pkt, flush ? NULL : frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame:");
exit(1);
}
/* If size is zero, it means the image was buffered. */
if (got_packet) {
ret = write_frame(oc, &c->time_base, st, &pkt);
}
else {
if (flush) {
video_is_eof = 1;
}
ret = 0;
}
if (ret < 0) {
fprintf(stderr, "Error while writing video frame: ");
exit(1);
}
frame_count++;
}
static void close_video(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
av_free(src_picture.data[0]);
av_free(dst_picture.data[0]);
av_frame_free(&frame);
}
int _tmain(int argc, _TCHAR* argv[])
{
printf("starting...\n");
const char *filename = "rtsp://test:password#192.168.33.19:1935/ffmpeg/0";
AVOutputFormat *fmt;
AVFormatContext *oc;
AVStream *video_st;
AVCodec *video_codec;
double video_time;
int flush, ret;
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
avformat_network_init();
AVOutputFormat* oFmt = av_oformat_next(NULL);
while (oFmt) {
if (oFmt->video_codec == VIDEO_CODEC_ID) {
break;
}
oFmt = av_oformat_next(oFmt);
}
if (!oFmt) {
printf("Could not find the required output format.\n");
exit(1);
}
/* allocate the output media context */
avformat_alloc_output_context2(&oc, oFmt, "rtsp", filename);
if (!oc) {
printf("Could not set the output media context.\n");
exit(1);
}
fmt = oc->oformat;
if (!fmt) {
printf("Could not create the output format.\n");
exit(1);
}
video_st = NULL;
cout << "Codec = " << avcodec_get_name(fmt->video_codec) << endl;
if (fmt->video_codec != AV_CODEC_ID_NONE)
{
video_st = add_stream(oc, &video_codec, fmt->video_codec);
}
/* Now that all the parameters are set, we can open the video codec and allocate the necessary encode buffers. */
if (video_st) {
open_video(oc, video_codec, video_st);
}
av_dump_format(oc, 0, filename, 1);
char errorBuff[80];
if (!(fmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open outfile '%s': %s", filename, av_make_error_string(errorBuff, 80, ret));
return 1;
}
}
flush = 0;
while (video_st && !video_is_eof) {
/* Compute current video time. */
video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;
if (!flush && (!video_st || video_time >= STREAM_DURATION)) {
flush = 1;
}
if (video_st && !video_is_eof) {
write_video_frame(oc, video_st, flush);
}
}
if (video_st) {
close_video(oc, video_st);
}
if ((fmt->flags & AVFMT_NOFILE)) {
avio_close(oc->pb);
}
avformat_free_context(oc);
printf("finished.\n");
getchar();
return 0;
}
Does anyone have any insights about how the packet timestamps can be successfully set?
I solved the integer division by zero by building ffmpeg on my Windows instance and debugging the av_interleaved_write_frame call. Turns out it was the pts not being set on the video stream object that was causing the exception.
Adding the line below to the while loop in the main function fixed the problem:
video_st->pts.val += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
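In context, a sketch of the original while loop with that fix applied (same variables as the question's main function):
while (video_st && !video_is_eof) {
video_time = video_st->pts.val * av_q2d(video_st->time_base);
if (!flush && video_time >= STREAM_DURATION)
flush = 1;
write_video_frame(oc, video_st, flush);
/* the fix: advance the stream's pts by one codec-time-base tick */
video_st->pts.val += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
}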
Here's a sample that works to get a H264 encoded dummy stream to a Wowza server via ffmpeg's RTSP pipeline.
// Roughly based on: https://ffmpeg.org/doxygen/trunk/muxing_8c-source.html
#include <chrono>
#include <thread>
#include <tchar.h>
extern "C"
{
#include <libavcodec\avcodec.h>
#include <libavformat\avformat.h>
#include <libavformat\avio.h>
#include <libswscale\swscale.h>
#include <libavutil\time.h>
}
#pragma comment(lib,"libavformat/libavformat.a")
#pragma comment(lib,"libavcodec/libavcodec.a")
#pragma comment(lib,"libavutil/libavutil.a")
#pragma comment(lib,"libswscale/libswscale.a")
#pragma comment(lib,"x264.lib")
#pragma comment(lib,"libswresample/libswresample.a")
using namespace std;
static int video_is_eof;
#define STREAM_DURATION 20
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */ //AV_PIX_FMT_NV12;
#define VIDEO_CODEC_ID CODEC_ID_H264
/* video output */
static AVFrame *frame;
static AVPicture src_picture, dst_picture;
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
av_log(NULL, AV_LOG_ERROR, "Could not find encoder for '%s'.\n", avcodec_get_name(codec_id));
}
else {
st = avformat_new_stream(oc, *codec);
if (!st) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate stream.\n");
}
else {
st->id = oc->nb_streams - 1;
st->time_base.den = st->pts.den = 90000;
st->time_base.num = st->pts.num = 1;
c = st->codec;
c->codec_id = codec_id;
c->bit_rate = 400000;
c->width = 352;
c->height = 288;
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
}
}
return st;
}
static int open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
int ret;
AVCodecContext *c = st->codec;
/* open the codec */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n", avcodec_get_name(c->codec_id));
}
else {
/* allocate and init a re-usable frame */
frame = av_frame_alloc();
if (!frame) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
ret = -1;
}
else {
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
/* Allocate the encoded raw picture. */
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate picture.\n");
}
else {
/* copy data and linesize picture pointers to frame */
*((AVPicture *)frame) = dst_picture;
}
}
}
return ret;
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index, int width, int height)
{
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}
static int write_video_frame(AVFormatContext *oc, AVStream *st, int frameCount)
{
int ret = 0;
AVCodecContext *c = st->codec;
fill_yuv_image(&dst_picture, frameCount, c->width, c->height);
AVPacket pkt = { 0 };
int got_packet;
av_init_packet(&pkt);
/* encode the image */
frame->pts = frameCount;
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error encoding video frame.\n");
}
else {
if (got_packet) {
pkt.stream_index = st->index;
pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, st->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
ret = av_write_frame(oc, &pkt);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while writing video frame.\n");
}
}
}
return ret;
}
int _tmain(int argc, _TCHAR* argv[])
{
printf("starting...\n");
const char *url = "rtsp://test:password#192.168.33.19:1935/ffmpeg/0";
//const char *url = "rtsp://192.168.33.19:1935/ffmpeg/0";
AVFormatContext *outContext;
AVStream *video_st;
AVCodec *video_codec;
int ret = 0, frameCount = 0;
av_log_set_level(AV_LOG_DEBUG);
//av_log_set_level(AV_LOG_TRACE);
av_register_all();
avformat_network_init();
avformat_alloc_output_context2(&outContext, NULL, "rtsp", url);
if (!outContext) {
av_log(NULL, AV_LOG_FATAL, "Could not allocate an output context for '%s'.\n", url);
goto end;
}
if (!outContext->oformat) {
av_log(NULL, AV_LOG_FATAL, "Could not create the output format for '%s'.\n", url);
goto end;
}
video_st = add_stream(outContext, &video_codec, VIDEO_CODEC_ID);
/* Now that all the parameters are set, we can open the video codec and allocate the necessary encode buffers. */
if (video_st) {
av_log(NULL, AV_LOG_DEBUG, "Video stream codec %s.\n ", avcodec_get_name(video_st->codec->codec_id));
ret = open_video(outContext, video_codec, video_st);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Open video stream failed.\n");
goto end;
}
}
else {
av_log(NULL, AV_LOG_FATAL, "Add video stream for the codec '%s' failed.\n", avcodec_get_name(VIDEO_CODEC_ID));
goto end;
}
av_dump_format(outContext, 0, url, 1);
ret = avformat_write_header(outContext, NULL);
if (ret != 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to connect to RTSP server for '%s'.\n", url);
goto end;
}
printf("Press any key to start streaming...\n");
getchar();
auto startSend = std::chrono::system_clock::now();
while (video_st) {
frameCount++;
auto startFrame = std::chrono::system_clock::now();
ret = write_video_frame(outContext, video_st, frameCount);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Write video frame failed.\n", url);
goto end;
}
auto streamDuration = std::chrono::duration_cast<chrono::milliseconds>(std::chrono::system_clock::now() - startSend).count();
printf("Elapsed time %ldms, video stream pts %ld.\n", streamDuration, video_st->pts.val);
if (streamDuration / 1000.0 > STREAM_DURATION) {
break;
}
else {
auto frameDuration = std::chrono::duration_cast<chrono::milliseconds>(std::chrono::system_clock::now() - startFrame).count();
std::this_thread::sleep_for(std::chrono::milliseconds((long)(1000.0 / STREAM_FRAME_RATE - frameDuration)));
}
}
if (video_st) {
avcodec_close(video_st->codec);
av_free(src_picture.data[0]);
av_free(dst_picture.data[0]);
av_frame_free(&frame);
}
avformat_free_context(outContext);
end:
printf("finished.\n");
getchar();
return 0;
}

How to catch stdout stream in ffmpeg then pipe it to v4l2loopback

I'm trying to pipe my h264 stream to ffmpeg and then to my v4l2loopback device. The problem is that I'm fairly new to Linux, so I just can't get it working.
The stream can be output to stdout, but I do not know how to catch it with ffmpeg and then pipe it on to my v4l2loopback device.
Does anybody know how this could be done or maybe a pointer on how to solve it?
This is the capture program:
PS: you can find the options for the capture program near the bottom of the code.
/*
* V4L2 video capture example, modified by Derek Molloy for the Logitech C920 camera
* Modifications, added the -F mode for H264 capture and associated help detail
* www.derekmolloy.ie
*
* V4L2 video capture example
*
* This program can be used and distributed without restrictions.
*
* This program is provided with the V4L2 API
* see http://linuxtv.org/docs.php for more information
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <getopt.h> /* getopt_long() */
#include <fcntl.h> /* low-level i/o */
#include <unistd.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#define CLEAR(x) memset(&(x), 0, sizeof(x))
enum io_method {
IO_METHOD_READ,
IO_METHOD_MMAP,
IO_METHOD_USERPTR,
};
struct buffer {
void *start;
size_t length;
};
static char *dev_name;
static enum io_method io = IO_METHOD_MMAP;
static int fd = -1;
struct buffer *buffers;
static unsigned int n_buffers;
static int out_buf;
static int force_format = 0;
static int frame_count = 100;
static void errno_exit(const char *s)
{
fprintf(stderr, "%s error %d, %s\n", s, errno, strerror(errno));
exit(EXIT_FAILURE);
}
static int xioctl(int fh, int request, void *arg)
{
int r;
do {
r = ioctl(fh, request, arg);
} while (-1 == r && EINTR == errno);
return r;
}
static void process_image(const void *p, int size)
{
if (out_buf)
fwrite(p, size, 1, stdout);
fflush(stderr);
fprintf(stderr, ".");
fflush(stdout);
}
static int read_frame(void)
{
struct v4l2_buffer buf;
unsigned int i;
switch (io) {
case IO_METHOD_READ:
if (-1 == read(fd, buffers[0].start, buffers[0].length)) {
switch (errno) {
case EAGAIN:
return 0;
case EIO:
/* Could ignore EIO, see spec. */
/* fall through */
default:
errno_exit("read");
}
}
process_image(buffers[0].start, buffers[0].length);
break;
case IO_METHOD_MMAP:
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf)) {
switch (errno) {
case EAGAIN:
return 0;
case EIO:
/* Could ignore EIO, see spec. */
/* fall through */
default:
errno_exit("VIDIOC_DQBUF");
}
}
assert(buf.index < n_buffers);
process_image(buffers[buf.index].start, buf.bytesused);
if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
errno_exit("VIDIOC_QBUF");
break;
case IO_METHOD_USERPTR:
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_USERPTR;
if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf)) {
switch (errno) {
case EAGAIN:
return 0;
case EIO:
/* Could ignore EIO, see spec. */
/* fall through */
default:
errno_exit("VIDIOC_DQBUF");
}
}
for (i = 0; i < n_buffers; ++i)
if (buf.m.userptr == (unsigned long)buffers[i].start
&& buf.length == buffers[i].length)
break;
assert(i < n_buffers);
process_image((void *)buf.m.userptr, buf.bytesused);
if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
errno_exit("VIDIOC_QBUF");
break;
}
return 1;
}
static void mainloop(void)
{
unsigned int count;
unsigned int loopIsInfinite = 0;
if (frame_count == 0) loopIsInfinite = 1; //infinite loop
count = frame_count;
while ((count-- > 0) || loopIsInfinite) {
for (;;) {
fd_set fds;
struct timeval tv;
int r;
FD_ZERO(&fds);
FD_SET(fd, &fds);
/* Timeout. */
tv.tv_sec = 2;
tv.tv_usec = 0;
r = select(fd + 1, &fds, NULL, NULL, &tv);
if (-1 == r) {
if (EINTR == errno)
continue;
errno_exit("select");
}
if (0 == r) {
fprintf(stderr, "select timeout\n");
exit(EXIT_FAILURE);
}
if (read_frame())
break;
/* EAGAIN - continue select loop. */
}
}
}
static void stop_capturing(void)
{
enum v4l2_buf_type type;
switch (io) {
case IO_METHOD_READ:
/* Nothing to do. */
break;
case IO_METHOD_MMAP:
case IO_METHOD_USERPTR:
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (-1 == xioctl(fd, VIDIOC_STREAMOFF, &type))
errno_exit("VIDIOC_STREAMOFF");
break;
}
}
static void start_capturing(void)
{
unsigned int i;
enum v4l2_buf_type type;
switch (io) {
case IO_METHOD_READ:
/* Nothing to do. */
break;
case IO_METHOD_MMAP:
for (i = 0; i < n_buffers; ++i) {
struct v4l2_buffer buf;
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = i;
if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
errno_exit("VIDIOC_QBUF");
}
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (-1 == xioctl(fd, VIDIOC_STREAMON, &type))
errno_exit("VIDIOC_STREAMON");
break;
case IO_METHOD_USERPTR:
for (i = 0; i < n_buffers; ++i) {
struct v4l2_buffer buf;
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_USERPTR;
buf.index = i;
buf.m.userptr = (unsigned long)buffers[i].start;
buf.length = buffers[i].length;
if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
errno_exit("VIDIOC_QBUF");
}
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (-1 == xioctl(fd, VIDIOC_STREAMON, &type))
errno_exit("VIDIOC_STREAMON");
break;
}
}
static void uninit_device(void)
{
unsigned int i;
switch (io) {
case IO_METHOD_READ:
free(buffers[0].start);
break;
case IO_METHOD_MMAP:
for (i = 0; i < n_buffers; ++i)
if (-1 == munmap(buffers[i].start, buffers[i].length))
errno_exit("munmap");
break;
case IO_METHOD_USERPTR:
for (i = 0; i < n_buffers; ++i)
free(buffers[i].start);
break;
}
free(buffers);
}
static void init_read(unsigned int buffer_size)
{
buffers = calloc(1, sizeof(*buffers));
if (!buffers) {
fprintf(stderr, "Out of memory\n");
exit(EXIT_FAILURE);
}
buffers[0].length = buffer_size;
buffers[0].start = malloc(buffer_size);
if (!buffers[0].start) {
fprintf(stderr, "Out of memory\n");
exit(EXIT_FAILURE);
}
}
static void init_mmap(void)
{
struct v4l2_requestbuffers req;
CLEAR(req);
req.count = 4;
req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
req.memory = V4L2_MEMORY_MMAP;
if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
if (EINVAL == errno) {
fprintf(stderr, "%s does not support "
"memory mapping\n", dev_name);
exit(EXIT_FAILURE);
} else {
errno_exit("VIDIOC_REQBUFS");
}
}
if (req.count < 2) {
fprintf(stderr, "Insufficient buffer memory on %s\n",
dev_name);
exit(EXIT_FAILURE);
}
buffers = calloc(req.count, sizeof(*buffers));
if (!buffers) {
fprintf(stderr, "Out of memory\n");
exit(EXIT_FAILURE);
}
for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
struct v4l2_buffer buf;
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = n_buffers;
if (-1 == xioctl(fd, VIDIOC_QUERYBUF, &buf))
errno_exit("VIDIOC_QUERYBUF");
buffers[n_buffers].length = buf.length;
buffers[n_buffers].start =
mmap(NULL /* start anywhere */,
buf.length,
PROT_READ | PROT_WRITE /* required */,
MAP_SHARED /* recommended */,
fd, buf.m.offset);
if (MAP_FAILED == buffers[n_buffers].start)
errno_exit("mmap");
}
}
static void init_userp(unsigned int buffer_size)
{
struct v4l2_requestbuffers req;
CLEAR(req);
req.count = 4;
req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
req.memory = V4L2_MEMORY_USERPTR;
if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
if (EINVAL == errno) {
fprintf(stderr, "%s does not support "
"user pointer i/o\n", dev_name);
exit(EXIT_FAILURE);
} else {
errno_exit("VIDIOC_REQBUFS");
}
}
buffers = calloc(4, sizeof(*buffers));
if (!buffers) {
fprintf(stderr, "Out of memory\n");
exit(EXIT_FAILURE);
}
for (n_buffers = 0; n_buffers < 4; ++n_buffers) {
buffers[n_buffers].length = buffer_size;
buffers[n_buffers].start = malloc(buffer_size);
if (!buffers[n_buffers].start) {
fprintf(stderr, "Out of memory\n");
exit(EXIT_FAILURE);
}
}
}
static void init_device(void)
{
struct v4l2_capability cap;
struct v4l2_cropcap cropcap;
struct v4l2_crop crop;
struct v4l2_format fmt;
unsigned int min;
if (-1 == xioctl(fd, VIDIOC_QUERYCAP, &cap)) {
if (EINVAL == errno) {
fprintf(stderr, "%s is no V4L2 device\n",
dev_name);
exit(EXIT_FAILURE);
} else {
errno_exit("VIDIOC_QUERYCAP");
}
}
if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
fprintf(stderr, "%s is no video capture device\n",
dev_name);
exit(EXIT_FAILURE);
}
switch (io) {
case IO_METHOD_READ:
if (!(cap.capabilities & V4L2_CAP_READWRITE)) {
fprintf(stderr, "%s does not support read i/o\n",
dev_name);
exit(EXIT_FAILURE);
}
break;
case IO_METHOD_MMAP:
case IO_METHOD_USERPTR:
if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
fprintf(stderr, "%s does not support streaming i/o\n",
dev_name);
exit(EXIT_FAILURE);
}
break;
}
/* Select video input, video standard and tune here. */
CLEAR(cropcap);
cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (0 == xioctl(fd, VIDIOC_CROPCAP, &cropcap)) {
crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
crop.c = cropcap.defrect; /* reset to default */
if (-1 == xioctl(fd, VIDIOC_S_CROP, &crop)) {
switch (errno) {
case EINVAL:
/* Cropping not supported. */
break;
default:
/* Errors ignored. */
break;
}
}
} else {
/* Errors ignored. */
}
CLEAR(fmt);
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fprintf(stderr, "Force Format %d\n", force_format);
if (force_format) {
if (force_format==2){
fmt.fmt.pix.width = 1920;
fmt.fmt.pix.height = 1080;
fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
}
else if(force_format==1){
fmt.fmt.pix.width = 640;
fmt.fmt.pix.height = 480;
fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
}
if (-1 == xioctl(fd, VIDIOC_S_FMT, &fmt))
errno_exit("VIDIOC_S_FMT");
/* Note VIDIOC_S_FMT may change width and height. */
} else {
/* Preserve original settings as set by v4l2-ctl for example */
if (-1 == xioctl(fd, VIDIOC_G_FMT, &fmt))
errno_exit("VIDIOC_G_FMT");
}
/* Buggy driver paranoia. */
min = fmt.fmt.pix.width * 2;
if (fmt.fmt.pix.bytesperline < min)
fmt.fmt.pix.bytesperline = min;
min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;
if (fmt.fmt.pix.sizeimage < min)
fmt.fmt.pix.sizeimage = min;
switch (io) {
case IO_METHOD_READ:
init_read(fmt.fmt.pix.sizeimage);
break;
case IO_METHOD_MMAP:
init_mmap();
break;
case IO_METHOD_USERPTR:
init_userp(fmt.fmt.pix.sizeimage);
break;
}
}
static void close_device(void)
{
if (-1 == close(fd))
errno_exit("close");
fd = -1;
}
static void open_device(void)
{
struct stat st;
if (-1 == stat(dev_name, &st)) {
fprintf(stderr, "Cannot identify '%s': %d, %s\n",
dev_name, errno, strerror(errno));
exit(EXIT_FAILURE);
}
if (!S_ISCHR(st.st_mode)) {
fprintf(stderr, "%s is no device\n", dev_name);
exit(EXIT_FAILURE);
}
fd = open(dev_name, O_RDWR /* required */ | O_NONBLOCK, 0);
if (-1 == fd) {
fprintf(stderr, "Cannot open '%s': %d, %s\n",
dev_name, errno, strerror(errno));
exit(EXIT_FAILURE);
}
}
static void usage(FILE *fp, int argc, char **argv)
{
fprintf(fp,
"Usage: %s [options]\n\n"
"Version 1.3\n"
"Options:\n"
"-d | --device name Video device name [%s]\n"
"-h | --help Print this message\n"
"-m | --mmap Use memory mapped buffers [default]\n"
"-r | --read Use read() calls\n"
"-u | --userp Use application allocated buffers\n"
"-o | --output Outputs stream to stdout\n"
"-f | --format Force format to 640x480 YUYV\n"
"-F | --formatH264 Force format to 1920x1080 H264\n"
"-c | --count Number of frames to grab [%i] - use 0 for infinite\n"
"\n"
"Example usage: capture -F -o -c 300 > output.raw\n"
"Captures 300 frames of H264 at 1920x1080 - use raw2mpg4 script to convert to mpg4\n",
argv[0], dev_name, frame_count);
}
static const char short_options[] = "d:hmruofFc:";
static const struct option
long_options[] = {
{ "device", required_argument, NULL, 'd' },
{ "help", no_argument, NULL, 'h' },
{ "mmap", no_argument, NULL, 'm' },
{ "read", no_argument, NULL, 'r' },
{ "userp", no_argument, NULL, 'u' },
{ "output", no_argument, NULL, 'o' },
{ "format", no_argument, NULL, 'f' },
{ "formatH264", no_argument, NULL, 'F' },
{ "count", required_argument, NULL, 'c' },
{ 0, 0, 0, 0 }
};
int main(int argc, char **argv)
{
dev_name = "/dev/video0";
for (;;) {
int idx;
int c;
c = getopt_long(argc, argv,
short_options, long_options, &idx);
if (-1 == c)
break;
switch (c) {
case 0: /* getopt_long() flag */
break;
case 'd':
dev_name = optarg;
break;
case 'h':
usage(stdout, argc, argv);
exit(EXIT_SUCCESS);
case 'm':
io = IO_METHOD_MMAP;
break;
case 'r':
io = IO_METHOD_READ;
break;
case 'u':
io = IO_METHOD_USERPTR;
break;
case 'o':
out_buf++;
break;
case 'f':
force_format=1;
break;
case 'F':
force_format=2;
break;
case 'c':
errno = 0;
frame_count = strtol(optarg, NULL, 0);
if (errno)
errno_exit(optarg);
break;
default:
usage(stderr, argc, argv);
exit(EXIT_FAILURE);
}
}
open_device();
init_device();
start_capturing();
mainloop();
stop_capturing();
uninit_device();
close_device();
fprintf(stderr, "\n");
return 0;
}
It's a modified version of a V4L2 video capture example.
I know that if I had output the stream to a file, I would have to run this command to convert the raw format to mp4:
ffmpeg -f h264 -i output.raw -vcodec copy output.mp4
And the v4l2loopback program I'm using is found here:
https://github.com/umlaeute/v4l2loopback
------------------Update------------------
Okay. So I got the pipe from the capture program to ffmpeg working. It captures, decodes the h264, and I can write it to an mp4 file with this command:
./capture -F -d /dev/video0 -o | ffmpeg -f h264 -i - -vcodec copy out.mp4
Now I am trying to get the last pipe working with this command:
./capture -F -d /dev/video0 -o | ffmpeg -f h264 -i - -vcodec copy -f mp4 - | gst-launch-0.10 -v fdsrc ! v4l2sink device=/dev/video3
I get these errors:
muxer does not support non seekable output
Could not write header for output file #0 (incorrect codec parameters ?): Invalid argument
Any ideas?
In your last command you are piping an MP4 to GStreamer. See the -f mp4 - part:
./capture -F -d /dev/video0 -o | ffmpeg -f h264 -i - -vcodec copy -f mp4 - | gst-launch-0.10 -v fdsrc ! v4l2sink device=/dev/video3
What you want to do is pipe the H.264 stream inside the MP4 instead.
Try replacing -f mp4 - with -f h264 -.
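With that substitution the full command becomes:
./capture -F -d /dev/video0 -o | ffmpeg -f h264 -i - -vcodec copy -f h264 - | gst-launch-0.10 -v fdsrc ! v4l2sink device=/dev/video3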
In fact you could probably skip the creation of an MP4 entirely and just do:
./capture -F -d /dev/video0 -o | gst-launch-0.10 -v fdsrc ! v4l2sink device=/dev/video3
since the -F option forces H.264.
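Alternatively (an assumption, not part of the original answer): builds of ffmpeg that include the v4l2 output device can write to the loopback device directly, decoding the H.264 to raw video on the way, which sidesteps GStreamer entirely:
./capture -F -d /dev/video0 -o | ffmpeg -f h264 -i - -f v4l2 -pix_fmt yuv420p /dev/video3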

Error in a simple gstreamer code to play an "mp3" audio file

#include <gst/gst.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[])
{
GstElement *element;
GstElement *element1;
GstElement *element2;
GstElement *pipeline;
gst_init(&argc, &argv);
if(argc != 2)
{
g_print("usage: %s argv[1] \n", argv[0]);
exit(EXIT_FAILURE);
}
/* Creating a new pipeline */
pipeline = gst_pipeline_new("pipeline");
/* creating a new *.mp3 source element */
element = gst_element_factory_make("filesrc", "source");
if(element != NULL)
g_object_set(G_OBJECT(element), "location", argv[1], NULL);
else
{
g_print("Failed to create element \n");
exit(EXIT_FAILURE);
}
/* creating a new *.mp3 de-coder element */
element1 = gst_element_factory_make("decodebin2", "decoder");
if(element1 != NULL)
g_print("element1 success \n");
else
{
g_print("Failed to create element1 \n");
exit(EXIT_FAILURE);
}
/* creating a new *.mp3 sink element */
element2 = gst_element_factory_make("autoaudiosink", "play_audio");
if(element2 != NULL)
g_print("element2 success \n");
else
{
g_print("Failed to create element2 \n");
exit(EXIT_FAILURE);
}
/* Adding elements to pipeline */
gst_bin_add_many(GST_BIN(pipeline), element, element1, element2, NULL);
/* Linking src to sink element */
gst_element_link_many(element, element1, element2, NULL);
/* start playing */
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
while(gst_bin_iterate_recurse(GST_BIN(pipeline)));
/* stop playing */
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_NULL);
/* un-referencing all the elements in the pipeline */
gst_object_unref(GST_OBJECT(pipeline));
return 0;
}
1) Compilation step: gcc gst6.c -o gst6 `pkg-config --cflags --libs gstreamer-0.10`;
it compiles without any warnings or errors.
2) Exported the PKG_CONFIG_PATH env variable, i.e. export PKG_CONFIG_PATH=/usr/lib/pkgconfig.
3) Execution step: ./gst6 /home/user/Downloads/*.mp3
4) Output: element1 success,
element2 success
killed.
Unable to play the audio file. Please let me know what's wrong in my program. This is my first program.
This line:
while(gst_bin_iterate_recurse(GST_BIN(pipeline)));
does not make any sense: it requests an iterator in a loop but neither uses it nor frees it.
If you want to wait until playing is finished, you probably need to use the GstBus functions gst_bus_have_pending and gst_bus_pop to handle messages.
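A minimal sketch of that approach (assuming GStreamer 0.10, as used in the question, and using gst_bus_timed_pop_filtered rather than polling with gst_bus_have_pending/gst_bus_pop) replaces the while loop with a blocking wait on the bus. Note also that decodebin2 has dynamic src pads, so the gst_element_link_many() call above cannot link it to autoaudiosink at construction time; a "pad-added" handler is needed for that, as in the qtdemux example earlier on this page.
/* Block until an error or end-of-stream message arrives on the bus. */
GstBus *bus = gst_element_get_bus (GST_ELEMENT (pipeline));
GstMessage *msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
if (msg != NULL) {
if (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_ERROR)
g_print ("an error occurred while playing\n");
gst_message_unref (msg);
}
gst_object_unref (bus);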
