GStreamer: apply audio delay to a live stream

I would like to add an audio delay (up to 10-15 seconds) to a live stream sent with GStreamer's rtmpsink.
This is my pipeline:
gst-launch-1.0 -vvv flvmux streamable=true name=mux ! rtmpsink location="rtmp://localhost/live" \
souphttpsrc location="http://<url video h264>" ! tsdemux ! h264parse ! queue ! mux. \
souphttpsrc location="https://<url audio aac>" ! icydemux ! aacparse ! queue ! mux.
Acting directly on the pipeline, I tried adding "queue max-size-buffers=0 max-size-time=0 max-size-bytes=0 min-threshold-time=15000000000" after aacparse, but the entire stream is blocked that way.
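A hedged reading of why that variant stalls (untested): with min-threshold-time the audio queue releases nothing for the first 15 seconds, flvmux waits for data on both of its pads before writing anything, and meanwhile the plain queue on the video branch fills up at its default one-second limit and back-pressures the video source. Giving the video queue room for the same 15 seconds at least lets both sources keep running while the delay builds up, along these lines (values are placeholders):

gst-launch-1.0 -v flvmux streamable=true name=mux ! rtmpsink location="rtmp://localhost/live" \
souphttpsrc location="http://<url video h264>" ! tsdemux ! h264parse ! \
queue max-size-buffers=0 max-size-bytes=0 max-size-time=20000000000 ! mux. \
souphttpsrc location="https://<url audio aac>" ! icydemux ! aacparse ! \
queue max-size-buffers=0 max-size-bytes=0 max-size-time=0 min-threshold-time=15000000000 ! mux.

Note that a queue only delays when data leaves it; the buffer timestamps are unchanged, so the muxed output keeps its original A/V alignment and is merely started later.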
Acting in C, I tried to modify the PTS timestamps on the buffers flowing out of aacparse's src pad, but no matter what value I set there is no effect:
gst_pad_add_probe(line->aacparse_srcpad, GST_PAD_PROBE_TYPE_BUFFER, cb_have_data_audio, NULL, NULL);
..
static GstPadProbeReturn
cb_have_data_audio (GstPad *pad, GstPadProbeInfo *info, gpointer user_data)
{
GstMapInfo map;
GstBuffer *buffer;
buffer = GST_PAD_PROBE_INFO_BUFFER (info);
buffer = gst_buffer_make_writable (buffer);
if (buffer == NULL)
return GST_PAD_PROBE_OK;
GstClockTime pts = GST_BUFFER_PTS(buffer);
GST_BUFFER_PTS(buffer) = pts + 100000000000;
GST_PAD_PROBE_INFO_DATA (info) = buffer;
return GST_PAD_PROBE_OK;
}
I also tried to use gst_pad_set_offset(), but again there is no effect:
gst_pad_set_offset(line->aacparse_srcpad, 1000000000);
Even playing with the flvmux pads and setting "streamable=false" has no effect. What would be the right approach to add a delay only to the audio?

#include <gst/gst.h>
#include<stdio.h>
static GMainLoop *loop;
static gint counter;
static GstBus *bus;
static gboolean prerolled = FALSE;
static GstPad *sinkpad,*ident_sink;
static GstClockTime ptimestamp=(guint64)1000;
static GstClockTime dtimestamp= 0;
static GstPadProbeReturn
display_data (GstPad *pad,
GstBuffer *apsInfo,
gpointer user_data)
{
// apsInfo = gst_buffer_ref(apsInfo);
// apsInfo = gst_buffer_make_writable(apsInfo);
// int fps = 30;
// ptimestamp += gst_util_uint64_scale_int (1, GST_SECOND, fps);
// int a=GST_BUFFER_PTS (apsInfo) ;
// int b=GST_BUFFER_DTS (apsInfo) ;
// GST_BUFFER_PTS (apsInfo)=ptimestamp;
// GST_BUFFER_DTS (apsInfo)=ptimestamp;;
// //printf("%d %d \n",a,b);
// GST_BUFFER_DURATION (apsInfo) = gst_util_uint64_scale_int (1, GST_SECOND, fps);
}
static GstPadProbeReturn
change_time (GstPad *pad,
GstBuffer *apsInfo,
gpointer user_data)
{
//printf("change ing time");
//apsInfo = gst_buffer_ref(apsInfo);
//apsInfo = gst_buffer_make_writable(apsInfo);
/* When the commented lines above are enabled, gst_buffer_is_writable() reports the buffer as
writable, but the timestamp changes below have no effect on the video; when those lines are
removed, the buffer is reported as not writable, yet the changes do affect the video.
*/
GST_BUFFER_FLAG_SET (apsInfo, GST_BUFFER_FLAG_DISCONT);
int fps = 30;
dtimestamp += gst_util_uint64_scale_int (1, GST_SECOND, fps);
ptimestamp += gst_util_uint64_scale_int (1, GST_SECOND, fps);
int a=GST_BUFFER_PTS (apsInfo) = ptimestamp;
int b=GST_BUFFER_DTS (apsInfo) = dtimestamp;
printf("%d %d \n",a,b);
GST_BUFFER_DURATION (apsInfo) = gst_util_uint64_scale_int (1, GST_SECOND, fps);
}
static void
dec_counter (GstElement * pipeline)
{
if (prerolled)
return;
if (g_atomic_int_dec_and_test (&counter)) {
/* all probes blocked and no-more-pads signaled, post
* message on the bus. */
prerolled = TRUE;
gst_bus_post (bus, gst_message_new_application (
GST_OBJECT_CAST (pipeline),
gst_structure_new_empty ("ExPrerolled")));
}
}
/* called when a source pad of uridecodebin is blocked */
static GstPadProbeReturn
cb_blocked (GstPad *pad,
GstPadProbeInfo *info,
gpointer user_data)
{
GstElement *pipeline = GST_ELEMENT (user_data);
if (prerolled)
return GST_PAD_PROBE_REMOVE;
dec_counter (pipeline);
return GST_PAD_PROBE_OK;
}
/* called when uridecodebin has a new pad */
static void
cb_pad_added (GstElement *element,
GstPad *pad,
gpointer user_data)
{
GstElement *pipeline = GST_ELEMENT (user_data);
if (prerolled)
return;
g_atomic_int_inc (&counter);
gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM,
(GstPadProbeCallback) cb_blocked, pipeline, NULL);
/* try to link to the video pad */
gst_pad_link (pad, sinkpad);
}
/* called when uridecodebin has created all pads */
static void
cb_no_more_pads (GstElement *element,
gpointer user_data)
{
GstElement *pipeline = GST_ELEMENT (user_data);
if (prerolled)
return;
dec_counter (pipeline);
}
/* called when a new message is posted on the bus */
static void
cb_message (GstBus *bus,
GstMessage *message,
gpointer user_data)
{
GstElement *pipeline = GST_ELEMENT (user_data);
switch (GST_MESSAGE_TYPE (message)) {
case GST_MESSAGE_ERROR:
g_print ("we received an error!\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_EOS:
g_print ("we reached EOS\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_APPLICATION:
{
if (gst_message_has_name (message, "ExPrerolled")) {
/* it's our message */
g_print ("we are all prerolled, do seek\n");
gst_element_seek (pipeline, 1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
GST_SEEK_TYPE_SET, 0,
GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);
gst_element_set_state (pipeline, GST_STATE_PLAYING);
}
break;
}
default:
break;
}
}
gint
main (gint argc,
gchar *argv[])
{
GstElement *pipeline, *src, *csp, *vs, *sink ,*idelem;
/* init GStreamer */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* build */
pipeline = gst_pipeline_new ("my-pipeline");
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
gst_bus_add_signal_watch (bus);
g_signal_connect (bus, "message", (GCallback) cb_message,
pipeline);
src = gst_element_factory_make ("uridecodebin", "src");
if (src == NULL)
g_error ("Could not create 'uridecodebin' element");
g_object_set (src, "uri", "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4", NULL);
csp = gst_element_factory_make ("videoconvert", "csp");
if (csp == NULL)
g_error ("Could not create 'videoconvert' element");
vs = gst_element_factory_make ("videoscale", "vs");
if (vs == NULL)
g_error ("Could not create 'videoscale' element");
idelem = gst_element_factory_make ("identity", "identity-elem");
if (idelem == NULL)
g_error ("Could not create 'idelem' ");
sink = gst_element_factory_make ("autovideosink", "sink");
if (sink == NULL)
g_error ("Could not create 'autovideosink' element");
gst_bin_add_many (GST_BIN (pipeline), src, csp, vs,idelem, sink, NULL);
/* can't link src yet, it has no pads */
gst_element_link_many (csp, vs,idelem, sink, NULL);
sinkpad = gst_element_get_static_pad (csp, "sink");
//ident_sink = gst_element_get_static_pad (idelem, "sink");
// //
// gst_pad_add_probe (sinkpad, GST_PAD_PROBE_TYPE_BUFFER,
// (GstPadProbeCallback) display_data, NULL, NULL); //displayinfo data
// gst_object_unref (sinkpad);
GstElement *identity_elem = gst_bin_get_by_name(GST_BIN(pipeline), "identity-elem");
g_object_set(G_OBJECT(identity_elem), "signal-handoffs", TRUE, NULL); //change time
g_signal_connect(idelem, "handoff", (GCallback) change_time, pipeline);
g_atomic_int_set (&counter, 1);
g_signal_connect (src, "pad-added",
(GCallback) cb_pad_added, pipeline);
g_signal_connect (src, "no-more-pads",
(GCallback) cb_no_more_pads, pipeline);
gst_element_set_state (pipeline, GST_STATE_PAUSED);
g_main_loop_run (loop);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (sinkpad);
gst_object_unref (bus);
gst_object_unref (pipeline);
g_main_loop_unref (loop);
return 0;
}

Related

How to convert a gstreamer command to C code?

The command is:
gst-launch-1.0 filesrc location=/home/pi/Videos/watch.mp4 ! qtdemux name=demux \
demux.audio_0 ! queue ! decodebin ! audioconvert ! audioresample ! autoaudiosink \
demux.video_0 ! queue ! decodebin ! videoconvert ! videoscale ! video/x-raw,width=800,height=480 ! avenc_bmp ! fakesink
It's a little difficult for me to link them together because both 'qtdemux' and 'decodebin' are used in this example.
I have tried with 'tee', but it's obviously slow.
Could anyone give me some help? Thank you.
#include <stdio.h>
#include <gstreamer-1.0/gst/gst.h>
#include <stdbool.h>
typedef struct {
GstElement *pipeline;
GstElement *filesrc;
GstElement *qtdemux;
/* Video */
struct{
GstElement *queue;
GstElement *decode;
GstElement *convert;
GstElement *scale;
GstElement *capsfilter;
GstElement *enc_bmp;
GstElement *fakesink;
}video;
/* Audio */
struct{
GstElement *queue;
GstElement *decode;
GstElement *convert;
GstElement *resample;
GstElement *sink;
}audio;
} gstreamer_t;
static void pad_added_handler (GstElement *src, GstPad *pad, gstreamer_t* data)
{
GstCaps *caps;
GstStructure *pad_sct;
const gchar *name;
caps = gst_pad_get_current_caps(pad);
pad_sct = gst_caps_get_structure( caps, 0 );
name = gst_structure_get_name(pad_sct);
printf( "src name = %s\r\n", gst_element_get_name(src) );
printf( "pad name = %s\r\n", name);
printf( "Received new pad '%s' from '%s'.\r\n", GST_PAD_NAME (pad), GST_ELEMENT_NAME (src));
GstPad *sinkpad = NULL;
if(g_str_has_prefix (name, "video/x-h264")) {
sinkpad = gst_element_get_static_pad(data->video.queue, "sink");
if(!gst_pad_link(pad, sinkpad) != GST_PAD_LINK_OK )
printf("not link !!\n\n");
gst_object_unref (sinkpad);
}else if(g_str_has_prefix( name, "audio/mpeg")){
sinkpad = gst_element_get_static_pad ( data->audio.queue, "sink");
if(gst_pad_link( pad, sinkpad) != GST_PAD_LINK_OK )
printf("not link !!\n\n");
gst_object_unref (sinkpad);
}
else
printf("Another Pad: %s.\r\n", name);
gst_caps_unref (caps);
}
int main(int argc, char *argv[]) {
gstreamer_t gstreamer;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
gstreamer.filesrc = gst_element_factory_make ("filesrc", "filesrc");
g_object_set (gstreamer.filesrc, "location", "../../Videos/watch.mp4", NULL);
gstreamer.qtdemux = gst_element_factory_make ("qtdemux", "qtdemux");
/* Video */
gstreamer.video.queue = gst_element_factory_make("queue", "video");
gstreamer.video.decode = gst_element_factory_make ("decodebin", "decodebin");
gstreamer.video.convert = gst_element_factory_make ("videoconvert", "videoconvert");
gstreamer.video.scale = gst_element_factory_make ("videoscale", "videoscale");
gstreamer.video.capsfilter = gst_element_factory_make ("capsfilter", "capsfilter");
GstCaps *Caps = gst_caps_from_string("video/x-raw,width=800,height=480");
g_object_set(G_OBJECT(gstreamer.video.capsfilter), "caps", Caps, NULL);
gst_caps_unref(Caps);
gstreamer.video.enc_bmp = gst_element_factory_make ("avenc_bmp", "avenc_bmp");
gstreamer.video.fakesink = gst_element_factory_make ("fakesink", "fakesink");
g_object_set (gstreamer.video.fakesink, "sync", true, NULL);
/* Audio */
gstreamer.audio.queue = gst_element_factory_make("queue", "queue_audio");
gstreamer.audio.decode = gst_element_factory_make("decodebin", "decodebin");
gstreamer.audio.convert = gst_element_factory_make("audioconvert", "audioconvert");
gstreamer.audio.resample = gst_element_factory_make("audioresample", "audioresample");
gstreamer.audio.sink = gst_element_factory_make("autoaudiosink", "autoaudiosink");
/* Create the empty pipeline */
gstreamer.pipeline = gst_pipeline_new ("gstreamer-pipeline");
if (!gstreamer.pipeline || !gstreamer.filesrc || !gstreamer.qtdemux ||
!gstreamer.video.queue || !gstreamer.video.decode || !gstreamer.video.convert || !gstreamer.video.scale || !gstreamer.video.capsfilter || !gstreamer.video.enc_bmp || !gstreamer.video.fakesink||
!gstreamer.audio.queue || !gstreamer.audio.decode || !gstreamer.audio.convert || !gstreamer.audio.resample ||!gstreamer.audio.sink)
{
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Build the pipeline. Note that we are NOT linking the source at this
* point. We will do it later. */
gst_bin_add_many (GST_BIN (gstreamer.pipeline), gstreamer.filesrc, gstreamer.qtdemux,
gstreamer.video.queue, gstreamer.video.decode, gstreamer.video.convert, gstreamer.video.scale, gstreamer.video.capsfilter, gstreamer.video.enc_bmp, gstreamer.video.fakesink,
gstreamer.audio.queue, gstreamer.audio.decode, gstreamer.audio.convert, gstreamer.audio.resample, gstreamer.audio.sink, NULL);
if (!gst_element_link (gstreamer.filesrc, gstreamer.qtdemux)) {
g_printerr ("Elements filesrc and qtdemux could not be linked.\n");
gst_object_unref (gstreamer.pipeline);
return -1;
}
if (!gst_element_link_many (gstreamer.video.convert, gstreamer.video.scale, gstreamer.video.capsfilter, gstreamer.video.enc_bmp, gstreamer.video.fakesink, NULL)) {
g_printerr ("Video elements could not be linked.\n");
gst_object_unref (gstreamer.pipeline);
return -1;
}
if (!gst_element_link_many (gstreamer.audio.convert, gstreamer.audio.resample, gstreamer.audio.sink, NULL)) {
g_printerr ("Audio elements could not be linked.\n");
gst_object_unref (gstreamer.pipeline);
return -1;
}
g_signal_connect (gstreamer.qtdemux, "pad-added", G_CALLBACK (pad_added_handler), &gstreamer);
/* Start playing */
ret = gst_element_set_state (gstreamer.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (gstreamer.pipeline);
return -1;
}
/* Listen to the bus */
bus = gst_element_get_bus (gstreamer.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("\nEnd-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (gstreamer.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (gstreamer.pipeline, GST_STATE_NULL);
gst_object_unref (gstreamer.pipeline);
return 0;
}
The output is:
(stream:5337): GStreamer-WARNING **: 02:28:38.279: Name 'decodebin' is not unique in bin 'gstreamer-pipeline', not adding
Pipeline state changed from NULL to READY:
src name = qtdemux
pad name = video/x-h264
Received new pad 'video_0' from 'qtdemux'.
not link !!
src name = qtdemux
pad name = audio/mpeg
Received new pad 'audio_0' from 'qtdemux'.
Error received from element qtdemux: Internal data stream error.
Debugging information: ../gst/isomp4/qtdemux.c(6545): gst_qtdemux_loop (): /GstPipeline:gstreamer-pipeline/GstQTDemux:qtdemux:
streaming stopped, reason not-linked (-1)
I don't know how to link the video and audio.
The problem you're facing is with respect to decodebin.
One single pipeline cannot contain two elements that have the same name.
In your case,
gstreamer.video.decode = gst_element_factory_make ("decodebin", "decodebin");
and
gstreamer.audio.decode = gst_element_factory_make("decodebin", "decodebin");
have the same name, "decodebin".
That's why one of the decodebins is not even being added to the pipeline (see the GStreamer warning at the top of your output).
You can change it to something else. For example
gstreamer.audio.decode = gst_element_factory_make("decodebin", "decodebin-audio");
and
gstreamer.video.decode = gst_element_factory_make("decodebin", "decodebin-video");
This rule applies to any element that is added directly to the same parent bin (here, your pipeline).
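Note that even with unique names, the queues in the posted code are never linked into the decodebins, and decodebin only exposes its source pads dynamically, so each branch also needs a pad-added handler. A rough sketch of the missing pieces, reusing the struct from the question (the callback name decodebin_pad_added is only an illustration):

/* decodebin source pads appear at runtime, so link them from a pad-added handler;
 * user_data carries the element that should follow (videoconvert or audioconvert). */
static void decodebin_pad_added (GstElement *dbin, GstPad *pad, gpointer user_data)
{
    GstElement *next = GST_ELEMENT (user_data);
    GstPad *sinkpad = gst_element_get_static_pad (next, "sink");
    (void)dbin;
    if (!gst_pad_is_linked (sinkpad))
        gst_pad_link (pad, sinkpad);
    gst_object_unref (sinkpad);
}

And in main(), after adding the elements to the pipeline:

/* static links: demuxer queue -> decodebin (decodebin's sink pad is static) */
gst_element_link (gstreamer.video.queue, gstreamer.video.decode);
gst_element_link (gstreamer.audio.queue, gstreamer.audio.decode);

/* dynamic links: decodebin -> videoconvert / audioconvert, once the pads exist */
g_signal_connect (gstreamer.video.decode, "pad-added", G_CALLBACK (decodebin_pad_added), gstreamer.video.convert);
g_signal_connect (gstreamer.audio.decode, "pad-added", G_CALLBACK (decodebin_pad_added), gstreamer.audio.convert);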

bluez pairing before exchange

I'm using the code from An Introduction to Bluetooth Programming, Chapter 4 ("Bluetooth programming in C with BlueZ"), section 4.2 ("RFCOMM sockets"), to send messages between two Raspberry Pis.
However, if I don't pair the two devices through bluetoothctl first, I can't use the client because it gives me the error:
uh oh: Invalid exchange.
Can you give me some hints on how to do the pairing from the C code? I need this to work "automatically", without having to pair through bluetoothctl before running the C code.
Before getting into my answer: I am not sure how to achieve this using the "libbluetooth" APIs. My answer below is based on the D-Bus API, using GDBus, and should work with any recent BlueZ (with bluetoothd) running.
Note that with BlueZ 5 it is recommended to use the D-Bus APIs.
In brief, you need to develop an agent which accepts the pairing request automatically; I am assuming a confirmation-style agent here. Please refer to the BlueZ agent capabilities documentation.
With recent BlueZ versions (at least 5.47+), there is a new API, "ConnectDevice", which can be used to connect to a device without scanning/discovery. From your question I understand that you are trying to communicate between two RPis, so you can find the BT address of both Bluetooth controllers. With the BT addresses in place:
/*
* bluez_adapter_connect.c - Connect with device without StartDiscovery
* - This example registers an agent with NoInputNoOutput capability for the purpose of
* auto pairing
* - Use ConnectDevice method to connect with device using provided MAC address
* - Usual signal subscription to get the details of the connected device
* - Introduced new signal handler to exit the program gracefully
*
* Note: As "ConnectDevice" is a new API and still experimental (but mostly stable),
* one needs to use the "-E" option when starting "bluetoothd". Systems running systemd can
* edit the ExecStart line in /lib/systemd/system/bluetooth.service
*
* When is this API useful?
* - When you already have the MAC address of the remote Bluetooth device to connect to:
* you don't need to scan for the device (with or without a filter) before connecting to it.
* - StartDiscovery + Pair + Connect => ConnectDevice
*
* How would you have the MAC address before scanning?
* - When you have other communication (wired or wireless) medium to exchange the MAC address
* - For example, NFC OOB can be used to exchange the MAC address
* - Testing Bluetooth with same device (MAC address known)
*
* - Here the agent capability is registered as "NoInputNoOutput" for experimental purposes only; in a
* real-world scenario, Pair + Connect involves real agents.
* - Also note, bluez_agent_call_method and bluez_adapter_call_method are two different functions doing
* the same work, differing only in interface name and object path. They exist separately just to make
* the example easier to follow.
*
* gcc `pkg-config --cflags glib-2.0 gio-2.0` -Wall -Wextra -o ./bin/bluez_adapter_connect ./bluez_adapter_connect.c `pkg-config --libs glib-2.0 gio-2.0`
*/
#include <glib.h>
#include <gio/gio.h>
#include <signal.h>
#include <string.h> /* strcmp() used in bluez_signal_adapter_changed() */
GMainLoop *loop;
GDBusConnection *con;
static void bluez_property_value(const gchar *key, GVariant *value)
{
const gchar *type = g_variant_get_type_string(value);
g_print("\t%s : ", key);
switch(*type) {
case 'o':
case 's':
g_print("%s\n", g_variant_get_string(value, NULL));
break;
case 'b':
g_print("%d\n", g_variant_get_boolean(value));
break;
case 'u':
g_print("%d\n", g_variant_get_uint32(value));
break;
case 'a':
/* TODO Handling only 'as', but not array of dicts */
if(g_strcmp0(type, "as"))
break;
g_print("\n");
const gchar *uuid;
GVariantIter i;
g_variant_iter_init(&i, value);
while(g_variant_iter_next(&i, "s", &uuid))
g_print("\t\t%s\n", uuid);
break;
default:
g_print("Other\n");
break;
}
}
typedef void (*method_cb_t)(GObject *, GAsyncResult *, gpointer);
static int bluez_adapter_call_method(const char *method, GVariant *param, method_cb_t method_cb)
{
g_dbus_connection_call(con,
"org.bluez",
/* TODO Find the adapter path runtime */
"/org/bluez/hci0",
"org.bluez.Adapter1",
method,
param,
NULL,
G_DBUS_CALL_FLAGS_NONE,
-1,
NULL,
method_cb,
(void *)method);
return 0;
}
static void bluez_result_async_cb(GObject *con,
GAsyncResult *res,
gpointer data)
{
const gchar *key = (gchar *)data;
GVariant *result = NULL;
GError *error = NULL;
result = g_dbus_connection_call_finish((GDBusConnection *)con, res, &error);
if(error != NULL) {
g_print("Unable to get result: %s\n", error->message);
return;
}
if(result) {
result = g_variant_get_child_value(result, 0);
bluez_property_value(key, result);
}
g_variant_unref(result);
}
static void bluez_device_appeared(GDBusConnection *sig,
const gchar *sender_name,
const gchar *object_path,
const gchar *interface,
const gchar *signal_name,
GVariant *parameters,
gpointer user_data)
{
(void)sig;
(void)sender_name;
(void)object_path;
(void)interface;
(void)signal_name;
(void)user_data;
GVariantIter *interfaces;
const char *object;
const gchar *interface_name;
GVariant *properties;
g_variant_get(parameters, "(&oa{sa{sv}})", &object, &interfaces);
while(g_variant_iter_next(interfaces, "{&s@a{sv}}", &interface_name, &properties)) {
if(g_strstr_len(g_ascii_strdown(interface_name, -1), -1, "device")) {
g_print("[ %s ]\n", object);
const gchar *property_name;
GVariantIter i;
GVariant *prop_val;
g_variant_iter_init(&i, properties);
while(g_variant_iter_next(&i, "{&sv}", &property_name, &prop_val))
bluez_property_value(property_name, prop_val);
g_variant_unref(prop_val);
}
g_variant_unref(properties);
}
return;
}
#define BT_ADDRESS_STRING_SIZE 18
static void bluez_device_disappeared(GDBusConnection *sig,
const gchar *sender_name,
const gchar *object_path,
const gchar *interface,
const gchar *signal_name,
GVariant *parameters,
gpointer user_data)
{
(void)sig;
(void)sender_name;
(void)object_path;
(void)interface;
(void)signal_name;
GVariantIter *interfaces;
const char *object;
const gchar *interface_name;
char address[BT_ADDRESS_STRING_SIZE] = {'\0'};
g_variant_get(parameters, "(&oas)", &object, &interfaces);
while(g_variant_iter_next(interfaces, "s", &interface_name)) {
if(g_strstr_len(g_ascii_strdown(interface_name, -1), -1, "device")) {
int i;
char *tmp = g_strstr_len(object, -1, "dev_") + 4;
for(i = 0; *tmp != '\0'; i++, tmp++) {
if(*tmp == '_') {
address[i] = ':';
continue;
}
address[i] = *tmp;
}
g_print("\nDevice %s removed\n", address);
g_main_loop_quit((GMainLoop *)user_data);
}
}
return;
}
static void bluez_signal_adapter_changed(GDBusConnection *conn,
const gchar *sender,
const gchar *path,
const gchar *interface,
const gchar *signal,
GVariant *params,
void *userdata)
{
(void)conn;
(void)sender;
(void)path;
(void)interface;
(void)userdata;
GVariantIter *properties = NULL;
GVariantIter *unknown = NULL;
const char *iface;
const char *key;
GVariant *value = NULL;
const gchar *signature = g_variant_get_type_string(params);
if(strcmp(signature, "(sa{sv}as)") != 0) {
g_print("Invalid signature for %s: %s != %s", signal, signature, "(sa{sv}as)");
goto done;
}
g_variant_get(params, "(&sa{sv}as)", &iface, &properties, &unknown);
while(g_variant_iter_next(properties, "{&sv}", &key, &value)) {
if(!g_strcmp0(key, "Powered")) {
if(!g_variant_is_of_type(value, G_VARIANT_TYPE_BOOLEAN)) {
g_print("Invalid argument type for %s: %s != %s", key,
g_variant_get_type_string(value), "b");
goto done;
}
g_print("Adapter is Powered \"%s\"\n", g_variant_get_boolean(value) ? "on" : "off");
}
if(!g_strcmp0(key, "Discovering")) {
if(!g_variant_is_of_type(value, G_VARIANT_TYPE_BOOLEAN)) {
g_print("Invalid argument type for %s: %s != %s", key,
g_variant_get_type_string(value), "b");
goto done;
}
g_print("Adapter scan \"%s\"\n", g_variant_get_boolean(value) ? "on" : "off");
}
}
done:
if(properties != NULL)
g_variant_iter_free(properties);
if(value != NULL)
g_variant_unref(value);
}
static int bluez_adapter_set_property(const char *prop, GVariant *value)
{
GVariant *result;
GError *error = NULL;
result = g_dbus_connection_call_sync(con,
"org.bluez",
"/org/bluez/hci0",
"org.freedesktop.DBus.Properties",
"Set",
g_variant_new("(ssv)", "org.bluez.Adapter1", prop, value),
NULL,
G_DBUS_CALL_FLAGS_NONE,
-1,
NULL,
&error);
if(error != NULL)
return 1;
g_variant_unref(result);
return 0;
}
static int bluez_adapter_connect_device(char **argv)
{
int rc;
GVariantBuilder *b = g_variant_builder_new(G_VARIANT_TYPE_VARDICT);
g_variant_builder_add(b, "{sv}", "Address", g_variant_new_string(argv[1]));
GVariant *device_dict = g_variant_builder_end(b);
g_variant_builder_unref(b);
rc = bluez_adapter_call_method("ConnectDevice",
g_variant_new_tuple(&device_dict, 1),
bluez_result_async_cb);
if(rc) {
g_print("Not able to call ConnectDevice\n");
return 1;
}
return 0;
}
#define AGENT_PATH "/org/bluez/AutoPinAgent"
static int bluez_agent_call_method(const gchar *method, GVariant *param)
{
GVariant *result;
GError *error = NULL;
result = g_dbus_connection_call_sync(con,
"org.bluez",
"/org/bluez",
"org.bluez.AgentManager1",
method,
param,
NULL,
G_DBUS_CALL_FLAGS_NONE,
-1,
NULL,
&error);
if(error != NULL) {
g_print("Register %s: %s\n", AGENT_PATH, error->message);
return 1;
}
g_variant_unref(result);
return 0;
}
static int bluez_register_autopair_agent(void)
{
int rc;
rc = bluez_agent_call_method("RegisterAgent", g_variant_new("(os)", AGENT_PATH, "NoInputNoOutput"));
if(rc)
return 1;
rc = bluez_agent_call_method("RequestDefaultAgent", g_variant_new("(o)", AGENT_PATH));
if(rc) {
bluez_agent_call_method("UnregisterAgent", g_variant_new("(o)", AGENT_PATH));
return 1;
}
return 0;
}
static void cleanup_handler(int signo)
{
if (signo == SIGINT) {
g_print("received SIGINT\n");
g_main_loop_quit(loop);
}
}
int main(int argc, char **argv)
{
int rc;
guint prop_changed;
guint iface_added;
guint iface_removed;
if(signal(SIGINT, cleanup_handler) == SIG_ERR)
g_print("can't catch SIGINT\n");
con = g_bus_get_sync(G_BUS_TYPE_SYSTEM, NULL, NULL);
if(con == NULL) {
g_print("Not able to get connection to system bus\n");
return 1;
}
loop = g_main_loop_new(NULL, FALSE);
prop_changed = g_dbus_connection_signal_subscribe(con,
"org.bluez",
"org.freedesktop.DBus.Properties",
"PropertiesChanged",
NULL,
"org.bluez.Adapter1",
G_DBUS_SIGNAL_FLAGS_NONE,
bluez_signal_adapter_changed,
NULL,
NULL);
iface_added = g_dbus_connection_signal_subscribe(con,
"org.bluez",
"org.freedesktop.DBus.ObjectManager",
"InterfacesAdded",
NULL,
NULL,
G_DBUS_SIGNAL_FLAGS_NONE,
bluez_device_appeared,
loop,
NULL);
iface_removed = g_dbus_connection_signal_subscribe(con,
"org.bluez",
"org.freedesktop.DBus.ObjectManager",
"InterfacesRemoved",
NULL,
NULL,
G_DBUS_SIGNAL_FLAGS_NONE,
bluez_device_disappeared,
loop,
NULL);
rc = bluez_adapter_set_property("Powered", g_variant_new("b", TRUE));
if(rc) {
g_print("Not able to enable the adapter\n");
goto fail;
}
rc = bluez_register_autopair_agent();
if(rc) {
g_print("Not able to register default autopair agent\n");
goto fail;
}
if(argc == 2) {
rc = bluez_adapter_connect_device(argv);
if(rc)
goto fail;
}
g_main_loop_run(loop);
rc = bluez_adapter_set_property("Powered", g_variant_new("b", FALSE));
if(rc)
g_print("Not able to disable the adapter\n");
fail:
g_dbus_connection_signal_unsubscribe(con, prop_changed);
g_dbus_connection_signal_unsubscribe(con, iface_added);
g_dbus_connection_signal_unsubscribe(con, iface_removed);
g_object_unref(con);
return 0;
}
You should be able to use the above program to connect to the device. In this example the agent is registered with the "NoInputNoOutput" capability (something like Bluetooth headphones), so that no pairing response is required.
But you should adapt this example for the client side as well (assuming this example is going to run on RPi 1 as the server, modify it to accept the request on the client-side RPi 2).
You can find the detailed explanation about this example here and also some relevant GDBUS based examples here.
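For completeness, the compile command is in the header comment of the example, and the program takes the remote device's BT address as its single argument, so a typical run looks like this (the address is a placeholder):

./bin/bluez_adapter_connect 00:11:22:33:44:55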

How to enable HLS support when building gstreamer-plugins-bad from source? (Compiling for ARM with Poky)

I'm trying to make gstreamer-plugins-bad with HLS support work on an ARM device. The device already has GStreamer support and I was able to compile gstreamer-plugins-bad, but I couldn't get the HLS demuxer to work. Every time it says a plugin is missing.
Here is the code I'm trying to get working:
#include <string.h>
#include <gtk/gtk.h>
#include <gst/gst.h>
#include <gst/video/videooverlay.h>
#include <gdk/gdk.h>
//#if defined (GDK_WINDOWING_X11)
///#include <gdk/gdkx.h>
///#elif defined (GDK_WINDOWING_WIN32)
//#include <gdk/gdkwin32.h>
//#elif defined (GDK_WINDOWING_QUARTZ)
//#include <gdk/gdkquartz.h>
//#endif
/* Structure to contain all our information, so we can pass it around */
typedef struct _CustomData {
GstElement *playbin; /* Our one and only pipeline */
GtkWidget *slider; /* Slider widget to keep track of current position */
GtkWidget *streams_list; /* Text widget to display info about the streams */
gulong slider_update_signal_id; /* Signal ID for the slider update signal */
GstState state; /* Current state of the pipeline */
gint64 duration; /* Duration of the clip, in nanoseconds */
} CustomData;
/* This function is called when the GUI toolkit creates the physical window that will hold the video.
* At this point we can retrieve its handler (which has a different meaning depending on the windowing system)
* and pass it to GStreamer through the VideoOverlay interface. */
static void realize_cb (GtkWidget *widget, CustomData *data) {
GdkWindow *window = gtk_widget_get_window (widget);
guintptr window_handle;
if (!gdk_window_ensure_native (window))
g_error ("Couldn't create native window needed for GstVideoOverlay!");
/* Retrieve window handler from GDK */
//#if defined (GDK_WINDOWING_WIN32)
// window_handle = (guintptr)GDK_WINDOW_HWND (window);
//#elif defined (GDK_WINDOWING_QUARTZ)
// window_handle = gdk_quartz_window_get_nsview (window);
//#elif defined (GDK_WINDOWING_X11)
//window_handle = GDK_WINDOW_XID (window);
window_handle = gdk_x11_drawable_get_xid(window);
//#endif
/* Pass it to playbin, which implements VideoOverlay and will forward it to the video sink */
gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (data->playbin), window_handle);
}
/* This function is called when the PLAY button is clicked */
static void play_cb (GtkButton *button, CustomData *data) {
gst_element_set_state (data->playbin, GST_STATE_PLAYING);
}
/* This function is called when the PAUSE button is clicked */
static void pause_cb (GtkButton *button, CustomData *data) {
gst_element_set_state (data->playbin, GST_STATE_PAUSED);
}
/* This function is called when the STOP button is clicked */
static void stop_cb (GtkButton *button, CustomData *data) {
gst_element_set_state (data->playbin, GST_STATE_READY);
}
/* This function is called when the main window is closed */
static void delete_event_cb (GtkWidget *widget, GdkEvent *event, CustomData *data) {
stop_cb (NULL, data);
gtk_main_quit ();
}
/* This function is called everytime the video window needs to be redrawn (due to damage/exposure,
* rescaling, etc). GStreamer takes care of this in the PAUSED and PLAYING states, otherwise,
* we simply draw a black rectangle to avoid garbage showing up. */
static gboolean draw_cb (GtkWidget *widget, cairo_t *cr, CustomData *data) {
if (data->state < GST_STATE_PAUSED) {
GtkAllocation allocation;
/* Cairo is a 2D graphics library which we use here to clean the video window.
* It is used by GStreamer for other reasons, so it will always be available to us. */
gtk_widget_get_allocation (widget, &allocation);
cairo_set_source_rgb (cr, 0, 0, 0);
cairo_rectangle (cr, 0, 0, allocation.width, allocation.height);
cairo_fill (cr);
}
return FALSE;
}
/* This function is called when the slider changes its position. We perform a seek to the
* new position here. */
static void slider_cb (GtkRange *range, CustomData *data) {
gdouble value = gtk_range_get_value (GTK_RANGE (data->slider));
gst_element_seek_simple (data->playbin, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_KEY_UNIT,
(gint64)(value * GST_SECOND));
}
/* This creates all the GTK+ widgets that compose our application, and registers the callbacks */
static void create_ui (CustomData *data) {
GtkWidget *main_window; /* The uppermost window, containing all other windows */
GtkWidget *video_window; /* The drawing area where the video will be shown */
GtkWidget *main_box; /* VBox to hold main_hbox and the controls */
GtkWidget *main_hbox; /* HBox to hold the video_window and the stream info text widget */
GtkWidget *controls; /* HBox to hold the buttons and the slider */
GtkWidget *play_button, *pause_button, *stop_button; /* Buttons */
main_window = gtk_window_new (GTK_WINDOW_TOPLEVEL);
g_signal_connect (G_OBJECT (main_window), "delete-event", G_CALLBACK (delete_event_cb), data);
video_window = gtk_drawing_area_new ();
gtk_widget_set_double_buffered (video_window, FALSE);
g_signal_connect (video_window, "realize", G_CALLBACK (realize_cb), data);
g_signal_connect (video_window, "draw", G_CALLBACK (draw_cb), data);
play_button = gtk_button_new_with_label("Play"); //gtk_button_new_from_icon_name ("media-playback-start", GTK_ICON_SIZE_SMALL_TOOLBAR);
g_signal_connect (G_OBJECT (play_button), "clicked", G_CALLBACK (play_cb), data);
pause_button = gtk_button_new_with_label("Pause");//gtk_button_new_from_icon_name ("media-playback-pause", GTK_ICON_SIZE_SMALL_TOOLBAR);
g_signal_connect (G_OBJECT (pause_button), "clicked", G_CALLBACK (pause_cb), data);
stop_button = gtk_button_new_with_label("Stop");//gtk_button_new_from_icon_name ("media-playback-stop", GTK_ICON_SIZE_SMALL_TOOLBAR);
g_signal_connect (G_OBJECT (stop_button), "clicked", G_CALLBACK (stop_cb), data);
// data->slider = gtk_scale_new_with_range (GTK_ORIENTATION_HORIZONTAL, 0, 100, 1);
// gtk_scale_set_draw_value (GTK_SCALE (data->slider), 0);
// data->slider_update_signal_id = g_signal_connect (G_OBJECT (data->slider), "value-changed", G_CALLBACK (slider_cb), data);
data->streams_list = gtk_text_view_new ();
gtk_text_view_set_editable (GTK_TEXT_VIEW (data->streams_list), FALSE);
//controls = gtk_box_new (GTK_ORIENTATION_HORIZONTAL, 0);
controls = gtk_hbox_new(FALSE, 0);
gtk_box_pack_start (GTK_BOX (controls), play_button, FALSE, FALSE, 2);
gtk_box_pack_start (GTK_BOX (controls), pause_button, FALSE, FALSE, 2);
gtk_box_pack_start (GTK_BOX (controls), stop_button, FALSE, FALSE, 2);
//gtk_box_pack_start (GTK_BOX (controls), data->slider, TRUE, TRUE, 2);
main_hbox = gtk_hbox_new (FALSE, 0);
gtk_box_pack_start (GTK_BOX (main_hbox), video_window, TRUE, TRUE, 0);
gtk_box_pack_start (GTK_BOX (main_hbox), data->streams_list, FALSE, FALSE, 2);
main_box = gtk_vbox_new (FALSE, 0);
gtk_box_pack_start (GTK_BOX (main_box), main_hbox, TRUE, TRUE, 0);
gtk_box_pack_start (GTK_BOX (main_box), controls, FALSE, FALSE, 0);
gtk_container_add (GTK_CONTAINER (main_window), main_box);
gtk_window_set_default_size (GTK_WINDOW (main_window), 640, 480);
gtk_widget_show_all (main_window);
}
/* This function is called periodically to refresh the GUI */
static gboolean refresh_ui (CustomData *data) {
gint64 current = -1;
/* We do not want to update anything unless we are in the PAUSED or PLAYING states */
if (data->state < GST_STATE_PAUSED)
return TRUE;
/* If we didn't know it yet, query the stream duration */
if (!GST_CLOCK_TIME_IS_VALID (data->duration)) {
if (!gst_element_query_duration (data->playbin, GST_FORMAT_TIME, &data->duration)) {
g_printerr ("Could not query current duration.\n");
} else {
/* Set the range of the slider to the clip duration, in SECONDS */
//gtk_range_set_range (GTK_RANGE (data->slider), 0, (gdouble)data->duration / GST_SECOND);
}
}
if (gst_element_query_position (data->playbin, GST_FORMAT_TIME, &current)) {
/* Block the "value-changed" signal, so the slider_cb function is not called
* (which would trigger a seek the user has not requested) */
//g_signal_handler_block (data->slider, data->slider_update_signal_id);
/* Set the position of the slider to the current pipeline positoin, in SECONDS */
//gtk_range_set_value (GTK_RANGE (data->slider), (gdouble)current / GST_SECOND);
/* Re-enable the signal */
//g_signal_handler_unblock (data->slider, data->slider_update_signal_id);
}
return TRUE;
}
/* This function is called when new metadata is discovered in the stream */
static void tags_cb (GstElement *playbin, gint stream, CustomData *data) {
/* We are possibly in a GStreamer working thread, so we notify the main
* thread of this event through a message in the bus */
gst_element_post_message (playbin,
gst_message_new_application (GST_OBJECT (playbin),
gst_structure_new_empty ("tags-changed")));
}
/* This function is called when an error message is posted on the bus */
static void error_cb (GstBus *bus, GstMessage *msg, CustomData *data) {
GError *err;
gchar *debug_info;
/* Print error details on the screen */
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
/* Set the pipeline to READY (which stops playback) */
gst_element_set_state (data->playbin, GST_STATE_READY);
}
/* This function is called when an End-Of-Stream message is posted on the bus.
* We just set the pipeline to READY (which stops playback) */
static void eos_cb (GstBus *bus, GstMessage *msg, CustomData *data) {
g_print ("End-Of-Stream reached.\n");
gst_element_set_state (data->playbin, GST_STATE_READY);
}
/* This function is called when the pipeline changes states. We use it to
* keep track of the current state. */
static void state_changed_cb (GstBus *bus, GstMessage *msg, CustomData *data) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data->playbin)) {
data->state = new_state;
g_print ("State set to %s\n", gst_element_state_get_name (new_state));
if (old_state == GST_STATE_READY && new_state == GST_STATE_PAUSED) {
/* For extra responsiveness, we refresh the GUI as soon as we reach the PAUSED state */
refresh_ui (data);
}
}
}
/* Extract metadata from all the streams and write it to the text widget in the GUI */
static void analyze_streams (CustomData *data) {
gint i;
GstTagList *tags;
gchar *str, *total_str;
guint rate;
gint n_video, n_audio, n_text;
GtkTextBuffer *text;
/* Clean current contents of the widget */
text = gtk_text_view_get_buffer (GTK_TEXT_VIEW (data->streams_list));
gtk_text_buffer_set_text (text, "", -1);
/* Read some properties */
g_object_get (data->playbin, "n-video", &n_video, NULL);
g_object_get (data->playbin, "n-audio", &n_audio, NULL);
g_object_get (data->playbin, "n-text", &n_text, NULL);
for (i = 0; i < n_video; i++) {
tags = NULL;
/* Retrieve the stream's video tags */
g_signal_emit_by_name (data->playbin, "get-video-tags", i, &tags);
if (tags) {
total_str = g_strdup_printf ("video stream %d:\n", i);
gtk_text_buffer_insert_at_cursor (text, total_str, -1);
g_free (total_str);
gst_tag_list_get_string (tags, GST_TAG_VIDEO_CODEC, &str);
total_str = g_strdup_printf (" codec: %s\n", str ? str : "unknown");
gtk_text_buffer_insert_at_cursor (text, total_str, -1);
g_free (total_str);
g_free (str);
gst_tag_list_free (tags);
}
}
for (i = 0; i < n_audio; i++) {
tags = NULL;
/* Retrieve the stream's audio tags */
g_signal_emit_by_name (data->playbin, "get-audio-tags", i, &tags);
if (tags) {
total_str = g_strdup_printf ("\naudio stream %d:\n", i);
gtk_text_buffer_insert_at_cursor (text, total_str, -1);
g_free (total_str);
if (gst_tag_list_get_string (tags, GST_TAG_AUDIO_CODEC, &str)) {
total_str = g_strdup_printf (" codec: %s\n", str);
gtk_text_buffer_insert_at_cursor (text, total_str, -1);
g_free (total_str);
g_free (str);
}
if (gst_tag_list_get_string (tags, GST_TAG_LANGUAGE_CODE, &str)) {
total_str = g_strdup_printf (" language: %s\n", str);
gtk_text_buffer_insert_at_cursor (text, total_str, -1);
g_free (total_str);
g_free (str);
}
if (gst_tag_list_get_uint (tags, GST_TAG_BITRATE, &rate)) {
total_str = g_strdup_printf (" bitrate: %d\n", rate);
gtk_text_buffer_insert_at_cursor (text, total_str, -1);
g_free (total_str);
}
gst_tag_list_free (tags);
}
}
for (i = 0; i < n_text; i++) {
tags = NULL;
/* Retrieve the stream's subtitle tags */
g_signal_emit_by_name (data->playbin, "get-text-tags", i, &tags);
if (tags) {
total_str = g_strdup_printf ("\nsubtitle stream %d:\n", i);
gtk_text_buffer_insert_at_cursor (text, total_str, -1);
g_free (total_str);
if (gst_tag_list_get_string (tags, GST_TAG_LANGUAGE_CODE, &str)) {
total_str = g_strdup_printf (" language: %s\n", str);
gtk_text_buffer_insert_at_cursor (text, total_str, -1);
g_free (total_str);
g_free (str);
}
gst_tag_list_free (tags);
}
}
}
/* This function is called when an "application" message is posted on the bus.
* Here we retrieve the message posted by the tags_cb callback */
static void application_cb (GstBus *bus, GstMessage *msg, CustomData *data) {
if (g_strcmp0 (gst_structure_get_name (gst_message_get_structure (msg)), "tags-changed") == 0) {
/* If the message is the "tags-changed" (only one we are currently issuing), update
* the stream info GUI */
analyze_streams (data);
}
}
int main(int argc, char *argv[]) {
CustomData data;
GstStateChangeReturn ret;
GstBus *bus;
/* Initialize GTK */
gtk_init (&argc, &argv);
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Initialize our data structure */
memset (&data, 0, sizeof (data));
data.duration = GST_CLOCK_TIME_NONE;
/* Create the elements */
data.playbin = gst_element_factory_make ("playbin", "playbin");
if (!data.playbin) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Set the URI to play */
g_object_set (data.playbin, "uri", "https://video-dev.github.io/streams/x36xhzz/x36xhzz.m3u8", NULL);
/* Connect to interesting signals in playbin */
g_signal_connect (G_OBJECT (data.playbin), "video-tags-changed", (GCallback) tags_cb, &data);
g_signal_connect (G_OBJECT (data.playbin), "audio-tags-changed", (GCallback) tags_cb, &data);
g_signal_connect (G_OBJECT (data.playbin), "text-tags-changed", (GCallback) tags_cb, &data);
/* Create the GUI */
create_ui (&data);
/* Instruct the bus to emit signals for each received message, and connect to the interesting signals */
bus = gst_element_get_bus (data.playbin);
gst_bus_add_signal_watch (bus);
g_signal_connect (G_OBJECT (bus), "message::error", (GCallback)error_cb, &data);
g_signal_connect (G_OBJECT (bus), "message::eos", (GCallback)eos_cb, &data);
g_signal_connect (G_OBJECT (bus), "message::state-changed", (GCallback)state_changed_cb, &data);
g_signal_connect (G_OBJECT (bus), "message::application", (GCallback)application_cb, &data);
gst_object_unref (bus);
/* Start playing */
ret = gst_element_set_state (data.playbin, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.playbin);
return -1;
}
/* Register a function that GLib will call every second */
g_timeout_add_seconds (1, (GSourceFunc)refresh_ui, &data);
/* Start the GTK main loop. We will not regain control until gtk_main_quit is called. */
gtk_main ();
/* Free resources */
gst_element_set_state (data.playbin, GST_STATE_NULL);
gst_object_unref (data.playbin);
return 0;
}
It's a sample from the GStreamer tutorials, ported to GTK 2. I can compile and even run it, but I get the missing-plugin problem. The code works on a PC, but not on the ARM device. It is a Yocto-based device with Poky Linux.
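A couple of things that are usually worth checking in this situation (the Yocto variable, option, and package names below are assumptions that depend on the recipe versions in your layers, so treat this as a sketch): hlsdemux lives in the "hls" plugin of gst-plugins-bad, it is usually only built when a crypto library (nettle, libgcrypt or openssl) was found at configure time, and on Yocto the plugin is split into its own binary package, so it also has to be installed into the image explicitly.

# on the target: is the plugin there at all?
gst-inspect-1.0 hls
gst-inspect-1.0 hlsdemux

# in local.conf or a bbappend (hypothetical option/package names, old-style override syntax):
PACKAGECONFIG_append_pn-gstreamer1.0-plugins-bad = " hls"
IMAGE_INSTALL_append = " gstreamer1.0-plugins-bad-hls"

Playing an http(s) .m3u8 URL through playbin also needs souphttpsrc from gst-plugins-good and the MPEG-TS demuxer from gst-plugins-bad on the device, so the "missing plugin" message may refer to one of those rather than to hlsdemux itself.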

Output RTSP stream with ffmpeg

I'm attempting to use the ffmpeg libraries to send a video stream from my application to a media server (in this case wowza). I have been able to do the reverse and consume an RTSP stream but I'm having a few issues writing an RTSP stream.
I have found a few examples and attempted to utilise the relevant bits; the code is below, simplified as much as I can. I only want to send a single H264 bit stream to the Wowza server, which it can handle.
I get an "Integer division by zero" exception in the av_interleaved_write_frame function whenever I try to send a packet. The exception looks like it's related to the packet timestamps not being set correctly. I've tried different values and can get past the exception by setting some contrived values, but then the write call fails.
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstring>
#include "stdafx.h"
#include "windows.h"
extern "C"
{
#include <libavcodec\avcodec.h>
#include <libavformat\avformat.h>
#include <libavformat\avio.h>
#include <libswscale\swscale.h>
}
using namespace std;
static int video_is_eof;
#define STREAM_DURATION 50.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
#define VIDEO_CODEC_ID CODEC_ID_H264
static int sws_flags = SWS_BICUBIC;
/* video output */
static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
/* rescale output packet timestamp values from codec to stream timebase */
pkt->pts = av_rescale_q_rnd(pkt->pts, *time_base, st->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt->dts = av_rescale_q_rnd(pkt->dts, *time_base, st->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt->duration = av_rescale_q(pkt->duration, *time_base, st->time_base);
pkt->stream_index = st->index;
// Exception occurs here.
return av_interleaved_write_frame(fmt_ctx, pkt);
}
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n", avcodec_get_name(codec_id));
exit(1);
}
st = avformat_new_stream(oc, *codec);
if (!st) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
st->id = oc->nb_streams - 1;
c = st->codec;
c->codec_id = codec_id;
c->bit_rate = 400000;
c->width = 352;
c->height = 288;
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
return st;
}
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
int ret;
AVCodecContext *c = st->codec;
/* open the codec */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open video codec: ");
exit(1);
}
/* allocate and init a re-usable frame */
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
/* Allocate the encoded raw picture. */
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
if (ret < 0) {
fprintf(stderr, "Could not allocate picture: ");
exit(1);
}
/* copy data and linesize picture pointers to frame */
*((AVPicture *)frame) = dst_picture;
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index, int width, int height)
{
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}
static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush)
{
int ret;
AVCodecContext *c = st->codec;
if (!flush) {
fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
}
AVPacket pkt = { 0 };
int got_packet;
av_init_packet(&pkt);
/* encode the image */
frame->pts = frame_count;
ret = avcodec_encode_video2(c, &pkt, flush ? NULL : frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame:");
exit(1);
}
/* If size is zero, it means the image was buffered. */
if (got_packet) {
ret = write_frame(oc, &c->time_base, st, &pkt);
}
else {
if (flush) {
video_is_eof = 1;
}
ret = 0;
}
if (ret < 0) {
fprintf(stderr, "Error while writing video frame: ");
exit(1);
}
frame_count++;
}
static void close_video(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
av_free(src_picture.data[0]);
av_free(dst_picture.data[0]);
av_frame_free(&frame);
}
int _tmain(int argc, _TCHAR* argv[])
{
printf("starting...\n");
const char *filename = "rtsp://test:password#192.168.33.19:1935/ffmpeg/0";
AVOutputFormat *fmt;
AVFormatContext *oc;
AVStream *video_st;
AVCodec *video_codec;
double video_time;
int flush, ret;
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
avformat_network_init();
AVOutputFormat* oFmt = av_oformat_next(NULL);
while (oFmt) {
if (oFmt->video_codec == VIDEO_CODEC_ID) {
break;
}
oFmt = av_oformat_next(oFmt);
}
if (!oFmt) {
printf("Could not find the required output format.\n");
exit(1);
}
/* allocate the output media context */
avformat_alloc_output_context2(&oc, oFmt, "rtsp", filename);
if (!oc) {
printf("Could not set the output media context.\n");
exit(1);
}
fmt = oc->oformat;
if (!fmt) {
printf("Could not create the output format.\n");
exit(1);
}
video_st = NULL;
cout << "Codec = " << avcodec_get_name(fmt->video_codec) << endl;
if (fmt->video_codec != AV_CODEC_ID_NONE)
{
video_st = add_stream(oc, &video_codec, fmt->video_codec);
}
/* Now that all the parameters are set, we can open the video codec and allocate the necessary encode buffers. */
if (video_st) {
open_video(oc, video_codec, video_st);
}
av_dump_format(oc, 0, filename, 1);
char errorBuff[80];
if (!(fmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open outfile '%s': %s", filename, av_make_error_string(errorBuff, 80, ret));
return 1;
}
}
flush = 0;
while (video_st && !video_is_eof) {
/* Compute current video time. */
video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;
if (!flush && (!video_st || video_time >= STREAM_DURATION)) {
flush = 1;
}
if (video_st && !video_is_eof) {
write_video_frame(oc, video_st, flush);
}
}
if (video_st) {
close_video(oc, video_st);
}
if ((fmt->flags & AVFMT_NOFILE)) {
avio_close(oc->pb);
}
avformat_free_context(oc);
printf("finished.\n");
getchar();
return 0;
}
Does anyone have any insights about how the packet timestamps can be successfully set?
I solved the integer division by zero by building ffmpeg on my Windows instance and debugging the av_interleaved_write_frame call. Turns out it was the pts not being set on the video stream object that was causing the exception.
Adding the line below to the while loop in the main function fixed the problem:
video_st->pts.val += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
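(For the numbers used in these samples: av_rescale_q(1, video_st->codec->time_base, video_st->time_base) converts one frame duration from the 1/25 codec time base to the stream time base, so with the 1/90000 stream time base set in the sample below it advances video_st->pts by 90000/25 = 3600 ticks per frame.)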
Here's a sample that works to get an H264-encoded dummy stream to a Wowza server via ffmpeg's RTSP pipeline.
// Roughly based on: https://ffmpeg.org/doxygen/trunk/muxing_8c-source.html
#include <chrono>
#include <thread>
#include <tchar.h>
extern "C"
{
#include <libavcodec\avcodec.h>
#include <libavformat\avformat.h>
#include <libavformat\avio.h>
#include <libswscale\swscale.h>
#include <libavutil\time.h>
}
#pragma comment(lib,"libavformat/libavformat.a")
#pragma comment(lib,"libavcodec/libavcodec.a")
#pragma comment(lib,"libavutil/libavutil.a")
#pragma comment(lib,"libswscale/libswscale.a")
#pragma comment(lib,"x264.lib")
#pragma comment(lib,"libswresample/libswresample.a")
using namespace std;
static int video_is_eof;
#define STREAM_DURATION 20
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */ //AV_PIX_FMT_NV12;
#define VIDEO_CODEC_ID CODEC_ID_H264
/* video output */
static AVFrame *frame;
static AVPicture src_picture, dst_picture;
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
av_log(NULL, AV_LOG_ERROR, "Could not find encoder for '%s'.\n", avcodec_get_name(codec_id));
}
else {
st = avformat_new_stream(oc, *codec);
if (!st) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate stream.\n");
}
else {
st->id = oc->nb_streams - 1;
st->time_base.den = st->pts.den = 90000;
st->time_base.num = st->pts.num = 1;
c = st->codec;
c->codec_id = codec_id;
c->bit_rate = 400000;
c->width = 352;
c->height = 288;
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
}
}
return st;
}
static int open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
int ret;
AVCodecContext *c = st->codec;
/* open the codec */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n", avcodec_get_name(c->codec_id));
}
else {
/* allocate and init a re-usable frame */
frame = av_frame_alloc();
if (!frame) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
ret = -1;
}
else {
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
/* Allocate the encoded raw picture. */
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate picture.\n");
}
else {
/* copy data and linesize picture pointers to frame */
*((AVPicture *)frame) = dst_picture;
}
}
}
return ret;
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index, int width, int height)
{
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}
static int write_video_frame(AVFormatContext *oc, AVStream *st, int frameCount)
{
int ret = 0;
AVCodecContext *c = st->codec;
fill_yuv_image(&dst_picture, frameCount, c->width, c->height);
AVPacket pkt = { 0 };
int got_packet;
av_init_packet(&pkt);
/* encode the image */
frame->pts = frameCount;
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error encoding video frame.\n");
}
else {
if (got_packet) {
pkt.stream_index = st->index;
pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, st->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
ret = av_write_frame(oc, &pkt);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while writing video frame.\n");
}
}
}
return ret;
}
int _tmain(int argc, _TCHAR* argv[])
{
printf("starting...\n");
const char *url = "rtsp://test:password#192.168.33.19:1935/ffmpeg/0";
//const char *url = "rtsp://192.168.33.19:1935/ffmpeg/0";
AVFormatContext *outContext;
AVStream *video_st;
AVCodec *video_codec;
int ret = 0, frameCount = 0;
av_log_set_level(AV_LOG_DEBUG);
//av_log_set_level(AV_LOG_TRACE);
av_register_all();
avformat_network_init();
avformat_alloc_output_context2(&outContext, NULL, "rtsp", url);
if (!outContext) {
av_log(NULL, AV_LOG_FATAL, "Could not allocate an output context for '%s'.\n", url);
goto end;
}
if (!outContext->oformat) {
av_log(NULL, AV_LOG_FATAL, "Could not create the output format for '%s'.\n", url);
goto end;
}
video_st = add_stream(outContext, &video_codec, VIDEO_CODEC_ID);
/* Now that all the parameters are set, we can open the video codec and allocate the necessary encode buffers. */
if (video_st) {
av_log(NULL, AV_LOG_DEBUG, "Video stream codec %s.\n ", avcodec_get_name(video_st->codec->codec_id));
ret = open_video(outContext, video_codec, video_st);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Open video stream failed.\n");
goto end;
}
}
else {
av_log(NULL, AV_LOG_FATAL, "Add video stream for the codec '%s' failed.\n", avcodec_get_name(VIDEO_CODEC_ID));
goto end;
}
av_dump_format(outContext, 0, url, 1);
ret = avformat_write_header(outContext, NULL);
if (ret != 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to connect to RTSP server for '%s'.\n", url);
goto end;
}
printf("Press any key to start streaming...\n");
getchar();
auto startSend = std::chrono::system_clock::now();
while (video_st) {
frameCount++;
auto startFrame = std::chrono::system_clock::now();
ret = write_video_frame(outContext, video_st, frameCount);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Write video frame failed.\n", url);
goto end;
}
auto streamDuration = std::chrono::duration_cast<chrono::milliseconds>(std::chrono::system_clock::now() - startSend).count();
printf("Elapsed time %ldms, video stream pts %ld.\n", streamDuration, video_st->pts.val);
if (streamDuration / 1000.0 > STREAM_DURATION) {
break;
}
else {
auto frameDuration = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - startFrame).count();
std::this_thread::sleep_for(std::chrono::milliseconds((long)(1000.0 / STREAM_FRAME_RATE - frameDuration)));
}
}
if (video_st) {
avcodec_close(video_st->codec);
av_free(src_picture.data[0]);
av_free(dst_picture.data[0]);
av_frame_free(&frame);
}
avformat_free_context(outContext);
end:
printf("finished.\n");
getchar();
return 0;
}

How to catch stdout stream in ffmpeg then pipe it to v4l2loopback

I'm trying to pipe my H.264 stream to ffmpeg and then on to my v4l2loopback device. The problem is that I'm fairly new to Linux, so I just can't get it working.
The stream can be output to stdout, but I do not know how to catch it with ffmpeg and then pipe it on to my v4l2loopback device.
Does anybody know how this could be done, or have a pointer on how to solve it?
This is the capture program:
PS: You can find the options for the capture program near the bottom of the code.
/*
* V4L2 video capture example, modified by Derek Molloy for the Logitech C920 camera
* Modifications, added the -F mode for H264 capture and associated help detail
* www.derekmolloy.ie
*
* V4L2 video capture example
*
* This program can be used and distributed without restrictions.
*
* This program is provided with the V4L2 API
* see http://linuxtv.org/docs.php for more information
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <getopt.h> /* getopt_long() */
#include <fcntl.h> /* low-level i/o */
#include <unistd.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#define CLEAR(x) memset(&(x), 0, sizeof(x))
enum io_method {
IO_METHOD_READ,
IO_METHOD_MMAP,
IO_METHOD_USERPTR,
};
struct buffer {
void *start;
size_t length;
};
static char *dev_name;
static enum io_method io = IO_METHOD_MMAP;
static int fd = -1;
struct buffer *buffers;
static unsigned int n_buffers;
static int out_buf;
static int force_format = 0;
static int frame_count = 100;
static void errno_exit(const char *s)
{
fprintf(stderr, "%s error %d, %s\n", s, errno, strerror(errno));
exit(EXIT_FAILURE);
}
static int xioctl(int fh, int request, void *arg)
{
int r;
do {
r = ioctl(fh, request, arg);
} while (-1 == r && EINTR == errno);
return r;
}
static void process_image(const void *p, int size)
{
if (out_buf)
fwrite(p, size, 1, stdout);
fflush(stderr);
fprintf(stderr, ".");
fflush(stdout);
}
static int read_frame(void)
{
struct v4l2_buffer buf;
unsigned int i;
switch (io) {
case IO_METHOD_READ:
if (-1 == read(fd, buffers[0].start, buffers[0].length)) {
switch (errno) {
case EAGAIN:
return 0;
case EIO:
/* Could ignore EIO, see spec. */
/* fall through */
default:
errno_exit("read");
}
}
process_image(buffers[0].start, buffers[0].length);
break;
case IO_METHOD_MMAP:
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf)) {
switch (errno) {
case EAGAIN:
return 0;
case EIO:
/* Could ignore EIO, see spec. */
/* fall through */
default:
errno_exit("VIDIOC_DQBUF");
}
}
assert(buf.index < n_buffers);
process_image(buffers[buf.index].start, buf.bytesused);
if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
errno_exit("VIDIOC_QBUF");
break;
case IO_METHOD_USERPTR:
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_USERPTR;
if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf)) {
switch (errno) {
case EAGAIN:
return 0;
case EIO:
/* Could ignore EIO, see spec. */
/* fall through */
default:
errno_exit("VIDIOC_DQBUF");
}
}
for (i = 0; i < n_buffers; ++i)
if (buf.m.userptr == (unsigned long)buffers[i].start
&& buf.length == buffers[i].length)
break;
assert(i < n_buffers);
process_image((void *)buf.m.userptr, buf.bytesused);
if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
errno_exit("VIDIOC_QBUF");
break;
}
return 1;
}
static void mainloop(void)
{
unsigned int count;
unsigned int loopIsInfinite = 0;
if (frame_count == 0) loopIsInfinite = 1; //infinite loop
count = frame_count;
while ((count-- > 0) || loopIsInfinite) {
for (;;) {
fd_set fds;
struct timeval tv;
int r;
FD_ZERO(&fds);
FD_SET(fd, &fds);
/* Timeout. */
tv.tv_sec = 2;
tv.tv_usec = 0;
r = select(fd + 1, &fds, NULL, NULL, &tv);
if (-1 == r) {
if (EINTR == errno)
continue;
errno_exit("select");
}
if (0 == r) {
fprintf(stderr, "select timeout\n");
exit(EXIT_FAILURE);
}
if (read_frame())
break;
/* EAGAIN - continue select loop. */
}
}
}
static void stop_capturing(void)
{
enum v4l2_buf_type type;
switch (io) {
case IO_METHOD_READ:
/* Nothing to do. */
break;
case IO_METHOD_MMAP:
case IO_METHOD_USERPTR:
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (-1 == xioctl(fd, VIDIOC_STREAMOFF, &type))
errno_exit("VIDIOC_STREAMOFF");
break;
}
}
static void start_capturing(void)
{
unsigned int i;
enum v4l2_buf_type type;
switch (io) {
case IO_METHOD_READ:
/* Nothing to do. */
break;
case IO_METHOD_MMAP:
for (i = 0; i < n_buffers; ++i) {
struct v4l2_buffer buf;
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = i;
if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
errno_exit("VIDIOC_QBUF");
}
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (-1 == xioctl(fd, VIDIOC_STREAMON, &type))
errno_exit("VIDIOC_STREAMON");
break;
case IO_METHOD_USERPTR:
for (i = 0; i < n_buffers; ++i) {
struct v4l2_buffer buf;
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_USERPTR;
buf.index = i;
buf.m.userptr = (unsigned long)buffers[i].start;
buf.length = buffers[i].length;
if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
errno_exit("VIDIOC_QBUF");
}
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (-1 == xioctl(fd, VIDIOC_STREAMON, &type))
errno_exit("VIDIOC_STREAMON");
break;
}
}
static void uninit_device(void)
{
unsigned int i;
switch (io) {
case IO_METHOD_READ:
free(buffers[0].start);
break;
case IO_METHOD_MMAP:
for (i = 0; i < n_buffers; ++i)
if (-1 == munmap(buffers[i].start, buffers[i].length))
errno_exit("munmap");
break;
case IO_METHOD_USERPTR:
for (i = 0; i < n_buffers; ++i)
free(buffers[i].start);
break;
}
free(buffers);
}
static void init_read(unsigned int buffer_size)
{
buffers = calloc(1, sizeof(*buffers));
if (!buffers) {
fprintf(stderr, "Out of memory\n");
exit(EXIT_FAILURE);
}
buffers[0].length = buffer_size;
buffers[0].start = malloc(buffer_size);
if (!buffers[0].start) {
fprintf(stderr, "Out of memory\n");
exit(EXIT_FAILURE);
}
}
static void init_mmap(void)
{
struct v4l2_requestbuffers req;
CLEAR(req);
req.count = 4;
req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
req.memory = V4L2_MEMORY_MMAP;
if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
if (EINVAL == errno) {
fprintf(stderr, "%s does not support "
"memory mapping\n", dev_name);
exit(EXIT_FAILURE);
} else {
errno_exit("VIDIOC_REQBUFS");
}
}
if (req.count < 2) {
fprintf(stderr, "Insufficient buffer memory on %s\n",
dev_name);
exit(EXIT_FAILURE);
}
buffers = calloc(req.count, sizeof(*buffers));
if (!buffers) {
fprintf(stderr, "Out of memory\n");
exit(EXIT_FAILURE);
}
for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
struct v4l2_buffer buf;
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = n_buffers;
if (-1 == xioctl(fd, VIDIOC_QUERYBUF, &buf))
errno_exit("VIDIOC_QUERYBUF");
buffers[n_buffers].length = buf.length;
buffers[n_buffers].start =
mmap(NULL /* start anywhere */,
buf.length,
PROT_READ | PROT_WRITE /* required */,
MAP_SHARED /* recommended */,
fd, buf.m.offset);
if (MAP_FAILED == buffers[n_buffers].start)
errno_exit("mmap");
}
}
static void init_userp(unsigned int buffer_size)
{
struct v4l2_requestbuffers req;
CLEAR(req);
req.count = 4;
req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
req.memory = V4L2_MEMORY_USERPTR;
if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
if (EINVAL == errno) {
fprintf(stderr, "%s does not support "
"user pointer i/o\n", dev_name);
exit(EXIT_FAILURE);
} else {
errno_exit("VIDIOC_REQBUFS");
}
}
buffers = calloc(4, sizeof(*buffers));
if (!buffers) {
fprintf(stderr, "Out of memory\n");
exit(EXIT_FAILURE);
}
for (n_buffers = 0; n_buffers < 4; ++n_buffers) {
buffers[n_buffers].length = buffer_size;
buffers[n_buffers].start = malloc(buffer_size);
if (!buffers[n_buffers].start) {
fprintf(stderr, "Out of memory\n");
exit(EXIT_FAILURE);
}
}
}
static void init_device(void)
{
struct v4l2_capability cap;
struct v4l2_cropcap cropcap;
struct v4l2_crop crop;
struct v4l2_format fmt;
unsigned int min;
if (-1 == xioctl(fd, VIDIOC_QUERYCAP, &cap)) {
if (EINVAL == errno) {
fprintf(stderr, "%s is no V4L2 device\n",
dev_name);
exit(EXIT_FAILURE);
} else {
errno_exit("VIDIOC_QUERYCAP");
}
}
if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
fprintf(stderr, "%s is no video capture device\n",
dev_name);
exit(EXIT_FAILURE);
}
switch (io) {
case IO_METHOD_READ:
if (!(cap.capabilities & V4L2_CAP_READWRITE)) {
fprintf(stderr, "%s does not support read i/o\n",
dev_name);
exit(EXIT_FAILURE);
}
break;
case IO_METHOD_MMAP:
case IO_METHOD_USERPTR:
if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
fprintf(stderr, "%s does not support streaming i/o\n",
dev_name);
exit(EXIT_FAILURE);
}
break;
}
/* Select video input, video standard and tune here. */
CLEAR(cropcap);
cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (0 == xioctl(fd, VIDIOC_CROPCAP, &cropcap)) {
crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
crop.c = cropcap.defrect; /* reset to default */
if (-1 == xioctl(fd, VIDIOC_S_CROP, &crop)) {
switch (errno) {
case EINVAL:
/* Cropping not supported. */
break;
default:
/* Errors ignored. */
break;
}
}
} else {
/* Errors ignored. */
}
CLEAR(fmt);
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fprintf(stderr, "Force Format %d\n", force_format);
if (force_format) {
if (force_format==2){
fmt.fmt.pix.width = 1920;
fmt.fmt.pix.height = 1080;
fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
}
else if(force_format==1){
fmt.fmt.pix.width = 640;
fmt.fmt.pix.height = 480;
fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
}
if (-1 == xioctl(fd, VIDIOC_S_FMT, &fmt))
errno_exit("VIDIOC_S_FMT");
/* Note VIDIOC_S_FMT may change width and height. */
} else {
/* Preserve original settings as set by v4l2-ctl for example */
if (-1 == xioctl(fd, VIDIOC_G_FMT, &fmt))
errno_exit("VIDIOC_G_FMT");
}
/* Buggy driver paranoia. */
min = fmt.fmt.pix.width * 2;
if (fmt.fmt.pix.bytesperline < min)
fmt.fmt.pix.bytesperline = min;
min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;
if (fmt.fmt.pix.sizeimage < min)
fmt.fmt.pix.sizeimage = min;
switch (io) {
case IO_METHOD_READ:
init_read(fmt.fmt.pix.sizeimage);
break;
case IO_METHOD_MMAP:
init_mmap();
break;
case IO_METHOD_USERPTR:
init_userp(fmt.fmt.pix.sizeimage);
break;
}
}
static void close_device(void)
{
if (-1 == close(fd))
errno_exit("close");
fd = -1;
}
static void open_device(void)
{
struct stat st;
if (-1 == stat(dev_name, &st)) {
fprintf(stderr, "Cannot identify '%s': %d, %s\n",
dev_name, errno, strerror(errno));
exit(EXIT_FAILURE);
}
if (!S_ISCHR(st.st_mode)) {
fprintf(stderr, "%s is no device\n", dev_name);
exit(EXIT_FAILURE);
}
fd = open(dev_name, O_RDWR /* required */ | O_NONBLOCK, 0);
if (-1 == fd) {
fprintf(stderr, "Cannot open '%s': %d, %s\n",
dev_name, errno, strerror(errno));
exit(EXIT_FAILURE);
}
}
static void usage(FILE *fp, int argc, char **argv)
{
fprintf(fp,
"Usage: %s [options]\n\n"
"Version 1.3\n"
"Options:\n"
"-d | --device name Video device name [%s]\n"
"-h | --help Print this message\n"
"-m | --mmap Use memory mapped buffers [default]\n"
"-r | --read Use read() calls\n"
"-u | --userp Use application allocated buffers\n"
"-o | --output Outputs stream to stdout\n"
"-f | --format Force format to 640x480 YUYV\n"
"-F | --formatH264 Force format to 1920x1080 H264\n"
"-c | --count Number of frames to grab [%i] - use 0 for infinite\n"
"\n"
"Example usage: capture -F -o -c 300 > output.raw\n"
"Captures 300 frames of H264 at 1920x1080 - use raw2mpg4 script to convert to mpg4\n",
argv[0], dev_name, frame_count);
}
static const char short_options[] = "d:hmruofFc:";
static const struct option
long_options[] = {
{ "device", required_argument, NULL, 'd' },
{ "help", no_argument, NULL, 'h' },
{ "mmap", no_argument, NULL, 'm' },
{ "read", no_argument, NULL, 'r' },
{ "userp", no_argument, NULL, 'u' },
{ "output", no_argument, NULL, 'o' },
{ "format", no_argument, NULL, 'f' },
{ "formatH264", no_argument, NULL, 'F' },
{ "count", required_argument, NULL, 'c' },
{ 0, 0, 0, 0 }
};
int main(int argc, char **argv)
{
dev_name = "/dev/video0";
for (;;) {
int idx;
int c;
c = getopt_long(argc, argv,
short_options, long_options, &idx);
if (-1 == c)
break;
switch (c) {
case 0: /* getopt_long() flag */
break;
case 'd':
dev_name = optarg;
break;
case 'h':
usage(stdout, argc, argv);
exit(EXIT_SUCCESS);
case 'm':
io = IO_METHOD_MMAP;
break;
case 'r':
io = IO_METHOD_READ;
break;
case 'u':
io = IO_METHOD_USERPTR;
break;
case 'o':
out_buf++;
break;
case 'f':
force_format=1;
break;
case 'F':
force_format=2;
break;
case 'c':
errno = 0;
frame_count = strtol(optarg, NULL, 0);
if (errno)
errno_exit(optarg);
break;
default:
usage(stderr, argc, argv);
exit(EXIT_FAILURE);
}
}
open_device();
init_device();
start_capturing();
mainloop();
stop_capturing();
uninit_device();
close_device();
fprintf(stderr, "\n");
return 0;
}
It's a modified version of a V4L2 video capture example.
I know that if I had output the stream to a file, I would have to run this command to convert the raw format to MP4:
ffmpeg -f h264 -i output.raw -vcodec copy output.mp4
The v4l2loopback program I'm using can be found here:
https://github.com/umlaeute/v4l2loopback
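For reference, the loopback device node has to exist before anything can be piped into it; it is created by loading the kernel module after building and installing it. A minimal sketch, assuming you want the device to show up as /dev/video3 (video_nr is a v4l2loopback module parameter):
sudo modprobe v4l2loopback video_nr=3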
------------------Update------------------
Okay, so I got the pipe from the capture program to ffmpeg working. It captures the H.264 and I can remux it into an MP4 file with this command:
./capture -F -d /dev/video0 -o | ffmpeg -f h264 -i - -vcodec copy out.mp4
Now I am trying to get the last pipe working with this command:
./capture -F -d /dev/video0 -o | ffmpeg -f h264 -i - -vcodec copy -f mp4 - | gst-launch-0.10 -v fdsrc ! v4l2sink device=/dev/video3
I get these errors:
muxer does not support non seekable output
Could not write header for output file #0 (incorrect codec parameters ?): Invalid argument
Any ideas?
In your last command you are piping an MP4 to GStreamer. See the -f mp4 - part:
./capture -F -d /dev/video0 -o | ffmpeg -f h264 -i - -vcodec copy -f mp4 - | gst-launch-0.10 -v fdsrc ! v4l2sink device=/dev/video3
What you want to do is pipe the H.264 stream inside the MP4 instead.
Try replacing -f mp4 - with -f h264 -.
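The MP4 muxer needs a seekable output so it can write its index, which is exactly what the "muxer does not support non seekable output" error is complaining about; a raw H.264 elementary stream can be written to a pipe without that problem. The full pipeline would then look something like this (untested sketch, same devices as above):
./capture -F -d /dev/video0 -o | ffmpeg -f h264 -i - -vcodec copy -f h264 - | gst-launch-0.10 -v fdsrc ! v4l2sink device=/dev/video3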
In fact, you could probably skip creating an MP4 entirely and just do:
./capture -F -d /dev/video0 -o | gst-launch-0.10 -v fdsrc ! v4l2sink device=/dev/video3
since the -F option forces H.264.
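Another option, if your ffmpeg build includes the video4linux2 output device, is to let ffmpeg decode the H.264 and write raw frames straight to the loopback device, skipping GStreamer altogether. A rough, untested sketch (assumes /dev/video3 is the v4l2loopback node and that yuv420p is a pixel format it accepts):
./capture -F -d /dev/video0 -o | ffmpeg -f h264 -i - -pix_fmt yuv420p -f v4l2 /dev/video3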

Resources