I'm using GStreamer to stream video from a camera to an HLS sink and also to record that video to the filesystem as an MP4. I use a tee to split the video into the two paths. I wrote some code and got it working when I connect everything up front and switch the state to playing. But when I start the streaming first and then try to connect the splitmuxsink to the tee dynamically, the video files I record aren't valid (at least VLC won't play them). The web stream continues to work just fine.
I've reduced my code to a simple version (below) where I start with one splitmuxsink, sleep a while and then add a second one. Although the first splitmuxsink works great, the new one never generates usable video. To make the transition, I switch the playing pipeline to paused, add the new elements (all in the paused state) and switch everything back to playing.
Following the GStreamer manual, I've tried more complicated switching procedures, such as adding a probe to block the new branch of the tee, but the results are the same.
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <ctype.h>
#include <gst/gst.h>
#include <assert.h>
using namespace std;
int main()
{
// Create a pipeline to write a file
printf("starting\n");
gst_init(NULL, NULL);
// Create the pipeline to host the elements
GstElement* pipeline = gst_pipeline_new("pipeline");
assert(pipeline != nullptr);
GstElement* camera = gst_element_factory_make("v4l2src", "v4l2src");
assert(camera != nullptr);
g_object_set(G_OBJECT(camera),
"device", "/dev/video2",
NULL);
GstElement* caps = gst_element_factory_make("capsfilter", "capsfilter");
assert(caps != nullptr);
g_object_set(G_OBJECT(caps),
"caps", gst_caps_from_string("video/x-h264,width=640,height=480,framerate=30/1"),
NULL);
GstElement* tee = gst_element_factory_make("tee", "tee");
GstElement* queue1 = gst_element_factory_make("queue", "queue1");
GstElement* h264parse1 = gst_element_factory_make("h264parse", "h264parse1");
GstElement* splitmux1 = gst_element_factory_make("splitmuxsink", "splitmux1");
g_object_set(G_OBJECT(splitmux1),
"location", "file%03d.mp4",
"max-size-time", 10000000000,
NULL);
assert(tee != nullptr && queue1 != nullptr && h264parse1 != nullptr && splitmux1 != nullptr);
// Attach them all to the pipeline
gst_bin_add_many(GST_BIN (pipeline), camera, caps, tee, queue1, h264parse1, splitmux1, NULL);
// Link the pieces
assert(true == gst_element_link_many(camera, caps, tee, NULL));
GstPad* srcpad = gst_element_get_request_pad(tee, "src_%u");
assert(srcpad != nullptr);
GstPad* dstpad = gst_element_get_static_pad (queue1, "sink");
assert(dstpad != nullptr);
assert(GST_PAD_LINK_OK == gst_pad_link(srcpad, dstpad));
assert(true == gst_element_link_many (queue1, h264parse1, splitmux1, NULL));
assert(GST_STATE_CHANGE_ASYNC == gst_element_set_state (pipeline, GST_STATE_PLAYING));
sleep(35);
printf("Attach the splitmuxsink\n");
// Now attach another sink
GstElement* queue2 = gst_element_factory_make("queue", "queue2");
assert(nullptr != queue2);
GstElement* h264parse2 = gst_element_factory_make("h264parse", "h264parse2");
assert(nullptr != h264parse2);
GstElement* splitmux2 = gst_element_factory_make("splitmuxsink", "splitmux2");
assert(nullptr != splitmux2);
g_object_set(G_OBJECT(splitmux2),
"location", "alt%03d.mp4",
"max-size-time", 10000000000,
NULL);
// Set all the states to paused before attaching
assert(GST_STATE_CHANGE_ASYNC == gst_element_set_state (splitmux2, GST_STATE_PAUSED));
assert(GST_STATE_CHANGE_SUCCESS == gst_element_set_state (h264parse2, GST_STATE_PAUSED));
assert(GST_STATE_CHANGE_SUCCESS == gst_element_set_state (queue2, GST_STATE_PAUSED));
assert(GST_STATE_CHANGE_NO_PREROLL == gst_element_set_state (pipeline, GST_STATE_PAUSED));
gst_bin_add_many (GST_BIN (pipeline), queue2, h264parse2, splitmux2, NULL);
assert(true == gst_element_link_many (queue2, h264parse2, splitmux2, NULL));
GstPad* srcpad2 = gst_element_get_request_pad(tee, "src_%u");
assert(nullptr != srcpad2);
GstPad* dstpad2 = gst_element_get_static_pad (queue2, "sink");
assert(nullptr != dstpad2);
assert(GST_PAD_LINK_OK == gst_pad_link(srcpad2, dstpad2));
assert(GST_STATE_CHANGE_ASYNC == gst_element_set_state (pipeline, GST_STATE_PLAYING));
sleep(65);
printf("all done\n");
}
The command is:
gst-launch-1.0 filesrc location=/home/pi/Videos/watch.mp4 ! qtdemux name=demux \
demux.audio_0 ! queue ! decodebin ! audioconvert ! audioresample ! autoaudiosink \
demux.video_0 ! queue ! decodebin ! videoconvert ! videoscale ! video/x-raw,width=800,height=480 ! avenc_bmp ! fakesink
It's a little difficult for me to link them together because both 'qtdemux' and 'decodebin' are used in this example.
I have tried with 'tee', but it's obviously slow.
Could anyone give me some help? Thank you.
#include <stdio.h>
#include <gstreamer-1.0/gst/gst.h>
#include <stdbool.h>
typedef struct {
GstElement *pipeline;
GstElement *filesrc;
GstElement *qtdemux;
/* Video */
struct{
GstElement *queue;
GstElement *decode;
GstElement *convert;
GstElement *scale;
GstElement *capsfilter;
GstElement *enc_bmp;
GstElement *fakesink;
}video;
/* Audio */
struct{
GstElement *queue;
GstElement *decode;
GstElement *convert;
GstElement *resample;
GstElement *sink;
}audio;
} gstreamer_t;
static void pad_added_handler (GstElement *src, GstPad *pad, gstreamer_t* data)
{
GstCaps *caps;
GstStructure *pad_sct;
const gchar *name;
caps = gst_pad_get_current_caps(pad);
pad_sct = gst_caps_get_structure( caps, 0 );
name = gst_structure_get_name(pad_sct);
printf( "src name = %s\r\n", gst_element_get_name(src) );
printf( "pad name = %s\r\n", name);
printf( "Received new pad '%s' from '%s'.\r\n", GST_PAD_NAME (pad), GST_ELEMENT_NAME (src));
GstPad *sinkpad = NULL;
if(g_str_has_prefix (name, "video/x-h264")) {
sinkpad = gst_element_get_static_pad(data->video.queue, "sink");
if(!gst_pad_link(pad, sinkpad) != GST_PAD_LINK_OK )
printf("not link !!\n\n");
gst_object_unref (sinkpad);
}else if(g_str_has_prefix( name, "audio/mpeg")){
sinkpad = gst_element_get_static_pad ( data->audio.queue, "sink");
if(gst_pad_link( pad, sinkpad) != GST_PAD_LINK_OK )
printf("not link !!\n\n");
gst_object_unref (sinkpad);
}
else
printf("Another Pad: %s.\r\n", name);
gst_caps_unref (caps);
}
int main(int argc, char *argv[]) {
gstreamer_t gstreamer;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
gstreamer.filesrc = gst_element_factory_make ("filesrc", "filesrc");
g_object_set (gstreamer.filesrc, "location", "../../Videos/watch.mp4", NULL);
gstreamer.qtdemux = gst_element_factory_make ("qtdemux", "qtdemux");
/* Video */
gstreamer.video.queue = gst_element_factory_make("queue", "video");
gstreamer.video.decode = gst_element_factory_make ("decodebin", "decodebin");
gstreamer.video.convert = gst_element_factory_make ("videoconvert", "videoconvert");
gstreamer.video.scale = gst_element_factory_make ("videoscale", "videoscale");
gstreamer.video.capsfilter = gst_element_factory_make ("capsfilter", "capsfilter");
GstCaps *Caps = gst_caps_from_string("video/x-raw,width=800,height=480");
g_object_set(G_OBJECT(gstreamer.video.capsfilter), "caps", Caps, NULL);
gst_caps_unref(Caps);
gstreamer.video.enc_bmp = gst_element_factory_make ("avenc_bmp", "avenc_bmp");
gstreamer.video.fakesink = gst_element_factory_make ("fakesink", "fakesink");
g_object_set (gstreamer.video.fakesink, "sync", true, NULL);
/* Audio */
gstreamer.audio.queue = gst_element_factory_make("queue", "queue_audio");
gstreamer.audio.decode = gst_element_factory_make("decodebin", "decodebin");
gstreamer.audio.convert = gst_element_factory_make("audioconvert", "audioconvert");
gstreamer.audio.resample = gst_element_factory_make("audioresample", "audioresample");
gstreamer.audio.sink = gst_element_factory_make("autoaudiosink", "autoaudiosink");
/* Create the empty pipeline */
gstreamer.pipeline = gst_pipeline_new ("gstreamer-pipeline");
if (!gstreamer.pipeline || !gstreamer.filesrc || !gstreamer.qtdemux ||
!gstreamer.video.queue || !gstreamer.video.decode || !gstreamer.video.convert || !gstreamer.video.scale || !gstreamer.video.capsfilter || !gstreamer.video.enc_bmp || !gstreamer.video.fakesink||
!gstreamer.audio.queue || !gstreamer.audio.decode || !gstreamer.audio.convert || !gstreamer.audio.resample ||!gstreamer.audio.sink)
{
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Build the pipeline. Note that we are NOT linking the source at this
* point. We will do it later. */
gst_bin_add_many (GST_BIN (gstreamer.pipeline), gstreamer.filesrc, gstreamer.qtdemux,
gstreamer.video.queue, gstreamer.video.decode, gstreamer.video.convert, gstreamer.video.scale, gstreamer.video.capsfilter, gstreamer.video.enc_bmp, gstreamer.video.fakesink,
gstreamer.audio.queue, gstreamer.audio.decode, gstreamer.audio.convert, gstreamer.audio.resample, gstreamer.audio.sink, NULL);
if (!gst_element_link (gstreamer.filesrc, gstreamer.qtdemux)) {
g_printerr ("Elements filesrc and qtdemux could not be linked.\n");
gst_object_unref (gstreamer.pipeline);
return -1;
}
if (!gst_element_link_many (gstreamer.video.convert, gstreamer.video.scale, gstreamer.video.capsfilter, gstreamer.video.enc_bmp, gstreamer.video.fakesink, NULL)) {
g_printerr ("Video elements could not be linked.\n");
gst_object_unref (gstreamer.pipeline);
return -1;
}
if (!gst_element_link_many (gstreamer.audio.convert, gstreamer.audio.resample, gstreamer.audio.sink, NULL)) {
g_printerr ("Audio elements could not be linked.\n");
gst_object_unref (gstreamer.pipeline);
return -1;
}
g_signal_connect (gstreamer.qtdemux, "pad-added", G_CALLBACK (pad_added_handler), &gstreamer);
/* Start playing */
ret = gst_element_set_state (gstreamer.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (gstreamer.pipeline);
return -1;
}
/* Listen to the bus */
bus = gst_element_get_bus (gstreamer.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("\nEnd-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (gstreamer.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (gstreamer.pipeline, GST_STATE_NULL);
gst_object_unref (gstreamer.pipeline);
return 0;
}
The output is:
(stream:5337): GStreamer-WARNING **: 02:28:38.279: Name 'decodebin' is not unique in bin 'gstreamer-pipeline', not adding
Pipeline state changed from NULL to READY:
src name = qtdemux
pad name = video/x-h264
Received new pad 'video_0' from 'qtdemux'.
not link !!
src name = qtdemux
pad name = audio/mpeg
Received new pad 'audio_0' from 'qtdemux'.
Error received from element qtdemux: Internal data stream error.
Debugging information: ../gst/isomp4/qtdemux.c(6545): gst_qtdemux_loop (): /GstPipeline:gstreamer-pipeline/GstQTDemux:qtdemux:
streaming stopped, reason not-linked (-1)
I don't know how to link the video and audio.
The problem you're facing is with decodebin: a single pipeline cannot contain two elements that share the same name.
In your case,
gstreamer.video.decode = gst_element_factory_make ("decodebin", "decodebin");
and
gstreamer.audio.decode = gst_element_factory_make("decodebin", "decodebin");
both have the name "decodebin".
That's why one of the decodebins is not even added to the pipeline.
Change the names to something unique, for example
gstreamer.audio.decode = gst_element_factory_make("decodebin", "decodebin-audio");
and
gstreamer.video.decode = gst_element_factory_make("decodebin", "decodebin-video");
This rule applies to any element that is added directly to the same bin or pipeline: element names must be unique within their parent bin.
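Alternatively, if you do not care about the exact names, you can let GStreamer generate unique ones for you by passing NULL as the name argument. A minimal sketch against the code above:
/* GStreamer auto-generates unique names such as "decodebin0", "decodebin1" */
gstreamer.video.decode = gst_element_factory_make ("decodebin", NULL);
gstreamer.audio.decode = gst_element_factory_make ("decodebin", NULL);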
I've found this post about changing the name of a thread.
I tried the prctl() and pthread_setname_np() functions. Both change the name of ALL my threads. In other words, it doesn't seem to work as expected.
I used:
pthread_setname_np(pthread_self(), "thread ONE");
and
pthread_setname_np(pthread_self(), "thread TWO");
Depending on which runs first, both threads say "thread ONE" or "thread TWO". I was expecting one of them to be "thread ONE" and the other to be "thread TWO".
Am I doing something wrong?
As proposed by Tzig in a comment, I tested the example shown in the pthread_setname_np() documentation. However, I needed to test with at least two threads, so I changed the code as follows to have a thread1 and a thread2.
With the default names, I can start htop and use F4 to show only the threads/processes whose names include THREAD (I can also pass different names on the command line, e.g. ./a.out MULTIFOO MULTIBAR, and then use the word MULTI as the filter).
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#define NAMELEN 16
#define errExitEN(en, msg) \
do { errno = en; perror(msg); \
exit(EXIT_FAILURE); } while (0)
static void *
threadfunc(void *parm)
{
sleep(15); // allow main program to set the thread name
return NULL;
}
int
main(int argc, char **argv)
{
pthread_t thread1, thread2;
int rc;
char thread_name[NAMELEN];
rc = pthread_create(&thread1, NULL, threadfunc, NULL);
if (rc != 0)
errExitEN(rc, "pthread_create");
rc = pthread_create(&thread2, NULL, threadfunc, NULL);
if (rc != 0)
errExitEN(rc, "pthread_create");
// test name of thread1
//
rc = pthread_getname_np(thread1, thread_name, NAMELEN);
if (rc != 0)
errExitEN(rc, "pthread_getname_np");
printf("Created thread1. Default name is: %s\n", thread_name);
rc = pthread_setname_np(thread1, (argc > 1) ? argv[1] : "THREADFOO");
if (rc != 0)
errExitEN(rc, "pthread_setname_np");
sleep(2);
rc = pthread_getname_np(thread1, thread_name, NAMELEN);
if (rc != 0)
errExitEN(rc, "pthread_getname_np");
printf("The thread1 name after setting it is %s.\n", thread_name);
// test name of thread2
//
rc = pthread_getname_np(thread2, thread_name, NAMELEN);
if (rc != 0)
errExitEN(rc, "pthread_getname_np");
printf("Created thread2. Default name is: %s\n", thread_name);
rc = pthread_setname_np(thread2, (argc > 2) ? argv[2] : "THREADBAR");
if (rc != 0)
errExitEN(rc, "pthread_setname_np");
sleep(2);
rc = pthread_getname_np(thread2, thread_name, NAMELEN);
if (rc != 0)
errExitEN(rc, "pthread_getname_np");
printf("The thread2 name after setting it is %s.\n", thread_name);
// thread1 name changed too?
//
rc = pthread_getname_np(thread1, thread_name, NAMELEN);
if (rc != 0)
errExitEN(rc, "pthread_getname_np");
printf("The thread1 name after setting thread2 name is %s.\n", thread_name);
rc = pthread_join(thread1, NULL);
if (rc != 0)
errExitEN(rc, "pthread_join");
rc = pthread_join(thread2, NULL);
if (rc != 0)
errExitEN(rc, "pthread_join");
printf("Done\n");
exit(EXIT_SUCCESS);
}
P.S. There is a bug in the original:
rc = pthread_getname_np(thread, thread_name,
(argc > 2) ? atoi(argv[1]) : NAMELEN);
Notice that the atoi() uses argv[1] instead of argv[2]. (I have reported the bug to the man-pages maintainers.)
In my example, I use the second argument as the name of the second thread and always use NAMELEN as the length of my buffer. I have no reason for reducing that amount.
RESULTS:
As expected, the pthread_getname_np() works. Great!
However, htop and cat /proc/self/task/<tid>/comm both return the last name that was set. I guess that's a bug in the Linux kernel... Yet, my process has other threads created by the NVidia driver and those do have different names.
Just in case, I also tried the functions found in "Linux - how to change info of forked processes in C", which did seem wrong since that question is about fork()'ed processes. Each task does have its own entry under /proc, but the issue, I suppose, is that the threads share the same memory as their main process, so there is only one location for the argv[0] data. In other words, they implemented a pthread_setname_np() which works internally, but does not reflect that name in tools such as ps and htop.
Okay, I found out how to make it work: you want to write the name to the proc file directly. Then it works as expected and each thread gets its own name.
First, we need to know the thread identifier (its number, not the pthread_t). Under Linux, you can get that information with the following function:
#include <sys/syscall.h>   // SYS_gettid
#include <unistd.h>        // syscall()

pid_t gettid()
{
    return static_cast<pid_t>(syscall(SYS_gettid));
}
Now, to set the thread name, open that thread's comm file in write mode and write the name there:
#include <fstream>
#include <stdexcept>
#include <string>

void set_thread_name(std::string const & name)
{
    if(name.length() > 15)
        throw std::range_error("thread name is limited to 15 chars");
    pid_t const tid(gettid());
    std::ofstream comm("/proc/" + std::to_string(tid) + "/comm");
    comm << name;
}
I use C++ which simplifies things. You can, of course, do the same thing in C:
#include <errno.h>
#include <stdio.h>
#include <string.h>

int set_thread_name(const char * name)
{
    pid_t tid;
    char filename[32];   /* "/proc/" + tid (up to 10 digits) + "/comm" + NUL */
    FILE * comm;

    if(strlen(name) > 15)
    {
        errno = EINVAL;
        return -1;
    }
    tid = gettid();
    snprintf(filename, sizeof(filename), "/proc/%d/comm", (int)tid);
    comm = fopen(filename, "w");
    if(comm == NULL)
        return -1;
    fprintf(comm, "%s", name);
    fclose(comm);
    return 0;
}
Now each one of my threads has a different name. Thanks to Nate, who gave me the idea of trying this (even if his comment does not quite read that way).
You may want to call pthread_setname_np() as well; somehow that function will not pick up the name you wrote directly to the comm file.
I'm trying to make a video player. I have added a thread to time how long each video frame should be shown on the screen. I decode the video and update the window in the main thread; the second thread gets the packets, works out how long each packet should be displayed, sends the packet to the main thread, and then waits for that time to elapse.
For some reason I get this error:
terminate called after throwing an instance of 'std::future_error'
what(): std::future_error: No associated state
What's causing the error?
My Code:
extern "C"{
//FFmpeg libraries
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
//SDL2 libraries
#include <SDL2/SDL.h>
}
// compatibility with newer API
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#define av_frame_free avcodec_free_frame
#endif
//C++ libraries
#include <memory>
#include <stdio.h>
#include <iostream>
#include <chrono>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <atomic>
typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt;
} PacketQueue;
std::atomic<bool> quitting;
std::mutex mutex;
std::condition_variable convar;
int packet_queue_put(PacketQueue *q, AVPacket *pkt){
AVPacketList *pkt1;
if(av_dup_packet(pkt) < 0){
return -1;
}
pkt1 = (AVPacketList*) av_malloc(sizeof(AVPacketList));
if(!pkt1){
return -1;
}
pkt1->pkt = *pkt;
pkt1->next = NULL;
std::lock_guard<std::mutex> lock(mutex);
if (!q->last_pkt){
q->first_pkt = pkt1;
}else{
q->last_pkt->next = pkt1;
}
q->last_pkt = pkt1;
convar.notify_all();
return 0;
}
static int packet_queue_get(PacketQueue *q, AVPacket *pkt){
AVPacketList *pkt1;
int ret;
std::unique_lock<std::mutex> lk(mutex);
while(1){
if(quitting){
ret = -1;
break;
}
pkt1 = q->first_pkt;
if(pkt1){
q->first_pkt = pkt1->next;
if(!q->first_pkt){
q->last_pkt = NULL;
}
*pkt = pkt1->pkt;
av_free(pkt1);
ret = 1;
break;
}else {
convar.wait_for(lk, std::chrono::milliseconds(1));
}
}
return ret;
}
void videoTimerFunc(AVRational time_base, PacketQueue* videoq, std::promise<AVPacket> prms){
AVPacket pkt;
int64_t last_pts = 0;
int64_t frameDelay;
AVRational microseconds = {1, 1000000};
while(!quitting){
// Getting packet and check if there are more packets
if(!packet_queue_get(videoq, &pkt)){
// Close programme
quitting = true;
}else {
// Send packet and create timer
frameDelay = av_rescale_q(pkt.dts, time_base, microseconds) - last_pts;
last_pts = av_rescale_q(pkt.dts, time_base, microseconds);
prms.set_value(pkt);
std::this_thread::sleep_for(std::chrono::microseconds(frameDelay));
}
}
}
int main(int argc, char *argv[]){
AVFormatContext* FormatCtx = nullptr;
AVCodecContext* CodecCtxOrig = nullptr;
AVCodecContext* CodecCtx = nullptr;
AVCodec* Codec = nullptr;
int videoStream;
AVFrame* Frame = nullptr;
AVPacket packet;
struct SwsContext* SwsCtx = nullptr;
PacketQueue videoq;
std::promise<AVPacket> pktprms;
std::future<AVPacket> pktftr = pktprms.get_future();
int frameFinished;
int64_t lastPTS;
SDL_Event event;
SDL_Window* screen;
SDL_Renderer* renderer;
SDL_Texture* texture;
std::shared_ptr<Uint8> yPlane, uPlane, vPlane;
int uvPitch;
if (argc != 2) {
fprintf(stderr, "Usage: %s <file>\n", argv[0]);
return -1;
}
// Register all formats and codecs
av_register_all();
// Initialise SDL2
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
fprintf(stderr, "Couldn't initialise SDL - %s\n", SDL_GetError());
return -1;
}
// Setting things up
quitting = false;
memset(&videoq, 0, sizeof(PacketQueue));
// Open video file
if(avformat_open_input(&FormatCtx, argv[1], NULL, NULL) != 0){
fprintf(stderr, "Couldn't open file\n");
return -1; // Couldn't open file
}
// Retrieve stream information
if(avformat_find_stream_info(FormatCtx, NULL) < 0){
fprintf(stderr, "Couldn't find stream information\n");
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Couldn't find stream information
}
// Find the video stream
videoStream = av_find_best_stream(FormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if(videoStream < 0){
fprintf(stderr, "Couldn't find video stream\n");
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Didn't find a video stream
}
// Get a pointer to the codec context for the video stream
CodecCtxOrig = FormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
Codec = avcodec_find_decoder(CodecCtxOrig->codec_id);
if(Codec == NULL){
fprintf(stderr, "Unsupported codec\n");
// Close the codec
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Codec not found
}
// Copy context
CodecCtx = avcodec_alloc_context3(Codec);
if(avcodec_copy_context(CodecCtx, CodecCtxOrig) != 0){
fprintf(stderr, "Couldn't copy codec context");
// Close the codec
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Error copying codec context
}
// Open codec
if(avcodec_open2(CodecCtx, Codec, NULL) < 0){
fprintf(stderr, "Couldn't open codec\n");
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Could not open codec
}
// Allocate video frame
Frame = av_frame_alloc();
// Make a screen to put our video
screen = SDL_CreateWindow("Video Player", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, CodecCtx->width, CodecCtx->height, 0);
if(!screen){
fprintf(stderr, "SDL: could not create window - exiting\n");
quitting = true;
// Clean up SDL2
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
renderer = SDL_CreateRenderer(screen, -1, 0);
if(!renderer){
fprintf(stderr, "SDL: could not create renderer - exiting\n");
quitting = true;
// Clean up SDL2
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
// Allocate a place to put our YUV image on that screen
texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, CodecCtx->width, CodecCtx->height);
if(!texture){
fprintf(stderr, "SDL: could not create texture - exiting\n");
quitting = true;
// Clean up SDL2
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
// Initialise SWS context for software scaling
SwsCtx = sws_getContext(CodecCtx->width, CodecCtx->height, CodecCtx->pix_fmt,
CodecCtx->width, CodecCtx->height, PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
if(!SwsCtx){
fprintf(stderr, "Couldn't create sws context\n");
quitting = true;
// Clean up SDL2
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
// set up YV12 pixel array (12 bits per pixel)
yPlane = std::shared_ptr<Uint8>((Uint8 *)::operator new (CodecCtx->width * CodecCtx->height, std::nothrow));
uPlane = std::shared_ptr<Uint8>((Uint8 *)::operator new (CodecCtx->width * CodecCtx->height / 4, std::nothrow));
vPlane = std::shared_ptr<Uint8>((Uint8 *)::operator new (CodecCtx->width * CodecCtx->height / 4, std::nothrow));
uvPitch = CodecCtx->width / 2;
if (!yPlane || !uPlane || !vPlane) {
fprintf(stderr, "Could not allocate pixel buffers - exiting\n");
quitting = true;
// Clean up SDL2
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
std::thread videoTimerThread(videoTimerFunc, FormatCtx->streams[videoStream]->time_base, &videoq, std::move(pktprms));
while (!quitting) {
// Check for more packets
if(av_read_frame(FormatCtx, &packet) >= 0){
// Check what stream it belongs to
if (packet.stream_index == videoStream) {
packet_queue_put(&videoq, &packet);
}else{
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
}
// Check if its time to update
if(pktftr.wait_for(std::chrono::milliseconds(1)) == std::future_status::ready){
// Getting packet
packet = pktftr.get();
// Decode video frame
avcodec_decode_video2(CodecCtx, Frame, &frameFinished, &packet);
// Did we get a video frame?
if (frameFinished) {
AVPicture pict;
pict.data[0] = yPlane.get();
pict.data[1] = uPlane.get();
pict.data[2] = vPlane.get();
pict.linesize[0] = CodecCtx->width;
pict.linesize[1] = uvPitch;
pict.linesize[2] = uvPitch;
// Convert the image into YUV format that SDL uses
sws_scale(SwsCtx, (uint8_t const * const *) Frame->data, Frame->linesize, 0, CodecCtx->height, pict.data, pict.linesize);
SDL_UpdateYUVTexture(texture, NULL, yPlane.get(), CodecCtx->width, uPlane.get(), uvPitch, vPlane.get(), uvPitch);
SDL_RenderClear(renderer);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
SDL_PollEvent(&event);
switch (event.type) {
case SDL_QUIT:
quitting = true;
break;
default:
break;
}
}
videoTimerThread.join();
//SDL2 clean up
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Free Sws
sws_freeContext(SwsCtx);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return 0;
}
You can't reuse a std::promise/std::future pair in a loop: once the promise's value has been set and the future's get() has been called, the future no longer has a shared state, so the next wait_for()/get() on it throws std::future_error with "No associated state". Thanks to Igor Tandetnik for pointing that out in a comment.
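If you still want the timer thread to hand packets to the main loop, one option is to replace the promise/future pair with something that is reusable by design, such as a small mutex/condition-variable queue. Below is a minimal sketch of that idea (assuming C++17 for std::optional; the Channel class and its method names are purely illustrative, not part of the asker's code):
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <optional>
#include <queue>

// Reusable single-producer / single-consumer hand-off (T would be AVPacket here).
template <typename T>
class Channel {
public:
    void put(T value) {
        std::lock_guard<std::mutex> lock(m_);
        q_.push(std::move(value));
        cv_.notify_one();
    }
    // Returns a value if one arrives within the timeout, std::nullopt otherwise.
    std::optional<T> get_for(std::chrono::milliseconds timeout) {
        std::unique_lock<std::mutex> lock(m_);
        if (!cv_.wait_for(lock, timeout, [this] { return !q_.empty(); }))
            return std::nullopt;
        T value = std::move(q_.front());
        q_.pop();
        return value;
    }
private:
    std::mutex m_;
    std::condition_variable cv_;
    std::queue<T> q_;
};
The timer thread would then call put(pkt) where it currently calls prms.set_value(pkt), and the main loop would replace the pktftr.wait_for()/pktftr.get() pair with a single get_for(std::chrono::milliseconds(1)) call, which can safely be repeated every iteration.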
What would be your suggestion in order to create a single instance application, so that only one process is allowed to run at a time? File lock, mutex or what?
A good way is:
#include <sys/file.h>
#include <fcntl.h>
#include <errno.h>
int pid_file = open("/var/run/whatever.pid", O_CREAT | O_RDWR, 0666);
int rc = flock(pid_file, LOCK_EX | LOCK_NB);
if(rc) {
if(EWOULDBLOCK == errno)
; // another instance is running
}
else {
// this is the first instance
}
Note that locking allows you to ignore stale pid files (i.e. you don't have to delete them). When the application terminates for any reason the OS releases the file lock for you.
Pid files are not terribly useful because they can be stale (the file exists but the process does not). Hence, the application executable itself can be locked instead of creating and locking a pid file.
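A minimal sketch of that idea on Linux (locking our own binary via /proc/self/exe; flock() works on a read-only descriptor, and the lock disappears automatically when the process exits):
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>

int main(void)
{
    /* Lock the executable itself instead of a separate pid file. */
    int fd = open("/proc/self/exe", O_RDONLY);
    if (fd < 0 || flock(fd, LOCK_EX | LOCK_NB) != 0) {
        fprintf(stderr, "another instance is already running (or open failed)\n");
        return 1;
    }
    /* ... run the application; keep fd open for its whole lifetime ... */
    return 0;
}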
A more advanced method is to create and bind a unix domain socket using a predefined socket name. Bind succeeds for the first instance of your application. Again, the OS unbinds the socket when the application terminates for any reason. When bind() fails another instance of the application can connect() and use this socket to pass its command line arguments to the first instance.
Here is a solution in C++. It uses the socket recommendation of Maxim. I like this solution better than the file-based locking one, because file-based locking fails if the process crashes without deleting the lock file: another user may then be unable to delete the file and lock it. The socket is deleted automatically when the process exits.
Usage:
int main()
{
SingletonProcess singleton(5555); // pick a port number to use that is specific to this app
if (!singleton())
{
cerr << "process running already. See " << singleton.GetLockFileName() << endl;
return 1;
}
... rest of the app
}
Code:
#include <netinet/in.h>
class SingletonProcess
{
public:
SingletonProcess(uint16_t port0)
: socket_fd(-1)
, rc(1)
, port(port0)
{
}
~SingletonProcess()
{
if (socket_fd != -1)
{
close(socket_fd);
}
}
bool operator()()
{
if (socket_fd == -1 || rc)
{
socket_fd = -1;
rc = 1;
if ((socket_fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
{
throw std::runtime_error(std::string("Could not create socket: ") + strerror(errno));
}
else
{
struct sockaddr_in name;
name.sin_family = AF_INET;
name.sin_port = htons (port);
name.sin_addr.s_addr = htonl (INADDR_ANY);
rc = bind (socket_fd, (struct sockaddr *) &name, sizeof (name));
}
}
return (socket_fd != -1 && rc == 0);
}
std::string GetLockFileName()
{
return "port " + std::to_string(port);
}
private:
int socket_fd = -1;
int rc;
uint16_t port;
};
For Windows, use a named kernel object (e.g. CreateEvent, CreateMutex). For unix, use a pid file: create a file and write your process ID to it.
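A sketch of that pid-file idea on unix, combined with flock() as in the answer above so that a stale file does not matter (the path is just an example):
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/var/run/myapp.pid", O_CREAT | O_RDWR, 0644);  /* example path */
    if (fd < 0 || flock(fd, LOCK_EX | LOCK_NB) != 0) {
        fprintf(stderr, "another instance is already running\n");
        return 1;
    }
    /* Record our pid so other tools can find and signal us. */
    char buf[32];
    int len = snprintf(buf, sizeof(buf), "%d\n", (int)getpid());
    if (ftruncate(fd, 0) != 0 || write(fd, buf, (size_t)len) != (ssize_t)len)
        perror("writing pid file");
    /* ... keep fd open; the lock is released automatically on exit ... */
    return 0;
}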
You can create an "anonymous namespace" AF_UNIX socket. This is completely Linux-specific, but has the advantage that no filesystem actually has to exist.
Read the man page for unix(7) for more info.
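A minimal sketch of that approach (Linux-only; the leading NUL byte in sun_path puts the name in the abstract namespace, so nothing ever appears on the filesystem, and the name itself is arbitrary):
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
    int fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd < 0) { perror("socket"); return 1; }

    struct sockaddr_un addr;
    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    /* sun_path[0] stays '\0', which selects the abstract namespace */
    strncpy(&addr.sun_path[1], "my-app-single-instance", sizeof(addr.sun_path) - 2);

    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0) {
        fprintf(stderr, "another instance already owns the name\n");
        return 1;
    }
    /* ... keep fd open for the lifetime of the process ... */
    return 0;
}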
Avoid file-based locking
It is always good to avoid a file-based locking mechanism when implementing a single-instance application. The user can always rename the lock file and run the application again, as follows:
mv lockfile.pid lockfile1.pid
where lockfile.pid is the lock file whose existence is checked before the application is run.
So it is always preferable to use a locking scheme on an object that is visible only to the kernel; anything that involves the filesystem is not reliable.
The best option, then, is to bind to an inet socket. Note that unix domain sockets reside in the filesystem and are not reliable for this purpose.
Alternatively, you can also do it using DBUS.
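For the D-Bus route, the usual approach is to request a well-known bus name and treat "someone already owns it" as "another instance is running". A rough sketch using libdbus (the bus name is made up; double-check the flag and reply constants against your dbus/dbus.h):
#include <dbus/dbus.h>
#include <stdio.h>

int main(void)
{
    DBusError err;
    dbus_error_init(&err);

    DBusConnection *conn = dbus_bus_get(DBUS_BUS_SESSION, &err);
    if (conn == NULL) {
        fprintf(stderr, "dbus_bus_get failed: %s\n", err.message ? err.message : "unknown");
        return 1;
    }

    /* Only one connection on the bus can own this name at any time. */
    int ret = dbus_bus_request_name(conn, "org.example.MyApp",
                                    DBUS_NAME_FLAG_DO_NOT_QUEUE, &err);
    if (ret != DBUS_REQUEST_NAME_REPLY_PRIMARY_OWNER) {
        fprintf(stderr, "another instance is already running\n");
        return 1;
    }
    /* ... run the application; the name is released when the connection goes away ... */
    return 0;
}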
It seems not to have been mentioned yet: it is possible to create a mutex in shared memory, but it needs to be marked as process-shared via its attributes (not tested):
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
pthread_mutex_t *mutex = shmat(SHARED_MEMORY_ID, NULL, 0);
pthread_mutex_init(mutex, &attr);
There are also shared-memory (System V) semaphores (but I failed to find out how to lock one):
int sem_id = semget(SHARED_MEMORY_KEY, 1, 0);
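For completeness, here is one way such a System V semaphore can serve as a single-instance lock. This sketch relies on Linux initializing a newly created semaphore set to zero, and SEM_KEY is just a made-up key:
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

#define SEM_KEY ((key_t)0x1234beef)   /* made-up, project-specific key */

int main(void)
{
    int sem_id = semget(SEM_KEY, 1, IPC_CREAT | 0600);
    if (sem_id < 0) { perror("semget"); return 1; }

    /* Atomically: fail unless the value is still 0, then raise it to 1.
     * SEM_UNDO makes the kernel drop it back to 0 when this process exits. */
    struct sembuf ops[2] = {
        {0, 0, IPC_NOWAIT},
        {0, 1, SEM_UNDO}
    };
    if (semop(sem_id, ops, 2) != 0) {
        fprintf(stderr, "another instance is running\n");
        return 1;
    }
    /* ... application runs while holding the semaphore ... */
    return 0;
}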
No one has mentioned it, but sem_open() creates a real named semaphore under modern POSIX-compliant OSes. If you give a semaphore an initial value of 1, it becomes a mutex (as long as it is strictly released only if a lock was successfully obtained).
With several sem_open()-based objects, you can create all of the common equivalent Windows named objects: named mutexes, named semaphores, and named events. Named events with "manual" set to true are a bit more difficult to emulate (it requires four semaphore objects to properly emulate CreateEvent(), SetEvent(), and ResetEvent()). Anyway, I digress.
Alternatively, there is named shared memory. You can initialize a pthread mutex with the "shared process" attribute in named shared memory and then all processes can safely access that mutex object after opening a handle to the shared memory with shm_open()/mmap(). sem_open() is easier if it is available for your platform (if it isn't, it should be for sanity's sake).
Regardless of the method you use, to test for a single instance of your application, use the trylock() variant of the wait function (e.g. sem_trywait()). If the process is the only one running, it will successfully lock the mutex. If it isn't, it will fail immediately.
Don't forget to unlock and close the mutex on application exit.
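A sketch of the shm_open()/mmap() variant described above, using a process-shared, robust mutex so the lock can be recovered if its owner dies. The shared-memory name is made up, error handling is minimal, and the race where a second process maps the segment before the creator has finished pthread_mutex_init() is deliberately ignored here:
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    const char *shm_name = "/my-app-lock";   /* made-up name */
    int created = 1;

    int fd = shm_open(shm_name, O_CREAT | O_EXCL | O_RDWR, 0600);
    if (fd < 0) {                            /* segment already exists: just open it */
        created = 0;
        fd = shm_open(shm_name, O_RDWR, 0600);
        if (fd < 0) { perror("shm_open"); return 1; }
    } else if (ftruncate(fd, sizeof(pthread_mutex_t)) != 0) {
        perror("ftruncate"); return 1;
    }

    pthread_mutex_t *mtx = (pthread_mutex_t *)mmap(NULL, sizeof(pthread_mutex_t),
                                   PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (mtx == MAP_FAILED) { perror("mmap"); return 1; }

    if (created) {                           /* only the creator initializes the mutex */
        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
        pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
        pthread_mutex_init(mtx, &attr);
    }

    int rc = pthread_mutex_trylock(mtx);
    if (rc == EOWNERDEAD)                    /* previous instance died while holding it */
        rc = pthread_mutex_consistent(mtx);
    if (rc != 0) {
        fprintf(stderr, "another instance is running\n");
        return 1;
    }
    /* ... application runs while holding the mutex ... */
    return 0;
}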
It will depend on which problem you want to avoid by forcing your application to have only one instance, and on the scope in which you consider instances.
For a daemon, the usual way is to have a /var/run/app.pid file.
For a user application, I've had more problems with applications that prevented me from running them twice than with accidentally running a second instance of an application that shouldn't be run twice. So the answer to "why, and in which scope?" is very important, and will probably suggest answers specific to that why and that intended scope.
Here is a solution based on sem_open
/*
*compile with :
*gcc single.c -o single -pthread
*/
/*
* run multiple instance on 'single', and check the behavior
*/
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <semaphore.h>
#include <unistd.h>
#include <errno.h>
#define SEM_NAME "/mysem_911"
int main()
{
sem_t *sem;
int rc;
sem = sem_open(SEM_NAME, O_CREAT, S_IRWXU, 1);
if(sem==SEM_FAILED){
printf("sem_open: failed errno:%d\n", errno);
return 1;
}
rc=sem_trywait(sem);
if(rc == 0){
printf("Obtained lock !!!\n");
sleep(10);
//sem_post(sem);
sem_unlink(SEM_NAME);
}else{
printf("Lock not obtained\n");
}
}
One of the comments on a different answer says "I found sem_open() rather lacking". I am not sure about the specifics of what is lacking.
Based on the hints in Maxim's answer, here is my POSIX solution for a dual-role daemon (i.e. a single application that can act as a daemon and as a client communicating with that daemon). This scheme has the advantage of providing an elegant solution for the case where the instance started first should become the daemon and all subsequent invocations should just hand their work off to it. It is a complete example, but it lacks a lot of things a real daemon should do (e.g. using syslog for logging, fork() to put itself into the background correctly, dropping privileges, etc.); it is already quite long, and it is fully working as is. I have only tested this on Linux so far, but IIRC it should all be POSIX-compatible.
In the example, the clients can send an integer (passed as their first command line argument and parsed with atoi) over the socket to the daemon, which prints it to stdout. With this kind of socket it is also possible to transfer arrays, structs and even file descriptors (see man 7 unix).
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <sys/socket.h>
#include <sys/un.h>
#define SOCKET_NAME "/tmp/exampled"
static int socket_fd = -1;
static bool isdaemon = false;
static bool run = true;
/* returns
* -1 on errors
* 0 on successful server bindings
* 1 on successful client connects
*/
int singleton_connect(const char *name) {
int len, tmpd;
struct sockaddr_un addr = {0};
if ((tmpd = socket(AF_UNIX, SOCK_DGRAM, 0)) < 0) {
printf("Could not create socket: '%s'.\n", strerror(errno));
return -1;
}
/* fill in socket address structure */
addr.sun_family = AF_UNIX;
strcpy(addr.sun_path, name);
len = offsetof(struct sockaddr_un, sun_path) + strlen(name);
int ret;
unsigned int retries = 1;
do {
/* bind the name to the descriptor */
ret = bind(tmpd, (struct sockaddr *)&addr, len);
/* if this succeeds there was no daemon before */
if (ret == 0) {
socket_fd = tmpd;
isdaemon = true;
return 0;
} else {
if (errno == EADDRINUSE) {
ret = connect(tmpd, (struct sockaddr *) &addr, sizeof(struct sockaddr_un));
if (ret != 0) {
if (errno == ECONNREFUSED) {
printf("Could not connect to socket - assuming daemon died.\n");
unlink(name);
continue;
}
printf("Could not connect to socket: '%s'.\n", strerror(errno));
continue;
}
printf("Daemon is already running.\n");
socket_fd = tmpd;
return 1;
}
printf("Could not bind to socket: '%s'.\n", strerror(errno));
continue;
}
} while (retries-- > 0);
printf("Could neither connect to an existing daemon nor become one.\n");
close(tmpd);
return -1;
}
static void cleanup(void) {
if (socket_fd >= 0) {
if (isdaemon) {
if (unlink(SOCKET_NAME) < 0)
printf("Could not remove FIFO.\n");
} else
close(socket_fd);
}
}
static void handler(int sig) {
run = false;
}
int main(int argc, char **argv) {
switch (singleton_connect(SOCKET_NAME)) {
case 0: { /* Daemon */
struct sigaction sa;
sa.sa_handler = &handler;
sigemptyset(&sa.sa_mask);
if (sigaction(SIGINT, &sa, NULL) != 0 || sigaction(SIGQUIT, &sa, NULL) != 0 || sigaction(SIGTERM, &sa, NULL) != 0) {
printf("Could not set up signal handlers!\n");
cleanup();
return EXIT_FAILURE;
}
struct msghdr msg = {0};
struct iovec iovec;
int client_arg;
iovec.iov_base = &client_arg;
iovec.iov_len = sizeof(client_arg);
msg.msg_iov = &iovec;
msg.msg_iovlen = 1;
while (run) {
int ret = recvmsg(socket_fd, &msg, MSG_DONTWAIT);
if (ret != sizeof(client_arg)) {
if (errno != EAGAIN && errno != EWOULDBLOCK) {
printf("Error while accessing socket: %s\n", strerror(errno));
exit(1);
}
printf("No further client_args in socket.\n");
} else {
printf("received client_arg=%d\n", client_arg);
}
/* do daemon stuff */
sleep(1);
}
printf("Dropped out of daemon loop. Shutting down.\n");
cleanup();
return EXIT_FAILURE;
}
case 1: { /* Client */
if (argc < 2) {
printf("Usage: %s <int>\n", argv[0]);
return EXIT_FAILURE;
}
struct iovec iovec;
struct msghdr msg = {0};
int client_arg = atoi(argv[1]);
iovec.iov_base = &client_arg;
iovec.iov_len = sizeof(client_arg);
msg.msg_iov = &iovec;
msg.msg_iovlen = 1;
int ret = sendmsg(socket_fd, &msg, 0);
if (ret != sizeof(client_arg)) {
if (ret < 0)
printf("Could not send device address to daemon: '%s'!\n", strerror(errno));
else
printf("Could not send device address to daemon completely!\n");
cleanup();
return EXIT_FAILURE;
}
printf("Sent client_arg (%d) to daemon.\n", client_arg);
break;
}
default:
cleanup();
return EXIT_FAILURE;
}
cleanup();
return EXIT_SUCCESS;
}
All credit goes to Mark Lakata; I merely did some very minor touch-up.
main.cpp
#include "singleton.hpp"
#include <iostream>
using namespace std;
int main()
{
SingletonProcess singleton(5555); // pick a port number to use that is specific to this app
if (!singleton())
{
cerr << "process running already. See " << singleton.GetLockFileName() << endl;
return 1;
}
// ... rest of the app
}
singleton.hpp
#include <netinet/in.h>
#include <unistd.h>
#include <cerrno>
#include <string>
#include <cstring>
#include <stdexcept>
using namespace std;
class SingletonProcess
{
public:
SingletonProcess(uint16_t port0)
: socket_fd(-1)
, rc(1)
, port(port0)
{
}
~SingletonProcess()
{
if (socket_fd != -1)
{
close(socket_fd);
}
}
bool operator()()
{
if (socket_fd == -1 || rc)
{
socket_fd = -1;
rc = 1;
if ((socket_fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
{
throw std::runtime_error(std::string("Could not create socket: ") + strerror(errno));
}
else
{
struct sockaddr_in name;
name.sin_family = AF_INET;
name.sin_port = htons (port);
name.sin_addr.s_addr = htonl (INADDR_ANY);
rc = bind (socket_fd, (struct sockaddr *) &name, sizeof (name));
}
}
return (socket_fd != -1 && rc == 0);
}
std::string GetLockFileName()
{
return "port " + std::to_string(port);
}
private:
int socket_fd = -1;
int rc;
uint16_t port;
};
#include <windows.h>
int main(int argc, char *argv[])
{
// ensure only one running instance
HANDLE hMutexHandle = CreateMutex(NULL, TRUE, L"my.mutex.name");
if (GetLastError() == ERROR_ALREADY_EXISTS)
{
return 0;
}
// rest of the program
ReleaseMutex(hMutexHandle);
CloseHandle(hMutexHandle);
return 0;
}
On Windows you could also create a shared data segment and use an interlocked function to test for the first occurrence, e.g.
#include <Windows.h>
#include <stdio.h>
#include <conio.h>
#pragma data_seg("Shared")
volatile LONG lock = 0;
#pragma data_seg()
#pragma comment(linker, "/SECTION:Shared,RWS")
void main()
{
if (InterlockedExchange(&lock, 1) == 0)
printf("first\n");
else
printf("other\n");
getch();
}
I have just written one, and tested it.
#define PID_FILE "/tmp/pidfile"
static void create_pidfile(void) {
int fd = open(PID_FILE, O_RDWR | O_CREAT | O_EXCL, 0);
close(fd);
}
int main(void) {
int fd = open(PID_FILE, O_RDONLY);
if (fd > 0) {
close(fd);
return 0;
}
// make sure only one instance is running
create_pidfile();
}
Just run this code on a separate thread:
void lock() {
while(1) {
ofstream closer("myapplock.locker", ios::trunc);
closer << "locked";
closer.close();
}
}
Run this as your main code:
int main() {
ifstream reader("myapplock.locker");
string s;
reader >> s;
if (s != "locked") {
//your code
}
return 0;
}
#include <gst/gst.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[])
{
GstElement *element;
GstElement *element1;
GstElement *element2;
GstElement *pipeline;
gst_init(&argc, &argv);
if(argc != 2)
{
g_print("usage: %s argv[1] \n", argv[0]);
exit(EXIT_FAILURE);
}
/* Creating a new pipeline */
pipeline = gst_pipeline_new("pipeline");
/* creating a new *.mp3 source element */
element = gst_element_factory_make("filesrc", "source");
if(element != NULL)
g_object_set(G_OBJECT(element), "location", argv[1], NULL);
else
{
g_print("Failed to create element \n");
exit(EXIT_FAILURE);
}
/* creating a new *.mp3 de-coder element */
element1 = gst_element_factory_make("decodebin2", "decoder");
if(element1 != NULL)
g_print("element1 success \n");
else
{
g_print("Failed to create element1 \n");
exit(EXIT_FAILURE);
}
/* creating a new *.mp3 sink element */
element2 = gst_element_factory_make("autoaudiosink", "play_audio");
if(element2 != NULL)
g_print("element2 success \n");
else
{
g_print("Failed to create element2 \n");
exit(EXIT_FAILURE);
}
/* Adding elements to pipeline */
gst_bin_add_many(GST_BIN(pipeline), element, element1, element2, NULL);
/* Linking src to sink element */
gst_element_link_many(element, element1, element2, NULL);
/* start playing */
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
while(gst_bin_iterate_recurse(GST_BIN(pipeline)));
/* stop playing */
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_NULL);
/* un-referencing all the elements in the pipeline */
gst_object_unref(GST_OBJECT(pipeline));
return 0;
}
1) Compilation step: gcc gst6.c -o gst6 `pkg-config --cflags --libs gstreamer-0.10`
It compiles without any warnings or errors.
2) Exported the PKG_CONFIG_PATH env variable, i.e. export PKG_CONFIG_PATH=/usr/lin/pkgconfig.
3) Execution step: ./gst6 /home/user/Downloads/*.mp3
4) Output:
element1 success
element2 success
killed
Unable to play the audio file. Please let me know what's wrong in my program. This is my first program.
This line:
while(gst_bin_iterate_recurse(GST_BIN(pipeline)));
does not make any sense: it requests an iterator in a loop, but neither uses it nor frees it.
If you want to wait until playing is finished, you probably need to use the GstBus functions gst_bus_have_pending and gst_bus_pop to handle messages.
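For example, one way to do that with a single blocking call is gst_bus_timed_pop_filtered(); a minimal sketch against the question's code (same pipeline variable):
/* start playing */
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);

/* block here until an error or end-of-stream message arrives */
GstBus *bus = gst_element_get_bus(pipeline);
GstMessage *msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
        (GstMessageType)(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
if (msg != NULL)
    gst_message_unref(msg);
gst_object_unref(bus);

/* stop playing and clean up as before */
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_NULL);
gst_object_unref(GST_OBJECT(pipeline));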