I am trying to resize the video frame using the mfw_v4lsink plugin, but it is not working. This problem occurs with the mfw_v4lsink plugin installed from the gst-fsl-plugins-3.0.11 package.
When I try to set the video frame size at the start of the pipeline, it does not work, but when I do the same thing after 1 second of playback, it works.
During debugging I did not find the above problem with the mfw_v4lsink plugin installed from the gst-fsl-plugins-3.0.7 package. When I looked into the code of versions 3.0.11 and 3.0.7, there were a lot of changes in the mfw_v4lsink plugin, so I was not able to figure out what was causing the problem.
Test code :
#include <gst/gst.h>
#include <gst/gstcaps.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
/* Application context: bundles the pipeline, every element in it, and the
 * GLib main loop so that callbacks can reach them through a single pointer. */
typedef struct App {
GstElement *pipeline;       /* top-level pipeline container */
GstElement *source;         /* filesrc reading the input file */
GstElement *demuxer;        /* tsdemux; emits "pad-added" per stream */
GstElement *audioDecoder;   /* faad AAC decoder */
GstElement *videoDecoder;   /* vpudec hardware video decoder */
GstElement *audioQueue;     /* decouples demuxer from audio branch */
GstElement *videoQueue;     /* decouples demuxer from video branch */
GstElement *audioResampler; /* audioresample before the sink */
GstElement *videoScaler;    /* videoscale; created but not linked (see on_pad_added) */
GstElement *audioSink;      /* alsasink */
GstElement *videoSink;      /* mfw_v4lsink; disp-width/disp-height set on it */
GstElement *videoCapsFilter; /* capsfilter; created but not linked */
GMainLoop *loop;            /* main loop driving bus/timeout callbacks */
}App;
/* Demuxer "pad-added" callback: inspects the caps of the newly exposed
 * demuxer source pad and links it to the matching branch of the pipeline.
 *
 * element:   the demuxer that emitted the signal (unused).
 * pad:       the newly created source pad (owned by the demuxer).
 * data:      pointer to the App context.
 *
 * BUG FIX: the original code called gst_caps_unref(caps) inside the video
 * branch AND gst_object_unref(caps) again at the end of the function,
 * double-unreffing the caps whenever a video pad appeared. The caps
 * returned by gst_pad_get_caps() carry exactly one reference, so they are
 * now released exactly once, at the end.
 */
static void
on_pad_added (GstElement *element,
              GstPad *pad,
              gpointer data) {
  App *app = (App *) data;
  /* gst_pad_get_caps() returns a new reference; released once below. */
  GstCaps *caps = gst_pad_get_caps(pad);
  GstStructure *structure = gst_caps_get_structure(caps, 0);

  if (g_strrstr(gst_structure_get_name(structure), "video")) {
    /* Video branch: demuxer -> queue -> vpudec -> mfw_v4lsink. */
    GstPad *videoQueuePad = gst_element_get_pad(app->videoQueue, "sink");
    gst_pad_link(pad, videoQueuePad);
    gst_element_link(app->videoQueue, app->videoDecoder);
    gst_element_link(app->videoDecoder, app->videoSink);
    // gst_element_link(app->videoScaler,app->videoSink);
    gst_object_unref(videoQueuePad);
  }

  if (g_strrstr(gst_structure_get_name(structure), "audio")) {
    /* Audio branch: demuxer -> queue -> faad -> audioresample -> alsasink,
     * with a fixed 44.1 kHz / stereo / 16-bit caps filter before the sink. */
    GstPad *audioQueuePad = gst_element_get_pad(app->audioQueue, "sink");
    gst_pad_link(pad, audioQueuePad);
    gst_element_link(app->audioQueue, app->audioDecoder);
    gst_element_link(app->audioDecoder, app->audioResampler);
    GstCaps *capsFilter = gst_caps_new_simple ("audio/x-raw-int",
        "rate", G_TYPE_INT, 44100,
        "channels", G_TYPE_INT, 2,
        "depth", G_TYPE_INT, 16,
        "width", G_TYPE_INT, 16,
        NULL);
    gst_element_link_filtered(app->audioResampler, app->audioSink, capsFilter);
    gst_caps_unref(capsFilter);
    gst_object_unref(audioQueuePad);
  }

  /* Single release of the reference obtained from gst_pad_get_caps(). */
  gst_caps_unref (caps);
}
/* One-shot timeout: re-applies the display geometry on the v4l sink after
 * playback has started (works around the sink ignoring disp-width/height
 * when set before the pipeline is running; see problem description above).
 *
 * BUG FIX: the original returned TRUE, so the timeout kept firing every
 * 2 seconds and re-set the same parameters forever. Returning FALSE
 * removes the GSource after the first invocation, which is all that is
 * needed here.
 */
static gboolean
timeout_cb (gpointer user_data) {
  App *app = (App *) user_data;
  /* "setpara" = 1 tells mfw_v4lsink to re-read the display parameters. */
  g_object_set (G_OBJECT (app->videoSink), "setpara", 1, NULL);
  g_object_set (G_OBJECT (app->videoSink), "disp-width", 400, NULL);
  g_object_set (G_OBJECT (app->videoSink), "disp-height", 240, NULL);
  return FALSE; /* run once, then remove the source */
}
/* Pipeline bus watch: on an ERROR message, prints the error via the
 * default handler, frees the parsed data and quits the main loop.
 * All other message types are ignored. Always returns TRUE so the
 * watch stays installed.
 */
static gboolean
bus_cb (GstBus * bus, GstMessage * msg, gpointer user_data)
{
  App *app = (App *) user_data;

  if (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_ERROR) {
    GError *error = NULL;
    gchar *debug_info = NULL;

    gst_message_parse_error (msg, &error, &debug_info);
    gst_object_default_error (msg->src, error, debug_info);
    g_error_free (error);
    g_free (debug_info);
    g_main_loop_quit (app->loop);
  }

  return TRUE;
}
/* Entry point: builds a filesrc -> tsdemux -> {audio, video} pipeline,
 * starts playback of the file named in argv[1], and schedules a one-shot
 * timeout that re-applies the v4l sink display geometry after 2 seconds.
 *
 * Returns 0 on normal shutdown, -1 on bad arguments or element creation
 * failure.
 */
int
main (int argc,
      char *argv[])
{
  App app;
  GstBus *bus;
  guint bus_watch_id;

  /* Initialisation */
  gst_init (&argc, &argv);
  app.loop = g_main_loop_new (NULL, FALSE);

  /* Check input arguments */
  if (argc != 2) {
    printf("Usage: %s <filename>\n", argv[0]);
    return -1;
  }

  /* Create gstreamer elements */
  app.pipeline = gst_pipeline_new ("audio-player");
  app.source = gst_element_factory_make ("filesrc", "source");
  app.demuxer = gst_element_factory_make ("tsdemux", "demuxer");
  app.audioQueue = gst_element_factory_make ("queue", "audio-queue");
  app.videoQueue = gst_element_factory_make ("queue", "video-queue");
  app.audioDecoder = gst_element_factory_make ("faad", "audio-decoder");
  app.videoDecoder = gst_element_factory_make ("vpudec", "video-decoder");
  app.audioResampler = gst_element_factory_make ("audioresample", "audio-resampler");
  app.videoScaler = gst_element_factory_make ("videoscale", "video-scaler");
  app.audioSink = gst_element_factory_make ("alsasink", "audio-sink");
  app.videoSink = gst_element_factory_make ("mfw_v4lsink", "video-sink");
  app.videoCapsFilter = gst_element_factory_make ("capsfilter", "caps-filter");

  /* BUG FIX: the original check omitted videoCapsFilter even though it is
   * added to the bin below; a missing capsfilter plugin would have caused
   * a NULL-pointer crash in gst_bin_add_many() instead of this message. */
  if (!app.pipeline || !app.source || !app.demuxer || !app.audioDecoder
      || !app.videoDecoder || !app.audioResampler || !app.videoScaler
      || !app.audioSink || !app.videoSink || !app.audioQueue
      || !app.videoQueue || !app.videoCapsFilter) {
    printf("One element could not be created. Exiting.\n");
    return -1;
  }

  /* Set up the pipeline */
  /* we set the input filename to the source element */
  g_object_set (G_OBJECT (app.source), "location", argv[1], NULL);
  g_object_set (G_OBJECT (app.source), "do-timestamp", TRUE, NULL);
  g_object_set (G_OBJECT (app.videoSink), "sync", FALSE, NULL);
  /* Initial display geometry; re-applied later by timeout_cb because the
   * sink may ignore it when set before playback starts. */
  g_object_set (G_OBJECT (app.videoSink), "disp-width", 400, NULL);
  g_object_set (G_OBJECT (app.videoSink), "disp-height", 240, NULL);
  g_object_set (G_OBJECT (app.videoSink), "async", FALSE, NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (app.pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_cb, &app);
  gst_object_unref (bus);

  /* we add all elements into the pipeline; videoScaler and videoCapsFilter
   * are added but never linked (the scaler link is commented out in
   * on_pad_added) — they sit idle in the bin. */
  gst_bin_add_many (GST_BIN (app.pipeline),
      app.source,
      app.demuxer,
      app.audioQueue,
      app.videoQueue,
      app.audioDecoder,
      app.videoDecoder,
      app.audioResampler,
      app.videoScaler,
      app.audioSink,
      app.videoSink,
      app.videoCapsFilter,
      NULL);

  /* we link the elements together; the demuxer pads appear dynamically,
   * so the rest of the linking happens in on_pad_added. */
  gst_element_link (app.source, app.demuxer);
  g_signal_connect (app.demuxer, "pad-added", G_CALLBACK (on_pad_added), &app);

  /* Set the pipeline to "playing" state */
  printf ("Now playing: %s\n", argv[1]);
  gst_element_set_state (app.pipeline, GST_STATE_PLAYING);
  g_timeout_add_seconds (2, timeout_cb, &app);

  /* Iterate */
  g_main_loop_run (app.loop);

  /* Out of the main loop, clean up nicely */
  printf ("Returned, stopping playback\n");
  gst_element_set_state (app.pipeline, GST_STATE_NULL);
  printf ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (app.pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (app.loop);
  return 0;
}
Hello Ashutosh,
Have you tried the following pipeline:
gst-launch v4l2src device=/dev/video0 ! mfw_v4lsink device=/dev/video18 disp-width=1920 disp-height=1080
See the comments at the following link: