author    Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 12:17:53 -0700
committer Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 15:44:42 -0700
commit    9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree      1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/drivers/media/v4l2-core
parent    98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as the OPNFV kvm base. It comes from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git,
branch linux-4.1.y-rt, and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

Importing this way loses all the git history, which is not good; we should
set up a proper OPNFV project repository in the future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/drivers/media/v4l2-core')
-rw-r--r--  kernel/drivers/media/v4l2-core/Kconfig                 |   98
-rw-r--r--  kernel/drivers/media/v4l2-core/Makefile                |   41
-rw-r--r--  kernel/drivers/media/v4l2-core/tuner-core.c            | 1383
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-async.c            |  312
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-clk.c              |  316
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-common.c           |  407
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-compat-ioctl32.c   | 1040
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-ctrls.c            | 3461
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-dev.c              | 1008
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-device.c           |  296
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-dv-timings.c       |  632
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-event.c            |  356
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-fh.c               |  125
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-ioctl.c            | 2596
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-mem2mem.c          |  873
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-of.c               |  228
-rw-r--r--  kernel/drivers/media/v4l2-core/v4l2-subdev.c           |  590
-rw-r--r--  kernel/drivers/media/v4l2-core/videobuf-core.c         | 1196
-rw-r--r--  kernel/drivers/media/v4l2-core/videobuf-dma-contig.c   |  412
-rw-r--r--  kernel/drivers/media/v4l2-core/videobuf-dma-sg.c       |  684
-rw-r--r--  kernel/drivers/media/v4l2-core/videobuf-dvb.c          |  398
-rw-r--r--  kernel/drivers/media/v4l2-core/videobuf-vmalloc.c      |  349
-rw-r--r--  kernel/drivers/media/v4l2-core/videobuf2-core.c        | 3539
-rw-r--r--  kernel/drivers/media/v4l2-core/videobuf2-dma-contig.c  |  885
-rw-r--r--  kernel/drivers/media/v4l2-core/videobuf2-dma-sg.c      |  754
-rw-r--r--  kernel/drivers/media/v4l2-core/videobuf2-dvb.c         |  336
-rw-r--r--  kernel/drivers/media/v4l2-core/videobuf2-memops.c      |  187
-rw-r--r--  kernel/drivers/media/v4l2-core/videobuf2-vmalloc.c     |  464
28 files changed, 22966 insertions, 0 deletions
diff --git a/kernel/drivers/media/v4l2-core/Kconfig b/kernel/drivers/media/v4l2-core/Kconfig
new file mode 100644
index 000000000..ba7e21a73
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/Kconfig
@@ -0,0 +1,98 @@
+#
+# Generic video config states
+#
+
+# Enable the V4L2 core and API
+config VIDEO_V4L2
+ tristate
+ depends on (I2C || I2C=n) && VIDEO_DEV
+ default (I2C || I2C=n) && VIDEO_DEV
+
+config VIDEO_ADV_DEBUG
+ bool "Enable advanced debug functionality on V4L2 drivers"
+ default n
+ ---help---
+ Say Y here to enable advanced debugging functionality on some
+ V4L devices.
+ When in doubt, say N.
+
+config VIDEO_FIXED_MINOR_RANGES
+ bool "Enable old-style fixed minor ranges on drivers/video devices"
+ default n
+ ---help---
+ Say Y here to enable the old-style fixed-range minor assignments.
+ Only useful if you rely on the old behavior and use mknod instead of udev.
+
+ When in doubt, say N.
+
+config VIDEO_PCI_SKELETON
+ tristate "Skeleton PCI V4L2 driver"
+ depends on PCI && BUILD_DOCSRC
+ depends on VIDEO_V4L2 && VIDEOBUF2_CORE
+ depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG
+ ---help---
+ Enable build of the skeleton PCI driver, used as a reference
+ when developing new drivers.
+
+# Used by drivers that need tuner.ko
+config VIDEO_TUNER
+ tristate
+ depends on MEDIA_TUNER
+
+# Used by drivers that need v4l2-mem2mem.ko
+config V4L2_MEM2MEM_DEV
+ tristate
+ depends on VIDEOBUF2_CORE
+
+# Used by drivers that need Videobuf modules
+config VIDEOBUF_GEN
+ tristate
+
+config VIDEOBUF_DMA_SG
+ tristate
+ depends on HAS_DMA
+ select VIDEOBUF_GEN
+
+config VIDEOBUF_VMALLOC
+ tristate
+ select VIDEOBUF_GEN
+
+config VIDEOBUF_DMA_CONTIG
+ tristate
+ depends on HAS_DMA
+ select VIDEOBUF_GEN
+
+config VIDEOBUF_DVB
+ tristate
+ select VIDEOBUF_GEN
+
+# Used by drivers that need Videobuf2 modules
+config VIDEOBUF2_CORE
+ select DMA_SHARED_BUFFER
+ tristate
+
+config VIDEOBUF2_MEMOPS
+ tristate
+
+config VIDEOBUF2_DMA_CONTIG
+ tristate
+ depends on HAS_DMA
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
+
+config VIDEOBUF2_VMALLOC
+ tristate
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
+
+config VIDEOBUF2_DMA_SG
+ tristate
+ #depends on HAS_DMA
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_MEMOPS
+
+config VIDEOBUF2_DVB
+ tristate
+ select VIDEOBUF2_CORE
diff --git a/kernel/drivers/media/v4l2-core/Makefile b/kernel/drivers/media/v4l2-core/Makefile
new file mode 100644
index 000000000..63d29f275
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/Makefile
@@ -0,0 +1,41 @@
+#
+# Makefile for the V4L2 core
+#
+
+tuner-objs := tuner-core.o
+
+videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
+ v4l2-event.o v4l2-ctrls.o v4l2-subdev.o v4l2-clk.o \
+ v4l2-async.o
+ifeq ($(CONFIG_COMPAT),y)
+ videodev-objs += v4l2-compat-ioctl32.o
+endif
+ifeq ($(CONFIG_OF),y)
+ videodev-objs += v4l2-of.o
+endif
+
+obj-$(CONFIG_VIDEO_V4L2) += videodev.o
+obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
+obj-$(CONFIG_VIDEO_V4L2) += v4l2-dv-timings.o
+
+obj-$(CONFIG_VIDEO_TUNER) += tuner.o
+
+obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
+
+obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
+obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
+obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
+obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
+obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
+
+obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o
+obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
+obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
+obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
+obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o
+obj-$(CONFIG_VIDEOBUF2_DVB) += videobuf2-dvb.o
+
+ccflags-y += -I$(srctree)/drivers/media/dvb-core
+ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
+ccflags-y += -I$(srctree)/drivers/media/tuners
+
diff --git a/kernel/drivers/media/v4l2-core/tuner-core.c b/kernel/drivers/media/v4l2-core/tuner-core.c
new file mode 100644
index 000000000..abdcffabc
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/tuner-core.c
@@ -0,0 +1,1383 @@
+/*
+ * i2c tv tuner chip device driver
+ * tuner core, i.e. kernel interfaces, registering and so on
+ *
+ * Copyright(c) by Ralph Metzler, Gerd Knorr, Gunther Mayer
+ *
+ * Copyright(c) 2005-2011 by Mauro Carvalho Chehab
+ * - Added support for a separate Radio tuner
+ * - Major rework and cleanups at the code
+ *
+ * This driver supports many devices and the idea is to let the driver
+ * detect which device is present. So rather than listing all supported
+ * devices here, we pretend to support a single, fake device type that will
+ * handle both radio and analog TV tuning.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/videodev2.h>
+#include <media/tuner.h>
+#include <media/tuner-types.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include "mt20xx.h"
+#include "tda8290.h"
+#include "tea5761.h"
+#include "tea5767.h"
+#include "tuner-xc2028.h"
+#include "tuner-simple.h"
+#include "tda9887.h"
+#include "xc5000.h"
+#include "tda18271.h"
+#include "xc4000.h"
+
+#define UNSET (-1U)
+
+#define PREFIX (t->i2c->dev.driver->name)
+
+/*
+ * Driver modprobe parameters
+ */
+
+/* insmod options used at init time => read/only */
+static unsigned int addr;
+static unsigned int no_autodetect;
+static unsigned int show_i2c;
+
+module_param(addr, int, 0444);
+module_param(no_autodetect, int, 0444);
+module_param(show_i2c, int, 0444);
+
+/* insmod options used at runtime => read/write */
+static int tuner_debug;
+static unsigned int tv_range[2] = { 44, 958 };
+static unsigned int radio_range[2] = { 65, 108 };
+static char pal[] = "--";
+static char secam[] = "--";
+static char ntsc[] = "-";
+
+module_param_named(debug, tuner_debug, int, 0644);
+module_param_array(tv_range, int, NULL, 0644);
+module_param_array(radio_range, int, NULL, 0644);
+module_param_string(pal, pal, sizeof(pal), 0644);
+module_param_string(secam, secam, sizeof(secam), 0644);
+module_param_string(ntsc, ntsc, sizeof(ntsc), 0644);
+
+/*
+ * Static vars
+ */
+
+static LIST_HEAD(tuner_list);
+static const struct v4l2_subdev_ops tuner_ops;
+
+/*
+ * Debug macros
+ */
+
+#define tuner_warn(fmt, arg...) do { \
+ printk(KERN_WARNING "%s %d-%04x: " fmt, PREFIX, \
+ i2c_adapter_id(t->i2c->adapter), \
+ t->i2c->addr, ##arg); \
+ } while (0)
+
+#define tuner_info(fmt, arg...) do { \
+ printk(KERN_INFO "%s %d-%04x: " fmt, PREFIX, \
+ i2c_adapter_id(t->i2c->adapter), \
+ t->i2c->addr, ##arg); \
+ } while (0)
+
+#define tuner_err(fmt, arg...) do { \
+ printk(KERN_ERR "%s %d-%04x: " fmt, PREFIX, \
+ i2c_adapter_id(t->i2c->adapter), \
+ t->i2c->addr, ##arg); \
+ } while (0)
+
+#define tuner_dbg(fmt, arg...) do { \
+ if (tuner_debug) \
+ printk(KERN_DEBUG "%s %d-%04x: " fmt, PREFIX, \
+ i2c_adapter_id(t->i2c->adapter), \
+ t->i2c->addr, ##arg); \
+ } while (0)
+
+/*
+ * Internal struct used inside the driver
+ */
+
+struct tuner {
+ /* device */
+ struct dvb_frontend fe;
+ struct i2c_client *i2c;
+ struct v4l2_subdev sd;
+ struct list_head list;
+
+ /* keep track of the current settings */
+ v4l2_std_id std;
+ unsigned int tv_freq;
+ unsigned int radio_freq;
+ unsigned int audmode;
+
+ enum v4l2_tuner_type mode;
+ unsigned int mode_mask; /* Combination of allowable modes */
+
+ bool standby; /* Standby mode */
+
+ unsigned int type; /* chip type id */
+ void *config;
+ const char *name;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_pad pad;
+#endif
+};
+
+/*
+ * Function prototypes
+ */
+
+static void set_tv_freq(struct i2c_client *c, unsigned int freq);
+static void set_radio_freq(struct i2c_client *c, unsigned int freq);
+
+/*
+ * tuner attach/detach logic
+ */
+
+/* This macro allows us to probe dynamically, avoiding static links */
+#ifdef CONFIG_MEDIA_ATTACH
+#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \
+ int __r = -EINVAL; \
+ typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
+ if (__a) { \
+ __r = (int) __a(ARGS); \
+ symbol_put(FUNCTION); \
+ } else { \
+ printk(KERN_ERR "TUNER: Unable to find " \
+ "symbol "#FUNCTION"()\n"); \
+ } \
+ __r; \
+})
+
+static void tuner_detach(struct dvb_frontend *fe)
+{
+ if (fe->ops.tuner_ops.release) {
+ fe->ops.tuner_ops.release(fe);
+ symbol_put_addr(fe->ops.tuner_ops.release);
+ }
+ if (fe->ops.analog_ops.release) {
+ fe->ops.analog_ops.release(fe);
+ symbol_put_addr(fe->ops.analog_ops.release);
+ }
+}
+#else
+#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \
+ FUNCTION(ARGS); \
+})
+
+static void tuner_detach(struct dvb_frontend *fe)
+{
+ if (fe->ops.tuner_ops.release)
+ fe->ops.tuner_ops.release(fe);
+ if (fe->ops.analog_ops.release)
+ fe->ops.analog_ops.release(fe);
+}
+#endif
+
+
+static inline struct tuner *to_tuner(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct tuner, sd);
+}
+
+/*
+ * struct analog_demod_ops callbacks
+ */
+
+static void fe_set_params(struct dvb_frontend *fe,
+ struct analog_parameters *params)
+{
+ struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
+ struct tuner *t = fe->analog_demod_priv;
+
+ if (NULL == fe_tuner_ops->set_analog_params) {
+ tuner_warn("Tuner frontend module has no way to set freq\n");
+ return;
+ }
+ fe_tuner_ops->set_analog_params(fe, params);
+}
+
+static void fe_standby(struct dvb_frontend *fe)
+{
+ struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
+
+ if (fe_tuner_ops->sleep)
+ fe_tuner_ops->sleep(fe);
+}
+
+static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg)
+{
+ struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
+ struct tuner *t = fe->analog_demod_priv;
+
+ if (fe_tuner_ops->set_config)
+ return fe_tuner_ops->set_config(fe, priv_cfg);
+
+ tuner_warn("Tuner frontend module has no way to set config\n");
+
+ return 0;
+}
+
+static void tuner_status(struct dvb_frontend *fe);
+
+static const struct analog_demod_ops tuner_analog_ops = {
+ .set_params = fe_set_params,
+ .standby = fe_standby,
+ .set_config = fe_set_config,
+ .tuner_status = tuner_status
+};
+
+/*
+ * Functions to select between radio and TV and tuner probe/remove functions
+ */
+
+/**
+ * set_type - Sets the tuner type for a given device
+ *
+ * @c: i2c_client descriptor
+ * @type: type of the tuner (e. g. tuner number)
+ * @new_mode_mask: Indicates if tuner supports TV and/or Radio
+ * @new_config: an optional parameter used by a few tuners to adjust
+ * internal parameters, like LNA mode
+ * @tuner_callback: an optional function to be called when switching
+ * to analog mode
+ *
+ * This function applies the tuner config to the tuner specified by the
+ * given arguments. It contains several per-tuner initialization "magic"
+ */
+static void set_type(struct i2c_client *c, unsigned int type,
+ unsigned int new_mode_mask, void *new_config,
+ int (*tuner_callback) (void *dev, int component, int cmd, int arg))
+{
+ struct tuner *t = to_tuner(i2c_get_clientdata(c));
+ struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+ unsigned char buffer[4];
+ int tune_now = 1;
+
+ if (type == UNSET || type == TUNER_ABSENT) {
+ tuner_dbg("tuner 0x%02x: Tuner type absent\n", c->addr);
+ return;
+ }
+
+ t->type = type;
+ t->config = new_config;
+ if (tuner_callback != NULL) {
+ tuner_dbg("defining GPIO callback\n");
+ t->fe.callback = tuner_callback;
+ }
+
+ /* discard private data, in case set_type() was previously called */
+ tuner_detach(&t->fe);
+ t->fe.analog_demod_priv = NULL;
+
+ switch (t->type) {
+ case TUNER_MT2032:
+ if (!dvb_attach(microtune_attach,
+ &t->fe, t->i2c->adapter, t->i2c->addr))
+ goto attach_failed;
+ break;
+ case TUNER_PHILIPS_TDA8290:
+ {
+ if (!dvb_attach(tda829x_attach, &t->fe, t->i2c->adapter,
+ t->i2c->addr, t->config))
+ goto attach_failed;
+ break;
+ }
+ case TUNER_TEA5767:
+ if (!dvb_attach(tea5767_attach, &t->fe,
+ t->i2c->adapter, t->i2c->addr))
+ goto attach_failed;
+ t->mode_mask = T_RADIO;
+ break;
+ case TUNER_TEA5761:
+ if (!dvb_attach(tea5761_attach, &t->fe,
+ t->i2c->adapter, t->i2c->addr))
+ goto attach_failed;
+ t->mode_mask = T_RADIO;
+ break;
+ case TUNER_PHILIPS_FMD1216ME_MK3:
+ case TUNER_PHILIPS_FMD1216MEX_MK3:
+ buffer[0] = 0x0b;
+ buffer[1] = 0xdc;
+ buffer[2] = 0x9c;
+ buffer[3] = 0x60;
+ i2c_master_send(c, buffer, 4);
+ mdelay(1);
+ buffer[2] = 0x86;
+ buffer[3] = 0x54;
+ i2c_master_send(c, buffer, 4);
+ if (!dvb_attach(simple_tuner_attach, &t->fe,
+ t->i2c->adapter, t->i2c->addr, t->type))
+ goto attach_failed;
+ break;
+ case TUNER_PHILIPS_TD1316:
+ buffer[0] = 0x0b;
+ buffer[1] = 0xdc;
+ buffer[2] = 0x86;
+ buffer[3] = 0xa4;
+ i2c_master_send(c, buffer, 4);
+ if (!dvb_attach(simple_tuner_attach, &t->fe,
+ t->i2c->adapter, t->i2c->addr, t->type))
+ goto attach_failed;
+ break;
+ case TUNER_XC2028:
+ {
+ struct xc2028_config cfg = {
+ .i2c_adap = t->i2c->adapter,
+ .i2c_addr = t->i2c->addr,
+ };
+ if (!dvb_attach(xc2028_attach, &t->fe, &cfg))
+ goto attach_failed;
+ tune_now = 0;
+ break;
+ }
+ case TUNER_TDA9887:
+ if (!dvb_attach(tda9887_attach,
+ &t->fe, t->i2c->adapter, t->i2c->addr))
+ goto attach_failed;
+ break;
+ case TUNER_XC5000:
+ {
+ struct xc5000_config xc5000_cfg = {
+ .i2c_address = t->i2c->addr,
+ /* if_khz will be set at dvb_attach() */
+ .if_khz = 0,
+ };
+
+ if (!dvb_attach(xc5000_attach,
+ &t->fe, t->i2c->adapter, &xc5000_cfg))
+ goto attach_failed;
+ tune_now = 0;
+ break;
+ }
+ case TUNER_XC5000C:
+ {
+ struct xc5000_config xc5000c_cfg = {
+ .i2c_address = t->i2c->addr,
+ /* if_khz will be set at dvb_attach() */
+ .if_khz = 0,
+ .chip_id = XC5000C,
+ };
+
+ if (!dvb_attach(xc5000_attach,
+ &t->fe, t->i2c->adapter, &xc5000c_cfg))
+ goto attach_failed;
+ tune_now = 0;
+ break;
+ }
+ case TUNER_NXP_TDA18271:
+ {
+ struct tda18271_config cfg = {
+ .small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
+ };
+
+ if (!dvb_attach(tda18271_attach, &t->fe, t->i2c->addr,
+ t->i2c->adapter, &cfg))
+ goto attach_failed;
+ tune_now = 0;
+ break;
+ }
+ case TUNER_XC4000:
+ {
+ struct xc4000_config xc4000_cfg = {
+ .i2c_address = t->i2c->addr,
+ /* FIXME: the correct parameters will be set */
+ /* only when the digital dvb_attach() occurs */
+ .default_pm = 0,
+ .dvb_amplitude = 0,
+ .set_smoothedcvbs = 0,
+ .if_khz = 0
+ };
+ if (!dvb_attach(xc4000_attach,
+ &t->fe, t->i2c->adapter, &xc4000_cfg))
+ goto attach_failed;
+ tune_now = 0;
+ break;
+ }
+ default:
+ if (!dvb_attach(simple_tuner_attach, &t->fe,
+ t->i2c->adapter, t->i2c->addr, t->type))
+ goto attach_failed;
+
+ break;
+ }
+
+ if ((NULL == analog_ops->set_params) &&
+ (fe_tuner_ops->set_analog_params)) {
+
+ t->name = fe_tuner_ops->info.name;
+
+ t->fe.analog_demod_priv = t;
+ memcpy(analog_ops, &tuner_analog_ops,
+ sizeof(struct analog_demod_ops));
+
+ if (fe_tuner_ops->get_rf_strength)
+ analog_ops->has_signal = fe_tuner_ops->get_rf_strength;
+ if (fe_tuner_ops->get_afc)
+ analog_ops->get_afc = fe_tuner_ops->get_afc;
+
+ } else {
+ t->name = analog_ops->info.name;
+ }
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ t->sd.entity.name = t->name;
+#endif
+
+ tuner_dbg("type set to %s\n", t->name);
+
+ t->mode_mask = new_mode_mask;
+
+ /* Some tuners require more initialization before use, such as
+ firmware download or device calibration. Trying to set a frequency
+ here would simply fail.
+ FIXME: better to move set_freq to the tuner code. This is needed
+ on analog tuners for the PLL to work properly.
+ */
+ if (tune_now) {
+ if (V4L2_TUNER_RADIO == t->mode)
+ set_radio_freq(c, t->radio_freq);
+ else
+ set_tv_freq(c, t->tv_freq);
+ }
+
+ tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
+ c->adapter->name, c->dev.driver->name, c->addr << 1, type,
+ t->mode_mask);
+ return;
+
+attach_failed:
+ tuner_dbg("Tuner attach for type = %d failed.\n", t->type);
+ t->type = TUNER_ABSENT;
+
+ return;
+}
+
+/**
+ * tuner_s_type_addr - Sets the tuner type for a device
+ *
+ * @sd: subdev descriptor
+ * @tun_setup: type to be associated to a given tuner i2c address
+ *
+ * This function applies the tuner config to the tuner specified
+ * by the tun_setup structure.
+ * If tuner I2C address is UNSET, then it will only set the device
+ * if the tuner supports the mode specified in the call.
+ * If the address is specified, the change will be applied only if
+ * tuner I2C address matches.
+ * The call can change the tuner number and the tuner mode.
+ */
+static int tuner_s_type_addr(struct v4l2_subdev *sd,
+ struct tuner_setup *tun_setup)
+{
+ struct tuner *t = to_tuner(sd);
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+
+ tuner_dbg("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n",
+ tun_setup->type,
+ tun_setup->addr,
+ tun_setup->mode_mask,
+ tun_setup->config);
+
+ if ((t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) &&
+ (t->mode_mask & tun_setup->mode_mask))) ||
+ (tun_setup->addr == c->addr)) {
+ set_type(c, tun_setup->type, tun_setup->mode_mask,
+ tun_setup->config, tun_setup->tuner_callback);
+ } else
+ tuner_dbg("set addr discarded for type %i, mask %x. "
+ "Asked to change tuner at addr 0x%02x, with mask %x\n",
+ t->type, t->mode_mask,
+ tun_setup->addr, tun_setup->mode_mask);
+
+ return 0;
+}
+
+/**
+ * tuner_s_config - Sets tuner configuration
+ *
+ * @sd: subdev descriptor
+ * @cfg: tuner configuration
+ *
+ * Calls tuner set_config() private function to set some tuner-internal
+ * parameters
+ */
+static int tuner_s_config(struct v4l2_subdev *sd,
+ const struct v4l2_priv_tun_config *cfg)
+{
+ struct tuner *t = to_tuner(sd);
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ if (t->type != cfg->tuner)
+ return 0;
+
+ if (analog_ops->set_config) {
+ analog_ops->set_config(&t->fe, cfg->priv);
+ return 0;
+ }
+
+ tuner_dbg("Tuner frontend module has no way to set config\n");
+ return 0;
+}
+
+/**
+ * tuner_lookup - Seek for tuner adapters
+ *
+ * @adap: i2c_adapter struct
+ * @radio: pointer to be filled if the adapter is radio
+ * @tv: pointer to be filled if the adapter is TV
+ *
+ * Search for existing radio and/or TV tuners on the given I2C adapter,
+ * discarding demod-only adapters (tda9887).
+ *
+ * Note that when this function is called from tuner_probe you can be
+ * certain no other devices will be added/deleted at the same time, I2C
+ * core protects against that.
+ */
+static void tuner_lookup(struct i2c_adapter *adap,
+ struct tuner **radio, struct tuner **tv)
+{
+ struct tuner *pos;
+
+ *radio = NULL;
+ *tv = NULL;
+
+ list_for_each_entry(pos, &tuner_list, list) {
+ int mode_mask;
+
+ if (pos->i2c->adapter != adap ||
+ strcmp(pos->i2c->dev.driver->name, "tuner"))
+ continue;
+
+ mode_mask = pos->mode_mask;
+ if (*radio == NULL && mode_mask == T_RADIO)
+ *radio = pos;
+ /* Note: currently TDA9887 is the only demod-only
+ device. If other devices appear then we need to
+ make this test more general. */
+ else if (*tv == NULL && pos->type != TUNER_TDA9887 &&
+ (pos->mode_mask & T_ANALOG_TV))
+ *tv = pos;
+ }
+}
+
+/**
+ * tuner_probe - Probes the existing tuners on an I2C bus
+ *
+ * @client: i2c_client descriptor
+ * @id: not used
+ *
+ * This routine probes for tuners at the expected I2C addresses. In most
+ * cases, if a device answers at a given I2C address, it is assumed to be
+ * a tuner. In a few cases, however, additional logic is needed to double
+ * check whether the device really is a tuner, or to identify the tuner
+ * type, as with tea5767/5761 devices.
+ *
+ * During client attach, set_type is called by the adapter's attach_inform
+ * callback. set_type must then be completed by tuner_probe.
+ */
+static int tuner_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tuner *t;
+ struct tuner *radio;
+ struct tuner *tv;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ int ret;
+#endif
+
+ t = kzalloc(sizeof(struct tuner), GFP_KERNEL);
+ if (NULL == t)
+ return -ENOMEM;
+ v4l2_i2c_subdev_init(&t->sd, client, &tuner_ops);
+ t->i2c = client;
+ t->name = "(tuner unset)";
+ t->type = UNSET;
+ t->audmode = V4L2_TUNER_MODE_STEREO;
+ t->standby = true;
+ t->radio_freq = 87.5 * 16000; /* Initial freq range */
+ t->tv_freq = 400 * 16; /* Sets freq to VHF High - needed for some PLLs to start properly */
+
+ if (show_i2c) {
+ unsigned char buffer[16];
+ int i, rc;
+
+ memset(buffer, 0, sizeof(buffer));
+ rc = i2c_master_recv(client, buffer, sizeof(buffer));
+ tuner_info("I2C RECV = ");
+ for (i = 0; i < rc; i++)
+ printk(KERN_CONT "%02x ", buffer[i]);
+ printk("\n");
+ }
+
+ /* autodetection code based on the i2c addr */
+ if (!no_autodetect) {
+ switch (client->addr) {
+ case 0x10:
+ if (tuner_symbol_probe(tea5761_autodetection,
+ t->i2c->adapter,
+ t->i2c->addr) >= 0) {
+ t->type = TUNER_TEA5761;
+ t->mode_mask = T_RADIO;
+ tuner_lookup(t->i2c->adapter, &radio, &tv);
+ if (tv)
+ tv->mode_mask &= ~T_RADIO;
+
+ goto register_client;
+ }
+ kfree(t);
+ return -ENODEV;
+ case 0x42:
+ case 0x43:
+ case 0x4a:
+ case 0x4b:
+ /* If the chip is not a tda8290, don't register it,
+ since it can be a tda9887 */
+ if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter,
+ t->i2c->addr) >= 0) {
+ tuner_dbg("tda829x detected\n");
+ } else {
+ /* Default is being tda9887 */
+ t->type = TUNER_TDA9887;
+ t->mode_mask = T_RADIO | T_ANALOG_TV;
+ goto register_client;
+ }
+ break;
+ case 0x60:
+ if (tuner_symbol_probe(tea5767_autodetection,
+ t->i2c->adapter, t->i2c->addr)
+ >= 0) {
+ t->type = TUNER_TEA5767;
+ t->mode_mask = T_RADIO;
+ /* Sets freq to FM range */
+ tuner_lookup(t->i2c->adapter, &radio, &tv);
+ if (tv)
+ tv->mode_mask &= ~T_RADIO;
+
+ goto register_client;
+ }
+ break;
+ }
+ }
+
+ /* Initializes only the first TV tuner on this adapter. Why only the
+ first? Because there are some devices (notably the ones with TI
+ tuners) that have more than one i2c address for the *same* device.
+ Experience shows that, except for just one case, the first
+ address is the right one. The exception is a Russian tuner
+ (ACORP_Y878F). So, the desired behavior is just to enable the
+ first found TV tuner. */
+ tuner_lookup(t->i2c->adapter, &radio, &tv);
+ if (tv == NULL) {
+ t->mode_mask = T_ANALOG_TV;
+ if (radio == NULL)
+ t->mode_mask |= T_RADIO;
+ tuner_dbg("Setting mode_mask to 0x%02x\n", t->mode_mask);
+ }
+
+ /* Should be just before return */
+register_client:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ t->pad.flags = MEDIA_PAD_FL_SOURCE;
+ t->sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_TUNER;
+ t->sd.entity.name = t->name;
+
+ ret = media_entity_init(&t->sd.entity, 1, &t->pad, 0);
+ if (ret < 0) {
+ tuner_err("failed to initialize media entity!\n");
+ kfree(t);
+ return -ENODEV;
+ }
+#endif
+ /* Sets a default mode */
+ if (t->mode_mask & T_ANALOG_TV)
+ t->mode = V4L2_TUNER_ANALOG_TV;
+ else
+ t->mode = V4L2_TUNER_RADIO;
+ set_type(client, t->type, t->mode_mask, t->config, t->fe.callback);
+ list_add_tail(&t->list, &tuner_list);
+
+ tuner_info("Tuner %d found with type(s)%s%s.\n",
+ t->type,
+ t->mode_mask & T_RADIO ? " Radio" : "",
+ t->mode_mask & T_ANALOG_TV ? " TV" : "");
+ return 0;
+}
+
+/**
+ * tuner_remove - detaches a tuner
+ *
+ * @client: i2c_client descriptor
+ */
+
+static int tuner_remove(struct i2c_client *client)
+{
+ struct tuner *t = to_tuner(i2c_get_clientdata(client));
+
+ v4l2_device_unregister_subdev(&t->sd);
+ tuner_detach(&t->fe);
+ t->fe.analog_demod_priv = NULL;
+
+ list_del(&t->list);
+ kfree(t);
+ return 0;
+}
+
+/*
+ * Functions to switch between Radio and TV
+ *
+ * A few cards have a separate I2C tuner for radio. Those routines
+ * take care of switching between TV/Radio mode, filtering only the
+ * commands that apply to the Radio or TV tuner.
+ */
+
+/**
+ * check_mode - Verify if tuner supports the requested mode
+ * @t: a pointer to the module's internal struct_tuner
+ *
+ * This function checks if the tuner is capable of tuning analog TV,
+ * digital TV or radio, depending on what the caller wants. If the
+ * tuner can't support that mode, it returns -EINVAL. Otherwise, it
+ * returns 0.
+ * This function is needed for boards that have a separate tuner for
+ * radio (like devices with tea5767).
+ * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
+ * select a TV frequency. So, t_mode = T_ANALOG_TV could actually
+ * be used to represent a Digital TV too.
+ */
+static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
+{
+ int t_mode;
+ if (mode == V4L2_TUNER_RADIO)
+ t_mode = T_RADIO;
+ else
+ t_mode = T_ANALOG_TV;
+
+ if ((t_mode & t->mode_mask) == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * set_mode - Switch tuner to other mode.
+ * @t: a pointer to the module's internal struct_tuner
+ * @mode: enum v4l2_type (radio or TV)
+ *
+ * If tuner doesn't support the needed mode (radio or TV), prints a
+ * debug message and returns -EINVAL, changing its state to standby.
+ * Otherwise, changes the mode and returns 0.
+ */
+static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
+{
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ if (mode != t->mode) {
+ if (check_mode(t, mode) == -EINVAL) {
+ tuner_dbg("Tuner doesn't support mode %d. "
+ "Putting tuner to sleep\n", mode);
+ t->standby = true;
+ if (analog_ops->standby)
+ analog_ops->standby(&t->fe);
+ return -EINVAL;
+ }
+ t->mode = mode;
+ tuner_dbg("Changing to mode %d\n", mode);
+ }
+ return 0;
+}
+
+/**
+ * set_freq - Set the tuner to the desired frequency.
+ * @t: a pointer to the module's internal struct_tuner
+ * @freq: frequency to set (0 means to use the current frequency)
+ */
+static void set_freq(struct tuner *t, unsigned int freq)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
+
+ if (t->mode == V4L2_TUNER_RADIO) {
+ if (!freq)
+ freq = t->radio_freq;
+ set_radio_freq(client, freq);
+ } else {
+ if (!freq)
+ freq = t->tv_freq;
+ set_tv_freq(client, freq);
+ }
+}
+
+/*
+ * Functions that are specific for TV mode
+ */
+
+/**
+ * set_tv_freq - Set tuner frequency, freq in units of 62.5 kHz (1/16 MHz);
+ * e.g. 440 MHz is passed as 440 * 16 = 7040
+ *
+ * @c: i2c_client descriptor
+ * @freq: frequency
+ */
+static void set_tv_freq(struct i2c_client *c, unsigned int freq)
+{
+ struct tuner *t = to_tuner(i2c_get_clientdata(c));
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ struct analog_parameters params = {
+ .mode = t->mode,
+ .audmode = t->audmode,
+ .std = t->std
+ };
+
+ if (t->type == UNSET) {
+ tuner_warn("tuner type not set\n");
+ return;
+ }
+ if (NULL == analog_ops->set_params) {
+ tuner_warn("Tuner has no way to set tv freq\n");
+ return;
+ }
+ if (freq < tv_range[0] * 16 || freq > tv_range[1] * 16) {
+ tuner_dbg("TV freq (%d.%02d) out of range (%d-%d)\n",
+ freq / 16, freq % 16 * 100 / 16, tv_range[0],
+ tv_range[1]);
+ /* V4L2 spec: if the freq is not possible then the closest
+ possible value should be selected */
+ if (freq < tv_range[0] * 16)
+ freq = tv_range[0] * 16;
+ else
+ freq = tv_range[1] * 16;
+ }
+ params.frequency = freq;
+ tuner_dbg("tv freq set to %d.%02d\n",
+ freq / 16, freq % 16 * 100 / 16);
+ t->tv_freq = freq;
+ t->standby = false;
+
+ analog_ops->set_params(&t->fe, &params);
+}
+
+/**
+ * tuner_fixup_std - force a given video standard variant
+ *
+ * @t: tuner internal struct
+ * @std: TV standard
+ *
+ * A few devices or drivers have problems detecting some standard variations.
+ * On other operating systems, the drivers generally have a per-country
+ * code, and some logic to apply per-country hacks. The V4L2 API doesn't
+ * provide such hacks. Instead, it relies on proper video standard selection
+ * by the userspace application. However, as some apps are buggy and cannot
+ * distinguish all video standard variations, a modprobe parameter can
+ * be used to force a video standard match. For example, loading the module
+ * with pal=b makes a PAL std resolve to V4L2_STD_PAL_BG.
+ */
+static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
+{
+ if (pal[0] != '-' && (std & V4L2_STD_PAL) == V4L2_STD_PAL) {
+ switch (pal[0]) {
+ case '6':
+ return V4L2_STD_PAL_60;
+ case 'b':
+ case 'B':
+ case 'g':
+ case 'G':
+ return V4L2_STD_PAL_BG;
+ case 'i':
+ case 'I':
+ return V4L2_STD_PAL_I;
+ case 'd':
+ case 'D':
+ case 'k':
+ case 'K':
+ return V4L2_STD_PAL_DK;
+ case 'M':
+ case 'm':
+ return V4L2_STD_PAL_M;
+ case 'N':
+ case 'n':
+ if (pal[1] == 'c' || pal[1] == 'C')
+ return V4L2_STD_PAL_Nc;
+ return V4L2_STD_PAL_N;
+ default:
+ tuner_warn("pal= argument not recognised\n");
+ break;
+ }
+ }
+ if (secam[0] != '-' && (std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
+ switch (secam[0]) {
+ case 'b':
+ case 'B':
+ case 'g':
+ case 'G':
+ case 'h':
+ case 'H':
+ return V4L2_STD_SECAM_B |
+ V4L2_STD_SECAM_G |
+ V4L2_STD_SECAM_H;
+ case 'd':
+ case 'D':
+ case 'k':
+ case 'K':
+ return V4L2_STD_SECAM_DK;
+ case 'l':
+ case 'L':
+ if ((secam[1] == 'C') || (secam[1] == 'c'))
+ return V4L2_STD_SECAM_LC;
+ return V4L2_STD_SECAM_L;
+ default:
+ tuner_warn("secam= argument not recognised\n");
+ break;
+ }
+ }
+
+ if (ntsc[0] != '-' && (std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
+ switch (ntsc[0]) {
+ case 'm':
+ case 'M':
+ return V4L2_STD_NTSC_M;
+ case 'j':
+ case 'J':
+ return V4L2_STD_NTSC_M_JP;
+ case 'k':
+ case 'K':
+ return V4L2_STD_NTSC_M_KR;
+ default:
+ tuner_info("ntsc= argument not recognised\n");
+ break;
+ }
+ }
+ return std;
+}
+
+/*
+ * Functions that are specific for Radio mode
+ */
+
+/**
+ * set_radio_freq - Set tuner frequency, freq in units of 62.5 Hz (1/16 kHz);
+ * e.g. 91.2 MHz is passed as 91200 * 16 = 1459200
+ *
+ * @c: i2c_client descriptor
+ * @freq: frequency
+ */
+static void set_radio_freq(struct i2c_client *c, unsigned int freq)
+{
+ struct tuner *t = to_tuner(i2c_get_clientdata(c));
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ struct analog_parameters params = {
+ .mode = t->mode,
+ .audmode = t->audmode,
+ .std = t->std
+ };
+
+ if (t->type == UNSET) {
+ tuner_warn("tuner type not set\n");
+ return;
+ }
+ if (NULL == analog_ops->set_params) {
+ tuner_warn("tuner has no way to set radio frequency\n");
+ return;
+ }
+ if (freq < radio_range[0] * 16000 || freq > radio_range[1] * 16000) {
+ tuner_dbg("radio freq (%d.%02d) out of range (%d-%d)\n",
+ freq / 16000, freq % 16000 * 100 / 16000,
+ radio_range[0], radio_range[1]);
+ /* V4L2 spec: if the freq is not possible then the closest
+ possible value should be selected */
+ if (freq < radio_range[0] * 16000)
+ freq = radio_range[0] * 16000;
+ else
+ freq = radio_range[1] * 16000;
+ }
+ params.frequency = freq;
+ tuner_dbg("radio freq set to %d.%02d\n",
+ freq / 16000, freq % 16000 * 100 / 16000);
+ t->radio_freq = freq;
+ t->standby = false;
+
+ analog_ops->set_params(&t->fe, &params);
+ /*
+ * The tuner driver might decide to change the audmode if it only
+ * supports stereo, so update t->audmode.
+ */
+ t->audmode = params.audmode;
+}
+
+/*
+ * Debug function for reporting tuner status to userspace
+ */
+
+/**
+ * tuner_status - Dumps the current tuner status to dmesg
+ * @fe: pointer to struct dvb_frontend
+ *
+ * This callback is used only for driver debugging purposes, in response
+ * to VIDIOC_LOG_STATUS. No state changes should happen in this call.
+ */
+static void tuner_status(struct dvb_frontend *fe)
+{
+ struct tuner *t = fe->analog_demod_priv;
+ unsigned long freq, freq_fraction;
+ struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
+ struct analog_demod_ops *analog_ops = &fe->ops.analog_ops;
+ const char *p;
+
+ switch (t->mode) {
+ case V4L2_TUNER_RADIO:
+ p = "radio";
+ break;
+ case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */
+ p = "digital TV";
+ break;
+ case V4L2_TUNER_ANALOG_TV:
+ default:
+ p = "analog TV";
+ break;
+ }
+ if (t->mode == V4L2_TUNER_RADIO) {
+ freq = t->radio_freq / 16000;
+ freq_fraction = (t->radio_freq % 16000) * 100 / 16000;
+ } else {
+ freq = t->tv_freq / 16;
+ freq_fraction = (t->tv_freq % 16) * 100 / 16;
+ }
+ tuner_info("Tuner mode: %s%s\n", p,
+ t->standby ? " on standby mode" : "");
+ tuner_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
+ tuner_info("Standard: 0x%08lx\n", (unsigned long)t->std);
+ if (t->mode != V4L2_TUNER_RADIO)
+ return;
+ if (fe_tuner_ops->get_status) {
+ u32 tuner_status;
+
+ fe_tuner_ops->get_status(&t->fe, &tuner_status);
+ if (tuner_status & TUNER_STATUS_LOCKED)
+ tuner_info("Tuner is locked.\n");
+ if (tuner_status & TUNER_STATUS_STEREO)
+ tuner_info("Stereo: yes\n");
+ }
+ if (analog_ops->has_signal) {
+ u16 signal;
+
+ if (!analog_ops->has_signal(fe, &signal))
+ tuner_info("Signal strength: %hu\n", signal);
+ }
+}
+
+/*
+ * Function to explicitly change the mode to radio. Probably not needed anymore
+ */
+
+static int tuner_s_radio(struct v4l2_subdev *sd)
+{
+ struct tuner *t = to_tuner(sd);
+
+ if (set_mode(t, V4L2_TUNER_RADIO) == 0)
+ set_freq(t, 0);
+ return 0;
+}
+
+/*
+ * Tuner callbacks to handle userspace ioctl's
+ */
+
+/**
+ * tuner_s_power - controls the power state of the tuner
+ * @sd: pointer to struct v4l2_subdev
+ * @on: a zero value puts the tuner to sleep, non-zero wakes it up
+ */
+static int tuner_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct tuner *t = to_tuner(sd);
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ if (on) {
+ if (t->standby && set_mode(t, t->mode) == 0) {
+ tuner_dbg("Waking up tuner\n");
+ set_freq(t, 0);
+ }
+ return 0;
+ }
+
+ tuner_dbg("Putting tuner to sleep\n");
+ t->standby = true;
+ if (analog_ops->standby)
+ analog_ops->standby(&t->fe);
+ return 0;
+}
+
+static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct tuner *t = to_tuner(sd);
+
+ if (set_mode(t, V4L2_TUNER_ANALOG_TV))
+ return 0;
+
+ t->std = tuner_fixup_std(t, std);
+ if (t->std != std)
+ tuner_dbg("Fixup standard %llx to %llx\n", std, t->std);
+ set_freq(t, 0);
+ return 0;
+}
+
+static int tuner_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequency *f)
+{
+ struct tuner *t = to_tuner(sd);
+
+ if (set_mode(t, f->type) == 0)
+ set_freq(t, f->frequency);
+ return 0;
+}
+
+/**
+ * tuner_g_frequency - Get the tuned frequency for the tuner
+ * @sd: pointer to struct v4l2_subdev
+ * @f: pointer to struct v4l2_frequency
+ *
+ * On return, the structure f will be filled with the tuner frequency
+ * if the tuner matches f->type.
+ * Note: f->type should be initialized before calling this function.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
+static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
+{
+ struct tuner *t = to_tuner(sd);
+ struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
+
+ if (check_mode(t, f->type) == -EINVAL)
+ return 0;
+ if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) {
+ u32 abs_freq;
+
+ fe_tuner_ops->get_frequency(&t->fe, &abs_freq);
+ f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
+ DIV_ROUND_CLOSEST(abs_freq * 2, 125) :
+ DIV_ROUND_CLOSEST(abs_freq, 62500);
+ } else {
+ f->frequency = (V4L2_TUNER_RADIO == f->type) ?
+ t->radio_freq : t->tv_freq;
+ }
+ return 0;
+}
+
+/**
+ * tuner_g_tuner - Fill in tuner information
+ * @sd: pointer to struct v4l2_subdev
+ * @vt: pointer to struct v4l2_tuner
+ *
+ * On return, the structure vt will be filled with tuner information
+ * if the tuner matches vt->type.
+ * Note: vt->type should be initialized before calling this function.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
+static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
+{
+ struct tuner *t = to_tuner(sd);
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+ struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
+
+ if (check_mode(t, vt->type) == -EINVAL)
+ return 0;
+ if (vt->type == t->mode && analog_ops->get_afc)
+ analog_ops->get_afc(&t->fe, &vt->afc);
+ if (vt->type == t->mode && analog_ops->has_signal) {
+ u16 signal = (u16)vt->signal;
+
+ if (!analog_ops->has_signal(&t->fe, &signal))
+ vt->signal = signal;
+ }
+ if (vt->type != V4L2_TUNER_RADIO) {
+ vt->capability |= V4L2_TUNER_CAP_NORM;
+ vt->rangelow = tv_range[0] * 16;
+ vt->rangehigh = tv_range[1] * 16;
+ return 0;
+ }
+
+ /* radio mode */
+ if (vt->type == t->mode) {
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
+ if (fe_tuner_ops->get_status) {
+ u32 tuner_status;
+
+ fe_tuner_ops->get_status(&t->fe, &tuner_status);
+ vt->rxsubchans =
+ (tuner_status & TUNER_STATUS_STEREO) ?
+ V4L2_TUNER_SUB_STEREO :
+ V4L2_TUNER_SUB_MONO;
+ }
+ vt->audmode = t->audmode;
+ }
+ vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+ vt->rangelow = radio_range[0] * 16000;
+ vt->rangehigh = radio_range[1] * 16000;
+
+ return 0;
+}
+
+/**
+ * tuner_s_tuner - Set the tuner's audio mode
+ * @sd: pointer to struct v4l2_subdev
+ * @vt: pointer to struct v4l2_tuner
+ *
+ * Sets the audio mode if the tuner matches vt->type.
+ * Note: vt->type should be initialized before calling this function.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
+static int tuner_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
+{
+ struct tuner *t = to_tuner(sd);
+
+ if (set_mode(t, vt->type))
+ return 0;
+
+ if (t->mode == V4L2_TUNER_RADIO) {
+ t->audmode = vt->audmode;
+ /*
+ * For radio audmode can only be mono or stereo. Map any
+ * other values to stereo. The actual tuner driver that is
+ * called in set_radio_freq can decide to limit the audmode to
+ * mono if only mono is supported.
+ */
+ if (t->audmode != V4L2_TUNER_MODE_MONO &&
+ t->audmode != V4L2_TUNER_MODE_STEREO)
+ t->audmode = V4L2_TUNER_MODE_STEREO;
+ }
+ set_freq(t, 0);
+
+ return 0;
+}
+
+static int tuner_log_status(struct v4l2_subdev *sd)
+{
+ struct tuner *t = to_tuner(sd);
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ if (analog_ops->tuner_status)
+ analog_ops->tuner_status(&t->fe);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tuner_suspend(struct device *dev)
+{
+ struct i2c_client *c = to_i2c_client(dev);
+ struct tuner *t = to_tuner(i2c_get_clientdata(c));
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ tuner_dbg("suspend\n");
+
+ if (t->fe.ops.tuner_ops.suspend)
+ t->fe.ops.tuner_ops.suspend(&t->fe);
+ else if (!t->standby && analog_ops->standby)
+ analog_ops->standby(&t->fe);
+
+ return 0;
+}
+
+static int tuner_resume(struct device *dev)
+{
+ struct i2c_client *c = to_i2c_client(dev);
+ struct tuner *t = to_tuner(i2c_get_clientdata(c));
+
+ tuner_dbg("resume\n");
+
+ if (t->fe.ops.tuner_ops.resume)
+ t->fe.ops.tuner_ops.resume(&t->fe);
+ else if (!t->standby)
+ if (set_mode(t, t->mode) == 0)
+ set_freq(t, 0);
+
+ return 0;
+}
+#endif
+
+static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ /* TUNER_SET_CONFIG is still called by tuner-simple.c, so we have
+ to handle it here.
+ There must be a better way of doing this... */
+ switch (cmd) {
+ case TUNER_SET_CONFIG:
+ return tuner_s_config(sd, arg);
+ }
+ return -ENOIOCTLCMD;
+}
+
+/*
+ * Callback structs
+ */
+
+static const struct v4l2_subdev_core_ops tuner_core_ops = {
+ .log_status = tuner_log_status,
+ .s_power = tuner_s_power,
+};
+
+static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = {
+ .s_radio = tuner_s_radio,
+ .g_tuner = tuner_g_tuner,
+ .s_tuner = tuner_s_tuner,
+ .s_frequency = tuner_s_frequency,
+ .g_frequency = tuner_g_frequency,
+ .s_type_addr = tuner_s_type_addr,
+ .s_config = tuner_s_config,
+};
+
+static const struct v4l2_subdev_video_ops tuner_video_ops = {
+ .s_std = tuner_s_std,
+};
+
+static const struct v4l2_subdev_ops tuner_ops = {
+ .core = &tuner_core_ops,
+ .tuner = &tuner_tuner_ops,
+ .video = &tuner_video_ops,
+};
+
+/*
+ * I2C structs and module init functions
+ */
+
+static const struct dev_pm_ops tuner_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tuner_suspend, tuner_resume)
+};
+
+static const struct i2c_device_id tuner_id[] = {
+ { "tuner", }, /* autodetect */
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tuner_id);
+
+static struct i2c_driver tuner_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tuner",
+ .pm = &tuner_pm_ops,
+ },
+ .probe = tuner_probe,
+ .remove = tuner_remove,
+ .command = tuner_command,
+ .id_table = tuner_id,
+};
+
+module_i2c_driver(tuner_driver);
+
+MODULE_DESCRIPTION("device driver for various TV and TV+FM radio tuners");
+MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/media/v4l2-core/v4l2-async.c b/kernel/drivers/media/v4l2-core/v4l2-async.c
new file mode 100644
index 000000000..85a6a3412
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-async.c
@@ -0,0 +1,312 @@
+/*
+ * V4L2 asynchronous subdevice registration API
+ *
+ * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+static bool match_i2c(struct device *dev, struct v4l2_async_subdev *asd)
+{
+#if IS_ENABLED(CONFIG_I2C)
+ struct i2c_client *client = i2c_verify_client(dev);
+ return client &&
+ asd->match.i2c.adapter_id == client->adapter->nr &&
+ asd->match.i2c.address == client->addr;
+#else
+ return false;
+#endif
+}
+
+static bool match_devname(struct device *dev, struct v4l2_async_subdev *asd)
+{
+ return !strcmp(asd->match.device_name.name, dev_name(dev));
+}
+
+static bool match_of(struct device *dev, struct v4l2_async_subdev *asd)
+{
+ return dev->of_node == asd->match.of.node;
+}
+
+static LIST_HEAD(subdev_list);
+static LIST_HEAD(notifier_list);
+static DEFINE_MUTEX(list_lock);
+
+static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd)
+{
+ struct v4l2_async_subdev *asd;
+ bool (*match)(struct device *, struct v4l2_async_subdev *);
+
+ list_for_each_entry(asd, &notifier->waiting, list) {
+ /* bus_type has been verified valid before */
+ switch (asd->match_type) {
+ case V4L2_ASYNC_MATCH_CUSTOM:
+ match = asd->match.custom.match;
+ if (!match)
+ /* Match always */
+ return asd;
+ break;
+ case V4L2_ASYNC_MATCH_DEVNAME:
+ match = match_devname;
+ break;
+ case V4L2_ASYNC_MATCH_I2C:
+ match = match_i2c;
+ break;
+ case V4L2_ASYNC_MATCH_OF:
+ match = match_of;
+ break;
+ default:
+ /* Cannot happen, unless someone breaks us */
+ WARN_ON(true);
+ return NULL;
+ }
+
+ /* match cannot be NULL here */
+ if (match(sd->dev, asd))
+ return asd;
+ }
+
+ return NULL;
+}
+
+static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ int ret;
+
+ /* Remove from the waiting list */
+ list_del(&asd->list);
+ sd->asd = asd;
+ sd->notifier = notifier;
+
+ if (notifier->bound) {
+ ret = notifier->bound(notifier, sd, asd);
+ if (ret < 0)
+ return ret;
+ }
+ /* Move from the global subdevice list to notifier's done */
+ list_move(&sd->async_list, &notifier->done);
+
+ ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
+ if (ret < 0) {
+ if (notifier->unbind)
+ notifier->unbind(notifier, sd, asd);
+ return ret;
+ }
+
+ if (list_empty(&notifier->waiting) && notifier->complete)
+ return notifier->complete(notifier);
+
+ return 0;
+}
+
+static void v4l2_async_cleanup(struct v4l2_subdev *sd)
+{
+ v4l2_device_unregister_subdev(sd);
+ /* Subdevice driver will reprobe and put the subdev back onto the list */
+ list_del_init(&sd->async_list);
+ sd->asd = NULL;
+ sd->dev = NULL;
+}
+
+int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
+ struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_subdev *sd, *tmp;
+ struct v4l2_async_subdev *asd;
+ int i;
+
+ if (!notifier->num_subdevs || notifier->num_subdevs > V4L2_MAX_SUBDEVS)
+ return -EINVAL;
+
+ notifier->v4l2_dev = v4l2_dev;
+ INIT_LIST_HEAD(&notifier->waiting);
+ INIT_LIST_HEAD(&notifier->done);
+
+ for (i = 0; i < notifier->num_subdevs; i++) {
+ asd = notifier->subdevs[i];
+
+ switch (asd->match_type) {
+ case V4L2_ASYNC_MATCH_CUSTOM:
+ case V4L2_ASYNC_MATCH_DEVNAME:
+ case V4L2_ASYNC_MATCH_I2C:
+ case V4L2_ASYNC_MATCH_OF:
+ break;
+ default:
+ dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
+ "Invalid match type %u on %p\n",
+ asd->match_type, asd);
+ return -EINVAL;
+ }
+ list_add_tail(&asd->list, &notifier->waiting);
+ }
+
+ mutex_lock(&list_lock);
+
+ /* Also keep completed notifiers on the list */
+ list_add(&notifier->list, &notifier_list);
+
+ list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
+ int ret;
+
+ asd = v4l2_async_belongs(notifier, sd);
+ if (!asd)
+ continue;
+
+ ret = v4l2_async_test_notify(notifier, sd, asd);
+ if (ret < 0) {
+ mutex_unlock(&list_lock);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_async_notifier_register);
+
+void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_subdev *sd, *tmp;
+ unsigned int notif_n_subdev = notifier->num_subdevs;
+ unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
+ struct device **dev;
+ int i = 0;
+
+ if (!notifier->v4l2_dev)
+ return;
+
+ dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(notifier->v4l2_dev->dev,
+ "Failed to allocate device cache!\n");
+ }
+
+ mutex_lock(&list_lock);
+
+ list_del(&notifier->list);
+
+ list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
+ struct device *d;
+
+ d = get_device(sd->dev);
+
+ v4l2_async_cleanup(sd);
+
+ /* If we handled USB devices, we'd have to lock the parent too */
+ device_release_driver(d);
+
+ if (notifier->unbind)
+ notifier->unbind(notifier, sd, sd->asd);
+
+ /*
+ * Store the device in the device cache, in order to call
+ * put_device() in the final step
+ */
+ if (dev)
+ dev[i++] = d;
+ else
+ put_device(d);
+ }
+
+ mutex_unlock(&list_lock);
+
+ /*
+ * Call device_attach() to reprobe devices
+ *
+ * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
+ * executed.
+ */
+ while (i--) {
+ struct device *d = dev[i];
+
+ if (d && device_attach(d) < 0) {
+ const char *name = "(none)";
+ int lock = device_trylock(d);
+
+ if (lock && d->driver)
+ name = d->driver->name;
+ dev_err(d, "Failed to re-probe to %s\n", name);
+ if (lock)
+ device_unlock(d);
+ }
+ put_device(d);
+ }
+ kfree(dev);
+
+ notifier->v4l2_dev = NULL;
+
+ /*
+ * Don't care about the waiting list, it is initialised and populated
+ * upon notifier registration.
+ */
+}
+EXPORT_SYMBOL(v4l2_async_notifier_unregister);
+
+int v4l2_async_register_subdev(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_notifier *notifier;
+
+ mutex_lock(&list_lock);
+
+ INIT_LIST_HEAD(&sd->async_list);
+
+ list_for_each_entry(notifier, &notifier_list, list) {
+ struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
+ if (asd) {
+ int ret = v4l2_async_test_notify(notifier, sd, asd);
+ mutex_unlock(&list_lock);
+ return ret;
+ }
+ }
+
+ /* None matched, wait for hot-plugging */
+ list_add(&sd->async_list, &subdev_list);
+
+ mutex_unlock(&list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_async_register_subdev);
+
+void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_notifier *notifier = sd->notifier;
+
+ if (!sd->asd) {
+ if (!list_empty(&sd->async_list))
+ v4l2_async_cleanup(sd);
+ return;
+ }
+
+ mutex_lock(&list_lock);
+
+ list_add(&sd->asd->list, &notifier->waiting);
+
+ v4l2_async_cleanup(sd);
+
+ if (notifier->unbind)
+ notifier->unbind(notifier, sd, sd->asd);
+
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(v4l2_async_unregister_subdev);
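
For reference, a minimal sketch (not part of this commit) of how a bridge driver consumes this asynchronous registration API; the demo_* names are hypothetical and error handling is elided:

#include <linux/kernel.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>

/* Wait for one I2C subdevice on adapter 0 at address 0x60 */
static struct v4l2_async_subdev demo_asd = {
	.match_type = V4L2_ASYNC_MATCH_I2C,
	.match.i2c = {
		.adapter_id = 0,
		.address = 0x60,
	},
};

static struct v4l2_async_subdev *demo_asds[] = { &demo_asd };

/* Called once the waiting list is empty, i.e. all subdevs are bound */
static int demo_complete(struct v4l2_async_notifier *notifier)
{
	return 0;
}

static struct v4l2_async_notifier demo_notifier = {
	.num_subdevs = ARRAY_SIZE(demo_asds),
	.subdevs = demo_asds,
	.complete = demo_complete,
};

/* From the bridge driver's probe path: */
static int demo_register(struct v4l2_device *v4l2_dev)
{
	return v4l2_async_notifier_register(v4l2_dev, &demo_notifier);
}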
diff --git a/kernel/drivers/media/v4l2-core/v4l2-clk.c b/kernel/drivers/media/v4l2-core/v4l2-clk.c
new file mode 100644
index 000000000..34e416a55
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-clk.c
@@ -0,0 +1,316 @@
+/*
+ * V4L2 clock service
+ *
+ * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <media/v4l2-clk.h>
+#include <media/v4l2-subdev.h>
+
+static DEFINE_MUTEX(clk_lock);
+static LIST_HEAD(clk_list);
+
+static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
+{
+ struct v4l2_clk *clk;
+
+ list_for_each_entry(clk, &clk_list, list)
+ if (!strcmp(dev_id, clk->dev_id))
+ return clk;
+
+ return ERR_PTR(-ENODEV);
+}
+
+struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
+{
+ struct v4l2_clk *clk;
+ struct clk *ccf_clk = clk_get(dev, id);
+
+ if (PTR_ERR(ccf_clk) == -EPROBE_DEFER)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!IS_ERR_OR_NULL(ccf_clk)) {
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk) {
+ clk_put(ccf_clk);
+ return ERR_PTR(-ENOMEM);
+ }
+ clk->clk = ccf_clk;
+
+ return clk;
+ }
+
+ mutex_lock(&clk_lock);
+ clk = v4l2_clk_find(dev_name(dev));
+
+ if (!IS_ERR(clk))
+ atomic_inc(&clk->use_count);
+ mutex_unlock(&clk_lock);
+
+ return clk;
+}
+EXPORT_SYMBOL(v4l2_clk_get);
+
+void v4l2_clk_put(struct v4l2_clk *clk)
+{
+ struct v4l2_clk *tmp;
+
+ if (IS_ERR(clk))
+ return;
+
+ if (clk->clk) {
+ clk_put(clk->clk);
+ kfree(clk);
+ return;
+ }
+
+ mutex_lock(&clk_lock);
+
+ list_for_each_entry(tmp, &clk_list, list)
+ if (tmp == clk)
+ atomic_dec(&clk->use_count);
+
+ mutex_unlock(&clk_lock);
+}
+EXPORT_SYMBOL(v4l2_clk_put);
+
+static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
+{
+ struct v4l2_clk *tmp;
+ int ret = -ENODEV;
+
+ mutex_lock(&clk_lock);
+
+ list_for_each_entry(tmp, &clk_list, list)
+ if (tmp == clk) {
+ ret = !try_module_get(clk->ops->owner);
+ if (ret)
+ ret = -EFAULT;
+ break;
+ }
+
+ mutex_unlock(&clk_lock);
+
+ return ret;
+}
+
+static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
+{
+ module_put(clk->ops->owner);
+}
+
+int v4l2_clk_enable(struct v4l2_clk *clk)
+{
+ int ret;
+
+ if (clk->clk)
+ return clk_prepare_enable(clk->clk);
+
+ ret = v4l2_clk_lock_driver(clk);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&clk->lock);
+
+ if (++clk->enable == 1 && clk->ops->enable) {
+ ret = clk->ops->enable(clk);
+ if (ret < 0)
+ clk->enable--;
+ }
+
+ mutex_unlock(&clk->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_clk_enable);
+
+/*
+ * You might Oops if you try to disable a disabled clock, because then the
+ * driver isn't locked and could have been unloaded by now; so don't do that.
+ */
+void v4l2_clk_disable(struct v4l2_clk *clk)
+{
+ int enable;
+
+ if (clk->clk)
+ return clk_disable_unprepare(clk->clk);
+
+ mutex_lock(&clk->lock);
+
+ enable = --clk->enable;
+ if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
+ clk->dev_id))
+ clk->enable++;
+ else if (!enable && clk->ops->disable)
+ clk->ops->disable(clk);
+
+ mutex_unlock(&clk->lock);
+
+ v4l2_clk_unlock_driver(clk);
+}
+EXPORT_SYMBOL(v4l2_clk_disable);
+
+unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
+{
+ int ret;
+
+ if (clk->clk)
+ return clk_get_rate(clk->clk);
+
+ ret = v4l2_clk_lock_driver(clk);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&clk->lock);
+ if (!clk->ops->get_rate)
+ ret = -ENOSYS;
+ else
+ ret = clk->ops->get_rate(clk);
+ mutex_unlock(&clk->lock);
+
+ v4l2_clk_unlock_driver(clk);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_clk_get_rate);
+
+int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
+{
+ int ret;
+
+ if (clk->clk) {
+ long r = clk_round_rate(clk->clk, rate);
+ if (r < 0)
+ return r;
+ return clk_set_rate(clk->clk, r);
+ }
+
+ ret = v4l2_clk_lock_driver(clk);
+
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&clk->lock);
+ if (!clk->ops->set_rate)
+ ret = -ENOSYS;
+ else
+ ret = clk->ops->set_rate(clk, rate);
+ mutex_unlock(&clk->lock);
+
+ v4l2_clk_unlock_driver(clk);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_clk_set_rate);
+
+struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
+ const char *dev_id,
+ void *priv)
+{
+ struct v4l2_clk *clk;
+ int ret;
+
+ if (!ops || !dev_id)
+ return ERR_PTR(-EINVAL);
+
+ clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
+ if (!clk)
+ return ERR_PTR(-ENOMEM);
+
+ clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
+ if (!clk->dev_id) {
+ ret = -ENOMEM;
+ goto ealloc;
+ }
+ clk->ops = ops;
+ clk->priv = priv;
+ atomic_set(&clk->use_count, 0);
+ mutex_init(&clk->lock);
+
+ mutex_lock(&clk_lock);
+ if (!IS_ERR(v4l2_clk_find(dev_id))) {
+ mutex_unlock(&clk_lock);
+ ret = -EEXIST;
+ goto eexist;
+ }
+ list_add_tail(&clk->list, &clk_list);
+ mutex_unlock(&clk_lock);
+
+ return clk;
+
+eexist:
+ealloc:
+ kfree(clk->dev_id);
+ kfree(clk);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(v4l2_clk_register);
+
+void v4l2_clk_unregister(struct v4l2_clk *clk)
+{
+ if (WARN(atomic_read(&clk->use_count),
+ "%s(): Refusing to unregister ref-counted %s clock!\n",
+ __func__, clk->dev_id))
+ return;
+
+ mutex_lock(&clk_lock);
+ list_del(&clk->list);
+ mutex_unlock(&clk_lock);
+
+ kfree(clk->dev_id);
+ kfree(clk);
+}
+EXPORT_SYMBOL(v4l2_clk_unregister);
+
+struct v4l2_clk_fixed {
+ unsigned long rate;
+ struct v4l2_clk_ops ops;
+};
+
+static unsigned long fixed_get_rate(struct v4l2_clk *clk)
+{
+ struct v4l2_clk_fixed *priv = clk->priv;
+ return priv->rate;
+}
+
+struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
+ unsigned long rate, struct module *owner)
+{
+ struct v4l2_clk *clk;
+ struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->rate = rate;
+ priv->ops.get_rate = fixed_get_rate;
+ priv->ops.owner = owner;
+
+ clk = v4l2_clk_register(&priv->ops, dev_id, priv);
+ if (IS_ERR(clk))
+ kfree(priv);
+
+ return clk;
+}
+EXPORT_SYMBOL(__v4l2_clk_register_fixed);
+
+void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
+{
+ kfree(clk->priv);
+ v4l2_clk_unregister(clk);
+}
+EXPORT_SYMBOL(v4l2_clk_unregister_fixed);
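+
+/*
+ * Example (illustrative sketch, not part of this file): a fixed-rate
+ * clock is usually created through the v4l2_clk_register_fixed() wrapper
+ * in v4l2-clk.h, which supplies THIS_MODULE, and must be dropped with
+ * v4l2_clk_unregister_fixed() so the private allocation is freed:
+ *
+ *	struct v4l2_clk *clk = v4l2_clk_register_fixed("1-0010", 24000000);
+ *	...
+ *	v4l2_clk_unregister_fixed(clk);
+ */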
diff --git a/kernel/drivers/media/v4l2-core/v4l2-common.c b/kernel/drivers/media/v4l2-core/v4l2-common.c
new file mode 100644
index 000000000..5b808500e
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-common.c
@@ -0,0 +1,407 @@
+/*
+ * Video for Linux Two
+ *
+ * A generic video device interface for the LINUX operating system
+ * using a set of device structures/vectors for low level operations.
+ *
+ * This file replaces the videodev.c file that comes with the
+ * regular kernel distribution.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Author: Bill Dirks <bill@thedirks.org>
+ * based on code by Alan Cox, <alan@cymru.net>
+ *
+ */
+
+/*
+ * Video capture interface for Linux
+ *
+ * A generic video device interface for the LINUX operating system
+ * using a set of device structures/vectors for low level operations.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
+ *
+ * Fixes:
+ */
+
+/*
+ * Video4linux 1/2 integration by Justin Schoeman
+ * <justin@suntiger.ee.up.ac.za>
+ * 2.4 PROCFS support ported from 2.4 kernels by
+ * Iñaki García Etxebarria <garetxe@euskalnet.net>
+ * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
+ * 2.4 devfs support ported from 2.4 kernels by
+ * Dan Merillat <dan@merillat.org>
+ * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#if defined(CONFIG_SPI)
+#include <linux/spi/spi.h>
+#endif
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/div64.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include <linux/videodev2.h>
+
+MODULE_AUTHOR("Bill Dirks, Justin Schoeman, Gerd Knorr");
+MODULE_DESCRIPTION("misc helper functions for v4l2 device drivers");
+MODULE_LICENSE("GPL");
+
+/*
+ *
+ * V 4 L 2 D R I V E R H E L P E R A P I
+ *
+ */
+
+/*
+ * Video Standard Operations (contributed by Michael Schimek)
+ */
+
+/* Helper functions for control handling */
+
+/* Fill in a struct v4l2_queryctrl */
+int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
+{
+ const char *name;
+ s64 min = _min;
+ s64 max = _max;
+ u64 step = _step;
+ s64 def = _def;
+
+ v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
+ &min, &max, &step, &def, &qctrl->flags);
+
+ if (name == NULL)
+ return -EINVAL;
+
+ qctrl->minimum = min;
+ qctrl->maximum = max;
+ qctrl->step = step;
+ qctrl->default_value = def;
+ qctrl->reserved[0] = qctrl->reserved[1] = 0;
+ strlcpy(qctrl->name, name, sizeof(qctrl->name));
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_query_fill);
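+
+/*
+ * Example (illustrative sketch, not part of this file): a driver's
+ * VIDIOC_QUERYCTRL handler can fill in the standard fields for a control
+ * it implements; the name, type and flags are derived from the ID:
+ *
+ *	struct v4l2_queryctrl qc = { .id = V4L2_CID_BRIGHTNESS };
+ *
+ *	if (v4l2_ctrl_query_fill(&qc, 0, 255, 1, 128))
+ *		return -EINVAL;
+ */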
+
+/* I2C Helper functions */
+
+#if IS_ENABLED(CONFIG_I2C)
+
+void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
+ const struct v4l2_subdev_ops *ops)
+{
+ v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
+ /* the owner is the same as the i2c_client's driver owner */
+ sd->owner = client->dev.driver->owner;
+ sd->dev = &client->dev;
+ /* i2c_client and v4l2_subdev point to one another */
+ v4l2_set_subdevdata(sd, client);
+ i2c_set_clientdata(client, sd);
+ /* initialize name */
+ snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
+ client->dev.driver->name, i2c_adapter_id(client->adapter),
+ client->addr);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
+
+/* Load an i2c sub-device. */
+struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter, struct i2c_board_info *info,
+ const unsigned short *probe_addrs)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct i2c_client *client;
+
+ BUG_ON(!v4l2_dev);
+
+ request_module(I2C_MODULE_PREFIX "%s", info->type);
+
+ /* Create the i2c client */
+ if (info->addr == 0 && probe_addrs)
+ client = i2c_new_probed_device(adapter, info, probe_addrs,
+ NULL);
+ else
+ client = i2c_new_device(adapter, info);
+
+ /* Note: by loading the module first we are certain that
+ client->dev.driver will be set if the driver was found. If the
+ module was not loaded first, then the i2c core tries to delay-load
+ the module for us, and then client->dev.driver is still NULL until
+ the module is finally loaded. This delay-load mechanism doesn't
+ work if other drivers want to use the i2c device, so explicitly
+ loading the module is the best alternative. */
+ if (client == NULL || client->dev.driver == NULL)
+ goto error;
+
+ /* Lock the module so we can safely get the v4l2_subdev pointer */
+ if (!try_module_get(client->dev.driver->owner))
+ goto error;
+ sd = i2c_get_clientdata(client);
+
+ /* Register with the v4l2_device which increases the module's
+ use count as well. */
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(client->dev.driver->owner);
+
+error:
+ /* If we have a client but no subdev, then something went wrong and
+ we must unregister the client. */
+ if (client && sd == NULL)
+ i2c_unregister_device(client);
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
+
+struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter, const char *client_type,
+ u8 addr, const unsigned short *probe_addrs)
+{
+ struct i2c_board_info info;
+
+ /* Set up the i2c board info with the device type and
+ the device address. */
+ memset(&info, 0, sizeof(info));
+ strlcpy(info.type, client_type, sizeof(info.type));
+ info.addr = addr;
+
+ return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
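+
+/*
+ * Example (illustrative sketch, not part of this file): loading a sensor
+ * subdevice at a known address; "ov7670" at 0x21 is only an illustration,
+ * and v4l2_dev/adapter are assumed to be set up by the bridge driver:
+ *
+ *	struct v4l2_subdev *sd;
+ *
+ *	sd = v4l2_i2c_new_subdev(v4l2_dev, adapter, "ov7670", 0x21, NULL);
+ *	if (sd == NULL)
+ *		return -ENODEV;
+ */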
+
+/* Return i2c client address of v4l2_subdev. */
+unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return client ? client->addr : I2C_CLIENT_END;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr);
+
+/* Return a list of I2C tuner addresses to probe. Use only if the tuner
+ addresses are unknown. */
+const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
+{
+ static const unsigned short radio_addrs[] = {
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
+ 0x10,
+#endif
+ 0x60,
+ I2C_CLIENT_END
+ };
+ static const unsigned short demod_addrs[] = {
+ 0x42, 0x43, 0x4a, 0x4b,
+ I2C_CLIENT_END
+ };
+ static const unsigned short tv_addrs[] = {
+ 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
+ 0x60, 0x61, 0x62, 0x63, 0x64,
+ I2C_CLIENT_END
+ };
+
+ switch (type) {
+ case ADDRS_RADIO:
+ return radio_addrs;
+ case ADDRS_DEMOD:
+ return demod_addrs;
+ case ADDRS_TV:
+ return tv_addrs;
+ case ADDRS_TV_WITH_DEMOD:
+ return tv_addrs + 4;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
+
+#endif /* IS_ENABLED(CONFIG_I2C) */
+
+#if defined(CONFIG_SPI)
+
+/* Load an spi sub-device. */
+
+void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
+ const struct v4l2_subdev_ops *ops)
+{
+ v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_IS_SPI;
+ /* the owner is the same as the spi_device's driver owner */
+ sd->owner = spi->dev.driver->owner;
+ sd->dev = &spi->dev;
+ /* spi_device and v4l2_subdev point to one another */
+ v4l2_set_subdevdata(sd, spi);
+ spi_set_drvdata(spi, sd);
+ /* initialize name */
+ strlcpy(sd->name, spi->dev.driver->name, sizeof(sd->name));
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);
+
+struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
+ struct spi_master *master, struct spi_board_info *info)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct spi_device *spi = NULL;
+
+ BUG_ON(!v4l2_dev);
+
+ if (info->modalias[0])
+ request_module(info->modalias);
+
+ spi = spi_new_device(master, info);
+
+ if (spi == NULL || spi->dev.driver == NULL)
+ goto error;
+
+ if (!try_module_get(spi->dev.driver->owner))
+ goto error;
+
+ sd = spi_get_drvdata(spi);
+
+ /* Register with the v4l2_device which increases the module's
+ use count as well. */
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(spi->dev.driver->owner);
+
+error:
+ /* If we have a client but no subdev, then something went wrong and
+ we must unregister the client. */
+ if (spi && sd == NULL)
+ spi_unregister_device(spi);
+
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev);
+
+#endif /* defined(CONFIG_SPI) */
+
+/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
+ * and max don't have to be aligned, but there must be at least one valid
+ * value. E.g., min=17, max=31, align=4 is not allowed, as there are no
+ * multiples of 16 between 17 and 31. */
+static unsigned int clamp_align(unsigned int x, unsigned int min,
+ unsigned int max, unsigned int align)
+{
+ /* Bits that must be zero to be aligned */
+ unsigned int mask = ~((1 << align) - 1);
+
+ /* Clamp to aligned min and max */
+ x = clamp(x, (min + ~mask) & mask, max & mask);
+
+ /* Round to nearest aligned value */
+ if (align)
+ x = (x + (1 << (align - 1))) & mask;
+
+ return x;
+}
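+
+/*
+ * Worked example (illustrative): clamp_align(100, 32, 256, 4) clamps to
+ * the 16-aligned range [32, 256] and then rounds 100 to the nearest
+ * multiple of 16: (100 + 8) & ~15 = 96.
+ */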
+
+/* Bound an image to have a width between wmin and wmax, and height between
+ * hmin and hmax, inclusive. Additionally, the width will be a multiple of
+ * 2^walign, the height will be a multiple of 2^halign, and the overall size
+ * (width*height) will be a multiple of 2^salign. The image may be shrunk
+ * or enlarged to fit the alignment constraints.
+ *
+ * The width or height maximum must not be smaller than the corresponding
+ * minimum. The alignments must not be so high that there are no possible
+ * image sizes within the allowed bounds. wmin and hmin must be at least 1
+ * (don't use 0). If you don't care about a certain alignment, specify 0,
+ * as 2^0 is 1 and one byte alignment is equivalent to no alignment. If
+ * you only want to adjust downward, specify a maximum that's the same as
+ * the initial value.
+ */
+void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
+ unsigned int walign,
+ u32 *h, unsigned int hmin, unsigned int hmax,
+ unsigned int halign, unsigned int salign)
+{
+ *w = clamp_align(*w, wmin, wmax, walign);
+ *h = clamp_align(*h, hmin, hmax, halign);
+
+ /* Usually we don't need to align the size and are done now. */
+ if (!salign)
+ return;
+
+ /* How much alignment do we have? */
+ walign = __ffs(*w);
+ halign = __ffs(*h);
+ /* Enough to satisfy the image alignment? */
+ if (walign + halign < salign) {
+ /* Max walign where there is still a valid width */
+ unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
+ /* Max halign where there is still a valid height */
+ unsigned int hmaxa = __fls(hmax ^ (hmin - 1));
+
+ /* up the smaller alignment until we have enough */
+ do {
+ if (halign >= hmaxa ||
+ (walign <= halign && walign < wmaxa)) {
+ *w = clamp_align(*w, wmin, wmax, walign + 1);
+ walign = __ffs(*w);
+ } else {
+ *h = clamp_align(*h, hmin, hmax, halign + 1);
+ halign = __ffs(*h);
+ }
+ } while (halign + walign < salign);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l_bound_align_image);
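+
+/*
+ * Example (illustrative sketch, not part of this file): bounding a
+ * requested 1921x1081 image to at most 1920x1080 with even width and
+ * height (walign = halign = 1, i.e. multiples of 2) and no overall size
+ * alignment:
+ *
+ *	u32 w = 1921, h = 1081;
+ *
+ *	v4l_bound_align_image(&w, 32, 1920, 1, &h, 32, 1080, 1, 0);
+ *	(afterwards w == 1920 and h == 1080)
+ */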
+
+const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
+ const struct v4l2_discrete_probe *probe,
+ s32 width, s32 height)
+{
+ int i;
+ u32 error, min_error = UINT_MAX;
+ const struct v4l2_frmsize_discrete *size, *best = NULL;
+
+ if (!probe)
+ return best;
+
+ for (i = 0, size = probe->sizes; i < probe->num_sizes; i++, size++) {
+ error = abs(size->width - width) + abs(size->height - height);
+ if (error < min_error) {
+ min_error = error;
+ best = size;
+ }
+ if (!error)
+ break;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_nearest_format);
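+
+/*
+ * Worked example (illustrative): given discrete sizes 160x120, 640x480
+ * and 1280x720, a request for 600x400 selects 640x480, whose error
+ * |640 - 600| + |480 - 400| = 120 is the smallest of the three.
+ */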
+
+void v4l2_get_timestamp(struct timeval *tv)
+{
+ struct timespec ts;
+
+ ktime_get_ts(&ts);
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+}
+EXPORT_SYMBOL_GPL(v4l2_get_timestamp);
diff --git a/kernel/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/kernel/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
new file mode 100644
index 000000000..af6354305
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -0,0 +1,1040 @@
+/*
+ * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
+ * Separated from fs stuff by Arnd Bergmann <arnd@arndb.de>
+ *
+ * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
+ * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
+ * Copyright (C) 2003 Pavel Machek (pavel@ucw.cz)
+ * Copyright (C) 2005 Philippe De Muyter (phdm@macqel.be)
+ * Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * ioctls.
+ */
+
+#include <linux/compat.h>
+#include <linux/module.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-subdev.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+
+static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret = -ENOIOCTLCMD;
+
+ if (file->f_op->unlocked_ioctl)
+ ret = file->f_op->unlocked_ioctl(file, cmd, arg);
+
+ return ret;
+}
+
+
+struct v4l2_clip32 {
+ struct v4l2_rect c;
+ compat_caddr_t next;
+};
+
+struct v4l2_window32 {
+ struct v4l2_rect w;
+ __u32 field; /* enum v4l2_field */
+ __u32 chromakey;
+ compat_caddr_t clips; /* actually struct v4l2_clip32 * */
+ __u32 clipcount;
+ compat_caddr_t bitmap;
+};
+
+static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) ||
+ copy_from_user(&kp->w, &up->w, sizeof(up->w)) ||
+ get_user(kp->field, &up->field) ||
+ get_user(kp->chromakey, &up->chromakey) ||
+ get_user(kp->clipcount, &up->clipcount))
+ return -EFAULT;
+ if (kp->clipcount > 2048)
+ return -EINVAL;
+ if (kp->clipcount) {
+ struct v4l2_clip32 __user *uclips;
+ struct v4l2_clip __user *kclips;
+ int n = kp->clipcount;
+ compat_caddr_t p;
+
+ if (get_user(p, &up->clips))
+ return -EFAULT;
+ uclips = compat_ptr(p);
+ kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip));
+ kp->clips = kclips;
+ while (--n >= 0) {
+ if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
+ return -EFAULT;
+ if (put_user(n ? kclips + 1 : NULL, &kclips->next))
+ return -EFAULT;
+ uclips += 1;
+ kclips += 1;
+ }
+ } else
+ kp->clips = NULL;
+ return 0;
+}
+
+static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
+{
+ if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) ||
+ put_user(kp->field, &up->field) ||
+ put_user(kp->chromakey, &up->chromakey) ||
+ put_user(kp->clipcount, &up->clipcount))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
+{
+ if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
+ struct v4l2_pix_format_mplane __user *up)
+{
+ if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
+{
+ if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
+ struct v4l2_pix_format_mplane __user *up)
+{
+ if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+{
+ if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+{
+ if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
+{
+ if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
+{
+ if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
+ return -EFAULT;
+ return 0;
+}
+
+struct v4l2_format32 {
+ __u32 type; /* enum v4l2_buf_type */
+ union {
+ struct v4l2_pix_format pix;
+ struct v4l2_pix_format_mplane pix_mp;
+ struct v4l2_window32 win;
+ struct v4l2_vbi_format vbi;
+ struct v4l2_sliced_vbi_format sliced;
+ __u8 raw_data[200]; /* user-defined */
+ } fmt;
+};
+
+/**
+ * struct v4l2_create_buffers32 - VIDIOC_CREATE_BUFS32 argument
+ * @index: on return, index of the first created buffer
+ * @count: entry: number of requested buffers,
+ * return: number of created buffers
+ * @memory: buffer memory type
+ * @format: frame format, for which buffers are requested
+ * @reserved: future extensions
+ */
+struct v4l2_create_buffers32 {
+ __u32 index;
+ __u32 count;
+ __u32 memory; /* enum v4l2_memory */
+ struct v4l2_format32 format;
+ __u32 reserved[8];
+};
+
+static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+{
+ if (get_user(kp->type, &up->type))
+ return -EFAULT;
+
+ switch (kp->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
+ &up->fmt.pix_mp);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ return get_v4l2_window32(&kp->fmt.win, &up->fmt.win);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
+ default:
+ printk(KERN_INFO "compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
+ kp->type);
+ return -EINVAL;
+ }
+}
+
+static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+ return -EFAULT;
+ return __get_v4l2_format32(kp, up);
+}
+
+static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
+ copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
+ return -EFAULT;
+ return __get_v4l2_format32(&kp->format, &up->format);
+}
+
+static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+{
+ if (put_user(kp->type, &up->type))
+ return -EFAULT;
+
+ switch (kp->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
+ &up->fmt.pix_mp);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ return put_v4l2_window32(&kp->fmt.win, &up->fmt.win);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
+ default:
+ printk(KERN_INFO "compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
+ kp->type);
+ return -EINVAL;
+ }
+}
+
+static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)))
+ return -EFAULT;
+ return __put_v4l2_format32(kp, up);
+}
+
+static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
+ copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)))
+ return -EFAULT;
+ return __put_v4l2_format32(&kp->format, &up->format);
+}
+
+struct v4l2_standard32 {
+ __u32 index;
+ __u32 id[2]; /* __u64 would get the alignment wrong */
+ __u8 name[24];
+ struct v4l2_fract frameperiod; /* Frames, not fields */
+ __u32 framelines;
+ __u32 reserved[4];
+};
+
+static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+{
+ /* other fields are not set by the user, nor used by the driver */
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) ||
+ get_user(kp->index, &up->index))
+ return -EFAULT;
+ return 0;
+}
+
+static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
+ put_user(kp->index, &up->index) ||
+ copy_to_user(up->id, &kp->id, sizeof(__u64)) ||
+ copy_to_user(up->name, kp->name, 24) ||
+ copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
+ put_user(kp->framelines, &up->framelines) ||
+ copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
+ return -EFAULT;
+ return 0;
+}
+
+struct v4l2_plane32 {
+ __u32 bytesused;
+ __u32 length;
+ union {
+ __u32 mem_offset;
+ compat_long_t userptr;
+ __s32 fd;
+ } m;
+ __u32 data_offset;
+ __u32 reserved[11];
+};
+
+struct v4l2_buffer32 {
+ __u32 index;
+ __u32 type; /* enum v4l2_buf_type */
+ __u32 bytesused;
+ __u32 flags;
+ __u32 field; /* enum v4l2_field */
+ struct compat_timeval timestamp;
+ struct v4l2_timecode timecode;
+ __u32 sequence;
+
+ /* memory location */
+ __u32 memory; /* enum v4l2_memory */
+ union {
+ __u32 offset;
+ compat_long_t userptr;
+ compat_caddr_t planes;
+ __s32 fd;
+ } m;
+ __u32 length;
+ __u32 reserved2;
+ __u32 reserved;
+};
+
+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
+ enum v4l2_memory memory)
+{
+ void __user *up_pln;
+ compat_long_t p;
+
+ if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
+ copy_in_user(&up->data_offset, &up32->data_offset,
+ sizeof(__u32)))
+ return -EFAULT;
+
+ if (memory == V4L2_MEMORY_USERPTR) {
+ if (get_user(p, &up32->m.userptr))
+ return -EFAULT;
+ up_pln = compat_ptr(p);
+ if (put_user((unsigned long)up_pln, &up->m.userptr))
+ return -EFAULT;
+ } else if (memory == V4L2_MEMORY_DMABUF) {
+ if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int)))
+ return -EFAULT;
+ } else {
+ if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
+ sizeof(__u32)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
+ enum v4l2_memory memory)
+{
+ if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
+ copy_in_user(&up32->data_offset, &up->data_offset,
+ sizeof(__u32)))
+ return -EFAULT;
+
+ /* For MMAP, driver might've set up the offset, so copy it back.
+ * USERPTR stays the same (was userspace-provided), so no copying. */
+ if (memory == V4L2_MEMORY_MMAP)
+ if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
+ sizeof(__u32)))
+ return -EFAULT;
+ /* For DMABUF, driver might've set up the fd, so copy it back. */
+ if (memory == V4L2_MEMORY_DMABUF)
+ if (copy_in_user(&up32->m.fd, &up->m.fd,
+ sizeof(int)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+{
+ struct v4l2_plane32 __user *uplane32;
+ struct v4l2_plane __user *uplane;
+ compat_caddr_t p;
+ int num_planes;
+ int ret;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
+ get_user(kp->index, &up->index) ||
+ get_user(kp->type, &up->type) ||
+ get_user(kp->flags, &up->flags) ||
+ get_user(kp->memory, &up->memory))
+ return -EFAULT;
+
+ if (V4L2_TYPE_IS_OUTPUT(kp->type))
+ if (get_user(kp->bytesused, &up->bytesused) ||
+ get_user(kp->field, &up->field) ||
+ get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+ get_user(kp->timestamp.tv_usec,
+ &up->timestamp.tv_usec))
+ return -EFAULT;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+ if (get_user(kp->length, &up->length))
+ return -EFAULT;
+
+ num_planes = kp->length;
+ if (num_planes == 0) {
+ kp->m.planes = NULL;
+ /* num_planes == 0 is legal, e.g. when userspace
+ * doesn't need the planes array on DQBUF */
+ return 0;
+ }
+
+ if (get_user(p, &up->m.planes))
+ return -EFAULT;
+
+ uplane32 = compat_ptr(p);
+ if (!access_ok(VERIFY_READ, uplane32,
+ num_planes * sizeof(struct v4l2_plane32)))
+ return -EFAULT;
+
+ /* We don't really care if userspace decides to kill itself
+ * by passing a very big num_planes value */
+ uplane = compat_alloc_user_space(num_planes *
+ sizeof(struct v4l2_plane));
+ kp->m.planes = (__force struct v4l2_plane *)uplane;
+
+ while (--num_planes >= 0) {
+ ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
+ if (ret)
+ return ret;
+ ++uplane;
+ ++uplane32;
+ }
+ } else {
+ switch (kp->memory) {
+ case V4L2_MEMORY_MMAP:
+ if (get_user(kp->length, &up->length) ||
+ get_user(kp->m.offset, &up->m.offset))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ {
+ compat_long_t tmp;
+
+ if (get_user(kp->length, &up->length) ||
+ get_user(tmp, &up->m.userptr))
+ return -EFAULT;
+
+ kp->m.userptr = (unsigned long)compat_ptr(tmp);
+ }
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ if (get_user(kp->m.offset, &up->m.offset))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_DMABUF:
+ if (get_user(kp->m.fd, &up->m.fd))
+ return -EFAULT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+{
+ struct v4l2_plane32 __user *uplane32;
+ struct v4l2_plane __user *uplane;
+ compat_caddr_t p;
+ int num_planes;
+ int ret;
+
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) ||
+ put_user(kp->index, &up->index) ||
+ put_user(kp->type, &up->type) ||
+ put_user(kp->flags, &up->flags) ||
+ put_user(kp->memory, &up->memory))
+ return -EFAULT;
+
+ if (put_user(kp->bytesused, &up->bytesused) ||
+ put_user(kp->field, &up->field) ||
+ put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+ put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
+ copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
+ put_user(kp->sequence, &up->sequence) ||
+ put_user(kp->reserved2, &up->reserved2) ||
+ put_user(kp->reserved, &up->reserved))
+ return -EFAULT;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+ num_planes = kp->length;
+ if (num_planes == 0)
+ return 0;
+
+ uplane = (__force struct v4l2_plane __user *)kp->m.planes;
+ if (get_user(p, &up->m.planes))
+ return -EFAULT;
+ uplane32 = compat_ptr(p);
+
+ while (--num_planes >= 0) {
+ ret = put_v4l2_plane32(uplane, uplane32, kp->memory);
+ if (ret)
+ return ret;
+ ++uplane;
+ ++uplane32;
+ }
+ } else {
+ switch (kp->memory) {
+ case V4L2_MEMORY_MMAP:
+ if (put_user(kp->length, &up->length) ||
+ put_user(kp->m.offset, &up->m.offset))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ if (put_user(kp->length, &up->length) ||
+ put_user(kp->m.userptr, &up->m.userptr))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ if (put_user(kp->m.offset, &up->m.offset))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_DMABUF:
+ if (put_user(kp->m.fd, &up->m.fd))
+ return -EFAULT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+struct v4l2_framebuffer32 {
+ __u32 capability;
+ __u32 flags;
+ compat_caddr_t base;
+ struct {
+ __u32 width;
+ __u32 height;
+ __u32 pixelformat;
+ __u32 field;
+ __u32 bytesperline;
+ __u32 sizeimage;
+ __u32 colorspace;
+ __u32 priv;
+ } fmt;
+};
+
+static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+{
+ u32 tmp;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) ||
+ get_user(tmp, &up->base) ||
+ get_user(kp->capability, &up->capability) ||
+ get_user(kp->flags, &up->flags) ||
+ copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
+ return -EFAULT;
+ kp->base = (__force void *)compat_ptr(tmp);
+ return 0;
+}
+
+static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+{
+ u32 tmp = (u32)((unsigned long)kp->base);
+
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) ||
+ put_user(tmp, &up->base) ||
+ put_user(kp->capability, &up->capability) ||
+ put_user(kp->flags, &up->flags) ||
+ copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt)))
+ return -EFAULT;
+ return 0;
+}
+
+struct v4l2_input32 {
+ __u32 index; /* Which input */
+ __u8 name[32]; /* Label */
+ __u32 type; /* Type of input */
+ __u32 audioset; /* Associated audios (bitfield) */
+ __u32 tuner; /* Associated tuner */
+ v4l2_std_id std;
+ __u32 status;
+ __u32 reserved[4];
+} __attribute__ ((packed));
+
+/* The 64-bit v4l2_input struct has extra padding at the end; otherwise it
+ is identical to the 32-bit version. */
+static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
+{
+ if (copy_from_user(kp, up, sizeof(struct v4l2_input32)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
+{
+ if (copy_to_user(up, kp, sizeof(struct v4l2_input32)))
+ return -EFAULT;
+ return 0;
+}
+
+struct v4l2_ext_controls32 {
+ __u32 ctrl_class;
+ __u32 count;
+ __u32 error_idx;
+ __u32 reserved[2];
+ compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
+};
+
+struct v4l2_ext_control32 {
+ __u32 id;
+ __u32 size;
+ __u32 reserved2[1];
+ union {
+ __s32 value;
+ __s64 value64;
+ compat_caddr_t string; /* actually char * */
+ };
+} __attribute__ ((packed));
+
+/* The following function really belongs in v4l2-common, but that causes
+ a circular dependency between modules. We need to think about this, but
+ for now this will do. */
+
+/* Return non-zero if this control is a pointer type. Currently only
+ type STRING is a pointer type. */
+static inline int ctrl_is_pointer(u32 id)
+{
+ switch (id) {
+ case V4L2_CID_RDS_TX_PS_NAME:
+ case V4L2_CID_RDS_TX_RADIO_TEXT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
+{
+ struct v4l2_ext_control32 __user *ucontrols;
+ struct v4l2_ext_control __user *kcontrols;
+ int n;
+ compat_caddr_t p;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
+ get_user(kp->ctrl_class, &up->ctrl_class) ||
+ get_user(kp->count, &up->count) ||
+ get_user(kp->error_idx, &up->error_idx) ||
+ copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+ return -EFAULT;
+ n = kp->count;
+ if (n == 0) {
+ kp->controls = NULL;
+ return 0;
+ }
+ if (get_user(p, &up->controls))
+ return -EFAULT;
+ ucontrols = compat_ptr(p);
+ if (!access_ok(VERIFY_READ, ucontrols,
+ n * sizeof(struct v4l2_ext_control32)))
+ return -EFAULT;
+ kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
+ kp->controls = (__force struct v4l2_ext_control *)kcontrols;
+ while (--n >= 0) {
+ u32 id;
+
+ if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
+ return -EFAULT;
+ if (get_user(id, &kcontrols->id))
+ return -EFAULT;
+ if (ctrl_is_pointer(id)) {
+ void __user *s;
+
+ if (get_user(p, &ucontrols->string))
+ return -EFAULT;
+ s = compat_ptr(p);
+ if (put_user(s, &kcontrols->string))
+ return -EFAULT;
+ }
+ ucontrols++;
+ kcontrols++;
+ }
+ return 0;
+}
+
+static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
+{
+ struct v4l2_ext_control32 __user *ucontrols;
+ struct v4l2_ext_control __user *kcontrols =
+ (__force struct v4l2_ext_control __user *)kp->controls;
+ int n = kp->count;
+ compat_caddr_t p;
+
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) ||
+ put_user(kp->ctrl_class, &up->ctrl_class) ||
+ put_user(kp->count, &up->count) ||
+ put_user(kp->error_idx, &up->error_idx) ||
+ copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+ return -EFAULT;
+ if (!kp->count)
+ return 0;
+
+ if (get_user(p, &up->controls))
+ return -EFAULT;
+ ucontrols = compat_ptr(p);
+ if (!access_ok(VERIFY_WRITE, ucontrols,
+ n * sizeof(struct v4l2_ext_control32)))
+ return -EFAULT;
+
+ while (--n >= 0) {
+ unsigned size = sizeof(*ucontrols);
+ u32 id;
+
+ if (get_user(id, &kcontrols->id))
+ return -EFAULT;
+ /* Do not modify the pointer when copying a pointer control.
+ The contents of the pointer were changed, not the pointer
+ itself. */
+ if (ctrl_is_pointer(id))
+ size -= sizeof(ucontrols->value64);
+ if (copy_in_user(ucontrols, kcontrols, size))
+ return -EFAULT;
+ ucontrols++;
+ kcontrols++;
+ }
+ return 0;
+}
+
+struct v4l2_event32 {
+ __u32 type;
+ union {
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct compat_timespec timestamp;
+ __u32 id;
+ __u32 reserved[8];
+};
+
+static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
+ put_user(kp->type, &up->type) ||
+ copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
+ put_user(kp->pending, &up->pending) ||
+ put_user(kp->sequence, &up->sequence) ||
+ compat_put_timespec(&kp->timestamp, &up->timestamp) ||
+ put_user(kp->id, &up->id) ||
+ copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
+ return -EFAULT;
+ return 0;
+}
+
+struct v4l2_edid32 {
+ __u32 pad;
+ __u32 start_block;
+ __u32 blocks;
+ __u32 reserved[5];
+ compat_caddr_t edid;
+};
+
+static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+{
+ u32 tmp;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) ||
+ get_user(kp->pad, &up->pad) ||
+ get_user(kp->start_block, &up->start_block) ||
+ get_user(kp->blocks, &up->blocks) ||
+ get_user(tmp, &up->edid) ||
+ copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+ return -EFAULT;
+ kp->edid = (__force u8 *)compat_ptr(tmp);
+ return 0;
+}
+
+static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+{
+ u32 tmp = (u32)((unsigned long)kp->edid);
+
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) ||
+ put_user(kp->pad, &up->pad) ||
+ put_user(kp->start_block, &up->start_block) ||
+ put_user(kp->blocks, &up->blocks) ||
+ put_user(tmp, &up->edid) ||
+ copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+ return -EFAULT;
+ return 0;
+}
+
+
+#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32)
+#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32)
+#define VIDIOC_QUERYBUF32 _IOWR('V', 9, struct v4l2_buffer32)
+#define VIDIOC_G_FBUF32 _IOR ('V', 10, struct v4l2_framebuffer32)
+#define VIDIOC_S_FBUF32 _IOW ('V', 11, struct v4l2_framebuffer32)
+#define VIDIOC_QBUF32 _IOWR('V', 15, struct v4l2_buffer32)
+#define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32)
+#define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32)
+#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
+#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32)
+#define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32)
+#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
+#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
+#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
+#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32)
+#define VIDIOC_DQEVENT32 _IOR ('V', 89, struct v4l2_event32)
+#define VIDIOC_CREATE_BUFS32 _IOWR('V', 92, struct v4l2_create_buffers32)
+#define VIDIOC_PREPARE_BUF32 _IOWR('V', 93, struct v4l2_buffer32)
+
+#define VIDIOC_OVERLAY32 _IOW ('V', 14, s32)
+#define VIDIOC_STREAMON32 _IOW ('V', 18, s32)
+#define VIDIOC_STREAMOFF32 _IOW ('V', 19, s32)
+#define VIDIOC_G_INPUT32 _IOR ('V', 38, s32)
+#define VIDIOC_S_INPUT32 _IOWR('V', 39, s32)
+#define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32)
+#define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32)
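+
+/*
+ * Note: these 32-bit command numbers differ from the native ones only in
+ * the argument size that _IOWR()/_IOR()/_IOW() encode into the command,
+ * e.g. VIDIOC_G_FMT32 encodes sizeof(struct v4l2_format32) where
+ * VIDIOC_G_FMT encodes sizeof(struct v4l2_format). do_video_ioctl()
+ * below therefore maps each 32-bit command back to its native
+ * counterpart before calling the driver.
+ */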
+
+static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ union {
+ struct v4l2_format v2f;
+ struct v4l2_buffer v2b;
+ struct v4l2_framebuffer v2fb;
+ struct v4l2_input v2i;
+ struct v4l2_standard v2s;
+ struct v4l2_ext_controls v2ecs;
+ struct v4l2_event v2ev;
+ struct v4l2_create_buffers v2crt;
+ struct v4l2_edid v2edid;
+ unsigned long vx;
+ int vi;
+ } karg;
+ void __user *up = compat_ptr(arg);
+ int compatible_arg = 1;
+ long err = 0;
+
+ /* First, convert the command. */
+ switch (cmd) {
+ case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
+ case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break;
+ case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break;
+ case VIDIOC_G_FBUF32: cmd = VIDIOC_G_FBUF; break;
+ case VIDIOC_S_FBUF32: cmd = VIDIOC_S_FBUF; break;
+ case VIDIOC_QBUF32: cmd = VIDIOC_QBUF; break;
+ case VIDIOC_DQBUF32: cmd = VIDIOC_DQBUF; break;
+ case VIDIOC_ENUMSTD32: cmd = VIDIOC_ENUMSTD; break;
+ case VIDIOC_ENUMINPUT32: cmd = VIDIOC_ENUMINPUT; break;
+ case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break;
+ case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break;
+ case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break;
+ case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break;
+ case VIDIOC_DQEVENT32: cmd = VIDIOC_DQEVENT; break;
+ case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break;
+ case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break;
+ case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break;
+ case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break;
+ case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break;
+ case VIDIOC_G_OUTPUT32: cmd = VIDIOC_G_OUTPUT; break;
+ case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break;
+ case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break;
+ case VIDIOC_PREPARE_BUF32: cmd = VIDIOC_PREPARE_BUF; break;
+ case VIDIOC_G_EDID32: cmd = VIDIOC_G_EDID; break;
+ case VIDIOC_S_EDID32: cmd = VIDIOC_S_EDID; break;
+ }
+
+ switch (cmd) {
+ case VIDIOC_OVERLAY:
+ case VIDIOC_STREAMON:
+ case VIDIOC_STREAMOFF:
+ case VIDIOC_S_INPUT:
+ case VIDIOC_S_OUTPUT:
+ err = get_user(karg.vi, (s32 __user *)up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_G_INPUT:
+ case VIDIOC_G_OUTPUT:
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_G_EDID:
+ case VIDIOC_S_EDID:
+ err = get_v4l2_edid32(&karg.v2edid, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_G_FMT:
+ case VIDIOC_S_FMT:
+ case VIDIOC_TRY_FMT:
+ err = get_v4l2_format32(&karg.v2f, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_CREATE_BUFS:
+ err = get_v4l2_create32(&karg.v2crt, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_PREPARE_BUF:
+ case VIDIOC_QUERYBUF:
+ case VIDIOC_QBUF:
+ case VIDIOC_DQBUF:
+ err = get_v4l2_buffer32(&karg.v2b, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_S_FBUF:
+ err = get_v4l2_framebuffer32(&karg.v2fb, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_G_FBUF:
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_ENUMSTD:
+ err = get_v4l2_standard32(&karg.v2s, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_ENUMINPUT:
+ err = get_v4l2_input32(&karg.v2i, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_G_EXT_CTRLS:
+ case VIDIOC_S_EXT_CTRLS:
+ case VIDIOC_TRY_EXT_CTRLS:
+ err = get_v4l2_ext_controls32(&karg.v2ecs, up);
+ compatible_arg = 0;
+ break;
+ case VIDIOC_DQEVENT:
+ compatible_arg = 0;
+ break;
+ }
+ if (err)
+ return err;
+
+ if (compatible_arg)
+ err = native_ioctl(file, cmd, (unsigned long)up);
+ else {
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ err = native_ioctl(file, cmd, (unsigned long)&karg);
+ set_fs(old_fs);
+ }
+
+ /* Special case: even after an error we need to put the
+ results back for these ioctls since the error_idx will
+ contain information on which control failed. */
+ switch (cmd) {
+ case VIDIOC_G_EXT_CTRLS:
+ case VIDIOC_S_EXT_CTRLS:
+ case VIDIOC_TRY_EXT_CTRLS:
+ if (put_v4l2_ext_controls32(&karg.v2ecs, up))
+ err = -EFAULT;
+ break;
+ }
+ if (err)
+ return err;
+
+ switch (cmd) {
+ case VIDIOC_S_INPUT:
+ case VIDIOC_S_OUTPUT:
+ case VIDIOC_G_INPUT:
+ case VIDIOC_G_OUTPUT:
+ err = put_user(((s32)karg.vi), (s32 __user *)up);
+ break;
+
+ case VIDIOC_G_FBUF:
+ err = put_v4l2_framebuffer32(&karg.v2fb, up);
+ break;
+
+ case VIDIOC_DQEVENT:
+ err = put_v4l2_event32(&karg.v2ev, up);
+ break;
+
+ case VIDIOC_G_EDID:
+ case VIDIOC_S_EDID:
+ err = put_v4l2_edid32(&karg.v2edid, up);
+ break;
+
+ case VIDIOC_G_FMT:
+ case VIDIOC_S_FMT:
+ case VIDIOC_TRY_FMT:
+ err = put_v4l2_format32(&karg.v2f, up);
+ break;
+
+ case VIDIOC_CREATE_BUFS:
+ err = put_v4l2_create32(&karg.v2crt, up);
+ break;
+
+ case VIDIOC_QUERYBUF:
+ case VIDIOC_QBUF:
+ case VIDIOC_DQBUF:
+ err = put_v4l2_buffer32(&karg.v2b, up);
+ break;
+
+ case VIDIOC_ENUMSTD:
+ err = put_v4l2_standard32(&karg.v2s, up);
+ break;
+
+ case VIDIOC_ENUMINPUT:
+ err = put_v4l2_input32(&karg.v2i, up);
+ break;
+ }
+ return err;
+}
+
+long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ long ret = -ENOIOCTLCMD;
+
+ if (!file->f_op->unlocked_ioctl)
+ return ret;
+
+ if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
+ ret = do_video_ioctl(file, cmd, arg);
+ else if (vdev->fops->compat_ioctl32)
+ ret = vdev->fops->compat_ioctl32(file, cmd, arg);
+
+ if (ret == -ENOIOCTLCMD)
+ pr_warn("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
+ _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32);
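+
+/*
+ * Example (illustrative sketch, not part of this file): the V4L2 core
+ * typically installs this handler as the .compat_ioctl of the video
+ * device's file_operations, roughly like:
+ *
+ *	static const struct file_operations my_v4l2_fops = {
+ *		.owner = THIS_MODULE,
+ *		.unlocked_ioctl = video_ioctl2,
+ *	#ifdef CONFIG_COMPAT
+ *		.compat_ioctl = v4l2_compat_ioctl32,
+ *	#endif
+ *	};
+ */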
diff --git a/kernel/drivers/media/v4l2-core/v4l2-ctrls.c b/kernel/drivers/media/v4l2-core/v4l2-ctrls.c
new file mode 100644
index 000000000..e3a346800
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -0,0 +1,3461 @@
+/*
+ V4L2 controls framework implementation.
+
+ Copyright (C) 2010 Hans Verkuil <hverkuil@xs4all.nl>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dev.h>
+
+#define has_op(master, op) \
+ (master->ops && master->ops->op)
+#define call_op(master, op) \
+ (has_op(master, op) ? master->ops->op(master) : 0)
+
+/* Internal temporary helper struct, one for each v4l2_ext_control */
+struct v4l2_ctrl_helper {
+ /* Pointer to the control reference of the master control */
+ struct v4l2_ctrl_ref *mref;
+ /* The control corresponding to the v4l2_ext_control ID field. */
+ struct v4l2_ctrl *ctrl;
+ /* v4l2_ext_control index of the next control belonging to the
+ same cluster, or 0 if there isn't any. */
+ u32 next;
+};
+
+/* Small helper function to determine if the autocluster is set to manual
+ mode. */
+static bool is_cur_manual(const struct v4l2_ctrl *master)
+{
+ return master->is_auto && master->cur.val == master->manual_mode_value;
+}
+
+/* Same as above, but this checks against the new value instead of the
+ current value. */
+static bool is_new_manual(const struct v4l2_ctrl *master)
+{
+ return master->is_auto && master->val == master->manual_mode_value;
+}
+
+/* Returns NULL or a character pointer array containing the menu for
+ the given control ID. The pointer array ends with a NULL pointer.
+ An empty string signifies a menu entry that is invalid. This allows
+ drivers to disable certain options if they are not supported. */
+const char * const *v4l2_ctrl_get_menu(u32 id)
+{
+ static const char * const mpeg_audio_sampling_freq[] = {
+ "44.1 kHz",
+ "48 kHz",
+ "32 kHz",
+ NULL
+ };
+ static const char * const mpeg_audio_encoding[] = {
+ "MPEG-1/2 Layer I",
+ "MPEG-1/2 Layer II",
+ "MPEG-1/2 Layer III",
+ "MPEG-2/4 AAC",
+ "AC-3",
+ NULL
+ };
+ static const char * const mpeg_audio_l1_bitrate[] = {
+ "32 kbps",
+ "64 kbps",
+ "96 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "288 kbps",
+ "320 kbps",
+ "352 kbps",
+ "384 kbps",
+ "416 kbps",
+ "448 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_l2_bitrate[] = {
+ "32 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ "384 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_l3_bitrate[] = {
+ "32 kbps",
+ "40 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_ac3_bitrate[] = {
+ "32 kbps",
+ "40 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ "384 kbps",
+ "448 kbps",
+ "512 kbps",
+ "576 kbps",
+ "640 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_mode[] = {
+ "Stereo",
+ "Joint Stereo",
+ "Dual",
+ "Mono",
+ NULL
+ };
+ static const char * const mpeg_audio_mode_extension[] = {
+ "Bound 4",
+ "Bound 8",
+ "Bound 12",
+ "Bound 16",
+ NULL
+ };
+ static const char * const mpeg_audio_emphasis[] = {
+ "No Emphasis",
+ "50/15 us",
+ "CCITT J17",
+ NULL
+ };
+ static const char * const mpeg_audio_crc[] = {
+ "No CRC",
+ "16-bit CRC",
+ NULL
+ };
+ static const char * const mpeg_audio_dec_playback[] = {
+ "Auto",
+ "Stereo",
+ "Left",
+ "Right",
+ "Mono",
+ "Swapped Stereo",
+ NULL
+ };
+ static const char * const mpeg_video_encoding[] = {
+ "MPEG-1",
+ "MPEG-2",
+ "MPEG-4 AVC",
+ NULL
+ };
+ static const char * const mpeg_video_aspect[] = {
+ "1x1",
+ "4x3",
+ "16x9",
+ "2.21x1",
+ NULL
+ };
+ static const char * const mpeg_video_bitrate_mode[] = {
+ "Variable Bitrate",
+ "Constant Bitrate",
+ NULL
+ };
+ static const char * const mpeg_stream_type[] = {
+ "MPEG-2 Program Stream",
+ "MPEG-2 Transport Stream",
+ "MPEG-1 System Stream",
+ "MPEG-2 DVD-compatible Stream",
+ "MPEG-1 VCD-compatible Stream",
+ "MPEG-2 SVCD-compatible Stream",
+ NULL
+ };
+ static const char * const mpeg_stream_vbi_fmt[] = {
+ "No VBI",
+ "Private Packet, IVTV Format",
+ NULL
+ };
+ static const char * const camera_power_line_frequency[] = {
+ "Disabled",
+ "50 Hz",
+ "60 Hz",
+ "Auto",
+ NULL
+ };
+ static const char * const camera_exposure_auto[] = {
+ "Auto Mode",
+ "Manual Mode",
+ "Shutter Priority Mode",
+ "Aperture Priority Mode",
+ NULL
+ };
+ static const char * const camera_exposure_metering[] = {
+ "Average",
+ "Center Weighted",
+ "Spot",
+ "Matrix",
+ NULL
+ };
+ static const char * const camera_auto_focus_range[] = {
+ "Auto",
+ "Normal",
+ "Macro",
+ "Infinity",
+ NULL
+ };
+ static const char * const colorfx[] = {
+ "None",
+ "Black & White",
+ "Sepia",
+ "Negative",
+ "Emboss",
+ "Sketch",
+ "Sky Blue",
+ "Grass Green",
+ "Skin Whiten",
+ "Vivid",
+ "Aqua",
+ "Art Freeze",
+ "Silhouette",
+ "Solarization",
+ "Antique",
+ "Set Cb/Cr",
+ NULL
+ };
+ static const char * const auto_n_preset_white_balance[] = {
+ "Manual",
+ "Auto",
+ "Incandescent",
+ "Fluorescent",
+ "Fluorescent H",
+ "Horizon",
+ "Daylight",
+ "Flash",
+ "Cloudy",
+ "Shade",
+ NULL,
+ };
+ static const char * const camera_iso_sensitivity_auto[] = {
+ "Manual",
+ "Auto",
+ NULL
+ };
+ static const char * const scene_mode[] = {
+ "None",
+ "Backlight",
+ "Beach/Snow",
+ "Candle Light",
+ "Dusk/Dawn",
+ "Fall Colors",
+ "Fireworks",
+ "Landscape",
+ "Night",
+ "Party/Indoor",
+ "Portrait",
+ "Sports",
+ "Sunset",
+ "Text",
+ NULL
+ };
+ static const char * const tune_emphasis[] = {
+ "None",
+ "50 Microseconds",
+ "75 Microseconds",
+ NULL,
+ };
+ static const char * const header_mode[] = {
+ "Separate Buffer",
+ "Joined With 1st Frame",
+ NULL,
+ };
+ static const char * const multi_slice[] = {
+ "Single",
+ "Max Macroblocks",
+ "Max Bytes",
+ NULL,
+ };
+ static const char * const entropy_mode[] = {
+ "CAVLC",
+ "CABAC",
+ NULL,
+ };
+ static const char * const mpeg_h264_level[] = {
+ "1",
+ "1b",
+ "1.1",
+ "1.2",
+ "1.3",
+ "2",
+ "2.1",
+ "2.2",
+ "3",
+ "3.1",
+ "3.2",
+ "4",
+ "4.1",
+ "4.2",
+ "5",
+ "5.1",
+ NULL,
+ };
+ static const char * const h264_loop_filter[] = {
+ "Enabled",
+ "Disabled",
+ "Disabled at Slice Boundary",
+ NULL,
+ };
+ static const char * const h264_profile[] = {
+ "Baseline",
+ "Constrained Baseline",
+ "Main",
+ "Extended",
+ "High",
+ "High 10",
+ "High 422",
+ "High 444 Predictive",
+ "High 10 Intra",
+ "High 422 Intra",
+ "High 444 Intra",
+ "CAVLC 444 Intra",
+ "Scalable Baseline",
+ "Scalable High",
+ "Scalable High Intra",
+ "Multiview High",
+ NULL,
+ };
+ static const char * const vui_sar_idc[] = {
+ "Unspecified",
+ "1:1",
+ "12:11",
+ "10:11",
+ "16:11",
+ "40:33",
+ "24:11",
+ "20:11",
+ "32:11",
+ "80:33",
+ "18:11",
+ "15:11",
+ "64:33",
+ "160:99",
+ "4:3",
+ "3:2",
+ "2:1",
+ "Extended SAR",
+ NULL,
+ };
+ static const char * const h264_fp_arrangement_type[] = {
+ "Checkerboard",
+ "Column",
+ "Row",
+ "Side by Side",
+ "Top Bottom",
+ "Temporal",
+ NULL,
+ };
+ static const char * const h264_fmo_map_type[] = {
+ "Interleaved Slices",
+ "Scattered Slices",
+ "Foreground with Leftover",
+ "Box Out",
+ "Raster Scan",
+ "Wipe Scan",
+ "Explicit",
+ NULL,
+ };
+ static const char * const mpeg_mpeg4_level[] = {
+ "0",
+ "0b",
+ "1",
+ "2",
+ "3",
+ "3b",
+ "4",
+ "5",
+ NULL,
+ };
+ static const char * const mpeg4_profile[] = {
+ "Simple",
+ "Advanced Simple",
+ "Core",
+ "Simple Scalable",
+ "Advanced Coding Efficiency",
+ NULL,
+ };
+
+ static const char * const vpx_golden_frame_sel[] = {
+ "Use Previous Frame",
+ "Use Previous Specific Frame",
+ NULL,
+ };
+
+ static const char * const flash_led_mode[] = {
+ "Off",
+ "Flash",
+ "Torch",
+ NULL,
+ };
+ static const char * const flash_strobe_source[] = {
+ "Software",
+ "External",
+ NULL,
+ };
+
+ static const char * const jpeg_chroma_subsampling[] = {
+ "4:4:4",
+ "4:2:2",
+ "4:2:0",
+ "4:1:1",
+ "4:1:0",
+ "Gray",
+ NULL,
+ };
+ static const char * const dv_tx_mode[] = {
+ "DVI-D",
+ "HDMI",
+ NULL,
+ };
+ static const char * const dv_rgb_range[] = {
+ "Automatic",
+ "RGB limited range (16-235)",
+ "RGB full range (0-255)",
+ NULL,
+ };
+ static const char * const detect_md_mode[] = {
+ "Disabled",
+ "Global",
+ "Threshold Grid",
+ "Region Grid",
+ NULL,
+ };
+
+
+ switch (id) {
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
+ return mpeg_audio_sampling_freq;
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ return mpeg_audio_encoding;
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
+ return mpeg_audio_l1_bitrate;
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
+ return mpeg_audio_l2_bitrate;
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
+ return mpeg_audio_l3_bitrate;
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
+ return mpeg_audio_ac3_bitrate;
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ return mpeg_audio_mode;
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
+ return mpeg_audio_mode_extension;
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS:
+ return mpeg_audio_emphasis;
+ case V4L2_CID_MPEG_AUDIO_CRC:
+ return mpeg_audio_crc;
+ case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
+ case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
+ return mpeg_audio_dec_playback;
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ return mpeg_video_encoding;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ return mpeg_video_aspect;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ return mpeg_video_bitrate_mode;
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ return mpeg_stream_type;
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ return mpeg_stream_vbi_fmt;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ return camera_power_line_frequency;
+ case V4L2_CID_EXPOSURE_AUTO:
+ return camera_exposure_auto;
+ case V4L2_CID_EXPOSURE_METERING:
+ return camera_exposure_metering;
+ case V4L2_CID_AUTO_FOCUS_RANGE:
+ return camera_auto_focus_range;
+ case V4L2_CID_COLORFX:
+ return colorfx;
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ return auto_n_preset_white_balance;
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ return camera_iso_sensitivity_auto;
+ case V4L2_CID_SCENE_MODE:
+ return scene_mode;
+ case V4L2_CID_TUNE_PREEMPHASIS:
+ return tune_emphasis;
+ case V4L2_CID_TUNE_DEEMPHASIS:
+ return tune_emphasis;
+ case V4L2_CID_FLASH_LED_MODE:
+ return flash_led_mode;
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ return flash_strobe_source;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ return header_mode;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ return multi_slice;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ return entropy_mode;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ return mpeg_h264_level;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ return h264_loop_filter;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ return h264_profile;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ return vui_sar_idc;
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
+ return h264_fp_arrangement_type;
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
+ return h264_fmo_map_type;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ return mpeg_mpeg4_level;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ return mpeg4_profile;
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ return vpx_golden_frame_sel;
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+ return jpeg_chroma_subsampling;
+ case V4L2_CID_DV_TX_MODE:
+ return dv_tx_mode;
+ case V4L2_CID_DV_TX_RGB_RANGE:
+ case V4L2_CID_DV_RX_RGB_RANGE:
+ return dv_rgb_range;
+ case V4L2_CID_DETECT_MD_MODE:
+ return detect_md_mode;
+
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_menu);
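+
+/*
+ * Example (illustrative sketch, not part of this file): walking the
+ * NULL-terminated menu of a control; empty strings mark entries the
+ * driver has disabled:
+ *
+ *	const char * const *menu = v4l2_ctrl_get_menu(V4L2_CID_MPEG_AUDIO_MODE);
+ *	int i;
+ *
+ *	for (i = 0; menu && menu[i]; i++)
+ *		if (menu[i][0])
+ *			pr_info("menu item %d: %s\n", i, menu[i]);
+ */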
+
+#define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); arr; })
+/*
+ * Returns NULL or an s64 type array containing the menu for given
+ * control ID. The total number of the menu items is returned in @len.
+ */
+const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
+{
+ static const s64 qmenu_int_vpx_num_partitions[] = {
+ 1, 2, 4, 8,
+ };
+
+ static const s64 qmenu_int_vpx_num_ref_frames[] = {
+ 1, 2, 3,
+ };
+
+ switch (id) {
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len);
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
+ return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len);
+ default:
+ *len = 0;
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_int_menu);
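+
+/*
+ * Example (illustrative sketch, not part of this file):
+ *
+ *	u32 len;
+ *	const s64 *menu;
+ *
+ *	menu = v4l2_ctrl_get_int_menu(V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS,
+ *				      &len);
+ *	(menu now points at { 1, 2, 4, 8 } and len == 4)
+ */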
+
+/* Return the control name. */
+const char *v4l2_ctrl_get_name(u32 id)
+{
+ switch (id) {
+ /* USER controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_USER_CLASS: return "User Controls";
+ case V4L2_CID_BRIGHTNESS: return "Brightness";
+ case V4L2_CID_CONTRAST: return "Contrast";
+ case V4L2_CID_SATURATION: return "Saturation";
+ case V4L2_CID_HUE: return "Hue";
+ case V4L2_CID_AUDIO_VOLUME: return "Volume";
+ case V4L2_CID_AUDIO_BALANCE: return "Balance";
+ case V4L2_CID_AUDIO_BASS: return "Bass";
+ case V4L2_CID_AUDIO_TREBLE: return "Treble";
+ case V4L2_CID_AUDIO_MUTE: return "Mute";
+ case V4L2_CID_AUDIO_LOUDNESS: return "Loudness";
+ case V4L2_CID_BLACK_LEVEL: return "Black Level";
+ case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic";
+ case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance";
+ case V4L2_CID_RED_BALANCE: return "Red Balance";
+ case V4L2_CID_BLUE_BALANCE: return "Blue Balance";
+ case V4L2_CID_GAMMA: return "Gamma";
+ case V4L2_CID_EXPOSURE: return "Exposure";
+ case V4L2_CID_AUTOGAIN: return "Gain, Automatic";
+ case V4L2_CID_GAIN: return "Gain";
+ case V4L2_CID_HFLIP: return "Horizontal Flip";
+ case V4L2_CID_VFLIP: return "Vertical Flip";
+ case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
+ case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
+ case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
+ case V4L2_CID_SHARPNESS: return "Sharpness";
+ case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
+ case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
+ case V4L2_CID_COLOR_KILLER: return "Color Killer";
+ case V4L2_CID_COLORFX: return "Color Effects";
+ case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic";
+ case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter";
+ case V4L2_CID_ROTATE: return "Rotate";
+ case V4L2_CID_BG_COLOR: return "Background Color";
+ case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
+ case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
+ case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
+ case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
+ case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr";
+
+ /* Codec controls */
+ /* The MPEG controls apply to all codecs, not just MPEG;
+ * the 'MPEG' part of the defines is historical */
+ /* Keep the order of the 'case's the same as in videodev2.h! */
+ case V4L2_CID_MPEG_CLASS: return "Codec Controls";
+ case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type";
+ case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID";
+ case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID";
+ case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID";
+ case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format";
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency";
+ case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding";
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate";
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate";
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate";
+ case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode";
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension";
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis";
+ case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC";
+ case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute";
+ case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate";
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate";
+ case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: return "Audio Playback";
+ case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback";
+ case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding";
+ case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect";
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames";
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size";
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure";
+ case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown";
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode";
+ case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate";
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate";
+ case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
+ case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
+ case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs";
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
+ case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics";
+ case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period";
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI";
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. Frame as Frame0";
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. of Consecutive MBs";
+ case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering";
+ case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER: return "H264 Number of HC Layers";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP:
+ return "H264 Set QP Value for HC Layers";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method";
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
+ case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS";
+ case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count";
+ case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control";
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range";
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
+ case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
+
+ /* VPX controls */
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
+ case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable";
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame";
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range";
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control";
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period";
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator";
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_PROFILE: return "VPX Profile";
+
+ /* CAMERA controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_CAMERA_CLASS: return "Camera Controls";
+ case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure";
+ case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute";
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate";
+ case V4L2_CID_PAN_RELATIVE: return "Pan, Relative";
+ case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative";
+ case V4L2_CID_PAN_RESET: return "Pan, Reset";
+ case V4L2_CID_TILT_RESET: return "Tilt, Reset";
+ case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute";
+ case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute";
+ case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
+ case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
+ case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous";
+ case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
+ case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
+ case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
+ case V4L2_CID_PRIVACY: return "Privacy";
+ case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
+ case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
+ case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias";
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset";
+ case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range";
+ case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization";
+ case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity";
+ case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto";
+ case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode";
+ case V4L2_CID_SCENE_MODE: return "Scene Mode";
+ case V4L2_CID_3A_LOCK: return "3A Lock";
+ case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start";
+ case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop";
+ case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status";
+ case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
+ case V4L2_CID_PAN_SPEED: return "Pan, Speed";
+ case V4L2_CID_TILT_SPEED: return "Tilt, Speed";
+
+ /* FM Radio Modulator controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls";
+ case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation";
+ case V4L2_CID_RDS_TX_PI: return "RDS Program ID";
+ case V4L2_CID_RDS_TX_PTY: return "RDS Program Type";
+ case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name";
+ case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text";
+ case V4L2_CID_RDS_TX_MONO_STEREO: return "RDS Stereo";
+ case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: return "RDS Artificial Head";
+ case V4L2_CID_RDS_TX_COMPRESSED: return "RDS Compressed";
+ case V4L2_CID_RDS_TX_DYNAMIC_PTY: return "RDS Dynamic PTY";
+ case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement";
+ case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: return "RDS Traffic Program";
+ case V4L2_CID_RDS_TX_MUSIC_SPEECH: return "RDS Music";
+ case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: return "RDS Enable Alt Frequencies";
+ case V4L2_CID_RDS_TX_ALT_FREQS: return "RDS Alternate Frequencies";
+ case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
+ case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
+ case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
+ case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
+ case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
+ case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
+ case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time";
+ case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
+ case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
+ case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
+ case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis";
+ case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
+ case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
+
+ /* Flash controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_FLASH_CLASS: return "Flash Controls";
+ case V4L2_CID_FLASH_LED_MODE: return "LED Mode";
+ case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source";
+ case V4L2_CID_FLASH_STROBE: return "Strobe";
+ case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe";
+ case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status";
+ case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout";
+ case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode";
+ case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode";
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
+ case V4L2_CID_FLASH_FAULT: return "Faults";
+ case V4L2_CID_FLASH_CHARGE: return "Charge";
+ case V4L2_CID_FLASH_READY: return "Ready to Strobe";
+
+ /* JPEG encoder controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_JPEG_CLASS: return "JPEG Compression Controls";
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return "Chroma Subsampling";
+ case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval";
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality";
+ case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers";
+
+ /* Image source controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls";
+ case V4L2_CID_VBLANK: return "Vertical Blanking";
+ case V4L2_CID_HBLANK: return "Horizontal Blanking";
+ case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain";
+ case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value";
+ case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value";
+ case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value";
+ case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value";
+
+ /* Image processing controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls";
+ case V4L2_CID_LINK_FREQ: return "Link Frequency";
+ case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
+ case V4L2_CID_TEST_PATTERN: return "Test Pattern";
+
+ /* DV controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_DV_CLASS: return "Digital Video Controls";
+ case V4L2_CID_DV_TX_HOTPLUG: return "Hotplug Present";
+ case V4L2_CID_DV_TX_RXSENSE: return "RxSense Present";
+ case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present";
+ case V4L2_CID_DV_TX_MODE: return "Transmit Mode";
+ case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range";
+ case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present";
+ case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range";
+
+ case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls";
+ case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis";
+ case V4L2_CID_RDS_RECEPTION: return "RDS Reception";
+ case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls";
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto";
+ case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain";
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto";
+ case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain";
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto";
+ case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain";
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto";
+ case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth";
+ case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock";
+ case V4L2_CID_RDS_RX_PTY: return "RDS Program Type";
+ case V4L2_CID_RDS_RX_PS_NAME: return "RDS PS Name";
+ case V4L2_CID_RDS_RX_RADIO_TEXT: return "RDS Radio Text";
+ case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement";
+ case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: return "RDS Traffic Program";
+ case V4L2_CID_RDS_RX_MUSIC_SPEECH: return "RDS Music";
+
+ /* Detection controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_DETECT_CLASS: return "Detection Controls";
+ case V4L2_CID_DETECT_MD_MODE: return "Motion Detection Mode";
+ case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: return "MD Global Threshold";
+ case V4L2_CID_DETECT_MD_THRESHOLD_GRID: return "MD Threshold Grid";
+ case V4L2_CID_DETECT_MD_REGION_GRID: return "MD Region Grid";
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_name);
+
+void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
+ s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags)
+{
+ *name = v4l2_ctrl_get_name(id);
+ *flags = 0;
+
+ switch (id) {
+ case V4L2_CID_AUDIO_MUTE:
+ case V4L2_CID_AUDIO_LOUDNESS:
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ case V4L2_CID_AUTOGAIN:
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ case V4L2_CID_HUE_AUTO:
+ case V4L2_CID_CHROMA_AGC:
+ case V4L2_CID_COLOR_KILLER:
+ case V4L2_CID_AUTOBRIGHTNESS:
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ case V4L2_CID_MPEG_VIDEO_MUTE:
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
+ case V4L2_CID_MPEG_VIDEO_PULLDOWN:
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
+ case V4L2_CID_FOCUS_AUTO:
+ case V4L2_CID_PRIVACY:
+ case V4L2_CID_AUDIO_LIMITER_ENABLED:
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
+ case V4L2_CID_PILOT_TONE_ENABLED:
+ case V4L2_CID_ILLUMINATORS_1:
+ case V4L2_CID_ILLUMINATORS_2:
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_FLASH_CHARGE:
+ case V4L2_CID_FLASH_READY:
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
+ case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER:
+ case V4L2_CID_WIDE_DYNAMIC_RANGE:
+ case V4L2_CID_IMAGE_STABILIZATION:
+ case V4L2_CID_RDS_RECEPTION:
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
+ case V4L2_CID_RDS_TX_MONO_STEREO:
+ case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD:
+ case V4L2_CID_RDS_TX_COMPRESSED:
+ case V4L2_CID_RDS_TX_DYNAMIC_PTY:
+ case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT:
+ case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM:
+ case V4L2_CID_RDS_TX_MUSIC_SPEECH:
+ case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE:
+ case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
+ case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
+ case V4L2_CID_RDS_RX_MUSIC_SPEECH:
+ *type = V4L2_CTRL_TYPE_BOOLEAN;
+ *min = 0;
+ *max = *step = 1;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
+ case V4L2_CID_PAN_RESET:
+ case V4L2_CID_TILT_RESET:
+ case V4L2_CID_FLASH_STROBE:
+ case V4L2_CID_FLASH_STROBE_STOP:
+ case V4L2_CID_AUTO_FOCUS_START:
+ case V4L2_CID_AUTO_FOCUS_STOP:
+ *type = V4L2_CTRL_TYPE_BUTTON;
+ *flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ *min = *max = *step = *def = 0;
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS:
+ case V4L2_CID_MPEG_AUDIO_CRC:
+ case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
+ case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_AUTO_FOCUS_RANGE:
+ case V4L2_CID_COLORFX:
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ case V4L2_CID_TUNE_PREEMPHASIS:
+ case V4L2_CID_FLASH_LED_MODE:
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ case V4L2_CID_EXPOSURE_METERING:
+ case V4L2_CID_SCENE_MODE:
+ case V4L2_CID_DV_TX_MODE:
+ case V4L2_CID_DV_TX_RGB_RANGE:
+ case V4L2_CID_DV_RX_RGB_RANGE:
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_TUNE_DEEMPHASIS:
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ case V4L2_CID_DETECT_MD_MODE:
+ *type = V4L2_CTRL_TYPE_MENU;
+ break;
+ case V4L2_CID_LINK_FREQ:
+ *type = V4L2_CTRL_TYPE_INTEGER_MENU;
+ break;
+ case V4L2_CID_RDS_TX_PS_NAME:
+ case V4L2_CID_RDS_TX_RADIO_TEXT:
+ case V4L2_CID_RDS_RX_PS_NAME:
+ case V4L2_CID_RDS_RX_RADIO_TEXT:
+ *type = V4L2_CTRL_TYPE_STRING;
+ break;
+ case V4L2_CID_ISO_SENSITIVITY:
+ case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
+ *type = V4L2_CTRL_TYPE_INTEGER_MENU;
+ break;
+ case V4L2_CID_USER_CLASS:
+ case V4L2_CID_CAMERA_CLASS:
+ case V4L2_CID_MPEG_CLASS:
+ case V4L2_CID_FM_TX_CLASS:
+ case V4L2_CID_FLASH_CLASS:
+ case V4L2_CID_JPEG_CLASS:
+ case V4L2_CID_IMAGE_SOURCE_CLASS:
+ case V4L2_CID_IMAGE_PROC_CLASS:
+ case V4L2_CID_DV_CLASS:
+ case V4L2_CID_FM_RX_CLASS:
+ case V4L2_CID_RF_TUNER_CLASS:
+ case V4L2_CID_DETECT_CLASS:
+ *type = V4L2_CTRL_TYPE_CTRL_CLASS;
+ /* You can neither read nor write these */
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
+ *min = *max = *step = *def = 0;
+ break;
+ case V4L2_CID_BG_COLOR:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *step = 1;
+ *min = 0;
+ /* Max is calculated as RGB888 that is 2^24 */
+ *max = 0xFFFFFF;
+ break;
+ case V4L2_CID_FLASH_FAULT:
+ case V4L2_CID_JPEG_ACTIVE_MARKER:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_AUTO_FOCUS_STATUS:
+ case V4L2_CID_DV_TX_HOTPLUG:
+ case V4L2_CID_DV_TX_RXSENSE:
+ case V4L2_CID_DV_TX_EDID_PRESENT:
+ case V4L2_CID_DV_RX_POWER_PRESENT:
+ *type = V4L2_CTRL_TYPE_BITMASK;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DEC_PTS:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY;
+ *min = *def = 0;
+ *max = 0x1ffffffffLL;
+ *step = 1;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DEC_FRAME:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY;
+ *min = *def = 0;
+ *max = 0x7fffffffffffffffLL;
+ *step = 1;
+ break;
+ case V4L2_CID_PIXEL_RATE:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_DETECT_MD_REGION_GRID:
+ *type = V4L2_CTRL_TYPE_U8;
+ break;
+ case V4L2_CID_DETECT_MD_THRESHOLD_GRID:
+ *type = V4L2_CTRL_TYPE_U16;
+ break;
+ case V4L2_CID_RDS_TX_ALT_FREQS:
+ *type = V4L2_CTRL_TYPE_U32;
+ break;
+ default:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
+ }
+ switch (id) {
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ *flags |= V4L2_CTRL_FLAG_UPDATE;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_AUDIO_BALANCE:
+ case V4L2_CID_AUDIO_BASS:
+ case V4L2_CID_AUDIO_TREBLE:
+ case V4L2_CID_BRIGHTNESS:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_HUE:
+ case V4L2_CID_RED_BALANCE:
+ case V4L2_CID_BLUE_BALANCE:
+ case V4L2_CID_GAMMA:
+ case V4L2_CID_SHARPNESS:
+ case V4L2_CID_CHROMA_GAIN:
+ case V4L2_CID_RDS_TX_DEVIATION:
+ case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
+ case V4L2_CID_AUDIO_LIMITER_DEVIATION:
+ case V4L2_CID_AUDIO_COMPRESSION_GAIN:
+ case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD:
+ case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME:
+ case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME:
+ case V4L2_CID_PILOT_TONE_DEVIATION:
+ case V4L2_CID_PILOT_TONE_FREQUENCY:
+ case V4L2_CID_TUNE_POWER_LEVEL:
+ case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
+ case V4L2_CID_RF_TUNER_LNA_GAIN:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN:
+ case V4L2_CID_RF_TUNER_IF_GAIN:
+ case V4L2_CID_RF_TUNER_BANDWIDTH:
+ case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD:
+ *flags |= V4L2_CTRL_FLAG_SLIDER;
+ break;
+ case V4L2_CID_PAN_RELATIVE:
+ case V4L2_CID_TILT_RELATIVE:
+ case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_IRIS_RELATIVE:
+ case V4L2_CID_ZOOM_RELATIVE:
+ *flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ break;
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_AUTO_FOCUS_STATUS:
+ case V4L2_CID_FLASH_READY:
+ case V4L2_CID_DV_TX_HOTPLUG:
+ case V4L2_CID_DV_TX_RXSENSE:
+ case V4L2_CID_DV_TX_EDID_PRESENT:
+ case V4L2_CID_DV_RX_POWER_PRESENT:
+ case V4L2_CID_RDS_RX_PTY:
+ case V4L2_CID_RDS_RX_PS_NAME:
+ case V4L2_CID_RDS_RX_RADIO_TEXT:
+ case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
+ case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
+ case V4L2_CID_RDS_RX_MUSIC_SPEECH:
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
+ *flags |= V4L2_CTRL_FLAG_VOLATILE;
+ break;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_fill);
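+
+/*
+ * Example (illustrative only): querying the framework defaults for a
+ * standard control. For V4L2_CID_AUDIO_MUTE the switch above yields a
+ * boolean with min 0 and max == step == 1:
+ *
+ *	const char *name;
+ *	enum v4l2_ctrl_type type;
+ *	s64 min, max, def;
+ *	u64 step;
+ *	u32 flags;
+ *
+ *	v4l2_ctrl_fill(V4L2_CID_AUDIO_MUTE, &name, &type,
+ *		       &min, &max, &step, &def, &flags);
+ *
+ * Note that def is left untouched for booleans; the caller supplies it.
+ */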
+
+static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
+{
+ memset(ev->reserved, 0, sizeof(ev->reserved));
+ ev->type = V4L2_EVENT_CTRL;
+ ev->id = ctrl->id;
+ ev->u.ctrl.changes = changes;
+ ev->u.ctrl.type = ctrl->type;
+ ev->u.ctrl.flags = ctrl->flags;
+ if (ctrl->is_ptr)
+ ev->u.ctrl.value64 = 0;
+ else
+ ev->u.ctrl.value64 = *ctrl->p_cur.p_s64;
+ ev->u.ctrl.minimum = ctrl->minimum;
+ ev->u.ctrl.maximum = ctrl->maximum;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU ||
+     ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ ev->u.ctrl.step = 1;
+ else
+ ev->u.ctrl.step = ctrl->step;
+ ev->u.ctrl.default_value = ctrl->default_value;
+}
+
+static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
+{
+ struct v4l2_event ev;
+ struct v4l2_subscribed_event *sev;
+
+ if (list_empty(&ctrl->ev_subs))
+ return;
+ fill_event(&ev, ctrl, changes);
+
+ list_for_each_entry(sev, &ctrl->ev_subs, node)
+ if (sev->fh != fh ||
+ (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
+ v4l2_event_queue_fh(sev->fh, &ev);
+}
+
+static bool std_equal(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr1,
+ union v4l2_ctrl_ptr ptr2)
+{
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_BUTTON:
+ return false;
+ case V4L2_CTRL_TYPE_STRING:
+ idx *= ctrl->elem_size;
+ /* strings are always 0-terminated */
+ return !strcmp(ptr1.p_char + idx, ptr2.p_char + idx);
+ case V4L2_CTRL_TYPE_INTEGER64:
+ return ptr1.p_s64[idx] == ptr2.p_s64[idx];
+ case V4L2_CTRL_TYPE_U8:
+ return ptr1.p_u8[idx] == ptr2.p_u8[idx];
+ case V4L2_CTRL_TYPE_U16:
+ return ptr1.p_u16[idx] == ptr2.p_u16[idx];
+ case V4L2_CTRL_TYPE_U32:
+ return ptr1.p_u32[idx] == ptr2.p_u32[idx];
+ default:
+ if (ctrl->is_int)
+ return ptr1.p_s32[idx] == ptr2.p_s32[idx];
+ idx *= ctrl->elem_size;
+ return !memcmp(ptr1.p + idx, ptr2.p + idx, ctrl->elem_size);
+ }
+}
+
+static void std_init(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_STRING:
+ idx *= ctrl->elem_size;
+ memset(ptr.p_char + idx, ' ', ctrl->minimum);
+ ptr.p_char[idx + ctrl->minimum] = '\0';
+ break;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ ptr.p_s64[idx] = ctrl->default_value;
+ break;
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ ptr.p_s32[idx] = ctrl->default_value;
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ ptr.p_u8[idx] = ctrl->default_value;
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ ptr.p_u16[idx] = ctrl->default_value;
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ ptr.p_u32[idx] = ctrl->default_value;
+ break;
+ default:
+ idx *= ctrl->elem_size;
+ memset(ptr.p + idx, 0, ctrl->elem_size);
+ break;
+ }
+}
+
+static void std_log(const struct v4l2_ctrl *ctrl)
+{
+ union v4l2_ctrl_ptr ptr = ctrl->p_cur;
+
+ if (ctrl->is_array) {
+ unsigned i;
+
+ for (i = 0; i < ctrl->nr_of_dims; i++)
+ pr_cont("[%u]", ctrl->dims[i]);
+ pr_cont(" ");
+ }
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ pr_cont("%d", *ptr.p_s32);
+ break;
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ pr_cont("%s", *ptr.p_s32 ? "true" : "false");
+ break;
+ case V4L2_CTRL_TYPE_MENU:
+ pr_cont("%s", ctrl->qmenu[*ptr.p_s32]);
+ break;
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ pr_cont("%lld", ctrl->qmenu_int[*ptr.p_s32]);
+ break;
+ case V4L2_CTRL_TYPE_BITMASK:
+ pr_cont("0x%08x", *ptr.p_s32);
+ break;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ pr_cont("%lld", *ptr.p_s64);
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ pr_cont("%s", ptr.p_char);
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ pr_cont("%u", (unsigned)*ptr.p_u8);
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ pr_cont("%u", (unsigned)*ptr.p_u16);
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ pr_cont("%u", (unsigned)*ptr.p_u32);
+ break;
+ default:
+ pr_cont("unknown type %d", ctrl->type);
+ break;
+ }
+}
+
+/*
+ * Round towards the closest legal value. Be careful when we are
+ * close to the maximum range of the control type to prevent
+ * wrap-arounds.
+ */
+#define ROUND_TO_RANGE(val, offset_type, ctrl) \
+({ \
+ offset_type offset; \
+ if ((ctrl)->maximum >= 0 && \
+ val >= (ctrl)->maximum - (s32)((ctrl)->step / 2)) \
+ val = (ctrl)->maximum; \
+ else \
+ val += (s32)((ctrl)->step / 2); \
+ val = clamp_t(typeof(val), val, \
+ (ctrl)->minimum, (ctrl)->maximum); \
+ offset = (val) - (ctrl)->minimum; \
+ offset = (ctrl)->step * (offset / (u32)(ctrl)->step); \
+ val = (ctrl)->minimum + offset; \
+ 0; \
+})
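+
+/*
+ * Worked example (illustrative): for a control with minimum 0, maximum
+ * 255 and step 4, ROUND_TO_RANGE() maps val 9 -> 8 and val 10 -> 12:
+ * half the step (2) is added first, the result is clamped to the range,
+ * then snapped down to the nearest multiple of the step above the
+ * minimum. The macro itself evaluates to 0, so callers can simply
+ * 'return ROUND_TO_RANGE(...);' on success.
+ */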
+
+/* Validate a new control */
+static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ size_t len;
+ u64 offset;
+ s64 val;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl);
+ case V4L2_CTRL_TYPE_INTEGER64:
+ /*
+ * We can't use the ROUND_TO_RANGE macro here because the
+ * u64 division needs special care (do_div).
+ */
+ val = ptr.p_s64[idx];
+ if (ctrl->maximum >= 0 && val >= ctrl->maximum - (s64)(ctrl->step / 2))
+ val = ctrl->maximum;
+ else
+ val += (s64)(ctrl->step / 2);
+ val = clamp_t(s64, val, ctrl->minimum, ctrl->maximum);
+ offset = val - ctrl->minimum;
+ do_div(offset, ctrl->step);
+ ptr.p_s64[idx] = ctrl->minimum + offset * ctrl->step;
+ return 0;
+ case V4L2_CTRL_TYPE_U8:
+ return ROUND_TO_RANGE(ptr.p_u8[idx], u8, ctrl);
+ case V4L2_CTRL_TYPE_U16:
+ return ROUND_TO_RANGE(ptr.p_u16[idx], u16, ctrl);
+ case V4L2_CTRL_TYPE_U32:
+ return ROUND_TO_RANGE(ptr.p_u32[idx], u32, ctrl);
+
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ ptr.p_s32[idx] = !!ptr.p_s32[idx];
+ return 0;
+
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum)
+ return -ERANGE;
+ if (ctrl->menu_skip_mask & (1 << ptr.p_s32[idx]))
+ return -EINVAL;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
+ ctrl->qmenu[ptr.p_s32[idx]][0] == '\0')
+ return -EINVAL;
+ return 0;
+
+ case V4L2_CTRL_TYPE_BITMASK:
+ ptr.p_s32[idx] &= ctrl->maximum;
+ return 0;
+
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ ptr.p_s32[idx] = 0;
+ return 0;
+
+ case V4L2_CTRL_TYPE_STRING:
+ idx *= ctrl->elem_size;
+ len = strlen(ptr.p_char + idx);
+ if (len < ctrl->minimum)
+ return -ERANGE;
+ if ((len - (u32)ctrl->minimum) % (u32)ctrl->step)
+ return -ERANGE;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ctrl_type_ops std_type_ops = {
+ .equal = std_equal,
+ .init = std_init,
+ .log = std_log,
+ .validate = std_validate,
+};
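+
+/*
+ * A driver can substitute its own type ops via the type_ops field of
+ * struct v4l2_ctrl_config passed to v4l2_ctrl_new_custom(). Note that
+ * type_ops replaces std_type_ops wholesale (see v4l2_ctrl_new() below),
+ * so all four ops must be provided. Sketch only; the foo_* helpers are
+ * hypothetical:
+ *
+ *	static const struct v4l2_ctrl_type_ops foo_type_ops = {
+ *		.equal = foo_equal,
+ *		.init = foo_init,
+ *		.log = foo_log,
+ *		.validate = foo_validate,
+ *	};
+ */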
+
+/* Helper function: copy the given control value back to the caller */
+static int ptr_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr)
+{
+ u32 len;
+
+ if (ctrl->is_ptr && !ctrl->is_string)
+ return copy_to_user(c->ptr, ptr.p, c->size) ?
+ -EFAULT : 0;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_STRING:
+ len = strlen(ptr.p_char);
+ if (c->size < len + 1) {
+ c->size = ctrl->elem_size;
+ return -ENOSPC;
+ }
+ return copy_to_user(c->string, ptr.p_char, len + 1) ?
+ -EFAULT : 0;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ c->value64 = *ptr.p_s64;
+ break;
+ default:
+ c->value = *ptr.p_s32;
+ break;
+ }
+ return 0;
+}
+
+/* Helper function: copy the current control value back to the caller */
+static int cur_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl)
+{
+ return ptr_to_user(c, ctrl, ctrl->p_cur);
+}
+
+/* Helper function: copy the new control value back to the caller */
+static int new_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl)
+{
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
+/* Helper function: copy the caller-provided value to the given control value */
+static int user_to_ptr(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr)
+{
+ int ret;
+ u32 size;
+
+ ctrl->is_new = 1;
+ if (ctrl->is_ptr && !ctrl->is_string) {
+ unsigned idx;
+
+ ret = copy_from_user(ptr.p, c->ptr, c->size) ? -EFAULT : 0;
+ if (ret || !ctrl->is_array)
+ return ret;
+ for (idx = c->size / ctrl->elem_size; idx < ctrl->elems; idx++)
+ ctrl->type_ops->init(ctrl, idx, ptr);
+ return 0;
+ }
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER64:
+ *ptr.p_s64 = c->value64;
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ size = c->size;
+ if (size == 0)
+ return -ERANGE;
+ if (size > ctrl->maximum + 1)
+ size = ctrl->maximum + 1;
+ ret = copy_from_user(ptr.p_char, c->string, size) ? -EFAULT : 0;
+ if (!ret) {
+ char last = ptr.p_char[size - 1];
+
+ ptr.p_char[size - 1] = 0;
+ /* If the string was longer than ctrl->maximum,
+ then return an error. */
+ if (strlen(ptr.p_char) == ctrl->maximum && last)
+ return -ERANGE;
+ }
+ return ret;
+ default:
+ *ptr.p_s32 = c->value;
+ break;
+ }
+ return 0;
+}
+
+/* Helper function: copy the caller-provided value as the new control value */
+static int user_to_new(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl)
+{
+ return user_to_ptr(c, ctrl, ctrl->p_new);
+}
+
+/* Copy one control value to another. */
+static void ptr_to_ptr(struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to)
+{
+ if (ctrl == NULL)
+ return;
+ memcpy(to.p, from.p, ctrl->elems * ctrl->elem_size);
+}
+
+/* Copy the new value to the current value. */
+static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
+{
+ bool changed;
+
+ if (ctrl == NULL)
+ return;
+
+ /* has_changed is set by cluster_changed */
+ changed = ctrl->has_changed;
+ if (changed)
+ ptr_to_ptr(ctrl, ctrl->p_new, ctrl->p_cur);
+
+ if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) {
+ /* Note: CH_FLAGS is only set for auto clusters. */
+ ctrl->flags &=
+ ~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
+ if (!is_cur_manual(ctrl->cluster[0])) {
+ ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ if (ctrl->cluster[0]->has_volatiles)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ }
+ fh = NULL;
+ }
+ if (changed || ch_flags) {
+ /* If a control was changed that was not one of the controls
+ modified by the application, then send the event to all. */
+ if (!ctrl->is_new)
+ fh = NULL;
+ send_event(fh, ctrl,
+ (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags);
+ if (ctrl->call_notify && changed && ctrl->handler->notify)
+ ctrl->handler->notify(ctrl, ctrl->handler->notify_priv);
+ }
+}
+
+/* Copy the current value to the new value */
+static void cur_to_new(struct v4l2_ctrl *ctrl)
+{
+ if (ctrl == NULL)
+ return;
+ ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new);
+}
+
+/* Return non-zero if one or more of the controls in the cluster has a new
+ value that differs from the current value. */
+static int cluster_changed(struct v4l2_ctrl *master)
+{
+ bool changed = false;
+ unsigned idx;
+ int i;
+
+ for (i = 0; i < master->ncontrols; i++) {
+ struct v4l2_ctrl *ctrl = master->cluster[i];
+ bool ctrl_changed = false;
+
+ if (ctrl == NULL)
+ continue;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE)
+ changed = ctrl_changed = true;
+
+ /*
+ * Set has_changed to false to avoid generating
+ * the event V4L2_EVENT_CTRL_CH_VALUE
+ */
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ ctrl->has_changed = false;
+ continue;
+ }
+
+ for (idx = 0; !ctrl_changed && idx < ctrl->elems; idx++)
+ ctrl_changed = !ctrl->type_ops->equal(ctrl, idx,
+ ctrl->p_cur, ctrl->p_new);
+ ctrl->has_changed = ctrl_changed;
+ changed |= ctrl->has_changed;
+ }
+ return changed;
+}
+
+/* Control range checking */
+static int check_range(enum v4l2_ctrl_type type,
+ s64 min, s64 max, u64 step, s64 def)
+{
+ switch (type) {
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ if (step != 1 || max > 1 || min < 0)
+ return -ERANGE;
+ /* fall through */
+ case V4L2_CTRL_TYPE_U8:
+ case V4L2_CTRL_TYPE_U16:
+ case V4L2_CTRL_TYPE_U32:
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER64:
+ if (step == 0 || min > max || def < min || def > max)
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_BITMASK:
+ if (step || min || !max || (def & ~max))
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (min > max || def < min || def > max)
+ return -ERANGE;
+ /* Note: step == menu_skip_mask for menu controls.
+ So here we check if the default value is masked out. */
+ if (step && ((1 << def) & step))
+ return -EINVAL;
+ return 0;
+ case V4L2_CTRL_TYPE_STRING:
+ if (min > max || min < 0 || step < 1 || def)
+ return -ERANGE;
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+/* Validate a new control */
+static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
+{
+ unsigned idx;
+ int err = 0;
+
+ if (!ctrl->is_ptr) {
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ case V4L2_CTRL_TYPE_INTEGER64:
+ return ctrl->type_ops->validate(ctrl, 0, p_new);
+ default:
+ break;
+ }
+ }
+ for (idx = 0; !err && idx < ctrl->elems; idx++)
+ err = ctrl->type_ops->validate(ctrl, idx, p_new);
+ return err;
+}
+
+static inline u32 node2id(struct list_head *node)
+{
+ return list_entry(node, struct v4l2_ctrl_ref, node)->ctrl->id;
+}
+
+/* Set the handler's error code if it wasn't already set */
+static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
+{
+ if (hdl->error == 0)
+ hdl->error = err;
+ return err;
+}
+
+/* Initialize the handler */
+int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
+ unsigned nr_of_controls_hint,
+ struct lock_class_key *key, const char *name)
+{
+ hdl->lock = &hdl->_lock;
+ mutex_init(hdl->lock);
+ lockdep_set_class_and_name(hdl->lock, key, name);
+ INIT_LIST_HEAD(&hdl->ctrls);
+ INIT_LIST_HEAD(&hdl->ctrl_refs);
+ hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
+ hdl->buckets = kcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]),
+ GFP_KERNEL);
+ hdl->error = hdl->buckets ? 0 : -ENOMEM;
+ return hdl->error;
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_init_class);
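+
+/*
+ * Drivers do not usually call this directly: the v4l2_ctrl_handler_init()
+ * macro (in media/v4l2-ctrls.h) wraps it and supplies the lockdep key and
+ * name. Typical probe-time usage (a sketch; 'state' is driver-private):
+ *
+ *	struct v4l2_ctrl_handler *hdl = &state->hdl;
+ *
+ *	v4l2_ctrl_handler_init(hdl, 4);	// hint: about 4 controls
+ *	// ... add controls here; errors accumulate in hdl->error ...
+ *	if (hdl->error) {
+ *		int err = hdl->error;
+ *
+ *		v4l2_ctrl_handler_free(hdl);
+ *		return err;
+ *	}
+ */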
+
+/* Free all controls and control refs */
+void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl_ref *ref, *next_ref;
+ struct v4l2_ctrl *ctrl, *next_ctrl;
+ struct v4l2_subscribed_event *sev, *next_sev;
+
+ if (hdl == NULL || hdl->buckets == NULL)
+ return;
+
+ mutex_lock(hdl->lock);
+ /* Free all nodes */
+ list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
+ list_del(&ref->node);
+ kfree(ref);
+ }
+ /* Free all controls owned by the handler */
+ list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
+ list_del(&ctrl->node);
+ list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
+ list_del(&sev->node);
+ kfree(ctrl);
+ }
+ kfree(hdl->buckets);
+ hdl->buckets = NULL;
+ hdl->cached = NULL;
+ hdl->error = 0;
+ mutex_unlock(hdl->lock);
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_free);
+
+/* For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer
+ be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing
+ with applications that do not use the NEXT_CTRL flag.
+
+ We just find the n-th private user control. It's O(N), but that should not
+ be an issue in this particular case. */
+static struct v4l2_ctrl_ref *find_private_ref(
+ struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref;
+
+ id -= V4L2_CID_PRIVATE_BASE;
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ /* Search for private user controls that are compatible with
+ VIDIOC_G/S_CTRL. */
+ if (V4L2_CTRL_ID2CLASS(ref->ctrl->id) == V4L2_CTRL_CLASS_USER &&
+ V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) {
+ if (!ref->ctrl->is_int)
+ continue;
+ if (id == 0)
+ return ref;
+ id--;
+ }
+ }
+ return NULL;
+}
+
+/* Find a control with the given ID. */
+static struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref;
+ int bucket;
+
+ id &= V4L2_CTRL_ID_MASK;
+
+ /* Old-style private controls need special handling */
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ return find_private_ref(hdl, id);
+ bucket = id % hdl->nr_of_buckets;
+
+ /* Simple optimization: cache the last control found */
+ if (hdl->cached && hdl->cached->ctrl->id == id)
+ return hdl->cached;
+
+ /* Not in cache, search the hash */
+ ref = hdl->buckets ? hdl->buckets[bucket] : NULL;
+ while (ref && ref->ctrl->id != id)
+ ref = ref->next;
+
+ if (ref)
+ hdl->cached = ref; /* cache it! */
+ return ref;
+}
+
+/* Find a control with the given ID. Take the handler's lock first. */
+static struct v4l2_ctrl_ref *find_ref_lock(
+ struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = NULL;
+
+ if (hdl) {
+ mutex_lock(hdl->lock);
+ ref = find_ref(hdl, id);
+ mutex_unlock(hdl->lock);
+ }
+ return ref;
+}
+
+/* Find a control with the given ID. */
+struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+
+ return ref ? ref->ctrl : NULL;
+}
+EXPORT_SYMBOL(v4l2_ctrl_find);
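+
+/*
+ * Illustrative lookup (sketch only): a driver that needs one of its own
+ * controls after creation can do:
+ *
+ *	struct v4l2_ctrl *gain = v4l2_ctrl_find(hdl, V4L2_CID_GAIN);
+ *
+ *	if (gain)
+ *		v4l2_ctrl_grab(gain, true);
+ */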
+
+/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */
+static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl_ref *new_ref;
+ u32 id = ctrl->id;
+ u32 class_ctrl = V4L2_CTRL_ID2CLASS(id) | 1;
+ int bucket = id % hdl->nr_of_buckets; /* which bucket to use */
+
+ /*
+ * Automatically add the control class if it is not yet present and
+ * the new control is not a compound control.
+ */
+ if (ctrl->type < V4L2_CTRL_COMPOUND_TYPES &&
+ id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL)
+ if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0))
+ return hdl->error;
+
+ if (hdl->error)
+ return hdl->error;
+
+ new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
+ if (!new_ref)
+ return handler_set_err(hdl, -ENOMEM);
+ new_ref->ctrl = ctrl;
+ if (ctrl->handler == hdl) {
+ /* By default each control starts in a cluster of its own.
+ new_ref->ctrl is basically a cluster array with one
+ element, so that's perfect to use as the cluster pointer.
+ But only do this for the handler that owns the control. */
+ ctrl->cluster = &new_ref->ctrl;
+ ctrl->ncontrols = 1;
+ }
+
+ INIT_LIST_HEAD(&new_ref->node);
+
+ mutex_lock(hdl->lock);
+
+ /* Add immediately at the end of the list if the list is empty, or if
+ the last element in the list has a lower ID.
+ This ensures that when elements are added in ascending order the
+ insertion is an O(1) operation. */
+ if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) {
+ list_add_tail(&new_ref->node, &hdl->ctrl_refs);
+ goto insert_in_hash;
+ }
+
+ /* Find insert position in sorted list */
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ if (ref->ctrl->id < id)
+ continue;
+ /* Don't add duplicates */
+ if (ref->ctrl->id == id) {
+ kfree(new_ref);
+ goto unlock;
+ }
+ list_add(&new_ref->node, ref->node.prev);
+ break;
+ }
+
+insert_in_hash:
+ /* Insert the control node in the hash */
+ new_ref->next = hdl->buckets[bucket];
+ hdl->buckets[bucket] = new_ref;
+
+unlock:
+ mutex_unlock(hdl->lock);
+ return 0;
+}
+
+/* Add a new control */
+static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ const struct v4l2_ctrl_type_ops *type_ops,
+ u32 id, const char *name, enum v4l2_ctrl_type type,
+ s64 min, s64 max, u64 step, s64 def,
+ const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
+ u32 flags, const char * const *qmenu,
+ const s64 *qmenu_int, void *priv)
+{
+ struct v4l2_ctrl *ctrl;
+ unsigned sz_extra;
+ unsigned nr_of_dims = 0;
+ unsigned elems = 1;
+ bool is_array;
+ unsigned tot_ctrl_size;
+ unsigned idx;
+ void *data;
+ int err;
+
+ if (hdl->error)
+ return NULL;
+
+ while (dims && dims[nr_of_dims]) {
+ elems *= dims[nr_of_dims];
+ nr_of_dims++;
+ if (nr_of_dims == V4L2_CTRL_MAX_DIMS)
+ break;
+ }
+ is_array = nr_of_dims > 0;
+
+ /* Prefill elem_size for all types handled by std_type_ops */
+ switch (type) {
+ case V4L2_CTRL_TYPE_INTEGER64:
+ elem_size = sizeof(s64);
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ elem_size = max + 1;
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ elem_size = sizeof(u8);
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ elem_size = sizeof(u16);
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ elem_size = sizeof(u32);
+ break;
+ default:
+ if (type < V4L2_CTRL_COMPOUND_TYPES)
+ elem_size = sizeof(s32);
+ break;
+ }
+ tot_ctrl_size = elem_size * elems;
+
+ /* Sanity checks */
+ if (id == 0 || name == NULL || !elem_size ||
+ id >= V4L2_CID_PRIVATE_BASE ||
+ (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
+ (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) {
+ handler_set_err(hdl, -ERANGE);
+ return NULL;
+ }
+ err = check_range(type, min, max, step, def);
+ if (err) {
+ handler_set_err(hdl, err);
+ return NULL;
+ }
+ if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) {
+ handler_set_err(hdl, -ERANGE);
+ return NULL;
+ }
+ if (is_array &&
+ (type == V4L2_CTRL_TYPE_BUTTON ||
+ type == V4L2_CTRL_TYPE_CTRL_CLASS)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+
+ sz_extra = 0;
+ if (type == V4L2_CTRL_TYPE_BUTTON)
+ flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ else if (type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ else if (type == V4L2_CTRL_TYPE_INTEGER64 ||
+ type == V4L2_CTRL_TYPE_STRING ||
+ type >= V4L2_CTRL_COMPOUND_TYPES ||
+ is_array)
+ sz_extra += 2 * tot_ctrl_size;
+
+ ctrl = kzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
+ if (ctrl == NULL) {
+ handler_set_err(hdl, -ENOMEM);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&ctrl->node);
+ INIT_LIST_HEAD(&ctrl->ev_subs);
+ ctrl->handler = hdl;
+ ctrl->ops = ops;
+ ctrl->type_ops = type_ops ? type_ops : &std_type_ops;
+ ctrl->id = id;
+ ctrl->name = name;
+ ctrl->type = type;
+ ctrl->flags = flags;
+ ctrl->minimum = min;
+ ctrl->maximum = max;
+ ctrl->step = step;
+ ctrl->default_value = def;
+ ctrl->is_string = !is_array && type == V4L2_CTRL_TYPE_STRING;
+ ctrl->is_ptr = is_array || type >= V4L2_CTRL_COMPOUND_TYPES || ctrl->is_string;
+ ctrl->is_int = !ctrl->is_ptr && type != V4L2_CTRL_TYPE_INTEGER64;
+ ctrl->is_array = is_array;
+ ctrl->elems = elems;
+ ctrl->nr_of_dims = nr_of_dims;
+ if (nr_of_dims)
+ memcpy(ctrl->dims, dims, nr_of_dims * sizeof(dims[0]));
+ ctrl->elem_size = elem_size;
+ if (type == V4L2_CTRL_TYPE_MENU)
+ ctrl->qmenu = qmenu;
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ ctrl->qmenu_int = qmenu_int;
+ ctrl->priv = priv;
+ ctrl->cur.val = ctrl->val = def;
+ data = &ctrl[1];
+
+ if (!ctrl->is_int) {
+ ctrl->p_new.p = data;
+ ctrl->p_cur.p = data + tot_ctrl_size;
+ } else {
+ ctrl->p_new.p = &ctrl->val;
+ ctrl->p_cur.p = &ctrl->cur.val;
+ }
+ for (idx = 0; idx < elems; idx++) {
+ ctrl->type_ops->init(ctrl, idx, ctrl->p_cur);
+ ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
+ }
+
+ if (handler_new_ref(hdl, ctrl)) {
+ kfree(ctrl);
+ return NULL;
+ }
+ mutex_lock(hdl->lock);
+ list_add_tail(&ctrl->node, &hdl->ctrls);
+ mutex_unlock(hdl->lock);
+ return ctrl;
+}
+
+struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_config *cfg, void *priv)
+{
+ bool is_menu;
+ struct v4l2_ctrl *ctrl;
+ const char *name = cfg->name;
+ const char * const *qmenu = cfg->qmenu;
+ const s64 *qmenu_int = cfg->qmenu_int;
+ enum v4l2_ctrl_type type = cfg->type;
+ u32 flags = cfg->flags;
+ s64 min = cfg->min;
+ s64 max = cfg->max;
+ u64 step = cfg->step;
+ s64 def = cfg->def;
+
+ if (name == NULL)
+ v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
+ &def, &flags);
+
+ is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
+ cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
+ if (is_menu)
+ WARN_ON(step);
+ else
+ WARN_ON(cfg->menu_skip_mask);
+ if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
+ qmenu = v4l2_ctrl_get_menu(cfg->id);
+ else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
+ qmenu_int == NULL) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+
+ ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->type_ops, cfg->id, name,
+ type, min, max,
+ is_menu ? cfg->menu_skip_mask : step, def,
+ cfg->dims, cfg->elem_size,
+ flags, qmenu, qmenu_int, priv);
+ if (ctrl)
+ ctrl->is_private = cfg->is_private;
+ return ctrl;
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_custom);
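+
+/*
+ * Example custom control definition (a sketch; the foo_* names and the
+ * CID offset are hypothetical, not part of this file):
+ *
+ *	static const struct v4l2_ctrl_config foo_cfg = {
+ *		.ops = &foo_ctrl_ops,
+ *		.id = V4L2_CID_USER_BASE + 0x1000,
+ *		.name = "Foo Strength",
+ *		.type = V4L2_CTRL_TYPE_INTEGER,
+ *		.min = 0,
+ *		.max = 15,
+ *		.step = 1,
+ *		.def = 8,
+ *	};
+ *
+ *	ctrl = v4l2_ctrl_new_custom(hdl, &foo_cfg, NULL);
+ */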
+
+/* Helper function for standard non-menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, s64 min, s64 max, u64 step, s64 def)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU ||
+ type >= V4L2_CTRL_COMPOUND_TYPES) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ min, max, step, def, NULL, 0,
+ flags, NULL, NULL, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std);
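+
+/*
+ * Typical use (sketch): the canonical brightness control, an integer
+ * ranging 0-255 with step 1 and default 128. foo_ctrl_ops is a
+ * driver-supplied v4l2_ctrl_ops, not defined here:
+ *
+ *	v4l2_ctrl_new_std(hdl, &foo_ctrl_ops,
+ *			  V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ */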
+
+/* Helper function for standard menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, u8 _max, u64 mask, u8 _def)
+{
+ const char * const *qmenu = NULL;
+ const s64 *qmenu_int = NULL;
+ unsigned int qmenu_int_len = 0;
+ const char *name;
+ enum v4l2_ctrl_type type;
+ s64 min;
+ s64 max = _max;
+ s64 def = _def;
+ u64 step;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+
+ if (type == V4L2_CTRL_TYPE_MENU)
+ qmenu = v4l2_ctrl_get_menu(id);
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len);
+
+ if ((!qmenu && !qmenu_int) || (qmenu_int && max > qmenu_int_len)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ 0, max, mask, def, NULL, 0,
+ flags, qmenu, qmenu_int, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
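+
+/*
+ * Example (sketch): _max is the highest valid menu index and mask marks
+ * indices to skip. To expose only 50 Hz and 60 Hz power line filtering,
+ * skipping DISABLED (index 0), with 50 Hz as the default:
+ *
+ *	v4l2_ctrl_new_std_menu(hdl, &foo_ctrl_ops,
+ *			V4L2_CID_POWER_LINE_FREQUENCY,
+ *			V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
+ *			BIT(V4L2_CID_POWER_LINE_FREQUENCY_DISABLED),
+ *			V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
+ */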
+
+/* Helper function for standard menu controls with a driver-defined menu */
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id, u8 _max,
+ u64 mask, u8 _def, const char * const *qmenu)
+{
+ enum v4l2_ctrl_type type;
+ const char *name;
+ u32 flags;
+ u64 step;
+ s64 min;
+ s64 max = _max;
+ s64 def = _def;
+
+ /* v4l2_ctrl_new_std_menu_items() should only be called for
+ * standard controls without a standard menu.
+ */
+ if (v4l2_ctrl_get_menu(id)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type != V4L2_CTRL_TYPE_MENU || qmenu == NULL) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ 0, max, mask, def, NULL, 0,
+ flags, qmenu, NULL, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
+
+/* Helper function for standard integer menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, u8 _max, u8 _def, const s64 *qmenu_int)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ s64 min;
+ u64 step;
+ s64 max = _max;
+ s64 def = _def;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type != V4L2_CTRL_TYPE_INTEGER_MENU) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ 0, max, 0, def, NULL, 0,
+ flags, NULL, qmenu_int, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
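+
+/*
+ * Example (sketch): integer menus carry s64 payloads, e.g. the set of
+ * link frequencies a sensor supports (the values below are made up):
+ *
+ *	static const s64 foo_link_freqs[] = { 148500000, 297000000 };
+ *
+ *	v4l2_ctrl_new_int_menu(hdl, &foo_ctrl_ops, V4L2_CID_LINK_FREQ,
+ *			       ARRAY_SIZE(foo_link_freqs) - 1, 0,
+ *			       foo_link_freqs);
+ */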
+
+/* Add a control from another handler to this handler */
+struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl *ctrl)
+{
+ if (hdl == NULL || hdl->error)
+ return NULL;
+ if (ctrl == NULL) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ if (ctrl->handler == hdl)
+ return ctrl;
+ return handler_new_ref(hdl, ctrl) ? NULL : ctrl;
+}
+EXPORT_SYMBOL(v4l2_ctrl_add_ctrl);
+
+/* Add the controls from another handler to our own. */
+int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl_handler *add,
+ bool (*filter)(const struct v4l2_ctrl *ctrl))
+{
+ struct v4l2_ctrl_ref *ref;
+ int ret = 0;
+
+ /* Do nothing if either handler is NULL or if they are the same */
+ if (!hdl || !add || hdl == add)
+ return 0;
+ if (hdl->error)
+ return hdl->error;
+ mutex_lock(add->lock);
+ list_for_each_entry(ref, &add->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+
+ /* Skip handler-private controls. */
+ if (ctrl->is_private)
+ continue;
+ /* And control classes */
+ if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ continue;
+ /* Filter any unwanted controls */
+ if (filter && !filter(ctrl))
+ continue;
+ ret = handler_new_ref(hdl, ctrl);
+ if (ret)
+ break;
+ }
+ mutex_unlock(add->lock);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_add_handler);
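+
+/*
+ * Typical use (sketch; bridge_hdl and sd are driver-private): a bridge
+ * driver inheriting the controls of a radio subdevice, keeping only the
+ * radio-relevant ones via the filter defined below:
+ *
+ *	err = v4l2_ctrl_add_handler(&bridge_hdl, sd->ctrl_handler,
+ *				    v4l2_ctrl_radio_filter);
+ */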
+
+bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl)
+{
+ if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
+ return true;
+ if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
+ return true;
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_AUDIO_BALANCE:
+ case V4L2_CID_AUDIO_BASS:
+ case V4L2_CID_AUDIO_TREBLE:
+ case V4L2_CID_AUDIO_LOUDNESS:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+EXPORT_SYMBOL(v4l2_ctrl_radio_filter);
+
+/* Cluster controls */
+void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
+{
+ bool has_volatiles = false;
+ int i;
+
+ /* The first control is the master control and it must not be NULL */
+ if (WARN_ON(ncontrols == 0 || controls[0] == NULL))
+ return;
+
+ for (i = 0; i < ncontrols; i++) {
+ if (controls[i]) {
+ controls[i]->cluster = controls;
+ controls[i]->ncontrols = ncontrols;
+ if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE)
+ has_volatiles = true;
+ }
+ }
+ controls[0]->has_volatiles = has_volatiles;
+}
+EXPORT_SYMBOL(v4l2_ctrl_cluster);
+
+void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
+ u8 manual_val, bool set_volatile)
+{
+ struct v4l2_ctrl *master = controls[0];
+ u32 flag = 0;
+ int i;
+
+ v4l2_ctrl_cluster(ncontrols, controls);
+ WARN_ON(ncontrols <= 1);
+ WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
+ WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl));
+ master->is_auto = true;
+ master->has_volatiles = set_volatile;
+ master->manual_mode_value = manual_val;
+ master->flags |= V4L2_CTRL_FLAG_UPDATE;
+
+ if (!is_cur_manual(master))
+ flag = V4L2_CTRL_FLAG_INACTIVE |
+ (set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0);
+
+ for (i = 1; i < ncontrols; i++)
+ if (controls[i])
+ controls[i]->flags |= flag;
+}
+EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
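+
+/*
+ * The canonical autocluster is autogain/gain: the boolean autogain master
+ * comes first, its manual value is 0, and gain is marked volatile so it can
+ * be read back while the device is in auto mode (a minimal sketch; the
+ * 'state' fields and 'ops' are hypothetical driver definitions, and the two
+ * control pointers must be consecutive struct members):
+ *
+ *	state->autogain = v4l2_ctrl_new_std(&state->hdl, ops,
+ *					    V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ *	state->gain = v4l2_ctrl_new_std(&state->hdl, ops,
+ *					    V4L2_CID_GAIN, 0, 255, 1, 128);
+ *	v4l2_ctrl_auto_cluster(2, &state->autogain, 0, true);
+ */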
+
+/* Activate/deactivate a control. */
+void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
+{
+ /* invert since the actual flag is called 'inactive' */
+ bool inactive = !active;
+ bool old;
+
+ if (ctrl == NULL)
+ return;
+
+ if (inactive)
+ /* set V4L2_CTRL_FLAG_INACTIVE */
+ old = test_and_set_bit(4, &ctrl->flags);
+ else
+ /* clear V4L2_CTRL_FLAG_INACTIVE */
+ old = test_and_clear_bit(4, &ctrl->flags);
+ if (old != inactive)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
+}
+EXPORT_SYMBOL(v4l2_ctrl_activate);
+
+/* Grab/ungrab a control.
+ Typically used when streaming starts and you want to grab controls,
+ preventing the user from changing them.
+
+ Just call this and the framework will block any attempts to change
+ these controls. */
+void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
+{
+ bool old;
+
+ if (ctrl == NULL)
+ return;
+
+ v4l2_ctrl_lock(ctrl);
+ if (grabbed)
+ /* set V4L2_CTRL_FLAG_GRABBED */
+ old = test_and_set_bit(1, &ctrl->flags);
+ else
+ /* clear V4L2_CTRL_FLAG_GRABBED */
+ old = test_and_clear_bit(1, &ctrl->flags);
+ if (old != grabbed)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
+ v4l2_ctrl_unlock(ctrl);
+}
+EXPORT_SYMBOL(v4l2_ctrl_grab);
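+
+/*
+ * The usual pattern is to grab mode-dependent controls when streaming
+ * starts and to release them when it stops (a minimal sketch;
+ * 'state->exposure' is a hypothetical control pointer):
+ *
+ *	v4l2_ctrl_grab(state->exposure, true);		(at streamon)
+ *	...
+ *	v4l2_ctrl_grab(state->exposure, false);		(at streamoff)
+ */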
+
+/* Log the control name and value */
+static void log_ctrl(const struct v4l2_ctrl *ctrl,
+ const char *prefix, const char *colon)
+{
+ if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
+ return;
+ if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ return;
+
+ pr_info("%s%s%s: ", prefix, colon, ctrl->name);
+
+ ctrl->type_ops->log(ctrl);
+
+ if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
+ V4L2_CTRL_FLAG_GRABBED |
+ V4L2_CTRL_FLAG_VOLATILE)) {
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ pr_cont(" inactive");
+ if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)
+ pr_cont(" grabbed");
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE)
+ pr_cont(" volatile");
+ }
+ pr_cont("\n");
+}
+
+/* Log all controls owned by the handler */
+void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
+ const char *prefix)
+{
+ struct v4l2_ctrl *ctrl;
+ const char *colon = "";
+ int len;
+
+ if (hdl == NULL)
+ return;
+ if (prefix == NULL)
+ prefix = "";
+ len = strlen(prefix);
+ if (len && prefix[len - 1] != ' ')
+ colon = ": ";
+ mutex_lock(hdl->lock);
+ list_for_each_entry(ctrl, &hdl->ctrls, node)
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
+ log_ctrl(ctrl, prefix, colon);
+ mutex_unlock(hdl->lock);
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
+
+int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd)
+{
+ v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status);
+
+/* Call s_ctrl for all controls owned by the handler */
+int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret = 0;
+
+ if (hdl == NULL)
+ return 0;
+ mutex_lock(hdl->lock);
+ list_for_each_entry(ctrl, &hdl->ctrls, node)
+ ctrl->done = false;
+
+ list_for_each_entry(ctrl, &hdl->ctrls, node) {
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int i;
+
+ /* Skip if this control was already handled by a cluster. */
+ /* Skip button controls and read-only controls. */
+ if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
+ (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
+ continue;
+
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ cur_to_new(master->cluster[i]);
+ master->cluster[i]->is_new = 1;
+ master->cluster[i]->done = true;
+ }
+ }
+ ret = call_op(master, s_ctrl);
+ if (ret)
+ break;
+ }
+ mutex_unlock(hdl->lock);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
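+
+/*
+ * Drivers usually call this once at the end of probe() so that the
+ * hardware starts out in sync with the control defaults (a minimal
+ * sketch; 'state' is a hypothetical driver struct):
+ *
+ *	err = v4l2_ctrl_handler_setup(&state->hdl);
+ *	if (err)
+ *		goto free_handler;
+ */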
+
+/* Implement VIDIOC_QUERY_EXT_CTRL */
+int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc)
+{
+ const unsigned next_flags = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
+ u32 id = qc->id & V4L2_CTRL_ID_MASK;
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl *ctrl;
+
+ if (hdl == NULL)
+ return -EINVAL;
+
+ mutex_lock(hdl->lock);
+
+ /* Try to find it */
+ ref = find_ref(hdl, id);
+
+ if ((qc->id & next_flags) && !list_empty(&hdl->ctrl_refs)) {
+ bool is_compound;
+ /* Match any control that is not hidden */
+ unsigned mask = 1;
+ bool match = false;
+
+ if ((qc->id & next_flags) == V4L2_CTRL_FLAG_NEXT_COMPOUND) {
+ /* Match any hidden control */
+ match = true;
+ } else if ((qc->id & next_flags) == next_flags) {
+ /* Match any control, compound or not */
+ mask = 0;
+ }
+
+ /* Find the next control with ID > qc->id */
+
+ /* Did we reach the end of the control list? */
+ if (id >= node2id(hdl->ctrl_refs.prev)) {
+ ref = NULL; /* Yes, so there is no next control */
+ } else if (ref) {
+ /* We found a control with the given ID, so just get
+ the next valid one in the list. */
+ list_for_each_entry_continue(ref, &hdl->ctrl_refs, node) {
+ is_compound =
+ ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
+ if (id < ref->ctrl->id &&
+ (is_compound & mask) == match)
+ break;
+ }
+ if (&ref->node == &hdl->ctrl_refs)
+ ref = NULL;
+ } else {
+ /* No control with the given ID exists, so start
+ searching for the next largest ID. We know there
+ is one, otherwise the first 'if' above would have
+ been true. */
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ is_compound =
+ ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
+ if (id < ref->ctrl->id &&
+ (is_compound & mask) == match)
+ break;
+ }
+ if (&ref->node == &hdl->ctrl_refs)
+ ref = NULL;
+ }
+ }
+ mutex_unlock(hdl->lock);
+
+ if (!ref)
+ return -EINVAL;
+
+ ctrl = ref->ctrl;
+ memset(qc, 0, sizeof(*qc));
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ qc->id = id;
+ else
+ qc->id = ctrl->id;
+ strlcpy(qc->name, ctrl->name, sizeof(qc->name));
+ qc->flags = ctrl->flags;
+ qc->type = ctrl->type;
+ if (ctrl->is_ptr)
+ qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+ qc->elem_size = ctrl->elem_size;
+ qc->elems = ctrl->elems;
+ qc->nr_of_dims = ctrl->nr_of_dims;
+ memcpy(qc->dims, ctrl->dims, qc->nr_of_dims * sizeof(qc->dims[0]));
+ qc->minimum = ctrl->minimum;
+ qc->maximum = ctrl->maximum;
+ qc->default_value = ctrl->default_value;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU
+ || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ qc->step = 1;
+ else
+ qc->step = ctrl->step;
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_query_ext_ctrl);
+
+/* Implement VIDIOC_QUERYCTRL */
+int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
+{
+ struct v4l2_query_ext_ctrl qec = { qc->id };
+ int rc;
+
+ rc = v4l2_query_ext_ctrl(hdl, &qec);
+ if (rc)
+ return rc;
+
+ qc->id = qec.id;
+ qc->type = qec.type;
+ qc->flags = qec.flags;
+ strlcpy(qc->name, qec.name, sizeof(qc->name));
+ switch (qc->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_STRING:
+ case V4L2_CTRL_TYPE_BITMASK:
+ qc->minimum = qec.minimum;
+ qc->maximum = qec.maximum;
+ qc->step = qec.step;
+ qc->default_value = qec.default_value;
+ break;
+ default:
+ qc->minimum = 0;
+ qc->maximum = 0;
+ qc->step = 0;
+ qc->default_value = 0;
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_queryctrl);
+
+int v4l2_subdev_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
+{
+ if (qc->id & (V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND))
+ return -EINVAL;
+ return v4l2_queryctrl(sd->ctrl_handler, qc);
+}
+EXPORT_SYMBOL(v4l2_subdev_queryctrl);
+
+/* Implement VIDIOC_QUERYMENU */
+int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
+{
+ struct v4l2_ctrl *ctrl;
+ u32 i = qm->index;
+
+ ctrl = v4l2_ctrl_find(hdl, qm->id);
+ if (!ctrl)
+ return -EINVAL;
+
+ qm->reserved = 0;
+ /* Sanity checks */
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_MENU:
+ if (ctrl->qmenu == NULL)
+ return -EINVAL;
+ break;
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (ctrl->qmenu_int == NULL)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (i < ctrl->minimum || i > ctrl->maximum)
+ return -EINVAL;
+
+ /* Use mask to see if this menu item should be skipped */
+ if (ctrl->menu_skip_mask & (1 << i))
+ return -EINVAL;
+ /* Empty menu items should also be skipped */
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
+ if (ctrl->qmenu[i] == NULL || ctrl->qmenu[i][0] == '\0')
+ return -EINVAL;
+ strlcpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
+ } else {
+ qm->value = ctrl->qmenu_int[i];
+ }
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_querymenu);
+
+int v4l2_subdev_querymenu(struct v4l2_subdev *sd, struct v4l2_querymenu *qm)
+{
+ return v4l2_querymenu(sd->ctrl_handler, qm);
+}
+EXPORT_SYMBOL(v4l2_subdev_querymenu);
+
+
+
+/* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
+
+ It is not a fully atomic operation, only a best-effort one. After all, if
+ multiple controls have to be set through multiple i2c writes (for example)
+ then some initial writes may succeed while others fail, leaving the
+ system in an inconsistent state. The question is how much effort you are
+ willing to spend on trying to make something atomic that really isn't.
+
+ From the point of view of an application the main requirement is that
+ when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an
+ error should be returned without actually affecting any controls.
+
+ If all the values are correct, then it is acceptable to just give up
+ in case of low-level errors.
+
+ It is important though that the application can tell when only a partial
+ configuration was done. The way we do that is through the error_idx field
+ of struct v4l2_ext_controls: if that is equal to the count field then no
+ controls were affected. Otherwise all controls before that index were
+ successful in performing their 'get' or 'set' operation, the control at
+ the given index failed, and you don't know what happened with the controls
+ after the failed one: if they were part of a control cluster they
+ could have been successfully processed (if a cluster member was encountered
+ at index < error_idx), they could have failed (if a cluster member was at
+ error_idx), or they may not have been processed yet (if the first cluster
+ member appeared after error_idx).
+
+ It is all fairly theoretical, though. In practice all you can do is to
+ bail out. If error_idx == count, then it is an application bug. If
+ error_idx < count then it is only an application bug if the error code was
+ EBUSY. That usually means that something started streaming just when you
+ tried to set the controls. In all other cases it is a driver/hardware
+ problem and all you can do is to retry or bail out.
+
+ Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that
+ never modifies controls the error_idx is just set to whatever control
+ has an invalid value.
+ */
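+
+/*
+ * From the application side that boils down to a check like this (a
+ * minimal userspace sketch; 'fd' is an open video node and 'cs' is a
+ * filled-in struct v4l2_ext_controls):
+ *
+ *	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &cs) < 0) {
+ *		if (cs.error_idx == cs.count)
+ *			fix the request, no control was touched;
+ *		else
+ *			bail out, controls before error_idx were set
+ *			and the state of the rest is unknown;
+ *	}
+ */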
+
+/* Prepare for the extended g/s/try functions.
+ Find the controls in the control array and do some basic checks. */
+static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct v4l2_ctrl_helper *helpers,
+ bool get)
+{
+ struct v4l2_ctrl_helper *h;
+ bool have_clusters = false;
+ u32 i;
+
+ for (i = 0, h = helpers; i < cs->count; i++, h++) {
+ struct v4l2_ext_control *c = &cs->controls[i];
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl *ctrl;
+ u32 id = c->id & V4L2_CTRL_ID_MASK;
+
+ cs->error_idx = i;
+
+ if (cs->ctrl_class && V4L2_CTRL_ID2CLASS(id) != cs->ctrl_class)
+ return -EINVAL;
+
+ /* Old-style private controls are not allowed for
+ extended controls */
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ return -EINVAL;
+ ref = find_ref_lock(hdl, id);
+ if (ref == NULL)
+ return -EINVAL;
+ ctrl = ref->ctrl;
+ if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED)
+ return -EINVAL;
+
+ if (ctrl->cluster[0]->ncontrols > 1)
+ have_clusters = true;
+ if (ctrl->cluster[0] != ctrl)
+ ref = find_ref_lock(hdl, ctrl->cluster[0]->id);
+ if (ctrl->is_ptr && !ctrl->is_string) {
+ unsigned tot_size = ctrl->elems * ctrl->elem_size;
+
+ if (c->size < tot_size) {
+ if (get) {
+ c->size = tot_size;
+ return -ENOSPC;
+ }
+ return -EFAULT;
+ }
+ c->size = tot_size;
+ }
+ /* Store the ref to the master control of the cluster */
+ h->mref = ref;
+ h->ctrl = ctrl;
+ /* Initially set next to 0, meaning that there is no other
+ control in this helper array belonging to the same
+ cluster */
+ h->next = 0;
+ }
+
+ /* We are done if there were no controls that belong to a multi-
+ control cluster. */
+ if (!have_clusters)
+ return 0;
+
+ /* The code below figures out in O(n) time which controls in the list
+ belong to the same cluster. */
+
+ /* This has to be done with the handler lock taken. */
+ mutex_lock(hdl->lock);
+
+ /* First zero the helper field in the master control references */
+ for (i = 0; i < cs->count; i++)
+ helpers[i].mref->helper = NULL;
+ for (i = 0, h = helpers; i < cs->count; i++, h++) {
+ struct v4l2_ctrl_ref *mref = h->mref;
+
+ /* If the mref->helper is set, then it points to an earlier
+ helper that belongs to the same cluster. */
+ if (mref->helper) {
+ /* Set the next field of mref->helper to the current
+ index: this means that that earlier helper now
+ points to the next helper in the same cluster. */
+ mref->helper->next = i;
+ /* mref should be set only for the first helper in the
+ cluster, clear the others. */
+ h->mref = NULL;
+ }
+ /* Point the mref helper to the current helper struct. */
+ mref->helper = h;
+ }
+ mutex_unlock(hdl->lock);
+ return 0;
+}
+
+/* Handles the corner case where cs->count == 0. It checks whether the
+ specified control class exists. If that class ID is 0, then it checks
+ whether there are any controls at all. */
+static int class_check(struct v4l2_ctrl_handler *hdl, u32 ctrl_class)
+{
+ if (ctrl_class == 0)
+ return list_empty(&hdl->ctrl_refs) ? -EINVAL : 0;
+ return find_ref_lock(hdl, ctrl_class | 1) ? 0 : -EINVAL;
+}
+
+
+
+/* Get extended controls. Allocates the helpers array if needed. */
+int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+{
+ struct v4l2_ctrl_helper helper[4];
+ struct v4l2_ctrl_helper *helpers = helper;
+ int ret;
+ int i, j;
+
+ cs->error_idx = cs->count;
+ cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);
+
+ if (hdl == NULL)
+ return -EINVAL;
+
+ if (cs->count == 0)
+ return class_check(hdl, cs->ctrl_class);
+
+ if (cs->count > ARRAY_SIZE(helper)) {
+ helpers = kmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
+ if (helpers == NULL)
+ return -ENOMEM;
+ }
+
+ ret = prepare_ext_ctrls(hdl, cs, helpers, true);
+ cs->error_idx = cs->count;
+
+ for (i = 0; !ret && i < cs->count; i++)
+ if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ ret = -EACCES;
+
+ for (i = 0; !ret && i < cs->count; i++) {
+ int (*ctrl_to_user)(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl) = cur_to_user;
+ struct v4l2_ctrl *master;
+
+ if (helpers[i].mref == NULL)
+ continue;
+
+ master = helpers[i].mref->ctrl;
+ cs->error_idx = i;
+
+ v4l2_ctrl_lock(master);
+
+ /* g_volatile_ctrl will update the new control values */
+ if ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
+ (master->has_volatiles && !is_cur_manual(master))) {
+ for (j = 0; j < master->ncontrols; j++)
+ cur_to_new(master->cluster[j]);
+ ret = call_op(master, g_volatile_ctrl);
+ ctrl_to_user = new_to_user;
+ }
+ /* If OK, then copy the current (for non-volatile controls)
+ or the new (for volatile controls) control values to the
+ caller */
+ if (!ret) {
+ u32 idx = i;
+
+ do {
+ ret = ctrl_to_user(cs->controls + idx,
+ helpers[idx].ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+ }
+ v4l2_ctrl_unlock(master);
+ }
+
+ if (cs->count > ARRAY_SIZE(helper))
+ kfree(helpers);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_g_ext_ctrls);
+
+int v4l2_subdev_g_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
+{
+ return v4l2_g_ext_ctrls(sd->ctrl_handler, cs);
+}
+EXPORT_SYMBOL(v4l2_subdev_g_ext_ctrls);
+
+/* Helper function to get a single control */
+static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
+{
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int ret = 0;
+ int i;
+
+ /* Compound controls are not supported. The new_to_user() and
+ * cur_to_user() calls below would need to be modified not to access
+ * userspace memory when called from get_ctrl().
+ */
+ if (!ctrl->is_int)
+ return -EINVAL;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ return -EACCES;
+
+ v4l2_ctrl_lock(master);
+ /* g_volatile_ctrl will update the current control values */
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ ret = call_op(master, g_volatile_ctrl);
+ new_to_user(c, ctrl);
+ } else {
+ cur_to_user(c, ctrl);
+ }
+ v4l2_ctrl_unlock(master);
+ return ret;
+}
+
+int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
+ struct v4l2_ext_control c;
+ int ret;
+
+ if (ctrl == NULL || !ctrl->is_int)
+ return -EINVAL;
+ ret = get_ctrl(ctrl, &c);
+ control->value = c.value;
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_g_ctrl);
+
+int v4l2_subdev_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
+{
+ return v4l2_g_ctrl(sd->ctrl_handler, control);
+}
+EXPORT_SYMBOL(v4l2_subdev_g_ctrl);
+
+s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ext_control c;
+
+ /* It's a driver bug if this happens. */
+ WARN_ON(!ctrl->is_int);
+ c.value = 0;
+ get_ctrl(ctrl, &c);
+ return c.value;
+}
+EXPORT_SYMBOL(v4l2_ctrl_g_ctrl);
+
+s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ext_control c;
+
+ /* It's a driver bug if this happens. */
+ WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
+ c.value = 0;
+ get_ctrl(ctrl, &c);
+ return c.value;
+}
+EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64);
+
+
+/* Core function that calls try/s_ctrl and ensures that the new value is
+ copied to the current value on a set.
+ Must be called with ctrl->handler->lock held. */
+static int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
+ bool set, u32 ch_flags)
+{
+ bool update_flag;
+ int ret;
+ int i;
+
+ /* Go through the cluster and either validate the new value or,
+ if no new value was set, copy the current value to the new
+ value, ensuring a consistent view for the control ops when
+ called. */
+ for (i = 0; i < master->ncontrols; i++) {
+ struct v4l2_ctrl *ctrl = master->cluster[i];
+
+ if (ctrl == NULL)
+ continue;
+
+ if (!ctrl->is_new) {
+ cur_to_new(ctrl);
+ continue;
+ }
+ /* Check again: it may have changed since the
+ previous check in try_or_set_ext_ctrls(). */
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
+ return -EBUSY;
+ }
+
+ ret = call_op(master, try_ctrl);
+
+ /* Don't set if there is no change */
+ if (ret || !set || !cluster_changed(master))
+ return ret;
+ ret = call_op(master, s_ctrl);
+ if (ret)
+ return ret;
+
+ /* If OK, then make the new values permanent. */
+ update_flag = is_cur_manual(master) != is_new_manual(master);
+ for (i = 0; i < master->ncontrols; i++)
+ new_to_cur(fh, master->cluster[i], ch_flags |
+ ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
+ return 0;
+}
+
+/* Validate controls. */
+static int validate_ctrls(struct v4l2_ext_controls *cs,
+ struct v4l2_ctrl_helper *helpers, bool set)
+{
+ unsigned i;
+ int ret = 0;
+
+ cs->error_idx = cs->count;
+ for (i = 0; i < cs->count; i++) {
+ struct v4l2_ctrl *ctrl = helpers[i].ctrl;
+ union v4l2_ctrl_ptr p_new;
+
+ cs->error_idx = i;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return -EACCES;
+ /* This test is also done in try_or_set_cluster() which
+ is called with the handler lock held, so that has the final say,
+ but it makes sense to do an up-front check as well. Once
+ an error occurs in try_or_set_cluster() some other
+ controls may have been set already and we want to make a
+ best effort to avoid that. */
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
+ return -EBUSY;
+ /*
+ * Skip validation for now if the payload needs to be copied
+ * from userspace into kernelspace. We'll validate those later.
+ */
+ if (ctrl->is_ptr)
+ continue;
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ p_new.p_s64 = &cs->controls[i].value64;
+ else
+ p_new.p_s32 = &cs->controls[i].value;
+ ret = validate_new(ctrl, p_new);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* Obtain the current volatile values of an autocluster and mark them
+ as new. */
+static void update_from_auto_cluster(struct v4l2_ctrl *master)
+{
+ int i;
+
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ if (!call_op(master, g_volatile_ctrl))
+ for (i = 1; i < master->ncontrols; i++)
+ if (master->cluster[i])
+ master->cluster[i]->is_new = 1;
+}
+
+/* Try or try-and-set controls */
+static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ bool set)
+{
+ struct v4l2_ctrl_helper helper[4];
+ struct v4l2_ctrl_helper *helpers = helper;
+ unsigned i, j;
+ int ret;
+
+ cs->error_idx = cs->count;
+ cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);
+
+ if (hdl == NULL)
+ return -EINVAL;
+
+ if (cs->count == 0)
+ return class_check(hdl, cs->ctrl_class);
+
+ if (cs->count > ARRAY_SIZE(helper)) {
+ helpers = kmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
+ if (!helpers)
+ return -ENOMEM;
+ }
+ ret = prepare_ext_ctrls(hdl, cs, helpers, false);
+ if (!ret)
+ ret = validate_ctrls(cs, helpers, set);
+ if (ret && set)
+ cs->error_idx = cs->count;
+ for (i = 0; !ret && i < cs->count; i++) {
+ struct v4l2_ctrl *master;
+ u32 idx = i;
+
+ if (helpers[i].mref == NULL)
+ continue;
+
+ cs->error_idx = i;
+ master = helpers[i].mref->ctrl;
+ v4l2_ctrl_lock(master);
+
+ /* Reset the 'is_new' flags of the cluster */
+ for (j = 0; j < master->ncontrols; j++)
+ if (master->cluster[j])
+ master->cluster[j]->is_new = 0;
+
+ /* For a volatile autocluster that is currently in auto mode
+ we need to discover whether it will be switched to manual mode.
+ If so, then we have to copy the current volatile values
+ first since those will become the new manual values (which
+ may be overwritten by explicit new values from this set
+ of controls). */
+ if (master->is_auto && master->has_volatiles &&
+ !is_cur_manual(master)) {
+ /* Pick an initial non-manual value */
+ s32 new_auto_val = master->manual_mode_value + 1;
+ u32 tmp_idx = idx;
+
+ do {
+ /* Check if the auto control is part of the
+ list, and remember the new value. */
+ if (helpers[tmp_idx].ctrl == master)
+ new_auto_val = cs->controls[tmp_idx].value;
+ tmp_idx = helpers[tmp_idx].next;
+ } while (tmp_idx);
+ /* If the new value == the manual value, then copy
+ the current volatile values. */
+ if (new_auto_val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+ }
+
+ /* Copy the new caller-supplied control values.
+ user_to_new() sets 'is_new' to 1. */
+ do {
+ struct v4l2_ctrl *ctrl = helpers[idx].ctrl;
+
+ ret = user_to_new(cs->controls + idx, ctrl);
+ if (!ret && ctrl->is_ptr)
+ ret = validate_new(ctrl, ctrl->p_new);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+
+ if (!ret)
+ ret = try_or_set_cluster(fh, master, set, 0);
+
+ /* Copy the new values back to userspace. */
+ if (!ret) {
+ idx = i;
+ do {
+ ret = new_to_user(cs->controls + idx,
+ helpers[idx].ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+ }
+ v4l2_ctrl_unlock(master);
+ }
+
+ if (cs->count > ARRAY_SIZE(helper))
+ kfree(helpers);
+ return ret;
+}
+
+int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(NULL, hdl, cs, false);
+}
+EXPORT_SYMBOL(v4l2_try_ext_ctrls);
+
+int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(fh, hdl, cs, true);
+}
+EXPORT_SYMBOL(v4l2_s_ext_ctrls);
+
+int v4l2_subdev_try_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, false);
+}
+EXPORT_SYMBOL(v4l2_subdev_try_ext_ctrls);
+
+int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, true);
+}
+EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
+
+/* Helper function for VIDIOC_S_CTRL compatibility */
+static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
+{
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int ret;
+ int i;
+
+ /* Reset the 'is_new' flags of the cluster */
+ for (i = 0; i < master->ncontrols; i++)
+ if (master->cluster[i])
+ master->cluster[i]->is_new = 0;
+
+ ret = validate_new(ctrl, ctrl->p_new);
+ if (ret)
+ return ret;
+
+ /* For autoclusters with volatiles that are switched from auto to
+ manual mode we have to update the current volatile values since
+ those will become the initial manual values after such a switch. */
+ if (master->is_auto && master->has_volatiles && ctrl == master &&
+ !is_cur_manual(master) && ctrl->val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+
+ ctrl->is_new = 1;
+ return try_or_set_cluster(fh, master, true, ch_flags);
+}
+
+/* Helper function for VIDIOC_S_CTRL compatibility */
+static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
+ struct v4l2_ext_control *c)
+{
+ int ret;
+
+ v4l2_ctrl_lock(ctrl);
+ user_to_new(c, ctrl);
+ ret = set_ctrl(fh, ctrl, 0);
+ if (!ret)
+ cur_to_user(c, ctrl);
+ v4l2_ctrl_unlock(ctrl);
+ return ret;
+}
+
+int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct v4l2_control *control)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
+ struct v4l2_ext_control c = { control->id };
+ int ret;
+
+ if (ctrl == NULL || !ctrl->is_int)
+ return -EINVAL;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return -EACCES;
+
+ c.value = control->value;
+ ret = set_ctrl_lock(fh, ctrl, &c);
+ control->value = c.value;
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_s_ctrl);
+
+int v4l2_subdev_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
+{
+ return v4l2_s_ctrl(NULL, sd->ctrl_handler, control);
+}
+EXPORT_SYMBOL(v4l2_subdev_s_ctrl);
+
+int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ WARN_ON(!ctrl->is_int);
+ ctrl->val = val;
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl);
+
+int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
+ *ctrl->p_new.p_s64 = val;
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64);
+
+int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING);
+ strlcpy(ctrl->p_new.p_char, s, ctrl->maximum + 1);
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
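+
+/*
+ * The lock-taking counterparts of these helpers live in
+ * include/media/v4l2-ctrls.h and follow this pattern (sketch):
+ *
+ *	v4l2_ctrl_lock(ctrl);
+ *	ret = __v4l2_ctrl_s_ctrl(ctrl, val);
+ *	v4l2_ctrl_unlock(ctrl);
+ *
+ * The __-prefixed variants are only for callers that already hold the
+ * handler lock, e.g. code running inside another operation of the same
+ * handler.
+ */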
+
+void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
+{
+ if (ctrl == NULL)
+ return;
+ if (notify == NULL) {
+ ctrl->call_notify = 0;
+ return;
+ }
+ if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify))
+ return;
+ ctrl->handler->notify = notify;
+ ctrl->handler->notify_priv = priv;
+ ctrl->call_notify = 1;
+}
+EXPORT_SYMBOL(v4l2_ctrl_notify);
+
+int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
+ s64 min, s64 max, u64 step, s64 def)
+{
+ bool changed;
+ int ret;
+
+ lockdep_assert_held(ctrl->handler->lock);
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER64:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_U8:
+ case V4L2_CTRL_TYPE_U16:
+ case V4L2_CTRL_TYPE_U32:
+ if (ctrl->is_array)
+ return -EINVAL;
+ ret = check_range(ctrl->type, min, max, step, def);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ctrl->minimum = min;
+ ctrl->maximum = max;
+ ctrl->step = step;
+ ctrl->default_value = def;
+ cur_to_new(ctrl);
+ if (validate_new(ctrl, ctrl->p_new)) {
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ *ctrl->p_new.p_s64 = def;
+ else
+ *ctrl->p_new.p_s32 = def;
+ }
+
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64;
+ else
+ changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32;
+ if (changed)
+ ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
+ else
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
+ return ret;
+}
+EXPORT_SYMBOL(__v4l2_ctrl_modify_range);
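+
+/*
+ * Drivers normally call the locked wrapper v4l2_ctrl_modify_range() from
+ * include/media/v4l2-ctrls.h instead, e.g. to adjust the exposure limits
+ * after a frame interval change (a minimal sketch; 'state->exposure' and
+ * 'new_max' are hypothetical):
+ *
+ *	err = v4l2_ctrl_modify_range(state->exposure, 1, new_max, 1,
+ *				     new_max / 2);
+ */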
+
+static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+
+ if (ctrl == NULL)
+ return -EINVAL;
+
+ v4l2_ctrl_lock(ctrl);
+ list_add_tail(&sev->node, &ctrl->ev_subs);
+ if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
+ (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) {
+ struct v4l2_event ev;
+ u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
+
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
+ changes |= V4L2_EVENT_CTRL_CH_VALUE;
+ fill_event(&ev, ctrl, changes);
+ /* Mark the queue as active, allowing this initial
+ event to be accepted. */
+ sev->elems = elems;
+ v4l2_event_queue_fh(sev->fh, &ev);
+ }
+ v4l2_ctrl_unlock(ctrl);
+ return 0;
+}
+
+static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+
+ v4l2_ctrl_lock(ctrl);
+ list_del(&sev->node);
+ v4l2_ctrl_unlock(ctrl);
+}
+
+void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new)
+{
+ u32 old_changes = old->u.ctrl.changes;
+
+ old->u.ctrl = new->u.ctrl;
+ old->u.ctrl.changes |= old_changes;
+}
+EXPORT_SYMBOL(v4l2_ctrl_replace);
+
+void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new)
+{
+ new->u.ctrl.changes |= old->u.ctrl.changes;
+}
+EXPORT_SYMBOL(v4l2_ctrl_merge);
+
+const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = {
+ .add = v4l2_ctrl_add_event,
+ .del = v4l2_ctrl_del_event,
+ .replace = v4l2_ctrl_replace,
+ .merge = v4l2_ctrl_merge,
+};
+EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops);
+
+int v4l2_ctrl_log_status(struct file *file, void *fh)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_fh *vfh = file->private_data;
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) && vfd->v4l2_dev)
+ v4l2_ctrl_handler_log_status(vfh->ctrl_handler,
+ vfd->v4l2_dev->name);
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_log_status);
+
+int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ if (sub->type == V4L2_EVENT_CTRL)
+ return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
+
+int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (!sd->ctrl_handler)
+ return -EINVAL;
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
+
+unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ if (v4l2_event_pending(fh))
+ return POLLPRI;
+ poll_wait(file, &fh->wait, wait);
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_poll);
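+
+/*
+ * For drivers where all events come from the control framework these
+ * helpers can be plugged in directly (a minimal sketch of the relevant
+ * fields; the structs are otherwise hypothetical):
+ *
+ *	static const struct v4l2_file_operations my_fops = {
+ *		...
+ *		.poll = v4l2_ctrl_poll,
+ *	};
+ *
+ *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
+ *		...
+ *		.vidioc_log_status = v4l2_ctrl_log_status,
+ *		.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ *		.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ *	};
+ */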
diff --git a/kernel/drivers/media/v4l2-core/v4l2-dev.c b/kernel/drivers/media/v4l2-core/v4l2-dev.c
new file mode 100644
index 000000000..71a1b93b0
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-dev.c
@@ -0,0 +1,1008 @@
+/*
+ * Video capture interface for Linux version 2
+ *
+ * A generic video device interface for the LINUX operating system
+ * using a set of device structures/vectors for low level operations.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
+ * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
+ *
+ * Fixes: 20000516 Claudio Matsuoka <claudio@conectiva.com>
+ * - Added procfs support
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#define VIDEO_NUM_DEVICES 256
+#define VIDEO_NAME "video4linux"
+
+/*
+ * sysfs stuff
+ */
+
+static ssize_t index_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(cd);
+
+ return sprintf(buf, "%i\n", vdev->index);
+}
+static DEVICE_ATTR_RO(index);
+
+static ssize_t dev_debug_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(cd);
+
+ return sprintf(buf, "%i\n", vdev->dev_debug);
+}
+
+static ssize_t dev_debug_store(struct device *cd, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct video_device *vdev = to_video_device(cd);
+ int res = 0;
+ u16 value;
+
+ res = kstrtou16(buf, 0, &value);
+ if (res)
+ return res;
+
+ vdev->dev_debug = value;
+ return len;
+}
+static DEVICE_ATTR_RW(dev_debug);
+
+static ssize_t name_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(cd);
+
+ return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *video_device_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_dev_debug.attr,
+ &dev_attr_index.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(video_device);
+
+/*
+ * Active devices
+ */
+static struct video_device *video_device[VIDEO_NUM_DEVICES];
+static DEFINE_MUTEX(videodev_lock);
+static DECLARE_BITMAP(devnode_nums[VFL_TYPE_MAX], VIDEO_NUM_DEVICES);
+
+/* Device node utility functions */
+
+/* Note: these utility functions all assume that vfl_type is in the range
+ [0, VFL_TYPE_MAX-1]. */
+
+#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
+/* Return the bitmap corresponding to vfl_type. */
+static inline unsigned long *devnode_bits(int vfl_type)
+{
+ /* Any types not assigned to fixed minor ranges must be mapped to
+ one single bitmap for the purposes of finding a free node number
+ since all those unassigned types use the same minor range. */
+ int idx = (vfl_type > VFL_TYPE_RADIO) ? VFL_TYPE_MAX - 1 : vfl_type;
+
+ return devnode_nums[idx];
+}
+#else
+/* Return the bitmap corresponding to vfl_type. */
+static inline unsigned long *devnode_bits(int vfl_type)
+{
+ return devnode_nums[vfl_type];
+}
+#endif
+
+/* Mark device node number vdev->num as used */
+static inline void devnode_set(struct video_device *vdev)
+{
+ set_bit(vdev->num, devnode_bits(vdev->vfl_type));
+}
+
+/* Mark device node number vdev->num as unused */
+static inline void devnode_clear(struct video_device *vdev)
+{
+ clear_bit(vdev->num, devnode_bits(vdev->vfl_type));
+}
+
+/* Try to find a free device node number in the range [from, to) */
+static inline int devnode_find(struct video_device *vdev, int from, int to)
+{
+ return find_next_zero_bit(devnode_bits(vdev->vfl_type), to, from);
+}
+
+struct video_device *video_device_alloc(void)
+{
+ return kzalloc(sizeof(struct video_device), GFP_KERNEL);
+}
+EXPORT_SYMBOL(video_device_alloc);
+
+void video_device_release(struct video_device *vdev)
+{
+ kfree(vdev);
+}
+EXPORT_SYMBOL(video_device_release);
+
+void video_device_release_empty(struct video_device *vdev)
+{
+ /* Do nothing */
+ /* Only valid when the video_device struct is statically allocated. */
+}
+EXPORT_SYMBOL(video_device_release_empty);
+
+static inline void video_get(struct video_device *vdev)
+{
+ get_device(&vdev->dev);
+}
+
+static inline void video_put(struct video_device *vdev)
+{
+ put_device(&vdev->dev);
+}
+
+/* Called when the last user of the video device exits. */
+static void v4l2_device_release(struct device *cd)
+{
+ struct video_device *vdev = to_video_device(cd);
+ struct v4l2_device *v4l2_dev = vdev->v4l2_dev;
+
+ mutex_lock(&videodev_lock);
+ if (WARN_ON(video_device[vdev->minor] != vdev)) {
+ /* should not happen */
+ mutex_unlock(&videodev_lock);
+ return;
+ }
+
+ /* Free up this device for reuse */
+ video_device[vdev->minor] = NULL;
+
+ /* Delete the cdev on this minor as well */
+ cdev_del(vdev->cdev);
+ /* Just in case some driver tries to access this from
+ the release() callback. */
+ vdev->cdev = NULL;
+
+ /* Mark device node number as free */
+ devnode_clear(vdev);
+
+ mutex_unlock(&videodev_lock);
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (v4l2_dev->mdev &&
+ vdev->vfl_type != VFL_TYPE_SUBDEV)
+ media_device_unregister_entity(&vdev->entity);
+#endif
+
+ /* Do not call v4l2_device_put if there is no release callback set.
+ * Drivers that have no v4l2_device release callback might free the
+ * v4l2_dev instance in the video_device release callback below, so we
+ * must perform this check here.
+ *
+ * TODO: In the long run all drivers that use v4l2_device should use the
+ * v4l2_device release callback. This check will then be unnecessary.
+ */
+ if (v4l2_dev->release == NULL)
+ v4l2_dev = NULL;
+
+ /* Release video_device and perform other
+ cleanups as needed. */
+ vdev->release(vdev);
+
+ /* Decrease v4l2_device refcount */
+ if (v4l2_dev)
+ v4l2_device_put(v4l2_dev);
+}
+
+static struct class video_class = {
+ .name = VIDEO_NAME,
+ .dev_groups = video_device_groups,
+};
+
+struct video_device *video_devdata(struct file *file)
+{
+ return video_device[iminor(file_inode(file))];
+}
+EXPORT_SYMBOL(video_devdata);
+
+
+/* Priority handling */
+
+static inline bool prio_is_valid(enum v4l2_priority prio)
+{
+ return prio == V4L2_PRIORITY_BACKGROUND ||
+ prio == V4L2_PRIORITY_INTERACTIVE ||
+ prio == V4L2_PRIORITY_RECORD;
+}
+
+void v4l2_prio_init(struct v4l2_prio_state *global)
+{
+ memset(global, 0, sizeof(*global));
+}
+EXPORT_SYMBOL(v4l2_prio_init);
+
+int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local,
+ enum v4l2_priority new)
+{
+ if (!prio_is_valid(new))
+ return -EINVAL;
+ if (*local == new)
+ return 0;
+
+ atomic_inc(&global->prios[new]);
+ if (prio_is_valid(*local))
+ atomic_dec(&global->prios[*local]);
+ *local = new;
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_prio_change);
+
+void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local)
+{
+ v4l2_prio_change(global, local, V4L2_PRIORITY_DEFAULT);
+}
+EXPORT_SYMBOL(v4l2_prio_open);
+
+void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local)
+{
+ if (prio_is_valid(local))
+ atomic_dec(&global->prios[local]);
+}
+EXPORT_SYMBOL(v4l2_prio_close);
+
+enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global)
+{
+ if (atomic_read(&global->prios[V4L2_PRIORITY_RECORD]) > 0)
+ return V4L2_PRIORITY_RECORD;
+ if (atomic_read(&global->prios[V4L2_PRIORITY_INTERACTIVE]) > 0)
+ return V4L2_PRIORITY_INTERACTIVE;
+ if (atomic_read(&global->prios[V4L2_PRIORITY_BACKGROUND]) > 0)
+ return V4L2_PRIORITY_BACKGROUND;
+ return V4L2_PRIORITY_UNSET;
+}
+EXPORT_SYMBOL(v4l2_prio_max);
+
+int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local)
+{
+ return (local < v4l2_prio_max(global)) ? -EBUSY : 0;
+}
+EXPORT_SYMBOL(v4l2_prio_check);
+
+
+static ssize_t v4l2_read(struct file *filp, char __user *buf,
+ size_t sz, loff_t *off)
+{
+ struct video_device *vdev = video_devdata(filp);
+ int ret = -ENODEV;
+
+ if (!vdev->fops->read)
+ return -EINVAL;
+ if (video_is_registered(vdev))
+ ret = vdev->fops->read(filp, buf, sz, off);
+ if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) &&
+ (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING))
+ printk(KERN_DEBUG "%s: read: %zd (%d)\n",
+ video_device_node_name(vdev), sz, ret);
+ return ret;
+}
+
+static ssize_t v4l2_write(struct file *filp, const char __user *buf,
+ size_t sz, loff_t *off)
+{
+ struct video_device *vdev = video_devdata(filp);
+ int ret = -ENODEV;
+
+ if (!vdev->fops->write)
+ return -EINVAL;
+ if (video_is_registered(vdev))
+ ret = vdev->fops->write(filp, buf, sz, off);
+ if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) &&
+ (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING))
+ printk(KERN_DEBUG "%s: write: %zd (%d)\n",
+ video_device_node_name(vdev), sz, ret);
+ return ret;
+}
+
+static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
+{
+ struct video_device *vdev = video_devdata(filp);
+ unsigned int res = POLLERR | POLLHUP;
+
+ if (!vdev->fops->poll)
+ return DEFAULT_POLLMASK;
+ if (video_is_registered(vdev))
+ res = vdev->fops->poll(filp, poll);
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_POLL)
+ printk(KERN_DEBUG "%s: poll: %08x\n",
+ video_device_node_name(vdev), res);
+ return res;
+}
+
+static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(filp);
+ int ret = -ENODEV;
+
+ if (vdev->fops->unlocked_ioctl) {
+ struct mutex *lock = v4l2_ioctl_get_lock(vdev, cmd);
+
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ if (video_is_registered(vdev))
+ ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
+ if (lock)
+ mutex_unlock(lock);
+ } else
+ ret = -ENOTTY;
+
+ return ret;
+}
+
+#ifdef CONFIG_MMU
+#define v4l2_get_unmapped_area NULL
+#else
+static unsigned long v4l2_get_unmapped_area(struct file *filp,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct video_device *vdev = video_devdata(filp);
+ int ret;
+
+ if (!vdev->fops->get_unmapped_area)
+ return -ENOSYS;
+ if (!video_is_registered(vdev))
+ return -ENODEV;
+ ret = vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
+ printk(KERN_DEBUG "%s: get_unmapped_area (%d)\n",
+ video_device_node_name(vdev), ret);
+ return ret;
+}
+#endif
+
+static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
+{
+ struct video_device *vdev = video_devdata(filp);
+ int ret = -ENODEV;
+
+ if (!vdev->fops->mmap)
+ return -ENODEV;
+ if (video_is_registered(vdev))
+ ret = vdev->fops->mmap(filp, vm);
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
+ printk(KERN_DEBUG "%s: mmap (%d)\n",
+ video_device_node_name(vdev), ret);
+ return ret;
+}
+
+/* Override for the open function */
+static int v4l2_open(struct inode *inode, struct file *filp)
+{
+ struct video_device *vdev;
+ int ret = 0;
+
+ /* Check if the video device is available */
+ mutex_lock(&videodev_lock);
+ vdev = video_devdata(filp);
+ /* return ENODEV if the video device has already been removed. */
+ if (vdev == NULL || !video_is_registered(vdev)) {
+ mutex_unlock(&videodev_lock);
+ return -ENODEV;
+ }
+ /* and increase the device refcount */
+ video_get(vdev);
+ mutex_unlock(&videodev_lock);
+ if (vdev->fops->open) {
+ if (video_is_registered(vdev))
+ ret = vdev->fops->open(filp);
+ else
+ ret = -ENODEV;
+ }
+
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
+ printk(KERN_DEBUG "%s: open (%d)\n",
+ video_device_node_name(vdev), ret);
+ /* decrease the refcount in case of an error */
+ if (ret)
+ video_put(vdev);
+ return ret;
+}
+
+/* Override for the release function */
+static int v4l2_release(struct inode *inode, struct file *filp)
+{
+ struct video_device *vdev = video_devdata(filp);
+ int ret = 0;
+
+ if (vdev->fops->release)
+ ret = vdev->fops->release(filp);
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
+ printk(KERN_DEBUG "%s: release\n",
+ video_device_node_name(vdev));
+
+ /* decrease the refcount unconditionally since the release()
+ return value is ignored. */
+ video_put(vdev);
+ return ret;
+}
+
+static const struct file_operations v4l2_fops = {
+ .owner = THIS_MODULE,
+ .read = v4l2_read,
+ .write = v4l2_write,
+ .open = v4l2_open,
+ .get_unmapped_area = v4l2_get_unmapped_area,
+ .mmap = v4l2_mmap,
+ .unlocked_ioctl = v4l2_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = v4l2_compat_ioctl32,
+#endif
+ .release = v4l2_release,
+ .poll = v4l2_poll,
+ .llseek = no_llseek,
+};
+
+/**
+ * get_index - assign stream index number based on v4l2_dev
+ * @vdev: video_device to assign index number to, vdev->v4l2_dev should be assigned
+ *
+ * Note that when this is called the new device has not yet been registered
+ * in the video_device array, but it was able to obtain a minor number.
+ *
+ * This means that we can always obtain a free stream index number since
+ * the worst case scenario is that there are VIDEO_NUM_DEVICES - 1 slots of
+ * the video_device array in use.
+ *
+ * Returns a free index number.
+ */
+static int get_index(struct video_device *vdev)
+{
+ /* This can be static since this function is called with the global
+ videodev_lock held. */
+ static DECLARE_BITMAP(used, VIDEO_NUM_DEVICES);
+ int i;
+
+ bitmap_zero(used, VIDEO_NUM_DEVICES);
+
+ for (i = 0; i < VIDEO_NUM_DEVICES; i++) {
+ if (video_device[i] != NULL &&
+ video_device[i]->v4l2_dev == vdev->v4l2_dev) {
+ set_bit(video_device[i]->index, used);
+ }
+ }
+
+ return find_first_zero_bit(used, VIDEO_NUM_DEVICES);
+}
+
+#define SET_VALID_IOCTL(ops, cmd, op) \
+ if (ops->op) \
+ set_bit(_IOC_NR(cmd), valid_ioctls)
+
+/* This determines which ioctls are actually implemented in the driver.
+ It's a one-time thing which simplifies video_ioctl2 as it can just do
+ a bit test.
+
+ Note that drivers can override this by setting bits to 1 in
+ vdev->valid_ioctls. If an ioctl is marked as 1 when this function is
+ called, then that ioctl will actually be marked as unimplemented.
+
+ It does that by first setting up the local valid_ioctls bitmap, and
+ at the end doing:
+
+ vdev->valid_ioctls = valid_ioctls & ~(vdev->valid_ioctls)
+ */
+static void determine_valid_ioctls(struct video_device *vdev)
+{
+ DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
+ const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
+ bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
+ bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
+ bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
+ bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
+ bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
+
+ bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE);
+
+ /* vfl_type and vfl_dir independent ioctls */
+
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap);
+ set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
+
+ /* Note: the control handler can also be passed through the filehandle,
+ and that can't be tested here. If the bit for these control ioctls
+ is set, then the ioctl is valid. But if it is 0, then it can still
+ be valid if the filehandle provides the control handler. */
+ if (vdev->ctrl_handler || ops->vidioc_queryctrl)
+ set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl)
+ set_bit(_IOC_NR(VIDIOC_QUERY_EXT_CTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_g_ctrl || ops->vidioc_g_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_s_ctrl || ops->vidioc_s_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_try_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_querymenu)
+ set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_DBG_G_REGISTER), valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_DBG_S_REGISTER), valid_ioctls);
+#endif
+ /* yes, really vidioc_subscribe_event */
+ SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
+ SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
+ SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event);
+ if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
+ set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
+
+ if (is_vid) {
+ /* video specific ioctls */
+ if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
+ ops->vidioc_enum_fmt_vid_cap_mplane ||
+ ops->vidioc_enum_fmt_vid_overlay)) ||
+ (is_tx && (ops->vidioc_enum_fmt_vid_out ||
+ ops->vidioc_enum_fmt_vid_out_mplane)))
+ set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
+ if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
+ ops->vidioc_g_fmt_vid_cap_mplane ||
+ ops->vidioc_g_fmt_vid_overlay)) ||
+ (is_tx && (ops->vidioc_g_fmt_vid_out ||
+ ops->vidioc_g_fmt_vid_out_mplane ||
+ ops->vidioc_g_fmt_vid_out_overlay)))
+ set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ if ((is_rx && (ops->vidioc_s_fmt_vid_cap ||
+ ops->vidioc_s_fmt_vid_cap_mplane ||
+ ops->vidioc_s_fmt_vid_overlay)) ||
+ (is_tx && (ops->vidioc_s_fmt_vid_out ||
+ ops->vidioc_s_fmt_vid_out_mplane ||
+ ops->vidioc_s_fmt_vid_out_overlay)))
+ set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ if ((is_rx && (ops->vidioc_try_fmt_vid_cap ||
+ ops->vidioc_try_fmt_vid_cap_mplane ||
+ ops->vidioc_try_fmt_vid_overlay)) ||
+ (is_tx && (ops->vidioc_try_fmt_vid_out ||
+ ops->vidioc_try_fmt_vid_out_mplane ||
+ ops->vidioc_try_fmt_vid_out_overlay)))
+ set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FBUF, vidioc_s_fbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp);
+ SET_VALID_IOCTL(ops, VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp);
+ SET_VALID_IOCTL(ops, VIDIOC_G_ENC_INDEX, vidioc_g_enc_index);
+ SET_VALID_IOCTL(ops, VIDIOC_ENCODER_CMD, vidioc_encoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
+ if (ops->vidioc_g_crop || ops->vidioc_g_selection)
+ set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
+ if (ops->vidioc_s_crop || ops->vidioc_s_selection)
+ set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
+ SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
+ if (ops->vidioc_cropcap || ops->vidioc_g_selection)
+ set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
+ } else if (is_vbi) {
+ /* vbi specific ioctls */
+ if ((is_rx && (ops->vidioc_g_fmt_vbi_cap ||
+ ops->vidioc_g_fmt_sliced_vbi_cap)) ||
+ (is_tx && (ops->vidioc_g_fmt_vbi_out ||
+ ops->vidioc_g_fmt_sliced_vbi_out)))
+ set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ if ((is_rx && (ops->vidioc_s_fmt_vbi_cap ||
+ ops->vidioc_s_fmt_sliced_vbi_cap)) ||
+ (is_tx && (ops->vidioc_s_fmt_vbi_out ||
+ ops->vidioc_s_fmt_sliced_vbi_out)))
+ set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ if ((is_rx && (ops->vidioc_try_fmt_vbi_cap ||
+ ops->vidioc_try_fmt_sliced_vbi_cap)) ||
+ (is_tx && (ops->vidioc_try_fmt_vbi_out ||
+ ops->vidioc_try_fmt_sliced_vbi_out)))
+ set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
+ } else if (is_sdr) {
+ /* SDR specific ioctls */
+ if (ops->vidioc_enum_fmt_sdr_cap)
+ set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
+ if (ops->vidioc_g_fmt_sdr_cap)
+ set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ if (ops->vidioc_s_fmt_sdr_cap)
+ set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ if (ops->vidioc_try_fmt_sdr_cap)
+ set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ }
+
+ if (is_vid || is_vbi || is_sdr) {
+ /* ioctls valid for video, vbi or sdr */
+ SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
+ SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
+ SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
+ SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
+ SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
+ }
+
+ if (is_vid || is_vbi) {
+ /* ioctls valid for video or vbi */
+ if (ops->vidioc_s_std)
+ set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
+ SET_VALID_IOCTL(ops, VIDIOC_G_STD, vidioc_g_std);
+ if (is_rx) {
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio);
+ SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio);
+ SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid);
+ }
+ if (is_tx) {
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
+ SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
+ SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDOUT, vidioc_enumaudout);
+ SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
+ SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
+ }
+ if (ops->vidioc_g_parm || (vdev->vfl_type == VFL_TYPE_GRABBER &&
+ ops->vidioc_g_std))
+ set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
+ SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid);
+ }
+ if (is_tx && (is_radio || is_sdr)) {
+ /* radio transmitter only ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
+ SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
+ }
+ if (is_rx) {
+ /* receiver only ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
+ SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
+ SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek);
+ }
+
+ bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls,
+ BASE_VIDIOC_PRIVATE);
+}
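+
+/*
+ * Drivers use this override mechanism through v4l2_disable_ioctl() from
+ * include/media/v4l2-dev.h, called before registering the node (a minimal
+ * sketch):
+ *
+ *	v4l2_disable_ioctl(vdev, VIDIOC_S_STD);
+ *	err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ */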
+
+/**
+ * __video_register_device - register video4linux devices
+ * @vdev: video device structure we want to register
+ * @type: type of device to register
+ * @nr: which device node number (0 == /dev/video0, 1 == /dev/video1, ...
+ * -1 == first free)
+ * @warn_if_nr_in_use: warn if the desired device node number
+ * was already in use and another number was chosen instead.
+ * @owner: module that owns the video device node
+ *
+ * The registration code assigns minor numbers and device node numbers
+ * based on the requested type and registers the new device node with
+ * the kernel.
+ *
+ * This function assumes that struct video_device was zeroed when it
+ * was allocated and does not contain any stale data.
+ *
+ * An error is returned if no free minor or device node number could be
+ * found, or if the registration of the device node failed.
+ *
+ * Zero is returned on success.
+ *
+ * Valid types are
+ *
+ * %VFL_TYPE_GRABBER - A frame grabber
+ *
+ * %VFL_TYPE_VBI - Vertical blank data (undecoded)
+ *
+ * %VFL_TYPE_RADIO - A radio card
+ *
+ * %VFL_TYPE_SUBDEV - A subdevice
+ *
+ * %VFL_TYPE_SDR - Software Defined Radio
+ */
+int __video_register_device(struct video_device *vdev, int type, int nr,
+ int warn_if_nr_in_use, struct module *owner)
+{
+ int i = 0;
+ int ret;
+ int minor_offset = 0;
+ int minor_cnt = VIDEO_NUM_DEVICES;
+ const char *name_base;
+
+ /* A minor value of -1 marks this video device as never
+ having been registered */
+ vdev->minor = -1;
+
+ /* the release callback MUST be present */
+ if (WARN_ON(!vdev->release))
+ return -EINVAL;
+ /* the v4l2_dev pointer MUST be present */
+ if (WARN_ON(!vdev->v4l2_dev))
+ return -EINVAL;
+
+ /* v4l2_fh support */
+ spin_lock_init(&vdev->fh_lock);
+ INIT_LIST_HEAD(&vdev->fh_list);
+
+ /* Part 1: check device type */
+ switch (type) {
+ case VFL_TYPE_GRABBER:
+ name_base = "video";
+ break;
+ case VFL_TYPE_VBI:
+ name_base = "vbi";
+ break;
+ case VFL_TYPE_RADIO:
+ name_base = "radio";
+ break;
+ case VFL_TYPE_SUBDEV:
+ name_base = "v4l-subdev";
+ break;
+ case VFL_TYPE_SDR:
+ /* Use device name 'swradio' because 'sdr' was already taken. */
+ name_base = "swradio";
+ break;
+ default:
+ printk(KERN_ERR "%s called with unknown type: %d\n",
+ __func__, type);
+ return -EINVAL;
+ }
+
+ vdev->vfl_type = type;
+ vdev->cdev = NULL;
+ if (vdev->dev_parent == NULL)
+ vdev->dev_parent = vdev->v4l2_dev->dev;
+ if (vdev->ctrl_handler == NULL)
+ vdev->ctrl_handler = vdev->v4l2_dev->ctrl_handler;
+ /* If the prio state pointer is NULL, then use the v4l2_device
+ prio state. */
+ if (vdev->prio == NULL)
+ vdev->prio = &vdev->v4l2_dev->prio;
+
+ /* Part 2: find a free minor, device node number and device index. */
+#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
+ /* Keep the ranges for the first four types for historical
+ * reasons.
+ * Newer devices (not yet in place) should use the range
+ * of 128-191 and just pick the first free minor there
+ * (new style). */
+ switch (type) {
+ case VFL_TYPE_GRABBER:
+ minor_offset = 0;
+ minor_cnt = 64;
+ break;
+ case VFL_TYPE_RADIO:
+ minor_offset = 64;
+ minor_cnt = 64;
+ break;
+ case VFL_TYPE_VBI:
+ minor_offset = 224;
+ minor_cnt = 32;
+ break;
+ default:
+ minor_offset = 128;
+ minor_cnt = 64;
+ break;
+ }
+#endif
+
+ /* Pick a device node number */
+ mutex_lock(&videodev_lock);
+ nr = devnode_find(vdev, nr == -1 ? 0 : nr, minor_cnt);
+ if (nr == minor_cnt)
+ nr = devnode_find(vdev, 0, minor_cnt);
+ if (nr == minor_cnt) {
+ printk(KERN_ERR "could not get a free device node number\n");
+ mutex_unlock(&videodev_lock);
+ return -ENFILE;
+ }
+#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
+ /* 1-on-1 mapping of device node number to minor number */
+ i = nr;
+#else
+ /* The device node number and minor numbers are independent, so
+ we just find the first free minor number. */
+ for (i = 0; i < VIDEO_NUM_DEVICES; i++)
+ if (video_device[i] == NULL)
+ break;
+ if (i == VIDEO_NUM_DEVICES) {
+ mutex_unlock(&videodev_lock);
+ printk(KERN_ERR "could not get a free minor\n");
+ return -ENFILE;
+ }
+#endif
+ vdev->minor = i + minor_offset;
+ vdev->num = nr;
+ devnode_set(vdev);
+
+ /* Should not happen since we thought this minor was free */
+ WARN_ON(video_device[vdev->minor] != NULL);
+ vdev->index = get_index(vdev);
+ video_device[vdev->minor] = vdev;
+ mutex_unlock(&videodev_lock);
+
+ if (vdev->ioctl_ops)
+ determine_valid_ioctls(vdev);
+
+ /* Part 3: Initialize the character device */
+ vdev->cdev = cdev_alloc();
+ if (vdev->cdev == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ vdev->cdev->ops = &v4l2_fops;
+ vdev->cdev->owner = owner;
+ ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: cdev_add failed\n", __func__);
+ kfree(vdev->cdev);
+ vdev->cdev = NULL;
+ goto cleanup;
+ }
+
+ /* Part 4: register the device with sysfs */
+ vdev->dev.class = &video_class;
+ vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
+ vdev->dev.parent = vdev->dev_parent;
+ dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
+ ret = device_register(&vdev->dev);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: device_register failed\n", __func__);
+ goto cleanup;
+ }
+ /* Register the release callback that will be called when the last
+ reference to the device goes away. */
+ vdev->dev.release = v4l2_device_release;
+
+ if (nr != -1 && nr != vdev->num && warn_if_nr_in_use)
+ printk(KERN_WARNING "%s: requested %s%d, got %s\n", __func__,
+ name_base, nr, video_device_node_name(vdev));
+
+ /* Increase v4l2_device refcount */
+ v4l2_device_get(vdev->v4l2_dev);
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ /* Part 5: Register the entity. */
+ if (vdev->v4l2_dev->mdev &&
+ vdev->vfl_type != VFL_TYPE_SUBDEV) {
+ vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
+ vdev->entity.name = vdev->name;
+ vdev->entity.info.dev.major = VIDEO_MAJOR;
+ vdev->entity.info.dev.minor = vdev->minor;
+ ret = media_device_register_entity(vdev->v4l2_dev->mdev,
+ &vdev->entity);
+ if (ret < 0)
+ printk(KERN_WARNING
+ "%s: media_device_register_entity failed\n",
+ __func__);
+ }
+#endif
+ /* Part 6: Activate this minor. The char device can now be used. */
+ set_bit(V4L2_FL_REGISTERED, &vdev->flags);
+
+ return 0;
+
+cleanup:
+ mutex_lock(&videodev_lock);
+ if (vdev->cdev)
+ cdev_del(vdev->cdev);
+ video_device[vdev->minor] = NULL;
+ devnode_clear(vdev);
+ mutex_unlock(&videodev_lock);
+ /* Mark this video device as never having been registered. */
+ vdev->minor = -1;
+ return ret;
+}
+EXPORT_SYMBOL(__video_register_device);
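+
+/*
+ * Illustrative sketch: a minimal capture driver registering a video node.
+ * The driver-side names (my_vdev_release, my_register_video_node, my_fops,
+ * my_ioctl_ops) are hypothetical; the #if 0 guard keeps the sketch out of
+ * the build.
+ */
+#if 0
+static void my_vdev_release(struct video_device *vdev)
+{
+	/* Matches the video_device_alloc() below. */
+	video_device_release(vdev);
+}
+
+static int my_register_video_node(struct v4l2_device *v4l2_dev,
+				  const struct v4l2_file_operations *my_fops,
+				  const struct v4l2_ioctl_ops *my_ioctl_ops)
+{
+	struct video_device *vdev = video_device_alloc();
+
+	if (!vdev)
+		return -ENOMEM;
+	strlcpy(vdev->name, "my-capture", sizeof(vdev->name));
+	vdev->v4l2_dev = v4l2_dev;		/* checked by WARN_ON above */
+	vdev->release = my_vdev_release;	/* checked by WARN_ON above */
+	vdev->fops = my_fops;
+	vdev->ioctl_ops = my_ioctl_ops;
+	/* nr == -1: let the core pick the first free /dev/videoX node. */
+	return video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+}
+#endif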
+
+/**
+ * video_unregister_device - unregister a video4linux device
+ * @vdev: the device to unregister
+ *
+ * This unregisters the passed device. Future open calls will
+ * be met with errors.
+ */
+void video_unregister_device(struct video_device *vdev)
+{
+ /* Check if vdev was ever registered at all */
+ if (!vdev || !video_is_registered(vdev))
+ return;
+
+ mutex_lock(&videodev_lock);
+ /* This must be in a critical section to prevent a race with v4l2_open.
+ * Once this bit has been cleared video_get may never be called again.
+ */
+ clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
+ mutex_unlock(&videodev_lock);
+ device_unregister(&vdev->dev);
+}
+EXPORT_SYMBOL(video_unregister_device);
+
+/*
+ * Initialise video for linux
+ */
+static int __init videodev_init(void)
+{
+ dev_t dev = MKDEV(VIDEO_MAJOR, 0);
+ int ret;
+
+ printk(KERN_INFO "Linux video capture interface: v2.00\n");
+ ret = register_chrdev_region(dev, VIDEO_NUM_DEVICES, VIDEO_NAME);
+ if (ret < 0) {
+ printk(KERN_WARNING "videodev: unable to get major %d\n",
+ VIDEO_MAJOR);
+ return ret;
+ }
+
+ ret = class_register(&video_class);
+ if (ret < 0) {
+ unregister_chrdev_region(dev, VIDEO_NUM_DEVICES);
+ printk(KERN_WARNING "video_dev: class_register failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void __exit videodev_exit(void)
+{
+ dev_t dev = MKDEV(VIDEO_MAJOR, 0);
+
+ class_unregister(&video_class);
+ unregister_chrdev_region(dev, VIDEO_NUM_DEVICES);
+}
+
+subsys_initcall(videodev_init);
+module_exit(videodev_exit);
+
+MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_DESCRIPTION("Device registrar for Video4Linux drivers v2");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR);
diff --git a/kernel/drivers/media/v4l2-core/v4l2-device.c b/kernel/drivers/media/v4l2-core/v4l2-device.c
new file mode 100644
index 000000000..5b0a30b92
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-device.c
@@ -0,0 +1,296 @@
+/*
+ V4L2 device support.
+
+ Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#if defined(CONFIG_SPI)
+#include <linux/spi/spi.h>
+#endif
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
+{
+ if (v4l2_dev == NULL)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&v4l2_dev->subdevs);
+ spin_lock_init(&v4l2_dev->lock);
+ v4l2_prio_init(&v4l2_dev->prio);
+ kref_init(&v4l2_dev->ref);
+ get_device(dev);
+ v4l2_dev->dev = dev;
+ if (dev == NULL) {
+ /* If dev == NULL, then name must be filled in by the caller */
+ if (WARN_ON(!v4l2_dev->name[0]))
+ return -EINVAL;
+ return 0;
+ }
+
+ /* Set name to driver name + device name if it is empty. */
+ if (!v4l2_dev->name[0])
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s %s",
+ dev->driver->name, dev_name(dev));
+ if (!dev_get_drvdata(dev))
+ dev_set_drvdata(dev, v4l2_dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_device_register);
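+
+/*
+ * Illustrative sketch: typical probe-time use of v4l2_device_register().
+ * The platform-driver context and all names are hypothetical, and the
+ * sketch assumes <linux/platform_device.h>.
+ */
+#if 0
+static int my_probe(struct platform_device *pdev)
+{
+	struct v4l2_device *v4l2_dev;
+
+	v4l2_dev = devm_kzalloc(&pdev->dev, sizeof(*v4l2_dev), GFP_KERNEL);
+	if (!v4l2_dev)
+		return -ENOMEM;
+	/*
+	 * name[0] is left at '\0', so the core derives the name from the
+	 * driver and device names and stores v4l2_dev as drvdata.
+	 */
+	return v4l2_device_register(&pdev->dev, v4l2_dev);
+}
+#endif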
+
+static void v4l2_device_release(struct kref *ref)
+{
+ struct v4l2_device *v4l2_dev =
+ container_of(ref, struct v4l2_device, ref);
+
+ if (v4l2_dev->release)
+ v4l2_dev->release(v4l2_dev);
+}
+
+int v4l2_device_put(struct v4l2_device *v4l2_dev)
+{
+ return kref_put(&v4l2_dev->ref, v4l2_device_release);
+}
+EXPORT_SYMBOL_GPL(v4l2_device_put);
+
+int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
+ atomic_t *instance)
+{
+ int num = atomic_inc_return(instance) - 1;
+ int len = strlen(basename);
+
+ if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
+ "%s-%d", basename, num);
+ else
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
+ "%s%d", basename, num);
+ return num;
+}
+EXPORT_SYMBOL_GPL(v4l2_device_set_name);
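+
+/*
+ * Illustrative sketch of the naming rule above, with hypothetical names:
+ * a basename ending in a digit gets a "-" separator, anything else has
+ * the instance number appended directly.
+ */
+#if 0
+static atomic_t my_instance = ATOMIC_INIT(0);
+
+static void my_set_name(struct v4l2_device *v4l2_dev)
+{
+	/* First call yields "mydev0", the next "mydev1", and so on. */
+	v4l2_device_set_name(v4l2_dev, "mydev", &my_instance);
+	/* A basename like "saa7134" ends in a digit: "saa7134-0". */
+}
+#endif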
+
+void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
+{
+ if (v4l2_dev->dev == NULL)
+ return;
+
+ if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
+ dev_set_drvdata(v4l2_dev->dev, NULL);
+ put_device(v4l2_dev->dev);
+ v4l2_dev->dev = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_device_disconnect);
+
+void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
+{
+ struct v4l2_subdev *sd, *next;
+
+	/* Just return if v4l2_dev is NULL or if it was already
+	 * unregistered. */
+ if (v4l2_dev == NULL || !v4l2_dev->name[0])
+ return;
+ v4l2_device_disconnect(v4l2_dev);
+
+ /* Unregister subdevs */
+ list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) {
+ v4l2_device_unregister_subdev(sd);
+#if IS_ENABLED(CONFIG_I2C)
+ if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* We need to unregister the i2c client explicitly.
+ We cannot rely on i2c_del_adapter to always
+ unregister clients for us, since if the i2c bus
+ is a platform bus, then it is never deleted. */
+ if (client)
+ i2c_unregister_device(client);
+ continue;
+ }
+#endif
+#if defined(CONFIG_SPI)
+ if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) {
+ struct spi_device *spi = v4l2_get_subdevdata(sd);
+
+ if (spi)
+ spi_unregister_device(spi);
+ continue;
+ }
+#endif
+ }
+ /* Mark as unregistered, thus preventing duplicate unregistrations */
+ v4l2_dev->name[0] = '\0';
+}
+EXPORT_SYMBOL_GPL(v4l2_device_unregister);
+
+int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
+ struct v4l2_subdev *sd)
+{
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_entity *entity = &sd->entity;
+#endif
+ int err;
+
+ /* Check for valid input */
+ if (v4l2_dev == NULL || sd == NULL || !sd->name[0])
+ return -EINVAL;
+
+ /* Warn if we apparently re-register a subdev */
+ WARN_ON(sd->v4l2_dev != NULL);
+
+ /*
+	 * The reason to acquire the module here is to avoid unloading
+	 * a sub-device's module while that sub-device is still registered
+	 * to a media device. To make it possible to unload modules for
+	 * media devices that also register sub-devices, do not
+	 * try_module_get() such sub-device owners.
+ */
+ sd->owner_v4l2_dev = v4l2_dev->dev && v4l2_dev->dev->driver &&
+ sd->owner == v4l2_dev->dev->driver->owner;
+
+ if (!sd->owner_v4l2_dev && !try_module_get(sd->owner))
+ return -ENODEV;
+
+ sd->v4l2_dev = v4l2_dev;
+ if (sd->internal_ops && sd->internal_ops->registered) {
+ err = sd->internal_ops->registered(sd);
+ if (err)
+ goto error_module;
+ }
+
+ /* This just returns 0 if either of the two args is NULL */
+ err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL);
+ if (err)
+ goto error_unregister;
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ /* Register the entity. */
+ if (v4l2_dev->mdev) {
+ err = media_device_register_entity(v4l2_dev->mdev, entity);
+ if (err < 0)
+ goto error_unregister;
+ }
+#endif
+
+ spin_lock(&v4l2_dev->lock);
+ list_add_tail(&sd->list, &v4l2_dev->subdevs);
+ spin_unlock(&v4l2_dev->lock);
+
+ return 0;
+
+error_unregister:
+ if (sd->internal_ops && sd->internal_ops->unregistered)
+ sd->internal_ops->unregistered(sd);
+error_module:
+ if (!sd->owner_v4l2_dev)
+ module_put(sd->owner);
+ sd->v4l2_dev = NULL;
+ return err;
+}
+EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
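+
+/*
+ * Illustrative sketch: a bridge driver handing an already initialized
+ * platform sub-device to the core. All names are hypothetical.
+ */
+#if 0
+static int my_attach_subdev(struct v4l2_device *v4l2_dev,
+			    struct v4l2_subdev *sd)
+{
+	int err = v4l2_device_register_subdev(v4l2_dev, sd);
+
+	if (err)
+		return err;	/* e.g. -ENODEV if the owner module is gone */
+	/* Create /dev/v4l-subdevX nodes for subdevs that requested one. */
+	return v4l2_device_register_subdev_nodes(v4l2_dev);
+}
+#endif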
+
+static void v4l2_device_release_subdev_node(struct video_device *vdev)
+{
+ struct v4l2_subdev *sd = video_get_drvdata(vdev);
+ sd->devnode = NULL;
+ kfree(vdev);
+}
+
+int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
+{
+ struct video_device *vdev;
+ struct v4l2_subdev *sd;
+ int err;
+
+ /* Register a device node for every subdev marked with the
+ * V4L2_SUBDEV_FL_HAS_DEVNODE flag.
+ */
+ list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
+ continue;
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev) {
+ err = -ENOMEM;
+ goto clean_up;
+ }
+
+ video_set_drvdata(vdev, sd);
+ strlcpy(vdev->name, sd->name, sizeof(vdev->name));
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->fops = &v4l2_subdev_fops;
+ vdev->release = v4l2_device_release_subdev_node;
+ vdev->ctrl_handler = sd->ctrl_handler;
+ err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
+ sd->owner);
+ if (err < 0) {
+ kfree(vdev);
+ goto clean_up;
+ }
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ sd->entity.info.dev.major = VIDEO_MAJOR;
+ sd->entity.info.dev.minor = vdev->minor;
+#endif
+ sd->devnode = vdev;
+ }
+ return 0;
+
+clean_up:
+ list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
+ if (!sd->devnode)
+ break;
+ video_unregister_device(sd->devnode);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(v4l2_device_register_subdev_nodes);
+
+void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
+{
+ struct v4l2_device *v4l2_dev;
+
+ /* return if it isn't registered */
+ if (sd == NULL || sd->v4l2_dev == NULL)
+ return;
+
+ v4l2_dev = sd->v4l2_dev;
+
+ spin_lock(&v4l2_dev->lock);
+ list_del(&sd->list);
+ spin_unlock(&v4l2_dev->lock);
+
+ if (sd->internal_ops && sd->internal_ops->unregistered)
+ sd->internal_ops->unregistered(sd);
+ sd->v4l2_dev = NULL;
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (v4l2_dev->mdev) {
+ media_entity_remove_links(&sd->entity);
+ media_device_unregister_entity(&sd->entity);
+ }
+#endif
+ video_unregister_device(sd->devnode);
+ if (!sd->owner_v4l2_dev)
+ module_put(sd->owner);
+}
+EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev);
diff --git a/kernel/drivers/media/v4l2-core/v4l2-dv-timings.c b/kernel/drivers/media/v4l2-core/v4l2-dv-timings.c
new file mode 100644
index 000000000..c0e96382f
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -0,0 +1,632 @@
+/*
+ * v4l2-dv-timings - dv-timings helper functions
+ *
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-dv-timings.h>
+
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
+MODULE_LICENSE("GPL");
+
+const struct v4l2_dv_timings v4l2_dv_timings_presets[] = {
+ V4L2_DV_BT_CEA_640X480P59_94,
+ V4L2_DV_BT_CEA_720X480I59_94,
+ V4L2_DV_BT_CEA_720X480P59_94,
+ V4L2_DV_BT_CEA_720X576I50,
+ V4L2_DV_BT_CEA_720X576P50,
+ V4L2_DV_BT_CEA_1280X720P24,
+ V4L2_DV_BT_CEA_1280X720P25,
+ V4L2_DV_BT_CEA_1280X720P30,
+ V4L2_DV_BT_CEA_1280X720P50,
+ V4L2_DV_BT_CEA_1280X720P60,
+ V4L2_DV_BT_CEA_1920X1080P24,
+ V4L2_DV_BT_CEA_1920X1080P25,
+ V4L2_DV_BT_CEA_1920X1080P30,
+ V4L2_DV_BT_CEA_1920X1080I50,
+ V4L2_DV_BT_CEA_1920X1080P50,
+ V4L2_DV_BT_CEA_1920X1080I60,
+ V4L2_DV_BT_CEA_1920X1080P60,
+ V4L2_DV_BT_DMT_640X350P85,
+ V4L2_DV_BT_DMT_640X400P85,
+ V4L2_DV_BT_DMT_720X400P85,
+ V4L2_DV_BT_DMT_640X480P72,
+ V4L2_DV_BT_DMT_640X480P75,
+ V4L2_DV_BT_DMT_640X480P85,
+ V4L2_DV_BT_DMT_800X600P56,
+ V4L2_DV_BT_DMT_800X600P60,
+ V4L2_DV_BT_DMT_800X600P72,
+ V4L2_DV_BT_DMT_800X600P75,
+ V4L2_DV_BT_DMT_800X600P85,
+ V4L2_DV_BT_DMT_800X600P120_RB,
+ V4L2_DV_BT_DMT_848X480P60,
+ V4L2_DV_BT_DMT_1024X768I43,
+ V4L2_DV_BT_DMT_1024X768P60,
+ V4L2_DV_BT_DMT_1024X768P70,
+ V4L2_DV_BT_DMT_1024X768P75,
+ V4L2_DV_BT_DMT_1024X768P85,
+ V4L2_DV_BT_DMT_1024X768P120_RB,
+ V4L2_DV_BT_DMT_1152X864P75,
+ V4L2_DV_BT_DMT_1280X768P60_RB,
+ V4L2_DV_BT_DMT_1280X768P60,
+ V4L2_DV_BT_DMT_1280X768P75,
+ V4L2_DV_BT_DMT_1280X768P85,
+ V4L2_DV_BT_DMT_1280X768P120_RB,
+ V4L2_DV_BT_DMT_1280X800P60_RB,
+ V4L2_DV_BT_DMT_1280X800P60,
+ V4L2_DV_BT_DMT_1280X800P75,
+ V4L2_DV_BT_DMT_1280X800P85,
+ V4L2_DV_BT_DMT_1280X800P120_RB,
+ V4L2_DV_BT_DMT_1280X960P60,
+ V4L2_DV_BT_DMT_1280X960P85,
+ V4L2_DV_BT_DMT_1280X960P120_RB,
+ V4L2_DV_BT_DMT_1280X1024P60,
+ V4L2_DV_BT_DMT_1280X1024P75,
+ V4L2_DV_BT_DMT_1280X1024P85,
+ V4L2_DV_BT_DMT_1280X1024P120_RB,
+ V4L2_DV_BT_DMT_1360X768P60,
+ V4L2_DV_BT_DMT_1360X768P120_RB,
+ V4L2_DV_BT_DMT_1366X768P60,
+ V4L2_DV_BT_DMT_1366X768P60_RB,
+ V4L2_DV_BT_DMT_1400X1050P60_RB,
+ V4L2_DV_BT_DMT_1400X1050P60,
+ V4L2_DV_BT_DMT_1400X1050P75,
+ V4L2_DV_BT_DMT_1400X1050P85,
+ V4L2_DV_BT_DMT_1400X1050P120_RB,
+ V4L2_DV_BT_DMT_1440X900P60_RB,
+ V4L2_DV_BT_DMT_1440X900P60,
+ V4L2_DV_BT_DMT_1440X900P75,
+ V4L2_DV_BT_DMT_1440X900P85,
+ V4L2_DV_BT_DMT_1440X900P120_RB,
+ V4L2_DV_BT_DMT_1600X900P60_RB,
+ V4L2_DV_BT_DMT_1600X1200P60,
+ V4L2_DV_BT_DMT_1600X1200P65,
+ V4L2_DV_BT_DMT_1600X1200P70,
+ V4L2_DV_BT_DMT_1600X1200P75,
+ V4L2_DV_BT_DMT_1600X1200P85,
+ V4L2_DV_BT_DMT_1600X1200P120_RB,
+ V4L2_DV_BT_DMT_1680X1050P60_RB,
+ V4L2_DV_BT_DMT_1680X1050P60,
+ V4L2_DV_BT_DMT_1680X1050P75,
+ V4L2_DV_BT_DMT_1680X1050P85,
+ V4L2_DV_BT_DMT_1680X1050P120_RB,
+ V4L2_DV_BT_DMT_1792X1344P60,
+ V4L2_DV_BT_DMT_1792X1344P75,
+ V4L2_DV_BT_DMT_1792X1344P120_RB,
+ V4L2_DV_BT_DMT_1856X1392P60,
+ V4L2_DV_BT_DMT_1856X1392P75,
+ V4L2_DV_BT_DMT_1856X1392P120_RB,
+ V4L2_DV_BT_DMT_1920X1200P60_RB,
+ V4L2_DV_BT_DMT_1920X1200P60,
+ V4L2_DV_BT_DMT_1920X1200P75,
+ V4L2_DV_BT_DMT_1920X1200P85,
+ V4L2_DV_BT_DMT_1920X1200P120_RB,
+ V4L2_DV_BT_DMT_1920X1440P60,
+ V4L2_DV_BT_DMT_1920X1440P75,
+ V4L2_DV_BT_DMT_1920X1440P120_RB,
+ V4L2_DV_BT_DMT_2048X1152P60_RB,
+ V4L2_DV_BT_DMT_2560X1600P60_RB,
+ V4L2_DV_BT_DMT_2560X1600P60,
+ V4L2_DV_BT_DMT_2560X1600P75,
+ V4L2_DV_BT_DMT_2560X1600P85,
+ V4L2_DV_BT_DMT_2560X1600P120_RB,
+ V4L2_DV_BT_CEA_3840X2160P24,
+ V4L2_DV_BT_CEA_3840X2160P25,
+ V4L2_DV_BT_CEA_3840X2160P30,
+ V4L2_DV_BT_CEA_3840X2160P50,
+ V4L2_DV_BT_CEA_3840X2160P60,
+ V4L2_DV_BT_CEA_4096X2160P24,
+ V4L2_DV_BT_CEA_4096X2160P25,
+ V4L2_DV_BT_CEA_4096X2160P30,
+ V4L2_DV_BT_CEA_4096X2160P50,
+ V4L2_DV_BT_DMT_4096X2160P59_94_RB,
+ V4L2_DV_BT_CEA_4096X2160P60,
+ { }
+};
+EXPORT_SYMBOL_GPL(v4l2_dv_timings_presets);
+
+bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
+ const struct v4l2_dv_timings_cap *dvcap,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
+ u32 caps = cap->capabilities;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return false;
+ if (t->type != dvcap->type ||
+ bt->height < cap->min_height ||
+ bt->height > cap->max_height ||
+ bt->width < cap->min_width ||
+ bt->width > cap->max_width ||
+ bt->pixelclock < cap->min_pixelclock ||
+ bt->pixelclock > cap->max_pixelclock ||
+ (cap->standards && bt->standards &&
+ !(bt->standards & cap->standards)) ||
+ (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
+ (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
+ return false;
+ return fnc == NULL || fnc(t, fnc_handle);
+}
+EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);
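+
+/*
+ * Illustrative sketch: a receiver describing its limits in a
+ * v4l2_dv_timings_cap and letting the core validate user-supplied
+ * timings. The limits and names are hypothetical.
+ */
+#if 0
+static const struct v4l2_dv_timings_cap my_timings_cap = {
+	.type = V4L2_DV_BT_656_1120,
+	.bt = {
+		.min_width = 640,
+		.max_width = 1920,
+		.min_height = 350,
+		.max_height = 1200,
+		.min_pixelclock = 13000000,
+		.max_pixelclock = 170000000,
+		.standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT,
+		.capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+				V4L2_DV_BT_CAP_INTERLACED,
+	},
+};
+
+static int my_check_timings(const struct v4l2_dv_timings *t)
+{
+	return v4l2_valid_dv_timings(t, &my_timings_cap, NULL, NULL) ?
+		0 : -ERANGE;
+}
+#endif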
+
+int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ u32 i, idx;
+
+ memset(t->reserved, 0, sizeof(t->reserved));
+ for (i = idx = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
+ fnc, fnc_handle) &&
+ idx++ == t->index) {
+ t->timings = v4l2_dv_timings_presets[i];
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_enum_dv_timings_cap);
+
+bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ unsigned pclock_delta,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ int i;
+
+ if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
+ return false;
+
+	for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
+ fnc, fnc_handle) &&
+ v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
+ pclock_delta)) {
+ *t = v4l2_dv_timings_presets[i];
+ return true;
+ }
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap);
+
+/**
+ * v4l2_match_dv_timings - check if two timings match
+ * @t1: compare this v4l2_dv_timings struct...
+ * @t2: ...with this struct.
+ * @pclock_delta: the allowed pixelclock deviation.
+ *
+ * Compare t1 with t2 with a given margin of error for the pixelclock.
+ */
+bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1,
+ const struct v4l2_dv_timings *t2,
+ unsigned pclock_delta)
+{
+ if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120)
+ return false;
+ if (t1->bt.width == t2->bt.width &&
+ t1->bt.height == t2->bt.height &&
+ t1->bt.interlaced == t2->bt.interlaced &&
+ t1->bt.polarities == t2->bt.polarities &&
+ t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta &&
+ t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta &&
+ t1->bt.hfrontporch == t2->bt.hfrontporch &&
+ t1->bt.vfrontporch == t2->bt.vfrontporch &&
+ t1->bt.vsync == t2->bt.vsync &&
+ t1->bt.vbackporch == t2->bt.vbackporch &&
+ (!t1->bt.interlaced ||
+ (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch &&
+ t1->bt.il_vsync == t2->bt.il_vsync &&
+ t1->bt.il_vbackporch == t2->bt.il_vbackporch)))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_match_dv_timings);
+
+void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
+ const struct v4l2_dv_timings *t, bool detailed)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ u32 htot, vtot;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return;
+
+ htot = V4L2_DV_BT_FRAME_WIDTH(bt);
+ vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
+
+ if (prefix == NULL)
+ prefix = "";
+
+ pr_info("%s: %s%ux%u%s%u (%ux%u)\n", dev_prefix, prefix,
+ bt->width, bt->height, bt->interlaced ? "i" : "p",
+ (htot * vtot) > 0 ? ((u32)bt->pixelclock / (htot * vtot)) : 0,
+ htot, vtot);
+
+ if (!detailed)
+ return;
+
+ pr_info("%s: horizontal: fp = %u, %ssync = %u, bp = %u\n",
+ dev_prefix, bt->hfrontporch,
+ (bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-",
+ bt->hsync, bt->hbackporch);
+ pr_info("%s: vertical: fp = %u, %ssync = %u, bp = %u\n",
+ dev_prefix, bt->vfrontporch,
+ (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
+ bt->vsync, bt->vbackporch);
+ pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
+ pr_info("%s: flags (0x%x):%s%s%s%s%s\n", dev_prefix, bt->flags,
+ (bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
+ " REDUCED_BLANKING" : "",
+ (bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ?
+ " CAN_REDUCE_FPS" : "",
+ (bt->flags & V4L2_DV_FL_REDUCED_FPS) ?
+ " REDUCED_FPS" : "",
+ (bt->flags & V4L2_DV_FL_HALF_LINE) ?
+ " HALF_LINE" : "",
+ (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ?
+ " CE_VIDEO" : "");
+ pr_info("%s: standards (0x%x):%s%s%s%s\n", dev_prefix, bt->standards,
+ (bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "",
+ (bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "",
+ (bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "",
+ (bt->standards & V4L2_DV_BT_STD_GTF) ? " GTF" : "");
+}
+EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
+
+/*
+ * CVT defines
+ * Based on Coordinated Video Timings Standard
+ * version 1.1 September 10, 2003
+ */
+
+#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */
+
+/* Normal blanking */
+#define CVT_MIN_V_BPORCH 7 /* lines */
+#define CVT_MIN_V_PORCH_RND 3 /* lines */
+#define CVT_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
+
+/* Normal blanking for CVT uses GTF to calculate horizontal blanking */
+#define CVT_CELL_GRAN 8 /* character cell granularity */
+#define CVT_M 600 /* blanking formula gradient */
+#define CVT_C 40 /* blanking formula offset */
+#define CVT_K 128 /* blanking formula scaling factor */
+#define CVT_J 20 /* blanking formula scaling factor */
+#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J)
+#define CVT_M_PRIME (CVT_K * CVT_M / 256)
+
+/* Reduced Blanking */
+#define CVT_RB_MIN_V_BPORCH 7 /* lines */
+#define CVT_RB_V_FPORCH 3 /* lines */
+#define CVT_RB_MIN_V_BLANK 460 /* us */
+#define CVT_RB_H_SYNC 32 /* pixels */
+#define CVT_RB_H_BPORCH 80 /* pixels */
+#define CVT_RB_H_BLANK 160 /* pixels */
+
+/**
+ * v4l2_detect_cvt - detect if the given timings follow the CVT standard
+ * @frame_height: the total height of the frame (including blanking) in lines.
+ * @hfreq: the horizontal frequency in Hz.
+ * @vsync: the height of the vertical sync in lines.
+ * @polarities: the horizontal and vertical polarities (same as struct
+ *	v4l2_bt_timings polarities).
+ * @fmt: the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid CVT format. If so, then it will return true, and fmt will be filled
+ * in with the found CVT timings.
+ *
+ * TODO: VESA defined a new version 2 of their reduced blanking
+ * formula. Support for that is currently missing in this CVT
+ * detection function.
+ */
+bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
+ u32 polarities, struct v4l2_dv_timings *fmt)
+{
+ int v_fp, v_bp, h_fp, h_bp, hsync;
+ int frame_width, image_height, image_width;
+ bool reduced_blanking;
+ unsigned pix_clk;
+
+ if (vsync < 4 || vsync > 7)
+ return false;
+
+ if (polarities == V4L2_DV_VSYNC_POS_POL)
+ reduced_blanking = false;
+ else if (polarities == V4L2_DV_HSYNC_POS_POL)
+ reduced_blanking = true;
+ else
+ return false;
+
+ /* Vertical */
+ if (reduced_blanking) {
+ v_fp = CVT_RB_V_FPORCH;
+ v_bp = (CVT_RB_MIN_V_BLANK * hfreq + 1999999) / 1000000;
+ v_bp -= vsync + v_fp;
+
+ if (v_bp < CVT_RB_MIN_V_BPORCH)
+ v_bp = CVT_RB_MIN_V_BPORCH;
+ } else {
+ v_fp = CVT_MIN_V_PORCH_RND;
+ v_bp = (CVT_MIN_VSYNC_BP * hfreq + 1999999) / 1000000 - vsync;
+
+ if (v_bp < CVT_MIN_V_BPORCH)
+ v_bp = CVT_MIN_V_BPORCH;
+ }
+ image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
+
+ /* Aspect ratio based on vsync */
+ switch (vsync) {
+ case 4:
+ image_width = (image_height * 4) / 3;
+ break;
+ case 5:
+ image_width = (image_height * 16) / 9;
+ break;
+ case 6:
+ image_width = (image_height * 16) / 10;
+ break;
+ case 7:
+ /* special case */
+ if (image_height == 1024)
+ image_width = (image_height * 5) / 4;
+ else if (image_height == 768)
+ image_width = (image_height * 15) / 9;
+ else
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ image_width = image_width & ~7;
+
+ /* Horizontal */
+ if (reduced_blanking) {
+ pix_clk = (image_width + CVT_RB_H_BLANK) * hfreq;
+ pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
+
+ h_bp = CVT_RB_H_BPORCH;
+ hsync = CVT_RB_H_SYNC;
+ h_fp = CVT_RB_H_BLANK - h_bp - hsync;
+
+ frame_width = image_width + CVT_RB_H_BLANK;
+ } else {
+ unsigned ideal_duty_cycle_per_myriad =
+ 100 * CVT_C_PRIME - (CVT_M_PRIME * 100000) / hfreq;
+ int h_blank;
+
+ if (ideal_duty_cycle_per_myriad < 2000)
+ ideal_duty_cycle_per_myriad = 2000;
+
+ h_blank = image_width * ideal_duty_cycle_per_myriad /
+ (10000 - ideal_duty_cycle_per_myriad);
+ h_blank = (h_blank / (2 * CVT_CELL_GRAN)) * 2 * CVT_CELL_GRAN;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
+
+ h_bp = h_blank / 2;
+ frame_width = image_width + h_blank;
+
+ hsync = (frame_width * 8 + 50) / 100;
+ hsync = hsync - hsync % CVT_CELL_GRAN;
+ h_fp = h_blank - hsync - h_bp;
+ }
+
+ fmt->type = V4L2_DV_BT_656_1120;
+ fmt->bt.polarities = polarities;
+ fmt->bt.width = image_width;
+ fmt->bt.height = image_height;
+ fmt->bt.hfrontporch = h_fp;
+ fmt->bt.vfrontporch = v_fp;
+ fmt->bt.hsync = hsync;
+ fmt->bt.vsync = vsync;
+ fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
+ fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
+ fmt->bt.pixelclock = pix_clk;
+ fmt->bt.standards = V4L2_DV_BT_STD_CVT;
+ if (reduced_blanking)
+ fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+ return true;
+}
+EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
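+
+/*
+ * Illustrative sketch: a receiver that has measured the total frame
+ * height, line rate and vsync width of an unknown input could probe for
+ * CVT like this. The measured values are hypothetical.
+ */
+#if 0
+static bool my_probe_cvt(struct v4l2_dv_timings *t)
+{
+	/*
+	 * 1125 total lines at a 67.5 kHz line rate with a 5-line vsync;
+	 * V4L2_DV_VSYNC_POS_POL selects the normal-blanking branch above.
+	 */
+	return v4l2_detect_cvt(1125, 67500, 5, V4L2_DV_VSYNC_POS_POL, t);
+}
+#endif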
+
+/*
+ * GTF defines
+ * Based on Generalized Timing Formula Standard
+ * Version 1.1 September 2, 1999
+ */
+
+#define GTF_PXL_CLK_GRAN 250000 /* pixel clock granularity */
+
+#define GTF_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
+#define GTF_V_FP 1 /* vertical front porch (lines) */
+#define GTF_CELL_GRAN 8 /* character cell granularity */
+
+/* Default */
+#define GTF_D_M 600 /* blanking formula gradient */
+#define GTF_D_C 40 /* blanking formula offset */
+#define GTF_D_K 128 /* blanking formula scaling factor */
+#define GTF_D_J 20 /* blanking formula scaling factor */
+#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J)
+#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256)
+
+/* Secondary */
+#define GTF_S_M 3600 /* blanking formula gradient */
+#define GTF_S_C 40 /* blanking formula offset */
+#define GTF_S_K 128 /* blanking formula scaling factor */
+#define GTF_S_J 35 /* blanking formula scaling factor */
+#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J)
+#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256)
+
+/**
+ * v4l2_detect_gtf - detect if the given timings follow the GTF standard
+ * @frame_height: the total height of the frame (including blanking) in lines.
+ * @hfreq: the horizontal frequency in Hz.
+ * @vsync: the height of the vertical sync in lines.
+ * @polarities: the horizontal and vertical polarities (same as struct
+ *	v4l2_bt_timings polarities).
+ * @aspect: preferred aspect ratio. GTF has no method of determining the
+ *	aspect ratio in order to derive the image width from the image
+ *	height, so it has to be passed explicitly. Usually the native
+ *	screen aspect ratio is used for this. If it is not filled in
+ *	correctly, then 16:9 will be assumed.
+ * @fmt: the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid GTF format. If so, then it will return true, and fmt will be filled
+ * in with the found GTF timings.
+ */
+bool v4l2_detect_gtf(unsigned frame_height,
+ unsigned hfreq,
+ unsigned vsync,
+ u32 polarities,
+ struct v4l2_fract aspect,
+ struct v4l2_dv_timings *fmt)
+{
+ int pix_clk;
+ int v_fp, v_bp, h_fp, hsync;
+ int frame_width, image_height, image_width;
+ bool default_gtf;
+ int h_blank;
+
+ if (vsync != 3)
+ return false;
+
+ if (polarities == V4L2_DV_VSYNC_POS_POL)
+ default_gtf = true;
+ else if (polarities == V4L2_DV_HSYNC_POS_POL)
+ default_gtf = false;
+ else
+ return false;
+
+ /* Vertical */
+ v_fp = GTF_V_FP;
+ v_bp = (GTF_MIN_VSYNC_BP * hfreq + 999999) / 1000000 - vsync;
+ image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
+
+ if (aspect.numerator == 0 || aspect.denominator == 0) {
+ aspect.numerator = 16;
+ aspect.denominator = 9;
+ }
+ image_width = ((image_height * aspect.numerator) / aspect.denominator);
+ image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
+
+ /* Horizontal */
+ if (default_gtf)
+ h_blank = ((image_width * GTF_D_C_PRIME * hfreq) -
+ (image_width * GTF_D_M_PRIME * 1000) +
+ (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) / 2) /
+ (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000);
+ else
+ h_blank = ((image_width * GTF_S_C_PRIME * hfreq) -
+ (image_width * GTF_S_M_PRIME * 1000) +
+ (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) / 2) /
+ (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000);
+
+ h_blank = h_blank - h_blank % (2 * GTF_CELL_GRAN);
+ frame_width = image_width + h_blank;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
+
+ hsync = (frame_width * 8 + 50) / 100;
+ hsync = hsync - hsync % GTF_CELL_GRAN;
+
+ h_fp = h_blank / 2 - hsync;
+
+ fmt->type = V4L2_DV_BT_656_1120;
+ fmt->bt.polarities = polarities;
+ fmt->bt.width = image_width;
+ fmt->bt.height = image_height;
+ fmt->bt.hfrontporch = h_fp;
+ fmt->bt.vfrontporch = v_fp;
+ fmt->bt.hsync = hsync;
+ fmt->bt.vsync = vsync;
+ fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
+ fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
+ fmt->bt.pixelclock = pix_clk;
+ fmt->bt.standards = V4L2_DV_BT_STD_GTF;
+ if (!default_gtf)
+ fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+ return true;
+}
+EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
+
+/**
+ * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
+ *	0x15 and 0x16 from the EDID.
+ * @hor_landscape: byte 0x15 from the EDID.
+ * @vert_portrait: byte 0x16 from the EDID.
+ *
+ * Determines the aspect ratio from the EDID.
+ * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2:
+ * "Horizontal and Vertical Screen Size or Aspect Ratio"
+ */
+struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
+{
+ struct v4l2_fract aspect = { 16, 9 };
+ u32 tmp;
+ u8 ratio;
+
+ /* Nothing filled in, fallback to 16:9 */
+ if (!hor_landscape && !vert_portrait)
+ return aspect;
+ /* Both filled in, so they are interpreted as the screen size in cm */
+ if (hor_landscape && vert_portrait) {
+ aspect.numerator = hor_landscape;
+ aspect.denominator = vert_portrait;
+ return aspect;
+ }
+ /* Only one is filled in, so interpret them as a ratio:
+ (val + 99) / 100 */
+ ratio = hor_landscape | vert_portrait;
+ /* Change some rounded values into the exact aspect ratio */
+ if (ratio == 79) {
+ aspect.numerator = 16;
+ aspect.denominator = 9;
+ } else if (ratio == 34) {
+ aspect.numerator = 4;
+ aspect.denominator = 3;
+ } else if (ratio == 68) {
+ aspect.numerator = 15;
+ aspect.denominator = 9;
+ } else {
+ aspect.numerator = hor_landscape + 99;
+ aspect.denominator = 100;
+ }
+ if (hor_landscape)
+ return aspect;
+ /* The aspect ratio is for portrait, so swap numerator and denominator */
+ tmp = aspect.denominator;
+ aspect.denominator = aspect.numerator;
+ aspect.numerator = tmp;
+ return aspect;
+}
+EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio);
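+
+/*
+ * Worked example of the ratio path above (hypothetical caller): with
+ * hor_landscape = 79 and vert_portrait = 0, the generic formula would
+ * give (79 + 99) / 100, which the special case snaps to an exact 16:9.
+ */
+#if 0
+static void my_aspect_example(void)
+{
+	/* EDID byte 0x15 = 79, byte 0x16 = 0: landscape ratio, 16:9. */
+	struct v4l2_fract a = v4l2_calc_aspect_ratio(79, 0);
+
+	/* a.numerator == 16, a.denominator == 9 */
+}
+#endif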
diff --git a/kernel/drivers/media/v4l2-core/v4l2-event.c b/kernel/drivers/media/v4l2-core/v4l2-event.c
new file mode 100644
index 000000000..8761aab99
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-event.c
@@ -0,0 +1,356 @@
+/*
+ * v4l2-event.c
+ *
+ * V4L2 events.
+ *
+ * Copyright (C) 2009--2010 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
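+/*
+ * Each subscription keeps its pending events in a circular buffer of
+ * sev->elems entries, with sev->first pointing at the oldest queued
+ * event; sev_pos() maps a logical index (0 == oldest) onto that ring.
+ */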
+static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
+{
+ idx += sev->first;
+ return idx >= sev->elems ? idx - sev->elems : idx;
+}
+
+static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
+{
+ struct v4l2_kevent *kev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ if (list_empty(&fh->available)) {
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ return -ENOENT;
+ }
+
+ WARN_ON(fh->navailable == 0);
+
+ kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
+ list_del(&kev->list);
+ fh->navailable--;
+
+ kev->event.pending = fh->navailable;
+ *event = kev->event;
+ kev->sev->first = sev_pos(kev->sev, 1);
+ kev->sev->in_use--;
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ return 0;
+}
+
+int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
+ int nonblocking)
+{
+ int ret;
+
+ if (nonblocking)
+ return __v4l2_event_dequeue(fh, event);
+
+ /* Release the vdev lock while waiting */
+ if (fh->vdev->lock)
+ mutex_unlock(fh->vdev->lock);
+
+ do {
+ ret = wait_event_interruptible(fh->wait,
+ fh->navailable != 0);
+ if (ret < 0)
+ break;
+
+ ret = __v4l2_event_dequeue(fh, event);
+ } while (ret == -ENOENT);
+
+ if (fh->vdev->lock)
+ mutex_lock(fh->vdev->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
+
+/* Caller must hold fh->vdev->fh_lock! */
+static struct v4l2_subscribed_event *v4l2_event_subscribed(
+ struct v4l2_fh *fh, u32 type, u32 id)
+{
+ struct v4l2_subscribed_event *sev;
+
+ assert_spin_locked(&fh->vdev->fh_lock);
+
+ list_for_each_entry(sev, &fh->subscribed, list)
+ if (sev->type == type && sev->id == id)
+ return sev;
+
+ return NULL;
+}
+
+static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
+ const struct timespec *ts)
+{
+ struct v4l2_subscribed_event *sev;
+ struct v4l2_kevent *kev;
+ bool copy_payload = true;
+
+ /* Are we subscribed? */
+ sev = v4l2_event_subscribed(fh, ev->type, ev->id);
+ if (sev == NULL)
+ return;
+
+ /*
+	 * If the event has been added to the fh->subscribed list, but its
+	 * add op has not completed yet, then elems will be 0; treat this
+	 * as not being subscribed.
+ */
+ if (!sev->elems)
+ return;
+
+ /* Increase event sequence number on fh. */
+ fh->sequence++;
+
+ /* Do we have any free events? */
+ if (sev->in_use == sev->elems) {
+ /* no, remove the oldest one */
+ kev = sev->events + sev_pos(sev, 0);
+ list_del(&kev->list);
+ sev->in_use--;
+ sev->first = sev_pos(sev, 1);
+ fh->navailable--;
+ if (sev->elems == 1) {
+ if (sev->ops && sev->ops->replace) {
+ sev->ops->replace(&kev->event, ev);
+ copy_payload = false;
+ }
+ } else if (sev->ops && sev->ops->merge) {
+ struct v4l2_kevent *second_oldest =
+ sev->events + sev_pos(sev, 0);
+ sev->ops->merge(&kev->event, &second_oldest->event);
+ }
+ }
+
+ /* Take one and fill it. */
+ kev = sev->events + sev_pos(sev, sev->in_use);
+ kev->event.type = ev->type;
+ if (copy_payload)
+ kev->event.u = ev->u;
+ kev->event.id = ev->id;
+ kev->event.timestamp = *ts;
+ kev->event.sequence = fh->sequence;
+ sev->in_use++;
+ list_add_tail(&kev->list, &fh->available);
+
+ fh->navailable++;
+
+ wake_up_all(&fh->wait);
+}
+
+void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
+{
+ struct v4l2_fh *fh;
+ unsigned long flags;
+ struct timespec timestamp;
+
+ ktime_get_ts(&timestamp);
+
+ spin_lock_irqsave(&vdev->fh_lock, flags);
+
+ list_for_each_entry(fh, &vdev->fh_list, list)
+ __v4l2_event_queue_fh(fh, ev, &timestamp);
+
+ spin_unlock_irqrestore(&vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_queue);
+
+void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
+{
+ unsigned long flags;
+ struct timespec timestamp;
+
+ ktime_get_ts(&timestamp);
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ __v4l2_event_queue_fh(fh, ev, &timestamp);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
+
+int v4l2_event_pending(struct v4l2_fh *fh)
+{
+ return fh->navailable;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_pending);
+
+int v4l2_event_subscribe(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub, unsigned elems,
+ const struct v4l2_subscribed_event_ops *ops)
+{
+ struct v4l2_subscribed_event *sev, *found_ev;
+ unsigned long flags;
+ unsigned i;
+
+ if (sub->type == V4L2_EVENT_ALL)
+ return -EINVAL;
+
+ if (elems < 1)
+ elems = 1;
+
+ sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
+ if (!sev)
+ return -ENOMEM;
+ for (i = 0; i < elems; i++)
+ sev->events[i].sev = sev;
+ sev->type = sub->type;
+ sev->id = sub->id;
+ sev->flags = sub->flags;
+ sev->fh = fh;
+ sev->ops = ops;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
+ if (!found_ev)
+ list_add(&sev->list, &fh->subscribed);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ if (found_ev) {
+ kfree(sev);
+ return 0; /* Already listening */
+ }
+
+ if (sev->ops && sev->ops->add) {
+ int ret = sev->ops->add(sev, elems);
+ if (ret) {
+ sev->ops = NULL;
+ v4l2_event_unsubscribe(fh, sub);
+ return ret;
+ }
+ }
+
+ /* Mark as ready for use */
+ sev->elems = elems;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
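+
+/*
+ * Illustrative sketch: a driver's vidioc_subscribe_event handler usually
+ * dispatches on the event type and picks a queue depth per type. The
+ * handler name is hypothetical.
+ */
+#if 0
+static int my_subscribe_event(struct v4l2_fh *fh,
+			      const struct v4l2_event_subscription *sub)
+{
+	switch (sub->type) {
+	case V4L2_EVENT_EOS:
+		/* Keep up to two end-of-stream events, no special ops. */
+		return v4l2_event_subscribe(fh, sub, 2, NULL);
+	case V4L2_EVENT_SOURCE_CHANGE:
+		return v4l2_src_change_event_subscribe(fh, sub);
+	default:
+		return -EINVAL;
+	}
+}
+#endif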
+
+void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
+{
+ struct v4l2_event_subscription sub;
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+
+ do {
+ sev = NULL;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ if (!list_empty(&fh->subscribed)) {
+ sev = list_first_entry(&fh->subscribed,
+ struct v4l2_subscribed_event, list);
+ sub.type = sev->type;
+ sub.id = sev->id;
+ }
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ if (sev)
+ v4l2_event_unsubscribe(fh, &sub);
+ } while (sev);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
+
+int v4l2_event_unsubscribe(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+ int i;
+
+ if (sub->type == V4L2_EVENT_ALL) {
+ v4l2_event_unsubscribe_all(fh);
+ return 0;
+ }
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ sev = v4l2_event_subscribed(fh, sub->type, sub->id);
+ if (sev != NULL) {
+ /* Remove any pending events for this subscription */
+ for (i = 0; i < sev->in_use; i++) {
+ list_del(&sev->events[sev_pos(sev, i)].list);
+ fh->navailable--;
+ }
+ list_del(&sev->list);
+ }
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ if (sev && sev->ops && sev->ops->del)
+ sev->ops->del(sev);
+
+ kfree(sev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
+
+int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
+
+static void v4l2_event_src_replace(struct v4l2_event *old,
+ const struct v4l2_event *new)
+{
+ u32 old_changes = old->u.src_change.changes;
+
+ old->u.src_change = new->u.src_change;
+ old->u.src_change.changes |= old_changes;
+}
+
+static void v4l2_event_src_merge(const struct v4l2_event *old,
+ struct v4l2_event *new)
+{
+ new->u.src_change.changes |= old->u.src_change.changes;
+}
+
+static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
+ .replace = v4l2_event_src_replace,
+ .merge = v4l2_event_src_merge,
+};
+
+int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
+ return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);
+
+int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
+{
+ return v4l2_src_change_event_subscribe(fh, sub);
+}
+EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);
diff --git a/kernel/drivers/media/v4l2-core/v4l2-fh.c b/kernel/drivers/media/v4l2-core/v4l2-fh.c
new file mode 100644
index 000000000..c97067a25
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-fh.c
@@ -0,0 +1,125 @@
+/*
+ * v4l2-fh.c
+ *
+ * V4L2 file handles.
+ *
+ * Copyright (C) 2009--2010 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
+{
+ fh->vdev = vdev;
+ /* Inherit from video_device. May be overridden by the driver. */
+ fh->ctrl_handler = vdev->ctrl_handler;
+ INIT_LIST_HEAD(&fh->list);
+ set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
+ /*
+ * determine_valid_ioctls() does not know if struct v4l2_fh
+ * is used by this driver, but here we do. So enable the
+ * prio ioctls here.
+ */
+ set_bit(_IOC_NR(VIDIOC_G_PRIORITY), vdev->valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_S_PRIORITY), vdev->valid_ioctls);
+ fh->prio = V4L2_PRIORITY_UNSET;
+ init_waitqueue_head(&fh->wait);
+ INIT_LIST_HEAD(&fh->available);
+ INIT_LIST_HEAD(&fh->subscribed);
+ fh->sequence = -1;
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_init);
+
+void v4l2_fh_add(struct v4l2_fh *fh)
+{
+ unsigned long flags;
+
+ v4l2_prio_open(fh->vdev->prio, &fh->prio);
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add(&fh->list, &fh->vdev->fh_list);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_add);
+
+int v4l2_fh_open(struct file *filp)
+{
+ struct video_device *vdev = video_devdata(filp);
+ struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+
+ filp->private_data = fh;
+ if (fh == NULL)
+ return -ENOMEM;
+ v4l2_fh_init(fh, vdev);
+ v4l2_fh_add(fh);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_open);
+
+void v4l2_fh_del(struct v4l2_fh *fh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_del_init(&fh->list);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ v4l2_prio_close(fh->vdev->prio, fh->prio);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_del);
+
+void v4l2_fh_exit(struct v4l2_fh *fh)
+{
+ if (fh->vdev == NULL)
+ return;
+ v4l2_event_unsubscribe_all(fh);
+ fh->vdev = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_exit);
+
+int v4l2_fh_release(struct file *filp)
+{
+ struct v4l2_fh *fh = filp->private_data;
+
+ if (fh) {
+ v4l2_fh_del(fh);
+ v4l2_fh_exit(fh);
+ kfree(fh);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_release);
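+
+/*
+ * Illustrative sketch: drivers without extra per-open state can plug the
+ * helpers straight into their file operations. my_fops is hypothetical
+ * and assumes the driver only needs control events for poll().
+ */
+#if 0
+static const struct v4l2_file_operations my_fops = {
+	.owner		= THIS_MODULE,
+	.open		= v4l2_fh_open,
+	.release	= v4l2_fh_release,
+	.poll		= v4l2_ctrl_poll,
+	.unlocked_ioctl	= video_ioctl2,
+};
+#endif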
+
+int v4l2_fh_is_singular(struct v4l2_fh *fh)
+{
+ unsigned long flags;
+ int is_singular;
+
+ if (fh == NULL || fh->vdev == NULL)
+ return 0;
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ is_singular = list_is_singular(&fh->list);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ return is_singular;
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_is_singular);
diff --git a/kernel/drivers/media/v4l2-core/v4l2-ioctl.c b/kernel/drivers/media/v4l2-core/v4l2-ioctl.c
new file mode 100644
index 000000000..aa407cb5f
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -0,0 +1,2596 @@
+/*
+ * Video capture interface for Linux version 2
+ *
+ * A generic framework to process V4L2 ioctl commands.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
+ * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-core.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/v4l2.h>
+
+/* Zero out the end of the struct pointed to by p. Everything after, but
+ * not including, the specified field is cleared. */
+#define CLEAR_AFTER_FIELD(p, field) \
+ memset((u8 *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), \
+ 0, sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field))
+
+#define is_valid_ioctl(vfd, cmd) test_bit(_IOC_NR(cmd), (vfd)->valid_ioctls)
+
+struct std_descr {
+ v4l2_std_id std;
+ const char *descr;
+};
+
+static const struct std_descr standards[] = {
+ { V4L2_STD_NTSC, "NTSC" },
+ { V4L2_STD_NTSC_M, "NTSC-M" },
+ { V4L2_STD_NTSC_M_JP, "NTSC-M-JP" },
+ { V4L2_STD_NTSC_M_KR, "NTSC-M-KR" },
+ { V4L2_STD_NTSC_443, "NTSC-443" },
+ { V4L2_STD_PAL, "PAL" },
+ { V4L2_STD_PAL_BG, "PAL-BG" },
+ { V4L2_STD_PAL_B, "PAL-B" },
+ { V4L2_STD_PAL_B1, "PAL-B1" },
+ { V4L2_STD_PAL_G, "PAL-G" },
+ { V4L2_STD_PAL_H, "PAL-H" },
+ { V4L2_STD_PAL_I, "PAL-I" },
+ { V4L2_STD_PAL_DK, "PAL-DK" },
+ { V4L2_STD_PAL_D, "PAL-D" },
+ { V4L2_STD_PAL_D1, "PAL-D1" },
+ { V4L2_STD_PAL_K, "PAL-K" },
+ { V4L2_STD_PAL_M, "PAL-M" },
+ { V4L2_STD_PAL_N, "PAL-N" },
+ { V4L2_STD_PAL_Nc, "PAL-Nc" },
+ { V4L2_STD_PAL_60, "PAL-60" },
+ { V4L2_STD_SECAM, "SECAM" },
+ { V4L2_STD_SECAM_B, "SECAM-B" },
+ { V4L2_STD_SECAM_G, "SECAM-G" },
+ { V4L2_STD_SECAM_H, "SECAM-H" },
+ { V4L2_STD_SECAM_DK, "SECAM-DK" },
+ { V4L2_STD_SECAM_D, "SECAM-D" },
+ { V4L2_STD_SECAM_K, "SECAM-K" },
+ { V4L2_STD_SECAM_K1, "SECAM-K1" },
+ { V4L2_STD_SECAM_L, "SECAM-L" },
+ { V4L2_STD_SECAM_LC, "SECAM-Lc" },
+ { 0, "Unknown" }
+};
+
+/* Convert a video4linux standard ID to a standard name. */
+const char *v4l2_norm_to_name(v4l2_std_id id)
+{
+ u32 myid = id;
+ int i;
+
+	/* HACK: the ppc32 architecture doesn't have a __ucmpdi2 function to
+	   handle 64-bit comparisons. So, on that architecture, with some gcc
+	   variants, compilation fails. Currently, the max value is 30 bits
+	   wide.
+	 */
+ BUG_ON(myid != id);
+
+ for (i = 0; standards[i].std; i++)
+ if (myid == standards[i].std)
+ break;
+ return standards[i].descr;
+}
+EXPORT_SYMBOL(v4l2_norm_to_name);
+
+/* Returns frame period for the given standard */
+void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod)
+{
+ if (id & V4L2_STD_525_60) {
+ frameperiod->numerator = 1001;
+ frameperiod->denominator = 30000;
+ } else {
+ frameperiod->numerator = 1;
+ frameperiod->denominator = 25;
+ }
+}
+EXPORT_SYMBOL(v4l2_video_std_frame_period);
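+
+/*
+ * Illustrative sketch: 1001/30000 is the 525/60 (NTSC) frame period,
+ * i.e. 30000/1001 ~= 29.97 frames per second; everything else gets the
+ * 25 fps period of the 625/50 standards. Hypothetical caller:
+ */
+#if 0
+static void my_frame_period_example(void)
+{
+	struct v4l2_fract fp;
+
+	v4l2_video_std_frame_period(V4L2_STD_NTSC, &fp);
+	/* fp.numerator == 1001, fp.denominator == 30000 */
+}
+#endif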
+
+/* Fill in the fields of a v4l2_standard structure according to the
+ 'id' and 'transmission' parameters. Returns negative on error. */
+int v4l2_video_std_construct(struct v4l2_standard *vs,
+ int id, const char *name)
+{
+ vs->id = id;
+ v4l2_video_std_frame_period(id, &vs->frameperiod);
+ vs->framelines = (id & V4L2_STD_525_60) ? 525 : 625;
+ strlcpy(vs->name, name, sizeof(vs->name));
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_video_std_construct);
+
+/* ----------------------------------------------------------------- */
+/* some arrays for pretty-printing debug messages of enum types */
+
+const char *v4l2_field_names[] = {
+ [V4L2_FIELD_ANY] = "any",
+ [V4L2_FIELD_NONE] = "none",
+ [V4L2_FIELD_TOP] = "top",
+ [V4L2_FIELD_BOTTOM] = "bottom",
+ [V4L2_FIELD_INTERLACED] = "interlaced",
+ [V4L2_FIELD_SEQ_TB] = "seq-tb",
+ [V4L2_FIELD_SEQ_BT] = "seq-bt",
+ [V4L2_FIELD_ALTERNATE] = "alternate",
+ [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb",
+ [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt",
+};
+EXPORT_SYMBOL(v4l2_field_names);
+
+const char *v4l2_type_names[] = {
+ [V4L2_BUF_TYPE_VIDEO_CAPTURE] = "vid-cap",
+ [V4L2_BUF_TYPE_VIDEO_OVERLAY] = "vid-overlay",
+ [V4L2_BUF_TYPE_VIDEO_OUTPUT] = "vid-out",
+ [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap",
+ [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out",
+ [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap",
+ [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out",
+ [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay",
+ [V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane",
+ [V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane",
+ [V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap",
+};
+EXPORT_SYMBOL(v4l2_type_names);
+
+static const char *v4l2_memory_names[] = {
+ [V4L2_MEMORY_MMAP] = "mmap",
+ [V4L2_MEMORY_USERPTR] = "userptr",
+ [V4L2_MEMORY_OVERLAY] = "overlay",
+ [V4L2_MEMORY_DMABUF] = "dmabuf",
+};
+
+#define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? arr[a] : "unknown")
+
+/* ------------------------------------------------------------------ */
+/* debug help functions */
+
+static void v4l_print_querycap(const void *arg, bool write_only)
+{
+ const struct v4l2_capability *p = arg;
+
+ pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, "
+ "capabilities=0x%08x, device_caps=0x%08x\n",
+ (int)sizeof(p->driver), p->driver,
+ (int)sizeof(p->card), p->card,
+ (int)sizeof(p->bus_info), p->bus_info,
+ p->version, p->capabilities, p->device_caps);
+}
+
+static void v4l_print_enuminput(const void *arg, bool write_only)
+{
+ const struct v4l2_input *p = arg;
+
+ pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, "
+ "std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
+ p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
+ p->tuner, (unsigned long long)p->std, p->status,
+ p->capabilities);
+}
+
+static void v4l_print_enumoutput(const void *arg, bool write_only)
+{
+ const struct v4l2_output *p = arg;
+
+ pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, "
+ "modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
+ p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
+ p->modulator, (unsigned long long)p->std, p->capabilities);
+}
+
+static void v4l_print_audio(const void *arg, bool write_only)
+{
+ const struct v4l2_audio *p = arg;
+
+ if (write_only)
+ pr_cont("index=%u, mode=0x%x\n", p->index, p->mode);
+ else
+ pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n",
+ p->index, (int)sizeof(p->name), p->name,
+ p->capability, p->mode);
+}
+
+static void v4l_print_audioout(const void *arg, bool write_only)
+{
+ const struct v4l2_audioout *p = arg;
+
+ if (write_only)
+ pr_cont("index=%u\n", p->index);
+ else
+ pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n",
+ p->index, (int)sizeof(p->name), p->name,
+ p->capability, p->mode);
+}
+
+static void v4l_print_fmtdesc(const void *arg, bool write_only)
+{
+ const struct v4l2_fmtdesc *p = arg;
+
+ pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%c%c%c%c, description='%.*s'\n",
+ p->index, prt_names(p->type, v4l2_type_names),
+ p->flags, (p->pixelformat & 0xff),
+ (p->pixelformat >> 8) & 0xff,
+ (p->pixelformat >> 16) & 0xff,
+ (p->pixelformat >> 24) & 0xff,
+ (int)sizeof(p->description), p->description);
+}
+
+static void v4l_print_format(const void *arg, bool write_only)
+{
+ const struct v4l2_format *p = arg;
+ const struct v4l2_pix_format *pix;
+ const struct v4l2_pix_format_mplane *mp;
+ const struct v4l2_vbi_format *vbi;
+ const struct v4l2_sliced_vbi_format *sliced;
+ const struct v4l2_window *win;
+ const struct v4l2_sdr_format *sdr;
+ unsigned i;
+
+ pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &p->fmt.pix;
+ pr_cont(", width=%u, height=%u, "
+ "pixelformat=%c%c%c%c, field=%s, "
+ "bytesperline=%u, sizeimage=%u, colorspace=%d, "
+ "flags=0x%x, ycbcr_enc=%u, quantization=%u\n",
+ pix->width, pix->height,
+ (pix->pixelformat & 0xff),
+ (pix->pixelformat >> 8) & 0xff,
+ (pix->pixelformat >> 16) & 0xff,
+ (pix->pixelformat >> 24) & 0xff,
+ prt_names(pix->field, v4l2_field_names),
+ pix->bytesperline, pix->sizeimage,
+ pix->colorspace, pix->flags, pix->ycbcr_enc,
+ pix->quantization);
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ mp = &p->fmt.pix_mp;
+ pr_cont(", width=%u, height=%u, "
+ "format=%c%c%c%c, field=%s, "
+ "colorspace=%d, num_planes=%u, flags=0x%x, "
+ "ycbcr_enc=%u, quantization=%u\n",
+ mp->width, mp->height,
+ (mp->pixelformat & 0xff),
+ (mp->pixelformat >> 8) & 0xff,
+ (mp->pixelformat >> 16) & 0xff,
+ (mp->pixelformat >> 24) & 0xff,
+ prt_names(mp->field, v4l2_field_names),
+ mp->colorspace, mp->num_planes, mp->flags,
+ mp->ycbcr_enc, mp->quantization);
+ for (i = 0; i < mp->num_planes; i++)
+ printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
+ mp->plane_fmt[i].bytesperline,
+ mp->plane_fmt[i].sizeimage);
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ win = &p->fmt.win;
+ /* Note: we can't print the clip list here since the clips
+ * pointer is a userspace pointer, not a kernelspace
+ * pointer. */
+ pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, clipcount=%u, clips=%p, bitmap=%p, global_alpha=0x%02x\n",
+ win->w.width, win->w.height, win->w.left, win->w.top,
+ prt_names(win->field, v4l2_field_names),
+ win->chromakey, win->clipcount, win->clips,
+ win->bitmap, win->global_alpha);
+ break;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ vbi = &p->fmt.vbi;
+ pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, "
+ "sample_format=%c%c%c%c, start=%u,%u, count=%u,%u\n",
+ vbi->sampling_rate, vbi->offset,
+ vbi->samples_per_line,
+ (vbi->sample_format & 0xff),
+ (vbi->sample_format >> 8) & 0xff,
+ (vbi->sample_format >> 16) & 0xff,
+ (vbi->sample_format >> 24) & 0xff,
+ vbi->start[0], vbi->start[1],
+ vbi->count[0], vbi->count[1]);
+ break;
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ sliced = &p->fmt.sliced;
+ pr_cont(", service_set=0x%08x, io_size=%d\n",
+ sliced->service_set, sliced->io_size);
+ for (i = 0; i < 24; i++)
+ printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i,
+ sliced->service_lines[0][i],
+ sliced->service_lines[1][i]);
+ break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ sdr = &p->fmt.sdr;
+ pr_cont(", pixelformat=%c%c%c%c\n",
+ (sdr->pixelformat >> 0) & 0xff,
+ (sdr->pixelformat >> 8) & 0xff,
+ (sdr->pixelformat >> 16) & 0xff,
+ (sdr->pixelformat >> 24) & 0xff);
+ break;
+ }
+}
+
+static void v4l_print_framebuffer(const void *arg, bool write_only)
+{
+ const struct v4l2_framebuffer *p = arg;
+
+ pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
+ "height=%u, pixelformat=%c%c%c%c, "
+ "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
+ p->capability, p->flags, p->base,
+ p->fmt.width, p->fmt.height,
+ (p->fmt.pixelformat & 0xff),
+ (p->fmt.pixelformat >> 8) & 0xff,
+ (p->fmt.pixelformat >> 16) & 0xff,
+ (p->fmt.pixelformat >> 24) & 0xff,
+ p->fmt.bytesperline, p->fmt.sizeimage,
+ p->fmt.colorspace);
+}
+
+static void v4l_print_buftype(const void *arg, bool write_only)
+{
+ pr_cont("type=%s\n", prt_names(*(u32 *)arg, v4l2_type_names));
+}
+
+static void v4l_print_modulator(const void *arg, bool write_only)
+{
+ const struct v4l2_modulator *p = arg;
+
+ if (write_only)
+ pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
+ else
+ pr_cont("index=%u, name=%.*s, capability=0x%x, "
+ "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
+ p->index, (int)sizeof(p->name), p->name, p->capability,
+ p->rangelow, p->rangehigh, p->txsubchans);
+}
+
+static void v4l_print_tuner(const void *arg, bool write_only)
+{
+ const struct v4l2_tuner *p = arg;
+
+ if (write_only)
+ pr_cont("index=%u, audmode=%u\n", p->index, p->audmode);
+ else
+ pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, "
+ "rangelow=%u, rangehigh=%u, signal=%u, afc=%d, "
+ "rxsubchans=0x%x, audmode=%u\n",
+ p->index, (int)sizeof(p->name), p->name, p->type,
+ p->capability, p->rangelow,
+ p->rangehigh, p->signal, p->afc,
+ p->rxsubchans, p->audmode);
+}
+
+static void v4l_print_frequency(const void *arg, bool write_only)
+{
+ const struct v4l2_frequency *p = arg;
+
+ pr_cont("tuner=%u, type=%u, frequency=%u\n",
+ p->tuner, p->type, p->frequency);
+}
+
+static void v4l_print_standard(const void *arg, bool write_only)
+{
+ const struct v4l2_standard *p = arg;
+
+ pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, "
+ "framelines=%u\n", p->index,
+ (unsigned long long)p->id, (int)sizeof(p->name), p->name,
+ p->frameperiod.numerator,
+ p->frameperiod.denominator,
+ p->framelines);
+}
+
+static void v4l_print_std(const void *arg, bool write_only)
+{
+ pr_cont("std=0x%08Lx\n", *(const long long unsigned *)arg);
+}
+
+static void v4l_print_hw_freq_seek(const void *arg, bool write_only)
+{
+ const struct v4l2_hw_freq_seek *p = arg;
+
+ pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, "
+ "rangelow=%u, rangehigh=%u\n",
+ p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing,
+ p->rangelow, p->rangehigh);
+}
+
+static void v4l_print_requestbuffers(const void *arg, bool write_only)
+{
+ const struct v4l2_requestbuffers *p = arg;
+
+ pr_cont("count=%d, type=%s, memory=%s\n",
+ p->count,
+ prt_names(p->type, v4l2_type_names),
+ prt_names(p->memory, v4l2_memory_names));
+}
+
+static void v4l_print_buffer(const void *arg, bool write_only)
+{
+ const struct v4l2_buffer *p = arg;
+ const struct v4l2_timecode *tc = &p->timecode;
+ const struct v4l2_plane *plane;
+ int i;
+
+ pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, "
+ "flags=0x%08x, field=%s, sequence=%d, memory=%s",
+ p->timestamp.tv_sec / 3600,
+ (int)(p->timestamp.tv_sec / 60) % 60,
+ (int)(p->timestamp.tv_sec % 60),
+ (long)p->timestamp.tv_usec,
+ p->index,
+ prt_names(p->type, v4l2_type_names),
+ p->flags, prt_names(p->field, v4l2_field_names),
+ p->sequence, prt_names(p->memory, v4l2_memory_names));
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(p->type) && p->m.planes) {
+ pr_cont("\n");
+ for (i = 0; i < p->length; ++i) {
+ plane = &p->m.planes[i];
+ printk(KERN_DEBUG
+ "plane %d: bytesused=%d, data_offset=0x%08x, "
+ "offset/userptr=0x%lx, length=%d\n",
+ i, plane->bytesused, plane->data_offset,
+ plane->m.userptr, plane->length);
+ }
+ } else {
+ pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n",
+ p->bytesused, p->m.userptr, p->length);
+ }
+
+ printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, "
+ "flags=0x%08x, frames=%d, userbits=0x%08x\n",
+ tc->hours, tc->minutes, tc->seconds,
+ tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
+}
+
+static void v4l_print_exportbuffer(const void *arg, bool write_only)
+{
+ const struct v4l2_exportbuffer *p = arg;
+
+ pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n",
+ p->fd, prt_names(p->type, v4l2_type_names),
+ p->index, p->plane, p->flags);
+}
+
+static void v4l_print_create_buffers(const void *arg, bool write_only)
+{
+ const struct v4l2_create_buffers *p = arg;
+
+ pr_cont("index=%d, count=%d, memory=%s, ",
+ p->index, p->count,
+ prt_names(p->memory, v4l2_memory_names));
+ v4l_print_format(&p->format, write_only);
+}
+
+static void v4l_print_streamparm(const void *arg, bool write_only)
+{
+ const struct v4l2_streamparm *p = arg;
+
+ pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
+
+ if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ const struct v4l2_captureparm *c = &p->parm.capture;
+
+ pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, "
+ "extendedmode=%d, readbuffers=%d\n",
+ c->capability, c->capturemode,
+ c->timeperframe.numerator, c->timeperframe.denominator,
+ c->extendedmode, c->readbuffers);
+ } else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ const struct v4l2_outputparm *c = &p->parm.output;
+
+ pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, "
+ "extendedmode=%d, writebuffers=%d\n",
+ c->capability, c->outputmode,
+ c->timeperframe.numerator, c->timeperframe.denominator,
+ c->extendedmode, c->writebuffers);
+ } else {
+ pr_cont("\n");
+ }
+}
+
+static void v4l_print_queryctrl(const void *arg, bool write_only)
+{
+ const struct v4l2_queryctrl *p = arg;
+
+ pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, "
+ "step=%d, default=%d, flags=0x%08x\n",
+ p->id, p->type, (int)sizeof(p->name), p->name,
+ p->minimum, p->maximum,
+ p->step, p->default_value, p->flags);
+}
+
+static void v4l_print_query_ext_ctrl(const void *arg, bool write_only)
+{
+ const struct v4l2_query_ext_ctrl *p = arg;
+
+ pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, "
+ "step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, "
+ "nr_of_dims=%u, dims=%u,%u,%u,%u\n",
+ p->id, p->type, (int)sizeof(p->name), p->name,
+ p->minimum, p->maximum,
+ p->step, p->default_value, p->flags,
+ p->elem_size, p->elems, p->nr_of_dims,
+ p->dims[0], p->dims[1], p->dims[2], p->dims[3]);
+}
+
+static void v4l_print_querymenu(const void *arg, bool write_only)
+{
+ const struct v4l2_querymenu *p = arg;
+
+ pr_cont("id=0x%x, index=%d\n", p->id, p->index);
+}
+
+static void v4l_print_control(const void *arg, bool write_only)
+{
+ const struct v4l2_control *p = arg;
+
+ pr_cont("id=0x%x, value=%d\n", p->id, p->value);
+}
+
+static void v4l_print_ext_controls(const void *arg, bool write_only)
+{
+ const struct v4l2_ext_controls *p = arg;
+ int i;
+
+ pr_cont("class=0x%x, count=%d, error_idx=%d",
+ p->ctrl_class, p->count, p->error_idx);
+ for (i = 0; i < p->count; i++) {
+ if (!p->controls[i].size)
+ pr_cont(", id/val=0x%x/0x%x",
+ p->controls[i].id, p->controls[i].value);
+ else
+ pr_cont(", id/size=0x%x/%u",
+ p->controls[i].id, p->controls[i].size);
+ }
+ pr_cont("\n");
+}
+
+static void v4l_print_cropcap(const void *arg, bool write_only)
+{
+ const struct v4l2_cropcap *p = arg;
+
+ pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, "
+ "defrect wxh=%dx%d, x,y=%d,%d, "
+ "pixelaspect %d/%d\n",
+ prt_names(p->type, v4l2_type_names),
+ p->bounds.width, p->bounds.height,
+ p->bounds.left, p->bounds.top,
+ p->defrect.width, p->defrect.height,
+ p->defrect.left, p->defrect.top,
+ p->pixelaspect.numerator, p->pixelaspect.denominator);
+}
+
+static void v4l_print_crop(const void *arg, bool write_only)
+{
+ const struct v4l2_crop *p = arg;
+
+ pr_cont("type=%s, wxh=%dx%d, x,y=%d,%d\n",
+ prt_names(p->type, v4l2_type_names),
+ p->c.width, p->c.height,
+ p->c.left, p->c.top);
+}
+
+static void v4l_print_selection(const void *arg, bool write_only)
+{
+ const struct v4l2_selection *p = arg;
+
+ pr_cont("type=%s, target=%d, flags=0x%x, wxh=%dx%d, x,y=%d,%d\n",
+ prt_names(p->type, v4l2_type_names),
+ p->target, p->flags,
+ p->r.width, p->r.height, p->r.left, p->r.top);
+}
+
+static void v4l_print_jpegcompression(const void *arg, bool write_only)
+{
+ const struct v4l2_jpegcompression *p = arg;
+
+ pr_cont("quality=%d, APPn=%d, APP_len=%d, "
+ "COM_len=%d, jpeg_markers=0x%x\n",
+ p->quality, p->APPn, p->APP_len,
+ p->COM_len, p->jpeg_markers);
+}
+
+static void v4l_print_enc_idx(const void *arg, bool write_only)
+{
+ const struct v4l2_enc_idx *p = arg;
+
+ pr_cont("entries=%d, entries_cap=%d\n",
+ p->entries, p->entries_cap);
+}
+
+static void v4l_print_encoder_cmd(const void *arg, bool write_only)
+{
+ const struct v4l2_encoder_cmd *p = arg;
+
+ pr_cont("cmd=%d, flags=0x%x\n",
+ p->cmd, p->flags);
+}
+
+static void v4l_print_decoder_cmd(const void *arg, bool write_only)
+{
+ const struct v4l2_decoder_cmd *p = arg;
+
+ pr_cont("cmd=%d, flags=0x%x\n", p->cmd, p->flags);
+
+ if (p->cmd == V4L2_DEC_CMD_START)
+ pr_info("speed=%d, format=%u\n",
+ p->start.speed, p->start.format);
+ else if (p->cmd == V4L2_DEC_CMD_STOP)
+ pr_info("pts=%llu\n", p->stop.pts);
+}
+
+static void v4l_print_dbg_chip_info(const void *arg, bool write_only)
+{
+ const struct v4l2_dbg_chip_info *p = arg;
+
+ pr_cont("type=%u, ", p->match.type);
+ if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
+ pr_cont("name=%.*s, ",
+ (int)sizeof(p->match.name), p->match.name);
+ else
+ pr_cont("addr=%u, ", p->match.addr);
+ pr_cont("name=%.*s\n", (int)sizeof(p->name), p->name);
+}
+
+static void v4l_print_dbg_register(const void *arg, bool write_only)
+{
+ const struct v4l2_dbg_register *p = arg;
+
+ pr_cont("type=%u, ", p->match.type);
+ if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
+ pr_cont("name=%.*s, ",
+ (int)sizeof(p->match.name), p->match.name);
+ else
+ pr_cont("addr=%u, ", p->match.addr);
+ pr_cont("reg=0x%llx, val=0x%llx\n",
+ p->reg, p->val);
+}
+
+static void v4l_print_dv_timings(const void *arg, bool write_only)
+{
+ const struct v4l2_dv_timings *p = arg;
+
+ switch (p->type) {
+ case V4L2_DV_BT_656_1120:
+ pr_cont("type=bt-656/1120, interlaced=%u, "
+ "pixelclock=%llu, "
+ "width=%u, height=%u, polarities=0x%x, "
+ "hfrontporch=%u, hsync=%u, "
+ "hbackporch=%u, vfrontporch=%u, "
+ "vsync=%u, vbackporch=%u, "
+ "il_vfrontporch=%u, il_vsync=%u, "
+ "il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
+ p->bt.interlaced, p->bt.pixelclock,
+ p->bt.width, p->bt.height,
+ p->bt.polarities, p->bt.hfrontporch,
+ p->bt.hsync, p->bt.hbackporch,
+ p->bt.vfrontporch, p->bt.vsync,
+ p->bt.vbackporch, p->bt.il_vfrontporch,
+ p->bt.il_vsync, p->bt.il_vbackporch,
+ p->bt.standards, p->bt.flags);
+ break;
+ default:
+ pr_cont("type=%d\n", p->type);
+ break;
+ }
+}
+
+static void v4l_print_enum_dv_timings(const void *arg, bool write_only)
+{
+ const struct v4l2_enum_dv_timings *p = arg;
+
+ pr_cont("index=%u, ", p->index);
+ v4l_print_dv_timings(&p->timings, write_only);
+}
+
+static void v4l_print_dv_timings_cap(const void *arg, bool write_only)
+{
+ const struct v4l2_dv_timings_cap *p = arg;
+
+ switch (p->type) {
+ case V4L2_DV_BT_656_1120:
+ pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, "
+ "pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
+ p->bt.min_width, p->bt.max_width,
+ p->bt.min_height, p->bt.max_height,
+ p->bt.min_pixelclock, p->bt.max_pixelclock,
+ p->bt.standards, p->bt.capabilities);
+ break;
+ default:
+ pr_cont("type=%u\n", p->type);
+ break;
+ }
+}
+
+static void v4l_print_frmsizeenum(const void *arg, bool write_only)
+{
+ const struct v4l2_frmsizeenum *p = arg;
+
+ pr_cont("index=%u, pixelformat=%c%c%c%c, type=%u",
+ p->index,
+ (p->pixel_format & 0xff),
+ (p->pixel_format >> 8) & 0xff,
+ (p->pixel_format >> 16) & 0xff,
+ (p->pixel_format >> 24) & 0xff,
+ p->type);
+ switch (p->type) {
+ case V4L2_FRMSIZE_TYPE_DISCRETE:
+ pr_cont(", wxh=%ux%u\n",
+ p->discrete.width, p->discrete.height);
+ break;
+ case V4L2_FRMSIZE_TYPE_STEPWISE:
+ pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
+ p->stepwise.min_width, p->stepwise.min_height,
+			p->stepwise.max_width, p->stepwise.max_height,
+			p->stepwise.step_width, p->stepwise.step_height);
+ break;
+ case V4L2_FRMSIZE_TYPE_CONTINUOUS:
+ /* fall through */
+ default:
+ pr_cont("\n");
+ break;
+ }
+}
+
+static void v4l_print_frmivalenum(const void *arg, bool write_only)
+{
+ const struct v4l2_frmivalenum *p = arg;
+
+ pr_cont("index=%u, pixelformat=%c%c%c%c, wxh=%ux%u, type=%u",
+ p->index,
+ (p->pixel_format & 0xff),
+ (p->pixel_format >> 8) & 0xff,
+ (p->pixel_format >> 16) & 0xff,
+ (p->pixel_format >> 24) & 0xff,
+ p->width, p->height, p->type);
+ switch (p->type) {
+ case V4L2_FRMIVAL_TYPE_DISCRETE:
+ pr_cont(", fps=%d/%d\n",
+ p->discrete.numerator,
+ p->discrete.denominator);
+ break;
+ case V4L2_FRMIVAL_TYPE_STEPWISE:
+ pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n",
+ p->stepwise.min.numerator,
+ p->stepwise.min.denominator,
+ p->stepwise.max.numerator,
+ p->stepwise.max.denominator,
+ p->stepwise.step.numerator,
+ p->stepwise.step.denominator);
+ break;
+ case V4L2_FRMIVAL_TYPE_CONTINUOUS:
+ /* fall through */
+ default:
+ pr_cont("\n");
+ break;
+ }
+}
+
+static void v4l_print_event(const void *arg, bool write_only)
+{
+ const struct v4l2_event *p = arg;
+ const struct v4l2_event_ctrl *c;
+
+ pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, "
+ "timestamp=%lu.%9.9lu\n",
+ p->type, p->pending, p->sequence, p->id,
+ p->timestamp.tv_sec, p->timestamp.tv_nsec);
+ switch (p->type) {
+ case V4L2_EVENT_VSYNC:
+ printk(KERN_DEBUG "field=%s\n",
+ prt_names(p->u.vsync.field, v4l2_field_names));
+ break;
+ case V4L2_EVENT_CTRL:
+ c = &p->u.ctrl;
+ printk(KERN_DEBUG "changes=0x%x, type=%u, ",
+ c->changes, c->type);
+ if (c->type == V4L2_CTRL_TYPE_INTEGER64)
+ pr_cont("value64=%lld, ", c->value64);
+ else
+ pr_cont("value=%d, ", c->value);
+ pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, "
+ "default_value=%d\n",
+ c->flags, c->minimum, c->maximum,
+ c->step, c->default_value);
+ break;
+ case V4L2_EVENT_FRAME_SYNC:
+ pr_cont("frame_sequence=%u\n",
+ p->u.frame_sync.frame_sequence);
+ break;
+ }
+}
+
+static void v4l_print_event_subscription(const void *arg, bool write_only)
+{
+ const struct v4l2_event_subscription *p = arg;
+
+ pr_cont("type=0x%x, id=0x%x, flags=0x%x\n",
+ p->type, p->id, p->flags);
+}
+
+static void v4l_print_sliced_vbi_cap(const void *arg, bool write_only)
+{
+ const struct v4l2_sliced_vbi_cap *p = arg;
+ int i;
+
+ pr_cont("type=%s, service_set=0x%08x\n",
+ prt_names(p->type, v4l2_type_names), p->service_set);
+ for (i = 0; i < 24; i++)
+ printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i,
+ p->service_lines[0][i],
+ p->service_lines[1][i]);
+}
+
+static void v4l_print_freq_band(const void *arg, bool write_only)
+{
+ const struct v4l2_frequency_band *p = arg;
+
+ pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
+ "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
+ p->tuner, p->type, p->index,
+ p->capability, p->rangelow,
+ p->rangehigh, p->modulation);
+}
+
+static void v4l_print_edid(const void *arg, bool write_only)
+{
+ const struct v4l2_edid *p = arg;
+
+ pr_cont("pad=%u, start_block=%u, blocks=%u\n",
+ p->pad, p->start_block, p->blocks);
+}
+
+static void v4l_print_u32(const void *arg, bool write_only)
+{
+ pr_cont("value=%u\n", *(const u32 *)arg);
+}
+
+static void v4l_print_newline(const void *arg, bool write_only)
+{
+ pr_cont("\n");
+}
+
+static void v4l_print_default(const void *arg, bool write_only)
+{
+ pr_cont("driver-specific ioctl\n");
+}
+
+static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
+{
+ __u32 i;
+
+ /* zero the reserved fields */
+ c->reserved[0] = c->reserved[1] = 0;
+ for (i = 0; i < c->count; i++)
+ c->controls[i].reserved2[0] = 0;
+
+ /* V4L2_CID_PRIVATE_BASE cannot be used as control class
+ when using extended controls.
+ Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
+ is it allowed for backwards compatibility.
+ */
+ if (!allow_priv && c->ctrl_class == V4L2_CID_PRIVATE_BASE)
+ return 0;
+ if (c->ctrl_class == 0)
+ return 1;
+ /* Check that all controls are from the same control class. */
+ for (i = 0; i < c->count; i++) {
+ if (V4L2_CTRL_ID2CLASS(c->controls[i].id) != c->ctrl_class) {
+ c->error_idx = i;
+ return 0;
+ }
+ }
+ return 1;
+}
+
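+/*
+ * Verify that the buffer type is compatible with this device node (its
+ * vfl_type and direction) and that the driver implements a matching
+ * g_fmt handler for it.
+ */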
+static int check_fmt(struct file *file, enum v4l2_buf_type type)
+{
+ struct video_device *vfd = video_devdata(file);
+ const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
+ bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+ bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
+ bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
+
+ if (ops == NULL)
+ return -EINVAL;
+
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (is_vid && is_rx &&
+ (ops->vidioc_g_fmt_vid_cap || ops->vidioc_g_fmt_vid_cap_mplane))
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (is_vid && is_rx && ops->vidioc_g_fmt_vid_cap_mplane)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (is_vid && is_rx && ops->vidioc_g_fmt_vid_overlay)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (is_vid && is_tx &&
+ (ops->vidioc_g_fmt_vid_out || ops->vidioc_g_fmt_vid_out_mplane))
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_mplane)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_overlay)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (is_vbi && is_rx && ops->vidioc_g_fmt_vbi_cap)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (is_vbi && is_tx && ops->vidioc_g_fmt_vbi_out)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (is_vbi && is_rx && ops->vidioc_g_fmt_sliced_vbi_cap)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (is_vbi && is_tx && ops->vidioc_g_fmt_sliced_vbi_out)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (is_sdr && is_rx && ops->vidioc_g_fmt_sdr_cap)
+ return 0;
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static void v4l_sanitize_format(struct v4l2_format *fmt)
+{
+ unsigned int offset;
+
+ /*
+ * The v4l2_pix_format structure has been extended with fields that were
+ * not previously required to be set to zero by applications. The priv
+	 * field, when set to a magic value, indicates that the extended fields
+	 * are valid. Otherwise they will contain undefined values. To simplify
+	 * the API towards drivers, zero the extended fields and set the priv
+ * field to the magic value when the extended pixel format structure
+ * isn't used by applications.
+ */
+
+ if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ fmt->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return;
+
+ if (fmt->fmt.pix.priv == V4L2_PIX_FMT_PRIV_MAGIC)
+ return;
+
+ fmt->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+
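+	/* Zero every byte of v4l2_pix_format that follows 'priv'. */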
+ offset = offsetof(struct v4l2_pix_format, priv)
+ + sizeof(fmt->fmt.pix.priv);
+ memset(((void *)&fmt->fmt.pix) + offset, 0,
+ sizeof(fmt->fmt.pix) - offset);
+}
+
+static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_capability *cap = (struct v4l2_capability *)arg;
+ int ret;
+
+ cap->version = LINUX_VERSION_CODE;
+
+ ret = ops->vidioc_querycap(file, fh, cap);
+
+ cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT;
+ /*
+ * Drivers MUST fill in device_caps, so check for this and
+ * warn if it was forgotten.
+ */
+ WARN_ON(!(cap->capabilities & V4L2_CAP_DEVICE_CAPS) ||
+ !cap->device_caps);
+ cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT;
+
+ return ret;
+}
+
+static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_s_input(file, fh, *(unsigned int *)arg);
+}
+
+static int v4l_s_output(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_s_output(file, fh, *(unsigned int *)arg);
+}
+
+static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd;
+ u32 *p = arg;
+
+ vfd = video_devdata(file);
+ *p = v4l2_prio_max(vfd->prio);
+ return 0;
+}
+
+static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd;
+ struct v4l2_fh *vfh;
+ u32 *p = arg;
+
+ vfd = video_devdata(file);
+ if (!test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
+ return -ENOTTY;
+ vfh = file->private_data;
+ return v4l2_prio_change(vfd->prio, &vfh->prio, *p);
+}
+
+static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_input *p = arg;
+
+ /*
+ * We set the flags for CAP_DV_TIMINGS &
+	 * CAP_STD here based on the ioctl handlers provided by the
+ * driver. If the driver doesn't support these
+ * for a specific input, it must override these flags.
+ */
+ if (is_valid_ioctl(vfd, VIDIOC_S_STD))
+ p->capabilities |= V4L2_IN_CAP_STD;
+
+ return ops->vidioc_enum_input(file, fh, p);
+}
+
+static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_output *p = arg;
+
+ /*
+ * We set the flags for CAP_DV_TIMINGS &
+	 * CAP_STD here based on the ioctl handlers provided by the
+ * driver. If the driver doesn't support these
+ * for a specific output, it must override these flags.
+ */
+ if (is_valid_ioctl(vfd, VIDIOC_S_STD))
+ p->capabilities |= V4L2_OUT_CAP_STD;
+
+ return ops->vidioc_enum_output(file, fh, p);
+}
+
+static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_fmtdesc *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+ bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
+ bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
+
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_cap))
+ break;
+ return ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_cap_mplane))
+ break;
+ return ops->vidioc_enum_fmt_vid_cap_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_overlay))
+ break;
+ return ops->vidioc_enum_fmt_vid_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out))
+ break;
+ return ops->vidioc_enum_fmt_vid_out(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out_mplane))
+ break;
+ return ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!is_rx || !is_sdr || !ops->vidioc_enum_fmt_sdr_cap))
+ break;
+ return ops->vidioc_enum_fmt_sdr_cap(file, fh, arg);
+ }
+ return -EINVAL;
+}
+
+static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_format *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+ bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
+ bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
+ int ret;
+
+ /*
+	 * fmt can't be cleared for these overlay types due to the 'clips',
+	 * 'clipcount' and 'bitmap' fields in struct v4l2_window, which
+	 * are provided by the user. So handle these two overlay types
+ * first, and then just do a simple memset for the other types.
+ */
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: {
+ struct v4l2_clip __user *clips = p->fmt.win.clips;
+ u32 clipcount = p->fmt.win.clipcount;
+ void __user *bitmap = p->fmt.win.bitmap;
+
+ memset(&p->fmt, 0, sizeof(p->fmt));
+ p->fmt.win.clips = clips;
+ p->fmt.win.clipcount = clipcount;
+ p->fmt.win.bitmap = bitmap;
+ break;
+ }
+ default:
+ memset(&p->fmt, 0, sizeof(p->fmt));
+ break;
+ }
+
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_cap))
+ break;
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_cap_mplane))
+ break;
+ return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_overlay))
+ break;
+ return ops->vidioc_g_fmt_vid_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_vbi_cap))
+ break;
+ return ops->vidioc_g_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_cap))
+ break;
+ return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out))
+ break;
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ ret = ops->vidioc_g_fmt_vid_out(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_mplane))
+ break;
+ return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_overlay))
+ break;
+ return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_vbi_out))
+ break;
+ return ops->vidioc_g_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_out))
+ break;
+ return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!is_rx || !is_sdr || !ops->vidioc_g_fmt_sdr_cap))
+ break;
+ return ops->vidioc_g_fmt_sdr_cap(file, fh, arg);
+ }
+ return -EINVAL;
+}
+
+static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_format *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+ bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
+ bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
+ int ret;
+
+ v4l_sanitize_format(p);
+
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap))
+ break;
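+		/*
+		 * CLEAR_AFTER_FIELD (defined earlier in this file) zeroes
+		 * everything in *p past the named member, so stale bytes in
+		 * the fmt union never reach the driver.
+		 */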
+ CLEAR_AFTER_FIELD(p, fmt.pix);
+ ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap_mplane))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+ return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_overlay))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.win);
+ return ops->vidioc_s_fmt_vid_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_vbi_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.vbi);
+ return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.sliced);
+ return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.pix);
+ ret = ops->vidioc_s_fmt_vid_out(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_mplane))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+ return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_overlay))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.win);
+ return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_vbi_out))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.vbi);
+ return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_out))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.sliced);
+ return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!is_rx || !is_sdr || !ops->vidioc_s_fmt_sdr_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.sdr);
+ return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
+ }
+ return -EINVAL;
+}
+
+static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_format *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+ bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
+ bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
+ int ret;
+
+ v4l_sanitize_format(p);
+
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.pix);
+ ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap_mplane))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+ return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_overlay))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.win);
+ return ops->vidioc_try_fmt_vid_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_vbi_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.vbi);
+ return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.sliced);
+ return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.pix);
+ ret = ops->vidioc_try_fmt_vid_out(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_mplane))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+ return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_overlay))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.win);
+ return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_vbi_out))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.vbi);
+ return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_out))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.sliced);
+ return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!is_rx || !is_sdr || !ops->vidioc_try_fmt_sdr_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.sdr);
+ return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
+ }
+ return -EINVAL;
+}
+
+static int v4l_streamon(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_streamon(file, fh, *(unsigned int *)arg);
+}
+
+static int v4l_streamoff(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_streamoff(file, fh, *(unsigned int *)arg);
+}
+
+static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_tuner *p = arg;
+ int err;
+
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ err = ops->vidioc_g_tuner(file, fh, p);
+ if (!err)
+ p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
+ return err;
+}
+
+static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_tuner *p = arg;
+
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ return ops->vidioc_s_tuner(file, fh, p);
+}
+
+static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_modulator *p = arg;
+ int err;
+
+ err = ops->vidioc_g_modulator(file, fh, p);
+ if (!err)
+ p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
+ return err;
+}
+
+static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_frequency *p = arg;
+
+ if (vfd->vfl_type == VFL_TYPE_SDR)
+ p->type = V4L2_TUNER_ADC;
+ else
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ return ops->vidioc_g_frequency(file, fh, p);
+}
+
+static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ const struct v4l2_frequency *p = arg;
+ enum v4l2_tuner_type type;
+
+ if (vfd->vfl_type == VFL_TYPE_SDR) {
+ if (p->type != V4L2_TUNER_ADC && p->type != V4L2_TUNER_RF)
+ return -EINVAL;
+ } else {
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (type != p->type)
+ return -EINVAL;
+ }
+ return ops->vidioc_s_frequency(file, fh, p);
+}
+
+static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_standard *p = arg;
+ v4l2_std_id id = vfd->tvnorms, curr_id = 0;
+ unsigned int index = p->index, i, j = 0;
+ const char *descr = "";
+
+ /* Return -ENODATA if the tvnorms for the current input
+ or output is 0, meaning that it doesn't support this API. */
+ if (id == 0)
+ return -ENODATA;
+
+ /* Return norm array in a canonical way */
+ for (i = 0; i <= index && id; i++) {
+		/* The last std value in the standards array is 0, so this
+		   while loop always ends there since (id & 0) == 0. */
+ while ((id & standards[j].std) != standards[j].std)
+ j++;
+ curr_id = standards[j].std;
+ descr = standards[j].descr;
+ j++;
+ if (curr_id == 0)
+ break;
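+		/*
+		 * Keep the composite PAL/SECAM/NTSC bits set in 'id' so that
+		 * their individual sub-standards are still enumerated on
+		 * later iterations.
+		 */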
+ if (curr_id != V4L2_STD_PAL &&
+ curr_id != V4L2_STD_SECAM &&
+ curr_id != V4L2_STD_NTSC)
+ id &= ~curr_id;
+ }
+ if (i <= index)
+ return -EINVAL;
+
+ v4l2_video_std_construct(p, curr_id, descr);
+ return 0;
+}
+
+static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ v4l2_std_id id = *(v4l2_std_id *)arg, norm;
+
+ norm = id & vfd->tvnorms;
+ if (vfd->tvnorms && !norm) /* Check if std is supported */
+ return -EINVAL;
+
+ /* Calls the specific handler */
+ return ops->vidioc_s_std(file, fh, norm);
+}
+
+static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ v4l2_std_id *p = arg;
+
+ /*
+ * If no signal is detected, then the driver should return
+ * V4L2_STD_UNKNOWN. Otherwise it should return tvnorms with
+ * any standards that do not apply removed.
+ *
+ * This means that tuners, audio and video decoders can join
+ * their efforts to improve the standards detection.
+ */
+ *p = vfd->tvnorms;
+ return ops->vidioc_querystd(file, fh, arg);
+}
+
+static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_hw_freq_seek *p = arg;
+ enum v4l2_tuner_type type;
+
+ /* s_hw_freq_seek is not supported for SDR for now */
+ if (vfd->vfl_type == VFL_TYPE_SDR)
+ return -EINVAL;
+
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (p->type != type)
+ return -EINVAL;
+ return ops->vidioc_s_hw_freq_seek(file, fh, p);
+}
+
+static int v4l_overlay(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_overlay(file, fh, *(unsigned int *)arg);
+}
+
+static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_requestbuffers *p = arg;
+ int ret = check_fmt(file, p->type);
+
+ if (ret)
+ return ret;
+
+ CLEAR_AFTER_FIELD(p, memory);
+
+ return ops->vidioc_reqbufs(file, fh, p);
+}
+
+static int v4l_querybuf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_buffer *p = arg;
+ int ret = check_fmt(file, p->type);
+
+ return ret ? ret : ops->vidioc_querybuf(file, fh, p);
+}
+
+static int v4l_qbuf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_buffer *p = arg;
+ int ret = check_fmt(file, p->type);
+
+ return ret ? ret : ops->vidioc_qbuf(file, fh, p);
+}
+
+static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_buffer *p = arg;
+ int ret = check_fmt(file, p->type);
+
+ return ret ? ret : ops->vidioc_dqbuf(file, fh, p);
+}
+
+static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_create_buffers *create = arg;
+ int ret = check_fmt(file, create->format.type);
+
+ if (ret)
+ return ret;
+
+ v4l_sanitize_format(&create->format);
+
+ ret = ops->vidioc_create_bufs(file, fh, create);
+
+ if (create->format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ create->format.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ create->format.fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+
+ return ret;
+}
+
+static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_buffer *b = arg;
+ int ret = check_fmt(file, b->type);
+
+ return ret ? ret : ops->vidioc_prepare_buf(file, fh, b);
+}
+
+static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_streamparm *p = arg;
+ v4l2_std_id std;
+ int ret = check_fmt(file, p->type);
+
+ if (ret)
+ return ret;
+ if (ops->vidioc_g_parm)
+ return ops->vidioc_g_parm(file, fh, p);
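+	/*
+	 * No g_parm handler: emulate it for capture devices from the
+	 * current standard's frame period.
+	 */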
+ if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+ p->parm.capture.readbuffers = 2;
+ ret = ops->vidioc_g_std(file, fh, &std);
+ if (ret == 0)
+ v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe);
+ return ret;
+}
+
+static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_streamparm *p = arg;
+ int ret = check_fmt(file, p->type);
+
+ return ret ? ret : ops->vidioc_s_parm(file, fh, p);
+}
+
+static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_queryctrl *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_queryctrl(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_queryctrl(vfd->ctrl_handler, p);
+ if (ops->vidioc_queryctrl)
+ return ops->vidioc_queryctrl(file, fh, p);
+ return -ENOTTY;
+}
+
+static int v4l_query_ext_ctrl(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_query_ext_ctrl *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_query_ext_ctrl(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_query_ext_ctrl(vfd->ctrl_handler, p);
+ if (ops->vidioc_query_ext_ctrl)
+ return ops->vidioc_query_ext_ctrl(file, fh, p);
+ return -ENOTTY;
+}
+
+static int v4l_querymenu(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_querymenu *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_querymenu(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_querymenu(vfd->ctrl_handler, p);
+ if (ops->vidioc_querymenu)
+ return ops->vidioc_querymenu(file, fh, p);
+ return -ENOTTY;
+}
+
+static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_control *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_ext_controls ctrls;
+ struct v4l2_ext_control ctrl;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_g_ctrl(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_g_ctrl(vfd->ctrl_handler, p);
+ if (ops->vidioc_g_ctrl)
+ return ops->vidioc_g_ctrl(file, fh, p);
+ if (ops->vidioc_g_ext_ctrls == NULL)
+ return -ENOTTY;
+
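+	/*
+	 * Fall back to the extended control API: wrap the single control in
+	 * a one-element v4l2_ext_controls request and read the value back.
+	 */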
+ ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(p->id);
+ ctrls.count = 1;
+ ctrls.controls = &ctrl;
+ ctrl.id = p->id;
+ ctrl.value = p->value;
+ if (check_ext_ctrls(&ctrls, 1)) {
+ int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls);
+
+ if (ret == 0)
+ p->value = ctrl.value;
+ return ret;
+ }
+ return -EINVAL;
+}
+
+static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_control *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_ext_controls ctrls;
+ struct v4l2_ext_control ctrl;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
+ if (ops->vidioc_s_ctrl)
+ return ops->vidioc_s_ctrl(file, fh, p);
+ if (ops->vidioc_s_ext_ctrls == NULL)
+ return -ENOTTY;
+
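+	/*
+	 * Fall back to the extended control API: wrap the single control in
+	 * a one-element v4l2_ext_controls request.
+	 */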
+ ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(p->id);
+ ctrls.count = 1;
+ ctrls.controls = &ctrl;
+ ctrl.id = p->id;
+ ctrl.value = p->value;
+ if (check_ext_ctrls(&ctrls, 1))
+ return ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
+ return -EINVAL;
+}
+
+static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_ext_controls *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ p->error_idx = p->count;
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
+ if (ops->vidioc_g_ext_ctrls == NULL)
+ return -ENOTTY;
+ return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) :
+ -EINVAL;
+}
+
+static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_ext_controls *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ p->error_idx = p->count;
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p);
+ if (ops->vidioc_s_ext_ctrls == NULL)
+ return -ENOTTY;
+ return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) :
+ -EINVAL;
+}
+
+static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_ext_controls *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ p->error_idx = p->count;
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
+ if (ops->vidioc_try_ext_ctrls == NULL)
+ return -ENOTTY;
+ return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) :
+ -EINVAL;
+}
+
+static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_crop *p = arg;
+ struct v4l2_selection s = {
+ .type = p->type,
+ };
+ int ret;
+
+ if (ops->vidioc_g_crop)
+ return ops->vidioc_g_crop(file, fh, p);
+ /* simulate capture crop using selection api */
+
+ /* crop means compose for output devices */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
+ else
+ s.target = V4L2_SEL_TGT_CROP_ACTIVE;
+
+ ret = ops->vidioc_g_selection(file, fh, &s);
+
+ /* copying results to old structure on success */
+ if (!ret)
+ p->c = s.r;
+ return ret;
+}
+
+static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_crop *p = arg;
+ struct v4l2_selection s = {
+ .type = p->type,
+ .r = p->c,
+ };
+
+ if (ops->vidioc_s_crop)
+ return ops->vidioc_s_crop(file, fh, p);
+ /* simulate capture crop using selection api */
+
+ /* crop means compose for output devices */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
+ else
+ s.target = V4L2_SEL_TGT_CROP_ACTIVE;
+
+ return ops->vidioc_s_selection(file, fh, &s);
+}
+
+static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_cropcap *p = arg;
+
+ if (ops->vidioc_g_selection) {
+ struct v4l2_selection s = { .type = p->type };
+ int ret;
+
+ /* obtaining bounds */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS;
+ else
+ s.target = V4L2_SEL_TGT_CROP_BOUNDS;
+
+ ret = ops->vidioc_g_selection(file, fh, &s);
+ if (ret)
+ return ret;
+ p->bounds = s.r;
+
+ /* obtaining defrect */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
+ else
+ s.target = V4L2_SEL_TGT_CROP_DEFAULT;
+
+ ret = ops->vidioc_g_selection(file, fh, &s);
+ if (ret)
+ return ret;
+ p->defrect = s.r;
+ }
+
+ /* setting trivial pixelaspect */
+ p->pixelaspect.numerator = 1;
+ p->pixelaspect.denominator = 1;
+
+ if (ops->vidioc_cropcap)
+ return ops->vidioc_cropcap(file, fh, p);
+
+ return 0;
+}
+
+static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ int ret;
+
+ if (vfd->v4l2_dev)
+ pr_info("%s: ================= START STATUS =================\n",
+ vfd->v4l2_dev->name);
+ ret = ops->vidioc_log_status(file, fh);
+ if (vfd->v4l2_dev)
+ pr_info("%s: ================== END STATUS ==================\n",
+ vfd->v4l2_dev->name);
+ return ret;
+}
+
+static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ struct v4l2_dbg_register *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_subdev *sd;
+ int idx = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) {
+ if (vfd->v4l2_dev == NULL)
+ return -EINVAL;
+ v4l2_device_for_each_subdev(sd, vfd->v4l2_dev)
+ if (p->match.addr == idx++)
+ return v4l2_subdev_call(sd, core, g_register, p);
+ return -EINVAL;
+ }
+ if (ops->vidioc_g_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
+ (ops->vidioc_g_chip_info || p->match.addr == 0))
+ return ops->vidioc_g_register(file, fh, p);
+ return -EINVAL;
+#else
+ return -ENOTTY;
+#endif
+}
+
+static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ const struct v4l2_dbg_register *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_subdev *sd;
+ int idx = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) {
+ if (vfd->v4l2_dev == NULL)
+ return -EINVAL;
+ v4l2_device_for_each_subdev(sd, vfd->v4l2_dev)
+ if (p->match.addr == idx++)
+ return v4l2_subdev_call(sd, core, s_register, p);
+ return -EINVAL;
+ }
+ if (ops->vidioc_s_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
+ (ops->vidioc_g_chip_info || p->match.addr == 0))
+ return ops->vidioc_s_register(file, fh, p);
+ return -EINVAL;
+#else
+ return -ENOTTY;
+#endif
+}
+
+static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_dbg_chip_info *p = arg;
+ struct v4l2_subdev *sd;
+ int idx = 0;
+
+ switch (p->match.type) {
+ case V4L2_CHIP_MATCH_BRIDGE:
+ if (ops->vidioc_s_register)
+ p->flags |= V4L2_CHIP_FL_WRITABLE;
+ if (ops->vidioc_g_register)
+ p->flags |= V4L2_CHIP_FL_READABLE;
+ strlcpy(p->name, vfd->v4l2_dev->name, sizeof(p->name));
+ if (ops->vidioc_g_chip_info)
+ return ops->vidioc_g_chip_info(file, fh, arg);
+ if (p->match.addr)
+ return -EINVAL;
+ return 0;
+
+ case V4L2_CHIP_MATCH_SUBDEV:
+ if (vfd->v4l2_dev == NULL)
+ break;
+ v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) {
+ if (p->match.addr != idx++)
+ continue;
+ if (sd->ops->core && sd->ops->core->s_register)
+ p->flags |= V4L2_CHIP_FL_WRITABLE;
+ if (sd->ops->core && sd->ops->core->g_register)
+ p->flags |= V4L2_CHIP_FL_READABLE;
+ strlcpy(p->name, sd->name, sizeof(p->name));
+ return 0;
+ }
+ break;
+ }
+ return -EINVAL;
+#else
+ return -ENOTTY;
+#endif
+}
+
+static int v4l_dqevent(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return v4l2_event_dequeue(fh, arg, file->f_flags & O_NONBLOCK);
+}
+
+static int v4l_subscribe_event(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_subscribe_event(fh, arg);
+}
+
+static int v4l_unsubscribe_event(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_unsubscribe_event(fh, arg);
+}
+
+static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_sliced_vbi_cap *p = arg;
+ int ret = check_fmt(file, p->type);
+
+ if (ret)
+ return ret;
+
+ /* Clear up to type, everything after type is zeroed already */
+ memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));
+
+ return ops->vidioc_g_sliced_vbi_cap(file, fh, p);
+}
+
+static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_frequency_band *p = arg;
+ enum v4l2_tuner_type type;
+ int err;
+
+ if (vfd->vfl_type == VFL_TYPE_SDR) {
+ if (p->type != V4L2_TUNER_ADC && p->type != V4L2_TUNER_RF)
+ return -EINVAL;
+ type = p->type;
+ } else {
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (type != p->type)
+ return -EINVAL;
+ }
+ if (ops->vidioc_enum_freq_bands) {
+ err = ops->vidioc_enum_freq_bands(file, fh, p);
+ if (err != -ENOTTY)
+ return err;
+ }
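+	/*
+	 * The driver has no enum_freq_bands handler (or it returned -ENOTTY):
+	 * synthesize a single band (index 0) from the tuner or modulator
+	 * frequency range instead.
+	 */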
+ if (is_valid_ioctl(vfd, VIDIOC_G_TUNER)) {
+ struct v4l2_tuner t = {
+ .index = p->tuner,
+ .type = type,
+ };
+
+ if (p->index)
+ return -EINVAL;
+ err = ops->vidioc_g_tuner(file, fh, &t);
+ if (err)
+ return err;
+ p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS;
+ p->rangelow = t.rangelow;
+ p->rangehigh = t.rangehigh;
+ p->modulation = (type == V4L2_TUNER_RADIO) ?
+ V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
+ return 0;
+ }
+ if (is_valid_ioctl(vfd, VIDIOC_G_MODULATOR)) {
+ struct v4l2_modulator m = {
+ .index = p->tuner,
+ };
+
+ if (type != V4L2_TUNER_RADIO)
+ return -EINVAL;
+ if (p->index)
+ return -EINVAL;
+ err = ops->vidioc_g_modulator(file, fh, &m);
+ if (err)
+ return err;
+ p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS;
+ p->rangelow = m.rangelow;
+ p->rangehigh = m.rangehigh;
+ p->modulation = (type == V4L2_TUNER_RADIO) ?
+ V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
+ return 0;
+ }
+ return -ENOTTY;
+}
+
+struct v4l2_ioctl_info {
+ unsigned int ioctl;
+ u32 flags;
+ const char * const name;
+ union {
+ u32 offset;
+ int (*func)(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *p);
+ } u;
+ void (*debug)(const void *arg, bool write_only);
+};
+
+/* This ioctl needs a priority check */
+#define INFO_FL_PRIO (1 << 0)
+/* This ioctl can be valid if the filehandle provides a control handler. */
+#define INFO_FL_CTRL (1 << 1)
+/* This is a standard ioctl, no need for special code */
+#define INFO_FL_STD (1 << 2)
+/* This ioctl has its own function */
+#define INFO_FL_FUNC (1 << 3)
+/* Queuing ioctl */
+#define INFO_FL_QUEUE (1 << 4)
+/* Zero the struct from just after the given field to the end */
+#define INFO_FL_CLEAR(v4l2_struct, field) \
+ ((offsetof(struct v4l2_struct, field) + \
+ sizeof(((struct v4l2_struct *)0)->field)) << 16)
+#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
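+/*
+ * The clear offset computed by INFO_FL_CLEAR is packed into bits 16..31 of
+ * the flags word; INFO_FL_CLEAR_MASK recovers it so the ioctl dispatcher
+ * can zero the tail of the argument struct before calling the handler.
+ */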
+
+#define IOCTL_INFO_STD(_ioctl, _vidioc, _debug, _flags) \
+ [_IOC_NR(_ioctl)] = { \
+ .ioctl = _ioctl, \
+ .flags = _flags | INFO_FL_STD, \
+ .name = #_ioctl, \
+ .u.offset = offsetof(struct v4l2_ioctl_ops, _vidioc), \
+ .debug = _debug, \
+ }
+
+#define IOCTL_INFO_FNC(_ioctl, _func, _debug, _flags) \
+ [_IOC_NR(_ioctl)] = { \
+ .ioctl = _ioctl, \
+ .flags = _flags | INFO_FL_FUNC, \
+ .name = #_ioctl, \
+ .u.func = _func, \
+ .debug = _debug, \
+ }
+
+static struct v4l2_ioctl_info v4l2_ioctls[] = {
+ IOCTL_INFO_FNC(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0),
+ IOCTL_INFO_FNC(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, INFO_FL_CLEAR(v4l2_fmtdesc, type)),
+ IOCTL_INFO_FNC(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, 0),
+ IOCTL_INFO_FNC(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO_FNC(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)),
+ IOCTL_INFO_STD(VIDIOC_G_FBUF, vidioc_g_fbuf, v4l_print_framebuffer, 0),
+ IOCTL_INFO_STD(VIDIOC_S_FBUF, vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO_STD(VIDIOC_EXPBUF, vidioc_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
+ IOCTL_INFO_FNC(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO_FNC(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO_FNC(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO_FNC(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, type)),
+ IOCTL_INFO_FNC(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_STD, vidioc_g_std, v4l_print_std, 0),
+ IOCTL_INFO_FNC(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)),
+ IOCTL_INFO_FNC(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)),
+ IOCTL_INFO_FNC(VIDIOC_G_CTRL, v4l_g_ctrl, v4l_print_control, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_control, id)),
+ IOCTL_INFO_FNC(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control, INFO_FL_PRIO | INFO_FL_CTRL),
+ IOCTL_INFO_FNC(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner, INFO_FL_CLEAR(v4l2_tuner, index)),
+ IOCTL_INFO_FNC(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_AUDIO, vidioc_g_audio, v4l_print_audio, 0),
+ IOCTL_INFO_STD(VIDIOC_S_AUDIO, vidioc_s_audio, v4l_print_audio, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)),
+ IOCTL_INFO_FNC(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),
+ IOCTL_INFO_STD(VIDIOC_G_INPUT, vidioc_g_input, v4l_print_u32, 0),
+ IOCTL_INFO_FNC(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_EDID, vidioc_g_edid, v4l_print_edid, 0),
+ IOCTL_INFO_STD(VIDIOC_S_EDID, vidioc_s_edid, v4l_print_edid, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_OUTPUT, vidioc_g_output, v4l_print_u32, 0),
+ IOCTL_INFO_FNC(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)),
+ IOCTL_INFO_STD(VIDIOC_G_AUDOUT, vidioc_g_audout, v4l_print_audioout, 0),
+ IOCTL_INFO_STD(VIDIOC_S_AUDOUT, vidioc_s_audout, v4l_print_audioout, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)),
+ IOCTL_INFO_STD(VIDIOC_S_MODULATOR, vidioc_s_modulator, v4l_print_modulator, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)),
+ IOCTL_INFO_FNC(VIDIOC_S_FREQUENCY, v4l_s_frequency, v4l_print_frequency, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)),
+ IOCTL_INFO_FNC(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)),
+ IOCTL_INFO_FNC(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_SELECTION, vidioc_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)),
+ IOCTL_INFO_STD(VIDIOC_S_SELECTION, vidioc_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)),
+ IOCTL_INFO_STD(VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp, v4l_print_jpegcompression, 0),
+ IOCTL_INFO_STD(VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0),
+ IOCTL_INFO_FNC(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0),
+ IOCTL_INFO_STD(VIDIOC_ENUMAUDIO, vidioc_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)),
+ IOCTL_INFO_STD(VIDIOC_ENUMAUDOUT, vidioc_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)),
+ IOCTL_INFO_FNC(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0),
+ IOCTL_INFO_FNC(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)),
+ IOCTL_INFO_FNC(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0),
+ IOCTL_INFO_FNC(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
+ IOCTL_INFO_FNC(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL),
+ IOCTL_INFO_FNC(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
+ IOCTL_INFO_STD(VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
+ IOCTL_INFO_STD(VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
+ IOCTL_INFO_STD(VIDIOC_G_ENC_INDEX, vidioc_g_enc_index, v4l_print_enc_idx, 0),
+ IOCTL_INFO_STD(VIDIOC_ENCODER_CMD, vidioc_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
+ IOCTL_INFO_STD(VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
+ IOCTL_INFO_STD(VIDIOC_DECODER_CMD, vidioc_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd, v4l_print_decoder_cmd, 0),
+ IOCTL_INFO_FNC(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0),
+ IOCTL_INFO_FNC(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0),
+ IOCTL_INFO_FNC(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings, v4l_print_dv_timings, 0),
+ IOCTL_INFO_FNC(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0),
+ IOCTL_INFO_FNC(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0),
+ IOCTL_INFO_FNC(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0),
+ IOCTL_INFO_FNC(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO_FNC(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO_STD(VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings, v4l_print_enum_dv_timings, 0),
+ IOCTL_INFO_STD(VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings, v4l_print_dv_timings, 0),
+ IOCTL_INFO_STD(VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, type)),
+ IOCTL_INFO_FNC(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0),
+ IOCTL_INFO_FNC(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)),
+ IOCTL_INFO_FNC(VIDIOC_QUERY_EXT_CTRL, v4l_query_ext_ctrl, v4l_print_query_ext_ctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_query_ext_ctrl, id)),
+};
+#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
+
+bool v4l2_is_known_ioctl(unsigned int cmd)
+{
+ if (_IOC_NR(cmd) >= V4L2_IOCTLS)
+ return false;
+ return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd;
+}
+
+struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, unsigned cmd)
+{
+ if (_IOC_NR(cmd) >= V4L2_IOCTLS)
+ return vdev->lock;
+ if (test_bit(_IOC_NR(cmd), vdev->disable_locking))
+ return NULL;
+ if (vdev->queue && vdev->queue->lock &&
+ (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE))
+ return vdev->queue->lock;
+ return vdev->lock;
+}
+
+/* Common ioctl debug function. This function can be used by
+   external ioctl messages as well as internal V4L ioctls */
+void v4l_printk_ioctl(const char *prefix, unsigned int cmd)
+{
+ const char *dir, *type;
+
+ if (prefix)
+ printk(KERN_DEBUG "%s: ", prefix);
+
+ switch (_IOC_TYPE(cmd)) {
+ case 'd':
+ type = "v4l2_int";
+ break;
+ case 'V':
+ if (_IOC_NR(cmd) >= V4L2_IOCTLS) {
+ type = "v4l2";
+ break;
+ }
+ pr_cont("%s", v4l2_ioctls[_IOC_NR(cmd)].name);
+ return;
+ default:
+ type = "unknown";
+ break;
+ }
+
+ switch (_IOC_DIR(cmd)) {
+ case _IOC_NONE: dir = "--"; break;
+ case _IOC_READ: dir = "r-"; break;
+ case _IOC_WRITE: dir = "-w"; break;
+ case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
+ default: dir = "*ERR*"; break;
+ }
+ pr_cont("%s ioctl '%c', dir=%s, #%d (0x%08x)",
+ type, _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd);
+}
+EXPORT_SYMBOL(v4l_printk_ioctl);
+
+static long __video_do_ioctl(struct file *file,
+ unsigned int cmd, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
+ bool write_only = false;
+ struct v4l2_ioctl_info default_info;
+ const struct v4l2_ioctl_info *info;
+ void *fh = file->private_data;
+ struct v4l2_fh *vfh = NULL;
+ int dev_debug = vfd->dev_debug;
+ long ret = -ENOTTY;
+
+ if (ops == NULL) {
+ pr_warn("%s: has no ioctl_ops.\n",
+ video_device_node_name(vfd));
+ return ret;
+ }
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
+ vfh = file->private_data;
+
+ if (v4l2_is_known_ioctl(cmd)) {
+ info = &v4l2_ioctls[_IOC_NR(cmd)];
+
+ if (!test_bit(_IOC_NR(cmd), vfd->valid_ioctls) &&
+ !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
+ goto done;
+
+ if (vfh && (info->flags & INFO_FL_PRIO)) {
+ ret = v4l2_prio_check(vfd->prio, vfh->prio);
+ if (ret)
+ goto done;
+ }
+ } else {
+ default_info.ioctl = cmd;
+ default_info.flags = 0;
+ default_info.debug = v4l_print_default;
+ info = &default_info;
+ }
+
+ write_only = _IOC_DIR(cmd) == _IOC_WRITE;
+ if (info->flags & INFO_FL_STD) {
+ typedef int (*vidioc_op)(struct file *file, void *fh, void *p);
+ const void *p = vfd->ioctl_ops;
+ const vidioc_op *vidioc = p + info->u.offset;
+
+ ret = (*vidioc)(file, fh, arg);
+ } else if (info->flags & INFO_FL_FUNC) {
+ ret = info->u.func(ops, file, fh, arg);
+ } else if (!ops->vidioc_default) {
+ ret = -ENOTTY;
+ } else {
+ ret = ops->vidioc_default(file, fh,
+ vfh ? v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0,
+ cmd, arg);
+ }
+
+done:
+ if (dev_debug & (V4L2_DEV_DEBUG_IOCTL | V4L2_DEV_DEBUG_IOCTL_ARG)) {
+ if (!(dev_debug & V4L2_DEV_DEBUG_STREAMING) &&
+ (cmd == VIDIOC_QBUF || cmd == VIDIOC_DQBUF))
+ return ret;
+
+ v4l_printk_ioctl(video_device_node_name(vfd), cmd);
+ if (ret < 0)
+ pr_cont(": error %ld", ret);
+ if (!(dev_debug & V4L2_DEV_DEBUG_IOCTL_ARG))
+ pr_cont("\n");
+ else if (_IOC_DIR(cmd) == _IOC_NONE)
+ info->debug(arg, write_only);
+ else {
+ pr_cont(": ");
+ info->debug(arg, write_only);
+ }
+ }
+
+ return ret;
+}
+
+static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+ void __user **user_ptr, void ***kernel_ptr)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case VIDIOC_PREPARE_BUF:
+ case VIDIOC_QUERYBUF:
+ case VIDIOC_QBUF:
+ case VIDIOC_DQBUF: {
+ struct v4l2_buffer *buf = parg;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(buf->type) && buf->length > 0) {
+ if (buf->length > VIDEO_MAX_PLANES) {
+ ret = -EINVAL;
+ break;
+ }
+ *user_ptr = (void __user *)buf->m.planes;
+ *kernel_ptr = (void **)&buf->m.planes;
+ *array_size = sizeof(struct v4l2_plane) * buf->length;
+ ret = 1;
+ }
+ break;
+ }
+
+ case VIDIOC_G_EDID:
+ case VIDIOC_S_EDID: {
+ struct v4l2_edid *edid = parg;
+
+ if (edid->blocks) {
+ if (edid->blocks > 256) {
+ ret = -EINVAL;
+ break;
+ }
+ *user_ptr = (void __user *)edid->edid;
+ *kernel_ptr = (void **)&edid->edid;
+ *array_size = edid->blocks * 128;
+ ret = 1;
+ }
+ break;
+ }
+
+ case VIDIOC_S_EXT_CTRLS:
+ case VIDIOC_G_EXT_CTRLS:
+ case VIDIOC_TRY_EXT_CTRLS: {
+ struct v4l2_ext_controls *ctrls = parg;
+
+ if (ctrls->count != 0) {
+ if (ctrls->count > V4L2_CID_MAX_CTRLS) {
+ ret = -EINVAL;
+ break;
+ }
+ *user_ptr = (void __user *)ctrls->controls;
+ *kernel_ptr = (void **)&ctrls->controls;
+ *array_size = sizeof(struct v4l2_ext_control)
+ * ctrls->count;
+ ret = 1;
+ }
+ break;
+ }
+ }
+
+ return ret;
+}
+
+long
+video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
+ v4l2_kioctl func)
+{
+ char sbuf[128];
+ void *mbuf = NULL;
+ void *parg = (void *)arg;
+ long err = -EINVAL;
+ bool has_array_args;
+ size_t array_size = 0;
+ void __user *user_ptr = NULL;
+ void **kernel_ptr = NULL;
+
+ /* Copy arguments into temp kernel buffer */
+ if (_IOC_DIR(cmd) != _IOC_NONE) {
+ if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
+ parg = sbuf;
+ } else {
+ /* too big to allocate from stack */
+ mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+ if (NULL == mbuf)
+ return -ENOMEM;
+ parg = mbuf;
+ }
+
+ err = -EFAULT;
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ unsigned int n = _IOC_SIZE(cmd);
+
+ /*
+ * In some cases, only a few fields are used as input,
+ * i.e. when the app sets "index" and then the driver
+ * fills in the rest of the structure for the thing
+	 * with that index. We only need to copy up to the first
+ * non-input field.
+ */
+ if (v4l2_is_known_ioctl(cmd)) {
+ u32 flags = v4l2_ioctls[_IOC_NR(cmd)].flags;
+ if (flags & INFO_FL_CLEAR_MASK)
+ n = (flags & INFO_FL_CLEAR_MASK) >> 16;
+ }
+
+ if (copy_from_user(parg, (void __user *)arg, n))
+ goto out;
+
+ /* zero out anything we don't copy from userspace */
+ if (n < _IOC_SIZE(cmd))
+ memset((u8 *)parg + n, 0, _IOC_SIZE(cmd) - n);
+ } else {
+ /* read-only ioctl */
+ memset(parg, 0, _IOC_SIZE(cmd));
+ }
+ }
+
+ err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
+ if (err < 0)
+ goto out;
+ has_array_args = err;
+
+ if (has_array_args) {
+ /*
+ * When adding new types of array args, make sure that the
+ * parent argument to ioctl (which contains the pointer to the
+ * array) fits into sbuf (so that mbuf will still remain
+ * unused up to here).
+ */
+ mbuf = kmalloc(array_size, GFP_KERNEL);
+ err = -ENOMEM;
+ if (NULL == mbuf)
+ goto out_array_args;
+ err = -EFAULT;
+ if (copy_from_user(mbuf, user_ptr, array_size))
+ goto out_array_args;
+ *kernel_ptr = mbuf;
+ }
+
+	/* Handle the ioctl */
+ err = func(file, cmd, parg);
+ if (err == -ENOIOCTLCMD)
+ err = -ENOTTY;
+ if (err == 0) {
+ if (cmd == VIDIOC_DQBUF)
+ trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
+ else if (cmd == VIDIOC_QBUF)
+ trace_v4l2_qbuf(video_devdata(file)->minor, parg);
+ }
+
+ if (has_array_args) {
+ *kernel_ptr = (void __force *)user_ptr;
+ if (copy_to_user(user_ptr, mbuf, array_size))
+ err = -EFAULT;
+ goto out_array_args;
+ }
+	/* VIDIOC_QUERY_DV_TIMINGS can return an error, but still produce valid
+	   results that must be copied back to userspace. */
+ if (err < 0 && cmd != VIDIOC_QUERY_DV_TIMINGS)
+ goto out;
+
+out_array_args:
+ /* Copy results into user buffer */
+ switch (_IOC_DIR(cmd)) {
+ case _IOC_READ:
+ case (_IOC_WRITE | _IOC_READ):
+ if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
+ err = -EFAULT;
+ break;
+ }
+
+out:
+ kfree(mbuf);
+ return err;
+}
+EXPORT_SYMBOL(video_usercopy);
+
+long video_ioctl2(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, __video_do_ioctl);
+}
+EXPORT_SYMBOL(video_ioctl2);
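+
+/*
+ * Illustrative sketch (not part of this file): a driver normally does not
+ * call video_usercopy() itself but plugs video_ioctl2() into its file
+ * operations, letting the core handle the usercopy and dispatch through the
+ * v4l2_ioctls table above. The foo_ name is a made-up example:
+ *
+ *	static const struct v4l2_file_operations foo_fops = {
+ *		.owner		= THIS_MODULE,
+ *		.open		= v4l2_fh_open,
+ *		.release	= v4l2_fh_release,
+ *		.unlocked_ioctl	= video_ioctl2,
+ *	};
+ */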
diff --git a/kernel/drivers/media/v4l2-core/v4l2-mem2mem.c b/kernel/drivers/media/v4l2-core/v4l2-mem2mem.c
new file mode 100644
index 000000000..73824a5ad
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -0,0 +1,873 @@
+/*
+ * Memory-to-memory device framework for Video for Linux 2 and videobuf.
+ *
+ * Helper functions for devices that use videobuf buffers for both their
+ * source and destination.
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/videobuf2-core.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
+MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
+
+static bool debug;
+module_param(debug, bool, 0644);
+
+#define dprintk(fmt, arg...) \
+ do { \
+ if (debug) \
+ printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
+ } while (0)
+
+
+/* Instance is already queued on the job_queue */
+#define TRANS_QUEUED (1 << 0)
+/* Instance is currently running in hardware */
+#define TRANS_RUNNING (1 << 1)
+/* Instance is currently aborting */
+#define TRANS_ABORT (1 << 2)
+
+
+/* Offset base for buffers on the destination queue - used to distinguish
+ * between source and destination buffers when mmapping - they receive the same
+ * offsets but for different queues */
+#define DST_QUEUE_OFF_BASE (1 << 30)
+
+
+/**
+ * struct v4l2_m2m_dev - per-device context
+ * @curr_ctx: currently running instance
+ * @job_queue: instances queued to run
+ * @job_spinlock: protects job_queue
+ * @m2m_ops: driver callbacks
+ */
+struct v4l2_m2m_dev {
+ struct v4l2_m2m_ctx *curr_ctx;
+
+ struct list_head job_queue;
+ spinlock_t job_spinlock;
+
+ const struct v4l2_m2m_ops *m2m_ops;
+};
+
+static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &m2m_ctx->out_q_ctx;
+ else
+ return &m2m_ctx->cap_q_ctx;
+}
+
+/**
+ * v4l2_m2m_get_vq() - return vb2_queue for the given type
+ */
+struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ if (!q_ctx)
+ return NULL;
+
+ return &q_ctx->q;
+}
+EXPORT_SYMBOL(v4l2_m2m_get_vq);
+
+/**
+ * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
+ */
+void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
+{
+ struct v4l2_m2m_buffer *b;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+
+ if (list_empty(&q_ctx->rdy_queue)) {
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return NULL;
+ }
+
+ b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return &b->vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
+
+/**
+ * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
+ * return it
+ */
+void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
+{
+ struct v4l2_m2m_buffer *b;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ if (list_empty(&q_ctx->rdy_queue)) {
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return NULL;
+ }
+ b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
+ list_del(&b->list);
+ q_ctx->num_rdy--;
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+
+ return &b->vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
+
+/*
+ * Scheduling handlers
+ */
+
+/**
+ * v4l2_m2m_get_curr_priv() - return driver private data for the currently
+ * running instance or NULL if no instance is running
+ */
+void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+ void *ret = NULL;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (m2m_dev->curr_ctx)
+ ret = m2m_dev->curr_ctx->priv;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
+
+/**
+ * v4l2_m2m_try_run() - select next job to perform and run it if possible
+ *
+ * Get next transaction (if present) from the waiting jobs list and run it.
+ */
+static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (NULL != m2m_dev->curr_ctx) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Another instance is running, won't run now\n");
+ return;
+ }
+
+ if (list_empty(&m2m_dev->job_queue)) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("No job pending\n");
+ return;
+ }
+
+ m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
+ struct v4l2_m2m_ctx, queue);
+ m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
+}
+
+/**
+ * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
+ * the pending job queue and add it if so.
+ * @m2m_ctx: m2m context assigned to the instance to be checked
+ *
+ * There are three basic requirements an instance has to meet to be able to run:
+ * 1) at least one source buffer has to be queued,
+ * 2) at least one destination buffer has to be queued,
+ * 3) streaming has to be on.
+ *
+ * If a queue is buffered (for example a decoder hardware ringbuffer that has
+ * to be drained before doing streamoff), allow scheduling without v4l2 buffers
+ * on that queue.
+ *
+ * There may also be additional, custom requirements. In such a case the
+ * driver should supply a custom callback (job_ready in v4l2_m2m_ops) that
+ * returns 1 if the instance is ready.
+ * An example of the above could be an instance that requires more than one
+ * src/dst buffer per transaction.
+ */
+void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ unsigned long flags_job, flags_out, flags_cap;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+ dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
+
+ if (!m2m_ctx->out_q_ctx.q.streaming
+ || !m2m_ctx->cap_q_ctx.q.streaming) {
+ dprintk("Streaming needs to be on for both queues\n");
+ return;
+ }
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+
+ /* If the context is aborted then don't schedule it */
+ if (m2m_ctx->job_flags & TRANS_ABORT) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("Aborted context\n");
+ return;
+ }
+
+ if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("On job queue already\n");
+ return;
+ }
+
+ spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
+ if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
+ && !m2m_ctx->out_q_ctx.buffered) {
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
+ flags_out);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("No input buffers available\n");
+ return;
+ }
+ spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
+ if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
+ && !m2m_ctx->cap_q_ctx.buffered) {
+ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
+ flags_cap);
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
+ flags_out);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("No output buffers available\n");
+ return;
+ }
+ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
+
+ if (m2m_dev->m2m_ops->job_ready
+ && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("Driver not ready\n");
+ return;
+ }
+
+ list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
+ m2m_ctx->job_flags |= TRANS_QUEUED;
+
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+
+ v4l2_m2m_try_run(m2m_dev);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
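+
+/*
+ * Illustrative sketch (not part of this file): a hypothetical driver that
+ * needs two source buffers per transaction could express that through the
+ * optional job_ready callback checked above (foo_* names are made up):
+ *
+ *	static int foo_job_ready(void *priv)
+ *	{
+ *		struct foo_ctx *ctx = priv;
+ *
+ *		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2;
+ *	}
+ */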
+
+/**
+ * v4l2_m2m_cancel_job() - cancel pending jobs for the context
+ *
+ * Called on streamoff or release of any context:
+ * 1) if the context is currently running, the job_abort callback is invoked
+ *    and we wait for the running job to complete;
+ * 2) if the context is queued, it is removed from the job_queue.
+ */
+static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ unsigned long flags;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+
+ m2m_ctx->job_flags |= TRANS_ABORT;
+ if (m2m_ctx->job_flags & TRANS_RUNNING) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+ dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+ wait_event(m2m_ctx->finished,
+ !(m2m_ctx->job_flags & TRANS_RUNNING));
+ } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ list_del(&m2m_ctx->queue);
+ m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("m2m_ctx: %p had been on queue and was removed\n",
+ m2m_ctx);
+ } else {
+ /* Do nothing, was not on queue/running */
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ }
+}
+
+/**
+ * v4l2_m2m_job_finish() - inform the framework that a job has been finished
+ * and have it clean up
+ *
+ * Called by a driver to yield back the device after it has finished with it.
+ * Should be called as soon as possible after reaching a state which allows
+ * other instances to take control of the device.
+ *
+ * This function has to be called only after the device_run() callback has
+ * been called on the driver. To prevent recursion, it should not be called
+ * directly from the device_run() callback though.
+ */
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Called by an instance not currently running\n");
+ return;
+ }
+
+ list_del(&m2m_dev->curr_ctx->queue);
+ m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ wake_up(&m2m_dev->curr_ctx->finished);
+ m2m_dev->curr_ctx = NULL;
+
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ /* This instance might have more buffers ready, but since we do not
+ * allow more than one job on the job_queue per instance, each has
+ * to be scheduled separately after the previous one finishes. */
+ v4l2_m2m_try_schedule(m2m_ctx);
+ v4l2_m2m_try_run(m2m_dev);
+}
+EXPORT_SYMBOL(v4l2_m2m_job_finish);
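+
+/*
+ * Illustrative sketch (not part of this file): in a hypothetical driver the
+ * device_run() callback only starts the hardware; the interrupt handler
+ * later returns the buffers and yields the device (foo_* names are made up):
+ *
+ *	static irqreturn_t foo_irq(int irq, void *data)
+ *	{
+ *		struct foo_dev *dev = data;
+ *		struct foo_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ *		struct vb2_buffer *src, *dst;
+ *
+ *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ *		vb2_buffer_done(src, VB2_BUF_STATE_DONE);
+ *		vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
+ *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
+ *		return IRQ_HANDLED;
+ *	}
+ */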
+
+/**
+ * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
+ */
+int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
+ return vb2_reqbufs(vq, reqbufs);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
+
+/**
+ * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
+ *
+ * See v4l2_m2m_mmap() documentation for details.
+ */
+int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct vb2_queue *vq;
+ int ret = 0;
+ unsigned int i;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = vb2_querybuf(vq, buf);
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
+ for (i = 0; i < buf->length; ++i)
+ buf->m.planes[i].m.mem_offset
+ += DST_QUEUE_OFF_BASE;
+ } else {
+ buf->m.offset += DST_QUEUE_OFF_BASE;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
+
+/**
+ * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = vb2_qbuf(vq, buf);
+ if (!ret)
+ v4l2_m2m_try_schedule(m2m_ctx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
+
+/**
+ * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
+
+/**
+ * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
+ * on the type
+ */
+int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_create_buffers *create)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
+ return vb2_create_bufs(vq, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
+
+/**
+ * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_exportbuffer *eb)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
+ return vb2_expbuf(vq, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
+
+/**
+ * v4l2_m2m_streamon() - turn on streaming for a video queue
+ */
+int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, type);
+ ret = vb2_streamon(vq, type);
+ if (!ret)
+ v4l2_m2m_try_schedule(m2m_ctx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
+
+/**
+ * v4l2_m2m_streamoff() - turn off streaming for a video queue
+ */
+int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_m2m_queue_ctx *q_ctx;
+ unsigned long flags_job, flags;
+ int ret;
+
+ /* wait until the current context is dequeued from job_queue */
+ v4l2_m2m_cancel_job(m2m_ctx);
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ ret = vb2_streamoff(&q_ctx->q, type);
+ if (ret)
+ return ret;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+ /* We should not be scheduled anymore, since we're dropping a queue. */
+ if (m2m_ctx->job_flags & TRANS_QUEUED)
+ list_del(&m2m_ctx->queue);
+ m2m_ctx->job_flags = 0;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+	/* Drop the queue, since streamoff returns the device to the same state
+	 * as after calling reqbufs. */
+ INIT_LIST_HEAD(&q_ctx->rdy_queue);
+ q_ctx->num_rdy = 0;
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+
+ if (m2m_dev->curr_ctx == m2m_ctx) {
+ m2m_dev->curr_ctx = NULL;
+ wake_up(&m2m_ctx->finished);
+ }
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
+
+/**
+ * v4l2_m2m_poll() - poll replacement, for destination buffers only
+ *
+ * Call from the driver's poll() function. Will poll both queues. If a buffer
+ * is available to dequeue (with dqbuf) from the source queue, this is
+ * reported as write readiness (a non-blocking write can be performed); a
+ * dequeueable buffer on the destination queue is reported as read readiness.
+ */
+unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct poll_table_struct *wait)
+{
+ struct video_device *vfd = video_devdata(file);
+ unsigned long req_events = poll_requested_events(wait);
+ struct vb2_queue *src_q, *dst_q;
+ struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
+ unsigned int rc = 0;
+ unsigned long flags;
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
+ struct v4l2_fh *fh = file->private_data;
+
+ if (v4l2_event_pending(fh))
+ rc = POLLPRI;
+ else if (req_events & POLLPRI)
+ poll_wait(file, &fh->wait, wait);
+ if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
+ return rc;
+ }
+
+ src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
+
+ /*
+	 * There has to be at least one buffer queued on each queued_list:
+	 * a buffer is either already in the driver or waiting for the
+	 * driver to claim it and start processing.
+ */
+ if ((!src_q->streaming || list_empty(&src_q->queued_list))
+ && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
+ rc |= POLLERR;
+ goto end;
+ }
+
+ if (m2m_ctx->m2m_dev->m2m_ops->unlock)
+ m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
+ else if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
+
+ if (list_empty(&src_q->done_list))
+ poll_wait(file, &src_q->done_wq, wait);
+ if (list_empty(&dst_q->done_list))
+ poll_wait(file, &dst_q->done_wq, wait);
+
+ if (m2m_ctx->m2m_dev->m2m_ops->lock)
+ m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
+ else if (m2m_ctx->q_lock) {
+ if (mutex_lock_interruptible(m2m_ctx->q_lock)) {
+ rc |= POLLERR;
+ goto end;
+ }
+ }
+
+ spin_lock_irqsave(&src_q->done_lock, flags);
+ if (!list_empty(&src_q->done_list))
+ src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
+ done_entry);
+ if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
+ || src_vb->state == VB2_BUF_STATE_ERROR))
+ rc |= POLLOUT | POLLWRNORM;
+ spin_unlock_irqrestore(&src_q->done_lock, flags);
+
+ spin_lock_irqsave(&dst_q->done_lock, flags);
+ if (!list_empty(&dst_q->done_list))
+ dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
+ done_entry);
+ if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
+ || dst_vb->state == VB2_BUF_STATE_ERROR))
+ rc |= POLLIN | POLLRDNORM;
+ spin_unlock_irqrestore(&dst_q->done_lock, flags);
+
+end:
+ return rc;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
+
+/**
+ * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
+ *
+ * Call from the driver's mmap() function. Will handle mmap() for both queues
+ * seamlessly for the videobuf layer, which receives normal per-queue offsets
+ * and proper videobuf queue pointers. The differentiation is made outside
+ * videobuf by adding a predefined offset to buffers from one of the queues
+ * and subtracting it before passing it back to videobuf. Only drivers (and
+ * thus applications) see the modified offsets.
+ */
+int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct vm_area_struct *vma)
+{
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ struct vb2_queue *vq;
+
+ if (offset < DST_QUEUE_OFF_BASE) {
+ vq = v4l2_m2m_get_src_vq(m2m_ctx);
+ } else {
+ vq = v4l2_m2m_get_dst_vq(m2m_ctx);
+ vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
+ }
+
+ return vb2_mmap(vq, vma);
+}
+EXPORT_SYMBOL(v4l2_m2m_mmap);
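+
+/*
+ * Illustrative userspace sketch (not part of this file): since
+ * v4l2_m2m_querybuf() already added DST_QUEUE_OFF_BASE to capture buffer
+ * offsets, an application passes the offset back unchanged:
+ *
+ *	struct v4l2_buffer buf = {
+ *		.type	= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ *		.memory	= V4L2_MEMORY_MMAP,
+ *		.index	= 0,
+ *	};
+ *
+ *	ioctl(fd, VIDIOC_QUERYBUF, &buf);
+ *	void *p = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
+ *		       MAP_SHARED, fd, buf.m.offset);
+ */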
+
+/**
+ * v4l2_m2m_init() - initialize per-driver m2m data
+ *
+ * Usually called from driver's probe() function.
+ */
+struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+
+ if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
+ WARN_ON(!m2m_ops->job_abort))
+ return ERR_PTR(-EINVAL);
+
+ m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
+ if (!m2m_dev)
+ return ERR_PTR(-ENOMEM);
+
+ m2m_dev->curr_ctx = NULL;
+ m2m_dev->m2m_ops = m2m_ops;
+ INIT_LIST_HEAD(&m2m_dev->job_queue);
+ spin_lock_init(&m2m_dev->job_spinlock);
+
+ return m2m_dev;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_init);
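+
+/*
+ * Illustrative sketch (not part of this file): a hypothetical driver's
+ * probe() supplies at least the mandatory device_run and job_abort ops
+ * checked above (foo_* names are made up):
+ *
+ *	static const struct v4l2_m2m_ops foo_m2m_ops = {
+ *		.device_run	= foo_device_run,
+ *		.job_ready	= foo_job_ready,
+ *		.job_abort	= foo_job_abort,
+ *	};
+ *
+ *	dev->m2m_dev = v4l2_m2m_init(&foo_m2m_ops);
+ *	if (IS_ERR(dev->m2m_dev))
+ *		return PTR_ERR(dev->m2m_dev);
+ */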
+
+/**
+ * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
+ *
+ * Usually called from driver's remove() function.
+ */
+void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
+{
+ kfree(m2m_dev);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_release);
+
+/**
+ * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
+ * @m2m_dev: a previously initialized m2m_dev struct
+ * @drv_priv: driver's instance private data
+ * @queue_init: a callback for queue type-specific initialization, used to
+ *	set up the source and destination vb2 queues
+ *
+ * Usually called from driver's open() function.
+ */
+struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
+ void *drv_priv,
+ int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
+{
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
+ int ret;
+
+ m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
+ if (!m2m_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ m2m_ctx->priv = drv_priv;
+ m2m_ctx->m2m_dev = m2m_dev;
+ init_waitqueue_head(&m2m_ctx->finished);
+
+ out_q_ctx = &m2m_ctx->out_q_ctx;
+ cap_q_ctx = &m2m_ctx->cap_q_ctx;
+
+ INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
+ INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
+ spin_lock_init(&out_q_ctx->rdy_spinlock);
+ spin_lock_init(&cap_q_ctx->rdy_spinlock);
+
+ INIT_LIST_HEAD(&m2m_ctx->queue);
+
+ ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
+
+ if (ret)
+ goto err;
+ /*
+ * If both queues use same mutex assign it as the common buffer
+ * queues lock to the m2m context. This lock is used in the
+ * v4l2_m2m_ioctl_* helpers.
+ */
+ if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
+ m2m_ctx->q_lock = out_q_ctx->q.lock;
+
+ return m2m_ctx;
+err:
+ kfree(m2m_ctx);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
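+
+/*
+ * Illustrative sketch (not part of this file): a hypothetical driver's
+ * open() allocates a per-instance context and lets the queue_init callback
+ * configure both vb2 queues (foo_* names are made up):
+ *
+ *	static int foo_queue_init(void *priv, struct vb2_queue *src_vq,
+ *				  struct vb2_queue *dst_vq)
+ *	{
+ *		int ret;
+ *
+ *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ *		src_vq->io_modes = dst_vq->io_modes = VB2_MMAP;
+ *		src_vq->drv_priv = dst_vq->drv_priv = priv;
+ *		src_vq->buf_struct_size = dst_vq->buf_struct_size =
+ *			sizeof(struct v4l2_m2m_buffer);
+ *		src_vq->ops = dst_vq->ops = &foo_qops;
+ *		src_vq->mem_ops = dst_vq->mem_ops = &vb2_vmalloc_memops;
+ *
+ *		ret = vb2_queue_init(src_vq);
+ *		return ret ? ret : vb2_queue_init(dst_vq);
+ *	}
+ *
+ *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, foo_queue_init);
+ */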
+
+/**
+ * v4l2_m2m_ctx_release() - release m2m context
+ *
+ * Usually called from driver's release() function.
+ */
+void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ /* wait until the current context is dequeued from job_queue */
+ v4l2_m2m_cancel_job(m2m_ctx);
+
+ vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
+ vb2_queue_release(&m2m_ctx->out_q_ctx.q);
+
+ kfree(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
+
+/**
+ * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
+ *
+ * Call from buf_queue(), videobuf_queue_ops callback.
+ */
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
+{
+ struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
+ struct v4l2_m2m_queue_ctx *q_ctx;
+ unsigned long flags;
+
+ q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
+ if (!q_ctx)
+ return;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ list_add_tail(&b->list, &q_ctx->rdy_queue);
+ q_ctx->num_rdy++;
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
+
+/* Videobuf2 ioctl helpers */
+
+int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
+
+int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
+
+int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
+
+int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
+
+int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
+
+int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
+
+int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
+
+int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
+
+/*
+ * v4l2_file_operations helpers. It is assumed here that the same lock is
+ * used for the output and the capture buffer queues.
+ */
+
+int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+ int ret;
+
+ if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_m2m_mmap(file, m2m_ctx, vma);
+
+ if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
+
+unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+ unsigned int ret;
+
+ if (m2m_ctx->q_lock)
+ mutex_lock(m2m_ctx->q_lock);
+
+ ret = v4l2_m2m_poll(file, m2m_ctx, wait);
+
+ if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
+
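+/*
+ * Illustrative sketch (not part of this file): the ioctl helpers above can
+ * be wired straight into a hypothetical driver's v4l2_ioctl_ops, with
+ * v4l2_m2m_fop_mmap/v4l2_m2m_fop_poll used for the file operations:
+ *
+ *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
+ *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
+ *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
+ *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
+ *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
+ *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
+ *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
+ *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
+ *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
+ *	};
+ */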
diff --git a/kernel/drivers/media/v4l2-core/v4l2-of.c b/kernel/drivers/media/v4l2-core/v4l2-of.c
new file mode 100644
index 000000000..83143d39d
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-of.c
@@ -0,0 +1,228 @@
+/*
+ * V4L2 OF binding parsing library
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Copyright (C) 2012 Renesas Electronics Corp.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <media/v4l2-of.h>
+
+static int v4l2_of_parse_csi_bus(const struct device_node *node,
+ struct v4l2_of_endpoint *endpoint)
+{
+ struct v4l2_of_bus_mipi_csi2 *bus = &endpoint->bus.mipi_csi2;
+ struct property *prop;
+ bool have_clk_lane = false;
+ unsigned int flags = 0;
+ u32 v;
+
+ prop = of_find_property(node, "data-lanes", NULL);
+ if (prop) {
+ const __be32 *lane = NULL;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bus->data_lanes); i++) {
+ lane = of_prop_next_u32(prop, lane, &v);
+ if (!lane)
+ break;
+ bus->data_lanes[i] = v;
+ }
+ bus->num_data_lanes = i;
+ }
+
+ prop = of_find_property(node, "lane-polarities", NULL);
+ if (prop) {
+ const __be32 *polarity = NULL;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bus->lane_polarities); i++) {
+ polarity = of_prop_next_u32(prop, polarity, &v);
+ if (!polarity)
+ break;
+ bus->lane_polarities[i] = v;
+ }
+
+ if (i < 1 + bus->num_data_lanes /* clock + data */) {
+ pr_warn("%s: too few lane-polarities entries (need %u, got %u)\n",
+ node->full_name, 1 + bus->num_data_lanes, i);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(node, "clock-lanes", &v)) {
+ bus->clock_lane = v;
+ have_clk_lane = true;
+ }
+
+ if (of_get_property(node, "clock-noncontinuous", &v))
+ flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
+ else if (have_clk_lane || bus->num_data_lanes > 0)
+ flags |= V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+
+ bus->flags = flags;
+ endpoint->bus_type = V4L2_MBUS_CSI2;
+
+ return 0;
+}
+
+static void v4l2_of_parse_parallel_bus(const struct device_node *node,
+ struct v4l2_of_endpoint *endpoint)
+{
+ struct v4l2_of_bus_parallel *bus = &endpoint->bus.parallel;
+ unsigned int flags = 0;
+ u32 v;
+
+ if (!of_property_read_u32(node, "hsync-active", &v))
+ flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH :
+ V4L2_MBUS_HSYNC_ACTIVE_LOW;
+
+ if (!of_property_read_u32(node, "vsync-active", &v))
+ flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH :
+ V4L2_MBUS_VSYNC_ACTIVE_LOW;
+
+ if (!of_property_read_u32(node, "pclk-sample", &v))
+ flags |= v ? V4L2_MBUS_PCLK_SAMPLE_RISING :
+ V4L2_MBUS_PCLK_SAMPLE_FALLING;
+
+ if (!of_property_read_u32(node, "field-even-active", &v))
+ flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH :
+ V4L2_MBUS_FIELD_EVEN_LOW;
+ if (flags)
+ endpoint->bus_type = V4L2_MBUS_PARALLEL;
+ else
+ endpoint->bus_type = V4L2_MBUS_BT656;
+
+ if (!of_property_read_u32(node, "data-active", &v))
+ flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
+ V4L2_MBUS_DATA_ACTIVE_LOW;
+
+ if (of_get_property(node, "slave-mode", &v))
+ flags |= V4L2_MBUS_SLAVE;
+ else
+ flags |= V4L2_MBUS_MASTER;
+
+ if (!of_property_read_u32(node, "bus-width", &v))
+ bus->bus_width = v;
+
+ if (!of_property_read_u32(node, "data-shift", &v))
+ bus->data_shift = v;
+
+ if (!of_property_read_u32(node, "sync-on-green-active", &v))
+ flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH :
+ V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW;
+
+ bus->flags = flags;
+}
+
+/**
+ * v4l2_of_parse_endpoint() - parse all endpoint node properties
+ * @node: pointer to endpoint device_node
+ * @endpoint: pointer to the V4L2 OF endpoint data structure
+ *
+ * All properties are optional. If none are found, we don't set any flags.
+ * This means the port has a static configuration and no properties have
+ * to be specified explicitly.
+ * If any properties that identify the bus as parallel are found and
+ * slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if we recognise
+ * the bus as serial CSI-2 and clock-noncontinuous isn't set, we set the
+ * V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag.
+ * The caller should hold a reference to @node.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int v4l2_of_parse_endpoint(const struct device_node *node,
+ struct v4l2_of_endpoint *endpoint)
+{
+ int rval;
+
+ of_graph_parse_endpoint(node, &endpoint->base);
+ endpoint->bus_type = 0;
+ memset(&endpoint->bus, 0, sizeof(endpoint->bus));
+
+ rval = v4l2_of_parse_csi_bus(node, endpoint);
+ if (rval)
+ return rval;
+ /*
+ * Parse the parallel video bus properties only if none
+ * of the MIPI CSI-2 specific properties were found.
+ */
+ if (endpoint->bus.mipi_csi2.flags == 0)
+ v4l2_of_parse_parallel_bus(node, endpoint);
+
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_of_parse_endpoint);
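+
+/*
+ * Illustrative sketch (not part of this file): a hypothetical bridge driver
+ * walks the OF graph and parses each endpoint it finds:
+ *
+ *	struct v4l2_of_endpoint ep;
+ *	struct device_node *node = NULL;
+ *
+ *	while ((node = of_graph_get_next_endpoint(dev->of_node, node))) {
+ *		v4l2_of_parse_endpoint(node, &ep);
+ *		if (ep.bus_type == V4L2_MBUS_CSI2)
+ *			num_lanes = ep.bus.mipi_csi2.num_data_lanes;
+ *	}
+ */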
+
+/**
+ * v4l2_of_parse_link() - parse a link between two endpoints
+ * @node: pointer to the endpoint at the local end of the link
+ * @link: pointer to the V4L2 OF link data structure
+ *
+ * Fill the link structure with the local and remote nodes and port numbers.
+ * The local_node and remote_node fields are set to point to the local and
+ * remote port's parent nodes respectively (the port parent node being the
+ * parent node of the port node if that node isn't a 'ports' node, or the
+ * grand-parent node of the port node otherwise).
+ *
+ * A reference is taken to both the local and remote nodes, the caller must use
+ * v4l2_of_put_link() to drop the references when done with the link.
+ *
+ * Return: 0 on success, or -ENOLINK if the remote endpoint can't be found.
+ */
+int v4l2_of_parse_link(const struct device_node *node,
+ struct v4l2_of_link *link)
+{
+ struct device_node *np;
+
+ memset(link, 0, sizeof(*link));
+
+ np = of_get_parent(node);
+ of_property_read_u32(np, "reg", &link->local_port);
+ np = of_get_next_parent(np);
+ if (of_node_cmp(np->name, "ports") == 0)
+ np = of_get_next_parent(np);
+ link->local_node = np;
+
+ np = of_parse_phandle(node, "remote-endpoint", 0);
+ if (!np) {
+ of_node_put(link->local_node);
+ return -ENOLINK;
+ }
+
+ np = of_get_parent(np);
+ of_property_read_u32(np, "reg", &link->remote_port);
+ np = of_get_next_parent(np);
+ if (of_node_cmp(np->name, "ports") == 0)
+ np = of_get_next_parent(np);
+ link->remote_node = np;
+
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_of_parse_link);
+
+/**
+ * v4l2_of_put_link() - drop references to nodes in a link
+ * @link: pointer to the V4L2 OF link data structure
+ *
+ * Drop references to the local and remote nodes in the link. This function must
+ * be called on every link parsed with v4l2_of_parse_link().
+ */
+void v4l2_of_put_link(struct v4l2_of_link *link)
+{
+ of_node_put(link->local_node);
+ of_node_put(link->remote_node);
+}
+EXPORT_SYMBOL(v4l2_of_put_link);
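+
+/*
+ * Illustrative sketch (not part of this file): v4l2_of_parse_link() and
+ * v4l2_of_put_link() are used as a pair, since the former takes node
+ * references; setup_connection() is a made-up placeholder:
+ *
+ *	struct v4l2_of_link link;
+ *
+ *	if (!v4l2_of_parse_link(endpoint, &link)) {
+ *		setup_connection(link.local_port, link.remote_port);
+ *		v4l2_of_put_link(&link);
+ *	}
+ */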
diff --git a/kernel/drivers/media/v4l2-core/v4l2-subdev.c b/kernel/drivers/media/v4l2-core/v4l2-subdev.c
new file mode 100644
index 000000000..63596063b
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/v4l2-subdev.c
@@ -0,0 +1,590 @@
+/*
+ * V4L2 sub-device
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/ioctl.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/export.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
+{
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ fh->pad = kzalloc(sizeof(*fh->pad) * sd->entity.num_pads, GFP_KERNEL);
+ if (fh->pad == NULL)
+ return -ENOMEM;
+#endif
+ return 0;
+}
+
+static void subdev_fh_free(struct v4l2_subdev_fh *fh)
+{
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ kfree(fh->pad);
+ fh->pad = NULL;
+#endif
+}
+
+static int subdev_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_subdev_fh *subdev_fh;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_entity *entity = NULL;
+#endif
+ int ret;
+
+ subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
+ if (subdev_fh == NULL)
+ return -ENOMEM;
+
+ ret = subdev_fh_init(subdev_fh, sd);
+ if (ret) {
+ kfree(subdev_fh);
+ return ret;
+ }
+
+ v4l2_fh_init(&subdev_fh->vfh, vdev);
+ v4l2_fh_add(&subdev_fh->vfh);
+ file->private_data = &subdev_fh->vfh;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (sd->v4l2_dev->mdev) {
+ entity = media_entity_get(&sd->entity);
+ if (!entity) {
+ ret = -EBUSY;
+ goto err;
+ }
+ }
+#endif
+
+ if (sd->internal_ops && sd->internal_ops->open) {
+ ret = sd->internal_ops->open(sd, subdev_fh);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_put(entity);
+#endif
+ v4l2_fh_del(&subdev_fh->vfh);
+ v4l2_fh_exit(&subdev_fh->vfh);
+ subdev_fh_free(subdev_fh);
+ kfree(subdev_fh);
+
+ return ret;
+}
+
+static int subdev_close(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
+
+ if (sd->internal_ops && sd->internal_ops->close)
+ sd->internal_ops->close(sd, subdev_fh);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (sd->v4l2_dev->mdev)
+ media_entity_put(&sd->entity);
+#endif
+ v4l2_fh_del(vfh);
+ v4l2_fh_exit(vfh);
+ subdev_fh_free(subdev_fh);
+ kfree(subdev_fh);
+ file->private_data = NULL;
+
+ return 0;
+}
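+
+/*
+ * Illustrative sketch (not part of this file): a hypothetical sub-device
+ * driver hooks into the open path above through internal_ops (foo_* names
+ * are made up):
+ *
+ *	static int foo_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+ *	{
+ *		struct foo_state *foo = container_of(sd, struct foo_state, sd);
+ *
+ *		return foo_power_on(foo);
+ *	}
+ *
+ *	static const struct v4l2_subdev_internal_ops foo_internal_ops = {
+ *		.open = foo_open,
+ *	};
+ */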
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+static int check_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_format *format)
+{
+ if (format->which != V4L2_SUBDEV_FORMAT_TRY &&
+ format->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (format->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int check_crop(struct v4l2_subdev *sd, struct v4l2_subdev_crop *crop)
+{
+ if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
+ crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (crop->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int check_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_selection *sel)
+{
+ if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
+ sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (sel->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int check_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+ if (edid->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (edid->blocks && edid->edid == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+#endif
+
+static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file->private_data;
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
+ int rval;
+#endif
+
+ switch (cmd) {
+ case VIDIOC_QUERYCTRL:
+ return v4l2_queryctrl(vfh->ctrl_handler, arg);
+
+ case VIDIOC_QUERY_EXT_CTRL:
+ return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);
+
+ case VIDIOC_QUERYMENU:
+ return v4l2_querymenu(vfh->ctrl_handler, arg);
+
+ case VIDIOC_G_CTRL:
+ return v4l2_g_ctrl(vfh->ctrl_handler, arg);
+
+ case VIDIOC_S_CTRL:
+ return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);
+
+ case VIDIOC_G_EXT_CTRLS:
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg);
+
+ case VIDIOC_S_EXT_CTRLS:
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg);
+
+ case VIDIOC_TRY_EXT_CTRLS:
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg);
+
+ case VIDIOC_DQEVENT:
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return -ENOIOCTLCMD;
+
+ return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
+
+ case VIDIOC_SUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ case VIDIOC_DBG_G_REGISTER:
+ {
+ struct v4l2_dbg_register *p = arg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return v4l2_subdev_call(sd, core, g_register, p);
+ }
+ case VIDIOC_DBG_S_REGISTER:
+ {
+ struct v4l2_dbg_register *p = arg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return v4l2_subdev_call(sd, core, s_register, p);
+ }
+#endif
+
+ case VIDIOC_LOG_STATUS: {
+ int ret;
+
+ pr_info("%s: ================= START STATUS =================\n",
+ sd->name);
+ ret = v4l2_subdev_call(sd, core, log_status);
+ pr_info("%s: ================== END STATUS ==================\n",
+ sd->name);
+ return ret;
+ }
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ case VIDIOC_SUBDEV_G_FMT: {
+ struct v4l2_subdev_format *format = arg;
+
+ rval = check_format(sd, format);
+ if (rval)
+ return rval;
+
+ return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh->pad, format);
+ }
+
+ case VIDIOC_SUBDEV_S_FMT: {
+ struct v4l2_subdev_format *format = arg;
+
+ rval = check_format(sd, format);
+ if (rval)
+ return rval;
+
+ return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh->pad, format);
+ }
+
+ case VIDIOC_SUBDEV_G_CROP: {
+ struct v4l2_subdev_crop *crop = arg;
+ struct v4l2_subdev_selection sel;
+
+ rval = check_crop(sd, crop);
+ if (rval)
+ return rval;
+
+ memset(&sel, 0, sizeof(sel));
+ sel.which = crop->which;
+ sel.pad = crop->pad;
+ sel.target = V4L2_SEL_TGT_CROP;
+
+ rval = v4l2_subdev_call(
+ sd, pad, get_selection, subdev_fh->pad, &sel);
+
+ crop->rect = sel.r;
+
+ return rval;
+ }
+
+ case VIDIOC_SUBDEV_S_CROP: {
+ struct v4l2_subdev_crop *crop = arg;
+ struct v4l2_subdev_selection sel;
+
+ rval = check_crop(sd, crop);
+ if (rval)
+ return rval;
+
+ memset(&sel, 0, sizeof(sel));
+ sel.which = crop->which;
+ sel.pad = crop->pad;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r = crop->rect;
+
+ rval = v4l2_subdev_call(
+ sd, pad, set_selection, subdev_fh->pad, &sel);
+
+ crop->rect = sel.r;
+
+ return rval;
+ }
+
+ case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
+ struct v4l2_subdev_mbus_code_enum *code = arg;
+
+ if (code->which != V4L2_SUBDEV_FORMAT_TRY &&
+ code->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (code->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh->pad,
+ code);
+ }
+
+ case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
+ struct v4l2_subdev_frame_size_enum *fse = arg;
+
+ if (fse->which != V4L2_SUBDEV_FORMAT_TRY &&
+ fse->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (fse->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh->pad,
+ fse);
+ }
+
+ case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
+ struct v4l2_subdev_frame_interval *fi = arg;
+
+ if (fi->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, video, g_frame_interval, arg);
+ }
+
+ case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
+ struct v4l2_subdev_frame_interval *fi = arg;
+
+ if (fi->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, video, s_frame_interval, arg);
+ }
+
+ case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
+ struct v4l2_subdev_frame_interval_enum *fie = arg;
+
+ if (fie->which != V4L2_SUBDEV_FORMAT_TRY &&
+ fie->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (fie->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh->pad,
+ fie);
+ }
+
+ case VIDIOC_SUBDEV_G_SELECTION: {
+ struct v4l2_subdev_selection *sel = arg;
+
+ rval = check_selection(sd, sel);
+ if (rval)
+ return rval;
+
+ return v4l2_subdev_call(
+ sd, pad, get_selection, subdev_fh->pad, sel);
+ }
+
+ case VIDIOC_SUBDEV_S_SELECTION: {
+ struct v4l2_subdev_selection *sel = arg;
+
+ rval = check_selection(sd, sel);
+ if (rval)
+ return rval;
+
+ return v4l2_subdev_call(
+ sd, pad, set_selection, subdev_fh->pad, sel);
+ }
+
+ case VIDIOC_G_EDID: {
+ struct v4l2_subdev_edid *edid = arg;
+
+ rval = check_edid(sd, edid);
+ if (rval)
+ return rval;
+
+ return v4l2_subdev_call(sd, pad, get_edid, edid);
+ }
+
+ case VIDIOC_S_EDID: {
+ struct v4l2_subdev_edid *edid = arg;
+
+ rval = check_edid(sd, edid);
+ if (rval)
+ return rval;
+
+ return v4l2_subdev_call(sd, pad, set_edid, edid);
+ }
+
+ case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
+ struct v4l2_dv_timings_cap *cap = arg;
+
+ if (cap->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
+ }
+
+ case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
+ struct v4l2_enum_dv_timings *dvt = arg;
+
+ if (dvt->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
+ }
+
+ case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
+ return v4l2_subdev_call(sd, video, query_dv_timings, arg);
+
+ case VIDIOC_SUBDEV_G_DV_TIMINGS:
+ return v4l2_subdev_call(sd, video, g_dv_timings, arg);
+
+ case VIDIOC_SUBDEV_S_DV_TIMINGS:
+ return v4l2_subdev_call(sd, video, s_dv_timings, arg);
+#endif
+ default:
+ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+ }
+
+ return 0;
+}
+
+static long subdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, subdev_do_ioctl);
+}
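
Every command above reaches subdev_do_ioctl() through video_usercopy(), which copies the argument struct in and out of user space. A minimal userspace sketch of the VIDIOC_SUBDEV_G_FMT path (the device node path is an assumption, and the pad ioctls are only compiled in with CONFIG_VIDEO_V4L2_SUBDEV_API):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

int main(void)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = 0,
	};
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_SUBDEV_G_FMT, &fmt) == 0)	/* handled above */
		printf("pad 0: %ux%u, mbus code 0x%x\n",
		       fmt.format.width, fmt.format.height, fmt.format.code);
	close(fd);
	return 0;
}
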
+
+#ifdef CONFIG_COMPAT
+static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
+}
+#endif
+
+static unsigned int subdev_poll(struct file *file, poll_table *wait)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *fh = file->private_data;
+
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return POLLERR;
+
+ poll_wait(file, &fh->wait, wait);
+
+ if (v4l2_event_pending(fh))
+ return POLLPRI;
+
+ return 0;
+}
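
Since subdev_poll() only ever raises POLLPRI, event consumers follow the usual subscribe/poll/dequeue sequence. A sketch under the assumption that the subdev sets V4L2_SUBDEV_FL_HAS_EVENTS and that its subscribe_event op accepts the chosen event type:

#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int wait_for_one_event(int fd)	/* fd: open subdev node */
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_SOURCE_CHANGE;	/* assumed to be supported */

	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub))
		return -1;
	if (poll(&pfd, 1, -1) <= 0)	/* subdev_poll() raises POLLPRI */
		return -1;
	return ioctl(fd, VIDIOC_DQEVENT, &ev);
}
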
+
+const struct v4l2_file_operations v4l2_subdev_fops = {
+ .owner = THIS_MODULE,
+ .open = subdev_open,
+ .unlocked_ioctl = subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = subdev_compat_ioctl32,
+#endif
+ .release = subdev_close,
+ .poll = subdev_poll,
+};
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ /* The width, height and code must match. */
+ if (source_fmt->format.width != sink_fmt->format.width
+ || source_fmt->format.height != sink_fmt->format.height
+ || source_fmt->format.code != sink_fmt->format.code)
+ return -EINVAL;
+
+ /* The field order must match, or the sink field order must be NONE
+ * to support interlaced hardware connected to bridges that support
+ * progressive formats only.
+ */
+ if (source_fmt->format.field != sink_fmt->format.field &&
+ sink_fmt->format.field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
+
+static int
+v4l2_subdev_link_validate_get_format(struct media_pad *pad,
+ struct v4l2_subdev_format *fmt)
+{
+ if (media_entity_type(pad->entity) == MEDIA_ENT_T_V4L2_SUBDEV) {
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(pad->entity);
+
+ fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt->pad = pad->index;
+ return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
+ }
+
+ WARN(pad->entity->type != MEDIA_ENT_T_DEVNODE_V4L,
+ "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
+ pad->entity->type, pad->entity->name);
+
+ return -EINVAL;
+}
+
+int v4l2_subdev_link_validate(struct media_link *link)
+{
+ struct v4l2_subdev *sink;
+ struct v4l2_subdev_format sink_fmt, source_fmt;
+ int rval;
+
+ rval = v4l2_subdev_link_validate_get_format(
+ link->source, &source_fmt);
+ if (rval < 0)
+ return 0;
+
+ rval = v4l2_subdev_link_validate_get_format(
+ link->sink, &sink_fmt);
+ if (rval < 0)
+ return 0;
+
+ sink = media_entity_to_v4l2_subdev(link->sink->entity);
+
+ rval = v4l2_subdev_call(sink, pad, link_validate, link,
+ &source_fmt, &sink_fmt);
+ if (rval != -ENOIOCTLCMD)
+ return rval;
+
+ return v4l2_subdev_link_validate_default(
+ sink, link, &source_fmt, &sink_fmt);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
+#endif /* CONFIG_MEDIA_CONTROLLER */
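
Drivers typically arrange for these checks to run when the pipeline starts streaming by installing v4l2_subdev_link_validate() as the entity's link_validate operation. A sketch assuming CONFIG_MEDIA_CONTROLLER and a hypothetical "foo" driver:

#include <media/media-entity.h>
#include <media/v4l2-subdev.h>

static const struct media_entity_operations foo_media_ops = {
	.link_validate = v4l2_subdev_link_validate,
};

static void foo_setup_entity(struct v4l2_subdev *sd)
{
	/* from now on, starting the pipeline runs the format checks above */
	sd->entity.ops = &foo_media_ops;
}
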
+
+void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
+{
+ INIT_LIST_HEAD(&sd->list);
+ BUG_ON(!ops);
+ sd->ops = ops;
+ sd->v4l2_dev = NULL;
+ sd->flags = 0;
+ sd->name[0] = '\0';
+ sd->grp_id = 0;
+ sd->dev_priv = NULL;
+ sd->host_priv = NULL;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ sd->entity.name = sd->name;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+#endif
+}
+EXPORT_SYMBOL(v4l2_subdev_init);
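
A sketch of the usual driver-side sequence (the "foo" names are hypothetical): embed the subdev in the driver state, initialize it with an ops table via v4l2_subdev_init(), then hand it to the parent v4l2_device:

#include <linux/kernel.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

struct foo_state {
	struct v4l2_subdev sd;
};

static const struct v4l2_subdev_ops foo_subdev_ops = {
	/* core/video/pad op tables go here */
};

static int foo_register(struct foo_state *state, struct v4l2_device *v4l2_dev)
{
	struct v4l2_subdev *sd = &state->sd;

	v4l2_subdev_init(sd, &foo_subdev_ops);
	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;	/* expose /dev/v4l-subdev* */
	snprintf(sd->name, sizeof(sd->name), "foo");

	return v4l2_device_register_subdev(v4l2_dev, sd);
}
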
diff --git a/kernel/drivers/media/v4l2-core/videobuf-core.c b/kernel/drivers/media/v4l2-core/videobuf-core.c
new file mode 100644
index 000000000..926836d18
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/videobuf-core.c
@@ -0,0 +1,1196 @@
+/*
+ * generic helper functions for handling video4linux capture buffers
+ *
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ *
+ * Highly based on video-buf written originally by:
+ * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
+ * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2006 Ted Walther and John Sokol
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <media/videobuf-core.h>
+
+#define MAGIC_BUFFER 0x20070728
+#define MAGIC_CHECK(is, should) \
+ do { \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR \
+ "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ } \
+ } while (0)
+
+static int debug;
+module_param(debug, int, 0644);
+
+MODULE_DESCRIPTION("helper module to manage video4linux buffers");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_LICENSE("GPL");
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
+ } while (0)
+
+/* --------------------------------------------------------------------- */
+
+#define CALL(q, f, arg...) \
+ ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
+#define CALLPTR(q, f, arg...) \
+ ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
+
+struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
+{
+ struct videobuf_buffer *vb;
+
+ BUG_ON(q->msize < sizeof(*vb));
+
+ if (!q->int_ops || !q->int_ops->alloc_vb) {
+ printk(KERN_ERR "No specific ops defined!\n");
+ BUG();
+ }
+
+ vb = q->int_ops->alloc_vb(q->msize);
+ if (NULL != vb) {
+ init_waitqueue_head(&vb->done);
+ vb->magic = MAGIC_BUFFER;
+ }
+
+ return vb;
+}
+EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
+
+/* Despite its name, this returns true once the buffer is *neither* active
+ * nor queued, i.e. when a waiter may safely stop waiting. */
+static int is_state_active_or_queued(struct videobuf_queue *q, struct videobuf_buffer *vb)
+{
+	unsigned long flags;
+	bool rc;
+
+	spin_lock_irqsave(q->irqlock, flags);
+	rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
+	spin_unlock_irqrestore(q->irqlock, flags);
+	return rc;
+}
+
+int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
+ int non_blocking, int intr)
+{
+ bool is_ext_locked;
+ int ret = 0;
+
+ MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
+
+ if (non_blocking) {
+ if (is_state_active_or_queued(q, vb))
+ return 0;
+ return -EAGAIN;
+ }
+
+ is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);
+
+ /* Release vdev lock to prevent this wait from blocking outside access to
+ the device. */
+ if (is_ext_locked)
+ mutex_unlock(q->ext_lock);
+ if (intr)
+ ret = wait_event_interruptible(vb->done, is_state_active_or_queued(q, vb));
+ else
+ wait_event(vb->done, is_state_active_or_queued(q, vb));
+ /* Relock */
+ if (is_ext_locked)
+ mutex_lock(q->ext_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(videobuf_waiton);
+
+int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
+{
+ MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ return CALL(q, iolock, q, vb, fbuf);
+}
+EXPORT_SYMBOL_GPL(videobuf_iolock);
+
+void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
+ struct videobuf_buffer *buf)
+{
+ if (q->int_ops->vaddr)
+ return q->int_ops->vaddr(buf);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
+
+/* --------------------------------------------------------------------- */
+
+
+void videobuf_queue_core_init(struct videobuf_queue *q,
+ const struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv,
+ struct videobuf_qtype_ops *int_ops,
+ struct mutex *ext_lock)
+{
+ BUG_ON(!q);
+ memset(q, 0, sizeof(*q));
+ q->irqlock = irqlock;
+ q->ext_lock = ext_lock;
+ q->dev = dev;
+ q->type = type;
+ q->field = field;
+ q->msize = msize;
+ q->ops = ops;
+ q->priv_data = priv;
+ q->int_ops = int_ops;
+
+ /* All buffer operations are mandatory */
+ BUG_ON(!q->ops->buf_setup);
+ BUG_ON(!q->ops->buf_prepare);
+ BUG_ON(!q->ops->buf_queue);
+ BUG_ON(!q->ops->buf_release);
+
+ /* Lock is mandatory for queue_cancel to work */
+ BUG_ON(!irqlock);
+
+	/* Having implementations for abstract methods is mandatory */
+ BUG_ON(!q->int_ops);
+
+ mutex_init(&q->vb_lock);
+ init_waitqueue_head(&q->wait);
+ INIT_LIST_HEAD(&q->stream);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
+
+/* Locking: only used by bttv; this is unsafe, find a way to remove it */
+int videobuf_queue_is_busy(struct videobuf_queue *q)
+{
+ int i;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ if (q->streaming) {
+ dprintk(1, "busy: streaming active\n");
+ return 1;
+ }
+ if (q->reading) {
+ dprintk(1, "busy: pending read #1\n");
+ return 1;
+ }
+ if (q->read_buf) {
+ dprintk(1, "busy: pending read #2\n");
+ return 1;
+ }
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ if (q->bufs[i]->map) {
+ dprintk(1, "busy: buffer #%d mapped\n", i);
+ return 1;
+ }
+ if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
+ dprintk(1, "busy: buffer #%d queued\n", i);
+ return 1;
+ }
+ if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
+			dprintk(1, "busy: buffer #%d active\n", i);
+ return 1;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
+
+/**
+ * __videobuf_free() - free all the buffers and their control structures
+ *
+ * This function can only be called if streaming/reading is off, i.e. no buffers
+ * are under control of the driver.
+ */
+/* Locking: Caller holds q->vb_lock */
+static int __videobuf_free(struct videobuf_queue *q)
+{
+ int i;
+
+ dprintk(1, "%s\n", __func__);
+ if (!q)
+ return 0;
+
+ if (q->streaming || q->reading) {
+ dprintk(1, "Cannot free buffers when streaming or reading\n");
+ return -EBUSY;
+ }
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++)
+ if (q->bufs[i] && q->bufs[i]->map) {
+ dprintk(1, "Cannot free mmapped buffers\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ q->ops->buf_release(q, q->bufs[i]);
+ kfree(q->bufs[i]);
+ q->bufs[i] = NULL;
+ }
+
+ return 0;
+}
+
+/* Locking: Caller holds q->vb_lock */
+void videobuf_queue_cancel(struct videobuf_queue *q)
+{
+ unsigned long flags = 0;
+ int i;
+
+ q->streaming = 0;
+ q->reading = 0;
+ wake_up_interruptible_sync(&q->wait);
+
+ /* remove queued buffers from list */
+ spin_lock_irqsave(q->irqlock, flags);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
+ list_del(&q->bufs[i]->queue);
+ q->bufs[i]->state = VIDEOBUF_ERROR;
+ wake_up_all(&q->bufs[i]->done);
+ }
+ }
+ spin_unlock_irqrestore(q->irqlock, flags);
+
+ /* free all buffers + clear queue */
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ q->ops->buf_release(q, q->bufs[i]);
+ }
+ INIT_LIST_HEAD(&q->stream);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
+
+/* --------------------------------------------------------------------- */
+
+/* Locking: Caller holds q->vb_lock */
+enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
+{
+ enum v4l2_field field = q->field;
+
+ BUG_ON(V4L2_FIELD_ANY == field);
+
+ if (V4L2_FIELD_ALTERNATE == field) {
+ if (V4L2_FIELD_TOP == q->last) {
+ field = V4L2_FIELD_BOTTOM;
+ q->last = V4L2_FIELD_BOTTOM;
+ } else {
+ field = V4L2_FIELD_TOP;
+ q->last = V4L2_FIELD_TOP;
+ }
+ }
+ return field;
+}
+EXPORT_SYMBOL_GPL(videobuf_next_field);
+
+/* Locking: Caller holds q->vb_lock */
+static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
+ struct videobuf_buffer *vb, enum v4l2_buf_type type)
+{
+ MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ b->index = vb->i;
+ b->type = type;
+
+ b->memory = vb->memory;
+ switch (b->memory) {
+ case V4L2_MEMORY_MMAP:
+ b->m.offset = vb->boff;
+ b->length = vb->bsize;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ b->m.userptr = vb->baddr;
+ b->length = vb->bsize;
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ b->m.offset = vb->boff;
+ break;
+ case V4L2_MEMORY_DMABUF:
+ /* DMABUF is not handled in videobuf framework */
+ break;
+ }
+
+ b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ if (vb->map)
+ b->flags |= V4L2_BUF_FLAG_MAPPED;
+
+ switch (vb->state) {
+ case VIDEOBUF_PREPARED:
+ case VIDEOBUF_QUEUED:
+ case VIDEOBUF_ACTIVE:
+ b->flags |= V4L2_BUF_FLAG_QUEUED;
+ break;
+ case VIDEOBUF_ERROR:
+ b->flags |= V4L2_BUF_FLAG_ERROR;
+ /* fall through */
+ case VIDEOBUF_DONE:
+ b->flags |= V4L2_BUF_FLAG_DONE;
+ break;
+ case VIDEOBUF_NEEDS_INIT:
+ case VIDEOBUF_IDLE:
+ /* nothing */
+ break;
+ }
+
+ b->field = vb->field;
+ b->timestamp = vb->ts;
+ b->bytesused = vb->size;
+ b->sequence = vb->field_count >> 1;
+}
+
+int videobuf_mmap_free(struct videobuf_queue *q)
+{
+ int ret;
+ videobuf_queue_lock(q);
+ ret = __videobuf_free(q);
+ videobuf_queue_unlock(q);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(videobuf_mmap_free);
+
+/* Locking: Caller holds q->vb_lock */
+int __videobuf_mmap_setup(struct videobuf_queue *q,
+ unsigned int bcount, unsigned int bsize,
+ enum v4l2_memory memory)
+{
+ unsigned int i;
+ int err;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ err = __videobuf_free(q);
+ if (0 != err)
+ return err;
+
+ /* Allocate and initialize buffers */
+ for (i = 0; i < bcount; i++) {
+ q->bufs[i] = videobuf_alloc_vb(q);
+
+ if (NULL == q->bufs[i])
+ break;
+
+ q->bufs[i]->i = i;
+ q->bufs[i]->memory = memory;
+ q->bufs[i]->bsize = bsize;
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ case V4L2_MEMORY_OVERLAY:
+ case V4L2_MEMORY_DMABUF:
+ /* nothing */
+ break;
+ }
+ }
+
+ if (!i)
+ return -ENOMEM;
+
+ dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
+
+ return i;
+}
+EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
+
+int videobuf_mmap_setup(struct videobuf_queue *q,
+ unsigned int bcount, unsigned int bsize,
+ enum v4l2_memory memory)
+{
+ int ret;
+ videobuf_queue_lock(q);
+ ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
+ videobuf_queue_unlock(q);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
+
+int videobuf_reqbufs(struct videobuf_queue *q,
+ struct v4l2_requestbuffers *req)
+{
+ unsigned int size, count;
+ int retval;
+
+ if (req->memory != V4L2_MEMORY_MMAP &&
+ req->memory != V4L2_MEMORY_USERPTR &&
+ req->memory != V4L2_MEMORY_OVERLAY) {
+ dprintk(1, "reqbufs: memory type invalid\n");
+ return -EINVAL;
+ }
+
+ videobuf_queue_lock(q);
+ if (req->type != q->type) {
+ dprintk(1, "reqbufs: queue type invalid\n");
+ retval = -EINVAL;
+ goto done;
+ }
+
+ if (q->streaming) {
+ dprintk(1, "reqbufs: streaming already exists\n");
+ retval = -EBUSY;
+ goto done;
+ }
+ if (!list_empty(&q->stream)) {
+ dprintk(1, "reqbufs: stream running\n");
+ retval = -EBUSY;
+ goto done;
+ }
+
+ if (req->count == 0) {
+ dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
+ retval = __videobuf_free(q);
+ goto done;
+ }
+
+ count = req->count;
+ if (count > VIDEO_MAX_FRAME)
+ count = VIDEO_MAX_FRAME;
+ size = 0;
+ q->ops->buf_setup(q, &count, &size);
+ dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
+ count, size,
+ (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
+
+ retval = __videobuf_mmap_setup(q, count, size, req->memory);
+ if (retval < 0) {
+ dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
+ goto done;
+ }
+
+ req->count = retval;
+ retval = 0;
+
+ done:
+ videobuf_queue_unlock(q);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_reqbufs);
+
+int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
+{
+ int ret = -EINVAL;
+
+ videobuf_queue_lock(q);
+ if (unlikely(b->type != q->type)) {
+ dprintk(1, "querybuf: Wrong type.\n");
+ goto done;
+ }
+ if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
+ dprintk(1, "querybuf: index out of range.\n");
+ goto done;
+ }
+ if (unlikely(NULL == q->bufs[b->index])) {
+ dprintk(1, "querybuf: buffer is null.\n");
+ goto done;
+ }
+
+ videobuf_status(q, b, q->bufs[b->index], q->type);
+
+ ret = 0;
+done:
+ videobuf_queue_unlock(q);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(videobuf_querybuf);
+
+int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
+{
+ struct videobuf_buffer *buf;
+ enum v4l2_field field;
+ unsigned long flags = 0;
+ int retval;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ if (b->memory == V4L2_MEMORY_MMAP)
+ down_read(&current->mm->mmap_sem);
+
+ videobuf_queue_lock(q);
+ retval = -EBUSY;
+ if (q->reading) {
+ dprintk(1, "qbuf: Reading running...\n");
+ goto done;
+ }
+ retval = -EINVAL;
+ if (b->type != q->type) {
+ dprintk(1, "qbuf: Wrong type.\n");
+ goto done;
+ }
+ if (b->index >= VIDEO_MAX_FRAME) {
+ dprintk(1, "qbuf: index out of range.\n");
+ goto done;
+ }
+ buf = q->bufs[b->index];
+ if (NULL == buf) {
+ dprintk(1, "qbuf: buffer is null.\n");
+ goto done;
+ }
+ MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
+ if (buf->memory != b->memory) {
+ dprintk(1, "qbuf: memory type is wrong.\n");
+ goto done;
+ }
+ if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
+ dprintk(1, "qbuf: buffer is already queued or active.\n");
+ goto done;
+ }
+
+ switch (b->memory) {
+ case V4L2_MEMORY_MMAP:
+ if (0 == buf->baddr) {
+ dprintk(1, "qbuf: mmap requested "
+ "but buffer addr is zero!\n");
+ goto done;
+ }
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
+ || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
+ || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
+ buf->size = b->bytesused;
+ buf->field = b->field;
+ buf->ts = b->timestamp;
+ }
+ break;
+ case V4L2_MEMORY_USERPTR:
+ if (b->length < buf->bsize) {
+ dprintk(1, "qbuf: buffer length is not enough\n");
+ goto done;
+ }
+ if (VIDEOBUF_NEEDS_INIT != buf->state &&
+ buf->baddr != b->m.userptr)
+ q->ops->buf_release(q, buf);
+ buf->baddr = b->m.userptr;
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ buf->boff = b->m.offset;
+ break;
+ default:
+ dprintk(1, "qbuf: wrong memory type\n");
+ goto done;
+ }
+
+ dprintk(1, "qbuf: requesting next field\n");
+ field = videobuf_next_field(q);
+ retval = q->ops->buf_prepare(q, buf, field);
+ if (0 != retval) {
+ dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
+ goto done;
+ }
+
+ list_add_tail(&buf->stream, &q->stream);
+ if (q->streaming) {
+ spin_lock_irqsave(q->irqlock, flags);
+ q->ops->buf_queue(q, buf);
+ spin_unlock_irqrestore(q->irqlock, flags);
+ }
+ dprintk(1, "qbuf: succeeded\n");
+ retval = 0;
+ wake_up_interruptible_sync(&q->wait);
+
+done:
+ videobuf_queue_unlock(q);
+
+ if (b->memory == V4L2_MEMORY_MMAP)
+ up_read(&current->mm->mmap_sem);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_qbuf);
+
+/* Locking: Caller holds q->vb_lock */
+static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
+{
+ int retval;
+
+checks:
+ if (!q->streaming) {
+ dprintk(1, "next_buffer: Not streaming\n");
+ retval = -EINVAL;
+ goto done;
+ }
+
+ if (list_empty(&q->stream)) {
+ if (noblock) {
+ retval = -EAGAIN;
+ dprintk(2, "next_buffer: no buffers to dequeue\n");
+ goto done;
+ } else {
+ dprintk(2, "next_buffer: waiting on buffer\n");
+
+ /* Drop lock to avoid deadlock with qbuf */
+ videobuf_queue_unlock(q);
+
+			/* Checking list_empty and streaming without locks is
+			 * safe here because we jump back to "checks" and
+			 * re-validate both while holding the lock before
+			 * proceeding */
+ retval = wait_event_interruptible(q->wait,
+ !list_empty(&q->stream) || !q->streaming);
+ videobuf_queue_lock(q);
+
+ if (retval)
+ goto done;
+
+ goto checks;
+ }
+ }
+
+ retval = 0;
+
+done:
+ return retval;
+}
+
+/* Locking: Caller holds q->vb_lock */
+static int stream_next_buffer(struct videobuf_queue *q,
+ struct videobuf_buffer **vb, int nonblocking)
+{
+ int retval;
+ struct videobuf_buffer *buf = NULL;
+
+ retval = stream_next_buffer_check_queue(q, nonblocking);
+ if (retval)
+ goto done;
+
+ buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
+ retval = videobuf_waiton(q, buf, nonblocking, 1);
+ if (retval < 0)
+ goto done;
+
+ *vb = buf;
+done:
+ return retval;
+}
+
+int videobuf_dqbuf(struct videobuf_queue *q,
+ struct v4l2_buffer *b, int nonblocking)
+{
+ struct videobuf_buffer *buf = NULL;
+ int retval;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ memset(b, 0, sizeof(*b));
+ videobuf_queue_lock(q);
+
+ retval = stream_next_buffer(q, &buf, nonblocking);
+ if (retval < 0) {
+ dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
+ goto done;
+ }
+
+ switch (buf->state) {
+ case VIDEOBUF_ERROR:
+ dprintk(1, "dqbuf: state is error\n");
+ break;
+ case VIDEOBUF_DONE:
+ dprintk(1, "dqbuf: state is done\n");
+ break;
+ default:
+ dprintk(1, "dqbuf: state invalid\n");
+ retval = -EINVAL;
+ goto done;
+ }
+ CALL(q, sync, q, buf);
+ videobuf_status(q, b, buf, q->type);
+ list_del(&buf->stream);
+ buf->state = VIDEOBUF_IDLE;
+ b->flags &= ~V4L2_BUF_FLAG_DONE;
+done:
+ videobuf_queue_unlock(q);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_dqbuf);
+
+int videobuf_streamon(struct videobuf_queue *q)
+{
+ struct videobuf_buffer *buf;
+ unsigned long flags = 0;
+ int retval;
+
+ videobuf_queue_lock(q);
+ retval = -EBUSY;
+ if (q->reading)
+ goto done;
+ retval = 0;
+ if (q->streaming)
+ goto done;
+ q->streaming = 1;
+ spin_lock_irqsave(q->irqlock, flags);
+ list_for_each_entry(buf, &q->stream, stream)
+ if (buf->state == VIDEOBUF_PREPARED)
+ q->ops->buf_queue(q, buf);
+ spin_unlock_irqrestore(q->irqlock, flags);
+
+ wake_up_interruptible_sync(&q->wait);
+done:
+ videobuf_queue_unlock(q);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_streamon);
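
Taken together, videobuf_reqbufs(), videobuf_querybuf(), videobuf_qbuf(), videobuf_streamon() and videobuf_dqbuf() back the classic V4L2 mmap streaming sequence. A minimal userspace sketch (the buffer count and the prior format negotiation are assumptions; fd is an already configured capture node):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static int capture_one_frame(int fd)
{
	struct v4l2_requestbuffers req = {
		.count = 4,
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
	};
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	struct v4l2_buffer buf;
	void *maps[4];
	unsigned int i;

	if (ioctl(fd, VIDIOC_REQBUFS, &req))		/* videobuf_reqbufs() */
		return -1;
	if (req.count > 4)
		req.count = 4;
	for (i = 0; i < req.count; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = req.type;
		buf.memory = req.memory;
		buf.index = i;
		if (ioctl(fd, VIDIOC_QUERYBUF, &buf))	/* videobuf_querybuf() */
			return -1;
		maps[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, buf.m.offset);
		if (maps[i] == MAP_FAILED)
			return -1;
		if (ioctl(fd, VIDIOC_QBUF, &buf))	/* videobuf_qbuf() */
			return -1;
	}
	if (ioctl(fd, VIDIOC_STREAMON, &type))		/* videobuf_streamon() */
		return -1;
	memset(&buf, 0, sizeof(buf));
	buf.type = type;
	buf.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_DQBUF, &buf))		/* videobuf_dqbuf() */
		return -1;
	/* buf.bytesused bytes of frame data now sit at maps[buf.index] */
	return ioctl(fd, VIDIOC_QBUF, &buf);		/* requeue the buffer */
}
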
+
+/* Locking: Caller holds q->vb_lock */
+static int __videobuf_streamoff(struct videobuf_queue *q)
+{
+ if (!q->streaming)
+ return -EINVAL;
+
+ videobuf_queue_cancel(q);
+
+ return 0;
+}
+
+int videobuf_streamoff(struct videobuf_queue *q)
+{
+ int retval;
+
+ videobuf_queue_lock(q);
+ retval = __videobuf_streamoff(q);
+ videobuf_queue_unlock(q);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_streamoff);
+
+/* Locking: Caller holds q->vb_lock */
+static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
+ char __user *data,
+ size_t count, loff_t *ppos)
+{
+ enum v4l2_field field;
+ unsigned long flags = 0;
+ int retval;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ /* setup stuff */
+ q->read_buf = videobuf_alloc_vb(q);
+ if (NULL == q->read_buf)
+ return -ENOMEM;
+
+ q->read_buf->memory = V4L2_MEMORY_USERPTR;
+ q->read_buf->baddr = (unsigned long)data;
+ q->read_buf->bsize = count;
+
+ field = videobuf_next_field(q);
+ retval = q->ops->buf_prepare(q, q->read_buf, field);
+ if (0 != retval)
+ goto done;
+
+ /* start capture & wait */
+ spin_lock_irqsave(q->irqlock, flags);
+ q->ops->buf_queue(q, q->read_buf);
+ spin_unlock_irqrestore(q->irqlock, flags);
+ retval = videobuf_waiton(q, q->read_buf, 0, 0);
+ if (0 == retval) {
+ CALL(q, sync, q, q->read_buf);
+ if (VIDEOBUF_ERROR == q->read_buf->state)
+ retval = -EIO;
+ else
+ retval = q->read_buf->size;
+ }
+
+done:
+ /* cleanup */
+ q->ops->buf_release(q, q->read_buf);
+ kfree(q->read_buf);
+ q->read_buf = NULL;
+ return retval;
+}
+
+static int __videobuf_copy_to_user(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ char __user *data, size_t count,
+ int nonblocking)
+{
+ void *vaddr = CALLPTR(q, vaddr, buf);
+
+ /* copy to userspace */
+ if (count > buf->size - q->read_off)
+ count = buf->size - q->read_off;
+
+ if (copy_to_user(data, vaddr + q->read_off, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static int __videobuf_copy_stream(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ char __user *data, size_t count, size_t pos,
+ int vbihack, int nonblocking)
+{
+ unsigned int *fc = CALLPTR(q, vaddr, buf);
+
+ if (vbihack) {
+ /* dirty, undocumented hack -- pass the frame counter
+ * within the last four bytes of each vbi data block.
+ * We need that one to maintain backward compatibility
+ * to all vbi decoding software out there ... */
+ fc += (buf->size >> 2) - 1;
+ *fc = buf->field_count >> 1;
+ dprintk(1, "vbihack: %d\n", *fc);
+ }
+
+ /* copy stuff using the common method */
+ count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
+
+ if ((count == -EFAULT) && (pos == 0))
+ return -EFAULT;
+
+ return count;
+}
+
+ssize_t videobuf_read_one(struct videobuf_queue *q,
+ char __user *data, size_t count, loff_t *ppos,
+ int nonblocking)
+{
+ enum v4l2_field field;
+ unsigned long flags = 0;
+ unsigned size = 0, nbufs = 1;
+ int retval;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ videobuf_queue_lock(q);
+
+ q->ops->buf_setup(q, &nbufs, &size);
+
+ if (NULL == q->read_buf &&
+ count >= size &&
+ !nonblocking) {
+ retval = videobuf_read_zerocopy(q, data, count, ppos);
+ if (retval >= 0 || retval == -EIO)
+ /* ok, all done */
+ goto done;
+ /* fallback to kernel bounce buffer on failures */
+ }
+
+ if (NULL == q->read_buf) {
+ /* need to capture a new frame */
+ retval = -ENOMEM;
+ q->read_buf = videobuf_alloc_vb(q);
+
+ dprintk(1, "video alloc=0x%p\n", q->read_buf);
+ if (NULL == q->read_buf)
+ goto done;
+ q->read_buf->memory = V4L2_MEMORY_USERPTR;
+ q->read_buf->bsize = count; /* preferred size */
+ field = videobuf_next_field(q);
+ retval = q->ops->buf_prepare(q, q->read_buf, field);
+
+ if (0 != retval) {
+ kfree(q->read_buf);
+ q->read_buf = NULL;
+ goto done;
+ }
+
+ spin_lock_irqsave(q->irqlock, flags);
+ q->ops->buf_queue(q, q->read_buf);
+ spin_unlock_irqrestore(q->irqlock, flags);
+
+ q->read_off = 0;
+ }
+
+ /* wait until capture is done */
+ retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
+ if (0 != retval)
+ goto done;
+
+ CALL(q, sync, q, q->read_buf);
+
+ if (VIDEOBUF_ERROR == q->read_buf->state) {
+ /* catch I/O errors */
+ q->ops->buf_release(q, q->read_buf);
+ kfree(q->read_buf);
+ q->read_buf = NULL;
+ retval = -EIO;
+ goto done;
+ }
+
+ /* Copy to userspace */
+ retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
+ if (retval < 0)
+ goto done;
+
+ q->read_off += retval;
+ if (q->read_off == q->read_buf->size) {
+ /* all data copied, cleanup */
+ q->ops->buf_release(q, q->read_buf);
+ kfree(q->read_buf);
+ q->read_buf = NULL;
+ }
+
+done:
+ videobuf_queue_unlock(q);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_read_one);
+
+/* Locking: Caller holds q->vb_lock */
+static int __videobuf_read_start(struct videobuf_queue *q)
+{
+ enum v4l2_field field;
+ unsigned long flags = 0;
+ unsigned int count = 0, size = 0;
+ int err, i;
+
+ q->ops->buf_setup(q, &count, &size);
+ if (count < 2)
+ count = 2;
+ if (count > VIDEO_MAX_FRAME)
+ count = VIDEO_MAX_FRAME;
+ size = PAGE_ALIGN(size);
+
+ err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
+ if (err < 0)
+ return err;
+
+ count = err;
+
+ for (i = 0; i < count; i++) {
+ field = videobuf_next_field(q);
+ err = q->ops->buf_prepare(q, q->bufs[i], field);
+ if (err)
+ return err;
+ list_add_tail(&q->bufs[i]->stream, &q->stream);
+ }
+ spin_lock_irqsave(q->irqlock, flags);
+ for (i = 0; i < count; i++)
+ q->ops->buf_queue(q, q->bufs[i]);
+ spin_unlock_irqrestore(q->irqlock, flags);
+ q->reading = 1;
+ return 0;
+}
+
+static void __videobuf_read_stop(struct videobuf_queue *q)
+{
+ int i;
+
+ videobuf_queue_cancel(q);
+ __videobuf_free(q);
+ INIT_LIST_HEAD(&q->stream);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ kfree(q->bufs[i]);
+ q->bufs[i] = NULL;
+ }
+ q->read_buf = NULL;
+}
+
+int videobuf_read_start(struct videobuf_queue *q)
+{
+ int rc;
+
+ videobuf_queue_lock(q);
+ rc = __videobuf_read_start(q);
+ videobuf_queue_unlock(q);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(videobuf_read_start);
+
+void videobuf_read_stop(struct videobuf_queue *q)
+{
+ videobuf_queue_lock(q);
+ __videobuf_read_stop(q);
+ videobuf_queue_unlock(q);
+}
+EXPORT_SYMBOL_GPL(videobuf_read_stop);
+
+void videobuf_stop(struct videobuf_queue *q)
+{
+ videobuf_queue_lock(q);
+
+ if (q->streaming)
+ __videobuf_streamoff(q);
+
+ if (q->reading)
+ __videobuf_read_stop(q);
+
+ videobuf_queue_unlock(q);
+}
+EXPORT_SYMBOL_GPL(videobuf_stop);
+
+ssize_t videobuf_read_stream(struct videobuf_queue *q,
+ char __user *data, size_t count, loff_t *ppos,
+ int vbihack, int nonblocking)
+{
+ int rc, retval;
+ unsigned long flags = 0;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ dprintk(2, "%s\n", __func__);
+ videobuf_queue_lock(q);
+ retval = -EBUSY;
+ if (q->streaming)
+ goto done;
+ if (!q->reading) {
+ retval = __videobuf_read_start(q);
+ if (retval < 0)
+ goto done;
+ }
+
+ retval = 0;
+ while (count > 0) {
+ /* get / wait for data */
+ if (NULL == q->read_buf) {
+ q->read_buf = list_entry(q->stream.next,
+ struct videobuf_buffer,
+ stream);
+ list_del(&q->read_buf->stream);
+ q->read_off = 0;
+ }
+ rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
+ if (rc < 0) {
+ if (0 == retval)
+ retval = rc;
+ break;
+ }
+
+ if (q->read_buf->state == VIDEOBUF_DONE) {
+ rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
+ retval, vbihack, nonblocking);
+ if (rc < 0) {
+ retval = rc;
+ break;
+ }
+ retval += rc;
+ count -= rc;
+ q->read_off += rc;
+ } else {
+ /* some error */
+ q->read_off = q->read_buf->size;
+ if (0 == retval)
+ retval = -EIO;
+ }
+
+ /* requeue buffer when done with copying */
+ if (q->read_off == q->read_buf->size) {
+ list_add_tail(&q->read_buf->stream,
+ &q->stream);
+ spin_lock_irqsave(q->irqlock, flags);
+ q->ops->buf_queue(q, q->read_buf);
+ spin_unlock_irqrestore(q->irqlock, flags);
+ q->read_buf = NULL;
+ }
+ if (retval < 0)
+ break;
+ }
+
+done:
+ videobuf_queue_unlock(q);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_read_stream);
+
+unsigned int videobuf_poll_stream(struct file *file,
+ struct videobuf_queue *q,
+ poll_table *wait)
+{
+ unsigned long req_events = poll_requested_events(wait);
+ struct videobuf_buffer *buf = NULL;
+ unsigned int rc = 0;
+
+ videobuf_queue_lock(q);
+ if (q->streaming) {
+ if (!list_empty(&q->stream))
+ buf = list_entry(q->stream.next,
+ struct videobuf_buffer, stream);
+ } else if (req_events & (POLLIN | POLLRDNORM)) {
+ if (!q->reading)
+ __videobuf_read_start(q);
+ if (!q->reading) {
+ rc = POLLERR;
+ } else if (NULL == q->read_buf) {
+ q->read_buf = list_entry(q->stream.next,
+ struct videobuf_buffer,
+ stream);
+ list_del(&q->read_buf->stream);
+ q->read_off = 0;
+ }
+ buf = q->read_buf;
+ }
+ if (!buf)
+ rc = POLLERR;
+
+ if (0 == rc) {
+ poll_wait(file, &buf->done, wait);
+ if (buf->state == VIDEOBUF_DONE ||
+ buf->state == VIDEOBUF_ERROR) {
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ rc = POLLOUT | POLLWRNORM;
+ break;
+ default:
+ rc = POLLIN | POLLRDNORM;
+ break;
+ }
+ }
+ }
+ videobuf_queue_unlock(q);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(videobuf_poll_stream);
+
+int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
+{
+ int rc = -EINVAL;
+ int i;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
+ dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
+ return -EINVAL;
+ }
+
+ videobuf_queue_lock(q);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ struct videobuf_buffer *buf = q->bufs[i];
+
+ if (buf && buf->memory == V4L2_MEMORY_MMAP &&
+ buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
+ rc = CALL(q, mmap_mapper, q, buf, vma);
+ break;
+ }
+ }
+ videobuf_queue_unlock(q);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
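
A sketch (the "foo" driver and its structure are hypothetical) of how a videobuf-based driver exposes the read(), poll() and mmap() helpers above through its file operations:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf-core.h>

struct foo_dev {
	struct videobuf_queue vb_q;
};

static ssize_t foo_read(struct file *file, char __user *data,
			size_t count, loff_t *ppos)
{
	struct foo_dev *dev = video_drvdata(file);

	return videobuf_read_one(&dev->vb_q, data, count, ppos,
				 file->f_flags & O_NONBLOCK);
}

static unsigned int foo_poll(struct file *file, poll_table *wait)
{
	struct foo_dev *dev = video_drvdata(file);

	return videobuf_poll_stream(file, &dev->vb_q, wait);
}

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_dev *dev = video_drvdata(file);

	return videobuf_mmap_mapper(&dev->vb_q, vma);
}

static const struct v4l2_file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.read		= foo_read,
	.poll		= foo_poll,
	.mmap		= foo_mmap,
	.unlocked_ioctl	= video_ioctl2,
};
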
diff --git a/kernel/drivers/media/v4l2-core/videobuf-dma-contig.c b/kernel/drivers/media/v4l2-core/videobuf-dma-contig.c
new file mode 100644
index 000000000..e02353e34
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -0,0 +1,412 @@
+/*
+ * helper functions for physically contiguous capture buffers
+ *
+ * The functions support hardware lacking scatter gather support
+ * (i.e. the buffers must be linear in physical memory)
+ *
+ * Copyright (c) 2008 Magnus Damm
+ *
+ * Based on videobuf-vmalloc.c,
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <media/videobuf-dma-contig.h>
+
+struct videobuf_dma_contig_memory {
+ u32 magic;
+ void *vaddr;
+ dma_addr_t dma_handle;
+ unsigned long size;
+};
+
+#define MAGIC_DC_MEM 0x0733ac61
+#define MAGIC_CHECK(is, should) \
+	do { \
+		if (unlikely((is) != (should))) { \
+			pr_err("magic mismatch: %x expected %x\n", \
+			       (is), (should)); \
+			BUG(); \
+		} \
+	} while (0)
+
+static int __videobuf_dc_alloc(struct device *dev,
+ struct videobuf_dma_contig_memory *mem,
+ unsigned long size, gfp_t flags)
+{
+ mem->size = size;
+ mem->vaddr = dma_alloc_coherent(dev, mem->size,
+ &mem->dma_handle, flags);
+
+ if (!mem->vaddr) {
+ dev_err(dev, "memory alloc size %ld failed\n", mem->size);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);
+
+ return 0;
+}
+
+static void __videobuf_dc_free(struct device *dev,
+ struct videobuf_dma_contig_memory *mem)
+{
+ dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
+
+ mem->vaddr = NULL;
+}
+
+static void videobuf_vm_open(struct vm_area_struct *vma)
+{
+ struct videobuf_mapping *map = vma->vm_private_data;
+
+ dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+ map, map->count, vma->vm_start, vma->vm_end);
+
+ map->count++;
+}
+
+static void videobuf_vm_close(struct vm_area_struct *vma)
+{
+ struct videobuf_mapping *map = vma->vm_private_data;
+ struct videobuf_queue *q = map->q;
+ int i;
+
+ dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
+ map, map->count, vma->vm_start, vma->vm_end);
+
+ map->count--;
+ if (0 == map->count) {
+ struct videobuf_dma_contig_memory *mem;
+
+ dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
+
+		/* We need to cancel streams first, before unmapping */
+ if (q->streaming)
+ videobuf_queue_cancel(q);
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+
+ if (q->bufs[i]->map != map)
+ continue;
+
+ mem = q->bufs[i]->priv;
+ if (mem) {
+				/* This callback is called only if the kernel
+				   has allocated memory and this memory is
+				   mmapped. In that case the memory must be
+				   freed here in order to unmap it.
+				*/
+
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+				/* vfree is not atomic - it can't be
+				   called with IRQs disabled
+				*/
+ dev_dbg(q->dev, "buf[%d] freeing %p\n",
+ i, mem->vaddr);
+
+ __videobuf_dc_free(q->dev, mem);
+ mem->vaddr = NULL;
+ }
+
+ q->bufs[i]->map = NULL;
+ q->bufs[i]->baddr = 0;
+ }
+
+ kfree(map);
+
+ videobuf_queue_unlock(q);
+ }
+}
+
+static const struct vm_operations_struct videobuf_vm_ops = {
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
+};
+
+/**
+ * videobuf_dma_contig_user_put() - reset pointer to user space buffer
+ * @mem: per-buffer private videobuf-dma-contig data
+ *
+ * This function resets the user space pointer
+ */
+static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
+{
+ mem->dma_handle = 0;
+ mem->size = 0;
+}
+
+/**
+ * videobuf_dma_contig_user_get() - setup user space memory pointer
+ * @mem: per-buffer private videobuf-dma-contig data
+ * @vb: video buffer to map
+ *
+ * This function validates and sets up a pointer to user space memory.
+ * Only physically contiguous pfn-mapped memory is accepted.
+ *
+ * Returns 0 if successful.
+ */
+static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
+ struct videobuf_buffer *vb)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long prev_pfn, this_pfn;
+ unsigned long pages_done, user_address;
+ unsigned int offset;
+ int ret;
+
+ offset = vb->baddr & ~PAGE_MASK;
+ mem->size = PAGE_ALIGN(vb->size + offset);
+ ret = -EINVAL;
+
+ down_read(&mm->mmap_sem);
+
+ vma = find_vma(mm, vb->baddr);
+ if (!vma)
+ goto out_up;
+
+ if ((vb->baddr + mem->size) > vma->vm_end)
+ goto out_up;
+
+ pages_done = 0;
+ prev_pfn = 0; /* kill warning */
+ user_address = vb->baddr;
+
+ while (pages_done < (mem->size >> PAGE_SHIFT)) {
+ ret = follow_pfn(vma, user_address, &this_pfn);
+ if (ret)
+ break;
+
+ if (pages_done == 0)
+ mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
+ else if (this_pfn != (prev_pfn + 1))
+ ret = -EFAULT;
+
+ if (ret)
+ break;
+
+ prev_pfn = this_pfn;
+ user_address += PAGE_SIZE;
+ pages_done++;
+ }
+
+out_up:
+ up_read(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
+{
+ struct videobuf_dma_contig_memory *mem;
+ struct videobuf_buffer *vb;
+
+ vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
+ if (vb) {
+ vb->priv = ((char *)vb) + size;
+ mem = vb->priv;
+ mem->magic = MAGIC_DC_MEM;
+ }
+
+ return vb;
+}
+
+static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_contig_memory *mem = buf->priv;
+
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ return mem->vaddr;
+}
+
+static int __videobuf_iolock(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
+{
+ struct videobuf_dma_contig_memory *mem = vb->priv;
+
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
+
+ /* All handling should be done by __videobuf_mmap_mapper() */
+ if (!mem->vaddr) {
+			dev_err(q->dev, "memory is not allocated/mmapped.\n");
+ return -EINVAL;
+ }
+ break;
+ case V4L2_MEMORY_USERPTR:
+ dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
+
+ /* handle pointer from user space */
+ if (vb->baddr)
+ return videobuf_dma_contig_user_get(mem, vb);
+
+ /* allocate memory for the read() method */
+ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
+ GFP_KERNEL))
+ return -ENOMEM;
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ default:
+ dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
+{
+ struct videobuf_dma_contig_memory *mem;
+ struct videobuf_mapping *map;
+ int retval;
+ unsigned long size;
+
+ dev_dbg(q->dev, "%s\n", __func__);
+
+ /* create mapping + update buffer list */
+ map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ buf->map = map;
+ map->q = q;
+
+ buf->baddr = vma->vm_start;
+
+ mem = buf->priv;
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
+ GFP_KERNEL | __GFP_COMP))
+ goto error;
+
+ /* Try to remap memory */
+ size = vma->vm_end - vma->vm_start;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* the "vm_pgoff" is just used in v4l2 to find the
+ * corresponding buffer data structure which is allocated
+ * earlier and it does not mean the offset from the physical
+ * buffer start address as usual. So set it to 0 to pass
+ * the sanity check in vm_iomap_memory().
+ */
+ vma->vm_pgoff = 0;
+
+ retval = vm_iomap_memory(vma, mem->dma_handle, size);
+ if (retval) {
+ dev_err(q->dev, "mmap: remap failed with error %d. ",
+ retval);
+ dma_free_coherent(q->dev, mem->size,
+ mem->vaddr, mem->dma_handle);
+ goto error;
+ }
+
+ vma->vm_ops = &videobuf_vm_ops;
+ vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_private_data = map;
+
+ dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
+ map, q, vma->vm_start, vma->vm_end,
+ (long int)buf->bsize, vma->vm_pgoff, buf->i);
+
+ videobuf_vm_open(vma);
+
+ return 0;
+
+error:
+ kfree(map);
+ return -ENOMEM;
+}
+
+static struct videobuf_qtype_ops qops = {
+ .magic = MAGIC_QTYPE_OPS,
+ .alloc_vb = __videobuf_alloc,
+ .iolock = __videobuf_iolock,
+ .mmap_mapper = __videobuf_mmap_mapper,
+ .vaddr = __videobuf_to_vaddr,
+};
+
+void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
+ const struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv,
+ struct mutex *ext_lock)
+{
+ videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
+ priv, &qops, ext_lock);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
+
+dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_contig_memory *mem = buf->priv;
+
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ return mem->dma_handle;
+}
+EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
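
On the driver side, a sketch of the intended pairing (the "foo" structures, the frame size and the register offset are all hypothetical): initialize the queue with videobuf_queue_dma_contig_init(), and program the contiguous handle returned by videobuf_to_dma_contig() into the DMA engine from buf_queue():

#include <linux/io.h>
#include <media/videobuf-dma-contig.h>

struct foo_dev {
	struct videobuf_queue vb_q;
	spinlock_t irqlock;
	struct device *dev;
	void __iomem *regs;
};

static int foo_buf_setup(struct videobuf_queue *q, unsigned int *count,
			 unsigned int *size)
{
	*size = 640 * 480 * 2;		/* hypothetical frame size */
	if (*count < 2)
		*count = 2;
	return 0;
}

static int foo_buf_prepare(struct videobuf_queue *q,
			   struct videobuf_buffer *vb, enum v4l2_field field)
{
	vb->size = 640 * 480 * 2;
	vb->field = field;
	if (vb->state == VIDEOBUF_NEEDS_INIT) {
		int rc = videobuf_iolock(q, vb, NULL);

		if (rc)
			return rc;
	}
	vb->state = VIDEOBUF_PREPARED;
	return 0;
}

static void foo_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
	struct foo_dev *dev = q->priv_data;

	vb->state = VIDEOBUF_ACTIVE;
	/* 0x10 is a hypothetical DMA base address register */
	writel(videobuf_to_dma_contig(vb), dev->regs + 0x10);
}

static void foo_buf_release(struct videobuf_queue *q,
			    struct videobuf_buffer *vb)
{
	videobuf_dma_contig_free(q, vb);
	vb->state = VIDEOBUF_NEEDS_INIT;
}

static struct videobuf_queue_ops foo_qops = {
	.buf_setup	= foo_buf_setup,
	.buf_prepare	= foo_buf_prepare,
	.buf_queue	= foo_buf_queue,
	.buf_release	= foo_buf_release,
};

static void foo_init_queue(struct foo_dev *dev)
{
	videobuf_queue_dma_contig_init(&dev->vb_q, &foo_qops, dev->dev,
				       &dev->irqlock,
				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
				       V4L2_FIELD_NONE,
				       sizeof(struct videobuf_buffer),
				       dev, NULL);
}
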
+
+void videobuf_dma_contig_free(struct videobuf_queue *q,
+ struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_contig_memory *mem = buf->priv;
+
+	/* mmapped memory can't be freed here, otherwise the mmapped region
+	   would be released while still needed. In that case the release
+	   must happen inside videobuf_vm_close(), so free the memory here
+	   only if it was allocated for the read() operation.
+	 */
+ if (buf->memory != V4L2_MEMORY_USERPTR)
+ return;
+
+ if (!mem)
+ return;
+
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ /* handle user space pointer case */
+ if (buf->baddr) {
+ videobuf_dma_contig_user_put(mem);
+ return;
+ }
+
+ /* read() method */
+ if (mem->vaddr) {
+ __videobuf_dc_free(q->dev, mem);
+ mem->vaddr = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
+
+MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/media/v4l2-core/videobuf-dma-sg.c b/kernel/drivers/media/v4l2-core/videobuf-dma-sg.c
new file mode 100644
index 000000000..f669cedca
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -0,0 +1,684 @@
+/*
+ * helper functions for SG DMA video4linux capture buffers
+ *
+ * The functions expect the hardware to be able to scatter-gather
+ * (i.e. the buffers are not linear in physical memory, but fragmented
+ * into PAGE_SIZE chunks). They also assume the driver does not need
+ * to touch the video data.
+ *
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ *
+ * Highly based on video-buf written originally by:
+ * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
+ * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2006 Ted Walther and John Sokol
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include <media/videobuf-dma-sg.h>
+
+#define MAGIC_DMABUF 0x19721112
+#define MAGIC_SG_MEM 0x17890714
+
+#define MAGIC_CHECK(is, should) \
+	do { \
+		if (unlikely((is) != (should))) { \
+			printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
+			       is, should); \
+			BUG(); \
+		} \
+	} while (0)
+
+static int debug;
+module_param(debug, int, 0644);
+
+MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_LICENSE("GPL");
+
+#define dprintk(level, fmt, arg...) \
+	do { \
+		if (debug >= level) \
+			printk(KERN_DEBUG "vbuf-sg: " fmt, ## arg); \
+	} while (0)
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Return a scatterlist for some page-aligned vmalloc()'ed memory
+ * block (NULL on errors). Memory for the scatterlist is allocated
+ * with vzalloc; the caller must vfree() it.
+ */
+static struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt,
+ int nr_pages)
+{
+ struct scatterlist *sglist;
+ struct page *pg;
+ int i;
+
+ sglist = vzalloc(nr_pages * sizeof(*sglist));
+ if (NULL == sglist)
+ return NULL;
+ sg_init_table(sglist, nr_pages);
+ for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
+ pg = vmalloc_to_page(virt);
+ if (NULL == pg)
+ goto err;
+ BUG_ON(PageHighMem(pg));
+ sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
+ }
+ return sglist;
+
+err:
+ vfree(sglist);
+ return NULL;
+}
+
+/*
+ * Return a scatterlist for an array of userpages (NULL on errors).
+ * Memory for the scatterlist is allocated with vmalloc; the caller
+ * must vfree() it.
+ */
+static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
+ int nr_pages, int offset, size_t size)
+{
+ struct scatterlist *sglist;
+ int i;
+
+ if (NULL == pages[0])
+ return NULL;
+ sglist = vmalloc(nr_pages * sizeof(*sglist));
+ if (NULL == sglist)
+ return NULL;
+ sg_init_table(sglist, nr_pages);
+
+ if (PageHighMem(pages[0]))
+ /* DMA to highmem pages might not work */
+ goto highmem;
+ sg_set_page(&sglist[0], pages[0],
+ min_t(size_t, PAGE_SIZE - offset, size), offset);
+ size -= min_t(size_t, PAGE_SIZE - offset, size);
+ for (i = 1; i < nr_pages; i++) {
+ if (NULL == pages[i])
+ goto nopage;
+ if (PageHighMem(pages[i]))
+ goto highmem;
+ sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0);
+ size -= min_t(size_t, PAGE_SIZE, size);
+ }
+ return sglist;
+
+nopage:
+ dprintk(2, "sgl: oops - no page\n");
+ vfree(sglist);
+ return NULL;
+
+highmem:
+ dprintk(2, "sgl: oops - highmem page\n");
+ vfree(sglist);
+ return NULL;
+}
+
+/* --------------------------------------------------------------------- */
+
+struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_sg_memory *mem = buf->priv;
+ BUG_ON(!mem);
+
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
+ return &mem->dma;
+}
+EXPORT_SYMBOL_GPL(videobuf_to_dma);
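
A consumer-side sketch (the descriptor programming is hypothetical): once videobuf_iolock() below has built and mapped the scatterlist, a scatter-gather capable driver walks it through videobuf_to_dma() to feed its DMA engine:

#include <linux/scatterlist.h>
#include <media/videobuf-dma-sg.h>

static void foo_program_sg(struct videobuf_buffer *vb)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
	struct scatterlist *sg;
	int i;

	for_each_sg(dma->sglist, sg, dma->sglen, i) {
		/* hand sg_dma_address(sg) / sg_dma_len(sg) to the engine */
	}
}
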
+
+static void videobuf_dma_init(struct videobuf_dmabuf *dma)
+{
+ memset(dma, 0, sizeof(*dma));
+ dma->magic = MAGIC_DMABUF;
+}
+
+static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
+ int direction, unsigned long data, unsigned long size)
+{
+ unsigned long first, last;
+ int err, rw = 0;
+
+ dma->direction = direction;
+ switch (dma->direction) {
+ case DMA_FROM_DEVICE:
+ rw = READ;
+ break;
+ case DMA_TO_DEVICE:
+ rw = WRITE;
+ break;
+ default:
+ BUG();
+ }
+
+ first = (data & PAGE_MASK) >> PAGE_SHIFT;
+ last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
+ dma->offset = data & ~PAGE_MASK;
+ dma->size = size;
+ dma->nr_pages = last-first+1;
+ dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
+ if (NULL == dma->pages)
+ return -ENOMEM;
+
+ dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
+ data, size, dma->nr_pages);
+
+ err = get_user_pages(current, current->mm,
+ data & PAGE_MASK, dma->nr_pages,
+ rw == READ, 1, /* force */
+ dma->pages, NULL);
+
+ if (err != dma->nr_pages) {
+ dma->nr_pages = (err >= 0) ? err : 0;
+ dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
+ return err < 0 ? err : -EINVAL;
+ }
+ return 0;
+}
+
+static int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
+ unsigned long data, unsigned long size)
+{
+ int ret;
+
+ down_read(&current->mm->mmap_sem);
+ ret = videobuf_dma_init_user_locked(dma, direction, data, size);
+ up_read(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+static int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
+ int nr_pages)
+{
+ int i;
+
+ dprintk(1, "init kernel [%d pages]\n", nr_pages);
+
+ dma->direction = direction;
+ dma->vaddr_pages = kcalloc(nr_pages, sizeof(*dma->vaddr_pages),
+ GFP_KERNEL);
+ if (!dma->vaddr_pages)
+ return -ENOMEM;
+
+ dma->dma_addr = kcalloc(nr_pages, sizeof(*dma->dma_addr), GFP_KERNEL);
+ if (!dma->dma_addr) {
+ kfree(dma->vaddr_pages);
+ return -ENOMEM;
+ }
+ for (i = 0; i < nr_pages; i++) {
+ void *addr;
+
+ addr = dma_alloc_coherent(dma->dev, PAGE_SIZE,
+ &(dma->dma_addr[i]), GFP_KERNEL);
+ if (addr == NULL)
+ goto out_free_pages;
+
+ dma->vaddr_pages[i] = virt_to_page(addr);
+ }
+ dma->vaddr = vmap(dma->vaddr_pages, nr_pages, VM_MAP | VM_IOREMAP,
+ PAGE_KERNEL);
+ if (NULL == dma->vaddr) {
+		dprintk(1, "vmap(%d pages) failed\n", nr_pages);
+ goto out_free_pages;
+ }
+
+	dprintk(1, "vmap is at addr 0x%08lx, size=%d\n",
+ (unsigned long)dma->vaddr,
+ nr_pages << PAGE_SHIFT);
+
+ memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT);
+ dma->nr_pages = nr_pages;
+
+ return 0;
+out_free_pages:
+ while (i > 0) {
+ void *addr;
+
+ i--;
+ addr = page_address(dma->vaddr_pages[i]);
+ dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]);
+ }
+ kfree(dma->dma_addr);
+ dma->dma_addr = NULL;
+ kfree(dma->vaddr_pages);
+ dma->vaddr_pages = NULL;
+
+ return -ENOMEM;
+
+}
+
+static int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
+ dma_addr_t addr, int nr_pages)
+{
+ dprintk(1, "init overlay [%d pages @ bus 0x%lx]\n",
+ nr_pages, (unsigned long)addr);
+ dma->direction = direction;
+
+ if (0 == addr)
+ return -EINVAL;
+
+ dma->bus_addr = addr;
+ dma->nr_pages = nr_pages;
+
+ return 0;
+}
+
+static int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
+{
+ MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
+ BUG_ON(0 == dma->nr_pages);
+
+ if (dma->pages) {
+ dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
+ dma->offset, dma->size);
+ }
+ if (dma->vaddr) {
+ dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr,
+ dma->nr_pages);
+ }
+ if (dma->bus_addr) {
+ dma->sglist = vmalloc(sizeof(*dma->sglist));
+ if (NULL != dma->sglist) {
+ dma->sglen = 1;
+ sg_dma_address(&dma->sglist[0]) = dma->bus_addr
+ & PAGE_MASK;
+ dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
+ sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
+ }
+ }
+ if (NULL == dma->sglist) {
+ dprintk(1, "scatterlist is NULL\n");
+ return -ENOMEM;
+ }
+ if (!dma->bus_addr) {
+ dma->sglen = dma_map_sg(dev, dma->sglist,
+ dma->nr_pages, dma->direction);
+ if (0 == dma->sglen) {
+ printk(KERN_WARNING
+ "%s: videobuf_map_sg failed\n", __func__);
+ vfree(dma->sglist);
+ dma->sglist = NULL;
+ dma->sglen = 0;
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
+{
+ MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
+
+ if (!dma->sglen)
+ return 0;
+
+ dma_unmap_sg(dev, dma->sglist, dma->sglen, dma->direction);
+
+ vfree(dma->sglist);
+ dma->sglist = NULL;
+ dma->sglen = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
+
+int videobuf_dma_free(struct videobuf_dmabuf *dma)
+{
+ int i;
+ MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
+ BUG_ON(dma->sglen);
+
+ if (dma->pages) {
+ for (i = 0; i < dma->nr_pages; i++)
+ page_cache_release(dma->pages[i]);
+ kfree(dma->pages);
+ dma->pages = NULL;
+ }
+
+ if (dma->dma_addr) {
+ for (i = 0; i < dma->nr_pages; i++) {
+ void *addr;
+
+ addr = page_address(dma->vaddr_pages[i]);
+ dma_free_coherent(dma->dev, PAGE_SIZE, addr,
+ dma->dma_addr[i]);
+ }
+ kfree(dma->dma_addr);
+ dma->dma_addr = NULL;
+ kfree(dma->vaddr_pages);
+ dma->vaddr_pages = NULL;
+ vunmap(dma->vaddr);
+ dma->vaddr = NULL;
+ }
+
+ if (dma->bus_addr)
+ dma->bus_addr = 0;
+ dma->direction = DMA_NONE;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(videobuf_dma_free);
+
+/* --------------------------------------------------------------------- */
+
+static void videobuf_vm_open(struct vm_area_struct *vma)
+{
+ struct videobuf_mapping *map = vma->vm_private_data;
+
+ dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
+ map->count++;
+}
+
+static void videobuf_vm_close(struct vm_area_struct *vma)
+{
+ struct videobuf_mapping *map = vma->vm_private_data;
+ struct videobuf_queue *q = map->q;
+ struct videobuf_dma_sg_memory *mem;
+ int i;
+
+ dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
+ map->count--;
+ if (0 == map->count) {
+ dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ mem = q->bufs[i]->priv;
+ if (!mem)
+ continue;
+
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
+ if (q->bufs[i]->map != map)
+ continue;
+ q->bufs[i]->map = NULL;
+ q->bufs[i]->baddr = 0;
+ q->ops->buf_release(q, q->bufs[i]);
+ }
+ videobuf_queue_unlock(q);
+ kfree(map);
+ }
+ return;
+}
+
+/*
+ * Get an anonymous page for the mapping. Make sure we can DMA to that
+ * memory location with 32bit PCI devices (i.e. don't use highmem for
+ * now ...). Bounce buffers don't work very well for the data rates
+ * video capture has.
+ */
+static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *page;
+
+ dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
+ (unsigned long)vmf->virtual_address,
+ vma->vm_start, vma->vm_end);
+
+ page = alloc_page(GFP_USER | __GFP_DMA32);
+ if (!page)
+ return VM_FAULT_OOM;
+ clear_user_highpage(page, (unsigned long)vmf->virtual_address);
+ vmf->page = page;
+
+ return 0;
+}
+
+static const struct vm_operations_struct videobuf_vm_ops = {
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
+ .fault = videobuf_vm_fault,
+};
+
+/* ---------------------------------------------------------------------
+ * SG handlers for the generic methods
+ */
+
+/* Allocated area consists of 3 parts:
+ struct video_buffer
+ struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
+ struct videobuf_dma_sg_memory
+ */
+
+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
+{
+ struct videobuf_dma_sg_memory *mem;
+ struct videobuf_buffer *vb;
+
+ vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
+ if (!vb)
+ return vb;
+
+ mem = vb->priv = ((char *)vb) + size;
+ mem->magic = MAGIC_SG_MEM;
+
+ videobuf_dma_init(&mem->dma);
+
+ dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
+ __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
+ mem, (long)sizeof(*mem));
+
+ return vb;
+}
+
+static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_sg_memory *mem = buf->priv;
+ BUG_ON(!mem);
+
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
+ return mem->dma.vaddr;
+}
+
+static int __videobuf_iolock(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
+{
+ int err, pages;
+ dma_addr_t bus;
+ struct videobuf_dma_sg_memory *mem = vb->priv;
+ BUG_ON(!mem);
+
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
+ if (!mem->dma.dev)
+ mem->dma.dev = q->dev;
+ else
+ WARN_ON(mem->dma.dev != q->dev);
+
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_USERPTR:
+ if (0 == vb->baddr) {
+ /* no userspace addr -- kernel bounce buffer */
+ pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
+ err = videobuf_dma_init_kernel(&mem->dma,
+ DMA_FROM_DEVICE,
+ pages);
+ if (0 != err)
+ return err;
+ } else if (vb->memory == V4L2_MEMORY_USERPTR) {
+ /* dma directly to userspace */
+ err = videobuf_dma_init_user(&mem->dma,
+ DMA_FROM_DEVICE,
+ vb->baddr, vb->bsize);
+ if (0 != err)
+ return err;
+ } else {
+ /* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
+ buffers can only be called from videobuf_qbuf;
+ we take current->mm->mmap_sem there to prevent
+ a locking inversion, so don't take it here */
+
+ err = videobuf_dma_init_user_locked(&mem->dma,
+ DMA_FROM_DEVICE,
+ vb->baddr, vb->bsize);
+ if (0 != err)
+ return err;
+ }
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ if (NULL == fbuf)
+ return -EINVAL;
+ /* FIXME: need sanity checks for vb->boff */
+ /*
+ * Using a double cast to avoid compiler warnings when
+ * building for PAE. Compiler doesn't like direct casting
+ * of a 32 bit ptr to 64 bit integer.
+ */
+ bus = (dma_addr_t)(unsigned long)fbuf->base + vb->boff;
+ pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
+ err = videobuf_dma_init_overlay(&mem->dma, DMA_FROM_DEVICE,
+ bus, pages);
+ if (0 != err)
+ return err;
+ break;
+ default:
+ BUG();
+ }
+ err = videobuf_dma_map(q->dev, &mem->dma);
+ if (0 != err)
+ return err;
+
+ return 0;
+}
+
+static int __videobuf_sync(struct videobuf_queue *q,
+ struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_sg_memory *mem = buf->priv;
+ BUG_ON(!mem || !mem->dma.sglen);
+
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+ MAGIC_CHECK(mem->dma.magic, MAGIC_DMABUF);
+
+ dma_sync_sg_for_cpu(q->dev, mem->dma.sglist,
+ mem->dma.sglen, mem->dma.direction);
+
+ return 0;
+}
+
+static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
+{
+ struct videobuf_dma_sg_memory *mem = buf->priv;
+ struct videobuf_mapping *map;
+ unsigned int first, last, size = 0, i;
+ int retval;
+
+ retval = -EINVAL;
+
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
+ /* look for first buffer to map */
+ for (first = 0; first < VIDEO_MAX_FRAME; first++) {
+ if (buf == q->bufs[first]) {
+ size = PAGE_ALIGN(q->bufs[first]->bsize);
+ break;
+ }
+ }
+
+ /* paranoia, should never happen since buf is always valid. */
+ if (!size) {
+ dprintk(1, "mmap app bug: offset invalid [offset=0x%lx]\n",
+ (vma->vm_pgoff << PAGE_SHIFT));
+ goto done;
+ }
+
+ last = first;
+
+ /* create mapping + update buffer list */
+ retval = -ENOMEM;
+ map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
+ if (NULL == map)
+ goto done;
+
+ size = 0;
+ for (i = first; i <= last; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ q->bufs[i]->map = map;
+ q->bufs[i]->baddr = vma->vm_start + size;
+ size += PAGE_ALIGN(q->bufs[i]->bsize);
+ }
+
+ map->count = 1;
+ map->q = q;
+ vma->vm_ops = &videobuf_vm_ops;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
+ vma->vm_private_data = map;
+ dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
+ map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
+ retval = 0;
+
+done:
+ return retval;
+}
+
+static struct videobuf_qtype_ops sg_ops = {
+ .magic = MAGIC_QTYPE_OPS,
+
+ .alloc_vb = __videobuf_alloc_vb,
+ .iolock = __videobuf_iolock,
+ .sync = __videobuf_sync,
+ .mmap_mapper = __videobuf_mmap_mapper,
+ .vaddr = __videobuf_to_vaddr,
+};
+
+void *videobuf_sg_alloc(size_t size)
+{
+ struct videobuf_queue q;
+
+ /* Required so the generic handler calls __videobuf_alloc_vb() */
+ q.int_ops = &sg_ops;
+
+ q.msize = size;
+
+ return videobuf_alloc_vb(&q);
+}
+EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
+
+void videobuf_queue_sg_init(struct videobuf_queue *q,
+ const struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv,
+ struct mutex *ext_lock)
+{
+ videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
+ priv, &sg_ops, ext_lock);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
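+
+/*
+ * Example (a sketch with hypothetical names): a PCI capture driver
+ * would typically wire up an SG-backed queue like this, where "mydev"
+ * and "mydev_video_qops" stand in for the driver's own device struct
+ * and its struct videobuf_queue_ops:
+ *
+ *	videobuf_queue_sg_init(&mydev->vidq, &mydev_video_qops,
+ *			       &mydev->pci->dev, &mydev->slock,
+ *			       V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ *			       V4L2_FIELD_INTERLACED,
+ *			       sizeof(struct mydev_buffer), mydev, NULL);
+ */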
+
diff --git a/kernel/drivers/media/v4l2-core/videobuf-dvb.c b/kernel/drivers/media/v4l2-core/videobuf-dvb.c
new file mode 100644
index 000000000..b7efa4516
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/videobuf-dvb.c
@@ -0,0 +1,398 @@
+/*
+ *
+ * some helper functions for simple DVB cards which simply DMA the
+ * complete transport stream and let the computer sort everything else
+ * (i.e. we are using the software demux, ...). Also uses the
+ * video-buf to manage DMA buffers.
+ *
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kthread.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include <linux/freezer.h>
+
+#include <media/videobuf-core.h>
+#include <media/videobuf-dvb.h>
+
+/* ------------------------------------------------------------------ */
+
+MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
+MODULE_LICENSE("GPL");
+
+static unsigned int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages");
+
+#define dprintk(fmt, arg...) if (debug) \
+ printk(KERN_DEBUG "%s/dvb: " fmt, dvb->name , ## arg)
+
+/* ------------------------------------------------------------------ */
+
+static int videobuf_dvb_thread(void *data)
+{
+ struct videobuf_dvb *dvb = data;
+ struct videobuf_buffer *buf;
+ unsigned long flags;
+ void *outp;
+
+ dprintk("dvb thread started\n");
+ set_freezable();
+ videobuf_read_start(&dvb->dvbq);
+
+ for (;;) {
+ /* fetch next buffer */
+ buf = list_entry(dvb->dvbq.stream.next,
+ struct videobuf_buffer, stream);
+ list_del(&buf->stream);
+ videobuf_waiton(&dvb->dvbq, buf, 0, 1);
+
+ /* no more feeds left or stop_feed() asked us to quit */
+ if (0 == dvb->nfeeds)
+ break;
+ if (kthread_should_stop())
+ break;
+ try_to_freeze();
+
+ /* feed buffer data to demux */
+ outp = videobuf_queue_to_vaddr(&dvb->dvbq, buf);
+
+ if (buf->state == VIDEOBUF_DONE)
+ dvb_dmx_swfilter(&dvb->demux, outp,
+ buf->size);
+
+ /* requeue buffer */
+ list_add_tail(&buf->stream, &dvb->dvbq.stream);
+ spin_lock_irqsave(dvb->dvbq.irqlock, flags);
+ dvb->dvbq.ops->buf_queue(&dvb->dvbq, buf);
+ spin_unlock_irqrestore(dvb->dvbq.irqlock, flags);
+ }
+
+ videobuf_read_stop(&dvb->dvbq);
+ dprintk("dvb thread stopped\n");
+
+ /* Hmm, linux becomes *very* unhappy without this ... */
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ return 0;
+}
+
+static int videobuf_dvb_start_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct videobuf_dvb *dvb = demux->priv;
+ int rc;
+
+ if (!demux->dmx.frontend)
+ return -EINVAL;
+
+ mutex_lock(&dvb->lock);
+ dvb->nfeeds++;
+ rc = dvb->nfeeds;
+
+ if (NULL != dvb->thread)
+ goto out;
+ dvb->thread = kthread_run(videobuf_dvb_thread,
+ dvb, "%s dvb", dvb->name);
+ if (IS_ERR(dvb->thread)) {
+ rc = PTR_ERR(dvb->thread);
+ dvb->thread = NULL;
+ }
+
+out:
+ mutex_unlock(&dvb->lock);
+ return rc;
+}
+
+static int videobuf_dvb_stop_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct videobuf_dvb *dvb = demux->priv;
+ int err = 0;
+
+ mutex_lock(&dvb->lock);
+ dvb->nfeeds--;
+ if (0 == dvb->nfeeds && NULL != dvb->thread) {
+ err = kthread_stop(dvb->thread);
+ dvb->thread = NULL;
+ }
+ mutex_unlock(&dvb->lock);
+ return err;
+}
+
+static int videobuf_dvb_register_adapter(struct videobuf_dvb_frontends *fe,
+ struct module *module,
+ void *adapter_priv,
+ struct device *device,
+ char *adapter_name,
+ short *adapter_nr,
+ int mfe_shared)
+{
+ int result;
+
+ mutex_init(&fe->lock);
+
+ /* register adapter */
+ result = dvb_register_adapter(&fe->adapter, adapter_name, module,
+ device, adapter_nr);
+ if (result < 0) {
+ printk(KERN_WARNING "%s: dvb_register_adapter failed (errno = %d)\n",
+ adapter_name, result);
+ }
+ fe->adapter.priv = adapter_priv;
+ fe->adapter.mfe_shared = mfe_shared;
+
+ return result;
+}
+
+static int videobuf_dvb_register_frontend(struct dvb_adapter *adapter,
+ struct videobuf_dvb *dvb)
+{
+ int result;
+
+ /* register frontend */
+ result = dvb_register_frontend(adapter, dvb->frontend);
+ if (result < 0) {
+ printk(KERN_WARNING "%s: dvb_register_frontend failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_frontend;
+ }
+
+ /* register demux stuff */
+ dvb->demux.dmx.capabilities =
+ DMX_TS_FILTERING | DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING;
+ dvb->demux.priv = dvb;
+ dvb->demux.filternum = 256;
+ dvb->demux.feednum = 256;
+ dvb->demux.start_feed = videobuf_dvb_start_feed;
+ dvb->demux.stop_feed = videobuf_dvb_stop_feed;
+ result = dvb_dmx_init(&dvb->demux);
+ if (result < 0) {
+ printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_dmx;
+ }
+
+ dvb->dmxdev.filternum = 256;
+ dvb->dmxdev.demux = &dvb->demux.dmx;
+ dvb->dmxdev.capabilities = 0;
+ result = dvb_dmxdev_init(&dvb->dmxdev, adapter);
+
+ if (result < 0) {
+ printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_dmxdev;
+ }
+
+ dvb->fe_hw.source = DMX_FRONTEND_0;
+ result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+ if (result < 0) {
+ printk(KERN_WARNING "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_hw;
+ }
+
+ dvb->fe_mem.source = DMX_MEMORY_FE;
+ result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
+ if (result < 0) {
+ printk(KERN_WARNING "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_mem;
+ }
+
+ result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+ if (result < 0) {
+ printk(KERN_WARNING "%s: connect_frontend failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_conn;
+ }
+
+ /* register network adapter */
+ result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
+ if (result < 0) {
+ printk(KERN_WARNING "%s: dvb_net_init failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_conn;
+ }
+ return 0;
+
+fail_fe_conn:
+ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
+fail_fe_mem:
+ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+fail_fe_hw:
+ dvb_dmxdev_release(&dvb->dmxdev);
+fail_dmxdev:
+ dvb_dmx_release(&dvb->demux);
+fail_dmx:
+ dvb_unregister_frontend(dvb->frontend);
+fail_frontend:
+ dvb_frontend_detach(dvb->frontend);
+ dvb->frontend = NULL;
+
+ return result;
+}
+
+/* ------------------------------------------------------------------ */
+/* Register a single adapter and one or more frontends */
+int videobuf_dvb_register_bus(struct videobuf_dvb_frontends *f,
+ struct module *module,
+ void *adapter_priv,
+ struct device *device,
+ short *adapter_nr,
+ int mfe_shared)
+{
+ struct list_head *list, *q;
+ struct videobuf_dvb_frontend *fe;
+ int res;
+
+ fe = videobuf_dvb_get_frontend(f, 1);
+ if (!fe) {
+ printk(KERN_WARNING "Unable to register the adapter which has no frontends\n");
+ return -EINVAL;
+ }
+
+ /* Bring up the adapter */
+ res = videobuf_dvb_register_adapter(f, module, adapter_priv, device,
+ fe->dvb.name, adapter_nr, mfe_shared);
+ if (res < 0) {
+ printk(KERN_WARNING "videobuf_dvb_register_adapter failed (errno = %d)\n", res);
+ return res;
+ }
+
+ /* Attach all of the frontends to the adapter */
+ mutex_lock(&f->lock);
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct videobuf_dvb_frontend, felist);
+ res = videobuf_dvb_register_frontend(&f->adapter, &fe->dvb);
+ if (res < 0) {
+ printk(KERN_WARNING "%s: videobuf_dvb_register_frontend failed (errno = %d)\n",
+ fe->dvb.name, res);
+ goto err;
+ }
+ }
+ mutex_unlock(&f->lock);
+ return 0;
+
+err:
+ mutex_unlock(&f->lock);
+ videobuf_dvb_unregister_bus(f);
+ return res;
+}
+EXPORT_SYMBOL(videobuf_dvb_register_bus);
+
+void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f)
+{
+ videobuf_dvb_dealloc_frontends(f);
+
+ dvb_unregister_adapter(&f->adapter);
+}
+EXPORT_SYMBOL(videobuf_dvb_unregister_bus);
+
+struct videobuf_dvb_frontend *videobuf_dvb_get_frontend(
+ struct videobuf_dvb_frontends *f, int id)
+{
+ struct list_head *list, *q;
+ struct videobuf_dvb_frontend *fe, *ret = NULL;
+
+ mutex_lock(&f->lock);
+
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct videobuf_dvb_frontend, felist);
+ if (fe->id == id) {
+ ret = fe;
+ break;
+ }
+ }
+
+ mutex_unlock(&f->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(videobuf_dvb_get_frontend);
+
+int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f,
+ struct dvb_frontend *p)
+{
+ struct list_head *list, *q;
+ struct videobuf_dvb_frontend *fe = NULL;
+ int ret = 0;
+
+ mutex_lock(&f->lock);
+
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct videobuf_dvb_frontend, felist);
+ if (fe->dvb.frontend == p) {
+ ret = fe->id;
+ break;
+ }
+ }
+
+ mutex_unlock(&f->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(videobuf_dvb_find_frontend);
+
+struct videobuf_dvb_frontend *videobuf_dvb_alloc_frontend(
+ struct videobuf_dvb_frontends *f, int id)
+{
+ struct videobuf_dvb_frontend *fe;
+
+ fe = kzalloc(sizeof(struct videobuf_dvb_frontend), GFP_KERNEL);
+ if (fe == NULL)
+ goto fail_alloc;
+
+ fe->id = id;
+ mutex_init(&fe->dvb.lock);
+
+ mutex_lock(&f->lock);
+ list_add_tail(&fe->felist, &f->felist);
+ mutex_unlock(&f->lock);
+
+fail_alloc:
+ return fe;
+}
+EXPORT_SYMBOL(videobuf_dvb_alloc_frontend);
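+
+/*
+ * Example (a sketch with hypothetical names): a bridge driver first
+ * allocates its frontend(s), attaches a demodulator to each, and then
+ * registers the whole bus:
+ *
+ *	struct videobuf_dvb_frontend *fe;
+ *
+ *	fe = videobuf_dvb_alloc_frontend(&dev->frontends, 1);
+ *	if (!fe)
+ *		return -ENOMEM;
+ *	fe->dvb.frontend = dvb_attach(some_demod_attach, &cfg, &i2c_adap);
+ *	...
+ *	return videobuf_dvb_register_bus(&dev->frontends, THIS_MODULE,
+ *					 dev, &dev->pci->dev,
+ *					 adapter_nr, 0);
+ */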
+
+void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f)
+{
+ struct list_head *list, *q;
+ struct videobuf_dvb_frontend *fe;
+
+ mutex_lock(&f->lock);
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct videobuf_dvb_frontend, felist);
+ if (fe->dvb.net.dvbdev) {
+ dvb_net_release(&fe->dvb.net);
+ fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+ &fe->dvb.fe_mem);
+ fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+ &fe->dvb.fe_hw);
+ dvb_dmxdev_release(&fe->dvb.dmxdev);
+ dvb_dmx_release(&fe->dvb.demux);
+ dvb_unregister_frontend(fe->dvb.frontend);
+ }
+ if (fe->dvb.frontend)
+ /* always allocated, may have been reset */
+ dvb_frontend_detach(fe->dvb.frontend);
+ list_del(list); /* remove list entry */
+ kfree(fe); /* free frontend allocation */
+ }
+ mutex_unlock(&f->lock);
+}
+EXPORT_SYMBOL(videobuf_dvb_dealloc_frontends);
diff --git a/kernel/drivers/media/v4l2-core/videobuf-vmalloc.c b/kernel/drivers/media/v4l2-core/videobuf-vmalloc.c
new file mode 100644
index 000000000..2ff7fcc77
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -0,0 +1,349 @@
+/*
+ * helper functions for vmalloc video4linux capture buffers
+ *
+ * The functions expect the hardware to be able to scatter-gather
+ * (i.e. the buffers need not be linear in physical memory, but may be
+ * fragmented into PAGE_SIZE chunks). They also assume the driver does not need
+ * to touch the video data.
+ *
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include <media/videobuf-vmalloc.h>
+
+#define MAGIC_DMABUF 0x17760309
+#define MAGIC_VMAL_MEM 0x18221223
+
+#define MAGIC_CHECK(is, should) \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ }
+
+static int debug;
+module_param(debug, int, 0644);
+
+MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_LICENSE("GPL");
+
+#define dprintk(level, fmt, arg...) \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
+
+
+/***************************************************************************/
+
+static void videobuf_vm_open(struct vm_area_struct *vma)
+{
+ struct videobuf_mapping *map = vma->vm_private_data;
+
+ dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
+ map->count++;
+}
+
+static void videobuf_vm_close(struct vm_area_struct *vma)
+{
+ struct videobuf_mapping *map = vma->vm_private_data;
+ struct videobuf_queue *q = map->q;
+ int i;
+
+ dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
+ map->count--;
+ if (0 == map->count) {
+ struct videobuf_vmalloc_memory *mem;
+
+ dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
+
+ /* We first need to cancel streams before unmapping */
+ if (q->streaming)
+ videobuf_queue_cancel(q);
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+
+ if (q->bufs[i]->map != map)
+ continue;
+
+ mem = q->bufs[i]->priv;
+ if (mem) {
+ /* This callback is called only if the kernel has
+ allocated memory and this memory is mmapped.
+ In this case, the memory should be freed in
+ order to unmap it.
+ */
+
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
+
+ /* vfree is not atomic - can't be
+ called with IRQs disabled
+ */
+ dprintk(1, "%s: buf[%d] freeing (%p)\n",
+ __func__, i, mem->vaddr);
+
+ vfree(mem->vaddr);
+ mem->vaddr = NULL;
+ }
+
+ q->bufs[i]->map = NULL;
+ q->bufs[i]->baddr = 0;
+ }
+
+ kfree(map);
+
+ videobuf_queue_unlock(q);
+ }
+
+ return;
+}
+
+static const struct vm_operations_struct videobuf_vm_ops = {
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
+};
+
+/* ---------------------------------------------------------------------
+ * vmalloc handlers for the generic methods
+ */
+
+/* The allocated area consists of 3 parts:
+ struct videobuf_buffer
+ struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
+ struct videobuf_vmalloc_memory
+ */
+
+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
+{
+ struct videobuf_vmalloc_memory *mem;
+ struct videobuf_buffer *vb;
+
+ vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
+ if (!vb)
+ return vb;
+
+ mem = vb->priv = ((char *)vb) + size;
+ mem->magic = MAGIC_VMAL_MEM;
+
+ dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
+ __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
+ mem, (long)sizeof(*mem));
+
+ return vb;
+}
+
+static int __videobuf_iolock(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
+{
+ struct videobuf_vmalloc_memory *mem = vb->priv;
+ int pages;
+
+ BUG_ON(!mem);
+
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
+
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ dprintk(1, "%s memory method MMAP\n", __func__);
+
+ /* All handling should be done by __videobuf_mmap_mapper() */
+ if (!mem->vaddr) {
+ printk(KERN_ERR "memory is not alloced/mmapped.\n");
+ return -EINVAL;
+ }
+ break;
+ case V4L2_MEMORY_USERPTR:
+ pages = PAGE_ALIGN(vb->size);
+
+ dprintk(1, "%s memory method USERPTR\n", __func__);
+
+ if (vb->baddr) {
+ printk(KERN_ERR "USERPTR is currently not supported\n");
+ return -EINVAL;
+ }
+
+ /* The only USERPTR currently supported is the one needed for
+ * the read() method.
+ */
+
+ mem->vaddr = vmalloc_user(pages);
+ if (!mem->vaddr) {
+ printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
+ return -ENOMEM;
+ }
+ dprintk(1, "vmalloc is at addr %p (%d pages)\n",
+ mem->vaddr, pages);
+
+#if 0
+ int rc;
+ /* A kernel userptr is also used by the read() method. In this
+ case there's no need to remap, since data will be copied to userspace
+ */
+ if (!vb->baddr)
+ return 0;
+
+ /* FIXME: to properly support USERPTR, remap should occur.
+ The code below won't work, since mem->vma = NULL
+ */
+ /* Try to remap memory */
+ rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
+ if (rc < 0) {
+ printk(KERN_ERR "mmap: remap failed with error %d", rc);
+ return -ENOMEM;
+ }
+#endif
+
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ default:
+ dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);
+
+ /* Currently, doesn't support V4L2_MEMORY_OVERLAY */
+ printk(KERN_ERR "Memory method currently unsupported.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
+{
+ struct videobuf_vmalloc_memory *mem;
+ struct videobuf_mapping *map;
+ int retval, pages;
+
+ dprintk(1, "%s\n", __func__);
+
+ /* create mapping + update buffer list */
+ map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
+ if (NULL == map)
+ return -ENOMEM;
+
+ buf->map = map;
+ map->q = q;
+
+ buf->baddr = vma->vm_start;
+
+ mem = buf->priv;
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
+
+ pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
+ mem->vaddr = vmalloc_user(pages);
+ if (!mem->vaddr) {
+ printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
+ goto error;
+ }
+ dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages);
+
+ /* Try to remap memory */
+ retval = remap_vmalloc_range(vma, mem->vaddr, 0);
+ if (retval < 0) {
+ printk(KERN_ERR "mmap: remap failed with error %d. ", retval);
+ vfree(mem->vaddr);
+ goto error;
+ }
+
+ vma->vm_ops = &videobuf_vm_ops;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = map;
+
+ dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
+ map, q, vma->vm_start, vma->vm_end,
+ (long int)buf->bsize,
+ vma->vm_pgoff, buf->i);
+
+ videobuf_vm_open(vma);
+
+ return 0;
+
+error:
+ mem = NULL;
+ kfree(map);
+ return -ENOMEM;
+}
+
+static struct videobuf_qtype_ops qops = {
+ .magic = MAGIC_QTYPE_OPS,
+
+ .alloc_vb = __videobuf_alloc_vb,
+ .iolock = __videobuf_iolock,
+ .mmap_mapper = __videobuf_mmap_mapper,
+ .vaddr = videobuf_to_vmalloc,
+};
+
+void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
+ const struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv,
+ struct mutex *ext_lock)
+{
+ videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
+ priv, &qops, ext_lock);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
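+
+/*
+ * Example (a sketch with hypothetical names): drivers that copy video
+ * data in software (e.g. USB bridges) typically use this vmalloc
+ * backend; since no DMA-capable device is needed, NULL can be passed
+ * for the struct device:
+ *
+ *	videobuf_queue_vmalloc_init(&mydev->vidq, &mydev_qops,
+ *				    NULL, &mydev->slock,
+ *				    V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ *				    V4L2_FIELD_INTERLACED,
+ *				    sizeof(struct mydev_buffer),
+ *				    mydev, NULL);
+ */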
+
+void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
+{
+ struct videobuf_vmalloc_memory *mem = buf->priv;
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
+
+ return mem->vaddr;
+}
+EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
+
+void videobuf_vmalloc_free(struct videobuf_buffer *buf)
+{
+ struct videobuf_vmalloc_memory *mem = buf->priv;
+
+ /* mmapped memory can't be freed here, otherwise mmapped region
+ would be released, while still needed. In this case, the memory
+ release should happen inside videobuf_vm_close().
+ So, it should free memory only if the memory were allocated for
+ read() operation.
+ */
+ if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
+ return;
+
+ if (!mem)
+ return;
+
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
+
+ vfree(mem->vaddr);
+ mem->vaddr = NULL;
+
+ return;
+}
+EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);
+
diff --git a/kernel/drivers/media/v4l2-core/videobuf2-core.c b/kernel/drivers/media/v4l2-core/videobuf2-core.c
new file mode 100644
index 000000000..66ada01c7
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/videobuf2-core.c
@@ -0,0 +1,3539 @@
+/*
+ * videobuf2-core.c - V4L2 driver helper framework
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * The vb2_thread implementation was based on code from videobuf-dvb.c:
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-common.h>
+#include <media/videobuf2-core.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ pr_info("vb2: %s: " fmt, __func__, ## arg); \
+ } while (0)
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+
+/*
+ * If advanced debugging is on, then count how often each op is called
+ * successfully, which can either be per-buffer or per-queue.
+ *
+ * This makes it easy to check that the 'init' and 'cleanup'
+ * (and variations thereof) stay balanced.
+ */
+
+#define log_memop(vb, op) \
+ dprintk(2, "call_memop(%p, %d, %s)%s\n", \
+ (vb)->vb2_queue, (vb)->v4l2_buf.index, #op, \
+ (vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
+
+#define call_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ int err; \
+ \
+ log_memop(vb, op); \
+ err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
+ if (!err) \
+ (vb)->cnt_mem_ ## op++; \
+ err; \
+})
+
+#define call_ptr_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ void *ptr; \
+ \
+ log_memop(vb, op); \
+ ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \
+ if (!IS_ERR_OR_NULL(ptr)) \
+ (vb)->cnt_mem_ ## op++; \
+ ptr; \
+})
+
+#define call_void_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ \
+ log_memop(vb, op); \
+ if (_q->mem_ops->op) \
+ _q->mem_ops->op(args); \
+ (vb)->cnt_mem_ ## op++; \
+})
+
+#define log_qop(q, op) \
+ dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
+ (q)->ops->op ? "" : " (nop)")
+
+#define call_qop(q, op, args...) \
+({ \
+ int err; \
+ \
+ log_qop(q, op); \
+ err = (q)->ops->op ? (q)->ops->op(args) : 0; \
+ if (!err) \
+ (q)->cnt_ ## op++; \
+ err; \
+})
+
+#define call_void_qop(q, op, args...) \
+({ \
+ log_qop(q, op); \
+ if ((q)->ops->op) \
+ (q)->ops->op(args); \
+ (q)->cnt_ ## op++; \
+})
+
+#define log_vb_qop(vb, op, args...) \
+ dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \
+ (vb)->vb2_queue, (vb)->v4l2_buf.index, #op, \
+ (vb)->vb2_queue->ops->op ? "" : " (nop)")
+
+#define call_vb_qop(vb, op, args...) \
+({ \
+ int err; \
+ \
+ log_vb_qop(vb, op); \
+ err = (vb)->vb2_queue->ops->op ? \
+ (vb)->vb2_queue->ops->op(args) : 0; \
+ if (!err) \
+ (vb)->cnt_ ## op++; \
+ err; \
+})
+
+#define call_void_vb_qop(vb, op, args...) \
+({ \
+ log_vb_qop(vb, op); \
+ if ((vb)->vb2_queue->ops->op) \
+ (vb)->vb2_queue->ops->op(args); \
+ (vb)->cnt_ ## op++; \
+})
+
+#else
+
+#define call_memop(vb, op, args...) \
+ ((vb)->vb2_queue->mem_ops->op ? \
+ (vb)->vb2_queue->mem_ops->op(args) : 0)
+
+#define call_ptr_memop(vb, op, args...) \
+ ((vb)->vb2_queue->mem_ops->op ? \
+ (vb)->vb2_queue->mem_ops->op(args) : NULL)
+
+#define call_void_memop(vb, op, args...) \
+ do { \
+ if ((vb)->vb2_queue->mem_ops->op) \
+ (vb)->vb2_queue->mem_ops->op(args); \
+ } while (0)
+
+#define call_qop(q, op, args...) \
+ ((q)->ops->op ? (q)->ops->op(args) : 0)
+
+#define call_void_qop(q, op, args...) \
+ do { \
+ if ((q)->ops->op) \
+ (q)->ops->op(args); \
+ } while (0)
+
+#define call_vb_qop(vb, op, args...) \
+ ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
+
+#define call_void_vb_qop(vb, op, args...) \
+ do { \
+ if ((vb)->vb2_queue->ops->op) \
+ (vb)->vb2_queue->ops->op(args); \
+ } while (0)
+
+#endif
+
+/* Flags that are set by the vb2 core */
+#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
+ V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
+ V4L2_BUF_FLAG_PREPARED | \
+ V4L2_BUF_FLAG_TIMESTAMP_MASK)
+/* Output buffer flags that should be passed on to the driver */
+#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
+ V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
+
+static void __vb2_queue_cancel(struct vb2_queue *q);
+
+/**
+ * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
+ */
+static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ enum dma_data_direction dma_dir =
+ V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ void *mem_priv;
+ int plane;
+
+ /*
+ * Allocate memory for all planes in this buffer
+ * NOTE: mmapped areas should be page aligned
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
+
+ mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
+ size, dma_dir, q->gfp_flags);
+ if (IS_ERR_OR_NULL(mem_priv))
+ goto free;
+
+ /* Associate allocator private data with this plane */
+ vb->planes[plane].mem_priv = mem_priv;
+ vb->v4l2_planes[plane].length = q->plane_sizes[plane];
+ }
+
+ return 0;
+free:
+ /* Free already allocated memory if one of the allocations failed */
+ for (; plane > 0; --plane) {
+ call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
+ vb->planes[plane - 1].mem_priv = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * __vb2_buf_mem_free() - free memory of the given buffer
+ */
+static void __vb2_buf_mem_free(struct vb2_buffer *vb)
+{
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ call_void_memop(vb, put, vb->planes[plane].mem_priv);
+ vb->planes[plane].mem_priv = NULL;
+ dprintk(3, "freed plane %d of buffer %d\n", plane,
+ vb->v4l2_buf.index);
+ }
+}
+
+/**
+ * __vb2_buf_userptr_put() - release userspace memory associated with
+ * a USERPTR buffer
+ */
+static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
+{
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->planes[plane].mem_priv)
+ call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+ vb->planes[plane].mem_priv = NULL;
+ }
+}
+
+/**
+ * __vb2_plane_dmabuf_put() - release memory associated with
+ * a DMABUF shared plane
+ */
+static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
+{
+ if (!p->mem_priv)
+ return;
+
+ if (p->dbuf_mapped)
+ call_void_memop(vb, unmap_dmabuf, p->mem_priv);
+
+ call_void_memop(vb, detach_dmabuf, p->mem_priv);
+ dma_buf_put(p->dbuf);
+ memset(p, 0, sizeof(*p));
+}
+
+/**
+ * __vb2_buf_dmabuf_put() - release memory associated with
+ * a DMABUF shared buffer
+ */
+static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
+{
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
+}
+
+/**
+ * __setup_lengths() - setup initial lengths for every plane in
+ * every buffer on the queue
+ */
+static void __setup_lengths(struct vb2_queue *q, unsigned int n)
+{
+ unsigned int buffer, plane;
+ struct vb2_buffer *vb;
+
+ for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
+ vb = q->bufs[buffer];
+ if (!vb)
+ continue;
+
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ vb->v4l2_planes[plane].length = q->plane_sizes[plane];
+ }
+}
+
+/**
+ * __setup_offsets() - setup unique offsets ("cookies") for every plane in
+ * every buffer on the queue
+ */
+static void __setup_offsets(struct vb2_queue *q, unsigned int n)
+{
+ unsigned int buffer, plane;
+ struct vb2_buffer *vb;
+ unsigned long off;
+
+ if (q->num_buffers) {
+ struct v4l2_plane *p;
+ vb = q->bufs[q->num_buffers - 1];
+ p = &vb->v4l2_planes[vb->num_planes - 1];
+ off = PAGE_ALIGN(p->m.mem_offset + p->length);
+ } else {
+ off = 0;
+ }
+
+ for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
+ vb = q->bufs[buffer];
+ if (!vb)
+ continue;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ vb->v4l2_planes[plane].m.mem_offset = off;
+
+ dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
+ buffer, plane, off);
+
+ off += vb->v4l2_planes[plane].length;
+ off = PAGE_ALIGN(off);
+ }
+ }
+}
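+
+/*
+ * For illustration (not part of the original file): userspace pairs
+ * each plane's mem_offset cookie, as set up above, with mmap(). After
+ * VIDIOC_QUERYBUF fills in buf.m.offset, a single-planar buffer is
+ * mapped with:
+ *
+ *	ioctl(fd, VIDIOC_QUERYBUF, &buf);
+ *	ptr = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
+ *		   MAP_SHARED, fd, buf.m.offset);
+ */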
+
+/**
+ * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
+ * video buffer memory for all buffers/planes on the queue and initialize the
+ * queue
+ *
+ * Returns the number of buffers successfully allocated.
+ */
+static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
+ unsigned int num_buffers, unsigned int num_planes)
+{
+ unsigned int buffer;
+ struct vb2_buffer *vb;
+ int ret;
+
+ for (buffer = 0; buffer < num_buffers; ++buffer) {
+ /* Allocate videobuf buffer structures */
+ vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
+ if (!vb) {
+ dprintk(1, "memory alloc for buffer struct failed\n");
+ break;
+ }
+
+ /* Length stores number of planes for multiplanar buffers */
+ if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
+ vb->v4l2_buf.length = num_planes;
+
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+ vb->vb2_queue = q;
+ vb->num_planes = num_planes;
+ vb->v4l2_buf.index = q->num_buffers + buffer;
+ vb->v4l2_buf.type = q->type;
+ vb->v4l2_buf.memory = memory;
+
+ /* Allocate video buffer memory for the MMAP type */
+ if (memory == V4L2_MEMORY_MMAP) {
+ ret = __vb2_buf_mem_alloc(vb);
+ if (ret) {
+ dprintk(1, "failed allocating memory for "
+ "buffer %d\n", buffer);
+ kfree(vb);
+ break;
+ }
+ /*
+ * Call the driver-provided buffer initialization
+ * callback, if given. An error in initialization
+ * results in queue setup failure.
+ */
+ ret = call_vb_qop(vb, buf_init, vb);
+ if (ret) {
+ dprintk(1, "buffer %d %p initialization"
+ " failed\n", buffer, vb);
+ __vb2_buf_mem_free(vb);
+ kfree(vb);
+ break;
+ }
+ }
+
+ q->bufs[q->num_buffers + buffer] = vb;
+ }
+
+ __setup_lengths(q, buffer);
+ if (memory == V4L2_MEMORY_MMAP)
+ __setup_offsets(q, buffer);
+
+ dprintk(1, "allocated %d buffers, %d plane(s) each\n",
+ buffer, num_planes);
+
+ return buffer;
+}
+
+/**
+ * __vb2_free_mem() - release all video buffer memory for a given queue
+ */
+static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
+{
+ unsigned int buffer;
+ struct vb2_buffer *vb;
+
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ vb = q->bufs[buffer];
+ if (!vb)
+ continue;
+
+ /* Free MMAP buffers or release USERPTR buffers */
+ if (q->memory == V4L2_MEMORY_MMAP)
+ __vb2_buf_mem_free(vb);
+ else if (q->memory == V4L2_MEMORY_DMABUF)
+ __vb2_buf_dmabuf_put(vb);
+ else
+ __vb2_buf_userptr_put(vb);
+ }
+}
+
+/**
+ * __vb2_queue_free() - free buffers at the end of the queue - video memory and
+ * related information; if no buffers are left, return the queue to an
+ * uninitialized state. Might be called even if the queue has already been freed.
+ */
+static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
+{
+ unsigned int buffer;
+
+ /*
+ * Sanity check: when preparing a buffer the queue lock is released for
+ * a short while (see __buf_prepare for the details), which would allow
+ * a race with a reqbufs which can call this function. Removing the
+ * buffers from underneath __buf_prepare is obviously a bad idea, so we
+ * check if any of the buffers is in the state PREPARING, and if so we
+ * just return -EAGAIN.
+ */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ if (q->bufs[buffer] == NULL)
+ continue;
+ if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
+ dprintk(1, "preparing buffers, cannot free\n");
+ return -EAGAIN;
+ }
+ }
+
+ /* Call driver-provided cleanup function for each buffer, if provided */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ struct vb2_buffer *vb = q->bufs[buffer];
+
+ if (vb && vb->planes[0].mem_priv)
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ }
+
+ /* Release video buffer memory */
+ __vb2_free_mem(q, buffers);
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ /*
+ * Check that all the calls were balanced during the life-time of this
+ * queue. If not (or if the debug level is 1 or up), then dump the
+ * counters to the kernel log.
+ */
+ if (q->num_buffers) {
+ bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
+ q->cnt_wait_prepare != q->cnt_wait_finish;
+
+ if (unbalanced || debug) {
+ pr_info("vb2: counters for queue %p:%s\n", q,
+ unbalanced ? " UNBALANCED!" : "");
+ pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n",
+ q->cnt_queue_setup, q->cnt_start_streaming,
+ q->cnt_stop_streaming);
+ pr_info("vb2: wait_prepare: %u wait_finish: %u\n",
+ q->cnt_wait_prepare, q->cnt_wait_finish);
+ }
+ q->cnt_queue_setup = 0;
+ q->cnt_wait_prepare = 0;
+ q->cnt_wait_finish = 0;
+ q->cnt_start_streaming = 0;
+ q->cnt_stop_streaming = 0;
+ }
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ struct vb2_buffer *vb = q->bufs[buffer];
+ bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
+ vb->cnt_mem_prepare != vb->cnt_mem_finish ||
+ vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
+ vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
+ vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
+ vb->cnt_buf_queue != vb->cnt_buf_done ||
+ vb->cnt_buf_prepare != vb->cnt_buf_finish ||
+ vb->cnt_buf_init != vb->cnt_buf_cleanup;
+
+ if (unbalanced || debug) {
+ pr_info("vb2: counters for queue %p, buffer %d:%s\n",
+ q, buffer, unbalanced ? " UNBALANCED!" : "");
+ pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
+ vb->cnt_buf_init, vb->cnt_buf_cleanup,
+ vb->cnt_buf_prepare, vb->cnt_buf_finish);
+ pr_info("vb2: buf_queue: %u buf_done: %u\n",
+ vb->cnt_buf_queue, vb->cnt_buf_done);
+ pr_info("vb2: alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
+ vb->cnt_mem_alloc, vb->cnt_mem_put,
+ vb->cnt_mem_prepare, vb->cnt_mem_finish,
+ vb->cnt_mem_mmap);
+ pr_info("vb2: get_userptr: %u put_userptr: %u\n",
+ vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
+ pr_info("vb2: attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
+ vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
+ vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
+ pr_info("vb2: get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
+ vb->cnt_mem_get_dmabuf,
+ vb->cnt_mem_num_users,
+ vb->cnt_mem_vaddr,
+ vb->cnt_mem_cookie);
+ }
+ }
+#endif
+
+ /* Free videobuf buffers */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ kfree(q->bufs[buffer]);
+ q->bufs[buffer] = NULL;
+ }
+
+ q->num_buffers -= buffers;
+ if (!q->num_buffers) {
+ q->memory = 0;
+ INIT_LIST_HEAD(&q->queued_list);
+ }
+ return 0;
+}
+
+/**
+ * __verify_planes_array() - verify that the planes array passed in struct
+ * v4l2_buffer from userspace can be safely used
+ */
+static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
+ return 0;
+
+ /* Is memory for copying plane information present? */
+ if (NULL == b->m.planes) {
+ dprintk(1, "multi-planar buffer passed but "
+ "planes array not provided\n");
+ return -EINVAL;
+ }
+
+ if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
+ dprintk(1, "incorrect planes array length, "
+ "expected %d, got %d\n", vb->num_planes, b->length);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * __verify_length() - Verify that the bytesused value for each plane fits in
+ * the plane length and that the data offset doesn't exceed the bytesused value.
+ */
+static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ unsigned int length;
+ unsigned int bytesused;
+ unsigned int plane;
+
+ if (!V4L2_TYPE_IS_OUTPUT(b->type))
+ return 0;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ length = (b->memory == V4L2_MEMORY_USERPTR ||
+ b->memory == V4L2_MEMORY_DMABUF)
+ ? b->m.planes[plane].length
+ : vb->v4l2_planes[plane].length;
+ bytesused = b->m.planes[plane].bytesused
+ ? b->m.planes[plane].bytesused : length;
+
+ if (b->m.planes[plane].bytesused > length)
+ return -EINVAL;
+
+ if (b->m.planes[plane].data_offset > 0 &&
+ b->m.planes[plane].data_offset >= bytesused)
+ return -EINVAL;
+ }
+ } else {
+ length = (b->memory == V4L2_MEMORY_USERPTR)
+ ? b->length : vb->v4l2_planes[0].length;
+ bytesused = b->bytesused ? b->bytesused : length;
+
+ if (b->bytesused > length)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * __buffer_in_use() - return true if the buffer is in use and
+ * the queue cannot be freed (by means of a REQBUFS(0) call)
+ */
+static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
+{
+ unsigned int plane;
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ void *mem_priv = vb->planes[plane].mem_priv;
+ /*
+ * If num_users() has not been provided, call_memop
+ * will return 0, apparently nobody cares about this
+ * case anyway. If num_users() returns more than 1,
+ * we are not the only user of the plane's memory.
+ */
+ if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * __buffers_in_use() - return true if any buffers on the queue are in use and
+ * the queue cannot be freed (by means of a REQBUFS(0) call)
+ */
+static bool __buffers_in_use(struct vb2_queue *q)
+{
+ unsigned int buffer;
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ if (__buffer_in_use(q, q->bufs[buffer]))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
+ * returned to userspace
+ */
+static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+
+ /* Copy back data such as timestamp, flags, etc. */
+ memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
+ b->reserved2 = vb->v4l2_buf.reserved2;
+ b->reserved = vb->v4l2_buf.reserved;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
+ /*
+ * Fill in plane-related data if userspace provided an array
+ * for it. The caller has already verified memory and size.
+ */
+ b->length = vb->num_planes;
+ memcpy(b->m.planes, vb->v4l2_planes,
+ b->length * sizeof(struct v4l2_plane));
+ } else {
+ /*
+ * We use length and offset in v4l2_planes array even for
+ * single-planar buffers, but userspace does not.
+ */
+ b->length = vb->v4l2_planes[0].length;
+ b->bytesused = vb->v4l2_planes[0].bytesused;
+ if (q->memory == V4L2_MEMORY_MMAP)
+ b->m.offset = vb->v4l2_planes[0].m.mem_offset;
+ else if (q->memory == V4L2_MEMORY_USERPTR)
+ b->m.userptr = vb->v4l2_planes[0].m.userptr;
+ else if (q->memory == V4L2_MEMORY_DMABUF)
+ b->m.fd = vb->v4l2_planes[0].m.fd;
+ }
+
+ /*
+ * Clear any buffer state related flags.
+ */
+ b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
+ b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
+ if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
+ V4L2_BUF_FLAG_TIMESTAMP_COPY) {
+ /*
+ * For non-COPY timestamps, drop timestamp source bits
+ * and obtain the timestamp source from the queue.
+ */
+ b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ }
+
+ switch (vb->state) {
+ case VB2_BUF_STATE_QUEUED:
+ case VB2_BUF_STATE_ACTIVE:
+ b->flags |= V4L2_BUF_FLAG_QUEUED;
+ break;
+ case VB2_BUF_STATE_ERROR:
+ b->flags |= V4L2_BUF_FLAG_ERROR;
+ /* fall through */
+ case VB2_BUF_STATE_DONE:
+ b->flags |= V4L2_BUF_FLAG_DONE;
+ break;
+ case VB2_BUF_STATE_PREPARED:
+ b->flags |= V4L2_BUF_FLAG_PREPARED;
+ break;
+ case VB2_BUF_STATE_PREPARING:
+ case VB2_BUF_STATE_DEQUEUED:
+ /* nothing */
+ break;
+ }
+
+ if (__buffer_in_use(q, vb))
+ b->flags |= V4L2_BUF_FLAG_MAPPED;
+}
+
+/**
+ * vb2_querybuf() - query video buffer information
+ * @q: videobuf queue
+ * @b: buffer struct passed from userspace to vidioc_querybuf handler
+ * in driver
+ *
+ * Should be called from vidioc_querybuf ioctl handler in driver.
+ * This function will verify the passed v4l2_buffer structure and fill the
+ * relevant information for the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_querybuf handler in driver.
+ */
+int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ struct vb2_buffer *vb;
+ int ret;
+
+ if (b->type != q->type) {
+ dprintk(1, "wrong buffer type\n");
+ return -EINVAL;
+ }
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "buffer index out of range\n");
+ return -EINVAL;
+ }
+ vb = q->bufs[b->index];
+ ret = __verify_planes_array(vb, b);
+ if (!ret)
+ __fill_v4l2_buffer(vb, b);
+ return ret;
+}
+EXPORT_SYMBOL(vb2_querybuf);
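+
+/*
+ * Example (a sketch; "mydev" is hypothetical): drivers that do not use
+ * the vb2_ioctl_* helpers typically just forward the ioctl:
+ *
+ *	static int mydev_vidioc_querybuf(struct file *file, void *priv,
+ *					 struct v4l2_buffer *b)
+ *	{
+ *		struct mydev *dev = video_drvdata(file);
+ *
+ *		return vb2_querybuf(&dev->queue, b);
+ *	}
+ */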
+
+/**
+ * __verify_userptr_ops() - verify that all memory operations required for
+ * USERPTR queue type have been provided
+ */
+static int __verify_userptr_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
+ !q->mem_ops->put_userptr)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * __verify_mmap_ops() - verify that all memory operations required for
+ * MMAP queue type have been provided
+ */
+static int __verify_mmap_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
+ !q->mem_ops->put || !q->mem_ops->mmap)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * __verify_dmabuf_ops() - verify that all memory operations required for
+ * DMABUF queue type have been provided
+ */
+static int __verify_dmabuf_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
+ !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
+ !q->mem_ops->unmap_dmabuf)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * __verify_memory_type() - Check whether the memory type and buffer type
+ * passed to a buffer operation are compatible with the queue.
+ */
+static int __verify_memory_type(struct vb2_queue *q,
+ enum v4l2_memory memory, enum v4l2_buf_type type)
+{
+ if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
+ memory != V4L2_MEMORY_DMABUF) {
+ dprintk(1, "unsupported memory type\n");
+ return -EINVAL;
+ }
+
+ if (type != q->type) {
+ dprintk(1, "requested type is incorrect\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Make sure all the required memory ops for given memory type
+ * are available.
+ */
+ if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
+ dprintk(1, "MMAP for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
+ dprintk(1, "USERPTR for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+ dprintk(1, "DMABUF for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Place the busy tests at the end: -EBUSY can be ignored when
+ * create_bufs is called with count == 0, but count == 0 should still
+ * do the memory and type validation.
+ */
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * __reqbufs() - Initiate streaming
+ * @q: videobuf2 queue
+ * @req: struct passed from userspace to vidioc_reqbufs handler in driver
+ *
+ * Should be called from vidioc_reqbufs ioctl handler of a driver.
+ * This function:
+ * 1) verifies streaming parameters passed from the userspace,
+ * 2) sets up the queue,
+ * 3) negotiates number of buffers and planes per buffer with the driver
+ * to be used during streaming,
+ * 4) allocates internal buffer structures (struct vb2_buffer), according to
+ * the agreed parameters,
+ * 5) for MMAP memory type, allocates actual video memory, using the
+ * memory handling/allocation routines provided during queue initialization
+ *
+ * If req->count is 0, all the memory will be freed instead.
+ * If the queue has been allocated previously (by a previous vb2_reqbufs call)
+ * and the queue is not busy, memory will be reallocated.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_reqbufs handler in driver.
+ */
+static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
+{
+ unsigned int num_buffers, allocated_buffers, num_planes = 0;
+ int ret;
+
+ if (q->streaming) {
+ dprintk(1, "streaming active\n");
+ return -EBUSY;
+ }
+
+ if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
+ /*
+ * We already have buffers allocated, so first check if they
+ * are not in use and can be freed.
+ */
+ mutex_lock(&q->mmap_lock);
+ if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
+ mutex_unlock(&q->mmap_lock);
+ dprintk(1, "memory in use, cannot free\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Call queue_cancel to clean up any buffers in the PREPARED or
+ * QUEUED state which is possible if buffers were prepared or
+ * queued without ever calling STREAMON.
+ */
+ __vb2_queue_cancel(q);
+ ret = __vb2_queue_free(q, q->num_buffers);
+ mutex_unlock(&q->mmap_lock);
+ if (ret)
+ return ret;
+
+ /*
+ * In case of REQBUFS(0) return immediately without calling
+ * driver's queue_setup() callback and allocating resources.
+ */
+ if (req->count == 0)
+ return 0;
+ }
+
+ /*
+ * Make sure the requested values and current defaults are sane.
+ */
+ num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
+ num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);
+ memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
+ memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
+ q->memory = req->memory;
+
+ /*
+ * Ask the driver how many buffers and planes per buffer it requires.
+ * Driver also sets the size and allocator context for each plane.
+ */
+ ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
+ q->plane_sizes, q->alloc_ctx);
+ if (ret)
+ return ret;
+
+ /* Finally, allocate buffers and video memory */
+ allocated_buffers = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
+ if (allocated_buffers == 0) {
+ dprintk(1, "memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * There is no point in continuing if we can't allocate the minimum
+ * number of buffers needed by this vb2_queue.
+ */
+ if (allocated_buffers < q->min_buffers_needed)
+ ret = -ENOMEM;
+
+ /*
+ * Check if driver can handle the allocated number of buffers.
+ */
+ if (!ret && allocated_buffers < num_buffers) {
+ num_buffers = allocated_buffers;
+
+ ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
+ &num_planes, q->plane_sizes, q->alloc_ctx);
+
+ if (!ret && allocated_buffers < num_buffers)
+ ret = -ENOMEM;
+
+ /*
+ * Either the driver has accepted a smaller number of buffers,
+ * or .queue_setup() returned an error
+ */
+ }
+
+ mutex_lock(&q->mmap_lock);
+ q->num_buffers = allocated_buffers;
+
+ if (ret < 0) {
+ /*
+ * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+ * from q->num_buffers.
+ */
+ __vb2_queue_free(q, allocated_buffers);
+ mutex_unlock(&q->mmap_lock);
+ return ret;
+ }
+ mutex_unlock(&q->mmap_lock);
+
+ /*
+ * Return the number of successfully allocated buffers
+ * to the userspace.
+ */
+ req->count = allocated_buffers;
+ q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
+
+ return 0;
+}
+
+/**
+ * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
+ * type values.
+ * @q: videobuf2 queue
+ * @req: struct passed from userspace to vidioc_reqbufs handler in driver
+ */
+int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
+{
+ int ret = __verify_memory_type(q, req->memory, req->type);
+
+ return ret ? ret : __reqbufs(q, req);
+}
+EXPORT_SYMBOL_GPL(vb2_reqbufs);
+
+/**
+ * __create_bufs() - Allocate buffers and any required auxiliary structs
+ * @q: videobuf2 queue
+ * @create: creation parameters, passed from userspace to vidioc_create_bufs
+ * handler in driver
+ *
+ * Should be called from vidioc_create_bufs ioctl handler of a driver.
+ * This function:
+ * 1) verifies parameter sanity
+ * 2) calls the .queue_setup() queue operation
+ * 3) performs any necessary memory allocations
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_create_bufs handler in driver.
+ */
+static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
+{
+ unsigned int num_planes = 0, num_buffers, allocated_buffers;
+ int ret;
+
+ if (q->num_buffers == VIDEO_MAX_FRAME) {
+ dprintk(1, "maximum number of buffers already allocated\n");
+ return -ENOBUFS;
+ }
+
+ if (!q->num_buffers) {
+ memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
+ memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
+ q->memory = create->memory;
+ q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
+ }
+
+ num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
+
+ /*
+ * Ask the driver whether the requested number of buffers, planes per
+ * buffer and their sizes are acceptable
+ */
+ ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
+ &num_planes, q->plane_sizes, q->alloc_ctx);
+ if (ret)
+ return ret;
+
+ /* Finally, allocate buffers and video memory */
+ allocated_buffers = __vb2_queue_alloc(q, create->memory, num_buffers,
+ num_planes);
+ if (allocated_buffers == 0) {
+ dprintk(1, "memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Check if driver can handle the so far allocated number of buffers.
+ */
+ if (allocated_buffers < num_buffers) {
+ num_buffers = allocated_buffers;
+
+ /*
+ * q->num_buffers contains the total number of buffers that the
+ * queue driver has set up
+ */
+ ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
+ &num_planes, q->plane_sizes, q->alloc_ctx);
+
+ if (!ret && allocated_buffers < num_buffers)
+ ret = -ENOMEM;
+
+ /*
+ * Either the driver has accepted a smaller number of buffers,
+ * or .queue_setup() returned an error
+ */
+ }
+
+ mutex_lock(&q->mmap_lock);
+ q->num_buffers += allocated_buffers;
+
+ if (ret < 0) {
+ /*
+ * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+ * from q->num_buffers.
+ */
+ __vb2_queue_free(q, allocated_buffers);
+ mutex_unlock(&q->mmap_lock);
+ return -ENOMEM;
+ }
+ mutex_unlock(&q->mmap_lock);
+
+ /*
+ * Return the number of successfully allocated buffers
+ * to the userspace.
+ */
+ create->count = allocated_buffers;
+
+ return 0;
+}
+
+/**
+ * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the
+ * memory and type values.
+ * @q: videobuf2 queue
+ * @create: creation parameters, passed from userspace to vidioc_create_bufs
+ * handler in driver
+ */
+int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
+{
+ int ret = __verify_memory_type(q, create->memory, create->format.type);
+
+ create->index = q->num_buffers;
+ if (create->count == 0)
+ return ret != -EBUSY ? ret : 0;
+ return ret ? ret : __create_bufs(q, create);
+}
+EXPORT_SYMBOL_GPL(vb2_create_bufs);
+
+/**
+ * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
+ * @vb: vb2_buffer to which the plane in question belongs
+ * @plane_no: plane number for which the address is to be returned
+ *
+ * This function returns a kernel virtual address of a given plane if
+ * such a mapping exists, NULL otherwise.
+ */
+void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
+{
+ if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
+ return NULL;
+
+ return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
+
+}
+EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
+
+/**
+ * vb2_plane_cookie() - Return allocator specific cookie for the given plane
+ * @vb: vb2_buffer to which the plane in question belongs
+ * @plane_no: plane number for which the cookie is to be returned
+ *
+ * This function returns an allocator specific cookie for a given plane if
+ * available, NULL otherwise. The allocator should provide some simple static
+ * inline function, which would convert this cookie to the allocator specific
+ * type that can be used directly by the driver to access the buffer. This can
+ * be for example physical address, pointer to scatter list or IOMMU mapping.
+ */
+void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
+{
+ if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
+ return NULL;
+
+ return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
+}
+EXPORT_SYMBOL_GPL(vb2_plane_cookie);
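+
+/*
+ * Example (a sketch): in a driver callback such as buf_queue, the
+ * vaddr or the allocator cookie gives access to plane memory. With
+ * the dma-contig allocator the cookie is a pointer to the plane's
+ * dma_addr_t:
+ *
+ *	void *va = vb2_plane_vaddr(vb, 0);
+ *	dma_addr_t addr = *(dma_addr_t *)vb2_plane_cookie(vb, 0);
+ */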
+
+/**
+ * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
+ * @vb: vb2_buffer returned from the driver
+ * @state: either VB2_BUF_STATE_DONE if the operation finished successfully
+ * or VB2_BUF_STATE_ERROR if the operation finished with an error.
+ * If start_streaming fails then it should return buffers with state
+ * VB2_BUF_STATE_QUEUED to put them back into the queue.
+ *
+ * This function should be called by the driver after a hardware operation on
+ * a buffer is finished and the buffer may be returned to userspace. The driver
+ * cannot use this buffer anymore until videobuf queues it back to the driver
+ * by means of the buf_queue callback. Only buffers previously queued to the
+ * driver by buf_queue can be passed to this function.
+ *
+ * While streaming, a buffer can only be returned in state DONE or ERROR.
+ * The start_streaming op can also return them in case the DMA engine cannot
+ * be started for some reason. In that case the buffers should be returned with
+ * state QUEUED.
+ */
+void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned long flags;
+ unsigned int plane;
+
+ if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
+ return;
+
+ if (WARN_ON(state != VB2_BUF_STATE_DONE &&
+ state != VB2_BUF_STATE_ERROR &&
+ state != VB2_BUF_STATE_QUEUED))
+ state = VB2_BUF_STATE_ERROR;
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ /*
+ * Although this is not a callback, it still does have to balance
+ * with the buf_queue op. So update this counter manually.
+ */
+ vb->cnt_buf_done++;
+#endif
+ dprintk(4, "done processing on buffer %d, state: %d\n",
+ vb->v4l2_buf.index, state);
+
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_void_memop(vb, finish, vb->planes[plane].mem_priv);
+
+ /* Add the buffer to the done buffers list */
+ spin_lock_irqsave(&q->done_lock, flags);
+ vb->state = state;
+ if (state != VB2_BUF_STATE_QUEUED)
+ list_add_tail(&vb->done_entry, &q->done_list);
+ atomic_dec(&q->owned_by_drv_count);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ if (state == VB2_BUF_STATE_QUEUED)
+ return;
+
+ /* Inform any processes that may be waiting for buffers */
+ wake_up(&q->done_wq);
+}
+EXPORT_SYMBOL_GPL(vb2_buffer_done);
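+
+/*
+ * Usage sketch: a capture driver's interrupt handler timestamps the finished
+ * buffer and hands it back to vb2. "mydrv" and its members are hypothetical:
+ *
+ *	static irqreturn_t mydrv_irq(int irq, void *priv)
+ *	{
+ *		struct mydrv *dev = priv;
+ *		struct vb2_buffer *vb = dev->active_buf;
+ *
+ *		v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+ *		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ *		return IRQ_HANDLED;
+ *	}
+ */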
+
+/**
+ * vb2_discard_done() - discard all buffers marked as DONE
+ * @q: videobuf2 queue
+ *
+ * This function is intended to be used with suspend/resume operations. It
+ * discards all 'done' buffers as they would be too old to be requested after
+ * resume.
+ *
+ * Drivers must stop the hardware and synchronize with interrupt handlers and/or
+ * delayed work items before calling this function to make sure no buffer will be
+ * touched by the driver and/or hardware.
+ */
+void vb2_discard_done(struct vb2_queue *q)
+{
+ struct vb2_buffer *vb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->done_lock, flags);
+ list_for_each_entry(vb, &q->done_list, done_entry)
+ vb->state = VB2_BUF_STATE_ERROR;
+ spin_unlock_irqrestore(&q->done_lock, flags);
+}
+EXPORT_SYMBOL_GPL(vb2_discard_done);
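+
+/*
+ * Usage sketch: a resume handler drops stale buffers once the hardware is
+ * quiesced. "mydrv" and mydrv_restart_dma() are hypothetical:
+ *
+ *	static int mydrv_resume(struct device *dev)
+ *	{
+ *		struct mydrv *priv = dev_get_drvdata(dev);
+ *
+ *		vb2_discard_done(&priv->queue);
+ *		return mydrv_restart_dma(priv);
+ *	}
+ */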
+
+/**
+ * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
+ * v4l2_buffer by the userspace. The caller has already verified that struct
+ * v4l2_buffer has a valid number of planes.
+ */
+static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
+ struct v4l2_plane *v4l2_planes)
+{
+ unsigned int plane;
+
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ if (WARN_ON_ONCE(b->bytesused == 0)) {
+ pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+ if (vb->vb2_queue->allow_zero_bytesused)
+ pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+ else
+ pr_warn_once("use the actual size instead.\n");
+ }
+ }
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+ if (b->memory == V4L2_MEMORY_USERPTR) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ v4l2_planes[plane].m.userptr =
+ b->m.planes[plane].m.userptr;
+ v4l2_planes[plane].length =
+ b->m.planes[plane].length;
+ }
+ }
+ if (b->memory == V4L2_MEMORY_DMABUF) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ v4l2_planes[plane].m.fd =
+ b->m.planes[plane].m.fd;
+ v4l2_planes[plane].length =
+ b->m.planes[plane].length;
+ }
+ }
+
+ /* Fill in driver-provided information for OUTPUT types */
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * Will have to go up to b->length when API starts
+ * accepting variable number of planes.
+ *
+ * If bytesused == 0 for the output buffer, then fall
+ * back to the full buffer size. In that case
+ * userspace clearly never bothered to set it and
+ * it's a safe assumption that they really meant to
+ * use the full plane sizes.
+ *
+ * Some drivers, e.g. old codec drivers, use bytesused == 0
+ * as a way to indicate that streaming is finished.
+ * In that case, the driver should use the
+ * allow_zero_bytesused flag to keep old userspace
+ * applications working.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct v4l2_plane *pdst = &v4l2_planes[plane];
+ struct v4l2_plane *psrc = &b->m.planes[plane];
+
+ if (vb->vb2_queue->allow_zero_bytesused)
+ pdst->bytesused = psrc->bytesused;
+ else
+ pdst->bytesused = psrc->bytesused ?
+ psrc->bytesused : pdst->length;
+ pdst->data_offset = psrc->data_offset;
+ }
+ }
+ } else {
+ /*
+ * Single-planar buffers do not use planes array,
+ * so fill in relevant v4l2_buffer struct fields instead.
+		 * In videobuf we use our internal v4l2_planes struct for
+ * single-planar buffers as well, for simplicity.
+ *
+ * If bytesused == 0 for the output buffer, then fall back
+ * to the full buffer size as that's a sensible default.
+ *
+ * Some drivers, e.g. old codec drivers, use bytesused == 0 as
+ * a way to indicate that streaming is finished. In that case,
+ * the driver should use the allow_zero_bytesused flag to keep
+ * old userspace applications working.
+ */
+ if (b->memory == V4L2_MEMORY_USERPTR) {
+ v4l2_planes[0].m.userptr = b->m.userptr;
+ v4l2_planes[0].length = b->length;
+ }
+
+ if (b->memory == V4L2_MEMORY_DMABUF) {
+ v4l2_planes[0].m.fd = b->m.fd;
+ v4l2_planes[0].length = b->length;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ if (vb->vb2_queue->allow_zero_bytesused)
+ v4l2_planes[0].bytesused = b->bytesused;
+ else
+ v4l2_planes[0].bytesused = b->bytesused ?
+ b->bytesused : v4l2_planes[0].length;
+ } else
+ v4l2_planes[0].bytesused = 0;
+
+ }
+
+ /* Zero flags that the vb2 core handles */
+ vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
+ if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
+ V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * Non-COPY timestamps and non-OUTPUT queues will get
+ * their timestamp and timestamp source flags from the
+ * queue.
+ */
+ vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * For output buffers mask out the timecode flag:
+ * this will be handled later in vb2_internal_qbuf().
+ * The 'field' is valid metadata for this output buffer
+ * and so that needs to be copied here.
+ */
+ vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE;
+ vb->v4l2_buf.field = b->field;
+ } else {
+ /* Zero any output buffer flags as this is a capture buffer */
+ vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS;
+ }
+}
+
+/**
+ * __qbuf_mmap() - handle qbuf of an MMAP buffer
+ */
+static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ __fill_vb2_buffer(vb, b, vb->v4l2_planes);
+ return call_vb_qop(vb, buf_prepare, vb);
+}
+
+/**
+ * __qbuf_userptr() - handle qbuf of a USERPTR buffer
+ */
+static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ unsigned int plane;
+ int ret;
+ enum dma_data_direction dma_dir =
+ V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ bool reacquired = vb->planes[0].mem_priv == NULL;
+
+ memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
+ /* Copy relevant information provided by the userspace */
+ __fill_vb2_buffer(vb, b, planes);
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ /* Skip the plane if already verified */
+ if (vb->v4l2_planes[plane].m.userptr &&
+ vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
+ && vb->v4l2_planes[plane].length == planes[plane].length)
+ continue;
+
+		dprintk(3, "userspace address for plane %d changed, reacquiring memory\n",
+			plane);
+
+ /* Check if the provided plane buffer is large enough */
+ if (planes[plane].length < q->plane_sizes[plane]) {
+			dprintk(1, "provided buffer size %u is less than setup size %u for plane %d\n",
+				planes[plane].length,
+				q->plane_sizes[plane], plane);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Release previously acquired memory if present */
+ if (vb->planes[plane].mem_priv) {
+ if (!reacquired) {
+ reacquired = true;
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ }
+ call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+ }
+
+ vb->planes[plane].mem_priv = NULL;
+ memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
+
+ /* Acquire each plane's memory */
+ mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
+ planes[plane].m.userptr,
+ planes[plane].length, dma_dir);
+ if (IS_ERR_OR_NULL(mem_priv)) {
+			dprintk(1, "failed acquiring userspace memory for plane %d\n",
+				plane);
+ ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
+ goto err;
+ }
+ vb->planes[plane].mem_priv = mem_priv;
+ }
+
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ vb->v4l2_planes[plane] = planes[plane];
+
+ if (reacquired) {
+ /*
+ * One or more planes changed, so we must call buf_init to do
+ * the driver-specific initialization on the newly acquired
+ * buffer, if provided.
+ */
+ ret = call_vb_qop(vb, buf_init, vb);
+ if (ret) {
+ dprintk(1, "buffer initialization failed\n");
+ goto err;
+ }
+ }
+
+ ret = call_vb_qop(vb, buf_prepare, vb);
+ if (ret) {
+ dprintk(1, "buffer preparation failed\n");
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ goto err;
+ }
+
+ return 0;
+err:
+ /* In case of errors, release planes that were already acquired */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->planes[plane].mem_priv)
+ call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+ vb->planes[plane].mem_priv = NULL;
+ vb->v4l2_planes[plane].m.userptr = 0;
+ vb->v4l2_planes[plane].length = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
+ */
+static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ unsigned int plane;
+ int ret;
+ enum dma_data_direction dma_dir =
+ V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ bool reacquired = vb->planes[0].mem_priv == NULL;
+
+ memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
+ /* Copy relevant information provided by the userspace */
+ __fill_vb2_buffer(vb, b, planes);
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
+
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(1, "invalid dmabuf fd for plane %d\n",
+ plane);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* use DMABUF size if length is not provided */
+ if (planes[plane].length == 0)
+ planes[plane].length = dbuf->size;
+
+ if (planes[plane].length < q->plane_sizes[plane]) {
+ dprintk(1, "invalid dmabuf length for plane %d\n",
+ plane);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Skip the plane if already verified */
+ if (dbuf == vb->planes[plane].dbuf &&
+ vb->v4l2_planes[plane].length == planes[plane].length) {
+ dma_buf_put(dbuf);
+ continue;
+ }
+
+ dprintk(1, "buffer for plane %d changed\n", plane);
+
+ if (!reacquired) {
+ reacquired = true;
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ }
+
+ /* Release previously acquired memory if present */
+ __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
+ memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
+
+ /* Acquire each plane's memory */
+ mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
+ dbuf, planes[plane].length, dma_dir);
+ if (IS_ERR(mem_priv)) {
+ dprintk(1, "failed to attach dmabuf\n");
+ ret = PTR_ERR(mem_priv);
+ dma_buf_put(dbuf);
+ goto err;
+ }
+
+ vb->planes[plane].dbuf = dbuf;
+ vb->planes[plane].mem_priv = mem_priv;
+ }
+
+	/*
+	 * TODO: This pins the buffer(s) with dma_buf_map_attachment(), but
+	 * really we want to do this just before the DMA, not while queueing
+	 * the buffer(s).
+	 */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
+ if (ret) {
+ dprintk(1, "failed to map dmabuf for plane %d\n",
+ plane);
+ goto err;
+ }
+ vb->planes[plane].dbuf_mapped = 1;
+ }
+
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ vb->v4l2_planes[plane] = planes[plane];
+
+ if (reacquired) {
+ /*
+ * Call driver-specific initialization on the newly acquired buffer,
+ * if provided.
+ */
+ ret = call_vb_qop(vb, buf_init, vb);
+ if (ret) {
+ dprintk(1, "buffer initialization failed\n");
+ goto err;
+ }
+ }
+
+ ret = call_vb_qop(vb, buf_prepare, vb);
+ if (ret) {
+ dprintk(1, "buffer preparation failed\n");
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ goto err;
+ }
+
+ return 0;
+err:
+ /* In case of errors, release planes that were already acquired */
+ __vb2_buf_dmabuf_put(vb);
+
+ return ret;
+}
+
+/**
+ * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
+ */
+static void __enqueue_in_driver(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
+
+ vb->state = VB2_BUF_STATE_ACTIVE;
+ atomic_inc(&q->owned_by_drv_count);
+
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
+
+ call_void_vb_qop(vb, buf_queue, vb);
+}
+
+static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ int ret;
+
+ ret = __verify_length(vb, b);
+ if (ret < 0) {
+ dprintk(1, "plane parameters verification failed: %d\n", ret);
+ return ret;
+ }
+ if (b->field == V4L2_FIELD_ALTERNATE && V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /*
+ * If the format's field is ALTERNATE, then the buffer's field
+ * should be either TOP or BOTTOM, not ALTERNATE since that
+ * makes no sense. The driver has to know whether the
+ * buffer represents a top or a bottom field in order to
+ * program any DMA correctly. Using ALTERNATE is wrong, since
+ * that just says that it is either a top or a bottom field,
+ * but not which of the two it is.
+ */
+ dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
+ return -EINVAL;
+ }
+
+ if (q->error) {
+ dprintk(1, "fatal error occurred on queue\n");
+ return -EIO;
+ }
+
+ vb->state = VB2_BUF_STATE_PREPARING;
+ vb->v4l2_buf.timestamp.tv_sec = 0;
+ vb->v4l2_buf.timestamp.tv_usec = 0;
+ vb->v4l2_buf.sequence = 0;
+
+ switch (q->memory) {
+ case V4L2_MEMORY_MMAP:
+ ret = __qbuf_mmap(vb, b);
+ break;
+ case V4L2_MEMORY_USERPTR:
+ down_read(&current->mm->mmap_sem);
+ ret = __qbuf_userptr(vb, b);
+ up_read(&current->mm->mmap_sem);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ ret = __qbuf_dmabuf(vb, b);
+ break;
+ default:
+		WARN(1, "Invalid memory type\n");
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ dprintk(1, "buffer preparation failed: %d\n", ret);
+ vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
+
+ return ret;
+}
+
+static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
+ const char *opname)
+{
+ if (b->type != q->type) {
+ dprintk(1, "%s: invalid buffer type\n", opname);
+ return -EINVAL;
+ }
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "%s: buffer index out of range\n", opname);
+ return -EINVAL;
+ }
+
+ if (q->bufs[b->index] == NULL) {
+ /* Should never happen */
+ dprintk(1, "%s: buffer is NULL\n", opname);
+ return -EINVAL;
+ }
+
+ if (b->memory != q->memory) {
+ dprintk(1, "%s: invalid memory type\n", opname);
+ return -EINVAL;
+ }
+
+ return __verify_planes_array(q->bufs[b->index], b);
+}
+
+/**
+ * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_prepare_buf
+ * handler in driver
+ *
+ * Should be called from vidioc_prepare_buf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_prepare callback in the driver (if provided), in which
+ *    driver-specific buffer initialization can be performed.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_prepare_buf handler in driver.
+ */
+int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ struct vb2_buffer *vb;
+ int ret;
+
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+
+ ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
+ if (ret)
+ return ret;
+
+ vb = q->bufs[b->index];
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "invalid buffer state %d\n",
+ vb->state);
+ return -EINVAL;
+ }
+
+ ret = __buf_prepare(vb, b);
+ if (!ret) {
+ /* Fill buffer information for the userspace */
+ __fill_v4l2_buffer(vb, b);
+
+ dprintk(1, "prepare of buffer %d succeeded\n", vb->v4l2_buf.index);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_prepare_buf);
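+
+/*
+ * Usage sketch ("mydrv" is a hypothetical driver):
+ *
+ *	static int mydrv_prepare_buf(struct file *file, void *priv,
+ *				     struct v4l2_buffer *b)
+ *	{
+ *		struct mydrv *dev = video_drvdata(file);
+ *
+ *		return vb2_prepare_buf(&dev->queue, b);
+ *	}
+ */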
+
+/**
+ * vb2_start_streaming() - Attempt to start streaming.
+ * @q: videobuf2 queue
+ *
+ * Attempt to start streaming. When this function is called there must be
+ * at least q->min_buffers_needed buffers queued up (i.e. the minimum
+ * number of buffers required for the DMA engine to function). If the
+ * @start_streaming op fails it is supposed to return all the driver-owned
+ * buffers back to vb2 in state QUEUED. Check if that happened and if
+ * not warn and reclaim them forcefully.
+ */
+static int vb2_start_streaming(struct vb2_queue *q)
+{
+ struct vb2_buffer *vb;
+ int ret;
+
+ /*
+ * If any buffers were queued before streamon,
+ * we can now pass them to driver for processing.
+ */
+ list_for_each_entry(vb, &q->queued_list, queued_entry)
+ __enqueue_in_driver(vb);
+
+ /* Tell the driver to start streaming */
+ q->start_streaming_called = 1;
+ ret = call_qop(q, start_streaming, q,
+ atomic_read(&q->owned_by_drv_count));
+ if (!ret)
+ return 0;
+
+ q->start_streaming_called = 0;
+
+ dprintk(1, "driver refused to start streaming\n");
+ /*
+ * If you see this warning, then the driver isn't cleaning up properly
+ * after a failed start_streaming(). See the start_streaming()
+ * documentation in videobuf2-core.h for more information how buffers
+ * should be returned to vb2 in start_streaming().
+ */
+ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
+ unsigned i;
+
+ /*
+ * Forcefully reclaim buffers if the driver did not
+ * correctly return them to vb2.
+ */
+ for (i = 0; i < q->num_buffers; ++i) {
+ vb = q->bufs[i];
+ if (vb->state == VB2_BUF_STATE_ACTIVE)
+ vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
+ }
+ /* Must be zero now */
+ WARN_ON(atomic_read(&q->owned_by_drv_count));
+ }
+ /*
+ * If done_list is not empty, then start_streaming() didn't call
+ * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
+ * STATE_DONE.
+ */
+ WARN_ON(!list_empty(&q->done_list));
+ return ret;
+}
+
+static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
+ struct vb2_buffer *vb;
+
+ if (ret)
+ return ret;
+
+ vb = q->bufs[b->index];
+
+ switch (vb->state) {
+ case VB2_BUF_STATE_DEQUEUED:
+ ret = __buf_prepare(vb, b);
+ if (ret)
+ return ret;
+ break;
+ case VB2_BUF_STATE_PREPARED:
+ break;
+ case VB2_BUF_STATE_PREPARING:
+ dprintk(1, "buffer still being prepared\n");
+ return -EINVAL;
+ default:
+ dprintk(1, "invalid buffer state %d\n", vb->state);
+ return -EINVAL;
+ }
+
+ /*
+ * Add to the queued buffers list, a buffer will stay on it until
+ * dequeued in dqbuf.
+ */
+ list_add_tail(&vb->queued_entry, &q->queued_list);
+ q->queued_count++;
+ q->waiting_for_buffers = false;
+ vb->state = VB2_BUF_STATE_QUEUED;
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /*
+ * For output buffers copy the timestamp if needed,
+ * and the timecode field and flag if needed.
+ */
+ if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_COPY)
+ vb->v4l2_buf.timestamp = b->timestamp;
+ vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
+ if (b->flags & V4L2_BUF_FLAG_TIMECODE)
+ vb->v4l2_buf.timecode = b->timecode;
+ }
+
+ /*
+ * If already streaming, give the buffer to driver for processing.
+ * If not, the buffer will be given to driver on next streamon.
+ */
+ if (q->start_streaming_called)
+ __enqueue_in_driver(vb);
+
+ /* Fill buffer information for the userspace */
+ __fill_v4l2_buffer(vb, b);
+
+ /*
+ * If streamon has been called, and we haven't yet called
+ * start_streaming() since not enough buffers were queued, and
+ * we now have reached the minimum number of queued buffers,
+ * then we can finally call start_streaming().
+ */
+ if (q->streaming && !q->start_streaming_called &&
+ q->queued_count >= q->min_buffers_needed) {
+ ret = vb2_start_streaming(q);
+ if (ret)
+ return ret;
+ }
+
+ dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
+ return 0;
+}
+
+/**
+ * vb2_qbuf() - Queue a buffer from userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_qbuf handler
+ * in driver
+ *
+ * Should be called from vidioc_qbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
+ * which driver-specific buffer initialization can be performed,
+ * 3) if streaming is on, queues the buffer in the driver by means of the buf_queue
+ * callback for processing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_qbuf handler in driver.
+ */
+int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+
+ return vb2_internal_qbuf(q, b);
+}
+EXPORT_SYMBOL_GPL(vb2_qbuf);
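+
+/*
+ * Usage sketch ("mydrv" is a hypothetical driver):
+ *
+ *	static int mydrv_qbuf(struct file *file, void *priv,
+ *			      struct v4l2_buffer *b)
+ *	{
+ *		struct mydrv *dev = video_drvdata(file);
+ *
+ *		return vb2_qbuf(&dev->queue, b);
+ *	}
+ */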
+
+/**
+ * __vb2_wait_for_done_vb() - wait for a buffer to become available
+ * for dequeuing
+ *
+ * Will sleep if required when nonblocking == false.
+ */
+static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
+{
+ /*
+ * All operations on vb_done_list are performed under done_lock
+ * spinlock protection. However, buffers may be removed from
+ * it and returned to userspace only while holding both driver's
+ * lock and the done_lock spinlock. Thus we can be sure that as
+ * long as we hold the driver's lock, the list will remain not
+ * empty if list_empty() check succeeds.
+ */
+
+ for (;;) {
+ int ret;
+
+ if (!q->streaming) {
+ dprintk(1, "streaming off, will not wait for buffers\n");
+ return -EINVAL;
+ }
+
+ if (q->error) {
+			dprintk(1, "queue in error state, will not wait for buffers\n");
+ return -EIO;
+ }
+
+ if (!list_empty(&q->done_list)) {
+ /*
+ * Found a buffer that we were waiting for.
+ */
+ break;
+ }
+
+ if (nonblocking) {
+			dprintk(1, "nonblocking and no buffers to dequeue, will not wait\n");
+ return -EAGAIN;
+ }
+
+ /*
+ * We are streaming and blocking, wait for another buffer to
+ * become ready or for streamoff. Driver's lock is released to
+ * allow streamoff or qbuf to be called while waiting.
+ */
+ call_void_qop(q, wait_prepare, q);
+
+ /*
+ * All locks have been released, it is safe to sleep now.
+ */
+ dprintk(3, "will sleep waiting for buffers\n");
+ ret = wait_event_interruptible(q->done_wq,
+ !list_empty(&q->done_list) || !q->streaming ||
+ q->error);
+
+ /*
+ * We need to reevaluate both conditions again after reacquiring
+ * the locks or return an error if one occurred.
+ */
+ call_void_qop(q, wait_finish, q);
+ if (ret) {
+ dprintk(1, "sleep was interrupted\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * __vb2_get_done_vb() - get a buffer ready for dequeuing
+ *
+ * Will sleep if required when nonblocking == false.
+ */
+static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+ struct v4l2_buffer *b, int nonblocking)
+{
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Wait for at least one buffer to become available on the done_list.
+ */
+ ret = __vb2_wait_for_done_vb(q, nonblocking);
+ if (ret)
+ return ret;
+
+ /*
+ * Driver's lock has been held since we last verified that done_list
+ * is not empty, so no need for another list_empty(done_list) check.
+ */
+ spin_lock_irqsave(&q->done_lock, flags);
+ *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
+ /*
+ * Only remove the buffer from done_list if v4l2_buffer can handle all
+ * the planes.
+ */
+ ret = __verify_planes_array(*vb, b);
+ if (!ret)
+ list_del(&(*vb)->done_entry);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ return ret;
+}
+
+/**
+ * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
+ * @q: videobuf2 queue
+ *
+ * This function will wait until all buffers that have been given to the driver
+ * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
+ * the wait_prepare/wait_finish pair. It is intended to be called with all locks
+ * taken, for example from the stop_streaming() callback.
+ */
+int vb2_wait_for_all_buffers(struct vb2_queue *q)
+{
+ if (!q->streaming) {
+ dprintk(1, "streaming off, will not wait for buffers\n");
+ return -EINVAL;
+ }
+
+ if (q->start_streaming_called)
+ wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
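+
+/*
+ * Usage sketch: a stop_streaming callback that stops issuing new DMA
+ * transfers and then waits for in-flight buffers to come back through
+ * vb2_buffer_done(). mydrv_disable_dma() is a hypothetical helper:
+ *
+ *	static void mydrv_stop_streaming(struct vb2_queue *q)
+ *	{
+ *		struct mydrv *dev = vb2_get_drv_priv(q);
+ *
+ *		mydrv_disable_dma(dev);
+ *		vb2_wait_for_all_buffers(q);
+ *	}
+ */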
+
+/**
+ * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
+ */
+static void __vb2_dqbuf(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int i;
+
+ /* nothing to do if the buffer is already dequeued */
+ if (vb->state == VB2_BUF_STATE_DEQUEUED)
+ return;
+
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+
+ /* unmap DMABUF buffer */
+ if (q->memory == V4L2_MEMORY_DMABUF)
+ for (i = 0; i < vb->num_planes; ++i) {
+ if (!vb->planes[i].dbuf_mapped)
+ continue;
+ call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
+ vb->planes[i].dbuf_mapped = 0;
+ }
+}
+
+static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+{
+ struct vb2_buffer *vb = NULL;
+ int ret;
+
+ if (b->type != q->type) {
+ dprintk(1, "invalid buffer type\n");
+ return -EINVAL;
+ }
+ ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
+ if (ret < 0)
+ return ret;
+
+ switch (vb->state) {
+ case VB2_BUF_STATE_DONE:
+ dprintk(3, "returning done buffer\n");
+ break;
+ case VB2_BUF_STATE_ERROR:
+ dprintk(3, "returning done buffer with errors\n");
+ break;
+ default:
+ dprintk(1, "invalid buffer state\n");
+ return -EINVAL;
+ }
+
+ call_void_vb_qop(vb, buf_finish, vb);
+
+ /* Fill buffer information for the userspace */
+ __fill_v4l2_buffer(vb, b);
+ /* Remove from videobuf queue */
+ list_del(&vb->queued_entry);
+ q->queued_count--;
+ /* go back to dequeued state */
+ __vb2_dqbuf(vb);
+
+ dprintk(1, "dqbuf of buffer %d, with state %d\n",
+ vb->v4l2_buf.index, vb->state);
+
+ return 0;
+}
+
+/**
+ * vb2_dqbuf() - Dequeue a buffer to the userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_dqbuf handler
+ * in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ * buffers ready for dequeuing are present. Normally the driver
+ * would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from vidioc_dqbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_finish callback in the driver (if provided), in which the
+ *    driver can perform any additional operations that may be required before
+ * returning the buffer to userspace, such as cache sync,
+ * 3) fills the buffer struct members with relevant information for
+ *    userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_dqbuf handler in driver.
+ */
+int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+{
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_internal_dqbuf(q, b, nonblocking);
+}
+EXPORT_SYMBOL_GPL(vb2_dqbuf);
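+
+/*
+ * Usage sketch: the nonblocking argument normally comes from the file flags
+ * ("mydrv" is hypothetical):
+ *
+ *	static int mydrv_dqbuf(struct file *file, void *priv,
+ *			       struct v4l2_buffer *b)
+ *	{
+ *		struct mydrv *dev = video_drvdata(file);
+ *
+ *		return vb2_dqbuf(&dev->queue, b, file->f_flags & O_NONBLOCK);
+ *	}
+ */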
+
+/**
+ * __vb2_queue_cancel() - cancel and stop (pause) streaming
+ *
+ * Removes all queued buffers from the driver's queue and all buffers queued by
+ * userspace from videobuf's queue. Returns the queue to the state after reqbufs.
+ */
+static void __vb2_queue_cancel(struct vb2_queue *q)
+{
+ unsigned int i;
+
+ /*
+ * Tell driver to stop all transactions and release all queued
+ * buffers.
+ */
+ if (q->start_streaming_called)
+ call_void_qop(q, stop_streaming, q);
+
+ /*
+ * If you see this warning, then the driver isn't cleaning up properly
+ * in stop_streaming(). See the stop_streaming() documentation in
+ * videobuf2-core.h for more information how buffers should be returned
+ * to vb2 in stop_streaming().
+ */
+ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
+ for (i = 0; i < q->num_buffers; ++i)
+ if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
+ vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
+ /* Must be zero now */
+ WARN_ON(atomic_read(&q->owned_by_drv_count));
+ }
+
+ q->streaming = 0;
+ q->start_streaming_called = 0;
+ q->queued_count = 0;
+ q->error = 0;
+
+ /*
+ * Remove all buffers from videobuf's list...
+ */
+ INIT_LIST_HEAD(&q->queued_list);
+ /*
+ * ...and done list; userspace will not receive any buffers it
+ * has not already dequeued before initiating cancel.
+ */
+ INIT_LIST_HEAD(&q->done_list);
+ atomic_set(&q->owned_by_drv_count, 0);
+ wake_up_all(&q->done_wq);
+
+ /*
+ * Reinitialize all buffers for next use.
+ * Make sure to call buf_finish for any queued buffers. Normally
+ * that's done in dqbuf, but that's not going to happen when we
+ * cancel the whole queue. Note: this code belongs here, not in
+ * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
+ * call to __fill_v4l2_buffer() after buf_finish(). That order can't
+ * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
+ */
+ for (i = 0; i < q->num_buffers; ++i) {
+ struct vb2_buffer *vb = q->bufs[i];
+
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ vb->state = VB2_BUF_STATE_PREPARED;
+ call_void_vb_qop(vb, buf_finish, vb);
+ }
+ __vb2_dqbuf(vb);
+ }
+}
+
+static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ int ret;
+
+ if (type != q->type) {
+ dprintk(1, "invalid stream type\n");
+ return -EINVAL;
+ }
+
+ if (q->streaming) {
+ dprintk(3, "already streaming\n");
+ return 0;
+ }
+
+ if (!q->num_buffers) {
+ dprintk(1, "no buffers have been allocated\n");
+ return -EINVAL;
+ }
+
+ if (q->num_buffers < q->min_buffers_needed) {
+ dprintk(1, "need at least %u allocated buffers\n",
+ q->min_buffers_needed);
+ return -EINVAL;
+ }
+
+ /*
+ * Tell driver to start streaming provided sufficient buffers
+ * are available.
+ */
+ if (q->queued_count >= q->min_buffers_needed) {
+ ret = vb2_start_streaming(q);
+ if (ret) {
+ __vb2_queue_cancel(q);
+ return ret;
+ }
+ }
+
+ q->streaming = 1;
+
+ dprintk(3, "successful\n");
+ return 0;
+}
+
+/**
+ * vb2_queue_error() - signal a fatal error on the queue
+ * @q: videobuf2 queue
+ *
+ * Flag that a fatal unrecoverable error has occurred and wake up all processes
+ * waiting on the queue. Polling will now set POLLERR and queuing and dequeuing
+ * buffers will return -EIO.
+ *
+ * The error flag will be cleared when cancelling the queue, either from
+ * vb2_streamoff or vb2_queue_release. Drivers should thus not call this
+ * function before starting the stream, otherwise the error flag will remain set
+ * until the queue is released when closing the device node.
+ */
+void vb2_queue_error(struct vb2_queue *q)
+{
+ q->error = 1;
+
+ wake_up_all(&q->done_wq);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_error);
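+
+/*
+ * Usage sketch: called from a driver's interrupt handler when an
+ * unrecoverable fault is detected (MYDRV_IRQ_FATAL and "dev" are
+ * hypothetical):
+ *
+ *	if (status & MYDRV_IRQ_FATAL)
+ *		vb2_queue_error(&dev->queue);
+ */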
+
+/**
+ * vb2_streamon - start streaming
+ * @q: videobuf2 queue
+ * @type: type argument passed from userspace to vidioc_streamon handler
+ *
+ * Should be called from vidioc_streamon handler of a driver.
+ * This function:
+ * 1) verifies current state
+ * 2) passes any previously queued buffers to the driver and starts streaming
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamon handler in the driver.
+ */
+int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_internal_streamon(q, type);
+}
+EXPORT_SYMBOL_GPL(vb2_streamon);
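+
+/*
+ * Usage sketch ("mydrv" is a hypothetical driver):
+ *
+ *	static int mydrv_streamon(struct file *file, void *priv,
+ *				  enum v4l2_buf_type type)
+ *	{
+ *		struct mydrv *dev = video_drvdata(file);
+ *
+ *		return vb2_streamon(&dev->queue, type);
+ *	}
+ */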
+
+static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ if (type != q->type) {
+ dprintk(1, "invalid stream type\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Cancel will pause streaming and remove all buffers from the driver
+ * and videobuf, effectively returning control over them to userspace.
+ *
+ * Note that we do this even if q->streaming == 0: if you prepare or
+ * queue buffers, and then call streamoff without ever having called
+ * streamon, you would still expect those buffers to be returned to
+ * their normal dequeued state.
+ */
+ __vb2_queue_cancel(q);
+ q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
+
+ dprintk(3, "successful\n");
+ return 0;
+}
+
+/**
+ * vb2_streamoff - stop streaming
+ * @q: videobuf2 queue
+ * @type: type argument passed from userspace to vidioc_streamoff handler
+ *
+ * Should be called from vidioc_streamoff handler of a driver.
+ * This function:
+ * 1) verifies current state,
+ * 2) stops streaming and dequeues any queued buffers, including those previously
+ * passed to the driver (after waiting for the driver to finish).
+ *
+ * This call can be used for pausing playback.
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamoff handler in the driver.
+ */
+int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_internal_streamoff(q, type);
+}
+EXPORT_SYMBOL_GPL(vb2_streamoff);
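+
+/*
+ * Usage sketch: drivers that store the queue in video_device->queue can
+ * wire up the vb2_ioctl_* helpers (defined further down in this file)
+ * instead of writing the handlers above by hand; "mydrv_ioctl_ops" is a
+ * hypothetical name:
+ *
+ *	static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
+ *		.vidioc_streamon	= vb2_ioctl_streamon,
+ *		.vidioc_streamoff	= vb2_ioctl_streamoff,
+ *		...
+ *	};
+ */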
+
+/**
+ * __find_plane_by_offset() - find plane associated with the given offset off
+ */
+static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
+ unsigned int *_buffer, unsigned int *_plane)
+{
+ struct vb2_buffer *vb;
+ unsigned int buffer, plane;
+
+ /*
+ * Go over all buffers and their planes, comparing the given offset
+ * with an offset assigned to each plane. If a match is found,
+ * return its buffer and plane numbers.
+ */
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ vb = q->bufs[buffer];
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->v4l2_planes[plane].m.mem_offset == off) {
+ *_buffer = buffer;
+ *_plane = plane;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vb2_expbuf() - Export a buffer as a file descriptor
+ * @q: videobuf2 queue
+ * @eb: export buffer structure passed from userspace to vidioc_expbuf
+ * handler in driver
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_expbuf handler in driver.
+ */
+int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
+{
+ struct vb2_buffer *vb = NULL;
+ struct vb2_plane *vb_plane;
+ int ret;
+ struct dma_buf *dbuf;
+
+ if (q->memory != V4L2_MEMORY_MMAP) {
+ dprintk(1, "queue is not currently set up for mmap\n");
+ return -EINVAL;
+ }
+
+ if (!q->mem_ops->get_dmabuf) {
+ dprintk(1, "queue does not support DMA buffer exporting\n");
+ return -EINVAL;
+ }
+
+ if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
+		dprintk(1, "queue supports only O_CLOEXEC and access mode flags\n");
+ return -EINVAL;
+ }
+
+ if (eb->type != q->type) {
+ dprintk(1, "invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ if (eb->index >= q->num_buffers) {
+ dprintk(1, "buffer index out of range\n");
+ return -EINVAL;
+ }
+
+ vb = q->bufs[eb->index];
+
+ if (eb->plane >= vb->num_planes) {
+ dprintk(1, "buffer plane out of range\n");
+ return -EINVAL;
+ }
+
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "expbuf: file io in progress\n");
+ return -EBUSY;
+ }
+
+ vb_plane = &vb->planes[eb->plane];
+
+ dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(1, "failed to export buffer %d, plane %d\n",
+ eb->index, eb->plane);
+ return -EINVAL;
+ }
+
+ ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
+ if (ret < 0) {
+ dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
+ eb->index, eb->plane, ret);
+ dma_buf_put(dbuf);
+ return ret;
+ }
+
+	dprintk(3, "buffer %d, plane %d exported as file descriptor %d\n",
+ eb->index, eb->plane, ret);
+ eb->fd = ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_expbuf);
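+
+/*
+ * Usage sketch ("mydrv" is a hypothetical driver):
+ *
+ *	static int mydrv_expbuf(struct file *file, void *priv,
+ *				struct v4l2_exportbuffer *eb)
+ *	{
+ *		struct mydrv *dev = video_drvdata(file);
+ *
+ *		return vb2_expbuf(&dev->queue, eb);
+ *	}
+ */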
+
+/**
+ * vb2_mmap() - map video buffers into application address space
+ * @q: videobuf2 queue
+ * @vma: vma passed to the mmap file operation handler in the driver
+ *
+ * Should be called from mmap file operation handler of a driver.
+ * This function maps one plane of one of the available video buffers to
+ * userspace. To map the whole video memory allocated on reqbufs, this function
+ * has to be called once per plane per previously allocated buffer.
+ *
+ * When the userspace application calls mmap, it passes an offset returned
+ * earlier by the vidioc_querybuf handler. That offset acts as a "cookie",
+ * which is then used to identify the plane to be mapped.
+ * This function finds the plane with a matching offset and performs the
+ * mapping by means of the provided memory operation.
+ *
+ * The return values from this function are intended to be directly returned
+ * from the mmap handler in driver.
+ */
+int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+{
+ unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+ struct vb2_buffer *vb;
+ unsigned int buffer = 0, plane = 0;
+ int ret;
+ unsigned long length;
+
+ if (q->memory != V4L2_MEMORY_MMAP) {
+ dprintk(1, "queue is not currently set up for mmap\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Check memory area access mode.
+ */
+ if (!(vma->vm_flags & VM_SHARED)) {
+ dprintk(1, "invalid vma flags, VM_SHARED needed\n");
+ return -EINVAL;
+ }
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (!(vma->vm_flags & VM_WRITE)) {
+ dprintk(1, "invalid vma flags, VM_WRITE needed\n");
+ return -EINVAL;
+ }
+ } else {
+ if (!(vma->vm_flags & VM_READ)) {
+ dprintk(1, "invalid vma flags, VM_READ needed\n");
+ return -EINVAL;
+ }
+ }
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "mmap: file io in progress\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Find the plane corresponding to the offset passed by userspace.
+ */
+ ret = __find_plane_by_offset(q, off, &buffer, &plane);
+ if (ret)
+ return ret;
+
+ vb = q->bufs[buffer];
+
+ /*
+ * MMAP requires page_aligned buffers.
+ * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
+ * so, we need to do the same here.
+ */
+ length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
+ if (length < (vma->vm_end - vma->vm_start)) {
+ dprintk(1,
+ "MMAP invalid, as it would overflow buffer length\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&q->mmap_lock);
+ ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
+ mutex_unlock(&q->mmap_lock);
+ if (ret)
+ return ret;
+
+ dprintk(3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_mmap);
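+
+/*
+ * Usage sketch: the mmap file operation forwards to vb2_mmap(), taking the
+ * driver's serialization lock if one is used ("mydrv" and its "lock" are
+ * hypothetical):
+ *
+ *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
+ *	{
+ *		struct mydrv *dev = video_drvdata(file);
+ *		int ret;
+ *
+ *		if (mutex_lock_interruptible(&dev->lock))
+ *			return -ERESTARTSYS;
+ *		ret = vb2_mmap(&dev->queue, vma);
+ *		mutex_unlock(&dev->lock);
+ *		return ret;
+ *	}
+ */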
+
+#ifndef CONFIG_MMU
+unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ unsigned long off = pgoff << PAGE_SHIFT;
+ struct vb2_buffer *vb;
+ unsigned int buffer, plane;
+ void *vaddr;
+ int ret;
+
+ if (q->memory != V4L2_MEMORY_MMAP) {
+ dprintk(1, "queue is not currently set up for mmap\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Find the plane corresponding to the offset passed by userspace.
+ */
+ ret = __find_plane_by_offset(q, off, &buffer, &plane);
+ if (ret)
+ return ret;
+
+ vb = q->bufs[buffer];
+
+ vaddr = vb2_plane_vaddr(vb, plane);
+ return vaddr ? (unsigned long)vaddr : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
+#endif
+
+static int __vb2_init_fileio(struct vb2_queue *q, int read);
+static int __vb2_cleanup_fileio(struct vb2_queue *q);
+
+/**
+ * vb2_poll() - implements poll userspace operation
+ * @q: videobuf2 queue
+ * @file: file argument passed to the poll file operation handler
+ * @wait: wait argument passed to the poll file operation handler
+ *
+ * This function implements the poll file operation handler for a driver.
+ * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
+ * be informed that the file descriptor of a video device is available for
+ * reading.
+ * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
+ * will be reported as available for writing.
+ *
+ * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
+ * pending events.
+ *
+ * The return values from this function are intended to be directly returned
+ * from poll handler in driver.
+ */
+unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+{
+ struct video_device *vfd = video_devdata(file);
+ unsigned long req_events = poll_requested_events(wait);
+ struct vb2_buffer *vb = NULL;
+ unsigned int res = 0;
+ unsigned long flags;
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
+ struct v4l2_fh *fh = file->private_data;
+
+ if (v4l2_event_pending(fh))
+ res = POLLPRI;
+ else if (req_events & POLLPRI)
+ poll_wait(file, &fh->wait, wait);
+ }
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
+ return res;
+ if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
+ return res;
+
+ /*
+ * Start file I/O emulator only if streaming API has not been used yet.
+ */
+ if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
+ (req_events & (POLLIN | POLLRDNORM))) {
+ if (__vb2_init_fileio(q, 1))
+ return res | POLLERR;
+ }
+ if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
+ (req_events & (POLLOUT | POLLWRNORM))) {
+ if (__vb2_init_fileio(q, 0))
+ return res | POLLERR;
+ /*
+ * Write to OUTPUT queue can be done immediately.
+ */
+ return res | POLLOUT | POLLWRNORM;
+ }
+ }
+
+ /*
+ * There is nothing to wait for if the queue isn't streaming, or if the
+ * error flag is set.
+ */
+ if (!vb2_is_streaming(q) || q->error)
+ return res | POLLERR;
+ /*
+ * For compatibility with vb1: if QBUF hasn't been called yet, then
+ * return POLLERR as well. This only affects capture queues, output
+ * queues will always initialize waiting_for_buffers to false.
+ */
+ if (q->waiting_for_buffers)
+ return res | POLLERR;
+
+ /*
+ * For output streams you can write as long as there are fewer buffers
+ * queued than there are buffers available.
+ */
+ if (V4L2_TYPE_IS_OUTPUT(q->type) && q->queued_count < q->num_buffers)
+ return res | POLLOUT | POLLWRNORM;
+
+ if (list_empty(&q->done_list))
+ poll_wait(file, &q->done_wq, wait);
+
+ /*
+ * Take first buffer available for dequeuing.
+ */
+ spin_lock_irqsave(&q->done_lock, flags);
+ if (!list_empty(&q->done_list))
+ vb = list_first_entry(&q->done_list, struct vb2_buffer,
+ done_entry);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ if (vb && (vb->state == VB2_BUF_STATE_DONE
+ || vb->state == VB2_BUF_STATE_ERROR)) {
+ return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
+ res | POLLOUT | POLLWRNORM :
+ res | POLLIN | POLLRDNORM;
+ }
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_poll);
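+
+/*
+ * Usage sketch: a poll file operation delegating to vb2_poll() under the
+ * driver lock ("mydrv" and its "lock" are hypothetical):
+ *
+ *	static unsigned int mydrv_poll(struct file *file, poll_table *wait)
+ *	{
+ *		struct mydrv *dev = video_drvdata(file);
+ *		unsigned int res;
+ *
+ *		mutex_lock(&dev->lock);
+ *		res = vb2_poll(&dev->queue, file, wait);
+ *		mutex_unlock(&dev->lock);
+ *		return res;
+ *	}
+ */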
+
+/**
+ * vb2_queue_init() - initialize a videobuf2 queue
+ * @q: videobuf2 queue; this structure should be allocated in driver
+ *
+ * The vb2_queue structure should be allocated by the driver. The driver is
+ * responsible for clearing its content and setting initial values for some
+ * required entries before calling this function.
+ * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
+ * to the struct vb2_queue description in include/media/videobuf2-core.h
+ * for more information.
+ */
+int vb2_queue_init(struct vb2_queue *q)
+{
+ /*
+ * Sanity check
+ */
+ if (WARN_ON(!q) ||
+ WARN_ON(!q->ops) ||
+ WARN_ON(!q->mem_ops) ||
+ WARN_ON(!q->type) ||
+ WARN_ON(!q->io_modes) ||
+ WARN_ON(!q->ops->queue_setup) ||
+ WARN_ON(!q->ops->buf_queue) ||
+ WARN_ON(q->timestamp_flags &
+ ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
+ return -EINVAL;
+
+ /* Warn that the driver should choose an appropriate timestamp type */
+ WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
+
+ INIT_LIST_HEAD(&q->queued_list);
+ INIT_LIST_HEAD(&q->done_list);
+ spin_lock_init(&q->done_lock);
+ mutex_init(&q->mmap_lock);
+ init_waitqueue_head(&q->done_wq);
+
+ if (q->buf_struct_size == 0)
+ q->buf_struct_size = sizeof(struct vb2_buffer);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_queue_init);
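+
+/*
+ * Usage sketch: typical queue setup in a driver's probe function. The
+ * "mydrv" names are hypothetical; vb2_dma_contig_memops is one of the
+ * in-tree allocators:
+ *
+ *	q = &dev->queue;
+ *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ *	q->drv_priv = dev;
+ *	q->buf_struct_size = sizeof(struct mydrv_buffer);
+ *	q->ops = &mydrv_queue_ops;
+ *	q->mem_ops = &vb2_dma_contig_memops;
+ *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ *	ret = vb2_queue_init(q);
+ *	if (ret)
+ *		return ret;
+ */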
+
+/**
+ * vb2_queue_release() - stop streaming, release the queue and free memory
+ * @q: videobuf2 queue
+ *
+ * This function stops streaming and performs necessary clean ups, including
+ * freeing video buffer memory. The driver is responsible for freeing
+ * the vb2_queue structure itself.
+ */
+void vb2_queue_release(struct vb2_queue *q)
+{
+ __vb2_cleanup_fileio(q);
+ __vb2_queue_cancel(q);
+ mutex_lock(&q->mmap_lock);
+ __vb2_queue_free(q, q->num_buffers);
+ mutex_unlock(&q->mmap_lock);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_release);
+
+/**
+ * struct vb2_fileio_buf - buffer context used by file io emulator
+ *
+ * vb2 provides a compatibility layer and emulator of file io (read and
+ * write) calls on top of the streaming API. This structure is used for
+ * tracking context related to the buffers.
+ */
+struct vb2_fileio_buf {
+ void *vaddr;
+ unsigned int size;
+ unsigned int pos;
+ unsigned int queued:1;
+};
+
+/**
+ * struct vb2_fileio_data - queue context used by file io emulator
+ *
+ * @cur_index: the index of the buffer currently being read from or
+ * written to. If equal to q->num_buffers then a new buffer
+ * must be dequeued.
+ * @initial_index: in the read() case all buffers are queued up immediately
+ * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
+ * buffers. However, in the write() case no buffers are initially
+ * queued, instead whenever a buffer is full it is queued up by
+ * __vb2_perform_fileio(). Only once all available buffers have
+ * been queued up will __vb2_perform_fileio() start to dequeue
+ * buffers. This means that initially __vb2_perform_fileio()
+ * needs to know what buffer index to use when it is queuing up
+ * the buffers for the first time. That initial index is stored
+ * in this field. Once it is equal to q->num_buffers all
+ * available buffers have been queued and __vb2_perform_fileio()
+ * should start the normal dequeue/queue cycle.
+ *
+ * vb2 provides a compatibility layer and emulator of file io (read and
+ * write) calls on top of the streaming API. For proper operation it requires
+ * this structure to save the driver state between each call of the read
+ * or write function.
+ */
+struct vb2_fileio_data {
+ struct v4l2_requestbuffers req;
+ struct v4l2_plane p;
+ struct v4l2_buffer b;
+ struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
+ unsigned int cur_index;
+ unsigned int initial_index;
+ unsigned int q_count;
+ unsigned int dq_count;
+ unsigned read_once:1;
+ unsigned write_immediately:1;
+};
+
+/**
+ * __vb2_init_fileio() - initialize file io emulator
+ * @q: videobuf2 queue
+ * @read: mode selector (1 means read, 0 means write)
+ */
+static int __vb2_init_fileio(struct vb2_queue *q, int read)
+{
+ struct vb2_fileio_data *fileio;
+ int i, ret;
+ unsigned int count = 0;
+
+ /*
+ * Sanity check
+ */
+ if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
+ (!read && !(q->io_modes & VB2_WRITE))))
+ return -EINVAL;
+
+ /*
+ * Check if device supports mapping buffers to kernel virtual space.
+ */
+ if (!q->mem_ops->vaddr)
+ return -EBUSY;
+
+ /*
+ * Check if streaming api has not been already activated.
+ */
+ if (q->streaming || q->num_buffers > 0)
+ return -EBUSY;
+
+ /*
+ * Start with count 1, driver can increase it in queue_setup()
+ */
+ count = 1;
+
+ dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
+ (read) ? "read" : "write", count, q->fileio_read_once,
+ q->fileio_write_immediately);
+
+ fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
+ if (fileio == NULL)
+ return -ENOMEM;
+
+ fileio->read_once = q->fileio_read_once;
+ fileio->write_immediately = q->fileio_write_immediately;
+
+ /*
+ * Request buffers and use MMAP type to force driver
+ * to allocate buffers by itself.
+ */
+ fileio->req.count = count;
+ fileio->req.memory = V4L2_MEMORY_MMAP;
+ fileio->req.type = q->type;
+ q->fileio = fileio;
+ ret = __reqbufs(q, &fileio->req);
+ if (ret)
+ goto err_kfree;
+
+ /*
+ * Check if plane_count is correct
+ * (multiplane buffers are not supported).
+ */
+ if (q->bufs[0]->num_planes != 1) {
+ ret = -EBUSY;
+ goto err_reqbufs;
+ }
+
+ /*
+ * Get kernel address of each buffer.
+ */
+ for (i = 0; i < q->num_buffers; i++) {
+ fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
+ if (fileio->bufs[i].vaddr == NULL) {
+ ret = -EINVAL;
+ goto err_reqbufs;
+ }
+ fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
+ }
+
+ /*
+ * Read mode requires pre queuing of all buffers.
+ */
+ if (read) {
+ bool is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
+
+ /*
+ * Queue all buffers.
+ */
+ for (i = 0; i < q->num_buffers; i++) {
+ struct v4l2_buffer *b = &fileio->b;
+
+ memset(b, 0, sizeof(*b));
+ b->type = q->type;
+ if (is_multiplanar) {
+ memset(&fileio->p, 0, sizeof(fileio->p));
+ b->m.planes = &fileio->p;
+ b->length = 1;
+ }
+ b->memory = q->memory;
+ b->index = i;
+ ret = vb2_internal_qbuf(q, b);
+ if (ret)
+ goto err_reqbufs;
+ fileio->bufs[i].queued = 1;
+ }
+ /*
+ * All buffers have been queued, so mark that by setting
+ * initial_index to q->num_buffers
+ */
+ fileio->initial_index = q->num_buffers;
+ fileio->cur_index = q->num_buffers;
+ }
+
+ /*
+ * Start streaming.
+ */
+ ret = vb2_internal_streamon(q, q->type);
+ if (ret)
+ goto err_reqbufs;
+
+ return ret;
+
+err_reqbufs:
+ fileio->req.count = 0;
+ __reqbufs(q, &fileio->req);
+
+err_kfree:
+ q->fileio = NULL;
+ kfree(fileio);
+ return ret;
+}
+
+/**
+ * __vb2_cleanup_fileio() - free resources used by the file io emulator
+ * @q: videobuf2 queue
+ */
+static int __vb2_cleanup_fileio(struct vb2_queue *q)
+{
+ struct vb2_fileio_data *fileio = q->fileio;
+
+ if (fileio) {
+ vb2_internal_streamoff(q, q->type);
+ q->fileio = NULL;
+ fileio->req.count = 0;
+ vb2_reqbufs(q, &fileio->req);
+ kfree(fileio);
+ dprintk(3, "file io emulator closed\n");
+ }
+ return 0;
+}
+
+/**
+ * __vb2_perform_fileio() - perform a single file io (read or write) operation
+ * @q: videobuf2 queue
+ * @data: pointer to the target userspace buffer
+ * @count: number of bytes to read or write
+ * @ppos: file handle position tracking pointer
+ * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking)
+ * @read: access mode selector (1 means read, 0 means write)
+ */
+static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblock, int read)
+{
+ struct vb2_fileio_data *fileio;
+ struct vb2_fileio_buf *buf;
+ bool is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
+ /*
+ * When using write() to write data to an output video node the vb2 core
+ * should set timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
+ * else is able to provide this information with the write() operation.
+ */
+ bool set_timestamp = !read &&
+ (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ int ret, index;
+
+ dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
+ read ? "read" : "write", (long)*ppos, count,
+ nonblock ? "non" : "");
+
+ if (!data)
+ return -EINVAL;
+
+ /*
+ * Initialize emulator on first call.
+ */
+ if (!vb2_fileio_is_active(q)) {
+ ret = __vb2_init_fileio(q, read);
+ dprintk(3, "vb2_init_fileio result: %d\n", ret);
+ if (ret)
+ return ret;
+ }
+ fileio = q->fileio;
+
+ /*
+ * Check if we need to dequeue the buffer.
+ */
+ index = fileio->cur_index;
+ if (index >= q->num_buffers) {
+ /*
+ * Call vb2_dqbuf to get buffer back.
+ */
+ memset(&fileio->b, 0, sizeof(fileio->b));
+ fileio->b.type = q->type;
+ fileio->b.memory = q->memory;
+ if (is_multiplanar) {
+ memset(&fileio->p, 0, sizeof(fileio->p));
+ fileio->b.m.planes = &fileio->p;
+ fileio->b.length = 1;
+ }
+ ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
+ dprintk(5, "vb2_dqbuf result: %d\n", ret);
+ if (ret)
+ return ret;
+ fileio->dq_count += 1;
+
+ fileio->cur_index = index = fileio->b.index;
+ buf = &fileio->bufs[index];
+
+ /*
+ * Get number of bytes filled by the driver
+ */
+ buf->pos = 0;
+ buf->queued = 0;
+ buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
+ : vb2_plane_size(q->bufs[index], 0);
+ /* Compensate for data_offset on read in the multiplanar case. */
+ if (is_multiplanar && read &&
+ fileio->b.m.planes[0].data_offset < buf->size) {
+ buf->pos = fileio->b.m.planes[0].data_offset;
+ buf->size -= buf->pos;
+ }
+ } else {
+ buf = &fileio->bufs[index];
+ }
+
+ /*
+ * Limit count on last few bytes of the buffer.
+ */
+ if (buf->pos + count > buf->size) {
+ count = buf->size - buf->pos;
+ dprintk(5, "reducing read count: %zd\n", count);
+ }
+
+ /*
+ * Transfer data to userspace.
+ */
+ dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
+ count, index, buf->pos);
+ if (read)
+ ret = copy_to_user(data, buf->vaddr + buf->pos, count);
+ else
+ ret = copy_from_user(buf->vaddr + buf->pos, data, count);
+ if (ret) {
+ dprintk(3, "error copying data\n");
+ return -EFAULT;
+ }
+
+ /*
+ * Update counters.
+ */
+ buf->pos += count;
+ *ppos += count;
+
+ /*
+ * Queue next buffer if required.
+ */
+ if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
+ /*
+ * Check if this is the last buffer to read.
+ */
+ if (read && fileio->read_once && fileio->dq_count == 1) {
+ dprintk(3, "read limit reached\n");
+ return __vb2_cleanup_fileio(q);
+ }
+
+ /*
+ * Call vb2_qbuf and give buffer to the driver.
+ */
+ memset(&fileio->b, 0, sizeof(fileio->b));
+ fileio->b.type = q->type;
+ fileio->b.memory = q->memory;
+ fileio->b.index = index;
+ fileio->b.bytesused = buf->pos;
+ if (is_multiplanar) {
+ memset(&fileio->p, 0, sizeof(fileio->p));
+ fileio->p.bytesused = buf->pos;
+ fileio->b.m.planes = &fileio->p;
+ fileio->b.length = 1;
+ }
+ if (set_timestamp)
+ v4l2_get_timestamp(&fileio->b.timestamp);
+ ret = vb2_internal_qbuf(q, &fileio->b);
+		dprintk(5, "vb2_qbuf result: %d\n", ret);
+ if (ret)
+ return ret;
+
+ /*
+ * Buffer has been queued, update the status
+ */
+ buf->pos = 0;
+ buf->queued = 1;
+ buf->size = vb2_plane_size(q->bufs[index], 0);
+ fileio->q_count += 1;
+ /*
+ * If we are queuing up buffers for the first time, then
+ * increase initial_index by one.
+ */
+ if (fileio->initial_index < q->num_buffers)
+ fileio->initial_index++;
+ /*
+ * The next buffer to use is either a buffer that's going to be
+ * queued for the first time (initial_index < q->num_buffers)
+		 * or it is equal to q->num_buffers, meaning that the next
+		 * buffer must be dequeued since we've now queued up
+ * all the 'first time' buffers.
+ */
+ fileio->cur_index = fileio->initial_index;
+ }
+
+ /*
+ * Return proper number of bytes processed.
+ */
+ if (ret == 0)
+ ret = count;
+ return ret;
+}
+
+size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblocking)
+{
+ return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
+}
+EXPORT_SYMBOL_GPL(vb2_read);
+
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
+ loff_t *ppos, int nonblocking)
+{
+ return __vb2_perform_fileio(q, (char __user *) data, count,
+ ppos, nonblocking, 0);
+}
+EXPORT_SYMBOL_GPL(vb2_write);
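+
+/*
+ * Usage sketch: a read file operation built on the emulator; drivers can
+ * also use the vb2_fop_read()/vb2_fop_write() helpers further down in this
+ * file instead ("mydrv" is hypothetical):
+ *
+ *	static ssize_t mydrv_read(struct file *file, char __user *data,
+ *				  size_t count, loff_t *ppos)
+ *	{
+ *		struct mydrv *dev = video_drvdata(file);
+ *
+ *		return vb2_read(&dev->queue, data, count, ppos,
+ *				file->f_flags & O_NONBLOCK);
+ *	}
+ */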
+
+struct vb2_threadio_data {
+ struct task_struct *thread;
+ vb2_thread_fnc fnc;
+ void *priv;
+ bool stop;
+};
+
+static int vb2_thread(void *data)
+{
+ struct vb2_queue *q = data;
+ struct vb2_threadio_data *threadio = q->threadio;
+ struct vb2_fileio_data *fileio = q->fileio;
+ bool set_timestamp = false;
+ int prequeue = 0;
+ int index = 0;
+ int ret = 0;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ prequeue = q->num_buffers;
+ set_timestamp =
+ (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ }
+
+ set_freezable();
+
+ for (;;) {
+ struct vb2_buffer *vb;
+
+ /*
+ * Call vb2_dqbuf to get buffer back.
+ */
+ memset(&fileio->b, 0, sizeof(fileio->b));
+ fileio->b.type = q->type;
+ fileio->b.memory = q->memory;
+ if (prequeue) {
+ fileio->b.index = index++;
+ prequeue--;
+ } else {
+ call_void_qop(q, wait_finish, q);
+ if (!threadio->stop)
+ ret = vb2_internal_dqbuf(q, &fileio->b, 0);
+ call_void_qop(q, wait_prepare, q);
+ dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
+ }
+ if (ret || threadio->stop)
+ break;
+ try_to_freeze();
+
+ vb = q->bufs[fileio->b.index];
+ if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
+ if (threadio->fnc(vb, threadio->priv))
+ break;
+ call_void_qop(q, wait_finish, q);
+ if (set_timestamp)
+ v4l2_get_timestamp(&fileio->b.timestamp);
+ if (!threadio->stop)
+ ret = vb2_internal_qbuf(q, &fileio->b);
+ call_void_qop(q, wait_prepare, q);
+ if (ret || threadio->stop)
+ break;
+ }
+
+	/*
+	 * kthread_stop() must find this thread still alive, so keep idling
+	 * here until we are told to stop; exiting early makes the kernel
+	 * very unhappy.
+	 */
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ return 0;
+}
+
+/*
+ * This function should not be used for anything else but the videobuf2-dvb
+ * support. If you think you have another good use-case for this, then please
+ * contact the linux-media mailinglist first.
+ */
+int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
+ const char *thread_name)
+{
+ struct vb2_threadio_data *threadio;
+ int ret = 0;
+
+ if (q->threadio)
+ return -EBUSY;
+ if (vb2_is_busy(q))
+ return -EBUSY;
+ if (WARN_ON(q->fileio))
+ return -EBUSY;
+
+ threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
+ if (threadio == NULL)
+ return -ENOMEM;
+ threadio->fnc = fnc;
+ threadio->priv = priv;
+
+ ret = __vb2_init_fileio(q, !V4L2_TYPE_IS_OUTPUT(q->type));
+ dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
+ if (ret)
+ goto nomem;
+ q->threadio = threadio;
+ threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
+ if (IS_ERR(threadio->thread)) {
+ ret = PTR_ERR(threadio->thread);
+ threadio->thread = NULL;
+ goto nothread;
+ }
+ return 0;
+
+nothread:
+ __vb2_cleanup_fileio(q);
+nomem:
+ kfree(threadio);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_thread_start);
+
+int vb2_thread_stop(struct vb2_queue *q)
+{
+ struct vb2_threadio_data *threadio = q->threadio;
+ int err;
+
+ if (threadio == NULL)
+ return 0;
+ threadio->stop = true;
+ /* Wake up all pending sleeps in the thread */
+ vb2_queue_error(q);
+ err = kthread_stop(threadio->thread);
+ __vb2_cleanup_fileio(q);
+ threadio->thread = NULL;
+ kfree(threadio);
+ q->threadio = NULL;
+ return err;
+}
+EXPORT_SYMBOL_GPL(vb2_thread_stop);
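+
+/*
+ * Usage sketch (illustrative, placeholder names; videobuf2-dvb below is
+ * the real in-tree user): the callback runs in the vb2 thread once per
+ * dequeued buffer, and the buffer is requeued when the callback returns 0.
+ *
+ *	static int my_fnc(struct vb2_buffer *vb, void *priv)
+ *	{
+ *		consume(vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+ *		return 0;
+ *	}
+ *
+ *	err = vb2_thread_start(q, my_fnc, priv, "mydev");
+ *	...
+ *	err = vb2_thread_stop(q);
+ */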
+
+/*
+ * The following functions are not part of the vb2 core API, but are helper
+ * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
+ * and struct vb2_ops.
+ * They contain boilerplate code that most if not all drivers have to do
+ * and so they simplify the driver code.
+ */
+
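+/*
+ * Wiring sketch (assumed driver-side code, placeholder names): a driver
+ * that delegates serialization to the core via vdev->lock and vdev->queue
+ * can plug the helpers below straight into its ops tables.
+ *
+ *	static const struct v4l2_file_operations my_fops = {
+ *		.owner		= THIS_MODULE,
+ *		.open		= v4l2_fh_open,
+ *		.release	= vb2_fop_release,
+ *		.read		= vb2_fop_read,
+ *		.poll		= vb2_fop_poll,
+ *		.mmap		= vb2_fop_mmap,
+ *		.unlocked_ioctl	= video_ioctl2,
+ *	};
+ *
+ *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
+ *		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
+ *		.vidioc_create_bufs	= vb2_ioctl_create_bufs,
+ *		.vidioc_querybuf	= vb2_ioctl_querybuf,
+ *		.vidioc_qbuf		= vb2_ioctl_qbuf,
+ *		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
+ *		.vidioc_expbuf		= vb2_ioctl_expbuf,
+ *		.vidioc_streamon	= vb2_ioctl_streamon,
+ *		.vidioc_streamoff	= vb2_ioctl_streamoff,
+ *	};
+ */
+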
+/* The queue is busy if there is an owner and you are not that owner. */
+static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
+{
+ return vdev->queue->owner && vdev->queue->owner != file->private_data;
+}
+
+/* vb2 ioctl helpers */
+
+int vb2_ioctl_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct video_device *vdev = video_devdata(file);
+ int res = __verify_memory_type(vdev->queue, p->memory, p->type);
+
+ if (res)
+ return res;
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ res = __reqbufs(vdev->queue, p);
+	/* If count == 0, then the owner has released all buffers and is
+	   no longer the owner of the queue. Otherwise we have a new owner. */
+ if (res == 0)
+ vdev->queue->owner = p->count ? file->private_data : NULL;
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
+
+int vb2_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *p)
+{
+ struct video_device *vdev = video_devdata(file);
+ int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
+
+ p->index = vdev->queue->num_buffers;
+ /* If count == 0, then just check if memory and type are valid.
+ Any -EBUSY result from __verify_memory_type can be mapped to 0. */
+ if (p->count == 0)
+ return res != -EBUSY ? res : 0;
+ if (res)
+ return res;
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ res = __create_bufs(vdev->queue, p);
+ if (res == 0)
+ vdev->queue->owner = file->private_data;
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
+
+int vb2_ioctl_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_prepare_buf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
+
+int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
+ return vb2_querybuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
+
+int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_qbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
+
+int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
+
+int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_streamon(vdev->queue, i);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
+
+int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_streamoff(vdev->queue, i);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
+
+int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_expbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
+
+/* v4l2_file_operations helpers */
+
+int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ return vb2_mmap(vdev->queue, vma);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_mmap);
+
+int _vb2_fop_release(struct file *file, struct mutex *lock)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (lock)
+ mutex_lock(lock);
+ if (file->private_data == vdev->queue->owner) {
+ vb2_queue_release(vdev->queue);
+ vdev->queue->owner = NULL;
+ }
+ if (lock)
+ mutex_unlock(lock);
+ return v4l2_fh_release(file);
+}
+EXPORT_SYMBOL_GPL(_vb2_fop_release);
+
+int vb2_fop_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+
+ return _vb2_fop_release(file, lock);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_release);
+
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+ int err = -EBUSY;
+
+ if (!(vdev->queue->io_modes & VB2_WRITE))
+ return -EINVAL;
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ if (vb2_queue_is_busy(vdev, file))
+ goto exit;
+ err = vb2_write(vdev->queue, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
+ if (vdev->queue->fileio)
+ vdev->queue->owner = file->private_data;
+exit:
+ if (lock)
+ mutex_unlock(lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_write);
+
+ssize_t vb2_fop_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+ int err = -EBUSY;
+
+ if (!(vdev->queue->io_modes & VB2_READ))
+ return -EINVAL;
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ if (vb2_queue_is_busy(vdev, file))
+ goto exit;
+ err = vb2_read(vdev->queue, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
+ if (vdev->queue->fileio)
+ vdev->queue->owner = file->private_data;
+exit:
+ if (lock)
+ mutex_unlock(lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_read);
+
+unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct vb2_queue *q = vdev->queue;
+ struct mutex *lock = q->lock ? q->lock : vdev->lock;
+ unsigned res;
+ void *fileio;
+
+ /*
+	 * If this helper doesn't know how to lock, then you shouldn't be using
+	 * it; write your own instead.
+ */
+ WARN_ON(!lock);
+
+ if (lock && mutex_lock_interruptible(lock))
+ return POLLERR;
+
+ fileio = q->fileio;
+
+ res = vb2_poll(vdev->queue, file, wait);
+
+ /* If fileio was started, then we have a new queue owner. */
+ if (!fileio && q->fileio)
+ q->owner = file->private_data;
+ if (lock)
+ mutex_unlock(lock);
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_poll);
+
+#ifndef CONFIG_MMU
+unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
+#endif
+
+/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
+
+void vb2_ops_wait_prepare(struct vb2_queue *vq)
+{
+ mutex_unlock(vq->lock);
+}
+EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
+
+void vb2_ops_wait_finish(struct vb2_queue *vq)
+{
+ mutex_lock(vq->lock);
+}
+EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
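+
+/*
+ * Setup sketch (assumed, placeholder names): these two helpers only make
+ * sense when every queue operation is serialized by vq->lock, e.g.:
+ *
+ *	static const struct vb2_ops my_qops = {
+ *		...
+ *		.wait_prepare	= vb2_ops_wait_prepare,
+ *		.wait_finish	= vb2_ops_wait_finish,
+ *	};
+ *
+ *	q->lock = &my_mutex;
+ */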
+
+MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/media/v4l2-core/videobuf2-dma-contig.c b/kernel/drivers/media/v4l2-core/videobuf2-dma-contig.c
new file mode 100644
index 000000000..644dec73d
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -0,0 +1,885 @@
+/*
+ * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-memops.h>
+
+struct vb2_dc_conf {
+ struct device *dev;
+};
+
+struct vb2_dc_buf {
+ struct device *dev;
+ void *vaddr;
+ unsigned long size;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_dir;
+ struct sg_table *dma_sgt;
+
+ /* MMAP related */
+ struct vb2_vmarea_handler handler;
+ atomic_t refcount;
+ struct sg_table *sgt_base;
+
+ /* USERPTR related */
+ struct vm_area_struct *vma;
+
+ /* DMABUF related */
+ struct dma_buf_attachment *db_attach;
+};
+
+/*********************************************/
+/* scatterlist table functions */
+/*********************************************/
+
+
+static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
+ void (*cb)(struct page *pg))
+{
+ struct scatterlist *s;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+ struct page *page = sg_page(s);
+ unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
+ >> PAGE_SHIFT;
+ unsigned int j;
+
+ for (j = 0; j < n_pages; ++j, ++page)
+ cb(page);
+ }
+}
+
+static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
+{
+ struct scatterlist *s;
+ dma_addr_t expected = sg_dma_address(sgt->sgl);
+ unsigned int i;
+ unsigned long size = 0;
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ if (sg_dma_address(s) != expected)
+ break;
+ expected = sg_dma_address(s) + sg_dma_len(s);
+ size += sg_dma_len(s);
+ }
+ return size;
+}
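+
+/*
+ * Illustrative example: for DMA chunks at [0x1000, +0x1000),
+ * [0x2000, +0x2000) and [0x8000, +0x1000) the loop above accumulates
+ * 0x3000 bytes and stops at the 0x8000 chunk, since it does not start
+ * where the previous chunk ended (0x4000).
+ */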
+
+/*********************************************/
+/* callbacks for all buffers */
+/*********************************************/
+
+static void *vb2_dc_cookie(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return &buf->dma_addr;
+}
+
+static void *vb2_dc_vaddr(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ if (!buf->vaddr && buf->db_attach)
+ buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+
+ return buf->vaddr;
+}
+
+static unsigned int vb2_dc_num_users(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return atomic_read(&buf->refcount);
+}
+
+static void vb2_dc_prepare(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (!sgt || buf->db_attach)
+ return;
+
+ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dc_finish(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (!sgt || buf->db_attach)
+ return;
+
+ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+/*********************************************/
+/* callbacks for MMAP buffers */
+/*********************************************/
+
+static void vb2_dc_put(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ if (!atomic_dec_and_test(&buf->refcount))
+ return;
+
+ if (buf->sgt_base) {
+ sg_free_table(buf->sgt_base);
+ kfree(buf->sgt_base);
+ }
+ dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
+ put_device(buf->dev);
+ kfree(buf);
+}
+
+static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
+ enum dma_data_direction dma_dir, gfp_t gfp_flags)
+{
+ struct vb2_dc_conf *conf = alloc_ctx;
+ struct device *dev = conf->dev;
+ struct vb2_dc_buf *buf;
+
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
+ GFP_KERNEL | gfp_flags);
+ if (!buf->vaddr) {
+ dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Prevent the device from being released while the buffer is used */
+ buf->dev = get_device(dev);
+ buf->size = size;
+ buf->dma_dir = dma_dir;
+
+ buf->handler.refcount = &buf->refcount;
+ buf->handler.put = vb2_dc_put;
+ buf->handler.arg = buf;
+
+ atomic_inc(&buf->refcount);
+
+ return buf;
+}
+
+static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ int ret;
+
+ if (!buf) {
+ printk(KERN_ERR "No buffer to map\n");
+ return -EINVAL;
+ }
+
+ /*
+ * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
+	 * map the whole buffer
+ */
+ vma->vm_pgoff = 0;
+
+ ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
+ buf->dma_addr, buf->size);
+
+ if (ret) {
+ pr_err("Remapping memory failed, error: %d\n", ret);
+ return ret;
+ }
+
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = &buf->handler;
+ vma->vm_ops = &vb2_common_vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
+ __func__, (unsigned long)buf->dma_addr, vma->vm_start,
+ buf->size);
+
+ return 0;
+}
+
+/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
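+/*
+ * Call flow, for orientation: dma_buf_attach() reaches .attach, which
+ * clones sgt_base into a per-attachment table (the same scatterlist
+ * cannot be mapped for several importers at once);
+ * dma_buf_map_attachment() reaches .map_dma_buf, which maps that table
+ * for the importing device and caches the mapping until the direction
+ * changes or the importer detaches.
+ */
+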
+struct vb2_dc_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dma_dir;
+};
+
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+ struct dma_buf_attachment *dbuf_attach)
+{
+ struct vb2_dc_attachment *attach;
+ unsigned int i;
+ struct scatterlist *rd, *wr;
+ struct sg_table *sgt;
+ struct vb2_dc_buf *buf = dbuf->priv;
+ int ret;
+
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
+ * map the same scatter list to multiple attachments at the same time.
+ */
+ ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return -ENOMEM;
+ }
+
+ rd = buf->sgt_base->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ attach->dma_dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+
+ return 0;
+}
+
+static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
+{
+ struct vb2_dc_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dma_dir != DMA_NONE)
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dc_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+ struct vb2_dc_attachment *attach = db_attach->priv;
+ /* stealing dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+ int ret;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dma_dir == dma_dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dma_dir != DMA_NONE) {
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir);
+ attach->dma_dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
+ if (ret <= 0) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dma_dir = dma_dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_dc_get_dmabuf */
+ vb2_dc_put(dbuf->priv);
+}
+
+static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+ struct vb2_dc_buf *buf = dbuf->priv;
+
+ return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+ struct vb2_dc_buf *buf = dbuf->priv;
+
+ return buf->vaddr;
+}
+
+static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
+{
+ return vb2_dc_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_dc_dmabuf_ops = {
+ .attach = vb2_dc_dmabuf_ops_attach,
+ .detach = vb2_dc_dmabuf_ops_detach,
+ .map_dma_buf = vb2_dc_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
+ .kmap = vb2_dc_dmabuf_ops_kmap,
+ .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
+ .vmap = vb2_dc_dmabuf_ops_vmap,
+ .mmap = vb2_dc_dmabuf_ops_mmap,
+ .release = vb2_dc_dmabuf_ops_release,
+};
+
+static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
+{
+ int ret;
+ struct sg_table *sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ dev_err(buf->dev, "failed to alloc sg table\n");
+ return NULL;
+ }
+
+ ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
+ buf->size);
+ if (ret < 0) {
+ dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
+ kfree(sgt);
+ return NULL;
+ }
+
+ return sgt;
+}
+
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &vb2_dc_dmabuf_ops;
+ exp_info.size = buf->size;
+ exp_info.flags = flags;
+ exp_info.priv = buf;
+
+ if (!buf->sgt_base)
+ buf->sgt_base = vb2_dc_get_base_sgt(buf);
+
+ if (WARN_ON(!buf->sgt_base))
+ return NULL;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ atomic_inc(&buf->refcount);
+
+ return dbuf;
+}
+
+/*********************************************/
+/* callbacks for USERPTR buffers */
+/*********************************************/
+
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
+ struct vm_area_struct *vma, unsigned long *res)
+{
+ unsigned long pfn, start_pfn, prev_pfn;
+ unsigned int i;
+ int ret;
+
+ if (!vma_is_io(vma))
+ return -EFAULT;
+
+ ret = follow_pfn(vma, start, &pfn);
+ if (ret)
+ return ret;
+
+ start_pfn = pfn;
+ start += PAGE_SIZE;
+
+ for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
+ prev_pfn = pfn;
+ ret = follow_pfn(vma, start, &pfn);
+
+ if (ret) {
+ pr_err("no page for address %lu\n", start);
+ return ret;
+ }
+ if (pfn != prev_pfn + 1)
+ return -EINVAL;
+ }
+
+ *res = start_pfn;
+ return 0;
+}
+
+static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
+ int n_pages, struct vm_area_struct *vma,
+ enum dma_data_direction dma_dir)
+{
+ if (vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
+ unsigned long pfn;
+ int ret = follow_pfn(vma, start, &pfn);
+
+			if (ret) {
+				pr_err("no page for address %lu\n", start);
+				return ret;
+			}
+			/* pfn is only meaningful once follow_pfn() succeeded */
+			if (!pfn_valid(pfn))
+				return -EINVAL;
+
+			pages[i] = pfn_to_page(pfn);
+ }
+ } else {
+ int n;
+
+ n = get_user_pages(current, current->mm, start & PAGE_MASK,
+ n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
+ /* negative error means that no page was pinned */
+ n = max(n, 0);
+ if (n != n_pages) {
+ pr_err("got only %d of %d user pages\n", n, n_pages);
+ while (n)
+ put_page(pages[--n]);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static void vb2_dc_put_dirty_page(struct page *page)
+{
+ set_page_dirty_lock(page);
+ put_page(page);
+}
+
+static void vb2_dc_put_userptr(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ if (sgt) {
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+ /*
+ * No need to sync to CPU, it's already synced to the CPU
+ * since the finish() memop will have been called before this.
+ */
+ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir, &attrs);
+ if (!vma_is_io(buf->vma))
+ vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
+ vb2_put_vma(buf->vma);
+ kfree(buf);
+}
+
+/*
+ * For some kinds of reserved memory there might be no struct page available,
+ * so all that can be done to support such 'pages' is to try to convert the
+ * pfn to a dma address or, as a last resort, just assume that
+ * dma address == physical address (as was assumed in earlier versions of
+ * videobuf2-dma-contig).
+ */
+
+#ifdef __arch_pfn_to_dma
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+ return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
+}
+#elif defined(__pfn_to_bus)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+ return (dma_addr_t)__pfn_to_bus(pfn);
+}
+#elif defined(__pfn_to_phys)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+ return (dma_addr_t)__pfn_to_phys(pfn);
+}
+#else
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+ /* really, we cannot do anything better at this point */
+ return (dma_addr_t)(pfn) << PAGE_SHIFT;
+}
+#endif
+
+static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+ unsigned long size, enum dma_data_direction dma_dir)
+{
+ struct vb2_dc_conf *conf = alloc_ctx;
+ struct vb2_dc_buf *buf;
+ unsigned long start;
+ unsigned long end;
+ unsigned long offset;
+ struct page **pages;
+ int n_pages;
+ int ret = 0;
+ struct vm_area_struct *vma;
+ struct sg_table *sgt;
+ unsigned long contig_size;
+ unsigned long dma_align = dma_get_cache_alignment();
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+
+ /* Only cache aligned DMA transfers are reliable */
+ if (!IS_ALIGNED(vaddr | size, dma_align)) {
+ pr_debug("user data must be aligned to %lu bytes\n", dma_align);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!size) {
+ pr_debug("size is zero\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dev = conf->dev;
+ buf->dma_dir = dma_dir;
+
+ start = vaddr & PAGE_MASK;
+ offset = vaddr & ~PAGE_MASK;
+ end = PAGE_ALIGN(vaddr + size);
+ n_pages = (end - start) >> PAGE_SHIFT;
+
+ pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ pr_err("failed to allocate pages table\n");
+ goto fail_buf;
+ }
+
+ /* current->mm->mmap_sem is taken by videobuf2 core */
+ vma = find_vma(current->mm, vaddr);
+ if (!vma) {
+ pr_err("no vma for address %lu\n", vaddr);
+ ret = -EFAULT;
+ goto fail_pages;
+ }
+
+ if (vma->vm_end < vaddr + size) {
+ pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
+ ret = -EFAULT;
+ goto fail_pages;
+ }
+
+ buf->vma = vb2_get_vma(vma);
+ if (!buf->vma) {
+ pr_err("failed to copy vma\n");
+ ret = -ENOMEM;
+ goto fail_pages;
+ }
+
+ /* extract page list from userspace mapping */
+ ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
+ if (ret) {
+ unsigned long pfn;
+ if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
+ buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
+ buf->size = size;
+ kfree(pages);
+ return buf;
+ }
+
+ pr_err("failed to get user pages\n");
+ goto fail_vma;
+ }
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ pr_err("failed to allocate sg table\n");
+ ret = -ENOMEM;
+ goto fail_get_user_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
+ offset, size, GFP_KERNEL);
+ if (ret) {
+ pr_err("failed to initialize sg table\n");
+ goto fail_sgt;
+ }
+
+ /* pages are no longer needed */
+ kfree(pages);
+ pages = NULL;
+
+ /*
+ * No need to sync to the device, this will happen later when the
+ * prepare() memop is called.
+ */
+ sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir, &attrs);
+ if (sgt->nents <= 0) {
+ pr_err("failed to map scatterlist\n");
+ ret = -EIO;
+ goto fail_sgt_init;
+ }
+
+ contig_size = vb2_dc_get_contiguous_size(sgt);
+ if (contig_size < size) {
+ pr_err("contiguous mapping is too small %lu/%lu\n",
+ contig_size, size);
+ ret = -EFAULT;
+ goto fail_map_sg;
+ }
+
+ buf->dma_addr = sg_dma_address(sgt->sgl);
+ buf->size = size;
+ buf->dma_sgt = sgt;
+
+ return buf;
+
+fail_map_sg:
+ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir, &attrs);
+
+fail_sgt_init:
+ if (!vma_is_io(buf->vma))
+ vb2_dc_sgt_foreach_page(sgt, put_page);
+ sg_free_table(sgt);
+
+fail_sgt:
+ kfree(sgt);
+
+fail_get_user_pages:
+ if (pages && !vma_is_io(buf->vma))
+ while (n_pages)
+ put_page(pages[--n_pages]);
+
+fail_vma:
+ vb2_put_vma(buf->vma);
+
+fail_pages:
+ kfree(pages); /* kfree is NULL-proof */
+
+fail_buf:
+ kfree(buf);
+
+ return ERR_PTR(ret);
+}
+
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_dc_map_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+ struct sg_table *sgt;
+ unsigned long contig_size;
+
+ if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to pin a non-attached buffer\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(buf->dma_sgt)) {
+ pr_err("dmabuf buffer is already pinned\n");
+ return 0;
+ }
+
+ /* get the associated scatterlist for this buffer */
+ sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+ if (IS_ERR(sgt)) {
+ pr_err("Error getting dmabuf scatterlist\n");
+ return -EINVAL;
+ }
+
+ /* checking if dmabuf is big enough to store contiguous chunk */
+ contig_size = vb2_dc_get_contiguous_size(sgt);
+ if (contig_size < buf->size) {
+ pr_err("contiguous chunk is too small %lu/%lu b\n",
+ contig_size, buf->size);
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+ return -EFAULT;
+ }
+
+ buf->dma_addr = sg_dma_address(sgt->sgl);
+ buf->dma_sgt = sgt;
+ buf->vaddr = NULL;
+
+ return 0;
+}
+
+static void vb2_dc_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to unpin a non-attached buffer\n");
+ return;
+ }
+
+ if (WARN_ON(!sgt)) {
+ pr_err("dmabuf buffer is already unpinned\n");
+ return;
+ }
+
+ if (buf->vaddr) {
+ dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+ buf->vaddr = NULL;
+ }
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+ buf->dma_addr = 0;
+ buf->dma_sgt = NULL;
+}
+
+static void vb2_dc_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+
+	/* if vb2 works correctly you should never detach a mapped buffer */
+ if (WARN_ON(buf->dma_addr))
+ vb2_dc_unmap_dmabuf(buf);
+
+ /* detach this attachment */
+ dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
+ kfree(buf);
+}
+
+static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, enum dma_data_direction dma_dir)
+{
+ struct vb2_dc_conf *conf = alloc_ctx;
+ struct vb2_dc_buf *buf;
+ struct dma_buf_attachment *dba;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dev = conf->dev;
+ /* create attachment for the dmabuf with the user device */
+ dba = dma_buf_attach(dbuf, buf->dev);
+ if (IS_ERR(dba)) {
+ pr_err("failed to attach dmabuf\n");
+ kfree(buf);
+ return dba;
+ }
+
+ buf->dma_dir = dma_dir;
+ buf->size = size;
+ buf->db_attach = dba;
+
+ return buf;
+}
+
+/*********************************************/
+/* DMA CONTIG exported functions */
+/*********************************************/
+
+const struct vb2_mem_ops vb2_dma_contig_memops = {
+ .alloc = vb2_dc_alloc,
+ .put = vb2_dc_put,
+ .get_dmabuf = vb2_dc_get_dmabuf,
+ .cookie = vb2_dc_cookie,
+ .vaddr = vb2_dc_vaddr,
+ .mmap = vb2_dc_mmap,
+ .get_userptr = vb2_dc_get_userptr,
+ .put_userptr = vb2_dc_put_userptr,
+ .prepare = vb2_dc_prepare,
+ .finish = vb2_dc_finish,
+ .map_dmabuf = vb2_dc_map_dmabuf,
+ .unmap_dmabuf = vb2_dc_unmap_dmabuf,
+ .attach_dmabuf = vb2_dc_attach_dmabuf,
+ .detach_dmabuf = vb2_dc_detach_dmabuf,
+ .num_users = vb2_dc_num_users,
+};
+EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
+
+void *vb2_dma_contig_init_ctx(struct device *dev)
+{
+ struct vb2_dc_conf *conf;
+
+ conf = kzalloc(sizeof *conf, GFP_KERNEL);
+ if (!conf)
+ return ERR_PTR(-ENOMEM);
+
+ conf->dev = dev;
+
+ return conf;
+}
+EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
+
+void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
+{
+ if (!IS_ERR_OR_NULL(alloc_ctx))
+ kfree(alloc_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
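+
+/*
+ * Typical usage sketch (assumed driver-side code, placeholder names):
+ *
+ *	in probe():
+ *		priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ *		if (IS_ERR(priv->alloc_ctx))
+ *			return PTR_ERR(priv->alloc_ctx);
+ *		q->mem_ops = &vb2_dma_contig_memops;
+ *
+ *	in remove():
+ *		vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);
+ *
+ * The driver's queue_setup() op then hands priv->alloc_ctx back to the
+ * core via the alloc_ctxs[] array it is given.
+ */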
+
+MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/media/v4l2-core/videobuf2-dma-sg.c b/kernel/drivers/media/v4l2-core/videobuf2-dma-sg.c
new file mode 100644
index 000000000..45c708e46
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -0,0 +1,754 @@
+/*
+ * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-memops.h>
+#include <media/videobuf2-dma-sg.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
+ } while (0)
+
+struct vb2_dma_sg_conf {
+ struct device *dev;
+};
+
+struct vb2_dma_sg_buf {
+ struct device *dev;
+ void *vaddr;
+ struct page **pages;
+ int offset;
+ enum dma_data_direction dma_dir;
+ struct sg_table sg_table;
+ /*
+ * This will point to sg_table when used with the MMAP or USERPTR
+ * memory model, and to the dma_buf sglist when used with the
+ * DMABUF memory model.
+ */
+ struct sg_table *dma_sgt;
+ size_t size;
+ unsigned int num_pages;
+ atomic_t refcount;
+ struct vb2_vmarea_handler handler;
+ struct vm_area_struct *vma;
+
+ struct dma_buf_attachment *db_attach;
+};
+
+static void vb2_dma_sg_put(void *buf_priv);
+
+static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
+ gfp_t gfp_flags)
+{
+ unsigned int last_page = 0;
+ int size = buf->size;
+
+ while (size > 0) {
+ struct page *pages;
+ int order;
+ int i;
+
+ order = get_order(size);
+		/* Don't over-allocate */
+ if ((PAGE_SIZE << order) > size)
+ order--;
+
+ pages = NULL;
+ while (!pages) {
+ pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
+ __GFP_NOWARN | gfp_flags, order);
+ if (pages)
+ break;
+
+ if (order == 0) {
+ while (last_page--)
+ __free_page(buf->pages[last_page]);
+ return -ENOMEM;
+ }
+ order--;
+ }
+
+ split_page(pages, order);
+ for (i = 0; i < (1 << order); i++)
+ buf->pages[last_page++] = &pages[i];
+
+ size -= PAGE_SIZE << order;
+ }
+
+ return 0;
+}
+
+static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
+ enum dma_data_direction dma_dir, gfp_t gfp_flags)
+{
+ struct vb2_dma_sg_conf *conf = alloc_ctx;
+ struct vb2_dma_sg_buf *buf;
+ struct sg_table *sgt;
+ int ret;
+ int num_pages;
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+
+ if (WARN_ON(alloc_ctx == NULL))
+ return NULL;
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->vaddr = NULL;
+ buf->dma_dir = dma_dir;
+ buf->offset = 0;
+ buf->size = size;
+ /* size is already page aligned */
+ buf->num_pages = size >> PAGE_SHIFT;
+ buf->dma_sgt = &buf->sg_table;
+
+ buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!buf->pages)
+ goto fail_pages_array_alloc;
+
+ ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
+ if (ret)
+ goto fail_pages_alloc;
+
+ ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
+ buf->num_pages, 0, size, GFP_KERNEL);
+ if (ret)
+ goto fail_table_alloc;
+
+ /* Prevent the device from being released while the buffer is used */
+ buf->dev = get_device(conf->dev);
+
+ sgt = &buf->sg_table;
+ /*
+ * No need to sync to the device, this will happen later when the
+ * prepare() memop is called.
+ */
+ if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+ buf->dma_dir, &attrs) == 0)
+ goto fail_map;
+
+ buf->handler.refcount = &buf->refcount;
+ buf->handler.put = vb2_dma_sg_put;
+ buf->handler.arg = buf;
+
+ atomic_inc(&buf->refcount);
+
+ dprintk(1, "%s: Allocated buffer of %d pages\n",
+ __func__, buf->num_pages);
+ return buf;
+
+fail_map:
+ put_device(buf->dev);
+ sg_free_table(buf->dma_sgt);
+fail_table_alloc:
+ num_pages = buf->num_pages;
+ while (num_pages--)
+ __free_page(buf->pages[num_pages]);
+fail_pages_alloc:
+ kfree(buf->pages);
+fail_pages_array_alloc:
+ kfree(buf);
+ return NULL;
+}
+
+static void vb2_dma_sg_put(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct sg_table *sgt = &buf->sg_table;
+ int i = buf->num_pages;
+
+ if (atomic_dec_and_test(&buf->refcount)) {
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+ dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
+ buf->num_pages);
+ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+ buf->dma_dir, &attrs);
+ if (buf->vaddr)
+ vm_unmap_ram(buf->vaddr, buf->num_pages);
+ sg_free_table(buf->dma_sgt);
+ while (--i >= 0)
+ __free_page(buf->pages[i]);
+ kfree(buf->pages);
+ put_device(buf->dev);
+ kfree(buf);
+ }
+}
+
+static void vb2_dma_sg_prepare(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (buf->db_attach)
+ return;
+
+ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dma_sg_finish(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (buf->db_attach)
+ return;
+
+ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
+ unsigned long size,
+ enum dma_data_direction dma_dir)
+{
+ struct vb2_dma_sg_conf *conf = alloc_ctx;
+ struct vb2_dma_sg_buf *buf;
+ unsigned long first, last;
+ int num_pages_from_user;
+ struct vm_area_struct *vma;
+ struct sg_table *sgt;
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->vaddr = NULL;
+ buf->dev = conf->dev;
+ buf->dma_dir = dma_dir;
+ buf->offset = vaddr & ~PAGE_MASK;
+ buf->size = size;
+ buf->dma_sgt = &buf->sg_table;
+
+ first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
+ last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ buf->num_pages = last - first + 1;
+
+ buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!buf->pages)
+ goto userptr_fail_alloc_pages;
+
+ vma = find_vma(current->mm, vaddr);
+ if (!vma) {
+ dprintk(1, "no vma for address %lu\n", vaddr);
+ goto userptr_fail_find_vma;
+ }
+
+ if (vma->vm_end < vaddr + size) {
+ dprintk(1, "vma at %lu is too small for %lu bytes\n",
+ vaddr, size);
+ goto userptr_fail_find_vma;
+ }
+
+ buf->vma = vb2_get_vma(vma);
+ if (!buf->vma) {
+ dprintk(1, "failed to copy vma\n");
+ goto userptr_fail_find_vma;
+ }
+
+ if (vma_is_io(buf->vma)) {
+ for (num_pages_from_user = 0;
+ num_pages_from_user < buf->num_pages;
+ ++num_pages_from_user, vaddr += PAGE_SIZE) {
+ unsigned long pfn;
+
+ if (follow_pfn(vma, vaddr, &pfn)) {
+ dprintk(1, "no page for address %lu\n", vaddr);
+ break;
+ }
+ buf->pages[num_pages_from_user] = pfn_to_page(pfn);
+ }
+ } else
+ num_pages_from_user = get_user_pages(current, current->mm,
+ vaddr & PAGE_MASK,
+ buf->num_pages,
+ buf->dma_dir == DMA_FROM_DEVICE,
+ 1, /* force */
+ buf->pages,
+ NULL);
+
+ if (num_pages_from_user != buf->num_pages)
+ goto userptr_fail_get_user_pages;
+
+ if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
+ buf->num_pages, buf->offset, size, 0))
+ goto userptr_fail_alloc_table_from_pages;
+
+ sgt = &buf->sg_table;
+ /*
+ * No need to sync to the device, this will happen later when the
+ * prepare() memop is called.
+ */
+ if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+ buf->dma_dir, &attrs) == 0)
+ goto userptr_fail_map;
+ return buf;
+
+userptr_fail_map:
+ sg_free_table(&buf->sg_table);
+userptr_fail_alloc_table_from_pages:
+userptr_fail_get_user_pages:
+	dprintk(1, "get_user_pages requested/got: %d/%d\n",
+ buf->num_pages, num_pages_from_user);
+ if (!vma_is_io(buf->vma))
+ while (--num_pages_from_user >= 0)
+ put_page(buf->pages[num_pages_from_user]);
+ vb2_put_vma(buf->vma);
+userptr_fail_find_vma:
+ kfree(buf->pages);
+userptr_fail_alloc_pages:
+ kfree(buf);
+ return NULL;
+}
+
+/*
+ * @put_userptr: inform the allocator that a USERPTR buffer will no longer
+ * be used
+ */
+static void vb2_dma_sg_put_userptr(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct sg_table *sgt = &buf->sg_table;
+ int i = buf->num_pages;
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+
+ dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
+ __func__, buf->num_pages);
+ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir, &attrs);
+ if (buf->vaddr)
+ vm_unmap_ram(buf->vaddr, buf->num_pages);
+ sg_free_table(buf->dma_sgt);
+ while (--i >= 0) {
+ if (buf->dma_dir == DMA_FROM_DEVICE)
+ set_page_dirty_lock(buf->pages[i]);
+ if (!vma_is_io(buf->vma))
+ put_page(buf->pages[i]);
+ }
+ kfree(buf->pages);
+ vb2_put_vma(buf->vma);
+ kfree(buf);
+}
+
+static void *vb2_dma_sg_vaddr(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+
+ BUG_ON(!buf);
+
+ if (!buf->vaddr) {
+ if (buf->db_attach)
+ buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+ else
+ buf->vaddr = vm_map_ram(buf->pages,
+ buf->num_pages, -1, PAGE_KERNEL);
+ }
+
+ /* add offset in case userptr is not page-aligned */
+ return buf->vaddr ? buf->vaddr + buf->offset : NULL;
+}
+
+static unsigned int vb2_dma_sg_num_users(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+
+ return atomic_read(&buf->refcount);
+}
+
+static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ int i = 0;
+
+ if (!buf) {
+ printk(KERN_ERR "No memory to map\n");
+ return -EINVAL;
+ }
+
+ do {
+ int ret;
+
+ ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
+ if (ret) {
+			printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
+ return ret;
+ }
+
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+
+ /*
+ * Use common vm_area operations to track buffer refcount.
+ */
+ vma->vm_private_data = &buf->handler;
+ vma->vm_ops = &vb2_common_vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ return 0;
+}
+
+/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_dma_sg_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dma_dir;
+};
+
+static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+ struct dma_buf_attachment *dbuf_attach)
+{
+ struct vb2_dma_sg_attachment *attach;
+ unsigned int i;
+ struct scatterlist *rd, *wr;
+ struct sg_table *sgt;
+ struct vb2_dma_sg_buf *buf = dbuf->priv;
+ int ret;
+
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+	/* Copy the buf->dma_sgt scatter list to the attachment, as we can't
+ * map the same scatter list to multiple attachments at the same time.
+ */
+ ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return -ENOMEM;
+ }
+
+ rd = buf->dma_sgt->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ attach->dma_dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+
+ return 0;
+}
+
+static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
+{
+ struct vb2_dma_sg_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dma_dir != DMA_NONE)
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+ struct vb2_dma_sg_attachment *attach = db_attach->priv;
+ /* stealing dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+ int ret;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dma_dir == dma_dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dma_dir != DMA_NONE) {
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir);
+ attach->dma_dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
+ if (ret <= 0) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dma_dir = dma_dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_dma_sg_get_dmabuf */
+ vb2_dma_sg_put(dbuf->priv);
+}
+
+static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+ struct vb2_dma_sg_buf *buf = dbuf->priv;
+
+ return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
+}
+
+static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+ struct vb2_dma_sg_buf *buf = dbuf->priv;
+
+ return vb2_dma_sg_vaddr(buf);
+}
+
+static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
+{
+ return vb2_dma_sg_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
+ .attach = vb2_dma_sg_dmabuf_ops_attach,
+ .detach = vb2_dma_sg_dmabuf_ops_detach,
+ .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+ .kmap = vb2_dma_sg_dmabuf_ops_kmap,
+ .kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
+ .vmap = vb2_dma_sg_dmabuf_ops_vmap,
+ .mmap = vb2_dma_sg_dmabuf_ops_mmap,
+ .release = vb2_dma_sg_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &vb2_dma_sg_dmabuf_ops;
+ exp_info.size = buf->size;
+ exp_info.flags = flags;
+ exp_info.priv = buf;
+
+ if (WARN_ON(!buf->dma_sgt))
+ return NULL;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ atomic_inc(&buf->refcount);
+
+ return dbuf;
+}
+
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_dma_sg_map_dmabuf(void *mem_priv)
+{
+ struct vb2_dma_sg_buf *buf = mem_priv;
+ struct sg_table *sgt;
+
+ if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to pin a non-attached buffer\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(buf->dma_sgt)) {
+ pr_err("dmabuf buffer is already pinned\n");
+ return 0;
+ }
+
+ /* get the associated scatterlist for this buffer */
+ sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+ if (IS_ERR(sgt)) {
+ pr_err("Error getting dmabuf scatterlist\n");
+ return -EINVAL;
+ }
+
+ buf->dma_sgt = sgt;
+ buf->vaddr = NULL;
+
+ return 0;
+}
+
+static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_dma_sg_buf *buf = mem_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to unpin a non-attached buffer\n");
+ return;
+ }
+
+ if (WARN_ON(!sgt)) {
+ pr_err("dmabuf buffer is already unpinned\n");
+ return;
+ }
+
+ if (buf->vaddr) {
+ dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+ buf->vaddr = NULL;
+ }
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+ buf->dma_sgt = NULL;
+}
+
+static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_dma_sg_buf *buf = mem_priv;
+
+	/* if vb2 works correctly you should never detach a mapped buffer */
+ if (WARN_ON(buf->dma_sgt))
+ vb2_dma_sg_unmap_dmabuf(buf);
+
+ /* detach this attachment */
+ dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
+ kfree(buf);
+}
+
+static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, enum dma_data_direction dma_dir)
+{
+ struct vb2_dma_sg_conf *conf = alloc_ctx;
+ struct vb2_dma_sg_buf *buf;
+ struct dma_buf_attachment *dba;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dev = conf->dev;
+ /* create attachment for the dmabuf with the user device */
+ dba = dma_buf_attach(dbuf, buf->dev);
+ if (IS_ERR(dba)) {
+ pr_err("failed to attach dmabuf\n");
+ kfree(buf);
+ return dba;
+ }
+
+ buf->dma_dir = dma_dir;
+ buf->size = size;
+ buf->db_attach = dba;
+
+ return buf;
+}
+
+static void *vb2_dma_sg_cookie(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+
+ return buf->dma_sgt;
+}
+
+const struct vb2_mem_ops vb2_dma_sg_memops = {
+ .alloc = vb2_dma_sg_alloc,
+ .put = vb2_dma_sg_put,
+ .get_userptr = vb2_dma_sg_get_userptr,
+ .put_userptr = vb2_dma_sg_put_userptr,
+ .prepare = vb2_dma_sg_prepare,
+ .finish = vb2_dma_sg_finish,
+ .vaddr = vb2_dma_sg_vaddr,
+ .mmap = vb2_dma_sg_mmap,
+ .num_users = vb2_dma_sg_num_users,
+ .get_dmabuf = vb2_dma_sg_get_dmabuf,
+ .map_dmabuf = vb2_dma_sg_map_dmabuf,
+ .unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
+ .attach_dmabuf = vb2_dma_sg_attach_dmabuf,
+ .detach_dmabuf = vb2_dma_sg_detach_dmabuf,
+ .cookie = vb2_dma_sg_cookie,
+};
+EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
+
+void *vb2_dma_sg_init_ctx(struct device *dev)
+{
+ struct vb2_dma_sg_conf *conf;
+
+ conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return ERR_PTR(-ENOMEM);
+
+ conf->dev = dev;
+
+ return conf;
+}
+EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);
+
+void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
+{
+ if (!IS_ERR_OR_NULL(alloc_ctx))
+ kfree(alloc_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);
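+
+/*
+ * Usage mirrors videobuf2-dma-contig above (sketch, placeholder names):
+ * init the context against the DMA-capable device and point the queue
+ * at these memops.
+ *
+ *	priv->alloc_ctx = vb2_dma_sg_init_ctx(&pdev->dev);
+ *	q->mem_ops = &vb2_dma_sg_memops;
+ */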
+
+MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
+MODULE_AUTHOR("Andrzej Pietrasiewicz");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/media/v4l2-core/videobuf2-dvb.c b/kernel/drivers/media/v4l2-core/videobuf2-dvb.c
new file mode 100644
index 000000000..d09269846
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/videobuf2-dvb.c
@@ -0,0 +1,336 @@
+/*
+ *
+ * Some helper functions for simple DVB cards which simply DMA the
+ * complete transport stream and let the computer sort everything else
+ * (i.e. we are using the software demux, ...). Also uses videobuf2
+ * to manage DMA buffers.
+ *
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <media/videobuf2-dvb.h>
+
+/* ------------------------------------------------------------------ */
+
+MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
+MODULE_LICENSE("GPL");
+
+/* ------------------------------------------------------------------ */
+
+static int dvb_fnc(struct vb2_buffer *vb, void *priv)
+{
+ struct vb2_dvb *dvb = priv;
+
+ dvb_dmx_swfilter(&dvb->demux, vb2_plane_vaddr(vb, 0),
+ vb2_get_plane_payload(vb, 0));
+ return 0;
+}
+
+static int vb2_dvb_start_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct vb2_dvb *dvb = demux->priv;
+ int rc = 0;
+
+ if (!demux->dmx.frontend)
+ return -EINVAL;
+
+ mutex_lock(&dvb->lock);
+ dvb->nfeeds++;
+
+ if (!dvb->dvbq.threadio) {
+ rc = vb2_thread_start(&dvb->dvbq, dvb_fnc, dvb, dvb->name);
+ if (rc)
+ dvb->nfeeds--;
+ }
+ if (!rc)
+ rc = dvb->nfeeds;
+ mutex_unlock(&dvb->lock);
+ return rc;
+}
+
+static int vb2_dvb_stop_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct vb2_dvb *dvb = demux->priv;
+ int err = 0;
+
+ mutex_lock(&dvb->lock);
+ dvb->nfeeds--;
+ if (0 == dvb->nfeeds)
+ err = vb2_thread_stop(&dvb->dvbq);
+ mutex_unlock(&dvb->lock);
+ return err;
+}
+
+static int vb2_dvb_register_adapter(struct vb2_dvb_frontends *fe,
+ struct module *module,
+ void *adapter_priv,
+ struct device *device,
+ char *adapter_name,
+ short *adapter_nr,
+ int mfe_shared)
+{
+ int result;
+
+ mutex_init(&fe->lock);
+
+ /* register adapter */
+ result = dvb_register_adapter(&fe->adapter, adapter_name, module,
+ device, adapter_nr);
+ if (result < 0) {
+ pr_warn("%s: dvb_register_adapter failed (errno = %d)\n",
+ adapter_name, result);
+ }
+ fe->adapter.priv = adapter_priv;
+ fe->adapter.mfe_shared = mfe_shared;
+
+ return result;
+}
+
+static int vb2_dvb_register_frontend(struct dvb_adapter *adapter,
+ struct vb2_dvb *dvb)
+{
+ int result;
+
+ /* register frontend */
+ result = dvb_register_frontend(adapter, dvb->frontend);
+ if (result < 0) {
+ pr_warn("%s: dvb_register_frontend failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_frontend;
+ }
+
+ /* register demux stuff */
+ dvb->demux.dmx.capabilities =
+ DMX_TS_FILTERING | DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING;
+ dvb->demux.priv = dvb;
+ dvb->demux.filternum = 256;
+ dvb->demux.feednum = 256;
+ dvb->demux.start_feed = vb2_dvb_start_feed;
+ dvb->demux.stop_feed = vb2_dvb_stop_feed;
+ result = dvb_dmx_init(&dvb->demux);
+ if (result < 0) {
+ pr_warn("%s: dvb_dmx_init failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_dmx;
+ }
+
+ dvb->dmxdev.filternum = 256;
+ dvb->dmxdev.demux = &dvb->demux.dmx;
+ dvb->dmxdev.capabilities = 0;
+ result = dvb_dmxdev_init(&dvb->dmxdev, adapter);
+
+ if (result < 0) {
+ pr_warn("%s: dvb_dmxdev_init failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_dmxdev;
+ }
+
+ dvb->fe_hw.source = DMX_FRONTEND_0;
+ result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+ if (result < 0) {
+ pr_warn("%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_hw;
+ }
+
+ dvb->fe_mem.source = DMX_MEMORY_FE;
+ result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
+ if (result < 0) {
+ pr_warn("%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_mem;
+ }
+
+ result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+ if (result < 0) {
+ pr_warn("%s: connect_frontend failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_conn;
+ }
+
+ /* register network adapter */
+ result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
+ if (result < 0) {
+ pr_warn("%s: dvb_net_init failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_conn;
+ }
+ return 0;
+
+fail_fe_conn:
+ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
+fail_fe_mem:
+ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+fail_fe_hw:
+ dvb_dmxdev_release(&dvb->dmxdev);
+fail_dmxdev:
+ dvb_dmx_release(&dvb->demux);
+fail_dmx:
+ dvb_unregister_frontend(dvb->frontend);
+fail_frontend:
+ dvb_frontend_detach(dvb->frontend);
+ dvb->frontend = NULL;
+
+ return result;
+}
+
+/* ------------------------------------------------------------------ */
+/* Register a single adapter and one or more frontends */
+int vb2_dvb_register_bus(struct vb2_dvb_frontends *f,
+ struct module *module,
+ void *adapter_priv,
+ struct device *device,
+ short *adapter_nr,
+ int mfe_shared)
+{
+ struct list_head *list, *q;
+ struct vb2_dvb_frontend *fe;
+ int res;
+
+ fe = vb2_dvb_get_frontend(f, 1);
+ if (!fe) {
+		pr_warn("Unable to register an adapter that has no frontends\n");
+ return -EINVAL;
+ }
+
+ /* Bring up the adapter */
+ res = vb2_dvb_register_adapter(f, module, adapter_priv, device,
+ fe->dvb.name, adapter_nr, mfe_shared);
+ if (res < 0) {
+ pr_warn("vb2_dvb_register_adapter failed (errno = %d)\n", res);
+ return res;
+ }
+
+ /* Attach all of the frontends to the adapter */
+ mutex_lock(&f->lock);
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct vb2_dvb_frontend, felist);
+ res = vb2_dvb_register_frontend(&f->adapter, &fe->dvb);
+ if (res < 0) {
+ pr_warn("%s: vb2_dvb_register_frontend failed (errno = %d)\n",
+ fe->dvb.name, res);
+ goto err;
+ }
+ }
+ mutex_unlock(&f->lock);
+ return 0;
+
+err:
+ mutex_unlock(&f->lock);
+ vb2_dvb_unregister_bus(f);
+ return res;
+}
+EXPORT_SYMBOL(vb2_dvb_register_bus);
+
+void vb2_dvb_unregister_bus(struct vb2_dvb_frontends *f)
+{
+ vb2_dvb_dealloc_frontends(f);
+
+ dvb_unregister_adapter(&f->adapter);
+}
+EXPORT_SYMBOL(vb2_dvb_unregister_bus);
+
+struct vb2_dvb_frontend *vb2_dvb_get_frontend(
+ struct vb2_dvb_frontends *f, int id)
+{
+ struct list_head *list, *q;
+ struct vb2_dvb_frontend *fe, *ret = NULL;
+
+ mutex_lock(&f->lock);
+
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct vb2_dvb_frontend, felist);
+ if (fe->id == id) {
+ ret = fe;
+ break;
+ }
+ }
+
+ mutex_unlock(&f->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(vb2_dvb_get_frontend);
+
+int vb2_dvb_find_frontend(struct vb2_dvb_frontends *f,
+ struct dvb_frontend *p)
+{
+ struct list_head *list, *q;
+ struct vb2_dvb_frontend *fe = NULL;
+ int ret = 0;
+
+ mutex_lock(&f->lock);
+
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct vb2_dvb_frontend, felist);
+ if (fe->dvb.frontend == p) {
+ ret = fe->id;
+ break;
+ }
+ }
+
+ mutex_unlock(&f->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(vb2_dvb_find_frontend);
+
+struct vb2_dvb_frontend *vb2_dvb_alloc_frontend(
+ struct vb2_dvb_frontends *f, int id)
+{
+ struct vb2_dvb_frontend *fe;
+
+ fe = kzalloc(sizeof(*fe), GFP_KERNEL);
+ if (fe == NULL)
+ return NULL;
+
+ fe->id = id;
+ mutex_init(&fe->dvb.lock);
+
+ mutex_lock(&f->lock);
+ list_add_tail(&fe->felist, &f->felist);
+ mutex_unlock(&f->lock);
+ return fe;
+}
+EXPORT_SYMBOL(vb2_dvb_alloc_frontend);
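+
+/*
+ * A minimal allocation sketch, assuming a hypothetical "mydrv" driver;
+ * ids conventionally start at 1, which is also the id that
+ * vb2_dvb_register_bus() looks up first:
+ *
+ *	fe = vb2_dvb_alloc_frontend(&dev->frontends, 1);
+ *	if (!fe)
+ *		return -ENOMEM;
+ *	fe->dvb.frontend = dvb_attach(mydrv_demod_attach, &cfg, i2c);
+ */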
+
+void vb2_dvb_dealloc_frontends(struct vb2_dvb_frontends *f)
+{
+ struct list_head *list, *q;
+ struct vb2_dvb_frontend *fe;
+
+ mutex_lock(&f->lock);
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct vb2_dvb_frontend, felist);
+ if (fe->dvb.net.dvbdev) {
+ dvb_net_release(&fe->dvb.net);
+ fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+ &fe->dvb.fe_mem);
+ fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+ &fe->dvb.fe_hw);
+ dvb_dmxdev_release(&fe->dvb.dmxdev);
+ dvb_dmx_release(&fe->dvb.demux);
+ dvb_unregister_frontend(fe->dvb.frontend);
+ }
+ if (fe->dvb.frontend)
+ /* always allocated, may have been reset */
+ dvb_frontend_detach(fe->dvb.frontend);
+ list_del(list); /* remove list entry */
+ kfree(fe); /* free frontend allocation */
+ }
+ mutex_unlock(&f->lock);
+}
+EXPORT_SYMBOL(vb2_dvb_dealloc_frontends);
diff --git a/kernel/drivers/media/v4l2-core/videobuf2-memops.c b/kernel/drivers/media/v4l2-core/videobuf2-memops.c
new file mode 100644
index 000000000..81c1ad8b2
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/videobuf2-memops.c
@@ -0,0 +1,187 @@
+/*
+ * videobuf2-memops.c - generic memory handling routines for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-memops.h>
+
+/**
+ * vb2_get_vma() - acquire and lock the virtual memory area
+ * @vma: given virtual memory area
+ *
+ * This function attempts to acquire an area mapped in userspace for the
+ * duration of a hardware operation. The area is "locked" by performing the
+ * same set of operations that are done when a process calls fork() and
+ * memory areas are duplicated.
+ *
+ * Returns a copy of the virtual memory region on success or NULL.
+ */
+struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
+{
+ struct vm_area_struct *vma_copy;
+
+ vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+ if (vma_copy == NULL)
+ return NULL;
+
+ if (vma->vm_ops && vma->vm_ops->open)
+ vma->vm_ops->open(vma);
+
+ if (vma->vm_file)
+ get_file(vma->vm_file);
+
+ memcpy(vma_copy, vma, sizeof(*vma));
+
+ vma_copy->vm_mm = NULL;
+ vma_copy->vm_next = NULL;
+ vma_copy->vm_prev = NULL;
+
+ return vma_copy;
+}
+EXPORT_SYMBOL_GPL(vb2_get_vma);
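+
+/*
+ * A minimal pairing sketch: every successful vb2_get_vma() must be
+ * balanced by vb2_put_vma() once the hardware operation completes:
+ *
+ *	vma_copy = vb2_get_vma(vma);
+ *	if (!vma_copy)
+ *		return -ENOMEM;
+ *	... DMA using the locked area ...
+ *	vb2_put_vma(vma_copy);
+ */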
+
+/**
+ * vb2_put_vma() - release a userspace virtual memory area
+ * @vma: virtual memory region associated with the area to be released
+ *
+ * This function releases the previously acquired memory area after a hardware
+ * operation.
+ */
+void vb2_put_vma(struct vm_area_struct *vma)
+{
+ if (!vma)
+ return;
+
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
+ kfree(vma);
+}
+EXPORT_SYMBOL_GPL(vb2_put_vma);
+
+/**
+ * vb2_get_contig_userptr() - lock physically contiguous userspace-mapped memory
+ * @vaddr: starting virtual address of the area to be verified
+ * @size: size of the area
+ * @res_vma: will return a locked copy of the struct vm_area for the given area
+ * @res_pa: will return the physical address for the given vaddr
+ *
+ * This function walks the memory area of size @size mapped at @vaddr and
+ * verifies that the underlying physical pages are contiguous. If they are,
+ * the virtual memory area is locked, @res_vma is filled with a copy of it
+ * and @res_pa is set to the physical address of the buffer.
+ *
+ * Returns 0 on success.
+ */
+int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
+ struct vm_area_struct **res_vma, dma_addr_t *res_pa)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long offset, start, end;
+ unsigned long this_pfn, prev_pfn;
+ dma_addr_t pa = 0;
+
+ start = vaddr;
+ offset = start & ~PAGE_MASK;
+ end = start + size;
+
+ vma = find_vma(mm, start);
+
+ if (vma == NULL || vma->vm_end < end)
+ return -EFAULT;
+
+ for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
+ int ret = follow_pfn(vma, start, &this_pfn);
+ if (ret)
+ return ret;
+
+ if (prev_pfn == 0)
+ pa = this_pfn << PAGE_SHIFT;
+ else if (this_pfn != prev_pfn + 1)
+ return -EFAULT;
+
+ prev_pfn = this_pfn;
+ }
+
+ /*
+ * Memory is contiguous, lock the vma and return to the caller
+ */
+ *res_vma = vb2_get_vma(vma);
+ if (*res_vma == NULL)
+ return -ENOMEM;
+
+ *res_pa = pa + offset;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
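+
+/*
+ * A usage sketch, assuming the caller holds current->mm->mmap_sem (as the
+ * vb2 core does for USERPTR buffers) and a hypothetical "buf" context:
+ *
+ *	ret = vb2_get_contig_userptr(vaddr, size, &buf->vma, &buf->dma_addr);
+ *	if (ret)
+ *		return ret;
+ *	... program the DMA engine with buf->dma_addr ...
+ *	vb2_put_vma(buf->vma);
+ */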
+
+/**
+ * vb2_common_vm_open() - increase refcount of the vma
+ * @vma: virtual memory region for the mapping
+ *
+ * This function adds another user to the provided vma. It expects
+ * struct vb2_vmarea_handler pointer in vma->vm_private_data.
+ */
+static void vb2_common_vm_open(struct vm_area_struct *vma)
+{
+ struct vb2_vmarea_handler *h = vma->vm_private_data;
+
+ pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+ __func__, h, atomic_read(h->refcount), vma->vm_start,
+ vma->vm_end);
+
+ atomic_inc(h->refcount);
+}
+
+/**
+ * vb2_common_vm_close() - decrease refcount of the vma
+ * @vma: virtual memory region for the mapping
+ *
+ * This function releases the user from the provided vma. It expects
+ * struct vb2_vmarea_handler pointer in vma->vm_private_data.
+ */
+static void vb2_common_vm_close(struct vm_area_struct *vma)
+{
+ struct vb2_vmarea_handler *h = vma->vm_private_data;
+
+ pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+ __func__, h, atomic_read(h->refcount), vma->vm_start,
+ vma->vm_end);
+
+ h->put(h->arg);
+}
+
+/**
+ * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmapped
+ * video buffers
+ */
+const struct vm_operations_struct vb2_common_vm_ops = {
+ .open = vb2_common_vm_open,
+ .close = vb2_common_vm_close,
+};
+EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
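+
+/*
+ * Wiring sketch: an allocator's mmap handler points vm_private_data at its
+ * struct vb2_vmarea_handler and installs these ops so that munmap() and
+ * fork() keep the buffer refcount balanced (vb2_vmalloc_mmap() in
+ * videobuf2-vmalloc.c is one real instance):
+ *
+ *	vma->vm_private_data = &buf->handler;
+ *	vma->vm_ops = &vb2_common_vm_ops;
+ *	vma->vm_ops->open(vma);
+ */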
+
+MODULE_DESCRIPTION("common memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/media/v4l2-core/videobuf2-vmalloc.c b/kernel/drivers/media/v4l2-core/videobuf2-vmalloc.c
new file mode 100644
index 000000000..657ab302a
--- /dev/null
+++ b/kernel/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -0,0 +1,464 @@
+/*
+ * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-memops.h>
+
+struct vb2_vmalloc_buf {
+ void *vaddr;
+ struct page **pages;
+ struct vm_area_struct *vma;
+ enum dma_data_direction dma_dir;
+ unsigned long size;
+ unsigned int n_pages;
+ atomic_t refcount;
+ struct vb2_vmarea_handler handler;
+ struct dma_buf *dbuf;
+};
+
+static void vb2_vmalloc_put(void *buf_priv);
+
+static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
+ enum dma_data_direction dma_dir, gfp_t gfp_flags)
+{
+ struct vb2_vmalloc_buf *buf;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
+ if (!buf)
+ return NULL;
+
+ buf->size = size;
+ buf->vaddr = vmalloc_user(buf->size);
+ buf->dma_dir = dma_dir;
+ buf->handler.refcount = &buf->refcount;
+ buf->handler.put = vb2_vmalloc_put;
+ buf->handler.arg = buf;
+
+ if (!buf->vaddr) {
+ pr_debug("vmalloc of size %ld failed\n", buf->size);
+ kfree(buf);
+ return NULL;
+ }
+
+ atomic_inc(&buf->refcount);
+ return buf;
+}
+
+static void vb2_vmalloc_put(void *buf_priv)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+
+ if (atomic_dec_and_test(&buf->refcount)) {
+ vfree(buf->vaddr);
+ kfree(buf);
+ }
+}
+
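+/*
+ * USERPTR buffers are handled in one of two ways below: VM_PFNMAP areas
+ * (e.g. reserved or frame-buffer memory) must be physically contiguous and
+ * are mapped with ioremap_nocache(), while ordinary anonymous or
+ * file-backed memory is pinned page by page with get_user_pages() and
+ * mapped with vm_map_ram().
+ */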
+static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+ unsigned long size,
+ enum dma_data_direction dma_dir)
+{
+ struct vb2_vmalloc_buf *buf;
+ unsigned long first, last;
+ int n_pages, offset;
+ struct vm_area_struct *vma;
+ dma_addr_t physp;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->dma_dir = dma_dir;
+ offset = vaddr & ~PAGE_MASK;
+ buf->size = size;
+
+ vma = find_vma(current->mm, vaddr);
+ if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
+ if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
+ goto fail_pages_array_alloc;
+ buf->vma = vma;
+ buf->vaddr = (__force void *)ioremap_nocache(physp, size);
+ if (!buf->vaddr)
+ goto fail_pages_array_alloc;
+ } else {
+ first = vaddr >> PAGE_SHIFT;
+ last = (vaddr + size - 1) >> PAGE_SHIFT;
+ buf->n_pages = last - first + 1;
+ buf->pages = kcalloc(buf->n_pages, sizeof(*buf->pages), GFP_KERNEL);
+ if (!buf->pages)
+ goto fail_pages_array_alloc;
+
+ /* current->mm->mmap_sem is taken by videobuf2 core */
+ n_pages = get_user_pages(current, current->mm,
+ vaddr & PAGE_MASK, buf->n_pages,
+ dma_dir == DMA_FROM_DEVICE,
+ 1, /* force */
+ buf->pages, NULL);
+ if (n_pages != buf->n_pages)
+ goto fail_get_user_pages;
+
+ buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
+ PAGE_KERNEL);
+ if (!buf->vaddr)
+ goto fail_get_user_pages;
+ }
+
+ buf->vaddr += offset;
+ return buf;
+
+fail_get_user_pages:
+ pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages,
+ buf->n_pages);
+ while (--n_pages >= 0)
+ put_page(buf->pages[n_pages]);
+ kfree(buf->pages);
+
+fail_pages_array_alloc:
+ kfree(buf);
+
+ return NULL;
+}
+
+static void vb2_vmalloc_put_userptr(void *buf_priv)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+ unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
+ unsigned int i;
+
+ if (buf->pages) {
+ if (vaddr)
+ vm_unmap_ram((void *)vaddr, buf->n_pages);
+ for (i = 0; i < buf->n_pages; ++i) {
+ if (buf->dma_dir == DMA_FROM_DEVICE)
+ set_page_dirty_lock(buf->pages[i]);
+ put_page(buf->pages[i]);
+ }
+ kfree(buf->pages);
+ } else {
+ vb2_put_vma(buf->vma);
+ iounmap((__force void __iomem *)buf->vaddr);
+ }
+ kfree(buf);
+}
+
+static void *vb2_vmalloc_vaddr(void *buf_priv)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+
+ if (!buf->vaddr) {
+ pr_err("Address of an unallocated plane requested "
+ "or cannot map user pointer\n");
+ return NULL;
+ }
+
+ return buf->vaddr;
+}
+
+static unsigned int vb2_vmalloc_num_users(void *buf_priv)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+ return atomic_read(&buf->refcount);
+}
+
+static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+ int ret;
+
+ if (!buf) {
+ pr_err("No memory to map\n");
+ return -EINVAL;
+ }
+
+ ret = remap_vmalloc_range(vma, buf->vaddr, 0);
+ if (ret) {
+ pr_err("Remapping vmalloc memory, error: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Make sure that vm_areas for 2 buffers won't be merged together
+ */
+ vma->vm_flags |= VM_DONTEXPAND;
+
+ /*
+ * Use common vm_area operations to track buffer refcount.
+ */
+ vma->vm_private_data = &buf->handler;
+ vma->vm_ops = &vb2_common_vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ return 0;
+}
+
+#ifdef CONFIG_HAS_DMA
+/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_vmalloc_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dma_dir;
+};
+
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+ struct dma_buf_attachment *dbuf_attach)
+{
+ struct vb2_vmalloc_attachment *attach;
+ struct vb2_vmalloc_buf *buf = dbuf->priv;
+ int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ void *vaddr = buf->vaddr;
+ int ret;
+ int i;
+
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+ ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return ret;
+ }
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ struct page *page = vmalloc_to_page(vaddr);
+
+ if (!page) {
+ sg_free_table(sgt);
+ kfree(attach);
+ return -ENOMEM;
+ }
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ vaddr += PAGE_SIZE;
+ }
+
+ attach->dma_dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+ return 0;
+}
+
+static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
+{
+ struct vb2_vmalloc_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dma_dir != DMA_NONE)
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+ struct vb2_vmalloc_attachment *attach = db_attach->priv;
+ /* stealing dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+ int ret;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dma_dir == dma_dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dma_dir != DMA_NONE) {
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir);
+ attach->dma_dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
+ if (ret <= 0) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dma_dir = dma_dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_vmalloc_get_dmabuf */
+ vb2_vmalloc_put(dbuf->priv);
+}
+
+static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+ struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+ return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+ struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+ return buf->vaddr;
+}
+
+static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
+{
+ return vb2_vmalloc_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
+ .attach = vb2_vmalloc_dmabuf_ops_attach,
+ .detach = vb2_vmalloc_dmabuf_ops_detach,
+ .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
+ .kmap = vb2_vmalloc_dmabuf_ops_kmap,
+ .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
+ .vmap = vb2_vmalloc_dmabuf_ops_vmap,
+ .mmap = vb2_vmalloc_dmabuf_ops_mmap,
+ .release = vb2_vmalloc_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &vb2_vmalloc_dmabuf_ops;
+ exp_info.size = buf->size;
+ exp_info.flags = flags;
+ exp_info.priv = buf;
+
+ if (WARN_ON(!buf->vaddr))
+ return NULL;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ atomic_inc(&buf->refcount);
+
+ return dbuf;
+}
+#endif /* CONFIG_HAS_DMA */
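+
+/*
+ * An export sketch: the vb2 core invokes get_dmabuf() to service
+ * VIDIOC_EXPBUF, and the returned dma_buf is then wrapped in a file
+ * descriptor roughly as follows (hypothetical caller):
+ *
+ *	dbuf = vb2_vmalloc_get_dmabuf(vb->planes[0].mem_priv, O_CLOEXEC);
+ *	if (!dbuf)
+ *		return -EINVAL;
+ *	fd = dma_buf_fd(dbuf, O_CLOEXEC);
+ */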
+
+
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_vmalloc_map_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ buf->vaddr = dma_buf_vmap(buf->dbuf);
+
+ return buf->vaddr ? 0 : -EFAULT;
+}
+
+static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ dma_buf_vunmap(buf->dbuf, buf->vaddr);
+ buf->vaddr = NULL;
+}
+
+static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ if (buf->vaddr)
+ dma_buf_vunmap(buf->dbuf, buf->vaddr);
+
+ kfree(buf);
+}
+
+static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, enum dma_data_direction dma_dir)
+{
+ struct vb2_vmalloc_buf *buf;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dbuf = dbuf;
+ buf->dma_dir = dma_dir;
+ buf->size = size;
+
+ return buf;
+}
+
+
+const struct vb2_mem_ops vb2_vmalloc_memops = {
+ .alloc = vb2_vmalloc_alloc,
+ .put = vb2_vmalloc_put,
+ .get_userptr = vb2_vmalloc_get_userptr,
+ .put_userptr = vb2_vmalloc_put_userptr,
+#ifdef CONFIG_HAS_DMA
+ .get_dmabuf = vb2_vmalloc_get_dmabuf,
+#endif
+ .map_dmabuf = vb2_vmalloc_map_dmabuf,
+ .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
+ .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
+ .detach_dmabuf = vb2_vmalloc_detach_dmabuf,
+ .vaddr = vb2_vmalloc_vaddr,
+ .mmap = vb2_vmalloc_mmap,
+ .num_users = vb2_vmalloc_num_users,
+};
+EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
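+
+/*
+ * A queue-setup sketch: a driver selects this allocator by pointing its
+ * vb2_queue at these mem_ops before calling vb2_queue_init():
+ *
+ *	q->mem_ops = &vb2_vmalloc_memops;
+ *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ *	ret = vb2_queue_init(q);
+ */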
+
+MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+MODULE_LICENSE("GPL");