path: root/drivers/scsi/mac_esp.c
Commit history (commit message, author, date, files changed, lines -deleted/+added):

* scsi: esp_scsi: Track residual for PIO transfers (Finn Thain, 2018-11-13, 1 file, -0/+2)

  [ Upstream commit fd47d919d0c336e7c22862b51ee94927ffea227a ]

  If a target disconnects during a PIO data transfer the command may fail
  when the target reconnects:

    scsi host1: DMA length is zero!
    scsi host1: cur adr[04380000] len[00000000]

  The scsi bus is then reset. This happens because the residual reached
  zero before the transfer was completed. The usual residual calculation
  relies on the Transfer Count registers. That works for DMA transfers but
  not for PIO transfers. Fix the problem by storing the PIO transfer
  residual and using that to correctly calculate bytes_sent.

  Fixes: 6fe07aaffbf0 ("[SCSI] m68k: new mac_esp scsi driver")
  Tested-by: Stan Johnson <userm57@yahoo.com>
  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Tested-by: Michael Schmitz <schmitzmic@gmail.com>
  Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
  Signed-off-by: Sasha Levin <sashal@kernel.org>
  Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

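  To illustrate the idea with a toy model (the names below are
  hypothetical, not the upstream diff): the PIO loop records how many
  bytes it left untransferred, and the byte-count calculation prefers that
  recorded residual over the chip's Transfer Count registers, which only
  track DMA transfers.

	struct esp_model {
		unsigned int tc_regs;       /* what the Transfer Count registers report */
		unsigned int pio_residual;  /* recorded when the PIO loop exits */
	};

	static unsigned int bytes_sent(const struct esp_model *esp,
				       unsigned int requested)
	{
		/* a PIO transfer leaves the TC registers untouched, so
		 * prefer the residual recorded by the PIO loop, if any */
		unsigned int not_sent = esp->pio_residual ? esp->pio_residual
							  : esp->tc_regs;

		return requested - not_sent;
	}
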
* scsi: mac_esp: Fix PIO transfers for MESSAGE IN phase (Finn Thain, 2017-08-11, 1 file, -17/+18)

  When in MESSAGE IN phase, the ESP device does not automatically
  acknowledge each byte that is transferred by PIO. The mac_esp driver
  neglects to explicitly ack them, which causes a timeout during messages
  larger than one byte (e.g. tag bytes during reconnect). Fix this with an
  ESP_CMD_MOK command after each byte.

  The MESSAGE IN phase is also different in that each byte transferred
  raises ESP_INTR_FDONE. So don't exit the transfer loop for this
  interrupt, for this phase. That resolves the "Reconnect IRQ2 timeout"
  error on those Macs which use PIO transfers instead of PDMA.

  This patch also improves on the weak tests for unexpected interrupts and
  phase changes during PIO transfers.

  Tested-by: Stan Johnson <userm57@yahoo.com>
  Fixes: 02507a80b35e ("[PATCH] [SCSI] mac_esp: fix PIO mode, take 2")
  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

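  A rough sketch of the resulting loop shape (the helpers below are
  hypothetical stand-ins for the driver's PIO primitives, not its actual
  functions): each byte read from the FIFO is followed by an explicit
  message-accepted command, and the per-byte function-complete interrupt
  is not treated as a reason to leave the loop.

	extern unsigned char read_fifo_byte(void);  /* PIO read of the ESP FIFO */
	extern void send_message_accepted(void);    /* would issue ESP_CMD_MOK */
	extern int fifo_byte_ready(void);

	static void pio_message_in(unsigned char *dst, unsigned int count)
	{
		while (count && fifo_byte_ready()) {
			*dst++ = read_fifo_byte();
			count--;

			/* the chip does not ack MESSAGE IN bytes by itself */
			send_message_accepted();

			/*
			 * ESP_INTR_FDONE is raised for every byte in this
			 * phase, so it must not terminate the loop here.
			 */
		}
	}
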
* scsi: mac_esp: Avoid type warning from sparse (Finn Thain, 2017-08-11, 1 file, -1/+1)

  Avoid the following warning from "make C=1":

    CHECK   drivers/scsi/mac_esp.c
    drivers/scsi/mac_esp.c:357:30: warning: incorrect type in initializer (different address spaces)
    drivers/scsi/mac_esp.c:357:30:    expected unsigned char [usertype] *fifo
    drivers/scsi/mac_esp.c:357:30:    got void [noderef] <asn:2>*

  Tested-by: Stan Johnson <userm57@yahoo.com>
  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

* Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi (Linus Torvalds, 2017-05-04, 1 file, -11/+24)

  Pull SCSI updates from James Bottomley:
   "This update includes the usual round of major driver updates
    (hisi_sas, ufs, fnic, cxlflash, be2iscsi, ipr, stex). There's also the
    usual amount of cosmetic and spelling stuff"

  * tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (155 commits)
    scsi: qla4xxx: fix spelling mistake: "Tempalate" -> "Template"
    scsi: stex: make S6flag static
    scsi: mac_esp: fix to pass correct device identity to free_irq()
    scsi: aacraid: pci_alloc_consistent() failures on ARM64
    scsi: ufs: make ufshcd_get_lists_status() register operation obvious
    scsi: ufs: use MASK_EE_STATUS
    scsi: mac_esp: Replace bogus memory barrier with spinlock
    scsi: fcoe: make fcoe_e_d_tov and fcoe_r_a_tov static
    scsi: sd_zbc: Do not write lock zones for reset
    scsi: sd_zbc: Remove superfluous assignments
    scsi: sd: sd_zbc: Rename sd_zbc_setup_write_cmnd
    scsi: Improve scsi_get_sense_info_fld
    scsi: sd: Cleanup sd_done sense data handling
    scsi: sd: Improve sd_completed_bytes
    scsi: sd: Fix function descriptions
    scsi: mpt3sas: remove redundant wmb
    scsi: mpt: Move scsi_remove_host() out of mptscsih_remove_host()
    scsi: sg: reset 'res_in_use' after unlinking reserved array
    scsi: mvumi: remove code handling zero scsi_sg_count(scmd) case
    scsi: fusion: fix spelling mistake: "Persistancy" -> "Persistency"
    ...

* scsi: mac_esp: fix to pass correct device identity to free_irq() (Wei Yongjun, 2017-04-27, 1 file, -1/+1)

  free_irq() expects the same device identity that was passed to the
  corresponding request_irq(), otherwise the IRQ is not freed.

  Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

* scsi: mac_esp: Replace bogus memory barrier with spinlock (Finn Thain, 2017-04-25, 1 file, -10/+23)

  Commit da244654c66e ("[SCSI] mac_esp: fix for quadras with two esp
  chips") added mac_scsi_esp_intr() to handle the IRQ lines from a pair of
  on-board ESP chips (a normal shared IRQ did not work). Proper mutual
  exclusion was missing from that patch. This patch fixes race conditions
  between comparison and assignment of esp_chips[] pointers.

  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Reviewed-by: Michael Schmitz <schmitzmic@gmail.com>
  Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

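  A minimal sketch of that locking pattern (simplified, with hypothetical
  names; the real code is in drivers/scsi/mac_esp.c): the check of an
  esp_chips[] slot and the assignment to it are done under one spinlock,
  so a concurrent probe, remove or interrupt handler cannot slip in
  between them. A memory barrier only orders accesses and provides no
  such mutual exclusion.

	#include <linux/errno.h>
	#include <linux/spinlock.h>

	struct esp;			/* from drivers/scsi/esp_scsi.h */

	static struct esp *esp_chips[2];
	static DEFINE_SPINLOCK(esp_chips_lock);

	static int claim_chip_slot(int id, struct esp *esp)
	{
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&esp_chips_lock, flags);
		if (esp_chips[id])
			ret = -EBUSY;		/* slot already claimed */
		else
			esp_chips[id] = esp;
		spin_unlock_irqrestore(&esp_chips_lock, flags);

		return ret;
	}
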
* drivers: Clean up duplicated email address (Finn Thain, 2017-03-24, 1 file, -1/+1)

  My email address may be found in the git commit logs and in MAINTAINERS.
  Remove duplicate addresses so they won't have to be kept up-to-date.

  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Signed-off-by: Jiri Kosina <jkosina@suse.cz>

* scsi: drop owner assignment from platform_drivers (Wolfram Sang, 2014-10-20, 1 file, -1/+0)

  A platform_driver does not need to set an owner, it will be populated by
  the driver core.

  Signed-off-by: Wolfram Sang <wsa@the-dreams.de>

* Drivers: scsi: remove __dev* attributes. (Greg Kroah-Hartman, 2013-01-04, 1 file, -3/+3)

  CONFIG_HOTPLUG is going away as an option. As a result, the __dev*
  markings need to be removed. This change removes the use of __devinit,
  __devexit_p, __devinitdata, __devinitconst, and __devexit from these
  drivers. Based on patches originally written by Bill Pemberton, but
  redone by me in order to handle some of the coding style issues better,
  by hand.

  Cc: Bill Pemberton <wfp5p@virginia.edu>
  Cc: Adam Radford <linuxraid@lsi.com>
  Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
  Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

* mac_esp: rename irq (Finn Thain, 2012-01-22, 1 file, -2/+1)

  Rename the "Mac ESP" irq as "ESP" to be consistent with all the other
  Mac drivers and ESP drivers.

  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>

* m68k/mac: cleanup mac_irq_pending (Finn Thain, 2011-12-10, 1 file, -1/+2)

  mac_irq_pending() has only one caller (mac_esp.c). Nothing tests for
  Baboon, PSC or OSS pending interrupts. Until that need arises, let's
  keep it simple and remove all the unused abstraction. Replace it with a
  routine to check for SCSI DRQ.

  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>

* [SCSI] mac_esp: remove redundant mutual exclusion (Finn Thain, 2011-09-22, 1 file, -9/+0)

  Mutual exclusion is redundant here because all the paths in the call
  graph leading to esp_driver_ops.send_dma_cmd() happen under
  spin_lock_irqsave()/spin_unlock_irqrestore(). Remove it. Tested on a Mac
  Quadra 660av and a Mac LC 630.

  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Signed-off-by: James Bottomley <JBottomley@Parallels.com>

* include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h (Tejun Heo, 2010-03-30, 1 file, -0/+1)

  percpu.h is included by sched.h and module.h and thus ends up being
  included when building most .c files. percpu.h includes slab.h which in
  turn includes gfp.h making everything defined by the two files
  universally available and complicating inclusion dependencies.

  percpu.h -> slab.h dependency is about to be removed. Prepare for this
  change by updating users of gfp and slab facilities include those
  headers directly instead of assuming availability. As this conversion
  needs to touch large number of source files, the following script is
  used as the basis of conversion.

    http://userweb.kernel.org/~tj/misc/slabh-sweep.py

  The script does the followings.

  * Scan files for gfp and slab usages and update includes such that only
    the necessary includes are there. ie. if only gfp is used, gfp.h, if
    slab is used, slab.h.

  * When the script inserts a new include, it looks at the include blocks
    and try to put the new include such that its order conforms to its
    surrounding. It's put in the include block which contains core kernel
    includes, in the same order that the rest are ordered - alphabetical,
    Christmas tree, rev-Xmas-tree or at the end if there doesn't seem to
    be any matching order.

  * If the script can't find a place to put a new include (mostly because
    the file doesn't have fitting include block), it prints out an error
    message indicating which .h file needs to be added to the file.

  The conversion was done in the following steps.

  1. The initial automatic conversion of all .c files updated slightly
     over 4000 files, deleting around 700 includes and adding ~480 gfp.h
     and ~3000 slab.h inclusions. The script emitted errors for ~400
     files.

  2. Each error was manually checked. Some didn't need the inclusion, some
     needed manual addition while adding it to implementation .h or
     embedding .c file was more appropriate for others. This step added
     inclusions to around 150 files.

  3. The script was run again and the output was compared to the edits
     from #2 to make sure no file was left behind.

  4. Several build tests were done and a couple of problems were fixed.
     e.g. lib/decompress_*.c used malloc/free() wrappers around slab APIs
     requiring slab.h to be added manually.

  5. The script was run on all .h files but without automatically editing
     them as sprinkling gfp.h and slab.h inclusions around .h files could
     easily lead to inclusion dependency hell. Most gfp.h inclusion
     directives were ignored as stuff from gfp.h was usually wildly
     available and often used in preprocessor macros. Each slab.h
     inclusion directive was examined and added manually as necessary.

  6. percpu.h was updated not to include slab.h.

  7. Build test were done on the following configurations and failures
     were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
     distributed build env didn't work with gcov compiles) and a few more
     options had to be turned off depending on archs to make things build
     (like ipr on powerpc/64 which failed due to missing writeq).

     * x86 and x86_64 UP and SMP allmodconfig and a custom test config.
     * powerpc and powerpc64 SMP allmodconfig
     * sparc and sparc64 SMP allmodconfig
     * ia64 SMP allmodconfig
     * s390 SMP allmodconfig
     * alpha SMP allmodconfig
     * um on x86_64 SMP allmodconfig

  8. percpu.h modifications were reverted so that it could be applied as a
     separate patch and serve as bisection point.

  Given the fact that I had only a couple of failures from tests on step
  6, I'm fairly confident about the coverage of this conversion patch. If
  there is a breakage, it's likely to be something in one of the arch
  headers which should be easily discoverable easily on most builds of the
  specific arch.

  Signed-off-by: Tejun Heo <tj@kernel.org>
  Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
  Cc: Ingo Molnar <mingo@redhat.com>
  Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>

* mac68k: move mac_esp platform device (Finn Thain, 2010-02-27, 1 file, -52/+5)

  Move platform device code from the driver to the platform init function.

  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>

* [SCSI] mac_esp: fix PIO mode, take 2 (Finn Thain, 2010-01-18, 1 file, -46/+49)

  The mac_esp PIO algorithm no longer works in 2.6.31 and crashes my
  Centris 660av. So here's a better one. Also, force async with
  esp_set_offset() rather than esp_slave_configure().

  One of the SCSI drives I tested still doesn't like the PIO mode and
  fails with "esp: esp0: Reconnect IRQ2 timeout" (the same drive works
  fine in PDMA mode). This failure happens when esp_reconnect_with_tag()
  tries to read in two tag bytes but the chip only provides one (0x20). I
  don't know what causes this. I decided not to waste any more time trying
  to fix it because the best solution is to rip out the PIO mode
  altogether and use the DMA engine.

  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Signed-off-by: James Bottomley <James.Bottomley@suse.de>

* [SCSI] mac_esp: fix for quadras with two esp chips (Finn Thain, 2008-12-29, 1 file, -23/+58)

  On the Quadra 900 and 950 there are two ESP chips sharing one IRQ.
  Because the shared IRQ is edge-triggered, we must make sure that an IRQ
  transition from one chip doesn't go unnoticed when the shared IRQ is
  already active due to the other. This patch prevents interrupts getting
  lost so that both SCSI busses may be used simultaneously.

  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
  Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>

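  A sketch of the usual way to avoid losing edges on such a line
  (simplified; chip_irq_pending() is a hypothetical placeholder for
  however the chip's interrupt status is actually polled, and the
  interrupt headers and esp_chips[] array are assumed): the handler keeps
  servicing both chips until neither reports a pending interrupt, so an
  edge raised by one chip while the other is being handled is still seen
  before the handler returns.

	static irqreturn_t shared_esp_intr(int irq, void *dev_id)
	{
		int handled = 0, pending;

		do {
			pending = 0;
			if (esp_chips[0] && chip_irq_pending(esp_chips[0])) {
				scsi_esp_intr(irq, esp_chips[0]);
				pending = 1;
			}
			if (esp_chips[1] && chip_irq_pending(esp_chips[1])) {
				scsi_esp_intr(irq, esp_chips[1]);
				pending = 1;
			}
			handled |= pending;
		} while (pending);

		return handled ? IRQ_HANDLED : IRQ_NONE;
	}
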
* [SCSI] m68k: mac_esp asm fix (Finn Thain, 2008-12-29, 1 file, -9/+10)

  Fix asm constraints and arguments so as not to transfer an odd byte when
  there may be more words to transfer. The bug would probably also cause
  exceptions sometimes by transferring one too many bytes.

  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
  Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>

* MODULE_LICENSE expects "GPL v2", not "GPLv2" (Al Viro, 2008-05-22, 1 file, -1/+1)

  ... and we have few enough places using the latter to make it simpler to
  do search and replace...

  Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
  Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

* [SCSI] m68k: new mac_esp scsi driver (Finn Thain, 2008-04-27, 1 file, -0/+657)

  Replace the mac_esp driver with a new one based on the esp_scsi core.
  For esp_scsi: add support for sync transfers for the PIO mode, add a new
  esp_driver_ops method to get the maximum dma transfer size (like the old
  NCR53C9x driver), and some cleanups.

  Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
  Acked-by: David S. Miller <davem@davemloft.net>
  Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
  Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>

* [SCSI] remove m68k NCR53C9x based drivers (James Bottomley, 2008-02-08, 1 file, -751/+0)

  These drivers depend on the deprecated NCR53C9X core and need to be
  converted to the esp_scsi core.

  Acked-by: Boaz Harrosh <bharrosh@panasas.com>
  Cc: Linux/m68k <linux-m68k@vger.kernel.org>
  Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>

* [TC] dec_esp: Driver model for the PMAZ-A (Maciej W. Rozycki, 2007-02-09, 1 file, -1/+1)

  This is a set of changes that converts the PMAZ-A support to the driver
  model. The use of the driver model required switching to the hotplug
  SCSI initialization model, which in turn required a change to the core
  NCR53C9x driver. I decided not to break all the frontend drivers and
  introduced an additional parameter for esp_allocate() to select between
  the old and the new model. I hope this is OK, but I would be fine with
  converting NCR53C9x to the new model unconditionally as long as I do not
  have to fix all the other frontends (OK, perhaps I could do some of them
  ;-) ).

  Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
  Cc: James Bottomley <James.Bottomley@steeleye.com>
  Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
  Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

* IRQ: Maintain regs pointer globally rather than passing to IRQ handlers (David Howells, 2006-10-05, 1 file, -7/+7)

  Maintain a per-CPU global "struct pt_regs *" variable which can be used
  instead of passing regs around manually through all ~1800 interrupt
  handlers in the Linux kernel.

  The regs pointer is used in few places, but it potentially costs both
  stack space and code to pass it around. On the FRV arch, removing the
  regs parameter from all the genirq function results in a 20% speed up of
  the IRQ exit path (ie: from leaving timer_interrupt() to leaving
  do_IRQ()).

  Where appropriate, an arch may override the generic storage facility and
  do something different with the variable. On FRV, for instance, the
  address is maintained in GR28 at all times inside the kernel as part of
  general exception handling.

  Having looked over the code, it appears that the parameter may be handed
  down through up to twenty or so layers of functions. Consider a USB
  character device attached to a USB hub, attached to a USB controller
  that posts its interrupts through a cascaded auxiliary interrupt
  controller. A character device driver may want to pass regs to the sysrq
  handler through the input layer which adds another few layers of
  parameter passing.

  I've build this code with allyesconfig for x86_64 and i386. I've
  runtested the main part of the code on FRV and i386, though I can't test
  most of the drivers. I've also done partial conversion for powerpc and
  MIPS - these at least compile with minimal configurations.

  This will affect all archs. Mostly the changes should be relatively
  easy. Take do_IRQ(), store the regs pointer at the beginning, saving the
  old one:

	struct pt_regs *old_regs = set_irq_regs(regs);

  And put the old one back at the end:

	set_irq_regs(old_regs);

  Don't pass regs through to generic_handle_irq() or __do_IRQ().

  In timer_interrupt(), this sort of change will be necessary:

	-	update_process_times(user_mode(regs));
	-	profile_tick(CPU_PROFILING, regs);
	+	update_process_times(user_mode(get_irq_regs()));
	+	profile_tick(CPU_PROFILING);

  I'd like to move update_process_times()'s use of get_irq_regs() into
  itself, except that i386, alone of the archs, uses something other than
  user_mode().

  Some notes on the interrupt handling in the drivers:

   (*) input_dev() is now gone entirely. The regs pointer is no longer
       stored in the input_dev struct.

   (*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It
       does something different depending on whether it's been supplied
       with a regs pointer or not.

   (*) Various IRQ handler function pointers have been moved to type
       irq_handler_t.

  Signed-Off-By: David Howells <dhowells@redhat.com>
  (cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)

* [PATCH] m68k: convert mac irq code (Roman Zippel, 2006-06-25, 1 file, -5/+2)

  Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
  Signed-off-by: Andrew Morton <akpm@osdl.org>
  Signed-off-by: Linus Torvalds <torvalds@osdl.org>

* [SCSI] remove Scsi_Host_Template typedef (Christoph Hellwig, 2005-11-09, 1 file, -2/+2)

  Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>

* Linux-2.6.12-rc2 (Linus Torvalds, 2005-04-17, 1 file, -0/+754)

  Initial git repository build. I'm not bothering with the full history,
  even though we have it. We can create a separate "historical" git
  archive of that later if we want to, and in the meantime it's about
  3.2GB when imported into git - space that would just make the early git
  days unnecessarily complicated, when we don't have a lot of good
  infrastructure for it.

  Let it rip!

path: root/drivers/media/platform/sti/hva/hva-v4l2.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics SA 2015
 * Authors: Yannick Fertre <yannick.fertre@st.com>
 *          Hugues Fruchet <hugues.fruchet@st.com>
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>

#include "hva.h"
#include "hva-hw.h"

#define MIN_FRAMES	1
#define MIN_STREAMS	1

#define HVA_MIN_WIDTH	32
#define HVA_MAX_WIDTH	1920
#define HVA_MIN_HEIGHT	32
#define HVA_MAX_HEIGHT	1920

/* HVA requires a 16x16 pixels alignment for frames */
#define HVA_WIDTH_ALIGNMENT	16
#define HVA_HEIGHT_ALIGNMENT	16

#define HVA_DEFAULT_WIDTH	HVA_MIN_WIDTH
#define	HVA_DEFAULT_HEIGHT	HVA_MIN_HEIGHT
#define HVA_DEFAULT_FRAME_NUM	1
#define HVA_DEFAULT_FRAME_DEN	30

#define to_type_str(type) (type == V4L2_BUF_TYPE_VIDEO_OUTPUT ? \
			   "frame" : "stream")

#define fh_to_ctx(f)    (container_of(f, struct hva_ctx, fh))

/* registry of available encoders */
static const struct hva_enc *hva_encoders[] = {
	&nv12h264enc,
	&nv21h264enc,
};

static inline int frame_size(u32 w, u32 h, u32 fmt)
{
	switch (fmt) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
		return (w * h * 3) / 2;
	default:
		return 0;
	}
}

static inline int frame_stride(u32 w, u32 fmt)
{
	switch (fmt) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
		return w;
	default:
		return 0;
	}
}

static inline int frame_alignment(u32 fmt)
{
	switch (fmt) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
		/* multiple of 2 */
		return 2;
	default:
		return 1;
	}
}

static inline int estimated_stream_size(u32 w, u32 h)
{
	/*
	 * HVA only encodes in YUV420 format, whatever the frame format.
	 * A compression ratio of 2 is assumed: thus, the maximum size
	 * of a stream is estimated to ((width x height x 3 / 2) / 2)
	 */
	return (w * h * 3) / 4;
}

static void set_default_params(struct hva_ctx *ctx)
{
	struct hva_frameinfo *frameinfo = &ctx->frameinfo;
	struct hva_streaminfo *streaminfo = &ctx->streaminfo;

	frameinfo->pixelformat = V4L2_PIX_FMT_NV12;
	frameinfo->width = HVA_DEFAULT_WIDTH;
	frameinfo->height = HVA_DEFAULT_HEIGHT;
	frameinfo->aligned_width = ALIGN(frameinfo->width,
					 HVA_WIDTH_ALIGNMENT);
	frameinfo->aligned_height = ALIGN(frameinfo->height,
					  HVA_HEIGHT_ALIGNMENT);
	frameinfo->size = frame_size(frameinfo->aligned_width,
				     frameinfo->aligned_height,
				     frameinfo->pixelformat);

	streaminfo->streamformat = V4L2_PIX_FMT_H264;
	streaminfo->width = HVA_DEFAULT_WIDTH;
	streaminfo->height = HVA_DEFAULT_HEIGHT;

	ctx->colorspace = V4L2_COLORSPACE_REC709;
	ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
	ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	ctx->quantization = V4L2_QUANTIZATION_DEFAULT;

	ctx->max_stream_size = estimated_stream_size(streaminfo->width,
						     streaminfo->height);
}

static const struct hva_enc *hva_find_encoder(struct hva_ctx *ctx,
					      u32 pixelformat,
					      u32 streamformat)
{
	struct hva_dev *hva = ctx_to_hdev(ctx);
	const struct hva_enc *enc;
	unsigned int i;

	for (i = 0; i < hva->nb_of_encoders; i++) {
		enc = hva->encoders[i];
		if ((enc->pixelformat == pixelformat) &&
		    (enc->streamformat == streamformat))
			return enc;
	}

	return NULL;
}

static void register_format(u32 format, u32 formats[], u32 *nb_of_formats)
{
	u32 i;
	bool found = false;

	for (i = 0; i < *nb_of_formats; i++) {
		if (format == formats[i]) {
			found = true;
			break;
		}
	}

	if (!found)
		formats[(*nb_of_formats)++] = format;
}

static void register_formats(struct hva_dev *hva)
{
	unsigned int i;

	for (i = 0; i < hva->nb_of_encoders; i++) {
		register_format(hva->encoders[i]->pixelformat,
				hva->pixelformats,
				&hva->nb_of_pixelformats);

		register_format(hva->encoders[i]->streamformat,
				hva->streamformats,
				&hva->nb_of_streamformats);
	}
}

static void register_encoders(struct hva_dev *hva)
{
	struct device *dev = hva_to_dev(hva);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(hva_encoders); i++) {
		if (hva->nb_of_encoders >= HVA_MAX_ENCODERS) {
			dev_dbg(dev,
				"%s failed to register %s encoder (%d maximum reached)\n",
				HVA_PREFIX, hva_encoders[i]->name,
				HVA_MAX_ENCODERS);
			return;
		}

		hva->encoders[hva->nb_of_encoders++] = hva_encoders[i];
		dev_info(dev, "%s %s encoder registered\n", HVA_PREFIX,
			 hva_encoders[i]->name);
	}
}

static int hva_open_encoder(struct hva_ctx *ctx, u32 streamformat,
			    u32 pixelformat, struct hva_enc **penc)
{
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = ctx_to_dev(ctx);
	struct hva_enc *enc;
	int ret;

	/* find an encoder which can deal with these formats */
	enc = (struct hva_enc *)hva_find_encoder(ctx, pixelformat,
						 streamformat);
	if (!enc) {
		dev_err(dev, "%s no encoder found matching %4.4s => %4.4s\n",
			ctx->name, (char *)&pixelformat, (char *)&streamformat);
		return -EINVAL;
	}

	dev_dbg(dev, "%s one encoder matching %4.4s => %4.4s\n",
		ctx->name, (char *)&pixelformat, (char *)&streamformat);

	/* update instance name */
	snprintf(ctx->name, sizeof(ctx->name), "[%3d:%4.4s]",
		 hva->instance_id, (char *)&streamformat);

	/* open encoder instance */
	ret = enc->open(ctx);
	if (ret) {
		dev_err(dev, "%s failed to open encoder instance (%d)\n",
			ctx->name, ret);
		return ret;
	}

	dev_dbg(dev, "%s %s encoder opened\n", ctx->name, enc->name);

	*penc = enc;

	return ret;
}

static void hva_dbg_summary(struct hva_ctx *ctx)
{
	struct device *dev = ctx_to_dev(ctx);
	struct hva_streaminfo *stream = &ctx->streaminfo;
	struct hva_frameinfo *frame = &ctx->frameinfo;

	if (!(ctx->flags & HVA_FLAG_STREAMINFO))
		return;

	dev_dbg(dev, "%s %4.4s %dx%d > %4.4s %dx%d %s %s: %d frames encoded, %d system errors, %d encoding errors, %d frame errors\n",
		ctx->name,
		(char *)&frame->pixelformat,
		frame->aligned_width, frame->aligned_height,
		(char *)&stream->streamformat,
		stream->width, stream->height,
		stream->profile, stream->level,
		ctx->encoded_frames,
		ctx->sys_errors,
		ctx->encode_errors,
		ctx->frame_errors);
}

/*
 * V4L2 ioctl operations
 */

static int hva_querycap(struct file *file, void *priv,
			struct v4l2_capability *cap)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct hva_dev *hva = ctx_to_hdev(ctx);

	strlcpy(cap->driver, HVA_NAME, sizeof(cap->driver));
	strlcpy(cap->card, hva->vdev->name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 hva->pdev->name);

	return 0;
}

static int hva_enum_fmt_stream(struct file *file, void *priv,
			       struct v4l2_fmtdesc *f)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct hva_dev *hva = ctx_to_hdev(ctx);

	if (unlikely(f->index >= hva->nb_of_streamformats))
		return -EINVAL;

	f->pixelformat = hva->streamformats[f->index];

	return 0;
}

static int hva_enum_fmt_frame(struct file *file, void *priv,
			      struct v4l2_fmtdesc *f)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct hva_dev *hva = ctx_to_hdev(ctx);

	if (unlikely(f->index >= hva->nb_of_pixelformats))
		return -EINVAL;

	f->pixelformat = hva->pixelformats[f->index];

	return 0;
}

static int hva_g_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct hva_streaminfo *streaminfo = &ctx->streaminfo;

	f->fmt.pix.width = streaminfo->width;
	f->fmt.pix.height = streaminfo->height;
	f->fmt.pix.field = V4L2_FIELD_NONE;
	f->fmt.pix.colorspace = ctx->colorspace;
	f->fmt.pix.xfer_func = ctx->xfer_func;
	f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
	f->fmt.pix.quantization = ctx->quantization;
	f->fmt.pix.pixelformat = streaminfo->streamformat;
	f->fmt.pix.bytesperline = 0;
	f->fmt.pix.sizeimage = ctx->max_stream_size;

	return 0;
}

static int hva_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct hva_frameinfo *frameinfo = &ctx->frameinfo;

	f->fmt.pix.width = frameinfo->width;
	f->fmt.pix.height = frameinfo->height;
	f->fmt.pix.field = V4L2_FIELD_NONE;
	f->fmt.pix.colorspace = ctx->colorspace;
	f->fmt.pix.xfer_func = ctx->xfer_func;
	f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
	f->fmt.pix.quantization = ctx->quantization;
	f->fmt.pix.pixelformat = frameinfo->pixelformat;
	f->fmt.pix.bytesperline = frame_stride(frameinfo->aligned_width,
					       frameinfo->pixelformat);
	f->fmt.pix.sizeimage = frameinfo->size;

	return 0;
}

static int hva_try_fmt_stream(struct file *file, void *priv,
			      struct v4l2_format *f)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct device *dev = ctx_to_dev(ctx);
	struct v4l2_pix_format *pix = &f->fmt.pix;
	u32 streamformat = pix->pixelformat;
	const struct hva_enc *enc;
	u32 width, height;
	u32 stream_size;

	enc = hva_find_encoder(ctx, ctx->frameinfo.pixelformat, streamformat);
	if (!enc) {
		dev_dbg(dev,
			"%s V4L2 TRY_FMT (CAPTURE): unsupported format %.4s\n",
			ctx->name, (char *)&pix->pixelformat);
		return -EINVAL;
	}

	width = pix->width;
	height = pix->height;
	if (ctx->flags & HVA_FLAG_FRAMEINFO) {
		/*
		 * if the frame resolution is already fixed, only allow the
		 * same stream resolution
		 */
		pix->width = ctx->frameinfo.width;
		pix->height = ctx->frameinfo.height;
		if ((pix->width != width) || (pix->height != height))
			dev_dbg(dev,
				"%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit frame resolution\n",
				ctx->name, width, height,
				pix->width, pix->height);
	} else {
		/* adjust width & height */
		v4l_bound_align_image(&pix->width,
				      HVA_MIN_WIDTH, enc->max_width,
				      0,
				      &pix->height,
				      HVA_MIN_HEIGHT, enc->max_height,
				      0,
				      0);

		if ((pix->width != width) || (pix->height != height))
			dev_dbg(dev,
				"%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
				ctx->name, width, height,
				pix->width, pix->height);
	}

	stream_size = estimated_stream_size(pix->width, pix->height);
	if (pix->sizeimage < stream_size)
		pix->sizeimage = stream_size;

	pix->bytesperline = 0;
	pix->colorspace = ctx->colorspace;
	pix->xfer_func = ctx->xfer_func;
	pix->ycbcr_enc = ctx->ycbcr_enc;
	pix->quantization = ctx->quantization;
	pix->field = V4L2_FIELD_NONE;

	return 0;
}

static int hva_try_fmt_frame(struct file *file, void *priv,
			     struct v4l2_format *f)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct device *dev = ctx_to_dev(ctx);
	struct v4l2_pix_format *pix = &f->fmt.pix;
	u32 pixelformat = pix->pixelformat;
	const struct hva_enc *enc;
	u32 width, height;

	enc = hva_find_encoder(ctx, pixelformat, ctx->streaminfo.streamformat);
	if (!enc) {
		dev_dbg(dev,
			"%s V4L2 TRY_FMT (OUTPUT): unsupported format %.4s\n",
			ctx->name, (char *)&pixelformat);
		return -EINVAL;
	}

	/* adjust width & height */
	width = pix->width;
	height = pix->height;
	v4l_bound_align_image(&pix->width,
			      HVA_MIN_WIDTH, HVA_MAX_WIDTH,
			      frame_alignment(pixelformat) - 1,
			      &pix->height,
			      HVA_MIN_HEIGHT, HVA_MAX_HEIGHT,
			      frame_alignment(pixelformat) - 1,
			      0);

	if ((pix->width != width) || (pix->height != height))
		dev_dbg(dev,
			"%s V4L2 TRY_FMT (OUTPUT): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
			ctx->name, width, height, pix->width, pix->height);

	width = ALIGN(pix->width, HVA_WIDTH_ALIGNMENT);
	height = ALIGN(pix->height, HVA_HEIGHT_ALIGNMENT);

	if (!pix->colorspace) {
		pix->colorspace = V4L2_COLORSPACE_REC709;
		pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
		pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
		pix->quantization = V4L2_QUANTIZATION_DEFAULT;
	}

	pix->bytesperline = frame_stride(width, pixelformat);
	pix->sizeimage = frame_size(width, height, pixelformat);
	pix->field = V4L2_FIELD_NONE;

	return 0;
}

static int hva_s_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct device *dev = ctx_to_dev(ctx);
	struct vb2_queue *vq;
	int ret;

	ret = hva_try_fmt_stream(file, fh, f);
	if (ret) {
		dev_dbg(dev, "%s V4L2 S_FMT (CAPTURE): unsupported format %.4s\n",
			ctx->name, (char *)&f->fmt.pix.pixelformat);
		return ret;
	}

	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
	if (vb2_is_streaming(vq)) {
		dev_dbg(dev, "%s V4L2 S_FMT (CAPTURE): queue busy\n",
			ctx->name);
		return -EBUSY;
	}

	ctx->max_stream_size = f->fmt.pix.sizeimage;
	ctx->streaminfo.width = f->fmt.pix.width;
	ctx->streaminfo.height = f->fmt.pix.height;
	ctx->streaminfo.streamformat = f->fmt.pix.pixelformat;
	ctx->flags |= HVA_FLAG_STREAMINFO;

	return 0;
}

static int hva_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct device *dev = ctx_to_dev(ctx);
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct vb2_queue *vq;
	int ret;

	ret = hva_try_fmt_frame(file, fh, f);
	if (ret) {
		dev_dbg(dev, "%s V4L2 S_FMT (OUTPUT): unsupported format %.4s\n",
			ctx->name, (char *)&pix->pixelformat);
		return ret;
	}

	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
	if (vb2_is_streaming(vq)) {
		dev_dbg(dev, "%s V4L2 S_FMT (OUTPUT): queue busy\n", ctx->name);
		return -EBUSY;
	}

	ctx->colorspace = pix->colorspace;
	ctx->xfer_func = pix->xfer_func;
	ctx->ycbcr_enc = pix->ycbcr_enc;
	ctx->quantization = pix->quantization;

	ctx->frameinfo.aligned_width = ALIGN(pix->width, HVA_WIDTH_ALIGNMENT);
	ctx->frameinfo.aligned_height = ALIGN(pix->height,
					      HVA_HEIGHT_ALIGNMENT);
	ctx->frameinfo.size = pix->sizeimage;
	ctx->frameinfo.pixelformat = pix->pixelformat;
	ctx->frameinfo.width = pix->width;
	ctx->frameinfo.height = pix->height;
	ctx->flags |= HVA_FLAG_FRAMEINFO;

	return 0;
}

static int hva_g_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;

	if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return -EINVAL;

	sp->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	sp->parm.output.timeperframe.numerator = time_per_frame->numerator;
	sp->parm.output.timeperframe.denominator =
		time_per_frame->denominator;

	return 0;
}

static int hva_s_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;

	if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return -EINVAL;

	if (!sp->parm.output.timeperframe.numerator ||
	    !sp->parm.output.timeperframe.denominator)
		return hva_g_parm(file, fh, sp);

	sp->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	time_per_frame->numerator = sp->parm.output.timeperframe.numerator;
	time_per_frame->denominator =
		sp->parm.output.timeperframe.denominator;

	return 0;
}

static int hva_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct device *dev = ctx_to_dev(ctx);

	if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		/*
		 * depending on the targeted compressed video format, the
		 * capture buffer might contain headers (e.g. H.264 SPS/PPS)
		 * filled in by the driver client; the size of these data is
		 * copied from the bytesused field of the V4L2 buffer in the
		 * payload field of the hva stream buffer
		 */
		struct vb2_queue *vq;
		struct hva_stream *stream;

		vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, buf->type);

		if (buf->index >= vq->num_buffers) {
			dev_dbg(dev, "%s buffer index %d out of range (%d)\n",
				ctx->name, buf->index, vq->num_buffers);
			return -EINVAL;
		}

		stream = (struct hva_stream *)vq->bufs[buf->index];
		stream->bytesused = buf->bytesused;
	}

	return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf);
}

/* V4L2 ioctl ops */
static const struct v4l2_ioctl_ops hva_ioctl_ops = {
	.vidioc_querycap		= hva_querycap,
	.vidioc_enum_fmt_vid_cap	= hva_enum_fmt_stream,
	.vidioc_enum_fmt_vid_out	= hva_enum_fmt_frame,
	.vidioc_g_fmt_vid_cap		= hva_g_fmt_stream,
	.vidioc_g_fmt_vid_out		= hva_g_fmt_frame,
	.vidioc_try_fmt_vid_cap		= hva_try_fmt_stream,
	.vidioc_try_fmt_vid_out		= hva_try_fmt_frame,
	.vidioc_s_fmt_vid_cap		= hva_s_fmt_stream,
	.vidioc_s_fmt_vid_out		= hva_s_fmt_frame,
	.vidioc_g_parm			= hva_g_parm,
	.vidioc_s_parm			= hva_s_parm,
	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
	.vidioc_create_bufs             = v4l2_m2m_ioctl_create_bufs,
	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
	.vidioc_qbuf			= hva_qbuf,
	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};

/*
 * V4L2 control operations
 */

static int hva_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hva_ctx *ctx = container_of(ctrl->handler, struct hva_ctx,
					   ctrl_handler);
	struct device *dev = ctx_to_dev(ctx);

	dev_dbg(dev, "%s S_CTRL: id = %d, val = %d\n", ctx->name,
		ctrl->id, ctrl->val);

	switch (ctrl->id) {
	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
		ctx->ctrls.bitrate_mode = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
		ctx->ctrls.gop_size = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE:
		ctx->ctrls.bitrate = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_ASPECT:
		ctx->ctrls.aspect = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
		ctx->ctrls.profile = ctrl->val;
		snprintf(ctx->streaminfo.profile,
			 sizeof(ctx->streaminfo.profile),
			 "%s profile",
			 v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
		break;
	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
		ctx->ctrls.level = ctrl->val;
		snprintf(ctx->streaminfo.level,
			 sizeof(ctx->streaminfo.level),
			 "level %s",
			 v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
		break;
	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
		ctx->ctrls.entropy_mode = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
		ctx->ctrls.cpb_size = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
		ctx->ctrls.dct8x8 = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
		ctx->ctrls.qpmin = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
		ctx->ctrls.qpmax = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
		ctx->ctrls.vui_sar = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
		ctx->ctrls.vui_sar_idc = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING:
		ctx->ctrls.sei_fp = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
		ctx->ctrls.sei_fp_type = ctrl->val;
		break;
	default:
		dev_dbg(dev, "%s S_CTRL: invalid control (id = %d)\n",
			ctx->name, ctrl->id);
		return -EINVAL;
	}

	return 0;
}

/* V4L2 control ops */
static const struct v4l2_ctrl_ops hva_ctrl_ops = {
	.s_ctrl = hva_s_ctrl,
};

static int hva_ctrls_setup(struct hva_ctx *ctx)
{
	struct device *dev = ctx_to_dev(ctx);
	u64 mask;
	enum v4l2_mpeg_video_h264_sei_fp_arrangement_type sei_fp_type =
		V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM;

	v4l2_ctrl_handler_init(&ctx->ctrl_handler, 15);

	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
			       V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
			       V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
			       0,
			       V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);

	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
			  V4L2_CID_MPEG_VIDEO_GOP_SIZE,
			  1, 60, 1, 16);

	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
			  V4L2_CID_MPEG_VIDEO_BITRATE,
			  1000, 60000000, 1000, 20000000);

	mask = ~(1 << V4L2_MPEG_VIDEO_ASPECT_1x1);
	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
			       V4L2_CID_MPEG_VIDEO_ASPECT,
			       V4L2_MPEG_VIDEO_ASPECT_1x1,
			       mask,
			       V4L2_MPEG_VIDEO_ASPECT_1x1);

	mask = ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
		 (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
		 (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
		 (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH));
	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
			       V4L2_CID_MPEG_VIDEO_H264_PROFILE,
			       V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH,
			       mask,
			       V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);

	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
			       V4L2_CID_MPEG_VIDEO_H264_LEVEL,
			       V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
			       0,
			       V4L2_MPEG_VIDEO_H264_LEVEL_4_0);

	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
			       V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
			       V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
			       0,
			       V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC);

	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
			  V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
			  1, 10000, 1, 3000);

	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
			  V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM,
			  0, 1, 1, 0);

	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
			  V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
			  0, 51, 1, 5);

	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
			  V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
			  0, 51, 1, 51);

	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
			  V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE,
			  0, 1, 1, 1);

	mask = ~(1 << V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1);
	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
			       V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC,
			       V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1,
			       mask,
			       V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1);

	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
			  V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING,
			  0, 1, 1, 0);

	mask = ~(1 << sei_fp_type);
	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
			       V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE,
			       sei_fp_type,
			       mask,
			       sei_fp_type);

	if (ctx->ctrl_handler.error) {
		int err = ctx->ctrl_handler.error;

		dev_dbg(dev, "%s controls setup failed (%d)\n",
			ctx->name, err);
		v4l2_ctrl_handler_free(&ctx->ctrl_handler);
		return err;
	}

	v4l2_ctrl_handler_setup(&ctx->ctrl_handler);

	/* set default time per frame */
	ctx->ctrls.time_per_frame.numerator = HVA_DEFAULT_FRAME_NUM;
	ctx->ctrls.time_per_frame.denominator = HVA_DEFAULT_FRAME_DEN;

	return 0;
}

/*
 * mem-to-mem operations
 */

static void hva_run_work(struct work_struct *work)
{
	struct hva_ctx *ctx = container_of(work, struct hva_ctx, run_work);
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	const struct hva_enc *enc = ctx->enc;
	struct hva_frame *frame;
	struct hva_stream *stream;
	int ret;

	/* protect instance against reentrancy */
	mutex_lock(&ctx->lock);

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
	hva_dbg_perf_begin(ctx);
#endif

	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	frame = to_hva_frame(src_buf);
	stream = to_hva_stream(dst_buf);
	frame->vbuf.sequence = ctx->frame_num++;

	ret = enc->encode(ctx, frame, stream);

	vb2_set_plane_payload(&dst_buf->vb2_buf, 0, stream->bytesused);
	if (ret) {
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
	} else {
		/* propagate frame timestamp */
		dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
		dst_buf->field = V4L2_FIELD_NONE;
		dst_buf->sequence = ctx->stream_num - 1;

		ctx->encoded_frames++;

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
		hva_dbg_perf_end(ctx, stream);
#endif

		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
	}

	mutex_unlock(&ctx->lock);

	v4l2_m2m_job_finish(ctx->hva_dev->m2m_dev, ctx->fh.m2m_ctx);
}

static void hva_device_run(void *priv)
{
	struct hva_ctx *ctx = priv;
	struct hva_dev *hva = ctx_to_hdev(ctx);

	queue_work(hva->work_queue, &ctx->run_work);
}

static void hva_job_abort(void *priv)
{
	struct hva_ctx *ctx = priv;
	struct device *dev = ctx_to_dev(ctx);

	dev_dbg(dev, "%s aborting job\n", ctx->name);

	ctx->aborting = true;
}

static int hva_job_ready(void *priv)
{
	struct hva_ctx *ctx = priv;
	struct device *dev = ctx_to_dev(ctx);

	if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx)) {
		dev_dbg(dev, "%s job not ready: no frame buffers\n",
			ctx->name);
		return 0;
	}

	if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
		dev_dbg(dev, "%s job not ready: no stream buffers\n",
			ctx->name);
		return 0;
	}

	if (ctx->aborting) {
		dev_dbg(dev, "%s job not ready: aborting\n", ctx->name);
		return 0;
	}

	return 1;
}

/* mem-to-mem ops */
static const struct v4l2_m2m_ops hva_m2m_ops = {
	.device_run	= hva_device_run,
	.job_abort	= hva_job_abort,
	.job_ready	= hva_job_ready,
};

/*
 * VB2 queue operations
 */

static int hva_queue_setup(struct vb2_queue *vq,
			   unsigned int *num_buffers, unsigned int *num_planes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct hva_ctx *ctx = vb2_get_drv_priv(vq);
	struct device *dev = ctx_to_dev(ctx);
	unsigned int size;

	dev_dbg(dev, "%s %s queue setup: num_buffers %d\n", ctx->name,
		to_type_str(vq->type), *num_buffers);

	size = vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ?
		ctx->frameinfo.size : ctx->max_stream_size;

	if (*num_planes)
		return sizes[0] < size ? -EINVAL : 0;

	/* only one plane supported */
	*num_planes = 1;
	sizes[0] = size;

	return 0;
}

static int hva_buf_prepare(struct vb2_buffer *vb)
{
	struct hva_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = ctx_to_dev(ctx);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		struct hva_frame *frame = to_hva_frame(vbuf);

		if (vbuf->field == V4L2_FIELD_ANY)
			vbuf->field = V4L2_FIELD_NONE;
		if (vbuf->field != V4L2_FIELD_NONE) {
			dev_dbg(dev,
				"%s frame[%d] prepare: %d field not supported\n",
				ctx->name, vb->index, vbuf->field);
			return -EINVAL;
		}

		if (!frame->prepared) {
			/* get memory addresses */
			frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
			frame->paddr = vb2_dma_contig_plane_dma_addr(
					&vbuf->vb2_buf, 0);
			frame->info = ctx->frameinfo;
			frame->prepared = true;

			dev_dbg(dev,
				"%s frame[%d] prepared; virt=%p, phy=%pad\n",
				ctx->name, vb->index,
				frame->vaddr, &frame->paddr);
		}
	} else {
		struct hva_stream *stream = to_hva_stream(vbuf);

		if (!stream->prepared) {
			/* get memory addresses */
			stream->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
			stream->paddr = vb2_dma_contig_plane_dma_addr(
					&vbuf->vb2_buf, 0);
			stream->size = vb2_plane_size(&vbuf->vb2_buf, 0);
			stream->prepared = true;

			dev_dbg(dev,
				"%s stream[%d] prepared; virt=%p, phy=%pad\n",
				ctx->name, vb->index,
				stream->vaddr, &stream->paddr);
		}
	}

	return 0;
}

static void hva_buf_queue(struct vb2_buffer *vb)
{
	struct hva_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	if (ctx->fh.m2m_ctx)
		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

static int hva_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct hva_ctx *ctx = vb2_get_drv_priv(vq);
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = ctx_to_dev(ctx);
	struct vb2_v4l2_buffer *vbuf;
	int ret;
	unsigned int i;
	bool found = false;

	dev_dbg(dev, "%s %s start streaming\n", ctx->name,
		to_type_str(vq->type));

	/* open encoder when both start_streaming have been called */
	if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->cap_q_ctx.q))
			return 0;
	} else {
		if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->out_q_ctx.q))
			return 0;
	}

	/* store the instance context in the instances array */
	for (i = 0; i < HVA_MAX_INSTANCES; i++) {
		if (!hva->instances[i]) {
			hva->instances[i] = ctx;
			/* save the context identifier in the context */
			ctx->id = i;
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(dev, "%s maximum instances reached\n", ctx->name);
		ret = -ENOMEM;
		goto err;
	}

	hva->nb_of_instances++;

	if (!ctx->enc) {
		ret = hva_open_encoder(ctx,
				       ctx->streaminfo.streamformat,
				       ctx->frameinfo.pixelformat,
				       &ctx->enc);
		if (ret < 0)
			goto err_ctx;
	}

	return 0;

err_ctx:
	hva->instances[ctx->id] = NULL;
	hva->nb_of_instances--;
err:
	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		/* return of all pending buffers to vb2 (in queued state) */
		while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
	} else {
		/* return of all pending buffers to vb2 (in queued state) */
		while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
	}

	ctx->sys_errors++;

	return ret;
}

static void hva_stop_streaming(struct vb2_queue *vq)
{
	struct hva_ctx *ctx = vb2_get_drv_priv(vq);
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = ctx_to_dev(ctx);
	const struct hva_enc *enc = ctx->enc;
	struct vb2_v4l2_buffer *vbuf;

	dev_dbg(dev, "%s %s stop streaming\n", ctx->name,
		to_type_str(vq->type));

	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		/* return of all pending buffers to vb2 (in error state) */
		ctx->frame_num = 0;
		while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
	} else {
		/* return of all pending buffers to vb2 (in error state) */
		ctx->stream_num = 0;
		while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
	}

	if ((V4L2_TYPE_IS_OUTPUT(vq->type) &&
	     vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q)) ||
	    (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
	     vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))) {
		dev_dbg(dev, "%s %s out=%d cap=%d\n",
			ctx->name, to_type_str(vq->type),
			vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q),
			vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q));
		return;
	}

	/* close encoder when both stop_streaming have been called */
	if (enc) {
		dev_dbg(dev, "%s %s encoder closed\n", ctx->name, enc->name);
		enc->close(ctx);
		ctx->enc = NULL;

		/* clear instance context in instances array */
		hva->instances[ctx->id] = NULL;
		hva->nb_of_instances--;
	}

	ctx->aborting = false;
}

/* VB2 queue ops */
static const struct vb2_ops hva_qops = {
	.queue_setup		= hva_queue_setup,
	.buf_prepare		= hva_buf_prepare,
	.buf_queue		= hva_buf_queue,
	.start_streaming	= hva_start_streaming,
	.stop_streaming		= hva_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};

/*
 * V4L2 file operations
 */

static int queue_init(struct hva_ctx *ctx, struct vb2_queue *vq)
{
	vq->io_modes = VB2_MMAP | VB2_DMABUF;
	vq->drv_priv = ctx;
	vq->ops = &hva_qops;
	vq->mem_ops = &vb2_dma_contig_memops;
	vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	vq->lock = &ctx->hva_dev->lock;

	return vb2_queue_init(vq);
}

static int hva_queue_init(void *priv, struct vb2_queue *src_vq,
			  struct vb2_queue *dst_vq)
{
	struct hva_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	src_vq->buf_struct_size = sizeof(struct hva_frame);
	src_vq->min_buffers_needed = MIN_FRAMES;
	src_vq->dev = ctx->hva_dev->dev;

	ret = queue_init(ctx, src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	dst_vq->buf_struct_size = sizeof(struct hva_stream);
	dst_vq->min_buffers_needed = MIN_STREAMS;
	dst_vq->dev = ctx->hva_dev->dev;

	return queue_init(ctx, dst_vq);
}

static int hva_open(struct file *file)
{
	struct hva_dev *hva = video_drvdata(file);
	struct device *dev = hva_to_dev(hva);
	struct hva_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ret = -ENOMEM;
		goto out;
	}
	ctx->hva_dev = hva;

	INIT_WORK(&ctx->run_work, hva_run_work);
	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	ret = hva_ctrls_setup(ctx);
	if (ret) {
		dev_err(dev, "%s [x:x] failed to setup controls\n",
			HVA_PREFIX);
		ctx->sys_errors++;
		goto err_fh;
	}
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;

	mutex_init(&ctx->lock);

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(hva->m2m_dev, ctx,
					    &hva_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		dev_err(dev, "%s failed to initialize m2m context (%d)\n",
			HVA_PREFIX, ret);
		ctx->sys_errors++;
		goto err_ctrls;
	}

	/* set the instance name */
	mutex_lock(&hva->lock);
	hva->instance_id++;
	snprintf(ctx->name, sizeof(ctx->name), "[%3d:----]",
		 hva->instance_id);
	mutex_unlock(&hva->lock);

	/* default parameters for frame and stream */
	set_default_params(ctx);

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
	hva_dbg_ctx_create(ctx);
#endif

	dev_info(dev, "%s encoder instance created\n", ctx->name);

	return 0;

err_ctrls:
	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
err_fh:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
out:
	return ret;
}

static int hva_release(struct file *file)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = ctx_to_dev(ctx);
	const struct hva_enc *enc = ctx->enc;

	if (enc) {
		dev_dbg(dev, "%s %s encoder closed\n", ctx->name, enc->name);
		enc->close(ctx);
		ctx->enc = NULL;

		/* clear instance context in instances array */
		hva->instances[ctx->id] = NULL;
		hva->nb_of_instances--;
	}

	/* trace a summary of instance before closing (debug purpose) */
	hva_dbg_summary(ctx);

	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);

	v4l2_ctrl_handler_free(&ctx->ctrl_handler);

	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
	hva_dbg_ctx_remove(ctx);
#endif

	dev_info(dev, "%s encoder instance released\n", ctx->name);

	kfree(ctx);

	return 0;
}

/* V4L2 file ops */
static const struct v4l2_file_operations hva_fops = {
	.owner			= THIS_MODULE,
	.open			= hva_open,
	.release		= hva_release,
	.unlocked_ioctl		= video_ioctl2,
	.mmap			= v4l2_m2m_fop_mmap,
	.poll			= v4l2_m2m_fop_poll,
};

/*
 * Platform device operations
 */

static int hva_register_device(struct hva_dev *hva)
{
	int ret;
	struct video_device *vdev;
	struct device *dev;

	if (!hva)
		return -ENODEV;
	dev = hva_to_dev(hva);

	hva->m2m_dev = v4l2_m2m_init(&hva_m2m_ops);
	if (IS_ERR(hva->m2m_dev)) {
		dev_err(dev, "%s failed to initialize v4l2-m2m device\n",
			HVA_PREFIX);
		ret = PTR_ERR(hva->m2m_dev);
		goto err;
	}

	vdev = video_device_alloc();
	if (!vdev) {
		dev_err(dev, "%s failed to allocate video device\n",
			HVA_PREFIX);
		ret = -ENOMEM;
		goto err_m2m_release;
	}

	vdev->fops = &hva_fops;
	vdev->ioctl_ops = &hva_ioctl_ops;
	vdev->release = video_device_release;
	vdev->lock = &hva->lock;
	vdev->vfl_dir = VFL_DIR_M2M;
	vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
	vdev->v4l2_dev = &hva->v4l2_dev;
	snprintf(vdev->name, sizeof(vdev->name), "%s%lx", HVA_NAME,
		 hva->ip_version);

	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
	if (ret) {
		dev_err(dev, "%s failed to register video device\n",
			HVA_PREFIX);
		goto err_vdev_release;
	}

	hva->vdev = vdev;
	video_set_drvdata(vdev, hva);
	return 0;

err_vdev_release:
	video_device_release(vdev);
err_m2m_release:
	v4l2_m2m_release(hva->m2m_dev);
err:
	return ret;
}

static void hva_unregister_device(struct hva_dev *hva)
{
	if (!hva)
		return;

	if (hva->m2m_dev)
		v4l2_m2m_release(hva->m2m_dev);

	video_unregister_device(hva->vdev);
}

static int hva_probe(struct platform_device *pdev)
{
	struct hva_dev *hva;
	struct device *dev = &pdev->dev;
	int ret;

	hva = devm_kzalloc(dev, sizeof(*hva), GFP_KERNEL);
	if (!hva) {
		ret = -ENOMEM;
		goto err;
	}

	hva->dev = dev;
	hva->pdev = pdev;
	platform_set_drvdata(pdev, hva);

	mutex_init(&hva->lock);

	/* probe hardware */
	ret = hva_hw_probe(pdev, hva);
	if (ret)
		goto err;

	/* register all available encoders */
	register_encoders(hva);

	/* register all supported formats */
	register_formats(hva);

	/* register on V4L2 */
	ret = v4l2_device_register(dev, &hva->v4l2_dev);
	if (ret) {
		dev_err(dev, "%s %s failed to register V4L2 device\n",
			HVA_PREFIX, HVA_NAME);
		goto err_hw;
	}

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
	hva_debugfs_create(hva);
#endif

	hva->work_queue = create_workqueue(HVA_NAME);
	if (!hva->work_queue) {
		dev_err(dev, "%s %s failed to allocate work queue\n",
			HVA_PREFIX, HVA_NAME);
		ret = -ENOMEM;
		goto err_v4l2;
	}

	/* register device */
	ret = hva_register_device(hva);
	if (ret)
		goto err_work_queue;

	dev_info(dev, "%s %s registered as /dev/video%d\n", HVA_PREFIX,
		 HVA_NAME, hva->vdev->num);

	return 0;

err_work_queue:
	destroy_workqueue(hva->work_queue);
err_v4l2:
#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
	hva_debugfs_remove(hva);
#endif
	v4l2_device_unregister(&hva->v4l2_dev);
err_hw:
	hva_hw_remove(hva);
err:
	return ret;
}

static int hva_remove(struct platform_device *pdev)
{
	struct hva_dev *hva = platform_get_drvdata(pdev);
	struct device *dev = hva_to_dev(hva);

	hva_unregister_device(hva);

	destroy_workqueue(hva->work_queue);

	hva_hw_remove(hva);

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
	hva_debugfs_remove(hva);
#endif

	v4l2_device_unregister(&hva->v4l2_dev);

	dev_info(dev, "%s %s removed\n", HVA_PREFIX, pdev->name);

	return 0;
}

/* PM ops */
static const struct dev_pm_ops hva_pm_ops = {
	.runtime_suspend	= hva_hw_runtime_suspend,
	.runtime_resume		= hva_hw_runtime_resume,
};

static const struct of_device_id hva_match_types[] = {
	{
	 .compatible = "st,st-hva",
	},
	{ /* end node */ }
};

MODULE_DEVICE_TABLE(of, hva_match_types);

static struct platform_driver hva_driver = {
	.probe  = hva_probe,
	.remove = hva_remove,
	.driver = {
		.name		= HVA_NAME,
		.of_match_table	= hva_match_types,
		.pm		= &hva_pm_ops,
		},
};

module_platform_driver(hva_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
MODULE_DESCRIPTION("STMicroelectronics HVA video encoder V4L2 driver");