 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c | 159 ++++++++++---------
 1 file changed, 87 insertions(+), 72 deletions(-)
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
index 16af735af5c3..1c180ead4a20 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -4,10 +4,11 @@
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
- * Dave Stevenson <dsteve@broadcom.com>
- * Simon Mellor <simellor@broadcom.com>
- * Luke Diamand <luked@broadcom.com>
+ * Authors: Vincent Sanders @ Collabora
+ * Dave Stevenson @ Broadcom
+ * (now dave.stevenson@raspberrypi.org)
+ * Simon Mellor @ Broadcom
+ * Luke Diamand @ Broadcom
*
* V4L2 driver MMAL vchiq interface code
*/
@@ -117,8 +118,10 @@ struct mmal_msg_context {
union {
struct {
- /* work struct for defered callback - must come first */
+ /* work struct for buffer_cb callback */
struct work_struct work;
+ /* work struct for deferred callback */
+ struct work_struct buffer_to_host_work;
/* mmal instance */
struct vchiq_mmal_instance *instance;
/* mmal port */
@@ -161,11 +164,15 @@ struct vchiq_mmal_instance {
void *bulk_scratch;
struct idr context_map;
- spinlock_t context_map_lock;
+ /* protect accesses to context_map */
+ struct mutex context_map_lock;
/* component to use next */
int component_idx;
struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
+
+ /* ordered workqueue to process all bulk operations */
+ struct workqueue_struct *bulk_wq;
};
static struct mmal_msg_context *
@@ -184,10 +191,10 @@ get_msg_context(struct vchiq_mmal_instance *instance)
* that when we service the VCHI reply, we can look up what
* message is being replied to.
*/
- spin_lock(&instance->context_map_lock);
+ mutex_lock(&instance->context_map_lock);
handle = idr_alloc(&instance->context_map, msg_context,
0, 0, GFP_KERNEL);
- spin_unlock(&instance->context_map_lock);
+ mutex_unlock(&instance->context_map_lock);
if (handle < 0) {
kfree(msg_context);
@@ -211,9 +218,9 @@ release_msg_context(struct mmal_msg_context *msg_context)
{
struct vchiq_mmal_instance *instance = msg_context->instance;
- spin_lock(&instance->context_map_lock);
+ mutex_lock(&instance->context_map_lock);
idr_remove(&instance->context_map, msg_context->handle);
- spin_unlock(&instance->context_map_lock);
+ mutex_unlock(&instance->context_map_lock);
kfree(msg_context);
}
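
Switching from a spinlock to a mutex here lines up with idr_alloc() being called with GFP_KERNEL, which may sleep; sleeping is not allowed while a spinlock is held. A minimal standalone sketch of the resulting pattern (hypothetical my_lock/my_map names, not from this driver):

#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);	/* sleeping lock: safe around GFP_KERNEL */
static DEFINE_IDR(my_map);

static int store_ctx(void *ctx)
{
	int handle;

	mutex_lock(&my_lock);
	/* idr_alloc() may sleep with GFP_KERNEL, hence the mutex */
	handle = idr_alloc(&my_map, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&my_lock);

	return handle;	/* index on success, negative errno on failure */
}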
@@ -239,6 +246,8 @@ static void buffer_work_cb(struct work_struct *work)
struct mmal_msg_context *msg_context =
container_of(work, struct mmal_msg_context, u.bulk.work);
+ atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
+
msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
msg_context->u.bulk.port,
msg_context->u.bulk.status,
@@ -247,7 +256,44 @@ static void buffer_work_cb(struct work_struct *work)
msg_context->u.bulk.mmal_flags,
msg_context->u.bulk.dts,
msg_context->u.bulk.pts);
+}
+
+/* workqueue scheduled callback to handle receiving buffers
+ *
+ * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
+ * If we block in the service_callback context then we can't process the
+ * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
+ * vchi_bulk_queue_receive() call to complete.
+ */
+static void buffer_to_host_work_cb(struct work_struct *work)
+{
+ struct mmal_msg_context *msg_context =
+ container_of(work, struct mmal_msg_context,
+ u.bulk.buffer_to_host_work);
+ struct vchiq_mmal_instance *instance = msg_context->instance;
+ unsigned long len = msg_context->u.bulk.buffer_used;
+ int ret;
+ if (!len)
+ /* Dummy receive to ensure the buffers remain in order */
+ len = 8;
+ /* queue the bulk submission */
+ vchi_service_use(instance->handle);
+ ret = vchi_bulk_queue_receive(instance->handle,
+ msg_context->u.bulk.buffer->buffer,
+ /* Actual receive needs to be a multiple
+ * of 4 bytes
+ */
+ (len + 3) & ~3,
+ VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
+ VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
+ msg_context);
+
+ vchi_service_release(instance->handle);
+
+ if (ret != 0)
+ pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
+ __func__, msg_context, ret);
}
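
Deferring the receive to a work item means the service callback can return without blocking even when VCHI's queue of four bulk receives is full; the potentially blocking vchi_bulk_queue_receive() then sleeps in worker context, where the VCHI_CALLBACK_BULK_RECEIVED events that drain the queue can still be processed. A minimal sketch of this defer-to-worker pattern, with hypothetical names (not the driver's own types):

#include <linux/workqueue.h>

struct rx_ctx {
	struct work_struct work;	/* must be initialised before queueing */
	/* ... per-transfer state ... */
};

/* runs in worker context, so it is allowed to block */
static void rx_work_fn(struct work_struct *work)
{
	struct rx_ctx *ctx = container_of(work, struct rx_ctx, work);

	/* issue the potentially blocking bulk receive here */
	(void)ctx;
}

/* called from the event callback, which must not block */
static void on_event(struct workqueue_struct *wq, struct rx_ctx *ctx)
{
	INIT_WORK(&ctx->work, rx_work_fn);
	queue_work(wq, &ctx->work);	/* an ordered wq preserves submission order */
}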
/* enqueue a bulk receive for a given message context */
@@ -256,7 +302,6 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
struct mmal_msg_context *msg_context)
{
unsigned long rd_len;
- int ret;
rd_len = msg->u.buffer_from_host.buffer_header.length;
@@ -287,50 +332,13 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
/* store length */
msg_context->u.bulk.buffer_used = rd_len;
- msg_context->u.bulk.mmal_flags =
- msg->u.buffer_from_host.buffer_header.flags;
msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
- /* queue the bulk submission */
- vchi_service_use(instance->handle);
- ret = vchi_bulk_queue_receive(instance->handle,
- msg_context->u.bulk.buffer->buffer,
- /* Actual receive needs to be a multiple
- * of 4 bytes
- */
- (rd_len + 3) & ~3,
- VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
- VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
- msg_context);
-
- vchi_service_release(instance->handle);
-
- return ret;
-}
-
-/* enque a dummy bulk receive for a given message context */
-static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
- struct mmal_msg_context *msg_context)
-{
- int ret;
-
- /* zero length indicates this was a dummy transfer */
- msg_context->u.bulk.buffer_used = 0;
-
- /* queue the bulk submission */
- vchi_service_use(instance->handle);
-
- ret = vchi_bulk_queue_receive(instance->handle,
- instance->bulk_scratch,
- 8,
- VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
- VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
- msg_context);
+ queue_work(msg_context->instance->bulk_wq,
+ &msg_context->u.bulk.buffer_to_host_work);
- vchi_service_release(instance->handle);
-
- return ret;
+ return 0;
}
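
With this change the zero-length (EOS) case is folded into the normal path: buffer_used is stored as 0 and buffer_to_host_work_cb() substitutes a dummy 8-byte receive, so the separate dummy_bulk_receive() helper is no longer needed and every receive flows through the one ordered workqueue. The length handed to VCHI is rounded up to a multiple of 4 by (len + 3) & ~3; a hypothetical helper showing the same arithmetic:

/* equivalent to the (len + 3) & ~3 used above: 0->0, 1->4, 5->8, 8->8 */
static inline unsigned long round_up_4(unsigned long len)
{
	return (len + 3) & ~3UL;
}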
/* data in message, memcpy from packet into output buffer */
@@ -378,6 +386,10 @@ buffer_from_host(struct vchiq_mmal_instance *instance,
/* initialise work structure ready to schedule callback */
INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
+ INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
+ buffer_to_host_work_cb);
+
+ atomic_inc(&port->buffers_with_vpu);
/* prep the buffer from host message */
memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
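
buffers_with_vpu tracks how many buffers the VPU currently holds: it is incremented here when a buffer is handed over, and decremented in buffer_work_cb() just before the completion callback fires (see the atomic_dec earlier in this diff). A minimal sketch of that accounting, assuming an atomic_t member (hypothetical struct name):

#include <linux/atomic.h>

struct port_acct {
	atomic_t buffers_with_vpu;	/* buffers currently owned by the VPU */
};

static void submit_to_vpu(struct port_acct *p)
{
	atomic_inc(&p->buffers_with_vpu);	/* submission side */
}

static void completed_by_vpu(struct port_acct *p)
{
	atomic_dec(&p->buffers_with_vpu);	/* completion side */
}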
@@ -447,6 +459,9 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
return;
}
+ msg_context->u.bulk.mmal_flags =
+ msg->u.buffer_from_host.buffer_header.flags;
+
if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
/* message reception had an error */
pr_warn("error %d in reply\n", msg->h.status);
@@ -458,7 +473,7 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
if (msg->u.buffer_from_host.buffer_header.flags &
MMAL_BUFFER_HEADER_FLAG_EOS) {
msg_context->u.bulk.status =
- dummy_bulk_receive(instance, msg_context);
+ bulk_receive(instance, msg, msg_context);
if (msg_context->u.bulk.status == 0)
return; /* successful bulk submission, bulk
* completion will trigger callback
@@ -635,7 +650,7 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
if (payload_len >
(MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
pr_err("payload length %d exceeds max:%d\n", payload_len,
- (int)(MMAL_MSG_MAX_SIZE -
+ (int)(MMAL_MSG_MAX_SIZE -
sizeof(struct mmal_msg_header)));
return -EINVAL;
}
@@ -838,9 +853,9 @@ static int port_info_get(struct vchiq_mmal_instance *instance,
goto release_msg;
if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
- port->enabled = false;
+ port->enabled = 0;
else
- port->enabled = true;
+ port->enabled = 1;
/* copy the values out of the message */
port->handle = rmsg->u.port_info_get_reply.port_handle;
@@ -1252,9 +1267,10 @@ static int port_parameter_get(struct vchiq_mmal_instance *instance,
memcpy(value, &rmsg->u.port_parameter_get_reply.value,
*value_size);
*value_size = rmsg->u.port_parameter_get_reply.size;
- } else
+ } else {
memcpy(value, &rmsg->u.port_parameter_get_reply.value,
rmsg->u.port_parameter_get_reply.size);
+ }
pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
ret, port->component->handle, port->handle, parameter_id);
@@ -1276,7 +1292,7 @@ static int port_disable(struct vchiq_mmal_instance *instance,
if (!port->enabled)
return 0;
- port->enabled = false;
+ port->enabled = 0;
ret = port_action_port(instance, port,
MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
@@ -1323,22 +1339,12 @@ static int port_enable(struct vchiq_mmal_instance *instance,
if (port->enabled)
return 0;
- /* ensure there are enough buffers queued to cover the buffer headers */
- if (port->buffer_cb) {
- hdr_count = 0;
- list_for_each(buf_head, &port->buffers) {
- hdr_count++;
- }
- if (hdr_count < port->current_buffer.num)
- return -ENOSPC;
- }
-
ret = port_action_port(instance, port,
MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
if (ret)
goto done;
- port->enabled = true;
+ port->enabled = 1;
if (port->buffer_cb) {
/* send buffer headers to videocore */
@@ -1505,7 +1511,7 @@ int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
pr_err("failed disconnecting src port\n");
goto release_unlock;
}
- src->connected->enabled = false;
+ src->connected->enabled = 0;
src->connected = NULL;
}
@@ -1752,7 +1758,7 @@ int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
ret = disable_component(instance, component);
if (ret == 0)
- component->enabled = false;
+ component->enabled = 0;
mutex_unlock(&instance->vchiq_mutex);
@@ -1792,6 +1798,9 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
mutex_unlock(&instance->vchiq_mutex);
+ flush_workqueue(instance->bulk_wq);
+ destroy_workqueue(instance->bulk_wq);
+
vfree(instance->bulk_scratch);
idr_destroy(&instance->context_map);
@@ -1849,11 +1858,16 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
instance->bulk_scratch = vmalloc(PAGE_SIZE);
- spin_lock_init(&instance->context_map_lock);
+ mutex_init(&instance->context_map_lock);
idr_init_base(&instance->context_map, 1);
params.callback_param = instance;
+ instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
+ WQ_MEM_RECLAIM);
+ if (!instance->bulk_wq)
+ goto err_free;
+
status = vchi_service_open(vchi_instance, &params, &instance->handle);
if (status) {
pr_err("Failed to open VCHI service connection (status=%d)\n",
@@ -1868,8 +1882,9 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
return 0;
err_close_services:
-
vchi_service_close(instance->handle);
+ destroy_workqueue(instance->bulk_wq);
+err_free:
vfree(instance->bulk_scratch);
kfree(instance);
return -ENODEV;
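
alloc_ordered_workqueue() creates a workqueue that executes at most one work item at a time, in queueing order, which is what keeps bulk receives submitted to VCHI in the order the buffer headers arrived; WQ_MEM_RECLAIM additionally provides a rescuer thread so the queue can make progress under memory pressure. A minimal lifecycle sketch (hypothetical names):

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int my_init(void)
{
	/* strictly one item at a time, FIFO; rescuer for reclaim paths */
	my_wq = alloc_ordered_workqueue("my-ordered-wq", WQ_MEM_RECLAIM);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void my_exit(void)
{
	flush_workqueue(my_wq);		/* wait for queued work to finish */
	destroy_workqueue(my_wq);	/* then free the queue */
}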