Diffstat (limited to 'include/qemu/coroutine.h')
-rw-r--r--  include/qemu/coroutine.h  33
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index aae33cce17..89650a2d7f 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -198,14 +198,25 @@ typedef struct CoQueue {
  */
 void qemu_co_queue_init(CoQueue *queue);
 
+typedef enum {
+    /*
+     * Enqueue at front instead of back. Use this to re-queue a request when
+     * its wait condition is not satisfied after being woken up.
+     */
+    CO_QUEUE_WAIT_FRONT = 0x1,
+} CoQueueWaitFlags;
+
 /**
  * Adds the current coroutine to the CoQueue and transfers control to the
  * caller of the coroutine. The mutex is unlocked during the wait and
  * locked again afterwards.
  */
 #define qemu_co_queue_wait(queue, lock) \
-    qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock))
-void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock);
+    qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock), 0)
+#define qemu_co_queue_wait_flags(queue, lock, flags) \
+    qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock), (flags))
+void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock,
+                                          CoQueueWaitFlags flags);
 
 /**
  * Removes the next coroutine from the CoQueue, and queue it to run after
@@ -276,7 +287,7 @@ void qemu_co_rwlock_init(CoRwlock *lock);
  * of a parallel writer, control is transferred to the caller of the current
  * coroutine.
  */
-void qemu_co_rwlock_rdlock(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_rdlock(CoRwlock *lock);
 
 /**
  * Write Locks the CoRwlock from a reader. This is a bit more efficient than
@@ -285,7 +296,7 @@ void qemu_co_rwlock_rdlock(CoRwlock *lock);
  * to the caller of the current coroutine; another writer might run while
  * @qemu_co_rwlock_upgrade blocks.
  */
-void qemu_co_rwlock_upgrade(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_upgrade(CoRwlock *lock);
 
 /**
  * Downgrades a write-side critical section to a reader. Downgrading with
@@ -293,20 +304,20 @@ void qemu_co_rwlock_upgrade(CoRwlock *lock);
  * followed by @qemu_co_rwlock_rdlock. This makes it more efficient, but
  * may also sometimes be necessary for correctness.
  */
-void qemu_co_rwlock_downgrade(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_downgrade(CoRwlock *lock);
 
 /**
  * Write Locks the mutex. If the lock cannot be taken immediately because
  * of a parallel reader, control is transferred to the caller of the current
  * coroutine.
  */
-void qemu_co_rwlock_wrlock(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_wrlock(CoRwlock *lock);
 
 /**
  * Unlocks the read/write lock and schedules the next coroutine that was
  * waiting for this lock to be run.
  */
-void qemu_co_rwlock_unlock(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_unlock(CoRwlock *lock);
 
 typedef struct QemuCoSleep {
     Coroutine *to_wake;
@@ -378,8 +389,9 @@ void qemu_coroutine_dec_pool_size(unsigned int additional_pool_size);
  * The same interface as qemu_sendv_recvv(), with added yielding.
  * XXX should mark these as coroutine_fn
  */
-ssize_t qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt,
-                            size_t offset, size_t bytes, bool do_send);
+ssize_t coroutine_fn qemu_co_sendv_recvv(int sockfd, struct iovec *iov,
+                                         unsigned iov_cnt, size_t offset,
+                                         size_t bytes, bool do_send);
 #define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \
   qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false)
 #define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \
@@ -388,7 +400,8 @@ ssize_t qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt,
 /**
  * The same as above, but with just a single buffer
  */
-ssize_t qemu_co_send_recv(int sockfd, void *buf, size_t bytes, bool do_send);
+ssize_t coroutine_fn qemu_co_send_recv(int sockfd, void *buf, size_t bytes,
+                                       bool do_send);
 #define qemu_co_recv(sockfd, buf, bytes) \
   qemu_co_send_recv(sockfd, buf, bytes, false)
 #define qemu_co_send(sockfd, buf, bytes) \
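
For illustration, a minimal sketch of how the new CO_QUEUE_WAIT_FRONT flag might be used. The ResourcePool type and the pool_init()/pool_acquire()/pool_release() helpers below are hypothetical and not part of this patch; only CoMutex, CoQueue, qemu_co_queue_init(), qemu_co_queue_next(), the CoMutex lock/unlock functions and the qemu_co_queue_wait_flags() macro introduced above are real QEMU APIs. A woken waiter whose condition still fails re-queues at the front so that later requests cannot overtake it:

#include "qemu/osdep.h"
#include "qemu/coroutine.h"

/* Hypothetical example type: a counted resource pool. */
typedef struct {
    CoMutex lock;     /* protects @avail and @waiters */
    CoQueue waiters;  /* coroutines waiting for resources */
    unsigned avail;   /* resources currently available */
} ResourcePool;

static void pool_init(ResourcePool *p, unsigned avail)
{
    qemu_co_mutex_init(&p->lock);
    qemu_co_queue_init(&p->waiters);
    p->avail = avail;
}

/* Acquire @n resources, yielding until enough are available. */
static void coroutine_fn pool_acquire(ResourcePool *p, unsigned n)
{
    bool requeue = false;

    qemu_co_mutex_lock(&p->lock);
    while (p->avail < n) {
        /*
         * A wakeup only means "re-check the condition".  If it still does
         * not hold, wait again at the *front* of the queue so that this
         * request is not overtaken by requests that arrived later.
         */
        qemu_co_queue_wait_flags(&p->waiters, &p->lock,
                                 requeue ? CO_QUEUE_WAIT_FRONT : 0);
        requeue = true;
    }
    p->avail -= n;
    qemu_co_mutex_unlock(&p->lock);
}

/* Release @n resources and wake the next waiter so it can re-check. */
static void coroutine_fn pool_release(ResourcePool *p, unsigned n)
{
    qemu_co_mutex_lock(&p->lock);
    p->avail += n;
    qemu_co_queue_next(&p->waiters);
    qemu_co_mutex_unlock(&p->lock);
}

This mirrors the intent of the enum comment: plain qemu_co_queue_wait() (flags == 0) remains correct for a first wait, and CO_QUEUE_WAIT_FRONT is only used when re-queuing after a wakeup whose condition check failed.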