/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;
#ifdef CONFIG_BLK_ICQ
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{
	/* Taking a reference on a dead ioc is a bug in the caller. */
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
/* * Exit an icq. Called with ioc locked for blk-mq, and with both ioc * and queue locked for legacy.
*/ staticvoid ioc_exit_icq(struct io_cq *icq)
{ struct elevator_type *et = icq->q->elevator->type;
/*
 * Release an icq.  Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(&q->queue_lock);

	/* Destruction may race between the release paths; do it only once. */
	if (icq->flags & ICQ_DESTROYED)
		return;

	/*
	 * NOTE(review): the unlinking below was missing from the garbled
	 * original text; without it the icq would be freed while still
	 * reachable from the radix tree and both lists (use-after-free).
	 */
	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	kfree_rcu(icq, __rcu_head);
}
/* * Slow path for ioc release in put_io_context(). Performs double-lock * dancing to unlink all icq's and then frees ioc.
*/ staticvoid ioc_release_fn(struct work_struct *work)
{ struct io_context *ioc = container_of(work, struct io_context,
release_work);
spin_lock_irq(&ioc->lock);
if (spin_trylock(&q->queue_lock)) {
ioc_destroy_icq(icq);
spin_unlock(&q->queue_lock);
} else { /* Make sure q and icq cannot be freed. */
rcu_read_lock();
/* Re-acquire the locks in the correct order. */
spin_unlock(&ioc->lock);
spin_lock(&q->queue_lock);
spin_lock(&ioc->lock);
/* * Releasing icqs requires reverse order double locking and we may already be * holding a queue_lock. Do it asynchronously from a workqueue.
*/ staticbool ioc_delay_free(struct io_context *ioc)
{ unsignedlong flags;
/** * ioc_clear_queue - break any ioc association with the specified queue * @q: request_queue being cleared * * Walk @q->icq_list and exit all io_cq's.
*/ void ioc_clear_queue(struct request_queue *q)
{
spin_lock_irq(&q->queue_lock); while (!list_empty(&q->icq_list)) { struct io_cq *icq =
list_first_entry(&q->icq_list, struct io_cq, q_node);
/* * Other context won't hold ioc lock to wait for queue_lock, see * details in ioc_release_fn().
*/
spin_lock(&icq->ioc->lock);
ioc_destroy_icq(icq);
spin_unlock(&icq->ioc->lock);
}
spin_unlock_irq(&q->queue_lock);
} #else/* CONFIG_BLK_ICQ */ staticinlinevoid ioc_exit_icqs(struct io_context *ioc)
{
} staticinlinebool ioc_delay_free(struct io_context *ioc)
{ returnfalse;
} #endif/* CONFIG_BLK_ICQ */
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	if (!atomic_long_dec_and_test(&ioc->refcount))
		return;

	/* Last reference gone: free immediately unless freeing is deferred. */
	if (!ioc_delay_free(ioc))
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL_GPL(put_io_context);
/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	/* Detach the io_context from the exiting task under task_lock. */
	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	/* Drop the task's active reference; the last holder tears down. */
	if (!atomic_dec_and_test(&ioc->active_ref))
		return;

	ioc_exit_icqs(ioc);
	put_io_context(ioc);
}
staticstruct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{ struct io_context *ioc;
ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
node); if (unlikely(!ioc)) return NULL;
/* * Share io context with parent, if CLONE_IO is set
*/ if (clone_flags & CLONE_IO) {
atomic_inc(&ioc->active_ref);
tsk->io_context = ioc;
} elseif (ioprio_valid(ioc->ioprio)) {
tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE); if (!tsk->io_context) return -ENOMEM;
tsk->io_context->ioprio = ioc->ioprio;
}
return 0;
}
#ifdef CONFIG_BLK_ICQ
/**
 * ioc_lookup_icq - lookup io_cq from ioc in io issue path
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * from io issue path, either return NULL if current issue io to @q for the
 * first time, or return a valid icq.
 */
struct io_cq *ioc_lookup_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq;

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU, io issue path ensures that
	 * both request_queue and current task are valid, the found icq
	 * is guaranteed to be valid until the io is done.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	/*
	 * NOTE(review): the radix-tree fallback below was truncated in
	 * the original text; reconstructed from the standard upstream
	 * implementation - verify against the intended original.
	 */
	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, they
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 *
 * NOTE(review): the body of this function is truncated in this copy of
 * the file after the local declarations below - the allocation, radix
 * tree insertion and linking logic is missing and must be restored from
 * the original source.
 */ staticstruct io_cq *ioc_create_icq(struct request_queue *q)
{ struct io_context *ioc = current->io_context; struct elevator_type *et = q->elevator->type; struct io_cq *icq;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.