// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */
/**
 * wake_up_glock - Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	/* Skip the wake-up cost when nobody is waiting on this glock name. */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}
/** * glock_blocked_by_withdraw - determine if we can still use a glock * @gl: the glock * * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted * when we're withdrawn. For example, to maintain metadata integrity, we should * disallow the use of inode and rgrp glocks when withdrawn. Other glocks like * the iopen or freeze glock may be safely used because none of their * metadata goes through the journal. So in general, we should disallow all * glocks that are journaled, and allow all the others. One exception is: * we need to allow our active journal to be promoted and demoted so others * may recover it and we can reacquire it when they're done.
*/ staticbool glock_blocked_by_withdraw(struct gfs2_glock *gl)
{ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
if (!gfs2_withdrawing_or_withdrawn(sdp)) returnfalse; if (gl->gl_ops->go_flags & GLOF_NONDISK) returnfalse; if (!sdp->sd_jdesc ||
gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr) returnfalse; returntrue;
}
/* * Enqueue the glock on the work queue. Passes one glock reference on to the * work queue.
*/ staticvoid gfs2_glock_queue_work(struct gfs2_glock *gl, unsignedlong delay) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) { /* * We are holding the lockref spinlock, and the work was still * queued above. The queued work (glock_work_func) takes that * spinlock before dropping its glock reference(s), so it * cannot have dropped them in the meantime.
*/
GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
gl->gl_lockref.count--;
}
}
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 */
void gfs2_glock_put(struct gfs2_glock *gl)
{
	/* Only do the slow-path put when this was the last reference. */
	if (!__gfs2_glock_put_or_lock(gl))
		__gfs2_glock_put(gl);
}
/* * gfs2_glock_put_async - Decrement reference count without sleeping * @gl: The glock to put * * Decrement the reference count on glock immediately unless it is the last * reference. Defer putting the last reference to work queue context.
*/ void gfs2_glock_put_async(struct gfs2_glock *gl)
{ if (__gfs2_glock_put_or_lock(gl)) return;
/** * may_grant - check if it's ok to grant a new lock * @gl: The glock * @current_gh: One of the current holders of @gl * @gh: The lock request which we wish to grant * * With our current compatibility rules, if a glock has one or more active * holders (HIF_HOLDER flag set), any of those holders can be passed in as * @current_gh; they are all the same as far as compatibility with the new @gh * goes. * * Returns true if it's ok to grant the lock.
*/
switch(current_gh->gh_state) { case LM_ST_EXCLUSIVE: /* * Here we make a special exception to grant holders * who agree to share the EX lock with other holders * who also have the bit set. If the original holder * has the LM_FLAG_NODE_SCOPE bit set, we grant more * holders with the bit set.
*/ return gh->gh_state == LM_ST_EXCLUSIVE &&
(current_gh->gh_flags & LM_FLAG_NODE_SCOPE) &&
(gh->gh_flags & LM_FLAG_NODE_SCOPE);
case LM_ST_SHARED: case LM_ST_DEFERRED: return gh->gh_state == current_gh->gh_state;
default: returnfalse;
}
}
if (gl->gl_state == gh->gh_state) returntrue; if (gh->gh_flags & GL_EXACT) returnfalse; if (gl->gl_state == LM_ST_EXCLUSIVE) { return gh->gh_state == LM_ST_SHARED ||
gh->gh_state == LM_ST_DEFERRED;
} if (gh->gh_flags & LM_FLAG_ANY) return gl->gl_state != LM_ST_UNLOCKED; returnfalse;
}
if (!list_empty(&gl->gl_holders)) {
gh = list_first_entry(&gl->gl_holders, struct gfs2_holder,
gh_list); if (test_bit(HIF_HOLDER, &gh->gh_iflags)) return gh;
} return NULL;
}
/* * gfs2_instantiate - Call the glops instantiate function * @gh: The glock holder * * Returns: 0 if instantiate was successful, or error.
*/ int gfs2_instantiate(struct gfs2_holder *gh)
{ struct gfs2_glock *gl = gh->gh_gl; conststruct gfs2_glock_operations *glops = gl->gl_ops; int ret;
again: if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags)) goto done;
/* * Since we unlock the lockref lock, we set a flag to indicate * instantiate is in progress.
*/ if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
TASK_UNINTERRUPTIBLE); /* * Here we just waited for a different instantiate to finish. * But that may not have been successful, as when a process * locks an inode glock _before_ it has an actual inode to * instantiate into. So we check again. This process might * have an inode to instantiate, so might be successful.
*/ goto again;
}
ret = glops->go_instantiate(gl); if (!ret)
clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags); if (ret) return ret;
done: if (glops->go_held) return glops->go_held(gh); return 0;
}
/** * do_promote - promote as many requests as possible on the current queue * @gl: The glock * * Returns true on success (i.e., progress was made or there are no waiters).
*/
current_gh = find_first_holder(gl);
list_for_each_entry(gh, &gl->gl_holders, gh_list) { if (test_bit(HIF_HOLDER, &gh->gh_iflags)) continue; if (!may_grant(gl, current_gh, gh)) { /* * If we get here, it means we may not grant this * holder for some reason. If this holder is at the * head of the list, it means we have a blocked holder * at the head, so return false.
*/ if (list_is_first(&gh->gh_list, &gl->gl_holders)) returnfalse;
do_error(gl, 0); /* Fail queued try locks */ break;
}
set_bit(HIF_HOLDER, &gh->gh_iflags);
trace_gfs2_promote(gh);
gfs2_holder_wake(gh); if (!current_gh)
current_gh = gh;
} returntrue;
}
/** * find_first_waiter - find the first gh that's waiting for the glock * @gl: the glock
*/
/** * find_last_waiter - find the last gh that's waiting for the glock * @gl: the glock * * This also is a fast way of finding out if there are any waiters.
*/
/* Demote to UN request arrived during demote to SH or DF */ if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
gl->gl_demote_state == LM_ST_UNLOCKED)
gl->gl_target = LM_ST_UNLOCKED;
/* Check for state != intended state */ if (unlikely(gl->gl_state != gl->gl_target)) { struct gfs2_holder *gh = find_first_waiter(gl);
if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { if (ret & LM_OUT_CANCELED) {
list_del_init(&gh->gh_list);
trace_gfs2_glock_queue(gh, 0);
gfs2_holder_wake(gh);
gl->gl_target = gl->gl_state; goto out;
} /* Some error or failed "try lock" - report it */ if ((ret & LM_OUT_ERROR) ||
(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
gl->gl_target = gl->gl_state;
do_error(gl, ret); goto out;
}
} switch(gl->gl_state) { /* Unlocked due to conversion deadlock, try again */ case LM_ST_UNLOCKED:
do_xmote(gl, gh, gl->gl_target); break; /* Conversion fails, unlock and try again */ case LM_ST_SHARED: case LM_ST_DEFERRED:
do_xmote(gl, gh, LM_ST_UNLOCKED); break; default: /* Everything else */
fs_err(gl->gl_name.ln_sbd, "glock %u:%llu requested=%u ret=%u\n",
gl->gl_name.ln_type, gl->gl_name.ln_number,
gl->gl_req, ret);
GLOCK_BUG_ON(gl, 1);
} return;
}
/* Fast path - we got what we asked for */ if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
gfs2_demote_wake(gl); if (gl->gl_state != LM_ST_UNLOCKED) { if (glops->go_xmote_bh) { int rv;
/** * do_xmote - Calls the DLM to change the state of a lock * @gl: The lock state * @gh: The holder (only for promotes) * @target: The target lock state *
*/
spin_unlock(&gl->gl_lockref.lock);
ret = glops->go_sync(gl); /* If we had a problem syncing (due to io errors or whatever, * we should not invalidate the metadata or tell dlm to * release the glock to other nodes.
*/ if (ret) { if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
fs_err(sdp, "Error %d syncing glock\n", ret);
gfs2_dump_glock(NULL, gl, true);
}
spin_lock(&gl->gl_lockref.lock); goto skip_inval;
}
if (target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) { /* * The call to go_sync should have cleared out the ail list. * If there are still items, we have a problem. We ought to * withdraw, but we can't because the withdraw code also uses * glocks. Warn about the error, dump the glock, then fall * through and wait for logd to do the withdraw for us.
*/ if ((atomic_read(&gl->gl_ail_count) != 0) &&
(!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
gfs2_glock_assert_warn(gl,
!atomic_read(&gl->gl_ail_count));
gfs2_dump_glock(NULL, gl, true);
}
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
}
spin_lock(&gl->gl_lockref.lock);
skip_inval:
gl->gl_lockref.count++; /* * Check for an error encountered since we called go_sync and go_inval. * If so, we can't withdraw from the glock code because the withdraw * code itself uses glocks (see function signal_our_withdraw) to * change the mount to read-only. Most importantly, we must not call * dlm to unlock the glock until the journal is in a known good state * (after journal replay) otherwise other nodes may use the object * (rgrp or dinode) and then later, journal replay will corrupt the * file system. The best we can do here is wait for the logd daemon * to see sd_log_error and withdraw, and in the meantime, requeue the * work for later. * * We make a special exception for some system glocks, such as the * system statfs inode glock, which needs to be granted before the * gfs2_quotad daemon can exit, and that exit needs to finish before * we can unmount the withdrawn file system. * * However, if we're just unlocking the lock (say, for unmount, when * gfs2_gl_hash_clear calls clear_glock) and recovery is complete * then it's okay to tell dlm to unlock it.
*/ if (unlikely(sdp->sd_log_error) && !gfs2_withdrawing_or_withdrawn(sdp))
gfs2_withdraw_delayed(sdp); if (glock_blocked_by_withdraw(gl) &&
(target != LM_ST_UNLOCKED ||
test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) { if (!is_system_glock(gl)) {
request_demote(gl, LM_ST_UNLOCKED, 0, false); /* * Ordinarily, we would call dlm and its callback would call * finish_xmote, which would call state_change() to the new state. * Since we withdrew, we won't call dlm, so call state_change * manually, but to the UNLOCKED state we desire.
*/
state_change(gl, LM_ST_UNLOCKED); /* * We skip telling dlm to do the locking, so we won't get a * reply that would otherwise clear GLF_LOCK. So we clear it here.
*/ if (!test_bit(GLF_CANCELING, &gl->gl_flags))
clear_bit(GLF_LOCK, &gl->gl_flags);
clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); return;
}
}
if (ls->ls_ops->lm_lock) {
set_bit(GLF_PENDING_REPLY, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
spin_lock(&gl->gl_lockref.lock);
if (!ret) { /* The operation will be completed asynchronously. */ return;
}
clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
if (ret == -ENODEV && gl->gl_target == LM_ST_UNLOCKED &&
target == LM_ST_UNLOCKED) { /* * The lockspace has been released and the lock has * been unlocked implicitly.
*/
} else {
fs_err(sdp, "lm_lock ret %d\n", ret);
target = gl->gl_state | LM_OUT_ERROR;
}
}
/** * glock_set_object - set the gl_object field of a glock * @gl: the glock * @object: the object
*/ void glock_set_object(struct gfs2_glock *gl, void *object)
{ void *prev_object;
/** * glock_clear_object - clear the gl_object field of a glock * @gl: the glock * @object: object the glock currently points at
*/ void glock_clear_object(struct gfs2_glock *gl, void *object)
{ void *prev_object;
spin_lock(&gl->gl_lockref.lock);
ip = gl->gl_object; if (ip && !igrab(&ip->i_inode))
ip = NULL;
spin_unlock(&gl->gl_lockref.lock); if (ip) {
wait_on_inode(&ip->i_inode); if (is_bad_inode(&ip->i_inode)) {
iput(&ip->i_inode);
ip = NULL;
}
} return ip;
}
/* * If there is contention on the iopen glock and we have an inode, try * to grab and release the inode so that it can be evicted. The * GIF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode * should not be deleted locally. This will allow the remote node to * go ahead and delete the inode without us having to do it, which will * avoid rgrp glock thrashing. * * The remote node is likely still holding the corresponding inode * glock, so it will run before we get to verify that the delete has * happened below. (Verification is triggered by the call to * gfs2_queue_verify_delete() in gfs2_evict_inode().)
*/
ip = gfs2_grab_existing_inode(gl); if (ip) {
set_bit(GLF_DEFER_DELETE, &gl->gl_flags);
d_prune_aliases(&ip->i_inode);
iput(&ip->i_inode);
clear_bit(GLF_DEFER_DELETE, &gl->gl_flags);
/* If the inode was evicted, gl->gl_object will now be NULL. */
ip = gfs2_grab_existing_inode(gl); if (ip) {
gfs2_glock_poke(ip->i_gl);
iput(&ip->i_inode);
}
}
}
if (!delay) {
clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
gfs2_set_demote(GLF_DEMOTE, gl);
}
}
run_queue(gl, 0); if (delay) { /* Keep one glock reference for the work we requeue. */
drop_refs--;
gfs2_glock_queue_work(gl, delay);
}
/* Drop the remaining glock references manually. */
GLOCK_BUG_ON(gl, gl->gl_lockref.count < drop_refs);
gl->gl_lockref.count -= drop_refs; if (!gl->gl_lockref.count) { if (gl->gl_state == LM_ST_UNLOCKED) {
__gfs2_glock_put(gl); return;
}
gfs2_glock_add_to_lru(gl);
}
spin_unlock(&gl->gl_lockref.lock);
}
/** * gfs2_glock_get() - Get a glock, or create one if one doesn't exist * @sdp: The GFS2 superblock * @number: the lock number * @glops: The glock_operations to use * @create: If 0, don't create the glock if it doesn't exist * @glp: the glock is returned here * * This does not lock a glock, just finds/creates structures for one. * * Returns: errno
*/
tmp = find_insert_glock(&name, gl); if (tmp) {
gfs2_glock_dealloc(&gl->gl_rcu); if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_kill_wait);
if (IS_ERR(tmp)) return PTR_ERR(tmp);
gl = tmp;
}
found:
*glp = gl; return 0;
}
/** * __gfs2_holder_init - initialize a struct gfs2_holder in the default way * @gl: the glock * @state: the state we're requesting * @flags: the modifier flags * @gh: the holder structure *
*/
/** * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it * @state: the state we're requesting * @flags: the modifier flags * @gh: the holder structure * * Don't mess with the glock. *
*/
staticvoid gfs2_glock_update_hold_time(struct gfs2_glock *gl, unsignedlong start_time)
{ /* Have we waited longer that a second? */ if (time_after(jiffies, start_time + HZ)) { /* Lengthen the minimum hold time. */
gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
GL_GLOCK_MAX_HOLD);
}
}
/** * gfs2_glock_holder_ready - holder is ready and its error code can be collected * @gh: the glock holder * * Called when a glock holder no longer needs to be waited for because it is * now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has * failed (gh_error != 0).
*/
int gfs2_glock_holder_ready(struct gfs2_holder *gh)
{ if (gh->gh_error || (gh->gh_flags & GL_SKIP)) return gh->gh_error;
gh->gh_error = gfs2_instantiate(gh); if (gh->gh_error)
gfs2_glock_dq(gh); return gh->gh_error;
}
/** * gfs2_glock_wait - wait on a glock acquisition * @gh: the glock holder * * Returns: 0 on success
*/
int gfs2_glock_wait(struct gfs2_holder *gh)
{ unsignedlong start_time = jiffies;
staticint glocks_pending(unsignedint num_gh, struct gfs2_holder *ghs)
{ int i;
for (i = 0; i < num_gh; i++) if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) return 1; return 0;
}
/** * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions * @num_gh: the number of holders in the array * @ghs: the glock holder array * * Returns: 0 on success, meaning all glocks have been granted and are held. * -ESTALE if the request timed out, meaning all glocks were released, * and the caller should retry the operation.
*/
int gfs2_glock_async_wait(unsignedint num_gh, struct gfs2_holder *ghs)
{ struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd; int i, ret = 0, timeout = 0; unsignedlong start_time = jiffies;
might_sleep(); /* * Total up the (minimum hold time * 2) of all glocks and use that to * determine the max amount of time we should wait.
*/ for (i = 0; i < num_gh; i++)
timeout += ghs[i].gh_gl->gl_hold_time << 1;
if (!wait_event_timeout(sdp->sd_async_glock_wait,
!glocks_pending(num_gh, ghs), timeout)) {
ret = -ESTALE; /* request timed out. */ goto out;
}
for (i = 0; i < num_gh; i++) { struct gfs2_holder *gh = &ghs[i]; int ret2;
if (test_bit(HIF_HOLDER, &gh->gh_iflags)) {
gfs2_glock_update_hold_time(gh->gh_gl,
start_time);
}
ret2 = gfs2_glock_holder_ready(gh); if (!ret)
ret = ret2;
}
out: if (ret) { for (i = 0; i < num_gh; i++) { struct gfs2_holder *gh = &ghs[i];
gfs2_glock_dq(gh);
}
} return ret;
}
/** * request_demote - process a demote request * @gl: the glock * @state: the state the caller wants us to change to * @delay: zero to demote immediately; otherwise pending demote * @remote: true if this came from a different cluster node * * There are only two requests that we are going to see in actual * practise: LM_ST_SHARED and LM_ST_UNLOCKED
*/
/** * add_to_queue - Add a holder to the wait queue (but look for recursion) * @gh: the holder structure to add * * Eventually we should move the recursive locking trap to a * debugging option or something like that. This is the fast * path and needs to have the minimum number of distractions. *
*/
/** * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock) * @gh: the holder structure * * if (gh->gh_flags & GL_ASYNC), this never returns an error * * Returns: 0, GLR_TRYFAILED, or errno on failure
*/
int gfs2_glock_nq(struct gfs2_holder *gh)
{ struct gfs2_glock *gl = gh->gh_gl; int error;
if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) return -EIO;
if (gh->gh_flags & GL_NOBLOCK) { struct gfs2_holder *current_gh;
error = 0; if (!(gh->gh_flags & GL_ASYNC))
error = gfs2_glock_wait(gh);
return error;
}
/** * gfs2_glock_poll - poll to see if an async request has been completed * @gh: the holder * * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
*/
/* * This holder should not be cached, so mark it for demote. * Note: this should be done before the glock_needs_demote * check below.
*/ if (gh->gh_flags & GL_NOCACHE)
request_demote(gl, LM_ST_UNLOCKED, 0, false);
/* * If there hasn't been a demote request we are done. * (Let the remaining holders, if any, keep holding it.)
*/ if (!glock_needs_demote(gl)) { if (list_empty(&gl->gl_holders))
fast_path = 1;
}
/** * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock) * @gh: the glock holder *
*/ void gfs2_glock_dq(struct gfs2_holder *gh)
{ struct gfs2_glock *gl = gh->gh_gl; struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
spin_lock(&gl->gl_lockref.lock); if (!gfs2_holder_queued(gh)) { /* * May have already been dequeued because the locking request * was GL_ASYNC and it has failed in the meantime.
*/ goto out;
}
/* * If we're in the process of file system withdraw, we cannot just * dequeue any glocks until our journal is recovered, lest we introduce * file system corruption. We need two exceptions to this rule: We need * to allow unlocking of nondisk glocks and the glock for our own * journal that needs recovery.
*/ if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
glock_blocked_by_withdraw(gl) &&
gh->gh_gl != sdp->sd_jinode_gl) {
sdp->sd_glock_dqs_held++;
spin_unlock(&gl->gl_lockref.lock);
might_sleep();
wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
TASK_UNINTERRUPTIBLE);
spin_lock(&gl->gl_lockref.lock);
}
/** * gfs2_glock_nq_num - acquire a glock based on lock number * @sdp: the filesystem * @number: the lock number * @glops: the glock operations for the type of glock * @state: the state to acquire the glock in * @flags: modifier flags for the acquisition * @gh: the struct gfs2_holder * * Returns: errno
*/
if (a->ln_number > b->ln_number) return 1; if (a->ln_number < b->ln_number) return -1;
BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type); return 0;
}
/** * nq_m_sync - synchronously acquire more than one glock in deadlock free order * @num_gh: the number of structures * @ghs: an array of struct gfs2_holder structures * @p: placeholder for the holder structure to pass back * * Returns: 0 on success (all glocks acquired), * errno on failure (no glocks acquired)
*/
for (x = 0; x < num_gh; x++) {
error = gfs2_glock_nq(p[x]); if (error) { while (x--)
gfs2_glock_dq(p[x]); break;
}
}
return error;
}
/** * gfs2_glock_nq_m - acquire multiple glocks * @num_gh: the number of structures * @ghs: an array of struct gfs2_holder structures * * Returns: 0 on success (all glocks acquired), * errno on failure (no glocks acquired)
*/
int gfs2_glock_nq_m(unsignedint num_gh, struct gfs2_holder *ghs)
{ struct gfs2_holder *tmp[4]; struct gfs2_holder **pph = tmp; int error = 0;
switch(num_gh) { case 0: return 0; case 1: return gfs2_glock_nq(ghs); default: if (num_gh <= 4) break;
pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
GFP_NOFS); if (!pph) return -ENOMEM;
}
error = nq_m_sync(num_gh, ghs, pph);
if (pph != tmp)
kfree(pph);
return error;
}
/** * gfs2_glock_dq_m - release multiple glocks * @num_gh: the number of structures * @ghs: an array of struct gfs2_holder structures *
*/
/** * gfs2_should_freeze - Figure out if glock should be frozen * @gl: The glock in question * * Glocks are not frozen if (a) the result of the dlm operation is * an error, (b) the locking operation was an unlock operation or * (c) if there is a "noexp" flagged request anywhere in the queue * * Returns: 1 if freezing should occur, 0 otherwise
*/
if (gl->gl_reply & ~LM_OUT_ST_MASK) return 0; if (gl->gl_target == LM_ST_UNLOCKED) return 0;
list_for_each_entry(gh, &gl->gl_holders, gh_list) { if (test_bit(HIF_HOLDER, &gh->gh_iflags)) continue; if (LM_FLAG_NOEXP & gh->gh_flags) return 0;
}
return 1;
}
/** * gfs2_glock_complete - Callback used by locking * @gl: Pointer to the glock * @ret: The return value from the dlm * * The gl_reply field is under the gl_lockref.lock lock so that it is ok * to use a bitfield shared with other glock state fields.
*/
/** * gfs2_dispose_glock_lru - Demote a list of glocks * @list: The list to dispose of * * Disposing of glocks may involve disk accesses, so that here we sort * the glocks by number (i.e. disk location of the inodes) so that if * there are any such accesses, they'll be sent in order (mostly). * * Must be called under the lru_lock, but may drop and retake this * lock. While the lru_lock is dropped, entries may vanish from the * list, but no new entries will appear on the list (since it is * private)
*/
/** * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote * @nr: The number of entries to scan * * This function selects the entries on the LRU which are able to * be demoted, and then kicks off the process by calling * gfs2_dispose_glock_lru() above.
*/
/** * glock_hash_walk - Call a function for glock in a hash bucket * @examiner: the function * @sdp: the filesystem * * Note that the function can be called multiple times on the same * object. So the user must ensure that the function can cope with * that.
*/
while (!timed_out) {
wait_event_timeout(sdp->sd_kill_wait,
!atomic_read(&sdp->sd_glock_disposal),
HZ * 60); if (!atomic_read(&sdp->sd_glock_disposal)) break;
timed_out = time_after(jiffies, start + (HZ * 600));
fs_warn(sdp, "%u glocks left after %u seconds%s\n",
atomic_read(&sdp->sd_glock_disposal),
jiffies_to_msecs(jiffies - start) / 1000,
timed_out ? ":" : "; still waiting");
}
gfs2_lm_unmount(sdp);
gfs2_free_dead_glocks(sdp);
glock_hash_walk(dump_glock_func, sdp);
destroy_workqueue(sdp->sd_glock_wq);
sdp->sd_glock_wq = NULL;
}
/* state2str - return the two-letter name of a glock state for dumps */
static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}
/*
 * hflags2str - encode holder flags and iflags as a short string for dumps
 * @buf: caller-supplied buffer to write the flag characters into
 * @flags: the holder's gh_flags
 * @iflags: the holder's gh_iflags
 *
 * Returns: @buf, NUL-terminated.
 */
static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
	char *p = buf;

	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_NODE_SCOPE)
		*p++ = 'n';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (flags & GL_SKIP)
		*p++ = 's';
	*p = 0;
	return buf;
}
/** * dump_holder - print information about a glock holder * @seq: the seq_file struct * @gh: the glock holder * @fs_id_buf: pointer to file system id (if requested) *
*/
if (test_bit(GLF_LOCK, gflags))
*p++ = 'l'; if (test_bit(GLF_DEMOTE, gflags))
*p++ = 'D'; if (test_bit(GLF_PENDING_DEMOTE, gflags))
*p++ = 'd'; if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
*p++ = 'p'; if (test_bit(GLF_DIRTY, gflags))
*p++ = 'y'; if (test_bit(GLF_LFLUSH, gflags))
*p++ = 'f'; if (test_bit(GLF_PENDING_REPLY, gflags))
*p++ = 'R'; if (test_bit(GLF_HAVE_REPLY, gflags))
*p++ = 'r'; if (test_bit(GLF_INITIAL, gflags))
*p++ = 'a'; if (test_bit(GLF_HAVE_FROZEN_REPLY, gflags))
*p++ = 'F'; if (!list_empty(&gl->gl_holders))
*p++ = 'q'; if (test_bit(GLF_LRU, gflags))
*p++ = 'L'; if (gl->gl_object)
*p++ = 'o'; if (test_bit(GLF_BLOCKING, gflags))
*p++ = 'b'; if (test_bit(GLF_UNLOCKED, gflags))
*p++ = 'x'; if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
*p++ = 'n'; if (test_bit(GLF_INSTANTIATE_IN_PROG, gflags))
*p++ = 'N'; if (test_bit(GLF_TRY_TO_EVICT, gflags))
*p++ = 'e'; if (test_bit(GLF_VERIFY_DELETE, gflags))
*p++ = 'E'; if (test_bit(GLF_DEFER_DELETE, gflags))
*p++ = 's'; if (test_bit(GLF_CANCELING, gflags))
*p++ = 'C';
*p = 0; return buf;
}
/** * gfs2_dump_glock - print information about a glock * @seq: The seq_file struct * @gl: the glock * @fsid: If true, also dump the file system id * * The file format is as follows: * One line per object, capital letters are used to indicate objects * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented, * other objects are indented by a single space and follow the glock to * which they are related. Fields are indicated by lower case letters * followed by a colon and the field value, except for strings which are in * [] so that its possible to see if they are composed of spaces for * example. The field's are n = number (id of the object), f = flags, * t = type, s = state, r = refcount, e = error, p = pid. *
*/
/*
 * NOTE(review): the following German website disclaimer appears to have been
 * accidentally appended to this source file and is not valid C.  It is
 * preserved here as a comment pending confirmation that it can be removed:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */