/*
 * Read @size bytes starting at @fragment into a freshly allocated
 * ufs_buffer_head.  @size must be a whole number of fragments and
 * must not exceed UFS_MAXFRAG fragments.  Returns NULL on a bad
 * size, allocation failure, or read error; on success the caller
 * owns the head and releases it with ubh_brelse().
 */
struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi, struct super_block *sb, u64 fragment, u64 size)
{
	struct ufs_buffer_head *ubh;
	u64 nfrags;
	unsigned frag, done;

	/* reject sizes that are not fragment-aligned */
	if (size & ~uspi->s_fmask)
		return NULL;
	nfrags = size >> uspi->s_fshift;
	if (nfrags > UFS_MAXFRAG)
		return NULL;

	ubh = kmalloc(sizeof(struct ufs_buffer_head), GFP_NOFS);
	if (!ubh)
		return NULL;
	ubh->fragment = fragment;
	ubh->count = nfrags;

	for (frag = 0; frag < nfrags; frag++) {
		ubh->bh[frag] = sb_bread(sb, fragment + frag);
		if (!ubh->bh[frag])
			goto failed;
	}
	/* NULL the unused tail so release paths never see stale pointers */
	for (; frag < UFS_MAXFRAG; frag++)
		ubh->bh[frag] = NULL;
	return ubh;
failed:
	/* drop every buffer we managed to read before the failure */
	for (done = 0; done < frag; done++)
		brelse(ubh->bh[done]);
	kfree(ubh);
	return NULL;
}
/*
 * Like _ubh_bread_() but fills the buffer head embedded in @uspi
 * instead of allocating one.  @size must be a whole number of
 * fragments, nonzero, and at most UFS_MAXFRAG fragments.  Returns
 * USPI_UBH(uspi) on success, NULL on a bad size or read error;
 * the embedded head is released with ubh_brelse_uspi().
 */
struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi, struct super_block *sb, u64 fragment, u64 size)
{
	unsigned i, j;
	u64 count = 0;

	/* reject sizes that are not fragment-aligned */
	if (size & ~uspi->s_fmask)
		return NULL;
	count = size >> uspi->s_fshift;
	/*
	 * count is unsigned (u64), so the old "count <= 0" test was a
	 * tautological spelling of "count == 0" (and draws a
	 * -Wtype-limits warning); say what is meant.
	 */
	if (count == 0 || count > UFS_MAXFRAG)
		return NULL;
	USPI_UBH(uspi)->fragment = fragment;
	USPI_UBH(uspi)->count = count;
	for (i = 0; i < count; i++)
		if (!(USPI_UBH(uspi)->bh[i] = sb_bread(sb, fragment + i)))
			goto failed;
	/* NULL the unused tail so release paths never see stale pointers */
	for (; i < UFS_MAXFRAG; i++)
		USPI_UBH(uspi)->bh[i] = NULL;
	return USPI_UBH(uspi);
failed:
	/* drop every buffer read before the failure; head is embedded, not freed */
	for (j = 0; j < i; j++)
		brelse(USPI_UBH(uspi)->bh[j]);
	return NULL;
}
/*
 * Drop the buffer references held by @ubh and free the head itself.
 * A NULL @ubh is a no-op.
 */
void ubh_brelse (struct ufs_buffer_head * ubh)
{
	unsigned n;

	if (!ubh)
		return;
	for (n = 0; n < ubh->count; n++)
		brelse(ubh->bh[n]);
	kfree(ubh);
}
/*
 * Release the buffers of the buffer head embedded in @uspi and clear
 * each slot.  The head itself is not freed — it lives inside the
 * superblock private info, not on the heap.
 */
void ubh_brelse_uspi (struct ufs_sb_private_info * uspi)
{
	unsigned n;

	if (!USPI_UBH(uspi))
		return;
	for (n = 0; n < USPI_UBH(uspi)->count; n++) {
		brelse(USPI_UBH(uspi)->bh[n]);
		USPI_UBH(uspi)->bh[n] = NULL;
	}
}
/* Mark every buffer in @ubh dirty; a NULL @ubh is a no-op. */
void ubh_mark_buffer_dirty (struct ufs_buffer_head * ubh)
{
	unsigned n;

	if (!ubh)
		return;
	for (n = 0; n < ubh->count; n++)
		mark_buffer_dirty(ubh->bh[n]);
}
/*
 * Write out and wait on every buffer in @ubh.  All writes are
 * submitted first so they can proceed in parallel before we block
 * waiting for each of them.  A NULL @ubh is a no-op.
 */
void ubh_sync_block(struct ufs_buffer_head *ubh)
{
	unsigned n;

	if (!ubh)
		return;
	for (n = 0; n < ubh->count; n++)
		write_dirty_buffer(ubh->bh[n], 0);
	for (n = 0; n < ubh->count; n++)
		wait_on_buffer(ubh->bh[n]);
}
/*
 * Forget (discard without writeback) every buffer in @ubh.
 * NULL head and NULL slots are skipped.
 */
void ubh_bforget (struct ufs_buffer_head * ubh)
{
	unsigned n;

	if (!ubh)
		return;
	for (n = 0; n < ubh->count; n++) {
		if (ubh->bh[n])
			bforget(ubh->bh[n]);
	}
}
/*
 * Return nonzero if any buffer in @ubh is dirty, 0 otherwise
 * (a NULL @ubh counts as clean).
 */
int ubh_buffer_dirty (struct ufs_buffer_head * ubh)
{
	unsigned n;
	unsigned dirty = 0;

	if (!ubh)
		return 0;
	for (n = 0; n < ubh->count; n++)
		dirty |= buffer_dirty(ubh->bh[n]);
	return dirty;
}
/** * ufs_get_locked_folio() - locate, pin and lock a pagecache folio, if not exist * read it from disk. * @mapping: the address_space to search * @index: the page index * * Locates the desired pagecache folio, if not exist we'll read it, * locks it, increments its reference * count and returns its address. *
*/ struct folio *ufs_get_locked_folio(struct address_space *mapping,
pgoff_t index)
{ struct inode *inode = mapping->host; struct folio *folio = filemap_lock_folio(mapping, index); if (IS_ERR(folio)) {
folio = read_mapping_folio(mapping, index, NULL);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.