/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 *
 * Returns: %true if the uuid is unique, %false if not
 */
bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	/* the uuid scan walks bus children; the bus must be held locked */
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}
/* * There is no namespace label (is_namespace_io()), or the label * indicates the default sector size.
*/ return 512;
}
EXPORT_SYMBOL(pmem_sector_size);
staticint nd_namespace_label_update(struct nd_region *nd_region, struct device *dev)
{
dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim, "namespace must be idle during label update\n"); if (dev->driver || to_ndns(dev)->claim) return 0;
/* * Only allow label writes that will result in a valid namespace * or deletion of an existing namespace.
*/ if (is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
resource_size_t size = resource_size(&nspm->nsio.res);
/** * shrink_dpa_allocation - for each dimm in region free n bytes for label_id * @nd_region: the set of dimms to reclaim @n bytes from * @label_id: unique identifier for the namespace consuming this dpa range * @n: number of bytes per-dimm to release * * Assumes resources are ordered. Starting from the end try to * adjust_resource() the allocation to @n, but if @n is larger than the * allocation delete it and find the 'new' last allocation in the label * set. * * Returns: %0 on success on -errno on error
*/ staticint shrink_dpa_allocation(struct nd_region *nd_region, struct nd_label_id *label_id, resource_size_t n)
{ int i;
for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; int rc;
/** * space_valid() - validate free dpa space against constraints * @nd_region: hosting region of the free space * @ndd: dimm device data for debug * @label_id: namespace id to allocate space * @prev: potential allocation that precedes free space * @next: allocation that follows the given free space range * @exist: first allocation with same id in the mapping * @n: range that must satisfied for pmem allocations * @valid: free space range to validate * * BLK-space is valid as long as it does not precede a PMEM * allocation in a given region. PMEM-space must be contiguous * and adjacent to an existing allocation (if one * exists). If reserving PMEM any space is valid.
*/ staticvoid space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd, struct nd_label_id *label_id, struct resource *prev, struct resource *next, struct resource *exist,
resource_size_t n, struct resource *valid)
{ bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0; unsignedlong align;
/* ignore resources outside this nd_mapping */ if (res->start > mapping_end) continue; if (res->end < nd_mapping->start) continue;
/* space at the beginning of the mapping */ if (!first++ && res->start > nd_mapping->start) {
valid.start = nd_mapping->start;
valid.end = res->start - 1;
space_valid(nd_region, ndd, label_id, NULL, next, exist,
to_allocate, &valid);
available = resource_size(&valid); if (available)
loc = ALLOC_BEFORE;
}
/* space between allocations */ if (!loc && next) {
valid.start = res->start + resource_size(res);
valid.end = min(mapping_end, next->start - 1);
space_valid(nd_region, ndd, label_id, res, next, exist,
to_allocate, &valid);
available = resource_size(&valid); if (available)
loc = ALLOC_MID;
}
/* space at the end of the mapping */ if (!loc && !next) {
valid.start = res->start + resource_size(res);
valid.end = mapping_end;
space_valid(nd_region, ndd, label_id, res, next, exist,
to_allocate, &valid);
available = resource_size(&valid); if (available)
loc = ALLOC_AFTER;
}
n -= allocate; if (n) { /* * Retry scan with newly inserted resources. * For example, if we did an ALLOC_BEFORE * insertion there may also have been space * available for an ALLOC_AFTER insertion, so we * need to check this same resource again
*/ goto retry;
} else return 0;
}
if (strncmp("pmem", label_id->id, 4) == 0) return 0;
retry:
for_each_dpa_resource(ndd, res) { int rc; struct resource *next = res->sibling;
resource_size_t end = res->start + resource_size(res);
if (!next || strcmp(res->name, label_id->id) != 0
|| strcmp(next->name, label_id->id) != 0
|| end != next->start) continue;
end += resource_size(next);
nvdimm_free_dpa(ndd, next);
rc = adjust_resource(res, res->start, end - res->start);
nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc); if (rc) return rc;
res->flags |= DPA_RESOURCE_ADJUSTED; goto retry;
}
return 0;
}
/*
 * __reserve_free_pmem - claim all remaining pmem capacity on @data (an nvdimm)
 * @dev: region device being walked (callback for a bus/device iteration)
 * @data: the nvdimm whose free capacity should be reserved
 *
 * For the mapping of @data in this region, allocate all currently
 * available pmem DPA under the "pmem-reserve" label_id so it cannot be
 * claimed by another consumer.  Regions that do not include @data are
 * skipped.
 *
 * Returns: %0 on success (or nothing to do), -ENXIO if the reservation
 * could not cover the full available capacity.
 */
int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	/* only memory (pmem) regions participate */
	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;
		n = nd_pmem_available_dpa(nd_region, nd_mapping);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	/* @nvdimm is not a member of this region */
	return 0;
}
/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For PMEM regions start allocations
 * from the start of an interleave set and end at the first BLK
 * allocation or the end of the interleave set, whichever comes first.
 *
 * Returns: %0 on success, -errno on error
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc;

		rem = scan_allocate(nd_region, nd_mapping, label_id, rem);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		/* coalesce adjacent same-label allocations on this dimm */
		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}
if (is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
uuid = nspm->uuid;
id = nspm->id;
}
/* * We need a uuid for the allocation-label and dimm(s) on which * to store the label.
*/ if (uuid_not_set(uuid, dev, __func__)) return -ENXIO; if (nd_region->ndr_mappings == 0) {
dev_dbg(dev, "not associated with dimm(s)\n"); return -ENXIO;
}
div_u64_rem(val, nd_region->align, &remainder); if (remainder) {
dev_dbg(dev, "%llu is not %ldK aligned\n", val,
nd_region->align / SZ_1K); return -EINVAL;
}
nd_label_gen_id(&label_id, uuid, flags); for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_mapping = &nd_region->mapping[i];
ndd = to_ndd(nd_mapping);
/* * All dimms in an interleave set, need to be enabled * for the size to be changed.
*/ if (!ndd) return -ENXIO;
allocated += nvdimm_allocated_dpa(ndd, &label_id);
}
available = nd_region_allocatable_dpa(nd_region);
if (val > available + allocated) return -ENOSPC;
if (val == allocated) return 0;
val = div_u64(val, nd_region->ndr_mappings);
allocated = div_u64(allocated, nd_region->ndr_mappings); if (val < allocated)
rc = shrink_dpa_allocation(nd_region, &label_id,
allocated - val); else
rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);
if (rc) return rc;
if (is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
nd_namespace_pmem_set_resource(nd_region, nspm,
val * nd_region->ndr_mappings);
}
/* * Try to delete the namespace if we deleted all of its * allocation, this is not the seed or 0th device for the * region, and it is not actively claimed by a btt, pfn, or dax * instance.
*/ if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
nd_device_unregister(dev, ND_ASYNC);
if (IS_ERR(uuid)) return PTR_ERR(uuid); if (uuid) return sprintf(buf, "%pUb\n", uuid); return sprintf(buf, "\n");
}
/** * namespace_update_uuid - check for a unique uuid and whether we're "renaming" * @nd_region: parent region so we can updates all dimms in the set * @dev: namespace type for generating label_id * @new_uuid: incoming uuid * @old_uuid: reference to the uuid storage location in the namespace object * * Returns: %0 on success on -errno on error
*/ staticint namespace_update_uuid(struct nd_region *nd_region, struct device *dev, uuid_t *new_uuid,
uuid_t **old_uuid)
{ struct nd_label_id old_label_id; struct nd_label_id new_label_id; int i;
if (!nd_is_uuid_unique(dev, new_uuid)) return -EINVAL;
if (*old_uuid == NULL) goto out;
/* * If we've already written a label with this uuid, then it's * too late to rename because we can't reliably update the uuid * without losing the old namespace. Userspace must delete this * namespace to abandon the old uuid.
*/ for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i];
/* * This check by itself is sufficient because old_uuid * would be NULL above if this uuid did not exist in the * currently written set. * * FIXME: can we delete uuid with zero dpa allocated?
*/ if (list_empty(&nd_mapping->labels)) return -EBUSY;
}
/* no address to convey if the namespace has no allocation */ if (resource_size(res) == 0) return -ENXIO; return sprintf(buf, "%#llx\n", (unsignedlonglong) res->start);
} static DEVICE_ATTR_ADMIN_RO(resource);
/*
 * btt_claim_class - pick the BTT format version supported by this region
 * @dev: claiming device whose parent region's label state is inspected
 *
 * Walks every mapping in the parent region and accumulates what each
 * dimm's namespace-label index block says into @loop_bitmask, then maps
 * the result to a BTT claim class.
 *
 * Returns: %NVDIMM_CCLASS_BTT, %NVDIMM_CCLASS_BTT2, or -ENXIO when the
 * mappings disagree about their label versions.
 */
static int btt_claim_class(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int i, loop_bitmask = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_index *nsindex;

		/*
		 * If any of the DIMMs do not support labels the only
		 * possible BTT format is v1.
		 */
		if (!ndd) {
			loop_bitmask = 0;
			break;
		}

		nsindex = to_namespace_index(ndd, ndd->ns_current);
		if (nsindex == NULL)
			loop_bitmask |= 1;
		else {
			/* check whether existing labels are v1.1 or v1.2 */
			if (__le16_to_cpu(nsindex->major) == 1
					&& __le16_to_cpu(nsindex->minor) == 1)
				loop_bitmask |= 2;
			else
				loop_bitmask |= 4;
		}
	}
	/*
	 * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
	 * block is found, a v1.1 label for any mapping will set bit 1, and a
	 * v1.2 label will set bit 2.
	 *
	 * At the end of the loop, at most one of the three bits must be set.
	 * If multiple bits were set, it means the different mappings disagree
	 * about their labels, and this must be cleaned up first.
	 *
	 * If all the label index blocks are found to agree, nsindex of NULL
	 * implies labels haven't been initialized yet, and when they will,
	 * they will be of the 1.2 format, so we can assume BTT2.0
	 *
	 * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
	 * found, we enforce BTT2.0
	 *
	 * If the loop was never entered, default to BTT1.1 (legacy namespaces)
	 */
	switch (loop_bitmask) {
	case 0:	/* labelless dimms, or loop never entered */
	case 2:	/* all mappings carry v1.1 labels */
		return NVDIMM_CCLASS_BTT;
	case 1:	/* no index blocks yet: labels will initialize as v1.2 */
	case 4:	/* all mappings carry v1.2 labels */
		return NVDIMM_CCLASS_BTT2;
	default:
		return -ENXIO;
	}
}
if (is_namespace_pmem(dev)) { if (a == &dev_attr_size.attr) return 0644;
return a->mode;
}
/* base is_namespace_io() attributes */ if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
a == &dev_attr_resource.attr) return a->mode;
/* * Flush any in-progess probes / removals in the driver * for the raw personality of this namespace.
*/
device_lock(&ndns->dev);
device_unlock(&ndns->dev); if (ndns->dev.driver) {
dev_dbg(&ndns->dev, "is active, can't bind %s\n",
dev_name(dev)); return ERR_PTR(-EBUSY);
} if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev, "host (%s) vs claim (%s) mismatch\n",
dev_name(dev),
dev_name(ndns->claim))) return ERR_PTR(-ENXIO);
} else {
ndns = to_ndns(dev); if (ndns->claim) {
dev_dbg(dev, "claimed by %s, failing probe\n",
dev_name(ndns->claim));
return ERR_PTR(-ENXIO);
}
}
if (nvdimm_namespace_locked(ndns)) return ERR_PTR(-EACCES);
size = nvdimm_namespace_capacity(ndns); if (size < ND_MIN_NAMESPACE_SIZE) {
dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
&size, ND_MIN_NAMESPACE_SIZE); return ERR_PTR(-ENODEV);
}
/* * Note, alignment validation for fsdax and devdax mode * namespaces happens in nd_pfn_validate() where infoblock * padding parameters can be applied.
*/ if (pmem_should_map_pages(dev)) { struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); struct resource *res = &nsio->res;
if (!IS_ALIGNED(res->start | (res->end + 1),
memremap_compat_align())) {
dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res); return ERR_PTR(-EOPNOTSUPP);
}
}
if (is_namespace_pmem(&ndns->dev)) { struct nd_namespace_pmem *nspm;
nspm = to_nd_namespace_pmem(&ndns->dev); if (uuid_not_set(nspm->uuid, &ndns->dev, __func__)) return ERR_PTR(-ENODEV);
}
for (i = 0; i < nd_region->ndr_mappings; i++) {
nsl_get_uuid(ndd, nd_label, &uuid); if (has_uuid_at_pos(nd_region, &uuid, cookie, i)) continue; if (has_uuid_at_pos(nd_region, &uuid, altcookie, i)) continue; break;
}
if (i < nd_region->ndr_mappings) { struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
/* * Give up if we don't find an instance of a uuid at each * position (from 0 to nd_region->ndr_mappings - 1), or if we * find a dimm with two instances of the same uuid.
*/
dev_err(&nd_region->dev, "%s missing label for %pUb\n",
nvdimm_name(nvdimm), nsl_uuid_raw(ndd, nd_label));
rc = -EINVAL; goto err;
}
/* * Fix up each mapping's 'labels' to have the validated pmem label for * that position at labels[0], and NULL at labels[1]. In the process, * check that the namespace aligns with interleave-set.
*/
nsl_get_uuid(ndd, nd_label, &uuid);
rc = select_pmem_id(nd_region, &uuid); if (rc) goto err;
/* Calculate total size and populate namespace properties from label0 */ for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_namespace_label *label0; struct nvdimm_drvdata *ndd;
/* * Seed creation failures are not fatal, provisioning is simply * disabled until memory becomes available
*/ if (!nd_region->ns_seed)
dev_err(&nd_region->dev, "failed to create namespace\n"); else {
device_initialize(nd_region->ns_seed);
lockdep_set_class(&nd_region->ns_seed->mutex,
&nvdimm_namespace_key);
nd_device_register(nd_region->ns_seed);
}
}
/* Instantiate the region's seed dax device; requires the bus lock. */
void nd_region_create_dax_seed(struct nd_region *nd_region)
{
	struct device *seed;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

	seed = nd_dax_create(nd_region);
	nd_region->dax_seed = seed;
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!seed)
		dev_err(&nd_region->dev, "failed to create dax namespace\n");
}
/* Instantiate the region's seed pfn device; requires the bus lock. */
void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
	struct device *seed;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

	seed = nd_pfn_create(nd_region);
	nd_region->pfn_seed = seed;
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!seed)
		dev_err(&nd_region->dev, "failed to create pfn namespace\n");
}
/* Instantiate the region's seed btt device; requires the bus lock. */
void nd_region_create_btt_seed(struct nd_region *nd_region)
{
	struct device *seed;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

	seed = nd_btt_create(nd_region);
	nd_region->btt_seed = seed;
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!seed)
		dev_err(&nd_region->dev, "failed to create btt namespace\n");
}
/* skip labels that describe extents outside of the region */ if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
nsl_get_dpa(ndd, nd_label) > map_end) continue;
i = add_namespace_resource(nd_region, nd_label, devs, count); if (i < 0) goto err; if (i < count) continue; if (count) {
__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); if (!__devs) goto err;
memcpy(__devs, devs, sizeof(dev) * count);
kfree(devs);
devs = __devs;
}
dev = create_namespace_pmem(nd_region, nd_mapping, nd_label); if (IS_ERR(dev)) { switch (PTR_ERR(dev)) { case -EAGAIN: /* skip invalid labels */ continue; default: goto err;
}
} else
devs[count++] = dev;
/* lock down all mappings while we scan labels */ for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_mapping = &nd_region->mapping[i];
mutex_lock_nested(&nd_mapping->lock, i);
}
devs = scan_labels(nd_region);
for (i = 0; i < nd_region->ndr_mappings; i++) { int reverse = nd_region->ndr_mappings - 1 - i;
put_ndd(ndd);
nd_mapping->ndd = NULL; if (ndd)
atomic_dec(&nvdimm->busy);
}
}
staticint init_active_labels(struct nd_region *nd_region)
{ int i, rc = 0;
for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct nvdimm *nvdimm = nd_mapping->nvdimm; struct nd_label_ent *label_ent; int count, j;
/* * If the dimm is disabled then we may need to prevent * the region from being activated.
*/ if (!ndd) { if (test_bit(NDD_LOCKED, &nvdimm->flags)) /* fail, label data may be unreadable */; elseif (test_bit(NDD_LABELING, &nvdimm->flags)) /* fail, labels needed to disambiguate dpa */; else continue;
device_initialize(dev);
put_device(dev);
}
*err = j - i; /* * All of the namespaces we tried to register failed, so * fail region activation.
*/ if (*err == 0)
rc = -ENODEV;
}
kfree(devs);
if (rc == -ENODEV) return rc;
return i;
}
Messung V0.5
• Dauer der Verarbeitung: 0.26 Sekunden
(vorverarbeitet)
•
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.