/** * folio_file_pfn - like folio_file_page, but return a pfn. * @folio: The folio which contains this index. * @index: The index we want to look up. * * Return: The pfn for this index.
*/ staticinline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
{ return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));
}
/*
 * Ask the architecture to make the folio's contents usable by the guest
 * (arch hook kvm_arch_gmem_prepare()).  Compiles down to "return 0" when
 * CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE is not set.
 *
 * @index must be the folio-aligned page index within the gmem file.
 *
 * Return: 0 on success, a negative errno from the arch hook on failure.
 */
static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
				    pgoff_t index, struct folio *folio)
{
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
	kvm_pfn_t pfn = folio_file_pfn(folio, index);
	/* Translate the file index back to a gfn via the slot's binding. */
	gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
	int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));

	if (rc) {
		pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
				    index, gfn, pfn, rc);
		return rc;
	}
#endif

	/*
	 * Restored: the extracted source was truncated after "#endif",
	 * leaving a non-void function with no return on the success path.
	 */
	return 0;
}
/*
 * Process @folio, which contains @gfn, so that the guest can use it.
 * The folio must be locked and the gfn must be contained in @slot.
 * On successful return the guest sees a zero page so as to avoid
 * leaking host data and the up-to-date flag is set.
 */
static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
				  gfn_t gfn, struct folio *folio)
{
	unsigned long nr_pages, i;
	pgoff_t index;
	int r;

	/* Zero every page so the guest never observes stale host data. */
	nr_pages = folio_nr_pages(folio);
	for (i = 0; i < nr_pages; i++)
		clear_highpage(folio_page(folio, i));

	/*
	 * Preparing huge folios should always be safe, since it should
	 * be possible to split them later if needed.
	 *
	 * Right now the folio order is always going to be zero, but the
	 * code is ready for huge folios.  The only assumption is that
	 * the base pgoff of memslots is naturally aligned with the
	 * requested page order, ensuring that huge folios can also use
	 * huge page table entries for GPA->HPA mapping.
	 *
	 * The order will be passed when creating the guest_memfd, and
	 * checked when creating memslots.
	 */
	WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio)));
	index = gfn - slot->base_gfn + slot->gmem.pgoff;
	index = ALIGN_DOWN(index, 1 << folio_order(folio));
	r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
	if (!r)
		kvm_gmem_mark_prepared(folio);

	return r;
}
/* * Returns a locked folio on success. The caller is responsible for * setting the up-to-date flag before the memory is mapped into the guest. * There is no backing storage for the memory, so the folio will remain * up-to-date until it's removed. * * Ignore accessed, referenced, and dirty flags. The memory is * unevictable and there is no storage to write back to.
*/ staticstruct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
{ /* TODO: Support huge pages. */ return filemap_grab_folio(inode->i_mapping, index);
}
/*
 * NOTE(review): mid-function fragment -- the enclosing definition's header
 * (presumably kvm_gmem_allocate(), given the folio-grab loop and the
 * shared-invalidate unlock) was lost in extraction.  TODO: confirm against
 * the full file.
 */
r = 0;
/* Grab (allocating if absent) one locked folio per iteration. */
for (index = start; index < end; ) {
	struct folio *folio;

	/* Allocating a large range can take a while; honor pending signals. */
	if (signal_pending(current)) {
		r = -EINTR;
		break;
	}

	folio = kvm_gmem_get_folio(inode, index);
	if (IS_ERR(folio)) {
		r = PTR_ERR(folio);
		break;
	}

	/* Advance past the whole folio, then drop the lock and reference. */
	index = folio_next_index(folio);

	folio_unlock(folio);
	folio_put(folio);

	/* 64-bit only, wrapping the index should be impossible. */
	if (WARN_ON_ONCE(!index))
		break;

	cond_resched();
}

filemap_invalidate_unlock_shared(mapping);

return r;
}
/*
 * fallocate() handler for guest_memfd files.  Only FALLOC_FL_KEEP_SIZE,
 * optionally combined with FALLOC_FL_PUNCH_HOLE, is supported, and only
 * on page-aligned ranges.
 *
 * Return: 0 on success, -EOPNOTSUPP/-EINVAL on bad mode/alignment, or the
 * error from the punch-hole/allocate helper.
 */
static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
			       loff_t len)
{
	int ret;

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!PAGE_ALIGNED(offset) || !PAGE_ALIGNED(len))
		return -EINVAL;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = kvm_gmem_punch_hole(file_inode(file), offset, len);
	else
		ret = kvm_gmem_allocate(file_inode(file), offset, len);

	/*
	 * Restored: the extracted source was truncated after the allocate
	 * call, dropping the ctime/mtime update and the return statement.
	 */
	if (!ret)
		file_modified(file);

	return ret;
}
/*
 * NOTE(review): mid-function fragment -- the definition header (presumably
 * kvm_gmem_release(), given the ".release" reference below and the final
 * kvm_put_kvm()) is missing from this extract.  TODO: confirm against the
 * full file.
 */
/*
 * Prevent concurrent attempts to *unbind* a memslot.  This is the last
 * reference to the file and thus no new bindings can be created, but
 * dereferencing the slot for existing bindings needs to be protected
 * against memslot updates, specifically so that unbind doesn't race
 * and free the memslot (kvm_gmem_get_file() will return NULL).
 *
 * Since .release is called only when the reference count is zero,
 * after which file_ref_get() and get_file_active() fail,
 * kvm_gmem_get_pfn() cannot be using the file concurrently.
 * file_ref_put() provides a full barrier, and get_file_active() the
 * matching acquire barrier.
 */
mutex_lock(&kvm->slots_lock);

/*
 * All in-flight operations are gone and new bindings can be created.
 * Zap all SPTEs pointed at by this file.  Do not free the backing
 * memory, as its lifetime is associated with the inode, not the file.
 */
kvm_gmem_invalidate_begin(gmem, 0, -1ul);
kvm_gmem_invalidate_end(gmem, 0, -1ul);

list_del(&gmem->entry);

filemap_invalidate_unlock(inode->i_mapping);

mutex_unlock(&kvm->slots_lock);

/* Bindings are dead; free the tracking structures. */
xa_destroy(&gmem->bindings);
kfree(gmem);

/* Drop the VM reference the file held (the file pins KVM). */
kvm_put_kvm(kvm);

return 0;
}
staticinlinestruct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
{ /* * Do not return slot->gmem.file if it has already been closed; * there might be some time between the last fput() and when * kvm_gmem_release() clears slot->gmem.file.
*/ return get_file_active(&slot->gmem.file);
}
/*
 * NOTE(review): extraction artifact -- the comment immediately below ("Do
 * not truncate the range ...") reads like it belongs to a memory-failure
 * handler, while the code that follows is the body of a bind operation
 * (presumably kvm_gmem_bind()); both enclosing definition headers are
 * missing from this extract.  TODO: reconcile against the full file.
 */
/*
 * Do not truncate the range, what action is taken in response to the
 * error is userspace's decision (assuming the architecture supports
 * gracefully handling memory errors).  If/when the guest attempts to
 * access a poisoned page, kvm_gmem_get_pfn() will return -EHWPOISON,
 * at which point KVM can either terminate the VM or propagate the
 * error to userspace.
 */
start = offset >> PAGE_SHIFT;
end = start + slot->npages;

/* Reject the bind if any page in [start, end) is already bound. */
if (!xa_empty(&gmem->bindings) &&
    xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
	filemap_invalidate_unlock(inode->i_mapping);
	goto err;
}

/*
 * memslots of flag KVM_MEM_GUEST_MEMFD are immutable to change, so
 * kvm_gmem_bind() must occur on a new memslot.  Because the memslot
 * is not visible yet, kvm_gmem_get_pfn() is guaranteed to see the file.
 */
WRITE_ONCE(slot->gmem.file, file);
slot->gmem.pgoff = start;

xa_store_range(&gmem->bindings, start, end - 1, slot, GFP_KERNEL);
filemap_invalidate_unlock(inode->i_mapping);

/*
 * Drop the reference to the file, even on success.  The file pins KVM,
 * not the other way 'round.  Active bindings are invalidated if the
 * file is closed before memslots are destroyed.
 */
r = 0;
err:
fput(file);
return r;
}
/*
 * NOTE(review): mid-function fragment (presumably kvm_gmem_unbind(), given
 * the __kvm_gmem_unbind() call); the definition header and the tail of the
 * function are missing from this extract.  TODO: confirm against the full
 * file.
 */
/*
 * Nothing to do if the underlying file was _already_ closed, as
 * kvm_gmem_release() invalidates and nullifies all bindings.
 */
if (!slot->gmem.file)
	return;

file = kvm_gmem_get_file(slot);

/*
 * However, if the file is _being_ closed, then the bindings need to be
 * removed as kvm_gmem_release() might not run until after the memslot
 * is freed.  Note, modifying the bindings is safe even though the file
 * is dying as kvm_gmem_release() nullifies slot->gmem.file under
 * slots_lock, and only puts its reference to KVM after destroying all
 * bindings.  I.e. reaching this point means kvm_gmem_release() hasn't
 * yet destroyed the bindings or freed the gmem_file, and can't do so
 * until the caller drops slots_lock.
 */
if (!file) {
	__kvm_gmem_unbind(slot, slot->gmem.file->private_data);
	return;
}
/*
 * NOTE(review): mid-function fragment of kvm_gmem_populate() (named by the
 * EXPORT_SYMBOL_GPL below); the loop header, folio/pfn setup, and local
 * declarations are missing from this extract.  TODO: confirm against the
 * full file.
 */
ret = -EINVAL;
/*
 * Shrink max_order until the whole [gfn, gfn + 2^max_order) range is
 * uniformly PRIVATE; a mixed-attribute range cannot be populated as one
 * unit.  Order zero that is still mixed is an error.
 */
while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
					KVM_MEMORY_ATTRIBUTE_PRIVATE,
					KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
	if (!max_order)
		goto put_folio_and_exit;
	max_order--;
}

/* Source pointer is optional; NULL means populate without initial data. */
p = src ? src + i * PAGE_SIZE : NULL;
ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
if (!ret)
	kvm_gmem_mark_prepared(folio);

put_folio_and_exit:
folio_put(folio);
if (ret)
	break;
}

filemap_invalidate_unlock(file->f_mapping);

fput(file);
/* Partial progress wins: report pages populated so far, else the error. */
return ret && !i ? ret : i;
}
EXPORT_SYMBOL_GPL(kvm_gmem_populate);
#endif
Messung V0.5
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
0.24
Bemerkung:
(vorverarbeitet)
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.