/*
 * Instead of guarding user accesses with a conditional
 * user_access_begin (which needs a speculation fence), the user
 * pointer itself can be masked so that it can never reach kernel
 * space.  This is only valid for dense accesses that start at the
 * returned address and move upward.
 */
static inline void __user *mask_user_address(const void __user *ptr)
{
	void __user *masked;

	/*
	 * If ptr is above USER_PTR_MAX, clamp it to USER_PTR_MAX
	 * (cmova: conditional move when the unsigned compare is
	 * "above"), so the result always lies in the user range.
	 */
	asm("cmp %1,%0\n\t"
	    "cmova %1,%0"
		:"=r" (masked)
		:"r" (runtime_const_ptr(USER_PTR_MAX)),
		 "0" (ptr));
	return masked;
}

/*
 * Begin a user access with pointer masking rather than a fenced
 * range check.  Evaluates to the (possibly clamped) pointer; note
 * the two-step assignment keeps the expression's type equal to the
 * type of 'x' rather than void __user *.
 */
#define masked_user_access_begin(x) ({				\
	__auto_type __m_uptr = (x);				\
	__m_uptr = mask_user_address(__m_uptr);			\
	__uaccess_begin();					\
	__m_uptr; })
/*
 * User pointers can have tag bits on x86-64.  This scheme tolerates
 * arbitrary values in those bits rather than masking them off.
 *
 * Enforce two rules:
 *  1. 'ptr' must be in the user part of the address space
 *  2. 'ptr+size' must not overflow into kernel addresses
 *
 * Note that we always have at least one guard page between the
 * max user address and the non-canonical gap, allowing us to
 * ignore small sizes entirely.
 *
 * In fact, we could probably remove the size check entirely, since
 * any kernel accesses will be in increasing address order starting
 * at 'ptr'.
 *
 * That's a separate optimization, for now just handle the small
 * constant case.
 */ staticinlinebool __access_ok(constvoid __user *ptr, unsignedlong size)
{ if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) { return valid_user_address(ptr);
} else { unsignedlong sum = size + (__force unsignedlong)ptr;
/*
 * NOTE(review): this function appears truncated in this chunk — the
 * else branch computes 'sum' but its return statement (and the
 * function's closing brace) is not visible here.  Verify against the
 * full file before relying on this view.
 */
/*
 * Raw user/kernel copy helper: handles faults on both the 'to' and
 * 'from' sides, but performs no access_ok() validation — callers must
 * have checked or masked the user pointer themselves.
 *
 * NOTE(review): presumably returns the number of bytes NOT copied
 * (0 on full success), per the usual copy_user convention — confirm
 * against the definition in the .S/.c implementation.
 */
__must_check unsignedlong
rep_movs_alternative(void *to, constvoid *from, unsigned len);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.