/* Get the pointer to the first HGCM parameter. */
#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))

/* Get the pointer to the first HGCM parameter in a 32-bit request. */
#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
	((struct vmmdev_hgcm_function_parameter32 *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
/*
 * NOTE(review): this function is garbled by extraction — keywords are fused
 * ("staticvoid"), statements are merged onto single lines, and code is
 * missing: the allocation of @pages, the vbg_req_perform() call that fills
 * in @req, the success path storing the vmap, and the function's closing
 * braces. Restore from the upstream vboxguest driver rather than editing
 * this fragment in place.
 */
/** * vbg_guest_mappings_init - Reserves memory in which the VMM can * relocate any guest mappings that are floating around. * @gdev: The Guest extension device. * * This operation is a little bit tricky since the VMM might not accept * just any address because of address clashes between the three contexts * it operates in, so we try several times. * * Failure to reserve the guest mappings is ignored.
*/ staticvoid vbg_guest_mappings_init(struct vbg_dev *gdev)
{ struct vmmdev_hypervisorinfo *req; void *guest_mappings[GUEST_MAPPINGS_TRIES]; struct page **pages = NULL;
u32 size, hypervisor_size; int i, rc;
/* Query the required space. */
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
VBG_KERNEL_REQUEST); if (!req) return;
/* * The VMM will report back if there is nothing it wants to map, like * for instance in VT-x and AMD-V mode.
*/ if (req->hypervisor_size == 0) goto out;
hypervisor_size = req->hypervisor_size; /* Add 4M so that we can align the vmap to 4MiB as the host requires. */
size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER); if (!gdev->guest_mappings_dummy_page) goto out;
/*
 * NOTE(review): @pages is still NULL at this point — the kmalloc of the
 * pages array appears to have been lost; as written this dereferences NULL.
 */
for (i = 0; i < (size >> PAGE_SHIFT); i++)
pages[i] = gdev->guest_mappings_dummy_page;
/* * Try several times, the VMM might not accept some addresses because * of address clashes between the three contexts.
*/ for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
VM_MAP, PAGE_KERNEL_RO); if (!guest_mappings[i]) break;
/* Free vmap's from failed attempts. */ while (--i >= 0)
vunmap(guest_mappings[i]);
/* On failure free the dummy-page backing the vmap */ if (!gdev->guest_mappings) {
__free_page(gdev->guest_mappings_dummy_page);
gdev->guest_mappings_dummy_page = NULL;
}
/*
 * NOTE(review): truncated fragment — the code that fills in @req, performs
 * the request, unmaps gdev->guest_mappings and frees the backing pages is
 * missing, along with the function's closing brace. Restore from upstream.
 */
/** * vbg_guest_mappings_exit - Undo what vbg_guest_mappings_init did. * * @gdev: The Guest extension device.
*/ staticvoid vbg_guest_mappings_exit(struct vbg_dev *gdev)
{ struct vmmdev_hypervisorinfo *req; int rc;
if (!gdev->guest_mappings) return;
/* * Tell the host that we're going to free the memory we reserved for * it, the free it up. (Leak the memory if anything goes wrong here.)
*/
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
VBG_KERNEL_REQUEST); if (!req) return;
/*
 * NOTE(review): truncated fragment — @req1 and @req2 are NULL here; the
 * vbg_req_alloc() calls and the code filling in the guest-info fields have
 * been lost, as have the request-free calls, the final return and the
 * function's closing brace. Restore from upstream.
 */
/** * vbg_report_guest_info - Report the guest information to the host. * @gdev: The Guest extension device. * * Return: %0 or negative errno value.
*/ staticint vbg_report_guest_info(struct vbg_dev *gdev)
{ /* * Allocate and fill in the two guest info reports.
*/ struct vmmdev_guest_info *req1 = NULL; struct vmmdev_guest_info2 *req2 = NULL; int rc, ret = -ENOMEM;
/* * There are two protocols here: * 1. INFO2 + INFO1. Supported by >=3.2.51. * 2. INFO1 and optionally INFO2. The old protocol. * * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED * if not supported by the VMMDev (message ordering requirement).
*/
rc = vbg_req_perform(gdev, req2); if (rc >= 0) {
rc = vbg_req_perform(gdev, req1);
} elseif (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
rc = vbg_req_perform(gdev, req1); if (rc >= 0) {
rc = vbg_req_perform(gdev, req2); if (rc == VERR_NOT_IMPLEMENTED)
rc = VINF_SUCCESS;
}
}
ret = vbg_status_code_to_errno(rc);
/**
 * vbg_report_driver_status - Report the guest driver status to the host.
 * @gdev:   The Guest extension device.
 * @active: Flag whether the driver is now active or not.
 *
 * Return: 0 or negative errno value.
 */
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
	struct vmmdev_guest_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	/*
	 * NOTE(review): the request payload (facility/status fields) is never
	 * filled in before performing the request and @active is unused —
	 * the lines populating @req appear to have been lost; verify against
	 * the upstream driver.
	 */
	rc = vbg_req_perform(gdev, req);
	if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */
		rc = VINF_SUCCESS;

	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}
/*
 * NOTE(review): truncated fragment — @pages is used uninitialized (its
 * allocation was lost), @req and @rc are never used (the vbg_req_perform()
 * call is missing), the success path storing the pages in the balloon
 * tracking array is gone, and the loop's closing brace is missing.
 * Restore from upstream.
 */
/** * vbg_balloon_inflate - Inflate the balloon by one chunk. The caller * owns the balloon mutex. * @gdev: The Guest extension device. * @chunk_idx: Index of the chunk. * * Return: %0 or negative errno value.
*/ staticint vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{ struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req; struct page **pages; int i, rc, ret;
for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN); if (!pages[i]) {
ret = -ENOMEM; goto out_error;
}
out_error: while (--i >= 0)
__free_page(pages[i]);
kfree(pages);
return ret;
}
/**
 * vbg_balloon_deflate - Deflate the balloon by one chunk. The caller
 * owns the balloon mutex.
 * @gdev:      The Guest extension device.
 * @chunk_idx: Index of the chunk.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
	int i, rc;

	/*
	 * NOTE(review): @req and @rc are set up but never used — upstream
	 * tells the host about the chunk via a vbg_req_perform() call before
	 * freeing the pages; that code appears to have been lost here.
	 * Confirm against the upstream driver.
	 */
	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		__free_page(pages[i]);

	kfree(pages);
	gdev->mem_balloon.pages[chunk_idx] = NULL;

	return 0;
}
/*
 * NOTE(review): truncated fragment — @chunks is used uninitialized (the
 * line reading it from the host reply was lost), the code setting
 * mem_balloon.max_chunks is missing, and the deflate loop and function
 * never close. Restore from upstream.
 */
/* * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size * the host wants the balloon to be and adjust accordingly.
*/ staticvoid vbg_balloon_work(struct work_struct *work)
{ struct vbg_dev *gdev =
container_of(work, struct vbg_dev, mem_balloon.work); struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
u32 i, chunks; int rc, ret;
/* * Setting this bit means that we request the value from the host and * change the guest memory balloon according to the returned value.
*/
req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
rc = vbg_req_perform(gdev, req); if (rc < 0) {
vbg_err("%s error, rc: %d)\n", __func__, rc); return;
}
/* * The host always returns the same maximum amount of chunks, so * we do this once.
*/ if (!gdev->mem_balloon.max_chunks) {
gdev->mem_balloon.pages =
devm_kcalloc(gdev->dev, req->phys_mem_chunks, sizeof(struct page **), GFP_KERNEL); if (!gdev->mem_balloon.pages) return;
if (chunks > gdev->mem_balloon.chunks) { /* inflate */ for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
ret = vbg_balloon_inflate(gdev, i); if (ret < 0) return;
gdev->mem_balloon.chunks++;
}
} else { /* deflate */ for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
ret = vbg_balloon_deflate(gdev, i); if (ret < 0) return;
/*
 * NOTE(review): truncated fragment — the code filling in @req (the
 * @enabled flag), performing the request, reading back the heartbeat
 * interval, freeing @req and returning is missing. Restore from upstream.
 */
/** * vbg_heartbeat_host_config - Configure the host to check guest's heartbeat * and get heartbeat interval from the host. * @gdev: The Guest extension device. * @enabled: Set true to enable guest heartbeat checks on host. * * Return: %0 or negative errno value.
*/ staticint vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{ struct vmmdev_heartbeat *req; int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
VBG_KERNEL_REQUEST); if (!req) return -ENOMEM;
/*
 * NOTE(review): truncated fragment — heartbeat_interval_ms is never set
 * here (presumably fetched by the host-config call in upstream), the timer
 * setup code, final return and closing brace are missing, and mod_timer()
 * is called with expires=0 which looks like a lost expression. Restore
 * from upstream.
 */
/** * vbg_heartbeat_init - Initializes the heartbeat timer. This feature * may be disabled by the host. * @gdev: The Guest extension device. * * Return: %0 or negative errno value.
*/ staticint vbg_heartbeat_init(struct vbg_dev *gdev)
{ int ret;
/* Make sure that heartbeat checking is disabled if we fail. */
ret = vbg_heartbeat_host_config(gdev, false); if (ret < 0) return ret;
ret = vbg_heartbeat_host_config(gdev, true); if (ret < 0) return ret;
gdev->guest_heartbeat_req = vbg_req_alloc( sizeof(*gdev->guest_heartbeat_req),
VMMDEVREQ_GUEST_HEARTBEAT,
VBG_KERNEL_REQUEST); if (!gdev->guest_heartbeat_req) return -ENOMEM;
vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
__func__, gdev->heartbeat_interval_ms);
mod_timer(&gdev->heartbeat_timer, 0);
/*
 * NOTE(review): truncated fragment — the body of the while loop (updating
 * the per-bit usage counters and the tracker mask), the return statement
 * and the closing braces are missing. Restore from upstream.
 */
/** * vbg_track_bit_usage - Applies a change to the bit usage tracker. * @tracker: The bit usage tracker. * @changed: The bits to change. * @previous: The previous value of the bits. * * Return: %true if the mask changed, %false if not.
*/ staticbool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
u32 changed, u32 previous)
{ bool global_change = false;
while (changed) {
u32 bit = ffs(changed) - 1;
u32 bitmask = BIT(bit);
/*
 * NOTE(review): truncated fragment — everything after the "changed" check
 * is missing: updating the global event-filter tracker, performing @req
 * against the host, the out label, unlocking the mutex, freeing @req and
 * returning. Restore from upstream.
 */
/** * vbg_set_session_event_filter - Changes the event filter mask for the * given session. * @gdev: The Guest extension device. * @session: The session. * @or_mask: The events to add. * @not_mask: The events to remove. * @session_termination: Set if we're called by the session cleanup code. * This tweaks the error handling so we perform * proper session cleanup even if the host * misbehaves. * * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to * do session cleanup. Takes the session mutex. * * Return: 0 or negative errno value.
*/ staticint vbg_set_session_event_filter(struct vbg_dev *gdev, struct vbg_session *session,
u32 or_mask, u32 not_mask, bool session_termination)
{ struct vmmdev_mask *req;
u32 changed, previous; int rc, ret = 0;
/* * Allocate a request buffer before taking the spinlock, when * the session is being terminated the requestor is the kernel, * as we're cleaning up.
*/
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
session_termination ? VBG_KERNEL_REQUEST :
session->requestor); if (!req) { if (!session_termination) return -ENOMEM; /* Ignore allocation failure, we must do session cleanup. */
}
mutex_lock(&gdev->session_mutex);
/* Apply the changes to the session mask. */
previous = session->event_filter;
session->event_filter |= or_mask;
session->event_filter &= ~not_mask;
/* If anything actually changed, update the global usage counters. */
changed = previous ^ session->event_filter; if (!changed) goto out;
/*
 * NOTE(review): truncated fragment — the code filling in @req (zero caps
 * mask), performing the request, freeing @req and returning is missing.
 * Restore from upstream.
 */
/** * vbg_reset_host_capabilities - Init and termination worker for set * guest capabilities to zero on the host. * @gdev: The Guest extension device. * * Return: %0 or negative errno value.
*/ staticint vbg_reset_host_capabilities(struct vbg_dev *gdev)
{ struct vmmdev_mask *req; int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
VBG_KERNEL_REQUEST); if (!req) return -ENOMEM;
/*
 * NOTE(review): only the declarations of this function survive — the whole
 * body (computing the combined caps, allocating/performing the request and
 * returning) is missing. Restore from upstream.
 */
/** * vbg_set_host_capabilities - Set guest capabilities on the host. * @gdev: The Guest extension device. * @session: The session. * @session_termination: Set if we're called by the session cleanup code. * * Must be called with gdev->session_mutex hold. * * Return: %0 or negative errno value.
*/ staticint vbg_set_host_capabilities(struct vbg_dev *gdev, struct vbg_session *session, bool session_termination)
{ struct vmmdev_mask *req;
u32 caps; int rc;
/*
 * NOTE(review): truncated fragment — the code acquiring/releasing the caps
 * on gdev and the session before calling vbg_set_host_capabilities(), the
 * out label, the mutex unlock, the wakeup handling and the return are
 * missing, and the closing braces never appear. Restore from upstream.
 */
/** * vbg_acquire_session_capabilities - Acquire (get exclusive access) * guest capabilities for a session. * @gdev: The Guest extension device. * @session: The session. * @flags: Flags (VBGL_IOC_AGC_FLAGS_XXX). * @or_mask: The capabilities to add. * @not_mask: The capabilities to remove. * @session_termination: Set if we're called by the session cleanup code. * This tweaks the error handling so we perform * proper session cleanup even if the host * misbehaves. * * Takes the session mutex. * * Return: %0 or negative errno value.
*/ staticint vbg_acquire_session_capabilities(struct vbg_dev *gdev, struct vbg_session *session,
u32 or_mask, u32 not_mask,
u32 flags, bool session_termination)
{ unsignedlong irqflags; bool wakeup = false; int ret = 0;
mutex_lock(&gdev->session_mutex);
if (gdev->set_guest_caps_tracker.mask & or_mask) {
vbg_err("%s error: cannot acquire caps which are currently set\n",
__func__);
ret = -EINVAL; goto out;
}
/* * Mark any caps in the or_mask as now being in acquire-mode. Note * once caps are in acquire_mode they always stay in this mode. * This impacts event handling, so we take the event-lock.
*/
spin_lock_irqsave(&gdev->event_spinlock, irqflags);
gdev->acquire_mode_guest_caps |= or_mask;
spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
/* If we only have to switch the caps to acquire mode, we're done. */ if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE) goto out;
ret = vbg_set_host_capabilities(gdev, session, session_termination); /* Roll back on failure, unless it's session termination time. */ if (ret < 0 && !session_termination) {
gdev->acquired_guest_caps &= ~or_mask;
gdev->acquired_guest_caps |= not_mask;
spin_lock_irqsave(&gdev->event_spinlock, irqflags);
session->acquired_guest_caps &= ~or_mask;
session->acquired_guest_caps |= not_mask;
spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
}
/* * If we added a capability, check if that means some other thread in * our session should be unblocked because there are events pending * (the result of vbg_get_allowed_event_mask_for_session() may change). * * HACK ALERT! When the seamless support capability is added we generate * a seamless change event so that the ring-3 client can sync with * the seamless state.
*/ if (ret == 0 && or_mask != 0) {
spin_lock_irqsave(&gdev->event_spinlock, irqflags);
if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
gdev->pending_events |=
VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
/*
 * NOTE(review): truncated fragment — everything after the "changed" check
 * is missing: updating the global caps tracker, calling
 * vbg_set_host_capabilities(), the out label, the mutex unlock and the
 * return. Restore from upstream.
 */
/** * vbg_set_session_capabilities - Sets the guest capabilities for a * session. Takes the session mutex. * @gdev: The Guest extension device. * @session: The session. * @or_mask: The capabilities to add. * @not_mask: The capabilities to remove. * @session_termination: Set if we're called by the session cleanup code. * This tweaks the error handling so we perform * proper session cleanup even if the host * misbehaves. * * Return: %0 or negative errno value.
*/ staticint vbg_set_session_capabilities(struct vbg_dev *gdev, struct vbg_session *session,
u32 or_mask, u32 not_mask, bool session_termination)
{
u32 changed, previous; int ret = 0;
mutex_lock(&gdev->session_mutex);
if (gdev->acquire_mode_guest_caps & or_mask) {
vbg_err("%s error: cannot set caps which are in acquire_mode\n",
__func__);
ret = -EBUSY; goto out;
}
/* Apply the changes to the session mask. */
previous = session->set_guest_caps;
session->set_guest_caps |= or_mask;
session->set_guest_caps &= ~not_mask;
/* If anything actually changed, update the global usage counters. */
changed = previous ^ session->set_guest_caps; if (!changed) goto out;
/*
 * NOTE(review): the body of this function is almost entirely lost — only
 * the declaration of @ret survives; all of the device/session setup,
 * request allocation, event init and error paths are missing. Restore
 * from upstream.
 */
/** * vbg_core_init - Initializes the VBoxGuest device extension when the * device driver is loaded. * @gdev: The Guest extension device. * @fixed_events: Events that will be enabled upon init and no client * will ever be allowed to mask. * * The native code locates the VMMDev on the PCI bus and retrieve * the MMIO and I/O port ranges, this function will take care of * mapping the MMIO memory (if present). Upon successful return * the native code should set up the interrupt handler. * * Return: %0 or negative errno value.
*/ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{ int ret = -ENOMEM;
/*
 * NOTE(review): truncated fragment — the trailing cleanup (freeing the
 * pre-allocated requests etc.) and the function's closing brace are
 * missing. Restore from upstream.
 */
/** * vbg_core_exit - Call this on exit to clean-up vboxguest-core managed * resources. * @gdev: The Guest extension device. * * The native code should call this before the driver is loaded, * but don't call this on shutdown.
*/ void vbg_core_exit(struct vbg_dev *gdev)
{
vbg_heartbeat_exit(gdev);
vbg_guest_mappings_exit(gdev);
/* Clear the host flags (mouse status etc). */
vbg_reset_host_event_filter(gdev, 0);
vbg_reset_host_capabilities(gdev);
vbg_core_set_mouse_status(gdev, 0);
/*
 * NOTE(review): truncated fragment — the code initializing the new
 * session (gdev/requestor assignment), the return of the session pointer
 * and the closing brace are missing. Restore from upstream.
 */
/** * vbg_core_open_session - Creates a VBoxGuest user session. * @gdev: The Guest extension device. * @requestor: VMMDEV_REQUESTOR_* flags * * vboxguest_linux.c calls this when userspace opens the char-device. * * Return: A pointer to the new session or an ERR_PTR on error.
*/ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
{ struct vbg_session *session;
session = kzalloc(sizeof(*session), GFP_KERNEL); if (!session) return ERR_PTR(-ENOMEM);
/*
 * NOTE(review): truncated fragment — the session teardown (clearing caps
 * and event filters, disconnecting HGCM clients, freeing the session) and
 * the closing brace are missing. Restore from upstream.
 */
/** * vbg_core_close_session - Closes a VBoxGuest session. * @session: The session to close (and free).
*/ void vbg_core_close_session(struct vbg_session *session)
{ struct vbg_dev *gdev = session->gdev; int i, rc;
/*
 * NOTE(review): truncated fragment — the logic combining the acquire-mode
 * caps with the session's acquired caps into the allowed-events mask, the
 * return and the closing brace are missing. Restore from upstream.
 */
/* Must be called with the event_lock held */ static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev, struct vbg_session *session)
{
u32 acquire_mode_caps = gdev->acquire_mode_guest_caps;
u32 session_acquired_caps = session->acquired_guest_caps;
u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK;
/*
 * NOTE(review): truncated fragment — the code clearing the consumed bits
 * from gdev->pending_events, the return and the closing brace are missing.
 * Restore from upstream.
 */
/* Must be called with the event_lock held */ static u32 vbg_consume_events_locked(struct vbg_dev *gdev, struct vbg_session *session,
u32 event_mask)
{
u32 events = gdev->pending_events & event_mask;
/*
 * NOTE(review): nearly complete but still garbled — keywords are fused
 * ("staticint", "conststruct") and the final success return plus the
 * function's closing brace are missing. Restore from upstream.
 */
/** * vbg_req_allowed - Checks if the VMM request is allowed in the * context of the given session. * @gdev: The Guest extension device. * @session: The calling session. * @req: The request. * * Return: %0 or negative errno value.
*/ staticint vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session, conststruct vmmdev_request_header *req)
{ conststruct vmmdev_guest_status *guest_status; bool trusted_apps_only;
switch (req->request_type) { /* Trusted users apps only. */ case VMMDEVREQ_QUERY_CREDENTIALS: case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT: case VMMDEVREQ_REGISTER_SHARED_MODULE: case VMMDEVREQ_UNREGISTER_SHARED_MODULE: case VMMDEVREQ_WRITE_COREDUMP: case VMMDEVREQ_GET_CPU_HOTPLUG_REQ: case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS: case VMMDEVREQ_CHECK_SHARED_MODULES: case VMMDEVREQ_GET_PAGE_SHARING_STATUS: case VMMDEVREQ_DEBUG_IS_PAGE_SHARED: case VMMDEVREQ_REPORT_GUEST_STATS: case VMMDEVREQ_REPORT_GUEST_USER_STATE: case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
trusted_apps_only = true; break;
/* Anyone. */ case VMMDEVREQ_GET_MOUSE_STATUS: case VMMDEVREQ_SET_MOUSE_STATUS: case VMMDEVREQ_SET_POINTER_SHAPE: case VMMDEVREQ_GET_HOST_VERSION: case VMMDEVREQ_IDLE: case VMMDEVREQ_GET_HOST_TIME: case VMMDEVREQ_SET_POWER_STATUS: case VMMDEVREQ_ACKNOWLEDGE_EVENTS: case VMMDEVREQ_CTL_GUEST_FILTER_MASK: case VMMDEVREQ_REPORT_GUEST_STATUS: case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ: case VMMDEVREQ_VIDEMODE_SUPPORTED: case VMMDEVREQ_GET_HEIGHT_REDUCTION: case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2: case VMMDEVREQ_VIDEMODE_SUPPORTED2: case VMMDEVREQ_VIDEO_ACCEL_ENABLE: case VMMDEVREQ_VIDEO_ACCEL_FLUSH: case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION: case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS: case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX: case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI: case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ: case VMMDEVREQ_GET_VRDPCHANGE_REQ: case VMMDEVREQ_LOG_STRING: case VMMDEVREQ_GET_SESSION_ID:
trusted_apps_only = false; break;
/* Depends on the request parameters... */ case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
guest_status = (conststruct vmmdev_guest_status *)req; switch (guest_status->facility) { case VBOXGUEST_FACILITY_TYPE_ALL: case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
guest_status->facility); return -EPERM; case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
trusted_apps_only = true; break; case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT: case VBOXGUEST_FACILITY_TYPE_SEAMLESS: case VBOXGUEST_FACILITY_TYPE_GRAPHICS: default:
trusted_apps_only = false; break;
} break;
/* Anything else is not allowed. */ default:
vbg_err("Denying userspace vmm call type %#08x\n",
req->request_type); return -EPERM;
}
if (trusted_apps_only &&
(session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
req->request_type); return -EPERM;
}
/*
 * NOTE(review): truncated fragment — the code that stores/clears the
 * client id after vbg_hgcm_connect(), the error handling, the return and
 * the closing brace are missing. Restore from upstream.
 */
staticint vbg_ioctl_hgcm_connect(struct vbg_dev *gdev, struct vbg_session *session, struct vbg_ioctl_hgcm_connect *conn)
{
u32 client_id; int i, ret;
if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out))) return -EINVAL;
/* Find a free place in the sessions clients array and claim it */
mutex_lock(&gdev->session_mutex); for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) { if (!session->hgcm_client_ids[i]) {
session->hgcm_client_ids[i] = U32_MAX; break;
}
}
mutex_unlock(&gdev->session_mutex);
if (i >= ARRAY_SIZE(session->hgcm_client_ids)) return -EMFILE;
ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
&client_id, &conn->hdr.rc);
staticbool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
{ switch (type) { case VMMDEV_HGCM_PARM_TYPE_32BIT: case VMMDEV_HGCM_PARM_TYPE_64BIT: case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: returntrue; default: returnfalse;
}
}
/*
 * NOTE(review): truncated fragment — the actual_size computation, the
 * 32-bit branch header that pairs with the "} else {" below, the
 * assignment of @client_id from the ioctl data, the final return and the
 * closing brace are all missing, and the first validation loop references
 * a "parm" variable whose declaration was lost. Restore from upstream.
 */
staticint vbg_ioctl_hgcm_call(struct vbg_dev *gdev, struct vbg_session *session, bool f32bit, struct vbg_ioctl_hgcm_call *call)
{
size_t actual_size;
u32 client_id; int i, ret;
if (call->hdr.size_in < sizeof(*call)) return -EINVAL;
if (call->hdr.size_in != call->hdr.size_out) return -EINVAL;
if (call->parm_count > VMMDEV_HGCM_MAX_PARMS) return -E2BIG;
for (i = 0; i < call->parm_count; i++) if (!vbg_param_valid(parm[i].type)) return -EINVAL;
} else { struct vmmdev_hgcm_function_parameter *parm =
VBG_IOCTL_HGCM_CALL_PARMS(call);
for (i = 0; i < call->parm_count; i++) if (!vbg_param_valid(parm[i].type)) return -EINVAL;
}
/* * Validate the client id.
*/
mutex_lock(&gdev->session_mutex); for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) if (session->hgcm_client_ids[i] == client_id) break;
mutex_unlock(&gdev->session_mutex); if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
client_id); return -EINVAL;
}
if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS32(call),
call->parm_count, &call->hdr.rc); else
ret = vbg_hgcm_call(gdev, session->requestor, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS(call),
call->parm_count, &call->hdr.rc);
if (ret == -E2BIG) { /* E2BIG needs to be reported through the hdr.rc field. */
call->hdr.rc = VERR_OUT_OF_RANGE;
ret = 0;
}
if (ret && ret != -EINTR && ret != -ETIMEDOUT)
vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
/*
 * NOTE(review): orphaned fragment — presumably the tail of
 * vbg_ioctl_check_balloon(); its signature, header validation, return and
 * closing brace were lost. Restore from upstream.
 */
balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks; /* * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST * events entirely in the kernel, see vbg_core_isr().
*/
balloon_info->u.out.handle_in_r3 = false;
/*
 * NOTE(review): orphaned fragment — presumably the interior of
 * vbg_core_ioctl(); the function signature, the declarations of hdr/req/
 * req_no_size/f32bit and the size-validation code were lost. Restore from
 * upstream.
 */
if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT) return -EINVAL;
/* Fixed size requests. */ switch (req) { case VBG_IOCTL_DRIVER_VERSION_INFO: return vbg_ioctl_driver_version_info(data); case VBG_IOCTL_HGCM_CONNECT: return vbg_ioctl_hgcm_connect(gdev, session, data); case VBG_IOCTL_HGCM_DISCONNECT: return vbg_ioctl_hgcm_disconnect(gdev, session, data); case VBG_IOCTL_WAIT_FOR_EVENTS: return vbg_ioctl_wait_for_events(gdev, session, data); case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS: return vbg_ioctl_interrupt_all_wait_events(gdev, session, data); case VBG_IOCTL_CHANGE_FILTER_MASK: return vbg_ioctl_change_filter_mask(gdev, session, data); case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES: return vbg_ioctl_acquire_guest_capabilities(gdev, session, data); case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES: return vbg_ioctl_change_guest_capabilities(gdev, session, data); case VBG_IOCTL_CHECK_BALLOON: return vbg_ioctl_check_balloon(gdev, data); case VBG_IOCTL_WRITE_CORE_DUMP: return vbg_ioctl_write_core_dump(gdev, session, data);
}
/* Variable sized requests. */ switch (req_no_size) { #ifdef CONFIG_COMPAT case VBG_IOCTL_HGCM_CALL_32(0):
f32bit = true;
fallthrough; #endif case VBG_IOCTL_HGCM_CALL(0): return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); case VBG_IOCTL_LOG(0): case VBG_IOCTL_LOG_ALT(0): return vbg_ioctl_log(data);
}
vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req); return -ENOTTY;
}
/*
 * NOTE(review): truncated fragment — the code storing @features into @req,
 * performing the request, freeing @req and returning is missing. Restore
 * from upstream.
 */
/** * vbg_core_set_mouse_status - Report guest supported mouse-features to the host. * * @gdev: The Guest extension device. * @features: The set of features to report to the host. * * Return: %0 or negative errno value.
*/ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{ struct vmmdev_mouse_status *req; int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
VBG_KERNEL_REQUEST); if (!req) return -ENOMEM;
/*
 * NOTE(review): the following text is extraction residue from a web page
 * and does not belong in this source file. Original German, translated:
 * "The information on this web page has been carefully compiled to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the information provided is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */