// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd.
*/
/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See
 *             &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};
if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN)) return qcom_scm_convention;
/* * Per the "SMC calling convention specification", the 64-bit calling * convention can only be used when the client is 64-bit, otherwise * system will encounter the undefined behaviour.
*/ #if IS_ENABLED(CONFIG_ARM64) /* * Device isn't required as there is only one argument - no device * needed to dma_map_single to secure world
*/
probed_convention = SMC_CONVENTION_ARM_64;
ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); if (!ret && res.result[0] == 1) goto found;
/* * Some SC7180 firmwares didn't implement the * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64 * calling conventions on these firmwares. Luckily we don't make any * early calls into the firmware on these SoCs so the device pointer * will be valid here to check if the compatible matches.
*/ if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
forced = true; goto found;
} #endif
probed_convention = SMC_CONVENTION_ARM_32;
ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); if (!ret && res.result[0] == 1) goto found;
/** * qcom_scm_call() - Invoke a syscall in the secure world * @dev: device * @desc: Descriptor structure containing arguments and return values * @res: Structure containing results from SMC/HVC call * * Sends a command to the SCM and waits for the command to finish processing. * This should *only* be called in pre-emptible context.
*/ staticint qcom_scm_call(struct device *dev, conststruct qcom_scm_desc *desc, struct qcom_scm_res *res)
{
might_sleep(); switch (__get_convention()) { case SMC_CONVENTION_ARM_32: case SMC_CONVENTION_ARM_64: return scm_smc_call(dev, desc, res, false); case SMC_CONVENTION_LEGACY: return scm_legacy_call(dev, desc, res); default:
pr_err("Unknown current SCM calling convention.\n"); return -EINVAL;
}
}
/** * qcom_scm_call_atomic() - atomic variation of qcom_scm_call() * @dev: device * @desc: Descriptor structure containing arguments and return values * @res: Structure containing results from SMC/HVC call * * Sends a command to the SCM and waits for the command to finish processing. * This can be called in atomic context.
*/ staticint qcom_scm_call_atomic(struct device *dev, conststruct qcom_scm_desc *desc, struct qcom_scm_res *res)
{ switch (__get_convention()) { case SMC_CONVENTION_ARM_32: case SMC_CONVENTION_ARM_64: return scm_smc_call(dev, desc, res, true); case SMC_CONVENTION_LEGACY: return scm_legacy_call_atomic(dev, desc, res); default:
pr_err("Unknown current SCM calling convention.\n"); return -EINVAL;
}
}
/* Need a device for DMA of the additional arguments */ if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY) return -EOPNOTSUPP;
return qcom_scm_call(__scm->dev, &desc, NULL);
}
/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	int ret;

	ret = qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT);
	if (ret) {
		/* Fallback to old SCM call */
		ret = qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (!qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		return 0;

	/* Fallback to old SCM call */
	return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);
/** * qcom_scm_cpu_power_down() - Power down the cpu * @flags: Flags to flush cache * * This is an end point to power down cpu. If there was a pending interrupt, * the control would return from this function, otherwise, the cpu jumps to the * warm boot entry point set for this cpu upon reset.
*/ void qcom_scm_cpu_power_down(u32 flags)
{ struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_BOOT,
.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
.arginfo = QCOM_SCM_ARGS(1),
.owner = ARM_SMCCC_OWNER_SIP,
};
ret = qcom_scm_io_readl(addr, &old); if (ret) return ret;
new = (old & ~mask) | (val & mask);
return qcom_scm_io_writel(addr, new);
}
/*
 * qcom_scm_set_download_mode() - configure the firmware "download mode"
 * @dload_mode: requested mode bits (0 disables download mode)
 *
 * Tries, in order of preference: a direct read-modify-write of the dload
 * register (when its address is known), then the dedicated SCM call (when
 * the firmware advertises it). If neither mechanism exists and a non-zero
 * mode was requested, logs an error.
 */
static void qcom_scm_set_download_mode(u32 dload_mode)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
	} else if (dload_mode) {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}
/** * qcom_scm_pas_init_image() - Initialize peripheral authentication service * state machine for a given peripheral, using the * metadata * @peripheral: peripheral id * @metadata: pointer to memory containing ELF header, program header table * and optional blob of data used for authenticating the metadata * and the rest of the firmware * @size: size of the metadata * @ctx: optional metadata context * * Return: 0 on success. * * Upon successful return, the PAS metadata context (@ctx) will be used to * track the metadata allocation, this needs to be released by invoking * qcom_scm_pas_metadata_release() by the caller.
*/ int qcom_scm_pas_init_image(u32 peripheral, constvoid *metadata, size_t size, struct qcom_scm_pas_metadata *ctx)
{
dma_addr_t mdata_phys; void *mdata_buf; int ret; struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
.args[0] = peripheral,
.owner = ARM_SMCCC_OWNER_SIP,
}; struct qcom_scm_res res;
/* * During the scm call memory protection will be enabled for the meta * data blob, so make sure it's physically contiguous, 4K aligned and * non-cachable to avoid XPU violations. * * For PIL calls the hypervisor creates SHM Bridges for the blob * buffers on behalf of Linux so we must not do it ourselves hence * not using the TZMem allocator here. * * If we pass a buffer that is already part of an SHM Bridge to this * call, it will fail.
*/
mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
GFP_KERNEL); if (!mdata_buf) return -ENOMEM;
memcpy(mdata_buf, metadata, size);
ret = qcom_scm_clk_enable(); if (ret) goto out;
ret = qcom_scm_bw_enable(); if (ret) goto disable_clk;
desc.args[1] = mdata_phys;
ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_bw_disable();
/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral: peripheral id
 * @addr:       start address of memory area to prepare
 * @size:       size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	struct qcom_scm_res res;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args = { peripheral, addr, size },
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);
/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	/* Both clock and bandwidth votes are held around the firmware call. */
	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);
/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	struct qcom_scm_res res;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args = { peripheral },
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripherial
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/* Bail out early if the firmware lacks the query call itself. */
	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
/** * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership * @mem_addr: mem region whose ownership need to be reassigned * @mem_sz: size of the region. * @srcvm: vmid for current set of owners, each set bit in * flag indicate a unique owner * @newvm: array having new owners and corresponding permission * flags * @dest_cnt: number of owners in next set. * * Return negative errno on failure or 0 on success with @srcvm updated.
*/ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
u64 *srcvm, conststruct qcom_scm_vmperm *newvm, unsignedint dest_cnt)
{ struct qcom_scm_current_perm_info *destvm; struct qcom_scm_mem_map_info *mem_to_map;
phys_addr_t mem_to_map_phys;
phys_addr_t dest_phys;
phys_addr_t ptr_phys;
size_t mem_to_map_sz;
size_t dest_sz;
size_t src_sz;
size_t ptr_sz; int next_vm;
__le32 *src; int ret, i, b;
u64 srcvm_bits = *srcvm;
/** * qcom_scm_ice_available() - Is the ICE key programming interface available? * * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and * qcom_scm_ice_set_key() are available.
*/ bool qcom_scm_ice_available(void)
{ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);
/** * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key * @index: the keyslot to invalidate * * The UFSHCI and eMMC standards define a standard way to do this, but it * doesn't work on these SoCs; only this SCM call does. * * It is assumed that the SoC has only one ICE instance being used, as this SCM * call doesn't specify which ICE instance the keyslot belongs to. * * Return: 0 on success; -errno on failure.
*/ int qcom_scm_ice_invalidate_key(u32 index)
{ struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_ES,
.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
.arginfo = QCOM_SCM_ARGS(1),
.args[0] = index,
.owner = ARM_SMCCC_OWNER_SIP,
};
/** * qcom_scm_ice_set_key() - Set an inline encryption key * @index: the keyslot into which to set the key * @key: the key to program * @key_size: the size of the key in bytes * @cipher: the encryption algorithm the key is for * @data_unit_size: the encryption data unit size, i.e. the size of each * individual plaintext and ciphertext. Given in 512-byte * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc. * * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline. * * The UFSHCI and eMMC standards define a standard way to do this, but it * doesn't work on these SoCs; only this SCM call does. * * It is assumed that the SoC has only one ICE instance being used, as this SCM * call doesn't specify which ICE instance the keyslot belongs to. * * Return: 0 on success; -errno on failure.
*/ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{ struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_ES,
.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
QCOM_SCM_VAL, QCOM_SCM_VAL,
QCOM_SCM_VAL),
.args[0] = index,
.args[2] = key_size,
.args[3] = cipher,
.args[4] = data_unit_size,
.owner = ARM_SMCCC_OWNER_SIP,
};
/** * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key * @eph_key: an ephemerally-wrapped key * @eph_key_size: size of @eph_key in bytes * @sw_secret: output buffer for the software secret * @sw_secret_size: size of the software secret to derive in bytes * * Derive a software secret from an ephemerally-wrapped key for software crypto * operations. This is done by calling into the secure execution environment, * which then calls into the hardware to unwrap and derive the secret. * * For more information on sw_secret, see the "Hardware-wrapped keys" section of * Documentation/block/inline-encryption.rst. * * Return: 0 on success; -errno on failure.
*/ int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
u8 *sw_secret, size_t sw_secret_size)
{ struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_ES,
.cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
QCOM_SCM_RW, QCOM_SCM_VAL),
.owner = ARM_SMCCC_OWNER_SIP,
}; int ret;
/** * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption * @lt_key: output buffer for the long-term wrapped key * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size * used by the SoC. * * Generate a key using the built-in HW module in the SoC. The resulting key is * returned wrapped with the platform-specific Key Encryption Key. * * Return: 0 on success; -errno on failure.
*/ int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
{ struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_ES,
.cmd = QCOM_SCM_ES_GENERATE_ICE_KEY,
.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
.owner = ARM_SMCCC_OWNER_SIP,
}; int ret;
/** * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key * @lt_key: a long-term wrapped key * @lt_key_size: size of @lt_key in bytes * @eph_key: output buffer for the ephemerally-wrapped key * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size * used by the SoC. * * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for * added protection. The resulting key will only be valid for the current boot. * * Return: 0 on success; -errno on failure.
*/ int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
u8 *eph_key, size_t eph_key_size)
{ struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_ES,
.cmd = QCOM_SCM_ES_PREPARE_ICE_KEY,
.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
QCOM_SCM_RW, QCOM_SCM_VAL),
.owner = ARM_SMCCC_OWNER_SIP,
}; int ret;
/** * qcom_scm_import_ice_key() - Import key for storage encryption * @raw_key: the raw key to import * @raw_key_size: size of @raw_key in bytes * @lt_key: output buffer for the long-term wrapped key * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size * used by the SoC. * * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to * wrap the raw key using the platform-specific Key Encryption Key. * * Return: 0 on success; -errno on failure.
*/ int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
u8 *lt_key, size_t lt_key_size)
{ struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_ES,
.cmd = QCOM_SCM_ES_IMPORT_ICE_KEY,
.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
QCOM_SCM_RW, QCOM_SCM_VAL),
.owner = ARM_SMCCC_OWNER_SIP,
}; int ret;
/* * This is only supposed to be called once by the TZMem module. It takes the * SCM struct device as argument and uses it to pass the call as at the time * the SHM Bridge is enabled, the SCM is not yet fully set up and doesn't * accept global user calls. Don't try to use the __scm pointer here.
*/ int qcom_scm_shm_bridge_enable(struct device *scm_dev)
{ int ret;
/* * QSEECOM SCM calls should not be executed concurrently. Therefore, we * require the respective call lock to be held.
*/
lockdep_assert_held(&qcom_scm_qseecom_call_lock);
status = qcom_scm_call(__scm->dev, desc, &scm_res);
/** * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call. * @desc: SCM call descriptor. * @res: SCM call response (output). * * Performs the QSEECOM SCM call described by @desc, returning the response in * @rsp. * * Return: Zero on success, nonzero on failure.
*/ staticint qcom_scm_qseecom_call(conststruct qcom_scm_desc *desc, struct qcom_scm_qseecom_resp *res)
{ int status;
/* * Note: Multiple QSEECOM SCM calls should not be executed same time, * so lock things here. This needs to be extended to callback/listener * handling when support for that is implemented.
*/
mutex_lock(&qcom_scm_qseecom_call_lock);
status = __qcom_scm_qseecom_call(desc, res);
mutex_unlock(&qcom_scm_qseecom_call_lock);
if (status) {
dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status); return status;
}
/* * TODO: Handle incomplete and blocked calls: * * Incomplete and blocked calls are not supported yet. Some devices * and/or commands require those, some don't. Let's warn about them * prominently in case someone attempts to try these commands with a * device/command combination that isn't supported yet.
*/
WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);
return 0;
}
/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM querying the QSEECOM version currently running in
 * the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	u32 feature = 10;
	int ret;

	/*
	 * NOTE(review): @desc is passed to the SCM call zero-initialized and
	 * @feature is never used below — the statements populating desc
	 * (owner/svc/cmd/arginfo and args[0] = feature) appear to be missing
	 * from this copy. Verify against the upstream driver before relying
	 * on this function.
	 */
	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	*version = res.result;

	return 0;
}
/** * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name. * @app_name: The name of the app. * @app_id: The returned app ID. * * Query and return the application ID of the SEE app identified by the given * name. This returned ID is the unique identifier of the app required for * subsequent communication. * * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been * loaded or could not be found.
*/ int qcom_scm_qseecom_app_get_id(constchar *app_name, u32 *app_id)
{ unsignedlong name_buf_size = QSEECOM_MAX_APP_NAME_SIZE; unsignedlong app_name_len = strlen(app_name); struct qcom_scm_desc desc = {}; struct qcom_scm_qseecom_resp res = {}; int status;
if (app_name_len >= name_buf_size) return -EINVAL;
/** * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app. * @app_id: The ID of the target app. * @req: Request buffer sent to the app (must be TZ memory) * @req_size: Size of the request buffer. * @rsp: Response buffer, written to by the app (must be TZ memory) * @rsp_size: Size of the response buffer. * * Sends a request to the QSEE app associated with the given ID and read back * its response. The caller must provide two DMA memory regions, one for the * request and one for the response, and fill out the @req region with the * respective (app-specific) request data. The QSEE app reads this and returns * its response in the @rsp region. * * Return: Zero on success, nonzero on failure.
*/ int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp, size_t rsp_size)
{ struct qcom_scm_qseecom_resp res = {}; struct qcom_scm_desc desc = {};
phys_addr_t req_phys;
phys_addr_t rsp_phys; int status;
/* * Note: We do two steps of validation here: First, we try to query the * QSEECOM version as a check to see if the interface exists on this * device. Second, we check against known good devices due to current * driver limitations (see comment in qcom_scm_qseecom_allowlist). * * Note that we deliberately do the machine check after the version * check so that we can log potentially supported devices. This should * be safe as downstream sources indicate that the version query is * neither blocking nor reentrant.
*/
ret = qcom_scm_qseecom_get_version(&version); if (ret) return 0;
dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);
/* * Set up QSEECOM interface device. All application clients will be * set up and managed by the corresponding driver for it.
*/
qseecom_dev = platform_device_alloc("qcom_qseecom", -1); if (!qseecom_dev) return -ENOMEM;
qseecom_dev->dev.parent = scm->dev;
ret = platform_device_add(qseecom_dev); if (ret) {
platform_device_put(qseecom_dev); return ret;
}
/** * qcom_scm_is_available() - Checks if SCM is available
*/ bool qcom_scm_is_available(void)
{ /* Paired with smp_store_release() in qcom_scm_probe */ return !!smp_load_acquire(&__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
/*
 * qcom_scm_assert_valid_wq_ctx() - validate a firmware-supplied wq context id
 * @wq_ctx: waitqueue context id passed back by the firmware
 *
 * Return: 0 when @wq_ctx is valid, -EINVAL otherwise.
 */
static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/*
	 * FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev,
			"Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * qcom_scm_wait_for_wq_completion() - block until the SCM waitqueue context
 * @wq_ctx is signalled by the firmware interrupt handler.
 *
 * Return: 0 on success, -EINVAL for an invalid @wq_ctx.
 */
int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);

	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}
staticint qcom_scm_waitq_wakeup(unsignedint wq_ctx)
{ int ret;
ret = qcom_scm_assert_valid_wq_ctx(wq_ctx); if (ret) return ret;
module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode and full,mini for both full and minidump mode together are acceptable values");
scm->path = devm_of_icc_get(&pdev->dev, NULL); if (IS_ERR(scm->path)) return dev_err_probe(&pdev->dev, PTR_ERR(scm->path), "failed to acquire interconnect path\n");
scm->core_clk = devm_clk_get_optional(&pdev->dev, "core"); if (IS_ERR(scm->core_clk)) return PTR_ERR(scm->core_clk);
scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface"); if (IS_ERR(scm->iface_clk)) return PTR_ERR(scm->iface_clk);
scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus"); if (IS_ERR(scm->bus_clk)) return PTR_ERR(scm->bus_clk);
scm->reset.ops = &qcom_scm_pas_reset_ops;
scm->reset.nr_resets = 1;
scm->reset.of_node = pdev->dev.of_node;
ret = devm_reset_controller_register(&pdev->dev, &scm->reset); if (ret) return ret;
/* vote for max clk rate for highest performance */
ret = clk_set_rate(scm->core_clk, INT_MAX); if (ret) return ret;
ret = of_reserved_mem_device_init(scm->dev); if (ret && ret != -ENODEV) return dev_err_probe(scm->dev, ret, "Failed to setup the reserved memory region for TZ mem\n");
ret = qcom_tzmem_enable(scm->dev); if (ret) return dev_err_probe(scm->dev, ret, "Failed to enable the TrustZone memory allocator\n");
scm->mempool = devm_qcom_tzmem_pool_new(scm->dev, &pool_config); if (IS_ERR(scm->mempool)) return dev_err_probe(scm->dev, PTR_ERR(scm->mempool), "Failed to create the SCM memory pool\n");
irq = platform_get_irq_optional(pdev, 0); if (irq < 0) { if (irq != -ENXIO) return irq;
} else {
ret = devm_request_threaded_irq(scm->dev, irq, NULL, qcom_scm_irq_handler,
IRQF_ONESHOT, "qcom-scm", scm); if (ret < 0) return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
}
/* * Paired with smp_load_acquire() in qcom_scm_is_available(). * * This marks the SCM API as ready to accept user calls and can only * be called after the TrustZone memory pool is initialized and the * waitqueue interrupt requested.
*/
smp_store_release(&__scm, scm);
__get_convention();
/* * If "download mode" is requested, from this point on warmboot * will cause the boot stages to enter download mode, unless * disabled below by a clean shutdown/reboot.
*/
qcom_scm_set_download_mode(download_mode);
/* * Disable SDI if indicated by DT that it is enabled by default.
*/ if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
qcom_scm_disable_sdi();
/* * Initialize the QSEECOM interface. * * Note: QSEECOM is fairly self-contained and this only adds the * interface device (the driver of which does most of the heavy * lifting). So any errors returned here should be either -ENOMEM or * -EINVAL (with the latter only in case there's a bug in our code). * This means that there is no need to bring down the whole SCM driver. * Just log the error instead and let SCM live.
*/
ret = qcom_scm_qseecom_init(scm);
WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
return 0;
}
/*
 * qcom_scm_shutdown() - platform shutdown hook.
 * @pdev: the SCM platform device (unused).
 */
static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.