struct clk *active_clks[8]; struct clk *reset_clks[4]; struct clk *proxy_clks[4]; struct device *proxy_pds[3]; int active_clk_count; int reset_clk_count; int proxy_clk_count; int proxy_pd_count;
struct reg_info active_regs[1]; struct reg_info proxy_regs[1]; struct reg_info fallback_proxy_regs[2]; int active_reg_count; int proxy_reg_count; int fallback_proxy_reg_count;
for (i = 0; reg_res[i].supply; i++) {
regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); if (IS_ERR(regs[i].reg)) return dev_err_probe(dev, PTR_ERR(regs[i].reg), "Failed to get %s\n regulator",
reg_res[i].supply);
/*
 * q6v5_regulator_enable() - vote for and enable a set of regulators
 * @qproc: q6v5 context, used only for error logging
 * @regs: regulators with optional voltage (uV) and load (uA) votes
 * @count: number of entries in @regs
 *
 * Regulators are configured and enabled in array order.  On failure the
 * partial configuration of the failing regulator is reverted and every
 * previously enabled regulator is fully unwound.
 *
 * Return: 0 on success, negative errno on the first failure.
 */
static int q6v5_regulator_enable(struct q6v5 *qproc,
				 struct reg_info *regs, int count)
{
	int ret;
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0) {
			ret = regulator_set_voltage(regs[i].reg,
						    regs[i].uV, INT_MAX);
			if (ret) {
				dev_err(qproc->dev,
					"Failed to request voltage for %d.\n",
					i);
				goto err;
			}
		}

		if (regs[i].uA > 0) {
			ret = regulator_set_load(regs[i].reg, regs[i].uA);
			if (ret < 0) {
				dev_err(qproc->dev,
					"Failed to set regulator mode\n");
				goto err;
			}
		}

		ret = regulator_enable(regs[i].reg);
		if (ret) {
			dev_err(qproc->dev, "Regulator enable failed\n");
			goto err;
		}
	}

	return 0;

err:
	/*
	 * Drop any votes already placed on the regulator that failed, but do
	 * NOT call regulator_disable() on it: it was never successfully
	 * enabled, so disabling it here would unbalance its enable count.
	 */
	if (regs[i].uV > 0)
		regulator_set_voltage(regs[i].reg, 0, INT_MAX);
	if (regs[i].uA > 0)
		regulator_set_load(regs[i].reg, 0);

	/* Fully unwind the regulators that were successfully enabled */
	for (i--; i >= 0; i--) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}

	return ret;
}
/*
 * q6v5_regulator_disable() - drop votes for and disable a set of regulators
 * @qproc: q6v5 context (unused here beyond matching the enable counterpart)
 * @regs: regulators previously enabled by q6v5_regulator_enable()
 * @count: number of entries in @regs
 *
 * Removes the voltage and load votes placed at enable time, then disables
 * each regulator, in array order.
 */
static void q6v5_regulator_disable(struct q6v5 *qproc,
				   struct reg_info *regs, int count)
{
	struct reg_info *info;
	int i;

	for (i = 0; i < count; i++) {
		info = &regs[i];

		if (info->uV > 0)
			regulator_set_voltage(info->reg, 0, INT_MAX);

		if (info->uA > 0)
			regulator_set_load(info->reg, 0);

		regulator_disable(info->reg);
	}
}
/*
 * q6v5_clk_enable() - prepare and enable an array of clocks
 * @dev: device used for error logging
 * @clks: clocks to enable, in order
 * @count: number of entries in @clks
 *
 * On failure, every clock enabled so far is disabled again and the error
 * from clk_prepare_enable() is returned.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_clk_enable(struct device *dev, struct clk **clks, int count)
{
	int enabled;
	int rc = 0;

	for (enabled = 0; enabled < count; enabled++) {
		rc = clk_prepare_enable(clks[enabled]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			break;
		}
	}

	if (!rc)
		return 0;

	/* Unwind the clocks that made it to the enabled state */
	while (--enabled >= 0)
		clk_disable_unprepare(clks[enabled]);

	return rc;
}
/*
 * q6v5_clk_disable() - disable and unprepare an array of clocks
 * @dev: device pointer, kept to mirror q6v5_clk_enable() (unused)
 * @clks: clocks to disable, in order
 * @count: number of entries in @clks
 */
static void q6v5_clk_disable(struct device *dev, struct clk **clks, int count)
{
	struct clk **clk;

	for (clk = clks; clk < clks + count; clk++)
		clk_disable_unprepare(*clk);
}
staticint q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
size_t pd_count)
{ int ret; int i;
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
ret = pm_runtime_get_sync(pds[i]); if (ret < 0) {
pm_runtime_put_noidle(pds[i]);
dev_pm_genpd_set_performance_state(pds[i], 0); goto unroll_pd_votes;
}
}
return 0;
unroll_pd_votes: for (i--; i >= 0; i--) {
dev_pm_genpd_set_performance_state(pds[i], 0);
pm_runtime_put(pds[i]);
}
/* MBA is restricted to a maximum size of 1M */ if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
dev_err(qproc->dev, "MBA firmware load failed\n"); return -EINVAL;
}
mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC); if (!mba_region) {
dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
&qproc->mba_phys, qproc->mba_size); return -EBUSY;
}
staticint q6v5_reset_assert(struct q6v5 *qproc)
{ int ret;
if (qproc->has_alt_reset) {
reset_control_assert(qproc->pdc_reset);
ret = reset_control_reset(qproc->mss_restart);
reset_control_deassert(qproc->pdc_reset);
} elseif (qproc->has_spare_reg) { /* * When the AXI pipeline is being reset with the Q6 modem partly * operational there is possibility of AXI valid signal to * glitch, leading to spurious transactions and Q6 hangs. A work * around is employed by asserting the AXI_GATING_VALID_OVERRIDE * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE * is withdrawn post MSS assert followed by a MSS deassert, * while holding the PDC reset.
*/
reset_control_assert(qproc->pdc_reset);
regmap_update_bits(qproc->conn_map, qproc->conn_box,
AXI_GATING_VALID_OVERRIDE, 1);
reset_control_assert(qproc->mss_restart);
reset_control_deassert(qproc->pdc_reset);
regmap_update_bits(qproc->conn_map, qproc->conn_box,
AXI_GATING_VALID_OVERRIDE, 0);
ret = reset_control_deassert(qproc->mss_restart);
} elseif (qproc->has_ext_cntl_regs) {
regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
reset_control_assert(qproc->pdc_reset);
reset_control_assert(qproc->mss_restart);
reset_control_deassert(qproc->pdc_reset);
ret = reset_control_deassert(qproc->mss_restart);
} else {
ret = reset_control_assert(qproc->mss_restart);
}
return ret;
}
staticint q6v5_reset_deassert(struct q6v5 *qproc)
{ int ret;
if (qproc->has_alt_reset) {
reset_control_assert(qproc->pdc_reset);
writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
ret = reset_control_reset(qproc->mss_restart);
writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
reset_control_deassert(qproc->pdc_reset);
} elseif (qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
ret = reset_control_reset(qproc->mss_restart);
} else {
ret = reset_control_deassert(qproc->mss_restart);
}
ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); if (ret) {
dev_err(qproc->dev, "Boot FSM failed to complete.\n"); /* Reset the modem so that boot FSM is in reset state */
q6v5_reset_deassert(qproc); return ret;
}
/* Turn on the XO clock needed for PLL setup */
val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
val, !(val & Q6SS_CBCR_CLKOFF), 1,
Q6SS_CBCR_TIMEOUT_US); if (ret) {
dev_err(qproc->dev, "QDSP6SS XO clock timed out\n"); return -ETIMEDOUT;
}
/* Configure Q6 core CBCR to auto-enable after reset sequence */
val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
/* De-assert the Q6 stop core signal */
writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
/* Wait for 10 us for any staggering logic to settle */
usleep_range(10, 20);
/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
/* Poll the MSS_STATUS for FSM completion */
ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); if (ret) {
dev_err(qproc->dev, "Boot FSM failed to complete.\n"); /* Reset the modem so that boot FSM is in reset state */
q6v5_reset_deassert(qproc); return ret;
} goto pbl_wait;
} elseif (qproc->version == MSS_MSM8909 ||
qproc->version == MSS_MSM8953 ||
qproc->version == MSS_MSM8996 ||
qproc->version == MSS_MSM8998 ||
qproc->version == MSS_SDM660) {
if (qproc->version != MSS_MSM8909 &&
qproc->version != MSS_MSM8953) /* Override the ACC value if required */
writel(QDSP6SS_ACC_OVERRIDE_VAL,
qproc->reg_base + QDSP6SS_STRAP_ACC);
/* BHS require xo cbcr to be enabled */
val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
/* Read CLKOFF bit to go low indicating CLK is enabled */
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
val, !(val & Q6SS_CBCR_CLKOFF), 1,
Q6SS_CBCR_TIMEOUT_US); if (ret) {
dev_err(qproc->dev, "xo cbcr enabling timed out (rc:%d)\n", ret); return ret;
} /* Enable power block headswitch and wait for it to stabilize */
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= QDSP6v56_BHS_ON;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
udelay(1);
if (qproc->version == MSS_SDM660) {
ret = readl_relaxed_poll_timeout(qproc->reg_base + QDSP6V62SS_BHS_STATUS,
i, (i & QDSP6v55_BHS_EN_REST_ACK),
1, BHS_CHECK_MAX_LOOPS); if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "BHS_EN_REST_ACK not set!\n"); return -ETIMEDOUT;
}
}
/* Put LDO in bypass mode */
val |= QDSP6v56_LDO_BYP;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
if (qproc->version != MSS_MSM8909) { int mem_pwr_ctl;
/* Deassert QDSP6 compiler memory clamp */
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val &= ~QDSP6v56_CLAMP_QMC_MEM;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
/* Deassert memory peripheral sleep and L2 memory standby */
val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
/* Turn on L1, L2, ETB and JU memories 1 at a time */ if (qproc->version == MSS_MSM8953 ||
qproc->version == MSS_MSM8996) {
mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
i = 19;
} else { /* MSS_MSM8998, MSS_SDM660 */
mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
i = 28;
}
val = readl(qproc->reg_base + mem_pwr_ctl); for (; i >= 0; i--) {
val |= BIT(i);
writel(val, qproc->reg_base + mem_pwr_ctl); /* * Read back value to ensure the write is done then * wait for 1us for both memory peripheral and data * array to turn on.
*/
val |= readl(qproc->reg_base + mem_pwr_ctl);
udelay(1);
}
} else { /* Turn on memories */
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= Q6SS_SLP_RET_N | Q6SS_L2DATA_STBY_N |
Q6SS_ETB_SLP_NRET_N | QDSP6V55_MEM_BITS;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
/* Turn on L2 banks 1 at a time */ for (i = 0; i <= 7; i++) {
val |= BIT(i);
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
}
}
/* Remove word line clamp */
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val &= ~QDSP6v56_CLAMP_WL;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
} else { /* Assert resets, stop core */
val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
/* Enable power block headswitch and wait for it to stabilize */
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= QDSS_BHS_ON | QDSS_LDO_BYP;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
udelay(1); /* * Turn on memories. L2 banks should be done individually * to minimize inrush current.
*/
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= Q6SS_L2DATA_SLP_NRET_N_2;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= Q6SS_L2DATA_SLP_NRET_N_1;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= Q6SS_L2DATA_SLP_NRET_N_0;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
} /* Remove IO clamp */
val &= ~Q6SS_CLAMP_IO;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
/* Bring core out of reset */
val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
val &= ~Q6SS_CORE_ARES;
writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
/* Turn on core clock */
val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
val |= Q6SS_CLK_ENABLE;
writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
/* Start core execution */
val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
val &= ~Q6SS_STOP_CORE;
writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
pbl_wait: /* Wait for PBL status */
ret = q6v5_rmb_pbl_wait(qproc, 1000); if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "PBL boot timed out\n");
} elseif (ret != RMB_PBL_SUCCESS) {
dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
ret = -EINVAL;
} else {
ret = 0;
}
/* * If the request is denied, reset the Q-channel takedown request, * wait for active transactions to complete and retry takedown.
*/
retry = 10; while (retry) {
usleep_range(5, 10);
retry--;
ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val); if (!ret && val) {
regmap_write(map, offset + QACCEPT_REQ_REG, 1); break;
}
ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val); if (!ret && !val) {
takedown_complete = true; break;
}
}
if (!retry) break;
}
/* Rely on mss_restart to clear out pending transactions on takedown failure */ if (!takedown_complete)
dev_err(qproc->dev, "qchannel takedown failed\n");
}
/* Metadata authentication done, remove modem access */
xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
phys, size); if (xferop_ret)
dev_warn(qproc->dev, "mdt buffer not reclaimed system may become unstable\n");
free_dma_attrs: if (!qproc->mdata_phys)
dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
free_metadata:
kfree(metadata);
return ret < 0 ? ret : 0;
}
staticbool q6v5_phdr_valid(conststruct elf32_phdr *phdr)
{ if (phdr->p_type != PT_LOAD) returnfalse;
if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) returnfalse;
if (!phdr->p_memsz) returnfalse;
returntrue;
}
staticint q6v5_mba_load(struct q6v5 *qproc)
{ int ret; int xfermemop_ret; bool mba_load_err = false;
ret = qcom_q6v5_prepare(&qproc->q6v5); if (ret) return ret;
ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); if (ret < 0) {
dev_err(qproc->dev, "failed to enable proxy power domains\n"); goto disable_irqs;
}
ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
qproc->fallback_proxy_reg_count); if (ret) {
dev_err(qproc->dev, "failed to enable fallback proxy supplies\n"); goto disable_proxy_pds;
}
ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count); if (ret) {
dev_err(qproc->dev, "failed to enable proxy supplies\n"); goto disable_fallback_proxy_reg;
}
ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
qproc->proxy_clk_count); if (ret) {
dev_err(qproc->dev, "failed to enable proxy clocks\n"); goto disable_proxy_reg;
}
ret = q6v5_regulator_enable(qproc, qproc->active_regs,
qproc->active_reg_count); if (ret) {
dev_err(qproc->dev, "failed to enable supplies\n"); goto disable_proxy_clk;
}
if (qproc->has_ext_bhs_reg) {
ret = q6v5_external_bhs_enable(qproc); if (ret < 0) goto disable_vdd;
}
ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
qproc->reset_clk_count); if (ret) {
dev_err(qproc->dev, "failed to enable reset clocks\n"); goto disable_ext_bhs;
}
ret = q6v5_reset_deassert(qproc); if (ret) {
dev_err(qproc->dev, "failed to deassert mss restart\n"); goto disable_reset_clks;
}
ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
qproc->active_clk_count); if (ret) {
dev_err(qproc->dev, "failed to enable clocks\n"); goto assert_reset;
}
ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi); if (ret) {
dev_err(qproc->dev, "failed to enable axi bridge\n"); goto disable_active_clks;
}
/* * Some versions of the MBA firmware will upon boot wipe the MPSS region as well, so provide * the Q6 access to this region.
*/
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
qproc->mpss_phys, qproc->mpss_size); if (ret) {
dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret); goto disable_active_clks;
}
/* Assign MBA image access in DDR to q6 */
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
qproc->mba_phys, qproc->mba_size); if (ret) {
dev_err(qproc->dev, "assigning Q6 access to mba memory failed: %d\n", ret); goto disable_active_clks;
}
if (qproc->has_mba_logs)
qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
/* In case of failure or coredump scenario where reclaiming MBA memory * could not happen reclaim it here.
*/
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
qproc->mba_phys,
qproc->mba_size);
WARN_ON(ret);
ret = qcom_q6v5_unprepare(&qproc->q6v5); if (ret) {
q6v5_pds_disable(qproc, qproc->proxy_pds,
qproc->proxy_pd_count);
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
qproc->proxy_clk_count);
q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
qproc->fallback_proxy_reg_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
}
}
if (qproc->version == MSS_MSM8953) {
ret = qcom_scm_pas_mem_setup(MPSS_PAS_ID, qproc->mpss_phys, qproc->mpss_size); if (ret) {
dev_err(qproc->dev, "setting up mpss memory failed: %d\n", ret); goto release_firmware;
}
}
/* * In case of a modem subsystem restart on secure devices, the modem * memory can be reclaimed only after MBA is loaded.
*/
q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
qproc->mpss_phys, qproc->mpss_size);
/* Share ownership between Linux and MSS, during segment loading */
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
qproc->mpss_phys, qproc->mpss_size); if (ret) {
dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
ret = -EAGAIN; goto release_firmware;
}
mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
qproc->mpss_reloc = mpss_reloc; /* Load firmware segments */ for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
if (phdr->p_filesz && phdr->p_offset < fw->size) { /* Firmware is large enough to be non-split */ if (phdr->p_offset + phdr->p_filesz > fw->size) {
dev_err(qproc->dev, "failed to load segment %d from truncated file %s\n",
i, fw_name);
ret = -EINVAL;
memunmap(ptr); goto release_firmware;
}
if (seg_fw->size != phdr->p_filesz) {
dev_err(qproc->dev, "failed to load segment %d from truncated file %s\n",
i, fw_name);
ret = -EINVAL;
release_firmware(seg_fw);
memunmap(ptr); goto release_firmware;
}
ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); if (ret < 0) {
dev_err(qproc->dev, "MPSS authentication failed: %d\n",
ret); goto release_firmware;
}
}
/* Transfer ownership of modem ddr region to q6 */
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
qproc->mpss_phys, qproc->mpss_size); if (ret) {
dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
ret = -EAGAIN; goto release_firmware;
}
/* Unlock mba before copying segments */ if (!qproc->dump_mba_loaded) {
ret = q6v5_reload_mba(rproc); if (!ret) { /* Reset ownership back to Linux to copy segments */
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
qproc->mpss_phys,
qproc->mpss_size);
}
}
/* Reclaim mba after copying segments */ if (qproc->current_dump_size == qproc->total_dump_size) { if (qproc->dump_mba_loaded) { /* Try to reset ownership back to Q6 */
q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
qproc->mpss_phys,
qproc->mpss_size);
q6v5_mba_reclaim(qproc);
}
}
}
staticint q6v5_start(struct rproc *rproc)
{ struct q6v5 *qproc = rproc->priv; int xfermemop_ret; int ret;
ret = q6v5_mpss_load(qproc); if (ret) goto reclaim_mpss;
ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000)); if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "start timed out\n"); goto reclaim_mpss;
}
xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
qproc->mba_size); if (xfermemop_ret)
dev_err(qproc->dev, "Failed to reclaim mba buffer system may become unstable\n");
if (qproc->has_ext_bhs_reg) {
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, "qcom,ext-bhs-reg",
1, 0, &args); if (ret < 0) {
dev_err(&pdev->dev, "failed to parse ext-bhs-reg index 0\n"); return -EINVAL;
}
qproc->conn_map = syscon_node_to_regmap(args.np);
of_node_put(args.np); if (IS_ERR(qproc->conn_map)) return PTR_ERR(qproc->conn_map);
qproc->ext_bhs = args.args[0];
}
if (qproc->has_ext_cntl_regs) {
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, "qcom,ext-regs",
2, 0, &args); if (ret < 0) {
dev_err(&pdev->dev, "failed to parse ext-regs index 0\n"); return -EINVAL;
}
qproc->conn_map = syscon_node_to_regmap(args.np);
of_node_put(args.np); if (IS_ERR(qproc->conn_map)) return PTR_ERR(qproc->conn_map);
for (i = 0; clk_names[i]; i++) {
clks[i] = devm_clk_get(dev, clk_names[i]); if (IS_ERR(clks[i])) return dev_err_probe(dev, PTR_ERR(clks[i]), "Failed to get %s clock\n",
clk_names[i]);
}
return i;
}
staticint q6v5_pds_attach(struct device *dev, struct device **devs, char **pd_names)
{
size_t num_pds = 0; int ret; int i;
if (!pd_names) return 0;
while (pd_names[num_pds])
num_pds++;
/* Handle single power domain */ if (num_pds == 1 && dev->pm_domain) {
devs[0] = dev;
pm_runtime_enable(dev); return 1;
}
for (i = 0; i < num_pds; i++) {
devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); if (IS_ERR_OR_NULL(devs[i])) {
ret = PTR_ERR(devs[i]) ? : -ENODATA; goto unroll_attach;
}
}
return num_pds;
unroll_attach: for (i--; i >= 0; i--)
dev_pm_domain_detach(devs[i], false);
/* * In the absence of mba/mpss sub-child, extract the mba and mpss * reserved memory regions from device's memory-region property.
*/
child = of_get_child_by_name(qproc->dev->of_node, "mba"); if (!child) {
node = of_parse_phandle(qproc->dev->of_node, "memory-region", 0);
} else {
node = of_parse_phandle(child, "memory-region", 0);
of_node_put(child);
}
if (!node) {
dev_err(qproc->dev, "no mba memory-region specified\n"); return -EINVAL;
}
rmem = of_reserved_mem_lookup(node);
of_node_put(node); if (!rmem) {
dev_err(qproc->dev, "unable to resolve mba region\n"); return -EINVAL;
}
/*
 * NOTE(review): stray non-source text — German website boilerplate picked up
 * when this file was extracted ("the information on this web page was
 * compiled to the best of our knowledge; no guarantee of completeness,
 * correctness or quality; syntax highlighting and measurement are still
 * experimental").  Preserved verbatim below, fenced off so it cannot be
 * mistaken for driver code:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */