/* NOTE(review): tail of the resource-release path — the enclosing
 * function header is outside this chunk. Resources are undone in
 * reverse order of acquisition.
 */
/* Unmap the doorbell scratchpad BAR from CPU address space. */
iounmap(ipc_pcie->scratchpad);
/* Unmap the IPC_REGS BAR from CPU address space. */
iounmap(ipc_pcie->ipc_regs);
/* Release all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions(). Call this only after all
 * use of the PCI regions has ceased.
 */
pci_release_regions(ipc_pcie->pci);
}
/* NOTE(review): mid-function fragment — the enclosing function header
 * and the goto error labels referenced below are outside this chunk.
 */
/* Reserve PCI I/O and memory resources: mark all PCI regions
 * associated with PCI device @pci as being reserved by owner
 * "IOSM_IPC".
 */
ret = pci_request_regions(pci, "IOSM_IPC");
if (ret) {
	dev_err(ipc_pcie->dev, "failed pci request regions");
	goto pci_request_region_fail;
}
/* Map the doorbell IPC REGS BAR into CPU space so the physical
 * address range (BAR) is visible from this driver.
 * pci_ioremap_bar() ensures the memory is marked uncachable.
 */
ipc_pcie->ipc_regs = pci_ioremap_bar(pci, ipc_pcie->ipc_regs_bar_nr);
if (!ipc_pcie->ipc_regs) {
	dev_err(ipc_pcie->dev, "IPC REGS ioremap error");
	ret = -EBUSY;
	goto ipc_regs_remap_fail;
}
/* Map the MMIO scratchpad BAR into CPU space the same way;
 * pci_ioremap_bar() again marks the mapping uncachable.
 */
ipc_pcie->scratchpad =
	pci_ioremap_bar(pci, ipc_pcie->scratchpad_bar_nr);
if (!ipc_pcie->scratchpad) {
	dev_err(ipc_pcie->dev, "doorbell scratchpad ioremap error");
	ret = -EBUSY;
	goto scratch_remap_fail;
}
/* Install the irq handler triggered by CP. */
ret = ipc_acquire_irq(ipc_pcie);
if (ret) {
	dev_err(ipc_pcie->dev, "acquiring MSI irq failed!");
	goto irq_acquire_fail;
}
/* Enable bus-mastering for the IOSM IPC device. */
pci_set_master(pci);
/* Enable LTR if the device advertises the capability — this is
 * needed for ASPM L1.2.
 */
pcie_capability_read_dword(ipc_pcie->pci, PCI_EXP_DEVCAP2, &cap);
if (cap & PCI_EXP_DEVCAP2_LTR)
	pcie_capability_set_word(ipc_pcie->pci, PCI_EXP_DEVCTL2,
				 PCI_EXP_DEVCTL2_LTR_EN);
dev_dbg(ipc_pcie->dev, "link between AP and CP is fully on");
/* Bail out unless both root port and child support ASPM L1.
 * NOTE(review): the bare "return;" implies this fragment belongs to a
 * void function, separate from the error-label code above — confirm
 * against the full file.
 */
if (!ipc_pcie_check_aspm_supported(ipc_pcie, true) ||
    !ipc_pcie_check_aspm_supported(ipc_pcie, false))
	return;
/* NOTE(review): probe-path fragment — enclosing function header and
 * goto error labels are outside this chunk.
 */
/* Initialize the device before it is used: ask low-level code to
 * enable I/O and memory, and wake up the device if it was suspended.
 */
if (pci_enable_device(pci)) {
	dev_err(ipc_pcie->dev, "failed to enable the AP PCIe device");
	/* If enabling the PCIe device failed, calling
	 * ipc_pcie_cleanup() would panic the system; moreover
	 * ipc_pcie_cleanup() is required to be called only after
	 * ipc_imem_mount().
	 */
	goto pci_enable_fail;
}
/* Allow 64-bit DMA addressing for this device. */
ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
if (ret) {
	dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
	goto set_mask_fail;
}
if (ipc_pcie_resources_request(ipc_pcie))
	goto resources_req_fail;
/* Establish the link to the imem layer. */
ipc_pcie->imem = ipc_imem_init(ipc_pcie, pci->device,
			       ipc_pcie->scratchpad, ipc_pcie->dev);
if (!ipc_pcie->imem) {
	dev_err(ipc_pcie->dev, "failed to init imem");
	goto imem_init_fail;
}
/* Enter sleep in s2idle case: force CP to sleep, publish the suspend
 * flag, then notify the shared memory layer.
 * Fix: "staticint" was missing the space between the storage class and
 * the return type and would not compile.
 */
static int __maybe_unused ipc_pcie_suspend_s2idle(struct iosm_pcie *ipc_pcie)
{
	/* Ask CP to force-enter sleep before the AP goes idle. */
	ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_SLEEP);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(0, &ipc_pcie->suspend);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();

	ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, true);

	return 0;
}
/* Resume from sleep in s2idle case: wake CP, notify the shared memory
 * layer, then clear the suspend flag.
 * Fix: "staticint" was missing the space between the storage class and
 * the return type and would not compile.
 */
static int __maybe_unused ipc_pcie_resume_s2idle(struct iosm_pcie *ipc_pcie)
{
	/* Ask CP to return to the active power state. */
	ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_ACTIVE);

	ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, false);

	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();

	clear_bit(0, &ipc_pcie->suspend);

	/* Complete all memory stores after clearing bit. */
	smp_mb__after_atomic();

	return 0;
}
int __maybe_unused ipc_pcie_suspend(struct iosm_pcie *ipc_pcie)
{
	/* The HAL shall ask the shared memory layer whether D3 is allowed. */
	ipc_imem_pm_suspend(ipc_pcie->imem);
	/* NOTE(review): function body continues beyond this chunk. */
int __maybe_unused ipc_pcie_resume(struct iosm_pcie *ipc_pcie)
{
	/* The HAL shall inform the shared memory layer that the device
	 * is active.
	 */
	ipc_imem_pm_resume(ipc_pcie->imem);
	/* NOTE(review): function body continues beyond this chunk. */
/* Store the DMA mapping address, direction and length in the skb
 * control-buffer scratch pad for later usage (e.g. unmapping).
 * NOTE(review): mid-function fragment — the enclosing function is
 * outside this chunk.
 */
IPC_CB(skb)->mapping = *mapping;
IPC_CB(skb)->direction = direction;
IPC_CB(skb)->len = size;
/* NOTE(review): unrelated boilerplate left over from a web extraction;
 * wrapped in a comment so the file stays parseable. Translated from
 * German: "The information on this web page was carefully compiled to
 * the best of our knowledge. However, neither completeness, nor
 * correctness, nor quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */