if (pci_is_root_bus(bus)) { /* * Only the root port (devfn == 0) is connected to this bus. * All other PCI devices are behind some bridge hence on another * bus.
*/ if (devfn) return NULL;
return pcie->reg_base + (where & 0xfff);
} /* Check that the link is up */ if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1)) return NULL; /* Clear AXI link-down status */
cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);
/* Update Output registers for AXI region 0. */
addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);
/* Configuration Type 0 or Type 1 access. */
desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); /* * The bus number was already set once for all in desc1 by * cdns_pcie_host_init_address_translation().
*/ if (busn == bridge->busnr + 1)
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0; else
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);
/* Wait for link training to complete. Exit after timeout. */
end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT; do {
lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA); if (!(lnk_stat & PCI_EXP_LNKSTA_LT)) break;
usleep_range(0, 1000);
} while (time_before(jiffies, end_jiffies));
/* Check if the link is up or not */ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { if (cdns_pcie_link_up(pcie)) {
dev_info(dev, "Link up\n"); return 0;
}
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
return -ETIMEDOUT;
}
staticint cdns_pcie_retrain(struct cdns_pcie *pcie)
{
u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
u16 lnk_stat, lnk_ctl; int ret = 0;
/*
 * Set retrain bit if current speed is 2.5 GB/s,
 * but the PCIe root port support is > 2.5 GB/s.
 */
/*
 * Set the root complex BAR configuration register:
 * - disable both BAR0 and BAR1.
 * - enable Prefetchable Memory Base and Limit registers in type 1
 *   config space (64 bits).
 * - enable IO Base and Limit registers in type 1 config
 *   space (32 bits).
 */
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
/* Set root port configuration space */ if (rc->vendor_id != 0xffff) {
id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
}
if (rc->device_id != 0xffff)
cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
if (entry->offset) {
dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n",
pci_addr, cpu_addr); return -EINVAL;
}
while (size > 0) { /* * Try to find a minimum BAR whose size is greater than * or equal to the remaining resource_entry size. This will * fail if the size of each of the available BARs is less than * the remaining resource_entry size. * If a minimum BAR is found, IB ATU will be configured and * exited.
*/
bar = cdns_pcie_host_find_min_bar(rc, size); if (bar != RP_BAR_UNDEFINED) {
ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
size, flags); if (ret)
dev_err(dev, "IB BAR: %d config failed\n", bar); return ret;
}
/*
 * If the control reaches here, it would mean the remaining
 * resource_entry size cannot be fitted in a single BAR. So we
 * find a maximum BAR whose size is less than or equal to the
 * remaining resource_entry size and split the resource entry
 * so that part of resource entry is fitted inside the maximum
 * BAR. The remaining size would be fitted during the next
 * iteration of the loop.
 * If a maximum BAR is not found, there is no way we can fit
 * this resource_entry, so we error out.
 */
bar = cdns_pcie_host_find_max_bar(rc, size); if (bar == RP_BAR_UNDEFINED) {
dev_err(dev, "No free BAR to map cpu_addr %llx\n",
cpu_addr); return -EINVAL;
}
winsize = bar_max_size[bar];
ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
flags); if (ret) {
dev_err(dev, "IB BAR: %d config failed\n", bar); return ret;
}
/* Reset inbound configuration for all BARs which were being used */ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { if (rc->avail_ib_bar[bar]) continue;
/*
 * Reset outbound region 0 which was reserved for configuration space
 * accesses.
 */
cdns_pcie_reset_outbound_region(pcie, 0);
/* Reset rest of the outbound regions */
r = 1;
resource_list_for_each_entry(entry, &bridge->windows) {
cdns_pcie_reset_outbound_region(pcie, r);
r++;
}
}
entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); if (entry)
busnr = entry->res->start;
/*
 * Reserve region 0 for PCI configure space accesses:
 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
 * cdns_pci_map_bus(), other region registers are set here once for all.
 */
addr1 = 0; /* Should be programmed to zero. */
desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);
if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
/*
 * NOTE(review): the text below is extraction residue — a German-language
 * disclaimer from the code-listing website this file was scraped from.
 * It is not part of the driver source; preserved here (translated) and
 * commented out so the file remains valid C. Original meaning:
 * "The information on this website has been carefully compiled to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */