/*
 * thrd_mask_[sym|asym|cpr|dcc]: these static arrays define the thread
 * configuration for handling requests of specific services across the
 * accelerator engines. Each element in an array corresponds to an
 * accelerator engine, with the value being a bitmask that specifies which
 * threads within that engine are capable of processing the particular
 * service.
 *
 * For example, a value of 0x0C means that threads 2 and 3 are enabled for
 * the service in the respective accelerator engine.
 */
static const unsigned long thrd_mask_sym[ADF_6XXX_MAX_ACCELENGINES] = {
	0x0C, 0x0C, 0x0C, 0x0C, 0x1C, 0x1C, 0x1C, 0x1C, 0x00
};
/*
 * If there is only one service enabled, use all ring pairs for
 * that service.
 * If there are two services enabled, use ring pairs 0 and 2 for
 * one service and ring pairs 1 and 3 for the other service.
 *
 * NOTE(review): this switch is a fragment; the enclosing function, any
 * further cases and the closing brace are outside this chunk.
 */
switch (nservices) { case ADF_ONE_SERVICE:
/* Single service: it may use every ring pair in the group */
rp_config[i].ring_mask = RP_GROUP_ALL_MASK; break; case ADF_TWO_SERVICES:
/* Two services: split ring pairs per the precomputed two-service masks */
rp_config[i].ring_mask = rmask_two_services[i]; break; case ADF_THREE_SERVICES:
/* Three services: each service gets the ring pair matching its index */
rp_config[i].ring_mask = BIT(i);
/* If ASYM is enabled, it additionally gets ring pair 3 */
if (service == SVC_ASYM)
rp_config[i].ring_mask |= BIT(RP3);
/* Fetch the ring pair configuration; on failure report an empty mask */
if (get_rp_config(accel_dev, rp_config, &num_services)) return 0;
/*
 * The thd2arb_mask maps ring pairs to threads within an accelerator engine.
 * It ensures that jobs submitted to ring pairs are scheduled on threads
 * capable of handling the specified service type.
 *
 * Each group of 4 bits in the mask corresponds to a thread, with each bit
 * indicating whether a job from a ring pair can be scheduled on that thread.
 * The use of 4 bits is due to the organization of ring pairs into groups of
 * four, where each group shares the same configuration.
 *
 * NOTE(review): the loop body continues past this chunk; p_mask and
 * ring_mask are presumably combined into the returned mask below —
 * confirm against the full file.
 */
for (i = 0; i < num_services; i++) {
/* Thread capability mask of engine 'ae' for this service */
p_mask = &rp_config[i].thrd_mask[ae];
/* Ring pairs assigned to this service within the group */
ring_mask = rp_config[i].ring_mask;
/* Fetch the ring pair configuration; on failure report mapping 0 */
if (get_rp_config(accel_dev, rp_config, &num_services)) return 0;
/*
 * Loop through the configured services and populate the `rps` array that
 * contains what service that particular ring pair can handle (i.e. symmetric
 * crypto, asymmetric crypto, data compression or compression chaining).
 */
for (i = 0; i < num_services; i++) {
cfg_mask = rp_config[i].ring_mask;
/* Each set bit selects one ring pair that serves this ring type */
for_each_set_bit(rp_num, &cfg_mask, ADF_GEN6_NUM_BANKS_PER_VF)
rps[rp_num] = rp_config[i].ring_type;
}
/*
 * The ring_to_svc_map is structured into segments of 3 bits, with each
 * segment representing the service configuration for a specific ring pair.
 * Since ring pairs are organized into groups of 4, the map contains 4
 * such 3-bit segments, each corresponding to one ring pair.
 *
 * The device has 64 ring pairs, which are organized in groups of 4, namely
 * 16 groups. Each group has the same configuration, represented here by
 * `ring_to_svc_map`.
 */
ring_to_svc_map = rps[RP0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
rps[RP1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
rps[RP2] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
rps[RP3] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
/*
 * The vector routing table is used to select the MSI-X entry to use for each
 * interrupt source.
 * The first ADF_GEN6_ETR_MAX_BANKS entries correspond to ring interrupts.
 * The final entry corresponds to VF2PF or error interrupts.
 * This vector table could be used to configure one MSI-X entry to be shared
 * between multiple interrupt sources.
 *
 * The default routing is set to have a one to one correspondence between the
 * interrupt source and the MSI-X entry used.
 */
static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
{
	void __iomem *csr = adf_get_pmisc_base(accel_dev);
	unsigned int i;

	/*
	 * <= is intentional: entries 0..ADF_GEN6_ETR_MAX_BANKS-1 route the
	 * ring interrupts; entry ADF_GEN6_ETR_MAX_BANKS routes the final
	 * VF2PF/error interrupt source.
	 */
	for (i = 0; i <= ADF_GEN6_ETR_MAX_BANKS; i++)
		ADF_CSR_WR(csr, ADF_GEN6_MSIX_RTTABLE_OFFSET(i), i);
}
/*
 * Write rpresetctl register BIT(0) as 1.
 * Since rpresetctl registers have no RW fields, no need to preserve
 * values for other bits. Just write directly.
 */
ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
ADF_WQM_CSR_RPRESETCTL_RESET);
/* Read rpresetsts register and wait for rp reset to complete */
ret = read_poll_timeout(ADF_CSR_RD, status,
status & ADF_WQM_CSR_RPRESETSTS_STATUS,
ADF_RPRESET_POLL_DELAY_US,
ADF_RPRESET_POLL_TIMEOUT_US, true,
csr, ADF_WQM_CSR_RPRESETSTS(bank_number)); if (ret) return ret;
/*
 * When ring pair reset is done, clear rpresetsts by writing the status
 * bit back (presumably write-1-to-clear — confirm against the HW spec).
 */
ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number), ADF_WQM_CSR_RPRESETSTS_STATUS);
/* Compute and log the thread-to-arbiter mask for every accel engine (ME) */
for (i = 0; i < hw_data->num_engines; i++) {
thd2arb_map[i] = adf_gen6_get_arb_mask(accel_dev, i);
dev_dbg(&GET_DEV(accel_dev), "ME:%d arb_mask:%#x\n", i, thd2arb_map[i]);
}
/*
 * After each PF FLR, for each of the 64 ring pairs in the PF, the
 * driver must program the ringmodectl CSRs.
 *
 * NOTE(review): fragment — the enclosing function starts outside this chunk.
 */
/* Read-modify-write: set default traffic class and enable TC handling */
value = ADF_CSR_RD(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number));
FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_MASK, &value, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_EN_MASK, &value, ADF_GEN6_RINGMODECTL_TC_EN_OP1);
ADF_CSR_WR(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number), value);
}
/*
 * After each PF FLR, the driver must program the Port Virtual Channel (VC)
 * Control Registers.
 * Read PVC0CTL then write the masked values.
 *
 * NOTE(review): the return values of pci_read_config_dword() are not
 * checked here while the writes are — confirm whether that is intentional.
 */
pci_read_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, &value);
FIELD_MODIFY(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, &value, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT);
err = pci_write_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, value); if (err) {
dev_err(&GET_DEV(accel_dev), "pci write to PVC0CTL failed\n"); return pcibios_err_to_errno(err);
}
/* Read PVC1CTL then write masked values */
pci_read_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, &value);
FIELD_MODIFY(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, &value, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT);
/* Unlike VC0, VC1 is additionally enabled via its VCEN field */
FIELD_MODIFY(ADF_GEN6_PVC1CTL_VCEN_MASK, &value, ADF_GEN6_PVC1CTL_VCEN_ON);
err = pci_write_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, value); if (err)
dev_err(&GET_DEV(accel_dev), "pci write to PVC1CTL failed\n");
/*
 * If bit 0 is set in the fuses, the first 4 engines are disabled.
 * If bit 4 is set, the second group of 4 engines are disabled.
 * If bit 8 is set, the admin engine (bit 8) is disabled.
 *
 * NOTE(review): only the bit-0 case is visible in this chunk; the bit-4
 * and bit-8 cases are presumably handled just below — confirm in the
 * full file.
 */
if (test_bit(0, &fuses))
mask &= ~ADF_AE_GROUP_0;
/* A set bit in fusectl1 means the corresponding feature is OFF in this SKU */
if (fusectl1 & ICP_ACCEL_GEN6_MASK_UCS_SLICE) {
/* UCS (cipher) slice fused off: strip the symmetric cipher capabilities */
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
} if (fusectl1 & ICP_ACCEL_GEN6_MASK_AUTH_SLICE) {
/*
 * Auth slice fused off: strip authentication/hash capabilities.
 * CIPHER is stripped here too — presumably because combined
 * cipher+auth modes need both slices; confirm against the HW spec.
 */
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
/* Set DRV_ACTIVE bit to power up the device */
ADF_CSR_WR(addr, ADF_GEN6_PM_INTERRUPT, ADF_GEN6_PM_DRV_ACTIVE);
/* Poll status register to make sure the device is powered up */
ret = read_poll_timeout(ADF_CSR_RD, status,
status & ADF_GEN6_PM_INIT_STATE,
ADF_GEN6_PM_POLL_DELAY_US,
ADF_GEN6_PM_POLL_TIMEOUT_US, true, addr,
ADF_GEN6_PM_STATUS); if (ret) {
dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); return ret;
}
dev_dbg(&GET_DEV(accel_dev), "Setting virtual channels for device qat_dev%d\n",
accel_dev->accel_id);
/* Program the PCIe port virtual channel registers; failure is fatal here */
ret = adf_gen6_set_vc(accel_dev); if (ret)
dev_err(&GET_DEV(accel_dev), "Failed to set virtual channels\n");
return ret;
}
/*
 * enable_pm() - enable power management for the device.
 *
 * Configures the firmware idle filter through the admin interface, then
 * initializes the driver-side PM bookkeeping.
 *
 * Return: 0 on success, a negative error code from adf_init_admin_pm()
 * otherwise.
 */
static int enable_pm(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER);
	if (ret)
		return ret;

	/* Initialize PM internal data */
	adf_gen6_init_dev_pm_data(accel_dev);

	return 0;
}
/*
 * dev_config() - create the device configuration sections and apply the
 * service-specific configuration.
 *
 * NOTE(review): the function body is cut off at the end of this chunk;
 * the remaining statements and closing brace are not visible here.
 */
staticint dev_config(struct adf_accel_dev *accel_dev)
{ int ret;
ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); if (ret) return ret;
ret = adf_cfg_section_add(accel_dev, "Accelerator0"); if (ret) return ret;
/* Compression services get a compression config; all others get none */
switch (adf_get_service_enabled(accel_dev)) { case SVC_DC: case SVC_DCC:
ret = adf_gen6_comp_dev_config(accel_dev); break; default:
ret = adf_gen6_no_dev_config(accel_dev); break;
} if (ret) return ret;
/*
 * NOTE(review): the following disclaimer appears to be extraneous website
 * boilerplate accidentally appended to this file; preserved here as a
 * comment (translated from German) so the file stays compilable:
 * "The information on this web page was compiled carefully and to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */