if (cdev->doorbells && cdev->db_size)
iounmap(cdev->doorbells); if (cdev->regview)
iounmap(cdev->regview); if (atomic_read(&pdev->enable_cnt) == 1)
pci_release_regions(pdev);
pci_disable_device(pdev);
}
#define PCI_REVISION_ID_ERROR_VAL 0xff
/* Performs PCI initializations as well as initializing PCI-related parameters * in the device structure. Returns 0 in case of success.
*/ staticint qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
u8 rev_id; int rc;
if (IS_PF(cdev)) {
ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); if (ptt) {
qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
&dev_info->mfw_rev, NULL);
/* If fastpath is initialized, we need at least one interrupt * per hwfn [and the slow path interrupts]. New requested number * should be a multiple of the number of hwfns.
*/
cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
DP_NOTICE(cdev, "Trying to enable MSI-X with less vectors (%d out of %d)\n",
cnt, int_params->in.num_vectors);
rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
cnt); if (!rc)
rc = cnt;
}
/* For VFs, we should return with an error in case we didn't get the * exact number of msix vectors as we requested. * Not doing that will lead to a crash when starting queues for * this VF.
*/ if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { /* MSI-x configuration was achieved */
int_params->out.int_mode = QED_INT_MODE_MSIX;
int_params->out.num_vectors = rc;
rc = 0;
} else {
DP_NOTICE(cdev, "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
cnt, rc);
}
return rc;
}
/* This function outputs the int mode and the number of enabled msix vector */ staticint qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{ struct qed_int_params *int_params = &cdev->int_params; struct msix_entry *tbl; int rc = 0, cnt;
if (p_handler->func)
p_handler->func(p_handler->token); else
DP_NOTICE(hwfn, "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
j, status);
status &= ~(0x2ULL << j);
rc = IRQ_HANDLED;
}
}
if (unlikely(status))
DP_VERBOSE(hwfn, NETIF_MSG_INTR, "got an unknown interrupt status 0x%llx\n",
status);
}
return rc;
}
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{ struct qed_dev *cdev = hwfn->cdev;
u32 int_mode; int rc = 0;
u8 id;
/* Flush the slowpath DPC tasklet of @p_hwfn.
 *
 * Calling the disable function will make sure that any currently-running
 * tasklet function is completed. The following call to the enable function
 * makes this sequence a flush-like operation.
 */
static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(&p_hwfn->sp_dpc);
		tasklet_enable(&p_hwfn->sp_dpc);
	}
}
int_mode = cdev->int_params.out.int_mode; if (int_mode == QED_INT_MODE_MSIX)
synchronize_irq(cdev->int_params.msix_table[id].vector); else
synchronize_irq(cdev->pdev->irq);
qed_slowpath_tasklet_flush(p_hwfn);
}
/* Release the slowpath IRQs of all hwfns on @cdev.
 *
 * In MSI-X mode every hwfn owns its own vector; the loop stops at the first
 * hwfn that never requested its interrupt, since later hwfns cannot have
 * requested theirs either. In MSI/INTx mode a single shared IRQ belongs to
 * the leading hwfn only.
 */
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			free_irq(cdev->int_params.msix_table[i].vector,
				 &cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}

	/* Let the interrupt layer know no further ISRs will be requested */
	qed_int_disable_post_isr_release(cdev);
}
/* Stop the NIC: halt the HW of @cdev, then disable every hwfn's slowpath
 * tasklet so no DPC runs after the hardware is down.
 *
 * Return: the result of qed_hw_stop() (0 on success, negative errno
 * otherwise).
 */
static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(&p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, &p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}
staticint qed_nic_setup(struct qed_dev *cdev)
{ int rc, i;
/* Determine if interface is going to require LL2 */ if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { for (i = 0; i < cdev->num_hwfns; i++) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (!cdev->int_params.fp_initialized) {
DP_INFO(cdev, "Protocol driver requested interrupt information, but its support is not yet configured\n"); return -EINVAL;
}
/* Need to expose only MSI-X information; Single IRQ is handled solely * by qed.
*/ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { int msix_base = cdev->int_params.fp_msix_base;
/* We want a minimum of one slowpath and one fastpath vector per hwfn */
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
if (is_kdump_kernel()) {
DP_INFO(cdev, "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
cdev->int_params.in.min_msix_cnt);
cdev->int_params.in.num_vectors =
cdev->int_params.in.min_msix_cnt;
}
staticvoid qed_update_pf_params(struct qed_dev *cdev, struct qed_pf_params *params)
{ int i;
if (IS_ENABLED(CONFIG_QED_RDMA)) {
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; /* divide by 3 the MRs to avoid MF ILT overflow */
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
if (cdev->num_hwfns > 1 || IS_VF(cdev))
params->eth_pf_params.num_arfs_filters = 0;
/* In case we might support RDMA, don't allow qede to be greedy * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] * per hwfn.
*/ if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
u16 *num_cons;
/* Allocate LL2 interface if needed */ if (QED_LEADING_HWFN(cdev)->using_ll2) {
rc = qed_ll2_alloc_if(cdev); if (rc) goto err3;
} if (IS_PF(cdev)) {
hwfn = QED_LEADING_HWFN(cdev);
drv_version.version = (params->drv_major << 24) |
(params->drv_minor << 16) |
(params->drv_rev << 8) |
(params->drv_eng);
strscpy(drv_version.name, params->name, sizeof(drv_version.name));
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version); if (rc) {
DP_NOTICE(cdev, "Failed sending drv version command\n"); goto err4;
}
}
qed_reset_vport_stats(cdev);
return 0;
err4:
qed_ll2_dealloc_if(cdev);
err3:
qed_hw_stop(cdev);
err2:
qed_hw_timers_stop_all(cdev); if (IS_PF(cdev))
qed_slowpath_irq_free(cdev);
qed_free_stream_mem(cdev);
qed_disable_msix(cdev);
err1:
qed_resc_free(cdev);
err: if (IS_PF(cdev))
release_firmware(cdev->firmware);
if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
QED_LEADING_HWFN(cdev)->p_arfs_ptt)
qed_ptt_release(QED_LEADING_HWFN(cdev),
QED_LEADING_HWFN(cdev)->p_arfs_ptt);
qed_iov_wq_stop(cdev, false);
qed_slowpath_wq_stop(cdev);
return rc;
}
/* Tear down the slowpath of @cdev: stop workqueues, release LL2, PTTs,
 * stream memory and SR-IOV (PF only), halt the NIC, free IRQs/MSI-X,
 * release resources and firmware.
 *
 * Return: 0 on success, -ENODEV if @cdev is NULL.
 */
static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		/* The aRFS PTT exists only on single-hwfn PF devices */
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);

		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}
staticvoid qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{ int i;
/* The link should be set only once per PF */
hwfn = &cdev->hwfns[0];
/* When VF wants to set link, force it to read the bulletin instead. * This mimics the PF behavior, where a notification [both immediate * and possible later] would be generated when changing properties.
*/ if (IS_VF(cdev)) {
qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); return 0;
}
ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EBUSY;
link_params = qed_mcp_get_link_params(hwfn); if (!link_params) return -ENODATA;
speed = &link_params->speed;
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
speed->autoneg = !!params->autoneg;
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
speed->advertised_speeds = 0;
for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
map = qed_mfw_legacy_maps + i;
if (linkmode_intersects(params->adv_speeds, map->caps))
speed->advertised_speeds |= map->mfw_val;
}
}
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
speed->forced_speed = params->forced_speed;
if (qed_mcp_is_ext_speed_supported(hwfn))
qed_set_ext_speed_params(link_params, params);
if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
link_params->pause.autoneg = true; else
link_params->pause.autoneg = false; if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
link_params->pause.forced_rx = true; else
link_params->pause.forced_rx = false; if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
link_params->pause.forced_tx = true; else
link_params->pause.forced_tx = false;
}
if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { switch (params->loopback_mode) { case QED_LINK_LOOPBACK_INT_PHY:
link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; break; case QED_LINK_LOOPBACK_EXT_PHY:
link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; break; case QED_LINK_LOOPBACK_EXT:
link_params->loopback_mode = ETH_LOOPBACK_EXT; break; case QED_LINK_LOOPBACK_MAC:
link_params->loopback_mode = ETH_LOOPBACK_MAC; break; case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
link_params->loopback_mode =
ETH_LOOPBACK_CNIG_AH_ONLY_0123; break; case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
link_params->loopback_mode =
ETH_LOOPBACK_CNIG_AH_ONLY_2301; break; case QED_LINK_LOOPBACK_PCS_AH_ONLY:
link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY; break; case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
link_params->loopback_mode =
ETH_LOOPBACK_REVERSE_MAC_AH_ONLY; break; case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
link_params->loopback_mode =
ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY; break; default:
link_params->loopback_mode = ETH_LOOPBACK_NONE; break;
}
}
if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
memcpy(&link_params->eee, ¶ms->eee, sizeof(link_params->eee));
if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
link_params->fec = params->fec;
switch (media_type) { case MEDIA_DA_TWINAX:
phylink_set(if_caps, FIBRE);
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
phylink_set(if_caps, 20000baseKR2_Full);
/* For DAC media multiple speed capabilities are supported */
capability |= speed_mask;
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
phylink_set(if_caps, 1000baseKX_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
phylink_set(if_caps, 10000baseCR_Full);
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_40G_CR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
phylink_set(if_caps, 40000baseCR4_Full); break; default: break;
}
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
phylink_set(if_caps, 25000baseCR_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
phylink_set(if_caps, 50000baseCR2_Full);
if (capability &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_100G_CR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
phylink_set(if_caps, 100000baseCR4_Full); break; default: break;
}
break; case MEDIA_BASE_T:
phylink_set(if_caps, TP);
if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { if (capability &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
phylink_set(if_caps, 1000baseT_Full); if (capability &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
phylink_set(if_caps, 10000baseT_Full);
}
if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
phylink_set(if_caps, FIBRE);
switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_1000BASET:
phylink_set(if_caps, 1000baseT_Full); break; case ETH_TRANSCEIVER_TYPE_10G_BASET:
phylink_set(if_caps, 10000baseT_Full); break; default: break;
}
}
break; case MEDIA_SFP_1G_FIBER: case MEDIA_SFPP_10G_FIBER: case MEDIA_XFP_FIBER: case MEDIA_MODULE_FIBER:
phylink_set(if_caps, FIBRE);
capability |= speed_mask;
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_1G_LX: case ETH_TRANSCEIVER_TYPE_1G_SX: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
phylink_set(if_caps, 1000baseKX_Full); break; default: break;
}
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_10G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
phylink_set(if_caps, 10000baseSR_Full); break; case ETH_TRANSCEIVER_TYPE_10G_LR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
phylink_set(if_caps, 10000baseLR_Full); break; case ETH_TRANSCEIVER_TYPE_10G_LRM:
phylink_set(if_caps, 10000baseLRM_Full); break; case ETH_TRANSCEIVER_TYPE_10G_ER:
phylink_set(if_caps, 10000baseR_FEC); break; default: break;
}
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
phylink_set(if_caps, 20000baseKR2_Full);
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_25G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
phylink_set(if_caps, 25000baseSR_Full); break; default: break;
}
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_40G_LR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
phylink_set(if_caps, 40000baseLR4_Full); break; case ETH_TRANSCEIVER_TYPE_40G_SR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
phylink_set(if_caps, 40000baseSR4_Full); break; default: break;
}
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
phylink_set(if_caps, 50000baseKR2_Full);
if (capability &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_100G_SR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
phylink_set(if_caps, 100000baseSR4_Full); break; case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
phylink_set(if_caps, 100000baseLR4_ER4_Full); break; default: break;
}
break; case MEDIA_KR:
phylink_set(if_caps, Backplane);
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
phylink_set(if_caps, 20000baseKR2_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
phylink_set(if_caps, 1000baseKX_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
phylink_set(if_caps, 10000baseKR_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
phylink_set(if_caps, 25000baseKR_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
phylink_set(if_caps, 40000baseKR4_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
phylink_set(if_caps, 50000baseKR2_Full); if (capability &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
phylink_set(if_caps, 100000baseKR4_Full);
break; case MEDIA_UNSPECIFIED: case MEDIA_NOT_PRESENT: default:
DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, "Unknown media and transceiver type;\n"); break;
}
}
/* Prepare source inputs */ if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) {
dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); return;
}
/* Set the link parameters to pass to protocol driver */ if (link.link_up)
if_link->link_up = true;
if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) { if (link_caps.default_ext_autoneg)
phylink_set(if_link->supported_caps, Autoneg);
/* TODO - fill duplex properly */
if_link->duplex = DUPLEX_FULL;
qed_mcp_get_media_type(hwfn, ptt, &media_type);
if_link->port = qed_get_port_type(media_type);
if_link->autoneg = params.speed.autoneg;
if (params.pause.autoneg)
if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; if (params.pause.forced_rx)
if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; if (params.pause.forced_tx)
if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
if (link.an_complete)
phylink_set(if_link->lp_caps, Autoneg); if (link.partner_adv_pause)
phylink_set(if_link->lp_caps, Pause); if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
phylink_set(if_link->lp_caps, Asym_Pause);
/* Allocate a buffer for holding the nvram image */
buf = kzalloc(nvm_image->length, GFP_KERNEL); if (!buf) return -ENOMEM;
/* Read image into buffer */
rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
buf, nvm_image->length); if (rc) {
DP_ERR(cdev, "Failed reading image from nvm\n"); goto out;
}
/* Convert the buffer into big-endian format (excluding the * closing 4 bytes of CRC).
*/
cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
DIV_ROUND_UP(nvm_image->length - 4, 4));
/* Calc CRC for the "actual" image buffer, i.e. not including * the last 4 CRC bytes.
*/
*crc = ~crc32(~0U, buf, nvm_image->length - 4);
*crc = (__force u32)cpu_to_be32p(crc);
out:
kfree(buf);
return rc;
}
/* Binary file format - * /----------------------------------------------------------------------\ * 0B | 0x4 [command index] | * 4B | image_type | Options | Number of register settings | * 8B | Value | * 12B | Mask | * 16B | Offset | * \----------------------------------------------------------------------/ * There can be several Value-Mask-Offset sets as specified by 'Number of...'. * Options - 0'b - Calculate & Update CRC for image
*/ staticint qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, bool *check_resp)
{ struct qed_nvm_image_att nvm_image; struct qed_hwfn *p_hwfn; bool is_crc = false;
u32 image_type; int rc = 0, i;
u16 len;
*data += 4;
image_type = **data;
p_hwfn = QED_LEADING_HWFN(cdev); for (i = 0; i < p_hwfn->nvm_info.num_images; i++) if (image_type == p_hwfn->nvm_info.image_att[i].image_type) break; if (i == p_hwfn->nvm_info.num_images) {
DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
image_type); return -ENOENT;
}
DP_VERBOSE(cdev, NETIF_MSG_DRV, "About to start a new file of type %02x\n", file_type); if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
*data += 4;
file_size = *((u32 *)(*data));
}
/* Binary file format - * /----------------------------------------------------------------------\ * 0B | 0x2 [command index] | * 4B | Length in bytes | * 8B | b'0: check_response? | b'1-31 reserved | * 12B | Offset in bytes | * 16B | Data ... | * \----------------------------------------------------------------------/ * Write data as part of a file that was previously started. Data should be * of length equal to that provided in the message
*/ staticint qed_nvm_flash_image_file_data(struct qed_dev *cdev, const u8 **data, bool *check_resp)
{
u32 offset, len; int rc;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.