/* Remove a command element from the pending-commands list and free it.
 * Must be called while cmd_lock is acquired (the list is protected by it).
 */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
				 struct qed_mcp_cmd_elem *p_cmd_elem)
{
	list_del(&p_cmd_elem->list);
	kfree(p_cmd_elem);
}
/* Must be called while cmd_lock is acquired */ staticstruct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
u16 seq_num)
{ struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); if (!p_info->public_base) {
DP_NOTICE(p_hwfn, "The address of the MCP scratch-pad is not configured\n"); return -EINVAL;
}
p_info->public_base |= GRCBASE_MCP;
/* Get the MFW MB address and number of supported messages */
mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
PUBLIC_MFW_MB));
p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
p_info->mfw_mb_addr +
offsetof(struct public_mfw_mb,
sup_msgs));
/* The driver can notify that there was an MCP reset, and might read the * SHMEM values before the MFW has completed initializing them. * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a * data ready indication.
*/ while (!p_info->mfw_mb_length && --cnt) {
msleep(msec);
p_info->mfw_mb_length =
(u16)qed_rd(p_hwfn, p_ptt,
p_info->mfw_mb_addr +
offsetof(struct public_mfw_mb, sup_msgs));
}
if (!cnt) {
DP_NOTICE(p_hwfn, "Failed to get the SHMEM ready notification after %d msec\n",
QED_MCP_SHMEM_RDY_MAX_RETRIES * msec); return -EBUSY;
}
/* Get the current driver mailbox sequence before sending * the first command
*/
p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK;
/* Get current FW pulse sequence */
p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
DRV_PULSE_SEQ_MASK;
/* Initialize the MFW spinlock */
spin_lock_init(&p_info->cmd_lock);
spin_lock_init(&p_info->link_lock);
spin_lock_init(&p_info->unload_lock);
INIT_LIST_HEAD(&p_info->cmd_list);
if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
DP_NOTICE(p_hwfn, "MCP is not initialized\n"); /* Do not free mcp_info here, since public_base indicate that * the MCP is not initialized
*/ return 0;
}
/* Use MCP history register to check if MCP reset occurred between init * time and now.
*/ if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
DP_VERBOSE(p_hwfn,
QED_MSG_SP, "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
p_hwfn->mcp_info->mcp_hist, generic_por_0);
/* Set drv command along with the updated sequence */
qed_mcp_reread_offsets(p_hwfn, p_ptt);
seq = ++p_hwfn->mcp_info->drv_mb_seq;
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
do { /* Wait for MFW response */
udelay(delay); /* Give the FW up to 500 second (50*1000*10usec) */
} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
MISCS_REG_GENERIC_POR_0)) &&
(cnt++ < QED_MCP_RESET_RETRIES));
if (org_mcp_reset_seq !=
qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
DP_VERBOSE(p_hwfn, QED_MSG_SP, "MCP was reset after %d usec\n", cnt * delay);
} else {
DP_ERR(p_hwfn, "Failed to reset MCP\n");
rc = -EAGAIN;
}
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
/* Must be called while cmd_lock is acquired */ staticbool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{ struct qed_mcp_cmd_elem *p_cmd_elem;
/* There is at most one pending command at a certain time, and if it * exists - it is placed at the HEAD of the list.
*/ if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list, struct qed_mcp_cmd_elem, list); return !p_cmd_elem->b_is_completed;
}
returnfalse;
}
/* Must be called while cmd_lock is acquired */ staticint
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{ struct qed_mcp_mb_params *p_mb_params; struct qed_mcp_cmd_elem *p_cmd_elem;
u32 mcp_resp;
u16 seq_num;
/* Return if no new non-handled response has been received */ if (seq_num != p_hwfn->mcp_info->drv_mb_seq) return -EAGAIN;
p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num); if (!p_cmd_elem) {
DP_ERR(p_hwfn, "Failed to find a pending mailbox cmd that expects sequence number %d\n",
seq_num); return -EINVAL;
}
p_mb_params = p_cmd_elem->p_mb_params;
/* Get the MFW response along with the sequence number */
p_mb_params->mcp_resp = mcp_resp;
/* Get the MFW param */
p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
/* Get the union data */ if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) {
u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
offsetof(struct public_drv_mb,
union_data);
qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
union_data_addr, p_mb_params->data_dst_size);
}
p_cmd_elem->b_is_completed = true;
return 0;
}
/* Must be called while cmd_lock is acquired */ staticvoid __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_mb_params *p_mb_params,
u16 seq_num)
{ union drv_union_data union_data;
u32 union_data_addr;
/* Set the union data */
union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
offsetof(struct public_drv_mb, union_data);
memset(&union_data, 0, sizeof(union_data)); if (p_mb_params->p_data_src && p_mb_params->data_src_size)
memcpy(&union_data, p_mb_params->p_data_src,
p_mb_params->data_src_size);
qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data, sizeof(union_data));
/* Set the drv param */
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
/* Set the drv command along with the sequence number */
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
/* Wait until the mailbox is non-occupied */ do { /* Exit the loop if there is no pending command, or if the * pending command is completed during this iteration. * The spinlock stays locked until the command is sent.
*/
if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
usleep_range(QED_MCP_RESP_ITER_US,
QED_MCP_RESP_ITER_US * 2); else
udelay(QED_MCP_RESP_ITER_US);
} while (++cnt < QED_DRV_MB_MAX_RETRIES);
if (cnt >= QED_DRV_MB_MAX_RETRIES) {
DP_NOTICE(p_hwfn, "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param); return -EAGAIN;
}
/* Wait for the MFW response */ do { /* Exit the loop if the command is already completed, or if the * command is completed during this iteration. * The spinlock stays locked until the list element is removed.
*/
if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
usleep_range(QED_MCP_RESP_ITER_US,
QED_MCP_RESP_ITER_US * 2); else
udelay(QED_MCP_RESP_ITER_US);
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
} while (++cnt < QED_DRV_MB_MAX_RETRIES);
if (cnt >= QED_DRV_MB_MAX_RETRIES) {
DP_NOTICE(p_hwfn, "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param);
qed_mcp_print_cpu_info(p_hwfn, p_ptt);
/* MCP not initialized */ if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); return -EBUSY;
}
if (p_hwfn->mcp_info->b_block_cmd) {
DP_NOTICE(p_hwfn, "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param); return -EBUSY;
}
if (p_mb_params->data_src_size > union_data_size ||
p_mb_params->data_dst_size > union_data_size) {
DP_ERR(p_hwfn, "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
p_mb_params->data_src_size,
p_mb_params->data_dst_size, union_data_size); return -EINVAL;
}
/* Use the maximal value since the actual one is part of the response */
mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN; if (b_can_sleep)
mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc;
/* First handle cases where another load request should/might be sent: * - MFW expects the old interface [HSI version = 1] * - MFW responds that a force load request is required
*/ if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
DP_INFO(p_hwfn, "MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
memset(&out_params, 0, sizeof(out_params));
rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params); if (rc) return rc;
} elseif (out_params.load_code ==
FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) { if (qed_mcp_can_force_load(in_params.drv_role,
out_params.exist_drv_role,
p_params->override_force_load)) {
DP_INFO(p_hwfn, "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
in_params.drv_role, in_params.fw_ver,
in_params.drv_ver_0, in_params.drv_ver_1,
out_params.exist_drv_role,
out_params.exist_fw_ver,
out_params.exist_drv_ver_0,
out_params.exist_drv_ver_1);
/* Now handle the other types of responses. * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not * expected here after the additional revised load requests were sent.
*/ switch (out_params.load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: case FW_MSG_CODE_DRV_LOAD_PORT: case FW_MSG_CODE_DRV_LOAD_FUNCTION: if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
out_params.drv_exists) { /* The role and fw/driver version match, but the PF is * already loaded and has not been unloaded gracefully.
*/
DP_NOTICE(p_hwfn, "PF is already loaded\n"); return -EINVAL;
} break; default:
DP_NOTICE(p_hwfn, "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
out_params.load_code); return -EBUSY;
}
p_params->load_code = out_params.load_code;
return 0;
}
/* Notify the MFW that the driver has completed its load sequence.
 *
 * Return: 0 on success, or the error code from sending the LOAD_DONE
 * mailbox command.
 *
 * NOTE(review): the original span contained mojibake ("&para;m" for
 * "&param"), lines referencing an undeclared 'transceiver_state' (spliced
 * in from a transceiver-change handler), and fell off the end of a non-void
 * function without returning a value (UB) - all fixed here.
 */
int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			 &param);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return 0;
}
p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
FUNC_MF_CFG_MIN_BW); if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
DP_INFO(p_hwfn, "bandwidth minimum out of bounds [%02x]. Set to 1\n",
p_info->bandwidth_min);
p_info->bandwidth_min = 1;
}
p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
FUNC_MF_CFG_MAX_BW); if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
DP_INFO(p_hwfn, "bandwidth maximum out of bounds [%02x]. Set to 100\n",
p_info->bandwidth_max);
p_info->bandwidth_max = 100;
}
}
/* Prevent SW/attentions from doing this at the same time */
spin_lock_bh(&p_hwfn->mcp_info->link_lock);
p_link = &p_hwfn->mcp_info->link_output;
memset(p_link, 0, sizeof(*p_link)); if (!b_reset) {
status = qed_rd(p_hwfn, p_ptt,
p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, link_status));
DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP), "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
status,
(u32)(p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, link_status)));
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link indications\n"); goto out;
}
if (p_hwfn->b_drv_link_init) { /* Link indication with modern MFW arrives as per-PF * indication.
*/ if (p_hwfn->mcp_info->capabilities &
FW_MB_PARAM_FEATURE_SUPPORT_VLINK) { struct public_func shmem_info;
/* There are MFWs that share this capability regardless of whether * this is feasible or not. And given that at the very least adv_caps * would be set internally by qed, we want to make sure LFA would * still work.
*/ if ((p_hwfn->mcp_info->capabilities &
FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; if (params->eee.tx_lpi_enable)
phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; if (params->eee.adv_caps & QED_EEE_1G_ADV)
phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G; if (params->eee.adv_caps & QED_EEE_10G_ADV)
phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
EEE_TX_TIMER_USEC_OFFSET) &
EEE_TX_TIMER_USEC_MASK;
}
/* if mcp fails to respond we must abort */ if (rc) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n"); return rc;
}
/* Mimic link-change attention, done for several reasons: * - On reset, there's no guarantee MFW would trigger * an attention. * - On initialization, older MFWs might not indicate link change * during LFA, so we'll never get an UP indication.
*/
qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
/* Prevent possible attentions/interrupts during the recovery handling * and till its load phase, during which they will be re-enabled.
*/
qed_int_igu_disable_int(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn, "Received a process kill indication\n");
/* The following operations should be done once, and thus in CMT mode * are carried out by only the first HW function.
*/ if (p_hwfn != QED_LEADING_HWFN(cdev)) return;
if (cdev->recov_in_prog) {
DP_NOTICE(p_hwfn, "Ignoring the indication since a recovery process is already in progress\n"); return;
}
/* Handle a fan-failure indication from the MFW by notifying the upper
 * driver of a fatal HW error.
 */
static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
		return;

	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
			  "Fan failure was detected on the network interface card and it's going to be shut down.\n");
}
if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
DP_INFO(p_hwfn, "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
p_mdump_cmd_params->cmd);
rc = -EOPNOTSUPP;
} elseif (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
DP_INFO(p_hwfn, "The mdump command is not supported by the MFW\n");
rc = -EOPNOTSUPP;
}
rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); if (rc) return rc;
if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
DP_INFO(p_hwfn, "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
mdump_cmd_params.mcp_resp); return -EINVAL;
}
/* In CMT mode - no need for more than a single acknowledgment to the * MFW, and no more than a single notification to the upper driver.
*/ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev)) return;
rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain); if (rc == 0 && mdump_retain.valid)
DP_NOTICE(p_hwfn, "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
mdump_retain.epoch,
mdump_retain.pf, mdump_retain.status); else
DP_NOTICE(p_hwfn, "The MFW notified that a critical error occurred in the device\n");
DP_NOTICE(p_hwfn, "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
qed_mcp_mdump_ack(p_hwfn, p_ptt);
/* ACK everything */ for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
/* MFW expect answer in BE, so we force write in that format */
qed_wr(p_hwfn, p_ptt,
info->mfw_mb_addr + sizeof(u32) +
MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * sizeof(u32) + i * sizeof(u32),
(__force u32)val);
}
if (!found) {
DP_NOTICE(p_hwfn, "Received an MFW message indication but no new message!\n");
rc = -EINVAL;
}
/* Copy the new mfw messages into the shadow */
memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
/* Read the address of the nvm_cfg */
nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); if (!nvm_cfg_addr) {
DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); return -EINVAL;
}
/* Read the offset of nvm_cfg1 */
nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
/* NOTE(review): the following text is unrelated German website boilerplate
 * that leaked into this file (bare prose at top level would not compile);
 * preserved here as a comment, translated to English:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither the completeness, correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */