/* Get PCS Link down sticky */
value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value); if (status->link_down) /* Clear the sticky */
spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));
/* Get both current Link and Sync status */
value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);
value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0)); if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) { /* The link is or has been down. Clear the sticky bit */
status->link_down = 1;
spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
}
status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
status->duplex = DUPLEX_FULL; if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
status->speed = SPEED_5000; elseif (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
status->speed = SPEED_10000; else
status->speed = SPEED_25000;
return 0;
}
/* Get link status of 1000Base-X/in-band and SFI ports.
*/ int sparx5_get_port_status(struct sparx5 *sparx5, struct sparx5_port *port, struct sparx5_port_status *status)
{
memset(status, 0, sizeof(*status));
status->speed = port->conf.speed; if (port->conf.power_down) {
status->link = false; return 0;
} switch (port->conf.portmode) { case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_2500BASEX: return sparx5_get_dev2g5_status(sparx5, port, status); case PHY_INTERFACE_MODE_5GBASER: case PHY_INTERFACE_MODE_10GBASER: case PHY_INTERFACE_MODE_25GBASER: return sparx5_get_sfi_status(sparx5, port, status); case PHY_INTERFACE_MODE_NA: return 0; default:
netdev_err(port->ndev, "Status not supported"); return -ENODEV;
} return 0;
}
/* Log a human-readable error for an invalid port configuration request.
 *
 * NOTE(review): extraction artifacts — "staticint" should read "static int",
 * and the function is cut short here (no return statement or closing brace
 * is visible before unrelated code resumes). Code left byte-identical;
 * restore from the original source rather than guessing at the tail.
 */
staticint sparx5_port_error(struct sparx5_port *port, struct sparx5_port_config *conf, enum port_error errtype)
{ switch (errtype) { case SPX5_PERR_SPEED:
/* Requested speed is not available in the selected phy interface mode */
netdev_err(port->ndev, "Interface does not support speed: %u: for %s\n",
conf->speed, phy_modes(conf->portmode)); break; case SPX5_PERR_IFTYPE:
/* The phy interface mode itself is not supported by this switch port */
netdev_err(port->ndev, "Switch port does not support interface type: %s\n",
phy_modes(conf->portmode)); break; default:
/* Catch-all for any other configuration error */
netdev_err(port->ndev, "Interface configuration error\n");
}
/* NOTE(review): interior of a port shutdown/re-configuration sequence
 * (steps 6..10 of a numbered procedure). The enclosing function's header
 * and earlier steps are not visible in this chunk, so the locals
 * spd_prm, spd, err, high_spd_dev, devinst, tx_gap, hdx_gap_1, hdx_gap_2,
 * fdx and clk_spd are declared in missing lines. Code left byte-identical.
 */
/* Slower links get a proportionally longer drain wait below */
spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10; /* 6: Wait while the last frame is exiting the queues */
usleep_range(8 * spd_prm, 10 * spd_prm);
/* 7: Flush the queues associated with the port->portno */
spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
HSCH_FLUSH_CTRL_FLUSH_PORT |
HSCH_FLUSH_CTRL_FLUSH_DST |
HSCH_FLUSH_CTRL_FLUSH_SRC |
HSCH_FLUSH_CTRL_FLUSH_ENA,
sparx5,
HSCH_FLUSH_CTRL);
/* 8: Enable dequeuing from the egress queues */
spx5_rmw(0,
HSCH_PORT_MODE_DEQUEUE_DIS,
sparx5,
HSCH_PORT_MODE(port->portno));
/* 9: Wait until flushing is complete */
err = sparx5_port_flush_poll(sparx5, port->portno); if (err) return err;
/* 10: Reset the MAC clock domain */ if (high_spd_dev) {
/* High-speed device: hold the DEV10G PCS-TX and MAC RX/TX in reset */
spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
DEV10G_DEV_RST_CTRL_PCS_TX_RST |
DEV10G_DEV_RST_CTRL_MAC_RX_RST |
DEV10G_DEV_RST_CTRL_MAC_TX_RST,
devinst,
DEV10G_DEV_RST_CTRL(0));
/* NOTE(review): the DEV2G5 configuration below appears spliced in from a
 * different code path by the extraction (its gap/duplex/clock locals are
 * never declared in the visible text) — verify against the original file.
 */
/* Set MAC IFG Gaps */
spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
sparx5,
DEV2G5_MAC_IFG_CFG(port->portno));
/* Disabling frame aging when in HDX (due to HDX issue) */
spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
HSCH_PORT_MODE_AGE_DIS,
sparx5,
HSCH_PORT_MODE(port->portno));
/* Enable MAC module */
spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
DEV2G5_MAC_ENA_CFG_TX_ENA,
sparx5,
DEV2G5_MAC_ENA_CFG(port->portno));
/* Select speed and take MAC out of reset */
spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
DEV2G5_DEV_RST_CTRL_SPEED_SEL |
DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
sparx5,
DEV2G5_DEV_RST_CTRL(port->portno));
/* Enable PHAD_CTRL for better timestamping */ if (!is_sparx5(sparx5)) { for (int i = 0; i < 2; ++i) { /* Divide the port clock by three for the two * phase detection registers.
*/
spx5_rmw(DEV2G5_PHAD_CTRL_DIV_CFG_SET(3) |
DEV2G5_PHAD_CTRL_PHAD_ENA_SET(1),
DEV2G5_PHAD_CTRL_DIV_CFG |
DEV2G5_PHAD_CTRL_PHAD_ENA,
sparx5, DEV2G5_PHAD_CTRL(port->portno, i));
}
}
return 0;
}
/* Configure the PCS of a port for the requested port configuration.
 *
 * NOTE(review): truncated in this chunk — the body breaks off after the
 * max-length setup and a different function begins; "ops" is used without
 * a visible declaration, so earlier lines are missing. Code left
 * byte-identical; restore the full body from the original source.
 */
int sparx5_port_pcs_set(struct sparx5 *sparx5, struct sparx5_port *port, struct sparx5_port_config *conf)
{ bool high_speed_dev = sparx5_is_baser(conf->portmode); int err;
if (ops->is_port_rgmii(port->portno)) return 0; /* RGMII device - nothing more to configure */
/* Configure MAC vlan awareness */
err = sparx5_port_max_tags_set(sparx5, port); if (err) return err;
/* Set Max Length */
spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
sparx5,
DEV2G5_MAC_MAXLEN_CFG(port->portno));
/* Program the egress PCP/DEI rewrite tables for a port.
 *
 * NOTE(review): heavily garbled by extraction — "conststruct" should read
 * "const struct", and the body appears to splice fragments of several
 * functions together (pcp_itr, dp, rewr and dscp are used without visible
 * declarations; "mode" is computed but never consumed in the visible text).
 * Code left byte-identical; reconcile against the original source.
 */
int sparx5_port_qos_pcp_rewr_set(conststruct sparx5_port *port, struct sparx5_port_qos_pcp_rewr *qos)
{ int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED; struct sparx5 *sparx5 = port->sparx5;
u8 pcp, dei;
/* Use mapping table, with classified QoS as index, to map QoS and DP * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified * PCP. Classified PCP equals frame PCP.
*/ if (qos->enable)
mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;
for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) { /* Extract PCP and DEI */
pcp = qos->map.map[i]; if (pcp > SPARX5_PORT_QOS_PCP_COUNT)
dei = 1; else
dei = 0;
/* Rewrite PCP and DEI, for each classified QoS class and DP * level. This table is only used if tag ctrl mode is set to * 'mapped'. * * 0:0nd - prio=0 and dp:0 => pcp=0 and dei=0 * 0:0de - prio=0 and dp:1 => pcp=0 and dei=1
*/ if (dei) {
spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
REW_PCP_MAP_DE1_PCP_DE1, sparx5,
REW_PCP_MAP_DE1(port->portno, i));
/* NOTE(review): splice seam — the ingress classification code below
 * looks like it belongs to a different (pcp classification) function.
 */
/* Enable/disable pcp and dp for qos classification. */
spx5_rmw(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(qos->qos_enable) |
ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(qos->dp_enable),
ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA | ANA_CL_QOS_CFG_PCP_DEI_DP_ENA,
sparx5, ANA_CL_QOS_CFG(port->portno));
/* Map each pcp and dei value to priority and dp */ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
pcp = *(pcp_itr + i);
dp = (i < SPARX5_PORT_QOS_PCP_COUNT) ? 0 : 1;
spx5_rmw(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(pcp) |
ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(dp),
ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL |
ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, sparx5,
ANA_CL_PCP_DEI_MAP_CFG(port->portno, i));
}
/* NOTE(review): splice seam — the DSCP rewrite code below looks like it
 * belongs to a DSCP-rewrite function ("rewr" and "dscp" undeclared here).
 */
/* On egress, rewrite DSCP value to either classified DSCP or frame * DSCP. If enabled; classified DSCP, if disabled; frame DSCP.
*/ if (qos->enable)
rewr = true;
/* On ingress, map each classified QoS class and DP to classified DSCP * value. This mapping table is global for all ports.
*/ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
dscp = qos->map.map[i];
spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
ANA_CL_QOS_MAP_CFG(i));
}
return 0;
}
/* Program DSCP-based QoS classification for a port.
 *
 * Enables/disables DSCP and DP classification, maps each of the DSCP
 * values in qos->map to a priority, and marks trusted DSCP values.
 *
 * NOTE(review): "conststruct" is an extraction artifact of "const struct".
 * The default-prio/pcp section near the end references qos->default_prio,
 * which no other visible code uses — it may be spliced in from a separate
 * defaults-setting function; verify against the original source.
 */
int sparx5_port_qos_dscp_set(conststruct sparx5_port *port, struct sparx5_port_qos_dscp *qos)
{ struct sparx5 *sparx5 = port->sparx5;
u8 *dscp = qos->map.map; int i;
/* Enable/disable dscp and dp for qos classification. * Disable rewrite of dscp values for now.
*/
spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
ANA_CL_QOS_CFG(port->portno));
/* Map each dscp value to priority and dp */ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
ANA_CL_DSCP_CFG(i));
}
/* Set per-dscp trust */ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) { if (qos->qos_enable) {
spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
ANA_CL_DSCP_CFG(i));
}
}
/* Set default prio and dp level */
spx5_rmw(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(qos->default_prio) |
ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(0),
ANA_CL_QOS_CFG_DEFAULT_QOS_VAL |
ANA_CL_QOS_CFG_DEFAULT_DP_VAL,
sparx5, ANA_CL_QOS_CFG(port->portno));
/* Set default pcp and dei for untagged frames */
spx5_rmw(ANA_CL_VLAN_CTRL_PORT_PCP_SET(0) |
ANA_CL_VLAN_CTRL_PORT_DEI_SET(0),
ANA_CL_VLAN_CTRL_PORT_PCP |
ANA_CL_VLAN_CTRL_PORT_DEI,
sparx5, ANA_CL_VLAN_CTRL(port->portno));
return 0;
}
/* Translate a logical internal-port index into a chip port number.
 *
 * Internal (CPU-facing) ports are numbered directly after the front
 * ports, so the result is simply the front-port count plus @port.
 */
int sparx5_get_internal_port(struct sparx5 *sparx5, int port)
{
	return sparx5->data->consts->n_ports + port;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.24 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.