/* NOTE(review): this region is corrupted extraction output - statements from
 * several different functions are fused onto shared lines and the enclosing
 * function headers/tails are missing.  Comments below only mark the apparent
 * fragment boundaries; restore from the upstream TI am65-cpsw-qos driver
 * before relying on any of it.
 */
/* Fragment A: tail of a per-port TX shaper apply routine - suspends the
 * shaper when the summed rate exceeds link speed, then (re)programs per-TC
 * shapers.  'p_mqprio', 'port', 'shaper_susp', 'enable' are declared in the
 * missing function header.
 */
mqprio = &p_mqprio->mqprio_hw; /* takes care of no link case as well */ if (p_mqprio->max_rate_total > port->qos.link_speed)
shaper_susp = true;
am65_cpsw_tx_pn_shaper_reset(port);
enable = p_mqprio->shaper_en && !shaper_susp; if (!enable) return;
/* Rate limit is specified per Traffic Class but * for CPSW, rate limit can be applied per priority * at port FIFO. * * We have assigned the same priority (TCn) to all queues * of a Traffic Class so they share the same shaper * bandwidth.
*/ for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
prio = tc;
/* Fragment B: body of an mqprio setup ndo - pm_runtime guard, reset path
 * when num_tc == 0, shaper verification, then tc->queue priority mapping.
 * 'num_tc', 'ndev', 'qopt', 'tx_prio_map' come from the missing header.
 */
ret = pm_runtime_get_sync(common->dev); if (ret < 0) {
pm_runtime_put_noidle(common->dev); return ret;
}
if (!num_tc) {
am65_cpsw_reset_tc_mqprio(ndev);
ret = 0; goto exit_put;
}
ret = am65_cpsw_mqprio_verify_shaper(port, mqprio); if (ret) goto exit_put;
netdev_set_num_tc(ndev, num_tc);
/* Multiple Linux priorities can map to a Traffic Class * A Traffic Class can have multiple contiguous Queues, * Queues get mapped to Channels (thread_id), * if not VLAN tagged, thread_id is used as packet_priority * if VLAN tagged. VLAN priority is used as packet_priority * packet_priority gets mapped to header_priority in p0_rx_pri_map, * header_priority gets mapped to switch_priority in pn_tx_pri_map. * As p0_rx_pri_map is left at defaults (0x76543210), we can * assume that Queue_n gets mapped to header_priority_n. We can then * set the switch priority in pn_tx_pri_map.
*/
for (tc = 0; tc < num_tc; tc++) {
prio = tc;
/* For simplicity we assign the same priority (TCn) to * all queues of a Traffic Class.
*/ for (i = qopt->offset[tc]; i < qopt->offset[tc] + qopt->count[tc]; i++)
tx_prio_map |= prio << (4 * i);
/* Fragment C: IET verify-timeout-count computation - derives the wireside
 * clock from PHY interface mode and link speed.  NOTE(review): 'elseif'
 * below is a fused 'else if' from the extraction, not valid C.
 */
/* The number of wireside clocks contained in the verify * timeout counter. The default is 0x1312d0 * (10ms at 125Mhz in 1G mode). * The frequency of the clock depends on the link speed * and the PHY interface.
*/ switch (port->slave.phy_if) { case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: if (port->qos.link_speed == SPEED_1000)
val = 125 * HZ_PER_MHZ; /* 125 MHz at 1000Mbps*/ elseif (port->qos.link_speed == SPEED_100)
val = 25 * HZ_PER_MHZ; /* 25 MHz at 100Mbps*/ else
val = (25 * HZ_PER_MHZ) / 10; /* 2.5 MHz at 10Mbps*/ break;
case PHY_INTERFACE_MODE_QSGMII: case PHY_INTERFACE_MODE_SGMII:
val = 125 * HZ_PER_MHZ; /* 125 MHz */ break;
default:
netdev_err(port->ndev, "selected mode does not supported IET\n"); return -EOPNOTSUPP;
}
val /= MILLIHZ_PER_HZ; /* count per ms timeout */
val *= verify_time_ms; /* count for timeout ms */
if (val > AM65_CPSW_PN_MAC_VERIFY_CNT_MASK) return -EINVAL;
/* Fragment D: programming the preemptible-TC mask into IET_CTRL -
 * 'preemptible_tcs' comes from a missing enclosing function.
 */
val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
val &= ~AM65_CPSW_PN_IET_MAC_PREMPT_MASK;
val |= AM65_CPSW_PN_IET_MAC_SET_PREEMPT(preemptible_tcs);
writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
}
/* enable common IET_ENABLE only if at least 1 port has rx IET enabled.
 * UAPI doesn't allow tx enable without rx enable.
 */
void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common)
{
	struct am65_cpsw_port *port;
	bool rx_enable = false;
	u32 val;
	int i;

	/* One port with RX IET enabled is enough to require the
	 * switch-wide IET enable bit.
	 */
	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];
		val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
		rx_enable = !!(val & AM65_CPSW_PN_CTL_IET_PORT_EN);
		if (rx_enable)
			break;
	}

	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);

	if (rx_enable)
		val |= AM65_CPSW_CTL_IET_EN;
	else
		val &= ~AM65_CPSW_CTL_IET_EN;

	/* NOTE(review): the original text was truncated after computing
	 * 'val'; the write-back below restores the obviously intended
	 * commit of the common control register.
	 */
	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
}
/* CPSW does not have an IRQ to notify changes to the MAC Merge TX status * (active/inactive), but the preemptible traffic classes should only be * committed to hardware once TX is active. Resort to polling.
*/ void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port)
{
/* NOTE(review): 'preemptible_tcs' is declared but never used in the visible
 * text - the function tail that actually commits the preemptible-TC mask
 * appears to have been lost in extraction.  Confirm against the upstream
 * driver.
 */
u8 preemptible_tcs; int err;
u32 val;
/* Nothing to commit until the link speed is known */
if (port->qos.link_speed == SPEED_UNKNOWN) return;
/* Skip ports that do not have IET RX enabled */
val = readl(port->port_base + AM65_CPSW_PN_REG_CTL); if (!(val & AM65_CPSW_PN_CTL_IET_PORT_EN)) return;
/* update common IET enable */
am65_cpsw_iet_common_enable(port->common);
/* update verify count */
err = am65_cpsw_iet_set_verify_timeout_count(port); if (err) {
netdev_err(port->ndev, "couldn't set verify count: %d\n", err); return;
}
/* If MAC Merge verification is enabled, poll until the peer responds */
val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL); if (!(val & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY)) {
err = am65_cpsw_iet_verify_wait(port); if (err) return;
}
/* am65_cpsw_port_est_is_swapped() - Indicate if h/w is transitioned
 * admin -> oper or not
 *
 * Return true if already transitioned. i.e oper is equal to admin and buf
 * numbers match (est_oper->buf match with est_admin->buf).
 * false if before transition. i.e oper is not equal to admin, (i.e a
 * previous admin command is waiting to be transitioned to oper state
 * and est_oper->buf not match with est_oper->buf).
 */
static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
					 int *admin)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	/* Which fetch RAM buffer the hardware is actively running */
	val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
	*oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);

	/* Which buffer software last selected for the admin schedule */
	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	*admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);

	return *admin == *oper;
}
/* am65_cpsw_port_est_get_free_buf_num() - Get free buffer number for
 * Admin to program the new schedule.
 *
 * Logic as follows:-
 * If oper is same as admin, return the other buffer (!oper) as the admin
 * buffer.  If oper is not the same, driver let the current oper to continue
 * as it is in the process of transitioning from admin -> oper.  So keep the
 * oper by selecting the same oper buffer by writing to EST_BUFSEL bit in
 * EST CTL register.  In the second iteration they will match and code returns.
 * The actual buffer to write command is selected later before it is ready
 * to update the schedule.
 */
static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
{
	int oper, admin;
	int roll = 2;

	while (roll--) {
		if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
			return !oper;

		/* admin is not set, so hinder transition as it's not allowed
		 * to touch memory in-flight, by targeting same oper buf.
		 */
		am65_cpsw_port_est_assign_buf_num(ndev, oper);

		dev_info(&ndev->dev,
			 "Prev. EST admin cycle is in transit %d -> %d\n",
			 oper, admin);
	}

	/* NOTE(review): original text was truncated after the loop; falling
	 * back to the current oper buffer when the transition did not settle
	 * within the retries matches the upstream driver.
	 */
	return oper;
}
/* NOTE(review): orphaned tail of an EST buffer-selection routine - 'port',
 * 'est_new' and the enclosing function header are not visible in this chunk.
 * Apparently: if the buffer rolled while configuring, force the pending
 * admin schedule to oper before reusing the buffer.
 */
/* rolled buf num means changed buf while configuring */ if (port->qos.est_oper && port->qos.est_admin &&
est_new->buf == port->qos.est_oper->buf)
am65_cpsw_admin_to_oper(ndev);
}
/* Enable/disable EST on one port and keep the switch-common EST enable
 * asserted while any port still uses EST.
 */
static void am65_cpsw_est_set(struct net_device *ndev, int enable)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	int common_enable = 0;
	int i;

	am65_cpsw_port_est_enable(port, enable);

	/* Common enable must stay on if any other port is still using EST */
	for (i = 0; i < common->port_num; i++)
		common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);

	/* NOTE(review): tail reconstructed - the original text was truncated
	 * after the loop; the common enable must also reflect this port's new
	 * state and be committed.
	 */
	common_enable |= enable;
	am65_cpsw_common_est_enable(common, common_enable);
}
/* This update is supposed to be used in any routine before getting real state
 * of admin -> oper transition, particularly it's supposed to be used in some
 * generic routine for providing real state to Taprio Qdisc.
 */
static void am65_cpsw_est_update_state(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int oper, admin;

	/* No admin schedule pending - nothing can have transitioned */
	if (!port->qos.est_admin)
		return;

	/* H/W has not finished swapping admin -> oper yet */
	if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
		return;

	am65_cpsw_admin_to_oper(ndev);
}
/* NOTE(review): this region is corrupted extraction output - the header of
 * am65_est_cmd_ns_to_cnt() is fused with the tail of a schedule-list writer,
 * and the taprio action/set/link-state code below lacks enclosing headers.
 * Restore from the upstream TI am65-cpsw-qos driver before relying on it.
 */
/* Fetch command count it's number of bytes in Gigabit mode or nibbles in * 10/100Mb mode. So, having speed and time in ns, recalculate ns to number of * bytes/nibbles that can be sent while transmission on given speed.
*/ staticint am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
{
u64 temp;
/* Fragment: tail of a fetch-RAM writer loop fused into this body -
 * 'cmd', 'addr', 'fetch_cnt' are not declared here.
 */
writel(cmd, addr);
addr += 4;
} while (fetch_cnt);
return addr;
}
staticint am65_cpsw_est_calc_cmd_num(struct net_device *ndev, struct tc_taprio_qopt_offload *taprio, int link_speed)
{ int i, cmd_cnt, cmd_sum = 0;
u32 fetch_cnt;
/* Only gate-set entries are supported by the fetch RAM format */
for (i = 0; i < taprio->num_entries; i++) { if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
dev_err(&ndev->dev, "Only SET command is supported"); return -EINVAL;
}
/* Fragment: body of a schedule-list writer fused in here - 'est_new',
 * 'all_fetch_allow', 'fetch_allow', 'ram_addr', 'max_ram_addr' are not
 * declared in this scope.
 */
if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
dev_info(&ndev->dev, "next scheds after %d have no impact", i + 1); break;
}
all_fetch_allow |= fetch_allow;
}
/* end cmd, enabling non-timed queues for potential over cycle time */ if (ram_addr < max_ram_addr)
writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}
/* Fragment: taprio action decision (TACT_*) - compares a new schedule
 * against the currently operating one to decide stop/skip/program.
 */
if (taprio_new->cycle_time != taprio_oper->cycle_time) return TACT_NEED_STOP;
/* in order to avoid timer reset get base_time form oper taprio */ if (!taprio_new->base_time && taprio_oper)
taprio_new->base_time = taprio_oper->base_time;
if (taprio_new->base_time == taprio_oper->base_time) return TACT_SKIP_PROG;
/* base times are cycle synchronized */
diff = taprio_new->base_time - taprio_oper->base_time;
diff = diff < 0 ? -diff : diff; if (diff % taprio_new->cycle_time) return TACT_NEED_STOP;
/* Fragment: taprio programming - roll a past base_time forward by whole
 * cycles so the schedule starts in the future, then enable EST.
 */
/* If the base-time is in the past, start schedule from the time: * base_time + (N*cycle_time) * where N is the smallest possible integer such that the above * time is in the future.
*/
cur_time = am65_cpts_ns_gettime(cpts); if (est_new->taprio.base_time < cur_time) {
n = div64_u64(cur_time - est_new->taprio.base_time, est_new->taprio.cycle_time);
est_new->taprio.base_time += (n + 1) * est_new->taprio.cycle_time;
}
am65_cpsw_est_set(ndev, 1);
if (tact == TACT_PROG) {
ret = am65_cpsw_timer_set(ndev, est_new); if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to set cycle time"); goto fail;
}
}
/* Fragment: link-state handling - purge the schedule if the link stayed
 * down for more than a second.
 */
if (port->qos.link_down_time) {
cur_time = ktime_get();
delta = ktime_us_delta(cur_time, port->qos.link_down_time); if (delta > USEC_PER_SEC) {
dev_err(&ndev->dev, "Link has been lost too long, stopping TAS"); goto purge_est;
}
}
/* NOTE(review): fragment of a clsflower policer-add routine - the enclosing
 * function header is missing ('rule', 'extack', 'match', 'port',
 * 'rate_pkt_ps' are declared there).  Validates the match is a pure
 * destination-MAC match, then installs a broadcast rate limit in the ALE.
 */
if (flow_rule_match_has_control_flags(rule, extack)) return -EOPNOTSUPP;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address"); return -EOPNOTSUPP;
}
flow_rule_match_eth_addrs(rule, &match);
if (!is_zero_ether_addr(match.mask->src)) {
NL_SET_ERR_MSG_MOD(extack, "Matching on source MAC not supported"); return -EOPNOTSUPP;
}
if (is_broadcast_ether_addr(match.key->dst) &&
is_broadcast_ether_addr(match.mask->dst)) {
ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps); if (ret) return ret;
staticint am65_cpsw_qos_clsflower_policer_validate(conststruct flow_action *action, conststruct flow_action_entry *act, struct netlink_ext_ack *extack)
{ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop"); return -EOPNOTSUPP;
}
if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok"); return -EOPNOTSUPP;
}
if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
!flow_action_is_last_entry(action, act)) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is ok, but action is not last"); return -EOPNOTSUPP;
}
if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
act->police.avrate || act->police.overhead) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when bytes per second/peakrate/avrate/overhead is configured"); return -EOPNOTSUPP;
}
/* NOTE(review): two unrelated fragments fused together, both missing their
 * enclosing function headers.  First: a TX channel rate-limit setup rejecting
 * out-of-order (non hi->lo) enablement.  Second: link-down timestamping used
 * later to decide whether TAS must be purged.
 */
if (ch_msk) {
dev_err(common->dev, "TX rate limiting has to be enabled sequentially hi->lo tx_rate_msk:%x tx_rate_msk_new:%x\n",
common->tx_ch_rate_msk, tx_ch_rate_msk_new);
ret = -EINVAL; goto exit_put;
}
/* Record the moment the link went down, only on the first transition */
if (!port->qos.link_down_time)
port->qos.link_down_time = ktime_get();
}
Messung V0.5
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.