/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
/* Forward declarations for the flow-key mask helpers defined later in
 * this file.
 */
static bool is_wildcard(void *mask, int len);
static bool is_exactmatch(void *mask, int len);

/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid the fid of the VF
*/ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{ struct bnxt *bp;
/* check if dev belongs to the same switch */ if (!netdev_port_same_parent_id(pf_bp->dev, dev)) {
netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch\n",
dev->ifindex); return BNXT_FID_INVALID;
}
/* Is dev a VF-rep? */ if (bnxt_dev_is_vf_rep(dev)) return bnxt_vf_rep_get_fid(dev);
/* Key & Mask from the stack comes unaligned in multiple iterations of 4 bytes * each(u32). * This routine consolidates such multiple unaligned values into one * field each for Key & Mask (for src and dst macs separately) * For example, * Mask/Key Offset Iteration * ========== ====== ========= * dst mac 0xffffffff 0 1 * dst mac 0x0000ffff 4 2 * * src mac 0xffff0000 4 1 * src mac 0xffffffff 8 2 * * The above combination coming from the stack will be consolidated as * Mask/Key * ============== * src mac: 0xffffffffffff * dst mac: 0xffffffffffff
*/ staticvoid bnxt_set_l2_key_mask(u32 part_key, u32 part_mask,
u8 *actual_key, u8 *actual_mask)
{
u32 key = get_unaligned((u32 *)actual_key);
u32 mask = get_unaligned((u32 *)actual_mask);
if (unlikely(bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask))) return -EINVAL;
if (!is_wildcard(ð_addr_mask[0], ETH_ALEN)) { if (!is_exactmatch(ð_addr_mask[0], ETH_ALEN)) return -EINVAL; /* FW expects dmac to be in u16 array format */
p = eth_addr; for (j = 0; j < 3; j++)
actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j));
}
if (!is_wildcard(ð_addr_mask[ETH_ALEN / 2], ETH_ALEN)) { if (!is_exactmatch(ð_addr_mask[ETH_ALEN / 2], ETH_ALEN)) return -EINVAL; /* FW expects smac to be in u16 array format */
p = ð_addr[ETH_ALEN / 2]; for (j = 0; j < 3; j++)
actions->l2_rewrite_smac[j] = cpu_to_be16(*(p + j));
}
case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
actions->nat.l3_is_ipv4 = false; if (offset >= offsetof(struct ipv6hdr, saddr) &&
offset < offset_of_ip6_daddr) { /* 16 byte IPv6 address comes in 4 iterations of * 4byte chunks each
*/
actions->nat.src_xlate = true;
idx = (offset - offset_of_ip6_saddr) / 4; /* First 4bytes will be copied to idx 0 and so on */
actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
} elseif (offset >= offset_of_ip6_daddr &&
offset < offset_of_ip6_daddr + 16) {
actions->nat.src_xlate = false;
idx = (offset - offset_of_ip6_daddr) / 4;
actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
} else {
netdev_err(bp->dev, "%s: IPv6_hdr: Invalid pedit field\n",
__func__); return -EINVAL;
} break; case FLOW_ACT_MANGLE_HDR_TYPE_TCP: case FLOW_ACT_MANGLE_HDR_TYPE_UDP: /* HW does not support L4 rewrite alone without L3 * rewrite
*/ if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) {
netdev_err(bp->dev, "Need to specify L3 rewrite as well\n"); return -EINVAL;
} if (actions->nat.src_xlate)
actions->nat.l4.ports.sport = htons(val); else
actions->nat.l4.ports.dport = htons(val);
netdev_dbg(bp->dev, "actions->nat.sport = %d dport = %d\n",
actions->nat.l4.ports.sport,
actions->nat.l4.ports.dport); break; default:
netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n",
__func__); return -EINVAL;
} return 0;
}
staticint bnxt_tc_parse_actions(struct bnxt *bp, struct bnxt_tc_actions *actions, struct flow_action *flow_action, struct netlink_ext_ack *extack)
{ /* Used to store the L2 rewrite mask for dmac (6 bytes) followed by * smac (6 bytes) if rewrite of both is specified, otherwise either * dmac or smac
*/
u16 eth_addr_mask[ETH_ALEN] = { 0 }; /* Used to store the L2 rewrite key for dmac (6 bytes) followed by * smac (6 bytes) if rewrite of both is specified, otherwise either * dmac or smac
*/
u16 eth_addr[ETH_ALEN] = { 0 }; struct flow_action_entry *act; int i, rc;
if (!flow_action_has_entries(flow_action)) {
netdev_info(bp->dev, "no actions\n"); return -EINVAL;
}
if (!flow_action_basic_hw_stats_check(flow_action, extack)) return -EOPNOTSUPP;
flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_DROP:
actions->flags |= BNXT_TC_ACTION_FLAG_DROP; return 0; /* don't bother with other actions */ case FLOW_ACTION_REDIRECT:
rc = bnxt_tc_parse_redir(bp, actions, act); if (rc) return rc; break; case FLOW_ACTION_VLAN_POP: case FLOW_ACTION_VLAN_PUSH: case FLOW_ACTION_VLAN_MANGLE:
rc = bnxt_tc_parse_vlan(bp, actions, act); if (rc) return rc; break; case FLOW_ACTION_TUNNEL_ENCAP:
rc = bnxt_tc_parse_tunnel_set(bp, actions, act); if (rc) return rc; break; case FLOW_ACTION_TUNNEL_DECAP:
actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP; break; /* Packet edit: L2 rewrite, NAT, NAPT */ case FLOW_ACTION_MANGLE:
rc = bnxt_tc_parse_pedit(bp, actions, act, i,
(u8 *)eth_addr,
(u8 *)eth_addr_mask); if (rc) return rc; break; default: break;
}
}
if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
rc = bnxt_fill_l2_rewrite_fields(actions, eth_addr,
eth_addr_mask); if (rc) return rc;
}
if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { /* dst_fid is PF's fid */
actions->dst_fid = bp->pf.fw_fid;
} else { /* find the FID from dst_dev */
actions->dst_fid =
bnxt_flow_get_dst_fid(bp, actions->dst_dev); if (actions->dst_fid == BNXT_FID_INVALID) return -EINVAL;
}
}
/* Return the total prefix length of an IPv6 mask, computed by summing
 * the per-word prefix length of each of its four 32-bit words.
 * NOTE(review): inet_mask_len() is applied to each word independently,
 * so this assumes the mask is a contiguous prefix — confirm callers
 * never pass non-prefix masks.
 */
static int ipv6_mask_len(struct in6_addr *mask)
{
	int mask_len = 0, i;

	for (i = 0; i < 4; i++)
		mask_len += inet_mask_len(mask->s6_addr32[i]);

	return mask_len;
}
/* Return true if the @len-byte mask at @mask is fully wildcarded,
 * i.e. every byte is zero. A zero-length mask counts as wildcarded.
 */
static bool is_wildcard(void *mask, int len)
{
	const unsigned char *p = mask;
	int i;

	for (i = 0; i < len; i++) {
		if (p[i] != 0)
			return false;
	}
	return true;
}
/* Return true if the @len-byte mask at @mask is a full exact-match mask,
 * i.e. every byte is 0xff. A zero-length mask counts as exact-match.
 */
static bool is_exactmatch(void *mask, int len)
{
	const unsigned char *p = mask;
	int i;

	for (i = 0; i < len; i++) {
		if (p[i] != 0xff)
			return false;
	}
	return true;
}
/* A VLAN TCI match can be offloaded only when the VLAN id is an exact
 * match and the VLAN priority (PCP) is either exactly zero or fully
 * wildcarded.
 */
static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
				__be16 vlan_tci)
{
	return is_vid_exactmatch(vlan_tci_mask) &&
	       ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
		 is_vlan_pcp_zero(vlan_tci)) ||
		is_vlan_pcp_wildcarded(vlan_tci_mask));
}
staticbool bits_set(void *key, int len)
{ const u8 *p = key; int i;
for (i = 0; i < len; i++) if (p[i] != 0) returntrue;
if (flow->l2_key.num_vlans > 0) {
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE; /* FW expects the inner_vlan_tci value to be set * in outer_vlan_tci when num_vlans is 1 (which is * always the case in TC.)
*/
req->outer_vlan_tci = flow->l2_key.inner_vlan_tci;
}
/* If all IP and L4 fields are wildcarded then this is an L2 flow */ if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
} else {
flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID; /* tunnel_id is wrongly defined in hsi defn. as __le32 */
req->tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
}
/* Eventhough the decap_handle returned by hwrm_cfa_decap_filter_alloc * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
*/
req->l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
req->enables = cpu_to_le32(enables);
/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 *
 * Returns 0 on success (with *ref_flow_handle set, or 0xffff when no
 * other flow shares the L2 key) or -1 if the L2 node lookup/alloc fails.
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
			    struct bnxt_tc_flow_node *flow_node,
			    __le16 *ref_flow_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *l2_node;

	l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
				      tc_info->l2_ht_params,
				      &flow->l2_key);
	if (!l2_node)
		return -1;

	/* If any other flow is using this l2_node, use its flow_handle
	 * as the ref_flow_handle
	 */
	if (l2_node->refcount > 0) {
		ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
						 struct bnxt_tc_flow_node,
						 l2_list_node);
		*ref_flow_handle = ref_flow_node->flow_handle;
	} else {
		/* No other flow shares this L2 key yet */
		*ref_flow_handle = cpu_to_le16(0xffff);
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching l2 key can use the flow_handle of this flow
	 * as their ref_flow_handle
	 */
	flow_node->l2_node = l2_node;
	list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
	l2_node->refcount++;
	return 0;
}
/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 *
 * Returns true if the flow can be offloaded, false otherwise (with an
 * informational message logged for the rejected aspect).
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
	/* If L4 ports are specified then ip_proto must be TCP or UDP */
	if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
	    (flow->l4_key.ip_proto != IPPROTO_TCP &&
	     flow->l4_key.ip_proto != IPPROTO_UDP)) {
		netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports\n",
			    flow->l4_key.ip_proto);
		return false;
	}

	/* Currently source/dest MAC cannot be partial wildcard */
	if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
	    !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
		return false;
	}
	if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
	    !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
		return false;
	}

	/* Currently VLAN fields cannot be partial wildcard */
	if (bits_set(&flow->l2_key.inner_vlan_tci,
		     sizeof(flow->l2_key.inner_vlan_tci)) &&
	    !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
				 flow->l2_key.inner_vlan_tci)) {
		netdev_info(bp->dev, "Unsupported VLAN TCI\n");
		return false;
	}
	if (bits_set(&flow->l2_key.inner_vlan_tpid,
		     sizeof(flow->l2_key.inner_vlan_tpid)) &&
	    !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
			   sizeof(flow->l2_mask.inner_vlan_tpid))) {
		netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
		return false;
	}

	/* Currently Ethertype must be set */
	if (!is_exactmatch(&flow->l2_mask.ether_type,
			   sizeof(flow->l2_mask.ether_type))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
		return false;
	}

	return true;
}
/* Returns the final refcount of the node on success * or a -ve error code on failure
*/ staticint bnxt_tc_put_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table, struct rhashtable_params *ht_params, struct bnxt_tc_tunnel_node *tunnel_node)
{ int rc;
decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
tc_info->decap_l2_ht_params,
l2_key); if (!decap_l2_node) return -1;
/* If any other flow is using this decap_l2_node, use it's decap_handle * as the ref_decap_handle
*/ if (decap_l2_node->refcount > 0) {
ref_flow_node =
list_first_entry(&decap_l2_node->common_l2_flows, struct bnxt_tc_flow_node,
decap_l2_list_node);
*ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
} else {
*ref_decap_handle = INVALID_TUNNEL_HANDLE;
}
/* Insert the l2_node into the flow_node so that subsequent flows * with a matching decap l2 key can use the decap_filter_handle of * this flow as their ref_decap_handle
*/
flow_node->decap_l2_node = decap_l2_node;
list_add(&flow_node->decap_l2_list_node,
&decap_l2_node->common_l2_flows);
decap_l2_node->refcount++; return 0;
}
rt = ip_route_output_key(dev_net(real_dst_dev), &flow); if (IS_ERR(rt)) {
netdev_info(bp->dev, "no route to %pI4b\n", &flow.daddr); return -EOPNOTSUPP;
}
/* The route must either point to the real_dst_dev or a dst_dev that * uses the real_dst_dev.
*/
dst_dev = rt->dst.dev; if (is_vlan_dev(dst_dev)) { #if IS_ENABLED(CONFIG_VLAN_8021Q) struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);
if (vlan->real_dev != real_dst_dev) {
netdev_info(bp->dev, "dst_dev(%s) doesn't use PF-if(%s)\n",
netdev_name(dst_dev),
netdev_name(real_dst_dev));
rc = -EOPNOTSUPP; goto put_rt;
}
l2_info->inner_vlan_tci = htons(vlan->vlan_id);
l2_info->inner_vlan_tpid = vlan->vlan_proto;
l2_info->num_vlans = 1; #endif
} elseif (dst_dev != real_dst_dev) {
netdev_info(bp->dev, "dst_dev(%s) for %pI4b is not PF-if(%s)\n",
netdev_name(dst_dev), &flow.daddr,
netdev_name(real_dst_dev));
rc = -EOPNOTSUPP; goto put_rt;
}
nbr = dst_neigh_lookup(&rt->dst, &flow.daddr); if (!nbr) {
netdev_info(bp->dev, "can't lookup neighbor for %pI4b\n",
&flow.daddr);
rc = -EOPNOTSUPP; goto put_rt;
}
/* Check if there's another flow using the same tunnel decap. * If not, add this tunnel to the table and resolve the other * tunnel header fields. Ignore src_port in the tunnel_key, * since it is not required for decap filters.
*/
decap_key->tp_src = 0;
decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
&tc_info->decap_ht_params,
decap_key); if (!decap_node) return -ENOMEM;
flow_node->decap_node = decap_node;
if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) goto done;
/* Resolve the L2 fields for tunnel decap * Resolve the route for remote vtep (saddr) of the decap key * Find it's next-hop mac addrs
*/
tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
tun_key.tp_dst = flow->tun_key.tp_dst;
rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info); if (rc) goto put_decap;
/* For getting a decap_filter_handle we first need to check if * there are any other decap flows that share the same tunnel L2 * key and if so, pass that flow's decap_filter_handle as the * ref_decap_handle for this flow.
*/
rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
&ref_decap_handle); if (rc) goto put_decap;
/* Issue the hwrm cmd to allocate a decap filter handle */
rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
ref_decap_handle,
&decap_node->tunnel_handle); if (rc) goto put_decap_l2;
/* Lookup the tunnel encap table and check if there's an encap_handle * alloc'd already. * If not, query L2 info via a route lookup and issue an encap_record_alloc * cmd to FW.
*/ staticint bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, struct bnxt_tc_flow_node *flow_node,
__le32 *encap_handle)
{ struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key; struct bnxt_tc_info *tc_info = bp->tc_info; struct bnxt_tc_tunnel_node *encap_node; int rc;
/* Check if there's another flow using the same tunnel encap. * If not, add this tunnel to the table and resolve the other * tunnel header fields
*/
encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
&tc_info->encap_ht_params,
encap_key); if (!encap_node) return -ENOMEM;
flow_node->encap_node = encap_node;
if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) goto done;
rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info); if (rc) goto put_encap;
/* Allocate a new tunnel encap record */
rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
&encap_node->tunnel_handle); if (rc) goto put_encap;
/* Add a new flow or replace an existing flow. * Notes on locking: * There are essentially two critical sections here. * 1. while adding a new flow * a) lookup l2-key * b) issue HWRM cmd and get flow_handle * c) link l2-key with flow * 2. while deleting a flow * a) unlinking l2-key from flow * A lock is needed to protect these two critical sections. * * The hash-tables are already protected by the rhashtable API.
*/ staticint bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, struct flow_cls_offload *tc_flow_cmd)
{ struct bnxt_tc_flow_node *new_node, *old_node; struct bnxt_tc_info *tc_info = bp->tc_info; struct bnxt_tc_flow *flow;
__le32 tunnel_handle = 0;
__le16 ref_flow_handle; int rc;
/* allocate memory for the new flow and it's node */
new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); if (!new_node) {
rc = -ENOMEM; goto done;
}
new_node->cookie = tc_flow_cmd->cookie;
flow = &new_node->flow;
rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow); if (rc) goto free_node;
/* If a flow exists with the same cookie, delete it */
old_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
tc_info->flow_ht_params); if (old_node)
__bnxt_tc_del_flow(bp, old_node);
/* Check if the L2 part of the flow has been offloaded already. * If so, bump up it's refcnt and get it's reference handle.
*/
mutex_lock(&tc_info->lock);
rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle); if (rc) goto unlock;
/* If the flow involves tunnel encap/decap, get tunnel_handle */
rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle); if (rc) goto put_l2;
/* send HWRM cmd to alloc the flow */
rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
tunnel_handle, new_node); if (rc) goto put_tunnel;
flow->lastused = jiffies;
spin_lock_init(&flow->stats_lock); /* add new flow to flow-table */
rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
tc_info->flow_ht_params); if (rc) goto hwrm_flow_free;
if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
*flow_id = flow_node->flow_id;
/* If flow_id is used to fetch flow stats then: * 1. lower 12 bits of flow_handle must be set to all 1s. * 2. 15th bit of flow_handle must specify the flow * direction (TX/RX).
*/ if (flow_node->flow.l2_key.dir == BNXT_DIR_RX)
handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; else
handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
for (i = 0; i < num_flows; i++) {
stats_batch[i].hw_stats.packets =
le64_to_cpu(resp_packets[i]);
stats_batch[i].hw_stats.bytes =
le64_to_cpu(resp_bytes[i]);
}
}
hwrm_req_drop(bp, req); exit: if (rc)
netdev_info(bp->dev, "error rc=%d\n", rc);
return rc;
}
/* Add val to accum while handling a possible wraparound * of val. Eventhough val is of type u64, its actual width * is denoted by mask and will wrap-around beyond that width.
*/ staticvoid accumulate_val(u64 *accum, u64 val, u64 mask)
{ #define low_bits(x, mask) ((x) & (mask)) #define high_bits(x, mask) ((x) & ~(mask)) bool wrapped = val < low_bits(*accum, mask);
/* The HW counters' width is much less than 64bits.
 * Handle possible wrap-around while updating the stat counters
 */
static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
				  struct bnxt_tc_flow_stats *acc_stats,
				  struct bnxt_tc_flow_stats *hw_stats)
{
	/* accumulate_val() folds the new HW sample into the accumulator,
	 * accounting for wrap-around at the counter width given by the mask.
	 */
	accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
	accumulate_val(&acc_stats->packets, hw_stats->packets,
		       tc_info->packets_mask);
}
staticint
bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows, struct bnxt_tc_stats_batch stats_batch[])
{ struct bnxt_tc_info *tc_info = bp->tc_info; int rc, i;
rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch); if (rc) return rc;
for (i = 0; i < num_flows; i++) { struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; struct bnxt_tc_flow *flow = &flow_node->flow;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.