/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};
struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
	const char		*func_name;
};
/* For Debug uses */
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EDQUOT;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}
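	/*
	 * Editor's sketch (not from the driver): the reserved-area split
	 * above, reduced to a pure helper with hypothetical values. With
	 * guaranteed = 4, allocated = 6 and count = 5, only
	 * allocated - guaranteed = 2 units came from the shared free pool,
	 * so from_rsvd = 5 - 2 = 3 units go back to the slave's guaranteed
	 * reservation.
	 *
	 *	static int example_from_rsvd(int allocated, int guaranteed,
	 *				     int count)
	 *	{
	 *		if (allocated - count >= guaranteed)
	 *			return 0;	// stays within the free pool
	 *		if (allocated - guaranteed > 0)
	 *			return count - (allocated - guaranteed);
	 *		return count;		// entirely reserved-backed
	 *	}
	 */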
static int
mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
				 struct resource_allocator *res_alloc,
				 int vf)
{
	struct mlx4_active_ports actv_ports;
	int ports, counters_guaranteed;

	/* For master, only allocate according to the number of phys ports */
	if (vf == mlx4_master_func_num(dev))
		return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;

	/* calculate real number of ports for the VF */
	actv_ports = mlx4_get_active_ports(dev, vf);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;

	/* If we do not have enough counters for this VF, do not
	 * allocate any for it. '-1' to reduce the sink counter.
	 */
	if ((res_alloc->res_reserved + counters_guaranteed) >
	    (dev->caps.max_counters - 1))
		return 0;

	return counters_guaranteed;
}
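/*
 * Worked example (hypothetical numbers): with MLX4_VF_COUNTERS_PER_PORT == 2
 * and a VF active on both ports of a dual-port HCA, counters_guaranteed is 4.
 * If res_reserved is already 124 and max_counters is 128, then
 * 124 + 4 > 127 (one counter is always held back as the sink counter), so
 * this VF ends up guaranteed 0 counters instead.
 */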
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kcalloc(dev->num_slaves, sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];

		res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
						 sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
						      sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated =
				kcalloc(MLX4_MAX_PORTS *
					(dev->persist->num_vfs + 1),
					sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated =
				kcalloc(dev->persist->num_vfs + 1,
					sizeof(int), GFP_KERNEL);
		/* Reduce the sink counter */
		if (i == RES_COUNTER)
			res_alloc->res_free = dev->caps.max_counters - 1;

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for
					 * both ports.
					 */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] =
						MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] =
					mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
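/*
 * Editor's note: the no_mem_err unwind above leans on kfree(NULL) being a
 * no-op, so it can unconditionally free the quota/guaranteed/allocated
 * arrays of every resource type, including those never allocated, and it
 * NULLs each pointer so a later cleanup pass cannot double-free them.
 */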
	err = handle_counter(dev, qpc, slave, port);
	if (err)
		goto out;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
						    MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};
static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
{
	switch (t) {
	case RES_QP: return "QP";
	case RES_CQ: return "CQ";
	case RES_SRQ: return "SRQ";
	case RES_XRCD: return "XRCD";
	case RES_MPT: return "MPT";
	case RES_MTT: return "MTT";
	case RES_MAC: return "MAC";
	case RES_VLAN: return "VLAN";
	case RES_COUNTER: return "COUNTER";
	case RES_FS_RULE: return "FS_RULE";
	case RES_EQ: return "EQ";
	default: return "INVALID RESOURCE";
	}
}
	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		mlx4_warn(dev,
			  "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
			  func_name, slave, res_id,
			  mlx4_resource_type_to_str(type),
			  r->func_name);
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}
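	/*
	 * Editor's note (assumption from the fields above): res_common's
	 * func_name is presumably stamped with the caller's name when a
	 * resource is taken, which is what lets the -EBUSY warning report
	 * which function currently holds the busy resource.
	 */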
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id, extra);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
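/*
 * Usage sketch (hypothetical values, for illustration only): allocating a
 * tracker entry for QP number 0x40 on behalf of slave 1. alloc_tr()
 * dispatches to the per-type constructor and stamps the owner; linking the
 * entry into the tracker's rb-tree and per-slave list is left to the caller.
 *
 *	struct res_common *r = alloc_tr(0x40, RES_QP, 1, 0);
 *	if (!r)
 *		return -ENOMEM;
 */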
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_counter *data)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	int *counters_arr;
	int i = 0, err = 0;

	memset(data, 0, sizeof(*data));

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return -ENOMEM;
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -EOPNOTSUPP;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
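/*
 * Editor's note: rem_res_range() is a validate-then-commit pattern. Both
 * passes run inside one spin_lock_irq(mlx4_tlock(dev)) critical section,
 * so either every id in [base, base + count) passes the existence,
 * ownership and removability checks and is then removed, or nothing is
 * removed at all.
 */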
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;
	u8 flags;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param) & 0xffffff;
		/* Turn off all unsupported QP allocation flags that the
		 * slave tries to set.
		 */
		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;
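		/*
		 * Worked example (hypothetical mask value): a slave packs the
		 * QP count into the low 24 bits and allocation flags into the
		 * high 8 bits of the low dword. With
		 * dev->caps.alloc_res_qp_mask == 0x01, an in_param low dword
		 * of 0x05000010 yields count = 0x10 and
		 * flags = 0x05 & 0x01 = 0x01; unsupported flag bits are
		 * silently dropped rather than rejected.
		 */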
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;
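		/*
		 * Editor's note (sketch): the MPT "index" from
		 * __mlx4_mpt_reserve() carries key bits above the table
		 * offset; masking with mpt_mask(dev) strips it to the id the
		 * tracker keys on, while the full index is what the slave
		 * gets back via set_param_l() and must present again for
		 * RES_OP_MAP_ICM.
		 */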
	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the mac the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(dev, slave, port);
	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	port = mlx4_slave_convert_port(dev, slave, port);
	if (port < 0)
		return -EINVAL;
	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;