sf_index = event->function_id - base_id;
sf_dev = xa_load(&table->devices, sf_index); switch (event->new_vhca_state) { case MLX5_VHCA_STATE_INVALID: case MLX5_VHCA_STATE_ALLOCATED: if (sf_dev)
mlx5_sf_dev_del(table->dev, sf_dev, sf_index); break; case MLX5_VHCA_STATE_TEARDOWN_REQUEST: if (sf_dev)
mlx5_sf_dev_del(table->dev, sf_dev, sf_index); else
mlx5_core_err(table->dev, "SF DEV: teardown state for invalid dev index=%d sfnum=0x%x\n",
sf_index, event->sw_function_id); break; case MLX5_VHCA_STATE_ACTIVE: if (!sf_dev)
mlx5_sf_dev_add(table->dev, sf_index, event->function_id,
event->sw_function_id); break; default: break;
} return 0;
}
staticint mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
{ struct mlx5_core_dev *dev = table->dev;
u16 max_functions;
u16 function_id; int err = 0; int i;
max_functions = mlx5_sf_max_functions(dev);
function_id = mlx5_sf_start_function_id(dev); /* Arm the vhca context as the vhca event notifier */ for (i = 0; i < max_functions; i++) {
err = mlx5_vhca_event_arm(dev, function_id); if (err) return err;
work_ctx = container_of(_work, struct mlx5_sf_dev_active_work_ctx, work); if (work_ctx->table->stop_active_wq) goto out; /* Don't probe device which is already probe */ if (!xa_load(&work_ctx->table->devices, work_ctx->sf_index))
mlx5_sf_dev_add(work_ctx->table->dev, work_ctx->sf_index,
work_ctx->event.function_id, work_ctx->event.sw_function_id); /* There is a race where SF got inactive after the query * above. e.g.: the query returns that the state of the * SF is active, and after that the eswitch manager set it to * inactive. * This case cannot be managed in SW, since the probing of the * SF is on one system, and the inactivation is on a different * system. * If the inactive is done after the SF perform init_hca(), * the SF will fully probe and then removed. If it was * done before init_hca(), the SF probe will fail.
*/
out:
kfree(work_ctx);
}
/* In case SFs are generated externally, probe active SFs */ staticvoid mlx5_sf_dev_queue_active_works(struct work_struct *_work)
{ struct mlx5_sf_dev_table *table = container_of(_work, struct mlx5_sf_dev_table, work);
u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; struct mlx5_sf_dev_active_work_ctx *work_ctx; struct mlx5_core_dev *dev = table->dev;
u16 max_functions;
u16 function_id;
u16 sw_func_id; int err = 0; int wq_idx;
u8 state; int i;
max_functions = mlx5_sf_max_functions(dev);
function_id = mlx5_sf_start_function_id(dev); for (i = 0; i < max_functions; i++, function_id++) { if (table->stop_active_wq) return;
err = mlx5_cmd_query_vhca_state(dev, function_id, out, sizeof(out)); if (err) /* A failure of specific vhca doesn't mean others will * fail as well.
*/ continue;
state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); if (state != MLX5_VHCA_STATE_ACTIVE) continue;
/* In case SFs are generated externally, probe active SFs.
 *
 * On an eswitch manager the SF table is local and nothing needs probing, so
 * return 0 immediately. Otherwise create a dedicated single-threaded
 * workqueue and queue the scan work (mlx5_sf_dev_queue_active_works) on it.
 *
 * Returns 0 on success, -ENOMEM if the workqueue cannot be created.
 * (Fixed "staticint" — missing space — from the garbled source text.)
 */
static int mlx5_sf_dev_create_active_works(struct mlx5_sf_dev_table *table)
{
	if (MLX5_CAP_GEN(table->dev, eswitch_manager))
		return 0; /* the table is local */

	/* Use a workqueue to probe active SFs, which are in large
	 * quantity and may take up to minutes to probe.
	 */
	table->active_wq = create_singlethread_workqueue("mlx5_active_sf");
	if (!table->active_wq)
		return -ENOMEM;
	INIT_WORK(&table->work, &mlx5_sf_dev_queue_active_works);
	queue_work(table->active_wq, &table->work);
	return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.