/* * Copyright (c) 2016, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/* Period of the periodic stats-query work (in jiffies). */
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
#define MLX5_INIT_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
/* Synchronization notes
 *
 * Access to counter array:
 * - create - mlx5_fc_create() (user context)
 *   - inserts the counter into the xarray.
 *
 * - destroy - mlx5_fc_destroy() (user context)
 *   - erases the counter from the xarray and releases it.
 *
 * - query mlx5_fc_query(), mlx5_fc_query_cached{,_raw}() (user context)
 *   - user should not access a counter after destroy.
 *
 * - bulk query (single thread workqueue context)
 *   - create: query relies on 'lastuse' to avoid updating counters added
 *             around the same time as the current bulk cmd.
 *   - destroy: destroyed counters will not be accessed, even if they are
 *              destroyed during a bulk query command.
 */
static void mlx5_fc_stats_query_all_counters(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
	u32 bulk_len = fc_stats->bulk_query_len;
	XA_STATE(xas, &fc_stats->counters, 0);
	u32 *data = fc_stats->bulk_query_out;
	struct mlx5_fc *counter;
	u32 last_bulk_id = 0;
	u64 bulk_query_time;
	u32 bulk_base_id;
	int err;

	xas_lock(&xas);
	xas_for_each(&xas, counter, U32_MAX) {
		if (xas_retry(&xas, counter))
			continue;
		if (unlikely(counter->id >= last_bulk_id)) {
			/* Start new bulk query. */
			/* First id must be aligned to 4 when using bulk query. */
			bulk_base_id = counter->id & ~0x3;
			last_bulk_id = bulk_base_id + bulk_len;
			/* The lock is released while querying the hw and reacquired after. */
			xas_unlock(&xas);
			/* The same id needs to be processed again in the next loop iteration. */
			xas_reset(&xas);
			bulk_query_time = jiffies;
			err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len, data);
			if (err) {
				mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
				return;
			}
			xas_lock(&xas);
			continue;
		}
		/* Do not update counters added after bulk query was started. */
		if (time_after64(bulk_query_time, counter->cache.lastuse))
			update_counter_cache(counter->id - bulk_base_id, data,
					     &counter->cache);
	}
	xas_unlock(&xas);
}
/* Grow the bulk query buffer to max if not maxed and enough counters are present. */ if (unlikely(fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
mlx5_fc_num_counters(fc_stats) > get_init_bulk_query_len(dev)))
mlx5_fc_stats_bulk_query_buf_realloc(dev, get_max_bulk_query_len(dev));
if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
mlx5_core_err(dev, "Freeing bulk before all counters were released\n"); return -EBUSY;
}
pool_index.fs_bulk = fs_bulk;
pool_index.index = fc->id - fc->bulk->base_id; if (mlx5_fs_pool_release_index(fc_pool, &pool_index))
mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
}
/** * mlx5_fc_local_create - Allocate mlx5_fc struct for a counter which * was already acquired using its counter id and bulk data. * * @counter_id: counter acquired counter id * @offset: counter offset from bulk base * @bulk_size: counter's bulk size as was allocated * * Return: Pointer to mlx5_fc on success, ERR_PTR otherwise.
*/ struct mlx5_fc *
mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
{ struct mlx5_fc_bulk *fc_bulk; struct mlx5_fc *counter;
counter = kzalloc(sizeof(*counter), GFP_KERNEL); if (!counter) return ERR_PTR(-ENOMEM);
fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL); if (!fc_bulk) {
kfree(counter); return ERR_PTR(-ENOMEM);
}