/* * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/*
 * NOTE(review): this function is truncated by the extraction — only the
 * power-of-two validation of @num is visible and there is no closing brace;
 * the remainder of mlx4_bitmap_init (field initialization, bitmap
 * allocation) is missing and must be restored from the original source.
 */
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top)
{ /* num must be a power of 2 */ if (num != roundup_pow_of_two(num)) return -EINVAL;
/*
 * NOTE(review): this span looks like a corrupted extraction that fused two
 * functions.  The header and the prio-list handling below belong to
 * __mlx4_zone_remove_one_entry(), but from the list_for_each_entry_from()
 * loop onward the body references identifiers (curr_node, res, count,
 * align, skip_mask, uid, puid) that are never declared here — that part
 * appears to come from a zone allocation routine.  `staticvoid` is also
 * missing a space.  Both functions should be restored from the original
 * source before building.
 */
/* Should be called under a lock */ staticvoid __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
{ struct mlx4_zone_allocator *zone_alloc = entry->allocator;
/* If a later sibling shares this entry's priority, promote it onto the
 * prio list so the priority level survives this entry's removal. */
if (!list_empty(&entry->prio_list)) { /* Check if we need to add an alternative node to the prio list */ if (!list_is_last(&entry->list, &zone_alloc->entries)) { struct mlx4_zone_entry *next = list_first_entry(&entry->list,
typeof(*next),
list);
if (next->priority == entry->priority)
list_add_tail(&next->prio_list, &entry->prio_list);
}
/* NOTE(review): the fused allocation-loop fragment begins here — it walks
 * the remaining zones trying a ranged bitmap allocation, adjusting the
 * result by the zone offset on success. */
list_for_each_entry_from(curr_node, &zone_alloc->entries, list) {
res = mlx4_bitmap_alloc_range(curr_node->bitmap, count,
align, skip_mask); if (res != (u32)-1) {
res += curr_node->offset;
uid = curr_node->uid; goto out;
}
}
}
out: if (NULL != puid && res != (u32)-1)
*puid = uid; return res;
}
/*
 * __mlx4_free_from_zone - return a range of objects to a zone's bitmap.
 * @zone:  zone the objects were allocated from
 * @obj:   first object to free, in the zone allocator's global numbering
 * @count: number of consecutive objects to free
 *
 * Must be called with the zone allocator's lock held.  The zone-global
 * object number is translated back to a bitmap-local index by subtracting
 * the zone's offset before handing it to mlx4_bitmap_free_range().
 *
 * Fix: the original had `staticvoid` (missing space), which does not
 * compile.
 */
static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj,
				  u32 count)
{
	mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count,
			       zone->use_rr);
}
/*
 * NOTE(review): truncated by the extraction — only the local declaration
 * survives; the list walk matching each zone's uid against @uid and the
 * return statement are missing and must be restored from the original
 * source.  `staticstruct` is also missing a space.
 */
/* Should be called under a lock */ staticstruct mlx4_zone_entry *__mlx4_find_zone_by_uid( struct mlx4_zone_allocator *zones, u32 uid)
{ struct mlx4_zone_entry *zone;
/*
 * mlx4_zone_remove_one - detach and destroy the zone registered under @uid.
 * @zones: zone allocator to search
 * @uid:   identifier of the zone to remove
 *
 * Takes the allocator lock, looks the zone up by uid and, when found,
 * unlinks it from the allocator's lists.  The entry itself is freed after
 * the lock is dropped; kfree(NULL) is a harmless no-op when no zone
 * matched.
 *
 * Returns 0 on success, -1 when no zone with @uid exists.
 */
int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
{
	struct mlx4_zone_entry *victim;
	int err = 0;

	spin_lock(&zones->lock);

	victim = __mlx4_find_zone_by_uid(zones, uid);
	if (victim)
		__mlx4_zone_remove_one_entry(victim);
	else
		err = -1;

	spin_unlock(&zones->lock);
	kfree(victim);

	return err;
}
/*
 * NOTE(review): truncated by the extraction — the block ends mid-loop
 * (curr_dist is computed but the candidate comparison, loop closure and
 * return of zone_candidate are missing) and must be restored from the
 * original source.  `staticstruct` is also missing a space.
 */
/* Should be called under a lock */ staticstruct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique( struct mlx4_zone_allocator *zones, u32 obj)
{ struct mlx4_zone_entry *zone, *zone_candidate = NULL;
u32 dist = (u32)-1;
/* Search for the smallest zone that this obj could be * allocated from. This is done in order to handle * situations when small bitmaps are allocated from bigger * bitmaps (and the allocated space is marked as reserved in * the bigger bitmap.
 */
/* Walk every zone; only zones whose masked offset range covers obj are
 * candidates.  The visible fragment computes the zone's effective length
 * as the distance metric for picking the smallest covering zone. */
list_for_each_entry(zone, &zones->entries, list) { if (obj >= zone->offset) {
u32 mobj = (obj - zone->offset) & zones->mask;
if (mobj < zone->bitmap->max) {
u32 curr_dist = zone->bitmap->effective_len;
/*
 * NOTE(review): truncated by the extraction — between the direct-alloc
 * fast path and the per-page dma_alloc_coherent() loop, the setup of
 * buf->nbufs/page_shift and the page_list allocation are missing, as are
 * the err_free unwind label and the function tail.  Restore from the
 * original source before building.
 */
/* Handling for queue buffers -- we allocate a bunch of memory and * register it in a memory region at HCA virtual address 0. If the * requested size is > max_direct, we split the allocation into * multiple pages, so we don't require too much contiguous memory.
 */ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, struct mlx4_buf *buf)
{ if (size <= max_direct) { return mlx4_buf_direct_alloc(dev, size, buf);
} else {
dma_addr_t t; int i;
/* One DMA-coherent page per chunk; any failure jumps to the (missing)
 * err_free unwind path. */
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
dma_alloc_coherent(&dev->persist->pdev->dev,
PAGE_SIZE, &t, GFP_KERNEL); if (!buf->page_list[i].buf) goto err_free;
/*
 * mlx4_buf_free - release a buffer allocated by mlx4_buf_alloc().
 * @dev:  device the buffer belongs to
 * @size: size of a direct (single-chunk) buffer, in bytes
 * @buf:  buffer descriptor to tear down
 *
 * A single-chunk buffer is freed as one coherent region of @size bytes;
 * a multi-chunk buffer is freed one PAGE_SIZE chunk at a time (skipping
 * chunks that were never allocated), followed by the page_list array
 * itself.
 */
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int chunk;

	if (buf->nbufs == 1) {
		dma_free_coherent(&dev->persist->pdev->dev, size,
				  buf->direct.buf, buf->direct.map);
		return;
	}

	for (chunk = 0; chunk < buf->nbufs; ++chunk) {
		if (!buf->page_list[chunk].buf)
			continue;
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  buf->page_list[chunk].buf,
				  buf->page_list[chunk].map);
	}
	kfree(buf->page_list);
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.