staticstruct threads_table_entry *threads__table(struct threads *threads, pid_t tid)
{ /* Cast it to handle tid == -1 */ return &threads->table[(unsignedint)tid % THREADS__TABLE_SIZE];
}
/*
 * Identity hash for hashmap keys (tids). The shard selection has already
 * consumed the low bits of the tid; that loss of entropy is deliberately
 * ignored here.
 */
static size_t key_hash(long key, void *ctx __maybe_unused)
{
	return key;
}
/* Total thread count, summed across every shard of @threads. */
size_t threads__nr(struct threads *threads)
{
	size_t total = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];

		/* Take the shard lock so the size read is coherent. */
		down_read(&table->lock);
		total += hashmap__size(&table->shard);
		up_read(&table->lock);
	}
	return total;
}
/* * Front-end cache - TID lookups come in blocks, * so most of the time we dont have to look up * the full rbtree:
*/ staticstruct thread *__threads_table_entry__get_last_match(struct threads_table_entry *table,
pid_t tid)
{ struct thread *th, *res = NULL;
th = table->last_match; if (th != NULL) { if (thread__tid(th) == tid)
res = thread__get(th);
} return res;
}
down_read(&table->lock);
res = __threads_table_entry__get_last_match(table, tid); if (!res) { if (hashmap__find(&table->shard, tid, &res))
res = thread__get(res);
}
up_read(&table->lock); if (res)
threads_table_entry__set_last_match(table, res); return res;
}
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt.
Es wird jedoch weder Vollständigkeit noch Richtigkeit noch Qualität
der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.