/* * Copyright (c) 2007-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
 * NOTE(review): fragment — the enclosing function header is not visible in
 * this chunk (code operates on 'ep' and 'queue_to_indicate'; presumably a
 * send-completion dispatch helper — confirm against the full file).
 * Dispatches TX completions either via the endpoint's multi-packet callback
 * (tx_comp_multi) or, legacy style, one packet at a time (tx_complete).
 */
if (list_empty(queue_to_indicate)) { /* nothing to indicate */ return;
}
if (ep->ep_cb.tx_comp_multi != NULL) {
ath6kl_dbg(ATH6KL_DBG_HTC, "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
__func__, ep->eid,
get_queue_depth(queue_to_indicate)); /* * a multiple send complete handler is being used, * pass the queue to the handler
*/
ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate); /* * all packets are now owned by the callback, * reset queue to be safe
*/
INIT_LIST_HEAD(queue_to_indicate);
} else { /* using legacy EpTxComplete */ do {
/* pop packets one at a time and hand each to the per-packet callback */
packet = list_first_entry(queue_to_indicate, struct htc_packet, list);
list_del(&packet->list);
ath6kl_dbg(ATH6KL_DBG_HTC, "%s: calling ep %d send complete callback on packet 0x%p\n",
__func__, ep->eid, packet);
ep->ep_cb.tx_complete(ep->target, packet);
} while (!list_empty(queue_to_indicate));
}
}
/*
 * NOTE(review): fragment — loop head and function header are missing from
 * this chunk (the bare 'break' implies an enclosing loop). Dequeues packets
 * from ep->txq under credit-based flow control and moves them to the
 * caller-supplied 'queue', stamping credits used, send flags and seqno.
 */
if (ep->eid == ENDPOINT_0) { /* * endpoint 0 is special, it always has a credit and * does not require credit based flow control
*/
credits_required = 0;
} else { if (ep->cred_dist.credits < credits_required) break;
/* check if we need credits back from the target */ if (ep->cred_dist.credits <
ep->cred_dist.cred_per_msg) { /* tell the target we need credits ASAP! */
send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
ep->ep_st.cred_low_indicate += 1;
ath6kl_dbg(ATH6KL_DBG_HTC, "%s: host needs credits\n",
__func__);
}
}
/* now we can fully dequeue */
packet = list_first_entry(&ep->txq, struct htc_packet, list);
list_del(&packet->list); /* save the number of credits this packet consumed */
packet->info.tx.cred_used = credits_required; /* save send flags */
packet->info.tx.flags = send_flags;
packet->info.tx.seqno = ep->seqno;
ep->seqno++; /* queue this packet into the caller's queue */
list_add_tail(&packet->list, queue);
}
}
/*
 * NOTE(review): fragment — enclosing function (apparently the per-packet
 * issue path) is cut on both sides in this chunk. Records the packet in the
 * per-endpoint tx_lookup_queue so its completion can be matched by skb,
 * then hands the skb to the HIF pipe layer; on send failure the packet is
 * unlinked again under the tx lock.
 */
/* store in look up queue to match completions */
list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
ep->ep_st.tx_issued += 1;
spin_unlock_bh(&target->tx_lock);
status = ath6kl_hif_pipe_send(target->dev->ar,
ep->pipe.pipeid_ul, NULL, skb);
if (status != 0) { if (status != -ENOMEM) { /* TODO: if more than 1 endpoint maps to the * same PipeID, it is possible to run out of * resources in the HIF layer. * Don't emit the error
*/
ath6kl_dbg(ATH6KL_DBG_HTC, "%s: failed status:%d\n",
__func__, status);
}
/* send failed: take the packet back off the lookup queue */
spin_lock_bh(&target->tx_lock);
list_del(&packet->list);
/*
 * NOTE(review): fragment — the function header is missing from this chunk
 * (the HTC_SEND_QUEUE_* returns and use of 'txq'/'tx_resources' suggest the
 * endpoint try-send path; confirm against the full file).
 * Stages caller packets into a local send_queue (applying overflow policy
 * via the optional tx_full callback), transfers them to ep->txq, and drains
 * the endpoint queue while transmit resources/credits allow.
 */
/* init the local send queue */
INIT_LIST_HEAD(&send_queue);
/* * txq equals to NULL means * caller didn't provide a queue, just wants us to * check queues and send
*/ if (txq != NULL) { if (list_empty(txq)) { /* empty queue */ return HTC_SEND_QUEUE_DROP;
}
if (txqueue_depth >= ep->max_txq_depth) { /* we've already overflowed */
overflow = get_queue_depth(txq);
} else { /* get how much we will overflow by */
overflow = txqueue_depth;
overflow += get_queue_depth(txq); /* get how much we will overflow the TX queue by */
overflow -= ep->max_txq_depth;
}
/* if overflow is negative or zero, we are okay */ if (overflow > 0) {
ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
__func__, ep->eid, overflow, txqueue_depth,
ep->max_txq_depth);
} if ((overflow <= 0) ||
(ep->ep_cb.tx_full == NULL)) { /* * all packets will fit or caller did not provide send * full indication handler -- just move all of them * to the local send_queue object
*/
list_splice_tail_init(txq, &send_queue);
} else {
good_pkts = get_queue_depth(txq) - overflow; if (good_pkts < 0) {
WARN_ON_ONCE(1); return HTC_SEND_QUEUE_DROP;
}
/* we have overflowed, and a callback is provided */ /* dequeue all non-overflow packets to the sendqueue */ for (i = 0; i < good_pkts; i++) { /* pop off caller's queue */
packet = list_first_entry(txq, struct htc_packet,
list); /* move to local queue */
list_move_tail(&packet->list, &send_queue);
}
/* * the caller's queue has all the packets that won't fit * walk through the caller's queue and indicate each to * the send full handler
*/
list_for_each_entry_safe(packet, tmp_pkt,
txq, list) {
ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Indicate overflowed TX pkts: %p\n",
__func__, packet);
action = ep->ep_cb.tx_full(ep->target, packet); if (action == HTC_SEND_FULL_DROP) { /* callback wants the packet dropped */
ep->ep_st.tx_dropped += 1;
/* leave this one in the caller's queue
* for cleanup */
} else { /* callback wants to keep this packet, * move from caller's queue to the send
* queue */
list_move_tail(&packet->list,
&send_queue);
}
}
if (list_empty(&send_queue)) { /* no packets made it in, caller will cleanup */ return HTC_SEND_QUEUE_DROP;
}
}
}
/* commit the staged packets to the endpoint queue under the TX lock */
spin_lock_bh(&target->tx_lock); if (!list_empty(&send_queue)) { /* transfer packets to tail */
list_splice_tail_init(&send_queue, &ep->txq); if (!list_empty(&send_queue)) {
WARN_ON_ONCE(1);
spin_unlock_bh(&target->tx_lock); return HTC_SEND_QUEUE_DROP;
}
INIT_LIST_HEAD(&send_queue);
}
/* increment tx processing count on entry */
ep->tx_proc_cnt++;
if (ep->tx_proc_cnt > 1) { /* * Another thread or task is draining the TX queues on this * endpoint that thread will reset the tx processing count * when the queue is drained.
*/
ep->tx_proc_cnt--;
spin_unlock_bh(&target->tx_lock); return HTC_SEND_QUEUE_OK;
}
/***** beyond this point only 1 thread may enter ******/
/* * Now drain the endpoint TX queue for transmission as long as we have * enough transmit resources.
*/ while (true) { if (get_queue_depth(&ep->txq) == 0) break;
if (ep->pipe.tx_credit_flow_enabled) { /* * Credit based mechanism provides flow control * based on target transmit resource availability, * we assume that the HIF layer will always have * bus resources greater than target transmit * resources.
*/
get_htc_packet_credit_based(target, ep, &send_queue);
} else { /* * Get all packets for this endpoint that we can * for this pass.
*/
get_htc_packet(target, ep, &send_queue, tx_resources);
}
if (get_queue_depth(&send_queue) == 0) { /* * Didn't get packets due to out of resources or TX * queue was drained.
*/ break;
}
/* drop the lock while issuing; re-acquired on next loop iteration */
spin_unlock_bh(&target->tx_lock);
/* send what we can */
htc_issue_packets(target, ep, &send_queue);
/*
 * NOTE(review): fragment — header missing; this looks like the tail of the
 * per-service TX credit allocation setup ('entry' walks a credit table,
 * 'credits' is the remaining pool). The USB-audio branch is dead code per
 * the FIXME below (hif_usbaudioclass is always zero).
 */
/* FIXME: hif_usbaudioclass is always zero */ if (hif_usbaudioclass) {
ath6kl_dbg(ATH6KL_DBG_HTC, "%s: For USB Audio Class- Total:%d\n",
__func__, credits);
entry++;
entry++; /* Setup VO Service To have Max Credits */
entry->service_id = WMI_DATA_VO_SVC;
entry->credit_alloc = (credits - 6); if (entry->credit_alloc == 0)
entry->credit_alloc++;
credits -= (int) entry->credit_alloc; if (credits <= 0) return status;
/* leftovers go to best effort */
entry++;
entry->service_id = WMI_DATA_BE_SVC;
entry->credit_alloc = (u8) credits;
status = 0;
}
/* on success, dump the final per-endpoint credit allocation table */
if (status == 0) { for (i = 0; i < ENDPOINT_MAX; i++) { if (target->pipe.txcredit_alloc[i].service_id != 0) {
ath6kl_dbg(ATH6KL_DBG_HTC, "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
i,
target->pipe.txcredit_alloc[i].
service_id,
target->pipe.txcredit_alloc[i].
credit_alloc);
}
}
} return status;
}
/* process credit reports and call distribution function */
/* Fix: 'staticvoid' was fused into one token, which does not compile. */
static void htc_process_credit_report(struct htc_target *target,
				      struct htc_credit_report *rpt,
				      int num_entries,
				      enum htc_endpoint_id from_ep)
{
	int total_credits = 0, i;
	struct htc_endpoint *ep;

	/* lock out TX while we update credits */
	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < num_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			/* bogus endpoint id in the report: drop it all */
			WARN_ON_ONCE(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		ep = &target->endpoint[rpt->eid];
		/* return the reported credits to this endpoint's pool */
		ep->cred_dist.credits += rpt->credits;
		/* NOTE(review): truncated in this chunk — accumulation of
		 * total_credits, the TX kick and the unlock are not visible
		 * here. */
/* * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC, * since upper layers expects struct htc_packet containers we use the completed * skb and lookup it's corresponding HTC packet buffer from a lookup list. * This is extra overhead that can be fixed by re-aligning HIF interfaces with * HTC.
*/ staticstruct htc_packet *htc_lookup_tx_packet(struct htc_target *target, struct htc_endpoint *ep, struct sk_buff *skb)
{ struct htc_packet *packet, *tmp_pkt, *found_packet = NULL;
spin_lock_bh(&target->tx_lock);
/* * iterate from the front of tx lookup queue * this lookup should be fast since lower layers completes in-order and * so the completed packet should be at the head of the list generally
*/
list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue,
list) { /* check for removal */ if (skb == packet->skb) { /* found it */
list_del(&packet->list);
found_packet = packet; break;
}
}
/*
 * NOTE(review): fragment — header missing; appears to be the TX-complete
 * path that maps a completed skb back to its HTC packet and finishes it.
 */
ep_id = htc_hdr->eid;
ep = &target->endpoint[ep_id];
packet = htc_lookup_tx_packet(target, ep, skb); if (packet == NULL) { /* may have already been flushed and freed */
ath6kl_err("HTC TX lookup failed!\n");
} else { /* will be giving this buffer back to upper layers */
packet->status = 0;
send_packet_completion(target, packet);
}
/* ownership of the skb passed with the packet; don't touch it again */
skb = NULL;
if (!ep->pipe.tx_credit_flow_enabled) { /* * note: when using TX credit flow, the re-checking of queues * happens when credits flow back from the target. in the * non-TX credit case, we recheck after the packet completes
*/
htc_try_send(target, ep, NULL);
}
/*
 * NOTE(review): fragment — header missing; appears to be the multi-packet
 * send entry point: routes the whole pkt_queue to the endpoint named by the
 * first packet, then fails any leftovers with -ENOMEM.
 */
/* get first packet to find out which ep the packets will go into */
packet = list_first_entry(pkt_queue, struct htc_packet, list);
if (packet->endpoint >= ENDPOINT_MAX) {
WARN_ON_ONCE(1); return -EINVAL;
}
ep = &target->endpoint[packet->endpoint];
htc_try_send(target, ep, pkt_queue);
/* do completion on any packets that couldn't get in */ if (!list_empty(pkt_queue)) {
list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
packet->status = -ENOMEM;
}
/*
 * Walk a buffer of byte-aligned HTC trailer records, validating each
 * record header against the remaining length.
 * Fix: 'staticint' was fused into one token, which does not compile.
 */
static int htc_process_trailer(struct htc_target *target, u8 *buffer,
			       int len, enum htc_endpoint_id from_ep)
{
	struct htc_credit_report *report;
	struct htc_record_hdr *record;
	u8 *record_buf;
	int status = 0;

	while (len > 0) {
		if (len < sizeof(struct htc_record_hdr)) {
			status = -EINVAL;
			break;
		}

		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buffer;
		len -= sizeof(struct htc_record_hdr);
		buffer += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			/* no room left in buffer for record */
			ath6kl_dbg(ATH6KL_DBG_HTC, "invalid length: %d (id:%d) buffer has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -EINVAL;
			break;
		}

		/* start of record follows the header */
		record_buf = buffer;

		switch (record->rec_id) {
		case HTC_RECORD_CREDITS:
			if (record->len < sizeof(struct htc_credit_report)) {
				WARN_ON_ONCE(1);
				return -EINVAL;
			}
			/* NOTE(review): truncated in this chunk — the credit
			 * report processing, default case and loop tail are
			 * not visible here. */
/*
 * NOTE(review): fragment — header missing; appears to be the RX-complete
 * path: validates target/endpoint, wraps the skb in an HTC packet
 * container and indicates it to the receiver.
 */
/* * ar->htc_target can be NULL due to a race condition that can occur * during driver initialization(we do 'ath6kl_hif_power_on' before * initializing 'ar->htc_target' via 'ath6kl_htc_create'). * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as * usb_complete_t/callback function for 'usb_fill_bulk_urb'. * Thus the possibility of ar->htc_target being NULL * via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
*/ if (!target) {
ath6kl_dbg(ATH6KL_DBG_HTC, "Target not yet initialized\n");
status = -EINVAL; goto free_skb;
}
netdata = skb->data;
netlen = skb->len;
htc_hdr = (struct htc_frame_hdr *) netdata;
if (htc_hdr->eid >= ENDPOINT_MAX) {
ath6kl_dbg(ATH6KL_DBG_HTC, "HTC Rx: invalid EndpointID=%d\n",
htc_hdr->eid);
status = -EINVAL; goto free_skb;
}
ep = &target->endpoint[htc_hdr->eid];
/* * TODO: the message based HIF architecture allocates net bufs * for recv packets since it bridges that HIF to upper layers, * which expects HTC packets, we form the packets here
*/
packet = alloc_htc_packet_container(target); if (packet == NULL) {
status = -ENOMEM; goto free_skb;
}
/* give the packet back */
do_recv_completion(ep, &container);
spin_lock_bh(&target->rx_lock);
}
spin_unlock_bh(&target->rx_lock);
}
/* polling routine to wait for a control packet to be received */
/* Fix: 'staticint' was fused into one token, which does not compile. */
static int htc_wait_recv_ctrl_message(struct htc_target *target)
{
	int count = HTC_TARGET_RESPONSE_POLL_COUNT;

	while (count > 0) {
		spin_lock_bh(&target->rx_lock);

		/* consume the response flag under the RX lock */
		if (target->pipe.ctrl_response_valid) {
			target->pipe.ctrl_response_valid = false;
			spin_unlock_bh(&target->rx_lock);
			break;
		}
		/* NOTE(review): truncated in this chunk — the unlock, poll
		 * delay, count decrement and timeout handling are not
		 * visible here. */
/*
 * NOTE(review): fragment — header missing; resets every endpoint to its
 * default state (cleared service binding, empty queues, credit flow on).
 */
for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
ep = &target->endpoint[i];
ep->svc_id = 0;
ep->len_max = 0;
ep->max_txq_depth = 0;
ep->eid = i;
INIT_LIST_HEAD(&ep->txq);
INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
INIT_LIST_HEAD(&ep->rx_bufq);
ep->target = target;
ep->pipe.tx_credit_flow_enabled = true;
}
}
/* start HTC, this is called after all services are connected */
/* Fix: 'staticint' was fused into one token, which does not compile. */
static int htc_config_target_hif_pipe(struct htc_target *target)
{
	/* no pipe-specific configuration is required; always succeed */
	return 0;
}
/*
 * htc_get_credit_alloc() - look up the TX credit allocation assigned to a
 * service id in the target's txcredit_alloc table; logs when none found.
 * NOTE(review): the 'return allocation;' and closing brace appear to be
 * truncated from this chunk — confirm against the full file.
 */
/* htc service functions */ static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
{
u8 allocation = 0; int i;
for (i = 0; i < ENDPOINT_MAX; i++) { if (target->pipe.txcredit_alloc[i].service_id == service_id)
allocation =
target->pipe.txcredit_alloc[i].credit_alloc;
}
if (allocation == 0) {
ath6kl_dbg(ATH6KL_DBG_HTC, "HTC Service TX : 0x%2.2X : allocation is zero!\n",
service_id);
}
/*
 * NOTE(review): fragment — header missing; appears to be the
 * connect-service control exchange (validate request, pick endpoint 0 for
 * the pseudo control service, send a control packet, wait for and validate
 * the response), followed by a separate rx-buffer add fragment at the end.
 */
if (conn_req->svc_id == 0) {
WARN_ON_ONCE(1);
status = -EINVAL; goto free_packet;
}
if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) { /* special case for pseudo control service */
assigned_epid = ENDPOINT_0;
max_msg_size = HTC_MAX_CTRL_MSG_LEN;
tx_alloc = 0;
} else {
tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id); if (tx_alloc == 0) {
status = -ENOMEM; goto free_packet;
}
/* allocate a packet to send to the target */
packet = htc_alloc_txctrl_packet(target);
if (packet == NULL) {
WARN_ON_ONCE(1);
status = -ENOMEM; goto free_packet;
}
/* we don't own it anymore */
packet = NULL; if (status != 0) goto free_packet;
/* wait for response */
status = htc_wait_recv_ctrl_message(target); if (status != 0) goto free_packet;
/* we controlled the buffer creation so it has to be * properly aligned
*/
resp_msg = (struct htc_conn_service_resp *)
target->pipe.ctrl_response_buf;
if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
(target->pipe.ctrl_response_len < sizeof(*resp_msg))) { /* this message is not valid */
WARN_ON_ONCE(1);
status = -EINVAL; goto free_packet;
}
/* store receive packets */
list_splice_tail_init(pkt_queue, &ep->rx_bufq);
spin_unlock_bh(&target->rx_lock);
if (status != 0) { /* walk through queue and mark each one canceled */
list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
packet->status = -ECANCELED;
}
/*
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch
 * Richtigkeit noch Qualität der bereitgestellten Informationen zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */