/* Structure for dbgc fragment buffer
 * @buf_addr_lsb: LSB of the buffer's physical address
 * @buf_addr_msb: MSB of the buffer's physical address
 * @buf_size: Total size of the buffer
 */
struct btintel_pcie_dbgc_ctxt_buf {
u32 buf_addr_lsb;
u32 buf_addr_msb;
u32 buf_size;
};
/* Structure for dbgc fragment
 * @magic_num: 0XA5A5A5A5
 * @ver: For Driver-FW compatibility
 * @total_size: Total size of the payload debug info
 * @num_buf: Num of allocated debug bufs
 * @bufs: All buffer's addresses and sizes
 */
struct btintel_pcie_dbgc_ctxt {
u32 magic_num;
u32 ver;
u32 total_size;
u32 num_buf; struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
};
/* Return a human-readable name for the cached alive interrupt context
 * state; used only for log messages.  Any value outside the known
 * BTINTEL_PCIE_* states maps to "unknown".
 */
static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
{
	switch (alive_intr_ctxt) {
	case BTINTEL_PCIE_ROM:
		return "rom";
	case BTINTEL_PCIE_FW_DL:
		return "fw_dl";
	case BTINTEL_PCIE_D0:
		return "d0";
	case BTINTEL_PCIE_D3:
		return "d3";
	case BTINTEL_PCIE_HCI_RESET:
		return "hci_reset";
	case BTINTEL_PCIE_INTEL_HCI_RESET1:
		return "intel_reset1";
	case BTINTEL_PCIE_INTEL_HCI_RESET2:
		return "intel_reset2";
	default:
		return "unknown";
	}
}
/* This function initializes the memory for DBGC buffers and formats the
 * DBGC fragment which consists header info and DBGC buffer's LSB, MSB and
 * size as the payload
 */
staticint btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
{ struct btintel_pcie_dbgc_ctxt db_frag; struct data_buf *buf; int i;
/* NOTE(review): the body of btintel_pcie_setup_dbgc appears truncated in
 * this chunk — confirm against the full source.
 */
/* Set the doorbell for TXQ to notify the device that @index (actually
 * index-1) of the TFD is updated and ready to transmit.
 */
staticvoid btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index)
{
u32 val;
/* Doorbell value: low 16 bits carry the TFD index, high 16 bits the
 * TX doorbell vector.
 */
val = index;
val |= (BTINTEL_PCIE_TX_DB_VEC << 16);
/* Copy the data to next(@tfd_index) data buffer and update the TFD(transfer
 * descriptor) with the data length and the DMA address of the data buffer.
 */
staticvoid btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index, struct sk_buff *skb)
{ struct data_buf *buf; struct tfd *tfd;
/* NOTE(review): the statements below belong to the synchronous TX send
 * path (its function header is not visible in this chunk).
 */
/* Prepare for TX. It updates the TFD with the length of data and
 * address of the DMA buffer, and copy the data to the DMA buffer
 */
btintel_pcie_prepare_tx(txq, tfd_index, skb);
/* Arm wait event condition */
data->tx_wait_done = false;
/* Set the doorbell to notify the device */
btintel_pcie_set_tx_db(data, tfd_index);
/* Wait for the complete interrupt - URBD0 */
ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS)); if (!ret) {
bt_dev_err(data->hdev, "Timeout (%u ms) on tx completion",
BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS);
btintel_pcie_dump_debug_registers(data->hdev); return -ETIME;
}
/* Optionally also wait for the device's alive (GP0) interrupt */
if (wait_on_alive) {
ret = wait_event_timeout(data->gp0_wait_q,
data->gp0_received,
msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS)); if (!ret) {
hdev->stat.err_tx++;
bt_dev_err(hdev, "Timeout (%u ms) on alive interrupt, alive context: %s",
BTINTEL_DEFAULT_INTR_TIMEOUT_MS,
btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt)); return -ETIME;
}
} return 0;
}
/* Set the doorbell for RXQ to notify the device that @index (actually
 * index-1) is available to receive the data
 */
staticvoid btintel_pcie_set_rx_db(struct btintel_pcie_data *data, u16 index)
{
u32 val;
/* Doorbell value: low 16 bits carry the index, high 16 bits the RX
 * doorbell vector.
 */
val = index;
val |= (BTINTEL_PCIE_RX_DB_VEC << 16);
/* Update the FRBD (free buffer descriptor) with the @frbd_index and the
 * DMA address of the free buffer.
 */
staticvoid btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index)
{ struct data_buf *buf; struct frbd *frbd;
/* Get the buffer of the FRBD for DMA */
buf = &rxq->bufs[frbd_index];
/* NOTE(review): the polling loop below belongs to the MAC-access acquire
 * path (its function header is not visible in this chunk).
 */
do {
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG); if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) return 0; /* Need delay here for Target Access hardware to settle down */
usleep_range(1000, 1200);
/* Dump firmware debug traces from device memory.
 *
 * Acquires device MAC access before reading the DRAM debug buffers and
 * always releases it afterwards.  This is best-effort: failures are only
 * logged, never propagated to the caller.
 */
static void btintel_pcie_dump_traces(struct hci_dev *hdev)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	int ret;

	ret = btintel_pcie_get_mac_access(data);
	if (ret) {
		bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
		return;
	}

	ret = btintel_pcie_read_dram_buffers(data);

	/* Release MAC access regardless of the read result */
	btintel_pcie_release_mac_access(data);

	if (ret)
		bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
}
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
 * BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
 * BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
 * Then the host reads firmware version from BTINTEL_CSR_F2D_MBX and the boot stage
 * from BTINTEL_PCIE_CSR_BOOT_STAGE_REG.
 */
staticint btintel_pcie_enable_bt(struct btintel_pcie_data *data)
{ int err;
u32 reg;
data->gp0_received = false;
/* Update the DMA address of CI struct to CSR */
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG,
data->ci_p_addr & 0xffffffff);
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG,
(u64)data->ci_p_addr >> 32);
/* Reset the cached value of boot stage. it is updated by the MSI-X
 * gp0 interrupt handler.
 */
data->boot_stage_cache = 0x0;
/* Set MAC_INIT bit to start primary bootloader */
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
/* NOTE(review): the write of @reg back to FUNC_CTRL_REG appears to be
 * missing from this chunk — confirm against the full source.
 */
/* wait for interrupt from the device after booting up to primary
 * bootloader.
 */
data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS)); if (!err) return -ETIME;
/* Check cached boot stage is BTINTEL_PCIE_CSR_BOOT_STAGE_ROM(BIT(0)) */ if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM) return -ENODEV;
/* Read @len bytes of device memory starting at @dev_addr into @buf, one
 * 32-bit word at a time, while holding device MAC access.
 * NOTE(review): @len is presumably expected to be a multiple of 4 — the
 * loop always reads whole words; confirm against callers.
 */
staticint btintel_pcie_read_device_mem(struct btintel_pcie_data *data, void *buf, u32 dev_addr, int len)
{ int err;
u32 *val = buf;
/* Get device mac access */
err = btintel_pcie_get_mac_access(data); if (err) {
bt_dev_err(data->hdev, "Failed to get mac access %d", err); return err;
}
for (; len > 0; len -= 4, dev_addr += 4, val++)
*val = btintel_pcie_rd_dev_mem(data, dev_addr);
/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
 * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and
 * image response.
 */
staticvoid btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
{ bool submit_rx, signal_waitq;
u32 reg, old_ctxt;
/* This interrupt is for three different causes and it is not easy to
 * know what causes the interrupt. So, it compares each register value
 * with cached value and update it before it wake up the queue.
 */
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG); if (reg != data->boot_stage_cache)
data->boot_stage_cache = reg;
/* NOTE(review): submit_rx/signal_waitq are not initialised anywhere in
 * this chunk before the switch below — confirm they are set to false in
 * the full source.
 */
switch (data->alive_intr_ctxt) { case BTINTEL_PCIE_ROM:
data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
signal_waitq = true; break; case BTINTEL_PCIE_FW_DL:
/* Error case is already handled. Ideally control shall not
 * reach here
 */ break; case BTINTEL_PCIE_INTEL_HCI_RESET1: if (btintel_pcie_in_op(data)) {
submit_rx = true;
signal_waitq = true; break;
}
if (btintel_pcie_in_iml(data)) {
submit_rx = true;
signal_waitq = true;
data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL; break;
} break; case BTINTEL_PCIE_INTEL_HCI_RESET2: if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
data->alive_intr_ctxt = BTINTEL_PCIE_D0;
} break; case BTINTEL_PCIE_D0: if (btintel_pcie_in_d3(data)) {
data->alive_intr_ctxt = BTINTEL_PCIE_D3;
signal_waitq = true; break;
} break; case BTINTEL_PCIE_D3: if (btintel_pcie_in_d0(data)) {
data->alive_intr_ctxt = BTINTEL_PCIE_D0;
submit_rx = true;
signal_waitq = true; break;
} break; case BTINTEL_PCIE_HCI_RESET:
data->alive_intr_ctxt = BTINTEL_PCIE_D0;
submit_rx = true;
signal_waitq = true; break; default:
bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
data->alive_intr_ctxt); break;
}
/* Restart RX when requested by the state transition above */
if (submit_rx) {
btintel_pcie_reset_ia(data);
btintel_pcie_start_rx(data);
}
if (signal_waitq) {
bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
wake_up(&data->gp0_wait_q);
}
/* This function handles the MSI-X interrupt for rx queue 0 which is for TX
 */
staticvoid btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
{
u16 cr_tia, cr_hia; struct txq *txq; struct urbd0 *urbd0;
/* NOTE(review): the statements below belong to the HCI event receive
 * path (its function header is not visible in this chunk).
 */
if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) { switch (skb->data[2]) { case 0x02:
/* When switching to the operational firmware
 * the device sends a vendor specific event
 * indicating that the bootup completed.
 */
btintel_bootup(hdev, ptr, len);
/* If bootup event is from operational image,
 * driver needs to write sleep control register to
 * move into D0 state
 */ if (btintel_pcie_in_op(data)) {
btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
kfree_skb(skb); return 0;
}
if (btintel_pcie_in_iml(data)) { /* In case of IML, there is no concept
 * of D0 transition. Just mimic as if
 * IML moved to D0 by clearing INTEL_WAIT_FOR_D0
 * bit and waking up the task waiting on
 * INTEL_WAIT_FOR_D0. This is required
 * as intel_boot() is common function for
 * both IML and OP image loading.
 */ if (btintel_test_and_clear_flag(data->hdev,
INTEL_WAIT_FOR_D0))
btintel_wake_up_flag(data->hdev,
INTEL_WAIT_FOR_D0);
}
kfree_skb(skb); return 0; case 0x06:
/* When the firmware loading completes the
 * device sends out a vendor specific event
 * indicating the result of the firmware
 * loading.
 */
btintel_secure_send_result(hdev, ptr, len);
kfree_skb(skb); return 0;
}
}
/* This is a debug event that comes from IML and OP image when it
 * starts execution. There is no need pass this event to stack.
 */ if (skb->data[2] == 0x97) {
hci_recv_diag(hdev, skb); return 0;
}
}
return hci_recv_frame(hdev, skb);
}
/* Process the received rx data
 * It check the frame header to identify the data type and create skb
 * and calling HCI API
 */
staticint btintel_pcie_recv_frame(struct btintel_pcie_data *data, struct sk_buff *skb)
{ int ret;
u8 pkt_type;
u16 plen;
u32 pcie_pkt_type; void *pdata; struct hci_dev *hdev = data->hdev;
spin_lock(&data->hci_rx_lock);
/* The first 4 bytes indicates the Intel PCIe specific packet type */
pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN); if (!pdata) {
bt_dev_err(hdev, "Corrupted packet received");
ret = -EILSEQ; goto exit_error;
}
pcie_pkt_type = get_unaligned_le32(pdata);
/* Map the PCIe packet type to the HCI packet type and compute the
 * expected payload length from the corresponding HCI header.
 */
switch (pcie_pkt_type) { case BTINTEL_PCIE_HCI_ACL_PKT: if (skb->len >= HCI_ACL_HDR_SIZE) {
plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen);
pkt_type = HCI_ACLDATA_PKT;
} else {
bt_dev_err(hdev, "ACL packet is too short");
ret = -EILSEQ; goto exit_error;
} break;
case BTINTEL_PCIE_HCI_SCO_PKT: if (skb->len >= HCI_SCO_HDR_SIZE) {
plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen;
pkt_type = HCI_SCODATA_PKT;
} else {
bt_dev_err(hdev, "SCO packet is too short");
ret = -EILSEQ; goto exit_error;
} break;
case BTINTEL_PCIE_HCI_EVT_PKT: if (skb->len >= HCI_EVENT_HDR_SIZE) {
plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen;
pkt_type = HCI_EVENT_PKT;
} else {
bt_dev_err(hdev, "Event packet is too short");
ret = -EILSEQ; goto exit_error;
} break;
case BTINTEL_PCIE_HCI_ISO_PKT: if (skb->len >= HCI_ISO_HDR_SIZE) {
plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen);
pkt_type = HCI_ISODATA_PKT;
} else {
bt_dev_err(hdev, "ISO packet is too short");
ret = -EILSEQ; goto exit_error;
} break;
default:
bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
pcie_pkt_type);
ret = -EINVAL; goto exit_error;
}
if (skb->len < plen) {
bt_dev_err(hdev, "Received corrupted packet. type: 0x%2.2x",
pkt_type);
ret = -EILSEQ; goto exit_error;
}
if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
ret = btintel_pcie_recv_event(hdev, skb); else
ret = hci_recv_frame(hdev, skb);
skb = NULL; /* skb is freed in the callee */
/* NOTE(review): the tail of btintel_pcie_recv_frame (exit_error label
 * and unlock) is not visible in this chunk.
 */
/* NOTE(review): TLV parsing loop, presumably from the hardware-exception
 * read path (its function header is not visible in this chunk).
 */
offset = 4; do {
pending = len - offset; if (pending < sizeof(*tlv)) break;
tlv = (struct tlv *)(buf + offset);
/* If type == 0, then there are no more TLVs to be parsed */ if (!tlv->type) {
bt_dev_dbg(data->hdev, "Invalid TLV type 0"); break;
}
pkt_len = le16_to_cpu(tlv->len);
offset += sizeof(*tlv);
/* Stop if the TLV payload would run past the end of the buffer */
pending = len - offset; if (pkt_len > pending) break;
offset += pkt_len;
/* Only TLVs of type == 1 are HCI events, no need to process other
 * TLVs
 */ if (tlv->type != 1) continue;
/* copy Intel specific pcie packet type */
val = BTINTEL_PCIE_HCI_EVT_PKT;
memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
BTINTEL_PCIE_HCI_TYPE_LEN);
/* NOTE(review): hardware-exception MSI-X handling path (its function
 * header is not visible in this chunk).  The test_and_set_bit guards
 * make the handler idempotent across repeated interrupts.
 */
if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags)) return;
if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) return;
/* Trigger device core dump when there is HW exception */ if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
btintel_pcie_dump_traces(data->hdev);
clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
}
if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) { /* Unlike usb products, controller will not send hardware
 * exception event on exception. Instead controller writes the
 * hardware event to device memory along with optional debug
 * events, raises MSIX and halts. Driver shall read the
 * exception event from device memory and passes it stack for
 * further processing.
 */
btintel_pcie_read_hwexp(data);
clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
}
/* Process the sk_buf in queue and send to the HCI layer */ while ((skb = skb_dequeue(&data->rx_skb_q))) {
btintel_pcie_recv_frame(data, skb);
}
}
/* create sk_buff with data and save it to queue and start RX work */
staticint btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status, void *buf)
{ int ret, len; struct rfh_hdr *rfh_hdr; struct sk_buff *skb;
rfh_hdr = buf;
/* Reject zero/negative payload lengths reported by the RFH header */
len = rfh_hdr->packet_len; if (len <= 0) {
ret = -EINVAL; goto resubmit;
}
/* Remove RFH header */
buf += sizeof(*rfh_hdr);
skb = alloc_skb(len, GFP_ATOMIC); if (!skb) goto resubmit;
/* NOTE(review): the remainder of this function (copy into skb, queueing,
 * resubmit label) is not visible in this chunk.
 */
/* NOTE(review): RX-queue MSI-X handling loop (its function header is not
 * visible in this chunk).
 */
/* Check CR_TIA and CR_HIA for change */ if (cr_tia == cr_hia) return;
rxq = &data->rxq;
/* The firmware sends multiple CD in a single MSI-X and it needs to
 * process all received CDs in this interrupt.
 */ while (cr_tia != cr_hia) {
urbd1 = &rxq->urbd1s[cr_tia];
ipc_print_urbd1(data->hdev, urbd1, cr_tia);
buf = &rxq->bufs[urbd1->frbd_tag]; if (!buf) {
bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d",
urbd1->frbd_tag); return;
}
ret = btintel_pcie_submit_rx_work(data, urbd1->status,
buf->data); if (ret) {
bt_dev_err(hdev, "RXQ: failed to submit rx request"); return;
}
/* NOTE(review): MSI-X interrupt thread dispatch (its function header is
 * not visible in this chunk).
 */
/* Clear causes registers to avoid being handling the same cause */
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES, intr_fh);
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES, intr_hw);
spin_unlock(&data->irq_lock);
/* This interrupt is raised when there is an hardware exception */ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
btintel_pcie_msix_hw_exp_handler(data);
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
btintel_pcie_msix_gp1_handler(data);
/* For TX */ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
btintel_pcie_msix_tx_handle(data); if (!btintel_pcie_is_rxq_empty(data))
btintel_pcie_msix_rx_handle(data);
}
/* For RX */ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
btintel_pcie_msix_rx_handle(data); if (!btintel_pcie_is_txackq_empty(data))
btintel_pcie_msix_tx_handle(data);
}
/* This interrupt is triggered by the firmware after updating
 * boot_stage register and image_response register
 */ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
btintel_pcie_msix_gp0_handler(data);
/*
 * Before sending the interrupt the HW disables it to prevent a nested
 * interrupt. This is done by writing 1 to the corresponding bit in
 * the mask register. After handling the interrupt, it should be
 * re-enabled by clearing this bit. This register is defined as write 1
 * clear (W1C) register, meaning that it's cleared by writing 1
 * to the bit.
 */
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST,
BIT(entry->entry));
return IRQ_HANDLED;
}
/* This function requests the irq for MSI-X and registers the handlers per irq.
 * Currently, it requests only 1 irq for all interrupt causes.
 */
staticint btintel_pcie_setup_irq(struct btintel_pcie_data *data)
{ int err; int num_irqs, i;
/* Fill the MSI-X entry table with sequential vector indices */
for (i = 0; i < BTINTEL_PCIE_MSIX_VEC_MAX; i++)
data->msix_entries[i].entry = i;
/* NOTE(review): the remainder of this function (vector enable and
 * request_threaded_irq) is not visible in this chunk.
 */
/* This function configures the interrupt masks for both HW_INT_CAUSES and
 * FH_INT_CAUSES which are meaningful to us.
 *
 * After resetting BT function via PCIE FLR or FUNC_CTRL reset, the driver
 * needs to call this function again to configure since the masks
 * are reset to 0xFFFFFFFF after reset.
 */
static void btintel_pcie_config_msix(struct btintel_pcie_data *data)
{
	int i;
	int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE;

	/* Set Non Auto Clear Cause */
	for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
		btintel_pcie_wr_reg8(data,
				     BTINTEL_PCIE_CSR_MSIX_IVAR(causes_list[i].cause_num),
				     val);
		btintel_pcie_clr_reg_bits(data,
					  causes_list[i].mask_reg,
					  causes_list[i].cause);
	}

	/* Save the initial interrupt mask */
	data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK);
	data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK);
}
/* Enable the PCI device, set bus mastering, configure DMA masking
 * (64-bit with a 32-bit fallback), map BAR 0 and set up MSI-X.
 */
staticint btintel_pcie_config_pcie(struct pci_dev *pdev, struct btintel_pcie_data *data)
{ int err;
err = pcim_enable_device(pdev); if (err) return err;
pci_set_master(pdev);
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) return err;
}
data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME); if (IS_ERR(data->base_addr)) return PTR_ERR(data->base_addr);
err = btintel_pcie_setup_irq(data); if (err) return err;
/* Configure MSI-X with causes list */
btintel_pcie_config_msix(data);
/* NOTE(review): the closing "return 0;" of this function appears to be
 * missing from this chunk — confirm against the full source.
 */
/* NOTE(review): TX/RX queue data-buffer allocation (the enclosing
 * function headers are not visible in this chunk).
 */
/* Allocate the same number of buffers as the descriptor */
txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL); if (!txq->bufs) return -ENOMEM;
/* Allocate full chunk of data buffer for DMA first and do indexing and
 * initialization next, so it can be freed easily
 */
txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
txq->count * BTINTEL_PCIE_BUFFER_SIZE,
&txq->buf_p_addr,
GFP_KERNEL | __GFP_NOWARN); if (!txq->buf_v_addr) {
kfree(txq->bufs); return -ENOMEM;
}
/* Setup the allocated DMA buffer to bufs. Each data_buf should
 * have virtual address and physical address
 */ for (i = 0; i < txq->count; i++) {
buf = &txq->bufs[i];
buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
}
/* Allocate the same number of buffers as the descriptor */
rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL); if (!rxq->bufs) return -ENOMEM;
/* Allocate full chunk of data buffer for DMA first and do indexing and
 * initialization next, so it can be freed easily
 */
rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
&rxq->buf_p_addr,
GFP_KERNEL | __GFP_NOWARN); if (!rxq->buf_v_addr) {
kfree(rxq->bufs); return -ENOMEM;
}
/* Setup the allocated DMA buffer to bufs. Each data_buf should
 * have virtual address and physical address
 */ for (i = 0; i < rxq->count; i++) {
buf = &rxq->bufs[i];
buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
}
return 0;
}
/* Record the DMA (physical) and virtual addresses of the index-array
 * sub-regions inside the shared DMA chunk.
 */
staticvoid btintel_pcie_setup_ia(struct btintel_pcie_data *data,
dma_addr_t p_addr, void *v_addr, struct ia *ia)
{ /* TR Head Index Array */
ia->tr_hia_p_addr = p_addr;
ia->tr_hia = v_addr;
/* NOTE(review): the remaining index arrays (tr_tia, cr_hia, cr_tia) are
 * not visible in this chunk.
 */
/* Allocate tx and rx queues, any related data structures and buffers.
 */
staticint btintel_pcie_alloc(struct btintel_pcie_data *data)
{ int err = 0;
size_t total;
dma_addr_t p_addr; void *v_addr;
/* Allocate the chunk of DMA memory for descriptors, index array, and
 * context information, instead of allocating individually.
 * The DMA memory for data buffer is allocated while setting up the
 * each queue.
 *
 * Total size is sum of the following
 *  + size of TFD * Number of descriptors in queue
 *  + size of URBD0 * Number of descriptors in queue
 *  + size of FRBD * Number of descriptors in queue
 *  + size of URBD1 * Number of descriptors in queue
 *  + size of index * Number of queues(2) * type of index array(4)
 *  + size of context information
 */
total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;
/* Add the sum of size of index array and size of ci struct */
total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
/* NOTE(review): HCI frame send path (its function header is not visible
 * in this chunk).  Refuse to send once the device core has halted.
 */
if (test_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags)) return -ENODEV;
/* Due to the fw limitation, the type header of the packet should be
 * 4 bytes unlike 1 byte for UART. In UART, the firmware can read
 * the first byte to get the packet type and redirect the rest of data
 * packet to the right handler.
 *
 * But for PCIe, THF(Transfer Flow Handler) fetches the 4 bytes of data
 * from DMA memory and by the time it reads the first 4 bytes, it has
 * already consumed some part of packet. Thus the packet type indicator
 * for iBT PCIe is 4 bytes.
 *
 * Luckily, when HCI core creates the skb, it allocates 8 bytes of
 * head room for profile and driver use, and before sending the data
 * to the device, append the iBT PCIe packet type in the front.
 */ switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT:
type = BTINTEL_PCIE_HCI_CMD_PKT;
cmd = (void *)skb->data;
opcode = le16_to_cpu(cmd->opcode); if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) { struct hci_command_hdr *cmd = (void *)skb->data;
__u16 opcode = le16_to_cpu(cmd->opcode);
/* When the BTINTEL_HCI_OP_RESET command is issued to
 * boot into the operational firmware, it will actually
 * not send a command complete event. To keep the flow
 * control working inject that event here.
 */ if (opcode == BTINTEL_HCI_OP_RESET)
btintel_pcie_inject_cmd_complete(hdev, opcode);
}
hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT:
type = BTINTEL_PCIE_HCI_ACL_PKT;
hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT:
type = BTINTEL_PCIE_HCI_SCO_PKT;
hdev->stat.sco_tx++; break; case HCI_ISODATA_PKT:
type = BTINTEL_PCIE_HCI_ISO_PKT; break; default:
bt_dev_err(hdev, "Unknown HCI packet type"); return -EILSEQ;
}
ret = btintel_pcie_send_sync(data, skb, type, opcode); if (ret) {
hdev->stat.err_tx++;
bt_dev_err(hdev, "Failed to send frame (%d)", ret); goto exit_error;
}
/* NOTE(review): driver setup path processing the Intel Read Version
 * response (its function header is not visible in this chunk).
 */
/* Check the status */ if (skb->data[0]) {
bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
skb->data[0]);
err = -EIO; goto exit_error;
}
/* Apply the common HCI quirks for Intel device */
hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);
/* Set up the quality report callback for Intel devices */
hdev->set_quality_report = btintel_set_quality_report;
memset(&ver_tlv, 0, sizeof(ver_tlv)); /* For TLV type device, parse the tlv data */
err = btintel_parse_version_tlv(hdev, &ver_tlv, skb); if (err) {
bt_dev_err(hdev, "Failed to parse TLV version information"); goto exit_error;
}
/* Check for supported iBT hardware variants of this firmware
 * loading method.
 *
 * This check has been put in place to ensure correct forward
 * compatibility options when newer hardware variants come
 * along.
 */ switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) { case 0x1e: /* BzrI */ case 0x1f: /* ScP */ case 0x22: /* BzrIW */ /* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
/* Apply the device specific HCI quirks for TLV based devices
 *
 * All TLV based devices support WBS
 */
hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev,
INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.