/*
 * Debug-only wrapper around netdev_dbg(): expands to a no-op statement
 * unless DEBUG is defined, so release builds carry no debug-print cost.
 */
#ifdef DEBUG
#define ctucan_netdev_dbg(ndev, args...) \
		netdev_dbg(ndev, args)
#else
#define ctucan_netdev_dbg(...) do { } while (0)
#endif

/* Expected DEVICE_ID register value identifying a CTU CAN FD core */
#define CTUCANFD_ID 0xCAFD
/* TX buffer rotation:
 * - when a buffer transitions to empty state, rotate order and priorities
 * - if more buffers seem to transition at the same time, rotate by the number of buffers
 * - it may be assumed that buffers transition to empty state in FIFO order (because we manage
 *   priorities that way)
 * - at frame filling, do not rotate anything, just increment buffer modulo counter
 */
/**
 * ctucan_state_to_str() - Converts CAN controller state code to corresponding text
 * @state: CAN controller state code
 *
 * Return: Pointer to string representation of the error state, or "UNKNOWN"
 *         when @state is out of the known range or has no registered text.
 */
static const char *ctucan_state_to_str(enum can_state state)
{
	const char *txt = NULL;

	if (state >= 0 && state < CAN_STATE_MAX)
		txt = ctucan_state_strings[state];
	return txt ? txt : "UNKNOWN";
}
/** * ctucan_reset() - Issues software reset request to CTU CAN FD * @ndev: Pointer to net_device structure * * Return: 0 for success, -%ETIMEDOUT if CAN controller does not leave reset
*/ staticint ctucan_reset(struct net_device *ndev)
{ struct ctucan_priv *priv = netdev_priv(ndev); int i = 100;
do {
u16 device_id = FIELD_GET(REG_DEVICE_ID_DEVICE_ID,
ctucan_read32(priv, CTUCANFD_DEVICE_ID));
if (device_id == 0xCAFD) return 0; if (!i--) {
netdev_warn(ndev, "device did not leave reset\n"); return -ETIMEDOUT;
}
usleep_range(100, 200);
} while (1);
}
/**
 * ctucan_set_btr() - Sets CAN bus bit timing in CTU CAN FD
 * @ndev: Pointer to net_device structure
 * @bt: Pointer to Bit timing structure
 * @nominal: True - Nominal bit timing, False - Data bit timing
 *
 * Return: 0 - OK, -%EPERM if controller is enabled
 *
 * NOTE(review): this body appears truncated in this view - 'btr' is declared
 * but never assembled or written to a register, and 'mode_reg' below is used
 * without being declared or read. The register-write portion of the function
 * was likely lost; confirm against the full file.
 */ staticint ctucan_set_btr(struct net_device *ndev, struct can_bittiming *bt, bool nominal)
{ struct ctucan_priv *priv = netdev_priv(ndev); int max_ph1_len = 31;
u32 btr = 0;
u32 prop_seg = bt->prop_seg;
u32 phase_seg1 = bt->phase_seg1;
/* Bit timing may only be changed while the controller is disabled */
if (CTU_CAN_FD_ENABLED(priv)) {
netdev_err(ndev, "BUG! Cannot set bittiming - CAN is enabled\n"); return -EPERM;
}
/* Nominal bit timing allows a wider PH1 field than data bit timing */
if (nominal)
max_ph1_len = 63;
/* The timing calculation functions have only constraints on tseg1, which is prop_seg +
 * phase1_seg combined. tseg1 is then split in half and stored into prog_seg and phase_seg1.
 * In CTU CAN FD, PROP is 6/7 bits wide but PH1 only 6/5, so we must re-distribute the
 * values here.
 */ if (phase_seg1 > max_ph1_len) {
prop_seg += phase_seg1 - max_ph1_len;
phase_seg1 = max_ph1_len;
bt->prop_seg = prop_seg;
bt->phase_seg1 = phase_seg1;
}
/* Some bits fixed:
 * TSTM - Off, User shall not be able to change REC/TEC by hand during operation
 */
mode_reg &= ~REG_MODE_TSTM;
ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
}
/**
 * ctucan_chip_start() - This routine starts the driver
 * @ndev: Pointer to net_device structure
 *
 * Routine expects that chip is in reset state. It setups initial
 * Tx buffers for FIFO priorities, sets bittiming, enables interrupts,
 * switches core to operational mode and changes controller
 * state to %CAN_STATE_STOPPED.
 *
 * Return: 0 on success and failure value on error
 *
 * NOTE(review): truncated in this view - 'int_ena' is OR-ed into below without
 * visible initialization, 'mode_reg'/'err'/'mode' are declared but unused
 * here, and no closing brace or return statement is visible. Confirm against
 * the full file.
 */ staticint ctucan_chip_start(struct net_device *ndev)
{ struct ctucan_priv *priv = netdev_priv(ndev);
u32 int_ena, int_msk;
u32 mode_reg; int err; struct can_ctrlmode mode;
/* Bus error reporting -> Allow Error/Arb.lost interrupts */ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
int_ena |= REG_INT_STAT_ALI |
REG_INT_STAT_BEI;
}
int_msk = ~int_ena; /* Mask all disabled interrupts */
/* It's after reset, so there is no need to clear anything */
ctucan_write32(priv, CTUCANFD_INT_MASK_SET, int_msk);
ctucan_write32(priv, CTUCANFD_INT_ENA_SET, int_ena);
/**
 * ctucan_do_set_mode() - Sets mode of the driver
 * @ndev:	Pointer to net_device structure
 * @mode:	Tells the mode of the driver
 *
 * This check the drivers state and calls the corresponding modes to set.
 *
 * Return: 0 on success and failure value on error
 */
static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
	int ret;

	switch (mode) {
	case CAN_MODE_START:
		/* Restart: bring the core out of reset, re-configure it,
		 * then let the stack queue frames again.
		 */
		ret = ctucan_reset(ndev);
		if (ret < 0)
			return ret;
		ret = ctucan_chip_start(ndev);
		if (ret < 0) {
			netdev_err(ndev, "ctucan_chip_start failed!\n");
			return ret;
		}
		netif_wake_queue(ndev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}
/** * ctucan_get_tx_status() - Gets status of TXT buffer * @priv: Pointer to private data * @buf: Buffer index (0-based) * * Return: Status of TXT buffer
*/ staticenum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf)
{
u32 tx_status = ctucan_read32(priv, CTUCANFD_TX_STATUS); enum ctucan_txtb_status status = (tx_status >> (buf * 4)) & 0x7;
return status;
}
/**
 * ctucan_is_txt_buf_writable() - Checks if frame can be inserted to TXT Buffer
 * @priv: Pointer to private data
 * @buf: Buffer index (0-based)
 *
 * Return: True - Frame can be inserted to TXT Buffer, False - If attempted, frame will not be
 * inserted to TXT Buffer
 *
 * NOTE(review): only the declaration and one local are visible here; the
 * status check and return were lost in this view. Confirm against the full
 * file.
 */ staticbool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf)
{ enum ctucan_txtb_status buf_status;
/**
 * ctucan_start_xmit() - Starts the transmission
 * @skb: sk_buff pointer that contains data to be Txed
 * @ndev: Pointer to net_device structure
 *
 * Invoked from upper layers to initiate transmission. Uses the next available free TXT Buffer and
 * populates its fields to start the transmission.
 *
 * Return: %NETDEV_TX_OK on success, %NETDEV_TX_BUSY when no free TXT buffer is available,
 * negative return values reserved for error cases
 *
 * NOTE(review): this body looks truncated - 'flags' is never initialized by a
 * visible spin_lock_irqsave() although spin_unlock_irqrestore() is called near
 * the end, and the echo-skb bookkeeping / txb_head advance / buffer start
 * command are not visible between frame insertion and the queue-full check.
 * Verify against the full file.
 */ static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{ struct ctucan_priv *priv = netdev_priv(ndev); struct canfd_frame *cf = (struct canfd_frame *)skb->data;
u32 txtb_id; bool ok; unsignedlong flags;
/* Drop frames the CAN core considers invalid for this device */
if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK;
/* The queue should already be stopped when no TXT buffer is free */
if (unlikely(!CTU_CAN_FD_TXTNF(priv))) {
netif_stop_queue(ndev);
netdev_err(ndev, "BUG!, no TXB free when queue awake!\n"); return NETDEV_TX_BUSY;
}
/* Round-robin choice of the next TXT buffer */
txtb_id = priv->txb_head % priv->ntxbufs;
ctucan_netdev_dbg(ndev, "%s: using TXB#%u\n", __func__, txtb_id);
ok = ctucan_insert_frame(priv, cf, txtb_id, can_is_canfd_skb(skb));
if (!ok) {
netdev_err(ndev, "BUG! TXNF set but cannot insert frame into TXTB! HW Bug?");
kfree_skb(skb);
ndev->stats.tx_dropped++; return NETDEV_TX_OK;
}
/* Check if all TX buffers are full */ if (!CTU_CAN_FD_TXTNF(priv))
netif_stop_queue(ndev);
spin_unlock_irqrestore(&priv->tx_lock, flags);
return NETDEV_TX_OK;
}
/**
 * ctucan_read_rx_frame() - Reads frame from RX FIFO
 * @priv: Pointer to CTU CAN FD's private data
 * @cf: Pointer to CAN frame struct
 * @ffw: Previously read frame format word
 *
 * Note: Frame format word must be read separately and provided in 'ffw'.
 *
 * NOTE(review): truncated in this view - 'idw', 'wc' and 'len' are used below
 * without visible initialization; the identifier/DLC parsing portion of the
 * body appears to be missing. Confirm against the full file.
 */ staticvoid ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *cf, u32 ffw)
{
u32 idw; unsignedint i; unsignedint wc; unsignedint len;
/* Data */ for (i = 0; i < len; i += 4) {
u32 data = ctucan_read32(priv, CTUCANFD_RX_DATA);
*(__le32 *)(cf->data + i) = cpu_to_le32(data);
/* Drain any FIFO words beyond the frame's payload length */
} while (unlikely(i < wc * 4)) {
ctucan_read32(priv, CTUCANFD_RX_DATA);
i += 4;
}
}
/**
 * ctucan_rx() - Called from CAN ISR to complete the received frame processing
 * @ndev: Pointer to net_device structure
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It does minimal
 * processing and invokes "netif_receive_skb" to complete further processing.
 * Return: 1 when frame is passed to the network layer, 0 when the first frame word is read but
 * system is out of free SKBs temporally and left code to resolve SKB allocation later,
 * -%EAGAIN in a case of empty Rx FIFO.
 *
 * NOTE(review): only the local declarations are visible here; the body is
 * truncated in this view. Do not rely on this fragment.
 */ staticint ctucan_rx(struct net_device *ndev)
{ struct ctucan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct canfd_frame *cf; struct sk_buff *skb;
u32 ffw;
/**
 * ctucan_err_interrupt() - Error frame ISR
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * This is the CAN error interrupt and it will check the type of error and forward the error
 * frame to upper layers.
 *
 * NOTE(review): truncated in this view - 'skb' and 'cf' are tested and written
 * below without ever being assigned (the error-skb allocation call appears to
 * be missing), and 'state', 'bec' and 'err_capt_alc' are read but otherwise
 * unused here (the fault-state handling was likely lost). Confirm against the
 * full file.
 */ staticvoid ctucan_err_interrupt(struct net_device *ndev, u32 isr)
{ struct ctucan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; enum can_state state; struct can_berr_counter bec;
u32 err_capt_alc; int dologerr = net_ratelimit();
ctucan_get_rec_tec(priv, &bec);
state = ctucan_read_fault_state(priv);
err_capt_alc = ctucan_read32(priv, CTUCANFD_ERR_CAPT);
/* Check for Arbitration Lost interrupt */ if (FIELD_GET(REG_INT_STAT_ALI, isr)) { if (dologerr)
netdev_info(ndev, "arbitration lost\n");
priv->can.can_stats.arbitration_lost++; if (skb) {
cf->can_id |= CAN_ERR_LOSTARB;
cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
}
}
/* Check for Bus Error interrupt */ if (FIELD_GET(REG_INT_STAT_BEI, isr)) {
netdev_info(ndev, "bus error\n");
priv->can.can_stats.bus_error++;
stats->rx_errors++; if (skb) {
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
cf->data[2] = CAN_ERR_PROT_UNSPEC;
cf->data[3] = CAN_ERR_PROT_LOC_UNSPEC;
}
}
/* Hand the assembled error frame to the network stack, if one exists */
if (skb) {
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
netif_rx(skb);
}
}
/**
 * ctucan_rx_poll() - Poll routine for rx packets (NAPI)
 * @napi: NAPI structure pointer
 * @quota: Max number of rx packets to be processed.
 *
 * This is the poll routine for rx part. It will process the packets maximux quota value.
 *
 * Return: Number of packets received
 *
 * NOTE(review): truncated in this view - the data-overrun branch declares
 * 'cf' and 'skb' but the overrun error-frame generation is missing, and no
 * final 'return work_done;' or closing brace is visible. Confirm against the
 * full file.
 */ staticint ctucan_rx_poll(struct napi_struct *napi, int quota)
{ struct net_device *ndev = napi->dev; struct ctucan_priv *priv = netdev_priv(ndev); int work_done = 0;
u32 status;
u32 framecnt; int res = 1;
/* Process frames until the RX FIFO drains, the quota is spent, or ctucan_rx() stalls */
framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS)); while (framecnt && work_done < quota && res > 0) {
res = ctucan_rx(ndev);
work_done++;
framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
}
/* Check for RX FIFO Overflow */
status = ctucan_read32(priv, CTUCANFD_STATUS); if (FIELD_GET(REG_STATUS_DOR, status)) { struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb;
/* Clear Data Overrun */
ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO);
}
if (!framecnt && res != 0) { if (napi_complete_done(napi, work_done)) { /* Clear and enable RBNEI. It is level-triggered, so
 * there is no race condition.
 */
ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_RBNEI);
ctucan_write32(priv, CTUCANFD_INT_MASK_CLR, REG_INT_STAT_RBNEI);
}
}
/* NOTE(review): headerless fragment - the enclosing function's signature is
 * not visible in this view. From the txb_tail/echo-skb handling this looks
 * like the body of the TXT-buffer-done interrupt handler; 'flags', 'txtb_id',
 * 'txtb_status', 'first', 'some_buffers_processed', 'priv', 'ndev' and
 * 'stats' are all declared/assigned outside this fragment. Confirm against
 * the full file.
 */
/* read tx_status
 * if txb[n].finished (bit 2)
 *	if ok -> echo
 *	if error / aborted -> ?? (find how to handle oneshot mode)
 *	txb_tail++
 */ do {
spin_lock_irqsave(&priv->tx_lock, flags);
ctucan_netdev_dbg(ndev, "TXI: TXB#%u: status 0x%x\n", txtb_id, txtb_status);
switch (txtb_status) { case TXT_TOK:
ctucan_netdev_dbg(ndev, "TXT_OK\n");
stats->tx_bytes += can_get_echo_skb(ndev, txtb_id, NULL);
stats->tx_packets++; break; case TXT_ERR: /* This indicated that retransmit limit has been reached. Obviously
 * we should not echo the frame, but also not indicate any kind of
 * error. If desired, it was already reported (possible multiple
 * times) on each arbitration lost.
 */
netdev_warn(ndev, "TXB in Error state\n");
can_free_echo_skb(ndev, txtb_id, NULL);
stats->tx_dropped++; break; case TXT_ABT: /* Same as for TXT_ERR, only with different cause. We *could*
 * re-queue the frame, but multiqueue/abort is not supported yet
 * anyway.
 */
netdev_warn(ndev, "TXB in Aborted state\n");
can_free_echo_skb(ndev, txtb_id, NULL);
stats->tx_dropped++; break; default: /* Bug only if the first buffer is not finished, otherwise it is
 * pretty much expected.
 */ if (first) {
netdev_err(ndev, "BUG: TXB#%u not in a finished state (0x%x)!\n",
txtb_id, txtb_status);
spin_unlock_irqrestore(&priv->tx_lock, flags); /* do not clear nor wake */ return;
} goto clear;
}
priv->txb_tail++;
first = false;
some_buffers_processed = true; /* Adjust priorities *before* marking the buffer as empty. */
ctucan_rotate_txb_prio(ndev);
ctucan_give_txtb_cmd(priv, TXT_CMD_SET_EMPTY, txtb_id);
}
clear:
spin_unlock_irqrestore(&priv->tx_lock, flags);
/* If no buffers were processed this time, we cannot clear - that would introduce
 * a race condition.
 */ if (some_buffers_processed) { /* Clear the interrupt again. We do not want to receive again interrupt for
 * the buffer already handled. If it is the last finished one then it would
 * cause log of spurious interrupt.
 */
ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_TXBHCI);
}
} while (some_buffers_processed);
spin_lock_irqsave(&priv->tx_lock, flags);
/* Check if at least one TX buffer is free */ if (CTU_CAN_FD_TXTNF(priv))
netif_wake_queue(ndev);
spin_unlock_irqrestore(&priv->tx_lock, flags);
}
/**
 * ctucan_interrupt() - CAN Isr
 * @irq: irq number
 * @dev_id: device id pointer
 *
 * This is the CTU CAN FD ISR. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 *
 * NOTE(review): truncated in this view - the error/TX-done interrupt dispatch
 * (for which 'imask' is presumably declared) is missing between the RBNEI
 * branch and the TXT-buffer diagnostic dump, loop variable 'i' is used
 * without a visible declaration, and the function's final return after the
 * bounded loop is not visible. Confirm against the full file.
 */ static irqreturn_t ctucan_interrupt(int irq, void *dev_id)
{ struct net_device *ndev = (struct net_device *)dev_id; struct ctucan_priv *priv = netdev_priv(ndev);
u32 isr, icr;
u32 imask; int irq_loops;
/* Bounded loop so a stuck interrupt source cannot hang the CPU */
for (irq_loops = 0; irq_loops < 10000; irq_loops++) { /* Get the interrupt status */
isr = ctucan_read32(priv, CTUCANFD_INT_STAT);
if (!isr) return irq_loops ? IRQ_HANDLED : IRQ_NONE;
/* Receive Buffer Not Empty Interrupt */ if (FIELD_GET(REG_INT_STAT_RBNEI, isr)) {
ctucan_netdev_dbg(ndev, "RXBNEI\n"); /* Mask RXBNEI the first, then clear interrupt and schedule NAPI. Even if
 * another IRQ fires, RBNEI will always be 0 (masked).
 */
icr = REG_INT_STAT_RBNEI;
ctucan_write32(priv, CTUCANFD_INT_MASK_SET, icr);
ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
napi_schedule(&priv->napi);
}
/* Diagnostic dump of TXT buffer bookkeeping and per-buffer status */
netdev_err(ndev, "txb_head=0x%08x txb_tail=0x%08x\n",
priv->txb_head, priv->txb_tail); for (i = 0; i < priv->ntxbufs; i++) {
u32 status = ctucan_get_tx_status(priv, i);
netdev_err(ndev, "txb[%d] txb status=0x%08x\n", i, status);
}
}
/**
 * ctucan_open() - Driver open routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver open routine.
 * Return: 0 on success and failure value on error
 *
 * NOTE(review): truncated in this view - the success path (napi enable /
 * queue start / 'return 0') and the 'err_reset'/'err_open'/'err_irq'/
 * 'err_chip_start' cleanup labels referenced by the gotos are not visible,
 * and the trailing do_set_bittiming assignments look like they belong to the
 * probe routine rather than to open(). Confirm against the full file.
 */ staticint ctucan_open(struct net_device *ndev)
{ struct ctucan_priv *priv = netdev_priv(ndev); int ret;
/* Power up the device; drop the usage count again on failure */
ret = pm_runtime_get_sync(priv->dev); if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
__func__, ret);
pm_runtime_put_noidle(priv->dev); return ret;
}
ret = ctucan_reset(ndev); if (ret < 0) goto err_reset;
/* Common open */
ret = open_candev(ndev); if (ret) {
netdev_warn(ndev, "open_candev failed!\n"); goto err_open;
}
ret = request_irq(ndev->irq, ctucan_interrupt, priv->irq_flags, ndev->name, ndev); if (ret < 0) {
netdev_err(ndev, "irq allocation for CAN failed\n"); goto err_irq;
}
ret = ctucan_chip_start(ndev); if (ret < 0) {
netdev_err(ndev, "ctucan_chip_start failed!\n"); goto err_chip_start;
}
/* Needed for timing adjustment to be performed as soon as possible */
priv->can.do_set_bittiming = ctucan_set_bittiming;
priv->can.fd.do_set_data_bittiming = ctucan_set_data_bittiming;
The information on this page has been compiled carefully and to the best of our knowledge.
However, no guarantee is given as to the completeness, correctness,
or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.