// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** * * Driver for Option High Speed Mobile Devices. * * Copyright (C) 2008 Option International * Filip Aben <f.aben@option.com> * Denis Joseph Barrow <d.barow@option.com> * Jan Dumon <j.dumon@option.com> * Copyright (C) 2007 Andrew Bird (Sphere Systems Ltd) * <ajb@spheresystems.co.uk> * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (C) 2008 Novell, Inc. *
*****************************************************************************/
/****************************************************************************** * * Description of the device: * * Interface 0: Contains the IP network interface on the bulk end points. * The multiplexed serial ports are using the interrupt and * control endpoints. * Interrupt contains a bitmap telling which multiplexed * serialport needs servicing. * * Interface 1: Diagnostics port, uses bulk only, do not submit urbs until the * port is opened, as this have a huge impact on the network port * throughput. * * Interface 2: Standard modem interface - circuit switched interface, this * can be used to make a standard ppp connection however it * should not be used in conjunction with the IP network interface * enabled for USB performance reasons i.e. if using this set * ideally disable_net=1. *
*****************************************************************************/
/* These definitions are used with the struct hso_net flags element */ /* - use *_bit operations on it. (bit indices not values.) */ #define HSO_NET_RUNNING 0
#define HSO_NET_TX_TIMEOUT (HZ*10)
#define HSO_SERIAL_MAGIC 0x48534f31
/* Number of ttys to handle */ #define HSO_SERIAL_TTY_MINORS 256
#define MAX_RX_URBS 2
/*****************************************************************************/
/* Debugging functions                                                       */
/*****************************************************************************/
/* Emit a debug message when bit(s) in "lvl" are set in the module's
 * "debug" bitmask; output is prefixed with source line and function name. */
#define hso_dbg(lvl, fmt, ...) \
do { \
	if ((lvl) & debug) \
		pr_info("[%d:%s] " fmt, \
			__LINE__, __func__, ##__VA_ARGS__); \
} while (0)
/* Per-port state for one hso serial (tty) port. */
struct hso_serial {
	struct hso_device *parent;	/* owning hso device */
	int magic;			/* HSO_SERIAL_MAGIC sanity marker */
	u8 minor;			/* tty minor (index into serial_table) */

	struct hso_shared_int *shared_int;	/* shared intr endpoint (muxed ports) */

	/* rx/tx urb could be either a bulk urb or a control urb depending
	   on which serial port it is used on. */
	struct urb *rx_urb[MAX_RX_URBS];
	u8 num_rx_urbs;			/* how many rx_urb[] slots are in use */
	u8 *rx_data[MAX_RX_URBS];	/* receive buffer per RX urb */
	u16 rx_data_length;	/* should contain allocated length */

	struct tty_port port;	/* from usb_serial_port */
	spinlock_t serial_lock;	/* protects rx/tx bookkeeping below */

	int (*write_data) (struct hso_serial *serial);	/* port-type-specific tx hook */
	struct hso_tiocmget *tiocmget;	/* modem-line state (modem port only) */
	/* Hacks required to get flow control
	 * working on the serial receive buffers
	 * so as not to drop characters on the floor.
	 */
	int curr_rx_urb_idx;		/* next filled RX urb to drain */
	u8 rx_urb_filled[MAX_RX_URBS];	/* nonzero if rx_urb[i] holds unread data */
	struct tasklet_struct unthrottle_tasklet;	/* resumes RX after tty unthrottle */
};
/* Map a completed RX urb back to its index in serial->rx_urb[].
 * Returns the index, or -1 (with an error log) if the urb does not
 * belong to this port. */
static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb)
{
	int idx;

	for (idx = 0; idx < serial->num_rx_urbs; idx++)
		if (serial->rx_urb[idx] == urb)
			return idx;
	dev_err(serial->parent->dev, "hso_urb_to_index failed\n");
	return -1;
}
/* converts mux value to a port spec value */
static u32 hso_mux_to_port(int mux)
{
	/* Each mux bit selects one multiplexed channel; unknown bits
	 * map to "no port". */
	switch (mux) {
	case 0x1:
		return HSO_PORT_CONTROL;
	case 0x2:
		return HSO_PORT_APP;
	case 0x4:
		return HSO_PORT_PCSC;
	case 0x8:
		return HSO_PORT_GPS;
	case 0x10:
		return HSO_PORT_APP2;
	default:
		return HSO_PORT_NO_PORT;
	}
}
/* converts port spec value to a mux value */
static u32 hso_port_to_mux(int port)
{
	/* Note: these are channel numbers (0..4), not the bitmask values
	 * used by hso_mux_to_port(); unknown ports map to channel 0. */
	switch (port & HSO_PORT_MASK) {
	case HSO_PORT_CONTROL:
		return 0x0;
	case HSO_PORT_APP:
		return 0x1;
	case HSO_PORT_PCSC:
		return 0x2;
	case HSO_PORT_GPS:
		return 0x3;
	case HSO_PORT_APP2:
		return 0x4;
	default:
		return 0x0;
	}
}
staticstruct hso_serial *get_serial_by_shared_int_and_type( struct hso_shared_int *shared_int, int mux)
{ int i, port;
port = hso_mux_to_port(mux);
for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] &&
(dev2ser(serial_table[i])->shared_int == shared_int) &&
((serial_table[i]->port_spec & HSO_PORT_MASK) == port)) { return dev2ser(serial_table[i]);
}
}
spin_lock_irqsave(&serial_table_lock, flags); if (serial_table[index])
serial = dev2ser(serial_table[index]);
spin_unlock_irqrestore(&serial_table_lock, flags);
return serial;
}
/* Claim the first free tty minor slot in serial_table for this port.
 * Returns 0 on success (serial->minor set), -1 when the table is full.
 * Takes serial_table_lock. */
static int obtain_minor(struct hso_serial *serial)
{
	int index;
	unsigned long flags;

	spin_lock_irqsave(&serial_table_lock, flags);
	for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) {
		if (serial_table[index] == NULL) {
			serial_table[index] = serial->parent;
			serial->minor = index;
			spin_unlock_irqrestore(&serial_table_lock, flags);
			return 0;
		}
	}
	spin_unlock_irqrestore(&serial_table_lock, flags);
	pr_err("%s: no free serial devices in table\n", __func__);
	return -1;
}
switch (status) { case -ENODEV:
explanation = "no device"; break; case -ENOENT:
explanation = "endpoint not enabled"; break; case -EPIPE:
explanation = "endpoint stalled"; break; case -ENOSPC:
explanation = "not enough bandwidth"; break; case -ESHUTDOWN:
explanation = "device disabled"; break; case -EHOSTUNREACH:
explanation = "device suspended"; break; case -EINVAL: case -EAGAIN: case -EFBIG: case -EMSGSIZE:
explanation = "internal error"; break; case -EILSEQ: case -EPROTO: case -ETIME: case -ETIMEDOUT:
explanation = "protocol error"; if (hso_dev)
usb_queue_reset_device(hso_dev->interface); break; default:
explanation = "unknown status"; break;
}
/* log a meaningful explanation of an USB status */
hso_dbg(0x1, "%s: received USB status - %s (%d)\n",
function, explanation, status);
}
/* Network interface functions */
/* called when net interface is brought up by ifconfig */
static int hso_net_open(struct net_device *net)
{
	struct hso_net *odev = netdev_priv(net);

	if (!odev) {
		dev_err(&net->dev, "No net device !\n");
		return -ENODEV;
	}

	/* We are up and running. */
	set_bit(HSO_NET_RUNNING, &odev->flags);
	hso_start_net_device(odev->parent);

	/* Tell the kernel we are ready to start receiving from it */
	netif_start_queue(net);

	return 0;
}
/* called when interface is brought down by ifconfig */
static int hso_net_close(struct net_device *net)
{
	struct hso_net *odev = netdev_priv(net);

	/* we don't need the queue anymore */
	netif_stop_queue(net);
	/* no longer running */
	clear_bit(HSO_NET_RUNNING, &odev->flags);
	hso_stop_net_device(odev->parent);

	/* done */
	return 0;
}
/* USB tells is xmit done, we should start the netqueue again */
static void write_bulk_callback(struct urb *urb)
{
	struct hso_net *odev = urb->context;
	int status = urb->status;

	/* Sanity check */
	if (!odev || !test_bit(HSO_NET_RUNNING, &odev->flags)) {
		dev_err(&urb->dev->dev, "%s: device not running\n", __func__);
		return;
	}

	/* Do we still have a valid kernel network device? */
	if (!netif_device_present(odev->net)) {
		dev_err(&urb->dev->dev, "%s: net device not present\n",
			__func__);
		return;
	}

	/* log status, but don't act on it, we don't need to resubmit anything
	 * anyhow */
	if (status)
		handle_usb_error(status, __func__, odev->parent);

	hso_put_activity(odev->parent);

	/* Tell the network interface we are ready for another frame */
	netif_wake_queue(odev->net);
}
/* called by kernel when we need to transmit a packet */ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb, struct net_device *net)
{ struct hso_net *odev = netdev_priv(net); int result;
/* Tell the kernel, "No more frames 'til we are done with this one." */
netif_stop_queue(net); if (hso_get_activity(odev->parent) == -EAGAIN) {
odev->skb_tx_buf = skb; return NETDEV_TX_OK;
}
/* log if asked */
DUMP1(skb->data, skb->len); /* Copy it from kernel memory to OUR memory */
memcpy(odev->mux_bulk_tx_buf, skb->data, skb->len);
hso_dbg(0x1, "len: %d/%d\n", skb->len, MUX_BULK_TX_BUF_SIZE);
/* Fill in the URB for shipping it out. */
usb_fill_bulk_urb(odev->mux_bulk_tx_urb,
odev->parent->usb,
usb_sndbulkpipe(odev->parent->usb,
odev->out_endp->
bEndpointAddress & 0x7F),
odev->mux_bulk_tx_buf, skb->len, write_bulk_callback,
odev);
/* Deal with the Zero Length packet problem, I hope */
odev->mux_bulk_tx_urb->transfer_flags |= URB_ZERO_PACKET;
/* Send the URB on its merry way. */
result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC); if (result) {
dev_warn(&odev->parent->interface->dev, "failed mux_bulk_tx_urb %d\n", result);
net->stats.tx_errors++;
netif_start_queue(net);
} else {
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
}
dev_kfree_skb(skb); /* we're done */ return NETDEV_TX_OK;
}
/* called when a packet did not ack after watchdogtimeout */ staticvoid hso_net_tx_timeout(struct net_device *net, unsignedint txqueue)
{ struct hso_net *odev = netdev_priv(net);
if (!odev) return;
/* Tell syslog we are hosed. */
dev_warn(&net->dev, "Tx timed out.\n");
/* Tear the waiting frame off the list */ if (odev->mux_bulk_tx_urb)
usb_unlink_urb(odev->mux_bulk_tx_urb);
/* Update statistics */
net->stats.tx_errors++;
}
/* make a real packet from the received USB buffer */
/* NOTE(review): this block is truncated/garbled — the IP-header parsing loop
 * and the function's closing brace are missing, temp_bytes is read before any
 * visible assignment, and the tokens "staticvoid"/"unsignedchar"/
 * "unsignedshort" are fused.  Restore the full function from the upstream
 * driver before building; kept verbatim here. */
staticvoid packetizeRx(struct hso_net *odev, unsignedchar *ip_pkt, unsignedint count, unsignedchar is_eop)
{ unsignedshort temp_bytes; unsignedshort buffer_offset = 0; unsignedshort frame_len;
/* Copy the rest of the bytes that are left in the
* buffer into the waiting sk_buf. */ /* Make room for temp_bytes after tail. */
skb_put_data(odev->skb_rx_buf,
ip_pkt + buffer_offset,
temp_bytes);
odev->rx_buf_missing -= temp_bytes;
count -= temp_bytes;
buffer_offset += temp_bytes;
odev->rx_buf_size += temp_bytes; if (!odev->rx_buf_missing) { /* Packet is complete. Inject into stack. */ /* We have IP packet here */
odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP);
skb_reset_mac_header(odev->skb_rx_buf);
/* Ship it off to the kernel */
netif_rx(odev->skb_rx_buf); /* No longer our buffer. */
odev->skb_rx_buf = NULL;
/* update out statistics */
odev->net->stats.rx_packets++;
/* Moving data from usb to kernel (in interrupt state) */
static void read_bulk_callback(struct urb *urb)
{
	struct hso_net *odev = urb->context;
	int result;
	unsigned long flags;
	int status = urb->status;

	/* Sanity check — must come before the status check: the error path
	 * below dereferences odev->parent. */
	if (!odev || !test_bit(HSO_NET_RUNNING, &odev->flags)) {
		hso_dbg(0x1, "BULK IN callback but driver is not active!\n");
		return;
	}

	/* is al ok? (Filip: Who's Al ?) */
	if (status) {
		handle_usb_error(status, __func__, odev->parent);
		return;
	}

	usb_mark_last_busy(urb->dev);

	/* some devices compute the CRC wrongly; repair in place */
	if (odev->parent->port_spec & HSO_INFO_CRC_BUG)
		fix_crc_bug(urb, odev->in_endp->wMaxPacketSize);

	/* do we even have a packet? */
	if (urb->actual_length) {
		/* Handle the IP stream, add header and push it onto network
		 * stack if the packet is complete. */
		spin_lock_irqsave(&odev->net_lock, flags);
		packetizeRx(odev, urb->transfer_buffer, urb->actual_length,
			    (urb->transfer_buffer_length >
			     urb->actual_length) ? 1 : 0);
		spin_unlock_irqrestore(&odev->net_lock, flags);
	}

	/* We are done with this URB, resubmit it. Prep the USB to wait for
	 * another frame. Reuse same as received. */
	usb_fill_bulk_urb(urb,
			  odev->parent->usb,
			  usb_rcvbulkpipe(odev->parent->usb,
					  odev->in_endp->bEndpointAddress & 0x7F),
			  urb->transfer_buffer, MUX_BULK_RX_BUF_SIZE,
			  read_bulk_callback, odev);

	/* Give this to the USB subsystem so it can tell us when more data
	 * arrives. */
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result)
		dev_warn(&odev->parent->interface->dev,
			 "%s failed submit mux_bulk_rx_urb %d\n", __func__,
			 result);
}
/* Serial driver functions */
staticvoid hso_init_termios(struct ktermios *termios)
{ /* * The default requirements for this device are:
*/
termios->c_iflag &=
~(IGNBRK /* disable ignore break */
| BRKINT /* disable break causes interrupt */
| PARMRK /* disable mark parity errors */
| ISTRIP /* disable clear high bit of input characters */
| INLCR /* disable translate NL to CR */
| IGNCR /* disable ignore CR */
| ICRNL /* disable translate CR to NL */
| IXON); /* disable enable XON/XOFF flow control */
/* Re-arm a drained bulk RX urb on the port's IN endpoint and resubmit it.
 * Submission failure is logged but not otherwise recovered here. */
static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb)
{
	int result;

	/* We are done with this URB, resubmit it. Prep the USB to wait for
	 * another frame */
	usb_fill_bulk_urb(urb, serial->parent->usb,
			  usb_rcvbulkpipe(serial->parent->usb,
					  serial->in_endp->bEndpointAddress & 0x7F),
			  urb->transfer_buffer, serial->rx_data_length,
			  hso_std_serial_read_bulk_callback, serial);
	/* Give this to the USB subsystem so it can tell us when more data
	 * arrives. */
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result) {
		dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d\n",
			__func__, result);
	}
}
/* Drain filled RX urbs into the tty in order, resubmitting each urb once
 * fully consumed.  Stops early (returns) when put_rxbuf_data() reports -1
 * (tty throttled); a positive count means a partial drain and the same urb
 * is retried on the next loop iteration. */
static void put_rxbuf_data_and_resubmit_bulk_urb(struct hso_serial *serial)
{
	int count;
	struct urb *curr_urb;

	while (serial->rx_urb_filled[serial->curr_rx_urb_idx]) {
		curr_urb = serial->rx_urb[serial->curr_rx_urb_idx];
		count = put_rxbuf_data(curr_urb, serial);
		if (count == -1)
			return;
		if (count == 0) {
			/* urb fully drained: advance ring index and re-arm */
			serial->curr_rx_urb_idx++;
			if (serial->curr_rx_urb_idx >= serial->num_rx_urbs)
				serial->curr_rx_urb_idx = 0;
			hso_resubmit_rx_bulk_urb(serial, curr_urb);
		}
	}
}
/* NOTE(review): orphaned fragment — its enclosing function header (this
 * resembles the body of put_rxbuf_data_and_resubmit_ctrl_urb in the upstream
 * driver) is missing from this text; kept verbatim. */
urb = serial->rx_urb[0]; if (serial->port.count > 0) {
count = put_rxbuf_data(urb, serial); if (count == -1) return;
} /* Re issue a read as long as we receive data. */
/* read callback for Diag and CS port */
/* NOTE(review): only the header and local declarations survive — the entire
 * function body is missing and "staticvoid"/"unsignedlong" are fused;
 * restore from the upstream driver. */
staticvoid hso_std_serial_read_bulk_callback(struct urb *urb)
{ struct hso_serial *serial = urb->context; int status = urb->status; unsignedlong flags;
/*
 * This needs to be a tasklet otherwise we will
 * end up recursively calling this function.
 */
/* NOTE(review): truncated — the tasklet body is missing and
 * "staticvoid"/"unsignedlong" are fused; kept verbatim. */
staticvoid hso_unthrottle_tasklet(struct tasklet_struct *t)
{ struct hso_serial *serial = from_tasklet(serial, t,
unthrottle_tasklet); unsignedlong flags;
/* open the requested serial port */
/* NOTE(review): the original text was truncated — the tty setup lines,
 * the usb_autopm_put_interface() balance, the err_out label and the
 * closing return were missing (leaving the parent mutex permanently
 * held); reconstructed from the upstream driver. */
static int hso_serial_open(struct tty_struct *tty, struct file *filp)
{
	struct hso_serial *serial = get_serial_by_index(tty->index);
	int result;

	/* sanity check */
	if (serial == NULL || serial->magic != HSO_SERIAL_MAGIC) {
		WARN_ON(1);
		tty->driver_data = NULL;
		hso_dbg(0x1, "Failed to open port\n");
		return -ENODEV;
	}

	mutex_lock(&serial->parent->mutex);
	result = usb_autopm_get_interface(serial->parent->interface);
	if (result < 0)
		goto err_out;

	/* setup */
	tty->driver_data = serial;
	tty_port_tty_set(&serial->port, tty);

	/* check for port already opened, if not set the termios */
	serial->port.count++;
	if (serial->port.count == 1) {
		serial->rx_state = RX_IDLE;
		/* Force default termio settings */
		_hso_serial_set_termios(tty);
		tasklet_setup(&serial->unthrottle_tasklet,
			      hso_unthrottle_tasklet);
		result = hso_start_serial_device(serial->parent, GFP_KERNEL);
		if (result) {
			hso_stop_serial_device(serial->parent);
			serial->port.count--;
		} else {
			kref_get(&serial->parent->ref);
		}
	} else {
		hso_dbg(0x1, "Port was already open\n");
	}

	usb_autopm_put_interface(serial->parent->interface);

err_out:
	mutex_unlock(&serial->parent->mutex);
	return result;
}
/* how much room is there for writing */
/* NOTE(review): garbled merge of two functions — the header/locals of
 * hso_serial_write_room() are fused with the headerless body of the
 * tiocmget interrupt callback (which references undeclared locals
 * "status", "tiocmget", "usb", "interface", "if_num",
 * "serial_state_notification"); kept verbatim — restore both functions
 * from the upstream driver. */
staticunsignedint hso_serial_write_room(struct tty_struct *tty)
{ struct hso_serial *serial = tty->driver_data; unsignedint room; unsignedlong flags;
/* Sanity checks */ if (!serial) return; if (status) {
handle_usb_error(status, __func__, serial->parent); return;
}
/* tiocmget is only supported on HSO_PORT_MODEM */
tiocmget = serial->tiocmget; if (!tiocmget) return;
BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
usb = serial->parent->usb;
interface = serial->parent->interface;
/* wIndex should be the USB interface number of the port to which the * notification applies, which should always be the Modem port.
*/
serial_state_notification = tiocmget->serial_state_notification; if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
serial_state_notification->bNotification != B_NOTIFICATION ||
le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
le16_to_cpu(serial_state_notification->wIndex) != if_num ||
le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) {
dev_warn(&usb->dev, "hso received invalid serial state notification\n");
DUMP(serial_state_notification, sizeof(struct hso_serial_state_notification));
} else { unsignedlong flags;
/* * next few functions largely stolen from drivers/serial/serial_core.c
*/ /* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change * - mask passed in arg for lines of interest * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) * Caller should use TIOCGICOUNT to see which one it was
*/ staticint
hso_wait_modem_status(struct hso_serial *serial, unsignedlong arg)
{
DECLARE_WAITQUEUE(wait, current); struct uart_icount cprev, cnow; struct hso_tiocmget *tiocmget; int ret;
tiocmget = serial->tiocmget; if (!tiocmget) return -ENOENT; /* * note the counters on entry
*/
spin_lock_irq(&serial->serial_lock);
memcpy(&cprev, &tiocmget->icount, sizeof(struct uart_icount));
spin_unlock_irq(&serial->serial_lock);
add_wait_queue(&tiocmget->waitq, &wait); for (;;) {
spin_lock_irq(&serial->serial_lock);
memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount));
spin_unlock_irq(&serial->serial_lock);
set_current_state(TASK_INTERRUPTIBLE); if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd))) {
ret = 0; break;
}
schedule(); /* see if a signal did it */ if (signal_pending(current)) {
ret = -ERESTARTSYS; break;
}
cprev = cnow;
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&tiocmget->waitq, &wait);
return ret;
}
/*
 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
 * Return: write counters to the user passed counter struct
 * NB: both 1->0 and 0->1 transitions are counted except for
 *     RI where only 0->1 is counted.
 */
/* NOTE(review): garbled merge — the hso_get_count() header/locals are fused
 * with the switch body of the ioctl handler (references undeclared "cmd",
 * "arg", "ret"); kept verbatim — restore both functions from the upstream
 * driver. */
staticint hso_get_count(struct tty_struct *tty, struct serial_icounter_struct *icount)
{ struct uart_icount cnow; struct hso_serial *serial = tty->driver_data; struct hso_tiocmget *tiocmget = serial->tiocmget;
if (!serial) return -ENODEV; switch (cmd) { case TIOCMIWAIT:
ret = hso_wait_modem_status(serial, arg); break; default:
ret = -ENOIOCTLCMD; break;
} return ret;
}
/* starts a transmit */
static void hso_kick_transmit(struct hso_serial *serial)
{
	unsigned long flags;
	int res;

	spin_lock_irqsave(&serial->serial_lock, flags);
	if (!serial->tx_buffer_count)
		goto out;

	/* a transmit is already in flight */
	if (serial->tx_urb_used)
		goto out;

	/* Wakeup USB interface if necessary */
	if (hso_get_activity(serial->parent) == -EAGAIN)
		goto out;

	/* Switch pointers around to avoid memcpy */
	swap(serial->tx_buffer, serial->tx_data);
	serial->tx_data_count = serial->tx_buffer_count;
	serial->tx_buffer_count = 0;

	/* If serial->tx_data is set, it means we switched buffers */
	if (serial->tx_data && serial->write_data) {
		res = serial->write_data(serial);
		if (res >= 0)
			serial->tx_urb_used = 1;
	}
out:
	spin_unlock_irqrestore(&serial->serial_lock, flags);
}
/* make a request (for reading and writing data to muxed serial port) */
/* NOTE(review): truncated — only the header and local declarations remain;
 * the body is missing and "staticint" is fused; kept verbatim. */
staticint mux_device_request(struct hso_serial *serial, u8 type, u16 port, struct urb *ctrl_urb, struct usb_ctrlrequest *ctrl_req,
u8 *ctrl_urb_data, u32 size)
{ int result; int pipe;
/* used for muxed serial port callback (muxed serial read) */
static void intr_callback(struct urb *urb)
{
	struct hso_shared_int *shared_int = urb->context;
	struct hso_serial *serial;
	unsigned char *port_req;
	int status = urb->status;
	unsigned long flags;
	int i;

	usb_mark_last_busy(urb->dev);

	/* sanity check */
	if (!shared_int)
		return;

	/* status check */
	if (status) {
		handle_usb_error(status, __func__, NULL);
		return;
	}
	hso_dbg(0x8, "--- Got intr callback 0x%02X ---\n", status);

	/* what request? */
	port_req = urb->transfer_buffer;
	hso_dbg(0x8, "port_req = 0x%.2X\n", *port_req);
	/* loop over all muxed ports to find the one sending this */
	for (i = 0; i < 8; i++) {
		/* max 8 channels on MUX */
		if (*port_req & (1 << i)) {
			serial = get_serial_by_shared_int_and_type(shared_int,
								   (1 << i));
			if (serial != NULL) {
				hso_dbg(0x1, "Pending read interrupt on port %d\n",
					i);
				spin_lock_irqsave(&serial->serial_lock, flags);
				if (serial->rx_state == RX_IDLE &&
				    serial->port.count > 0) {
					/* Setup and send a ctrl req read on
					 * port i */
					if (!serial->rx_urb_filled[0]) {
						serial->rx_state = RX_SENT;
						hso_mux_serial_read(serial);
					} else
						serial->rx_state = RX_PENDING;
				} else {
					hso_dbg(0x1, "Already a read pending on port %d or port not open\n",
						i);
				}
				spin_unlock_irqrestore(&serial->serial_lock,
						       flags);
			}
		}
	}
	/* Resubmit interrupt urb */
	hso_mux_submit_intr_urb(shared_int, urb->dev, GFP_ATOMIC);
}
/* called for writing to muxed serial port */
/* NOTE(review): truncated — only the NULL guard remains; the body is missing
 * and "staticint" is fused; kept verbatim. */
staticint hso_mux_serial_write_data(struct hso_serial *serial)
{ if (NULL == serial) return -EINVAL;
/* write callback for Diag and CS port */
/* NOTE(review): truncated — only the header and locals remain; the body is
 * missing and "staticvoid"/"unsignedlong" are fused; kept verbatim. */
staticvoid hso_std_serial_write_bulk_callback(struct urb *urb)
{ struct hso_serial *serial = urb->context; int status = urb->status; unsignedlong flags;
/* called for writing diag or CS serial port */
/* NOTE(review): middle of the function is missing — in the upstream driver a
 * memcpy into the tx buffer and a usb_fill_bulk_urb() precede the submit;
 * "staticint" is fused.  Kept verbatim. */
staticint hso_std_serial_write_data(struct hso_serial *serial)
{ int count = serial->tx_data_count; int result;
result = usb_submit_urb(serial->tx_urb, GFP_ATOMIC); if (result) {
dev_warn(&serial->parent->usb->dev, "Failed to submit urb - res %d\n", result); return result;
}
return count;
}
/* callback after read or write on muxed serial port */
/* NOTE(review): truncated — only the header and locals remain; the body is
 * missing and "staticvoid"/"unsignedlong" are fused; kept verbatim. */
staticvoid ctrl_callback(struct urb *urb)
{ struct hso_serial *serial = urb->context; struct usb_ctrlrequest *req; int status = urb->status; unsignedlong flags;
/* NOTE(review): orphaned fragments — these lines resemble the tails of
 * hso_start_net_device() (RX urb pool submission) and hso_stop_net_device()
 * (urb kill loop) from the upstream driver, but both function headers are
 * missing and the loop variables are undeclared here; kept verbatim. */
/* Put it out there so the device can send us stuff */
result = usb_submit_urb(hso_net->mux_bulk_rx_urb_pool[i],
GFP_NOIO); if (result)
dev_warn(&hso_dev->usb->dev, "%s failed mux_bulk_rx_urb[%d] %d\n", __func__,
i, result);
}
for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { if (hso_net->mux_bulk_rx_urb_pool[i])
usb_kill_urb(hso_net->mux_bulk_rx_urb_pool[i]);
} if (hso_net->mux_bulk_tx_urb)
usb_kill_urb(hso_net->mux_bulk_tx_urb);
return 0;
}
/* Start a serial port: for non-muxed ports, arm and submit all bulk RX
 * urbs; for muxed ports, submit the shared interrupt urb on first use
 * (refcounted via shared_int->use_count).  Also arms the tiocmget urb
 * when the port supports modem-line signalling.
 * Returns 0 on success or the last usb_submit_urb() error. */
static int hso_start_serial_device(struct hso_device *hso_dev, gfp_t flags)
{
	int i, result = 0;
	struct hso_serial *serial = dev2ser(hso_dev);

	if (!serial)
		return -ENODEV;

	/* If it is not the MUX port fill in and submit a bulk urb (already
	 * allocated in hso_serial_start) */
	if (!(serial->parent->port_spec & HSO_INTF_MUX)) {
		for (i = 0; i < serial->num_rx_urbs; i++) {
			usb_fill_bulk_urb(serial->rx_urb[i],
					  serial->parent->usb,
					  usb_rcvbulkpipe(serial->parent->usb,
							  serial->in_endp->bEndpointAddress & 0x7F),
					  serial->rx_data[i],
					  serial->rx_data_length,
					  hso_std_serial_read_bulk_callback,
					  serial);
			result = usb_submit_urb(serial->rx_urb[i], flags);
			if (result) {
				dev_warn(&serial->parent->usb->dev,
					 "Failed to submit urb - res %d\n",
					 result);
				break;
			}
		}
	} else {
		mutex_lock(&serial->shared_int->shared_int_lock);
		if (!serial->shared_int->use_count) {
			result =
			    hso_mux_submit_intr_urb(serial->shared_int,
						    hso_dev->usb, flags);
		}
		serial->shared_int->use_count++;
		mutex_unlock(&serial->shared_int->shared_int_lock);
	}
	if (serial->tiocmget)
		tiocmget_submit_urb(serial,
				    serial->tiocmget,
				    serial->parent->usb);
	return result;
}
/* NOTE(review): truncated — in the upstream driver this function continues
 * past the RX loop (freeing the TX urb/buffers and destroying the tty port)
 * and the closing brace is missing here; "staticvoid" is fused.  Kept
 * verbatim. */
staticvoid hso_serial_common_free(struct hso_serial *serial)
{ int i;
for (i = 0; i < serial->num_rx_urbs; i++) { /* unlink and free RX URB */
usb_free_urb(serial->rx_urb[i]); /* free the RX buffer */
kfree(serial->rx_data[i]);
}
/* Removes a network device in the network device table */
/* Returns 0 on success, -1 if the device was not found. */
static int remove_net_device(struct hso_device *hso_dev)
{
	int i;

	for (i = 0; i < HSO_MAX_NET_DEVICES; i++) {
		if (network_table[i] == hso_dev) {
			network_table[i] = NULL;
			break;
		}
	}
	if (i == HSO_MAX_NET_DEVICES)
		return -1;
	return 0;
}
/* NOTE(review): orphaned fragment — this is the tail of hso_net_init() from
 * the upstream driver; its header ("static void hso_net_init(struct
 * net_device *net)") and the declarations of "hso_net"/"ops" are missing
 * here; kept verbatim. */
hso_dbg(0x1, "sizeof hso_net is %zu\n", sizeof(*hso_net));
/* fill in the other fields */
net->netdev_ops = &hso_netdev_ops;
net->watchdog_timeo = HSO_NET_TX_TIMEOUT;
net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
net->type = ARPHRD_NONE;
net->mtu = DEFAULT_MTU - 14;
net->tx_queue_len = 10;
net->ethtool_ops = &ops;
/* and initialize the semaphore */
spin_lock_init(&hso_net->net_lock);
}
/* Adds a network device in the network device table */
/* Returns 0 on success, -1 when the table is full. */
static int add_net_device(struct hso_device *hso_dev)
{
	int i;

	for (i = 0; i < HSO_MAX_NET_DEVICES; i++) {
		if (network_table[i] == NULL) {
			network_table[i] = hso_dev;
			break;
		}
	}
	if (i == HSO_MAX_NET_DEVICES)
		return -1;
	return 0;
}
/* NOTE(review): garbled merge — the hso_rfkill_set_block() header/locals are
 * fused with a headerless fragment of hso_create_net_device() (references
 * undeclared "interface", "port_spec", "net", and returns NULL from an
 * int-returning signature); "staticint" is fused.  Kept verbatim — restore
 * both functions from the upstream driver. */
staticint hso_rfkill_set_block(void *data, bool blocked)
{ struct hso_device *hso_dev = data; int enabled = !blocked; int rv;
hso_dev = hso_create_device(interface, port_spec); if (!hso_dev) return NULL;
/* allocate our network device, then we can put in our private data */
/* call hso_net_init to do the basic initialization */
net = alloc_netdev(sizeof(struct hso_net), "hso%d", NET_NAME_UNKNOWN,
hso_net_init); if (!net) {
dev_err(&interface->dev, "Unable to create ethernet device\n"); goto err_hso_dev;
}
/*
 * NOTE(review): the text below is extraction residue from a German web-page
 * disclaimer and is not part of the driver source; wrapped in a comment so
 * the file is not broken by bare prose.  (English: "The information on this
 * website was carefully compiled to the best of our knowledge.  However,
 * neither completeness, correctness, nor quality of the provided information
 * is guaranteed.  Note: the colored syntax display and the measurement are
 * still experimental.")
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */