while ((skb = skb_dequeue(q))) {
refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
}
}
/*
 * NOP action for statemachines.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
{
}
/*
 * Actions for channel - statemachines.
 */
/*
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): only the local declarations of this handler are visible —
 * the body appears truncated by extraction. "staticvoid"/"unsignedlong"
 * are token-merge artifacts; restore from the original source.
 */ staticvoid chx_txdone(fsm_instance *fi, int event, void *arg)
{ struct channel *ch = arg; struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv; struct sk_buff *skb; int first = 1; int i; unsignedlong duration; unsignedlong done_stamp = jiffies;
/*
 * Initial data is sent.
 * Notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): handler body truncated by extraction — only local
 * declarations are visible; restore from the original source.
 */ void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
{ struct channel *ch = arg; struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv;
/*
 * Got normal data, check for sanity, queue it up, allocate new buffer,
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): handler body truncated by extraction — only local
 * declarations are visible; "staticvoid" is a token-merge artifact.
 */ staticvoid chx_rx(fsm_instance *fi, int event, void *arg)
{ struct channel *ch = arg; struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv; int len = ch->max_bufsize - ch->irb->scsw.cmd.count; struct sk_buff *skb = ch->trans_skb;
__u16 block_len = *((__u16 *)skb->data); int check_len; int rc;
/*
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_firstio(fsm_instance *fi, int event, void *arg)
{
	int rc;
	struct channel *ch = arg;
	int fsmstate = fsm_getstate(fi);

	ch->sense_rc = 0;	/* reset unit check report control */
	if (fsmstate == CTC_STATE_TXIDLE)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): remote side issued READ?, init.\n",
				CTCM_FUNTAIL, ch->id);
	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		return;
	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
	    (ch->protocol == CTCM_PROTO_OS390)) {
		/* OS/390 resp. z/OS */
		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
				     CTC_EVENT_TIMER, ch);
			chx_rxidle(fi, event, arg);
		} else {
			struct net_device *dev = ch->netdev;
			struct ctcm_priv *priv = dev->ml_priv;

			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		return;
	}
	/*
	 * Don't setup a timer for receiving the initial RX frame
	 * if in compatibility mode, since VM TCP delays the initial
	 * frame until it has some data to send.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
	    (ch->protocol != CTCM_PROTO_S390))
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;	/* Transfer only length */

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
		ctcm_ccw_check_rc(ch, rc, "init IO");
	}
	/*
	 * If in compatibility mode since we don't setup a timer, we
	 * also signal RX channel up immediately. This enables us
	 * to send packets early which in turn usually triggers some
	 * reply from VM TCP which brings up the RX channel to it's
	 * final state.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
	    (ch->protocol == CTCM_PROTO_S390)) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;

		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	}
}
/*
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): handler body truncated by extraction — only local
 * declarations are visible; "staticvoid" is a token-merge artifact.
 */ staticvoid chx_rxidle(fsm_instance *fi, int event, void *arg)
{ struct channel *ch = arg; struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv;
__u16 buflen; int rc;
/*
 * Set channel into extended mode.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): remainder of this handler truncated by extraction;
 * "staticvoid"/"unsignedlong" are token-merge artifacts.
 */ staticvoid ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
{ struct channel *ch = arg; int rc; unsignedlong saveflags = 0; int timeout = CTCM_TIME_5_SEC;
if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); /* Such conditional locking is undeterministic in
* static view. => ignore sparse warnings here. */
/*
 * Setup channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): handler body truncated by extraction — only local
 * declarations are visible.
 */ staticvoid ctcm_chx_start(fsm_instance *fi, int event, void *arg)
{ struct channel *ch = arg; unsignedlong saveflags; int rc;
/*
 * Shutdown a channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): handler truncated after the timer teardown — the actual
 * halt I/O sequence is missing from this extraction.
 */ staticvoid ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
{ struct channel *ch = arg; unsignedlong saveflags = 0; int rc; int oldstate;
/* Stop any pending channel timer; MPC channels also carry a sweep timer. */
fsm_deltimer(&ch->timer); if (IS_MPC(ch))
fsm_deltimer(&ch->sweep_timer);
/*
 * Cleanup helper for chx_fail and chx_stopped:
 * cleanup channel's queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * state	The next state (depending on caller).
 * ch		The channel to operate on.
 *
 * NOTE(review): helper body truncated by extraction — only local
 * declarations are visible.
 */ staticvoid ctcm_chx_cleanup(fsm_instance *fi, int state, struct channel *ch)
{ struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv;
/*
 * A channel has successfully been halted.
 * Cleanup its queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
{
	/* Delegate queue cleanup and device-FSM notification; end in STOPPED. */
	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}
/*
 * A stop command from device statemachine arrived and we are in
 * not operational mode. Set state to stopped.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
{
	fsm_newstate(fi, CTC_STATE_STOPPED);
}
/*
 * A machine check for no path, not operational status or gone device has
 * happened.
 * Cleanup queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
{
	/* Delegate queue cleanup and device-FSM notification; end in NOTOP. */
	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}
/*
 * Handle error during setup of channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): the tail of this handler (the non-retry error path and the
 * function's closing brace) appears truncated by extraction; "staticvoid"
 * is a token-merge artifact.
 */ staticvoid ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
{ struct channel *ch = arg; struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv;
/*
 * Special case: Got UC_RCRESET on setmode.
 * This means that remote side isn't setup. In this case
 * simply retry after some 10 secs...
 */ if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
((event == CTC_EVENT_UC_RCRESET) ||
(event == CTC_EVENT_UC_RSRESET))) {
fsm_newstate(fi, CTC_STATE_STARTRETRY);
fsm_deltimer(&ch->timer);
/* re-arm retry timer; for a READ channel (non-MPC) also halt pending I/O */
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); if (!IS_MPC(ch) &&
(CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) { int rc = ccw_device_halt(ch->cdev, 0); if (rc != 0)
ctcm_ccw_check_rc(ch, rc, "HaltIO in chx_setuperr");
} return;
}
/*
 * Restart a channel after an error.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	unsigned long saveflags = 0;
	int oldstate;
	int rc;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, "%s: %s[%d] of %s\n",
			CTCM_FUNTAIL, ch->id, event, dev->name);

	fsm_deltimer(&ch->timer);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is a known problem for
		 * sparse because its undeterministic in static view.
		 * Warnings should be ignored here. */
	rc = ccw_device_halt(ch->cdev, 0);
	if (event == CTC_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			/* halt failed for good: revert timer and state */
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
	}
}
/*
 * Handle error during RX initial handshake (exchange of
 * 0-length block header).
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): the closing braces of the else-branch and of the function
 * are missing — fragment truncated by extraction.
 */ staticvoid ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
{ struct channel *ch = arg; struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv;
/* On timeout: retry up to 3 times, then declare RX error and take it down. */
if (event == CTC_EVENT_TIMER) { if (!IS_MPCDEV(dev)) /* TODO : check if MPC deletes timer somewhere */
fsm_deltimer(&ch->timer); if (ch->retry++ < 3)
ctcm_chx_restart(fi, event, arg); else {
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}
} else {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
ctc_ch_event_names[event], fsm_getstate_str(fi));
/*
 * Notify device statemachine if we gave up initialization
 * of RX channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): handler body truncated by extraction — only local
 * declarations are visible.
 */ staticvoid ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
{ struct channel *ch = arg; struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv;
/*
 * MPC actions for mpc channel statemachine.
 * Handling of the MPC protocol requires an extra
 * statemachine and actions which are prefixed ctcmpc_ .
 * The ctc_ch_states and ctc_ch_state_names,
 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions,
 * which are expanded by some elements.
 */
/*
 * Actions for mpc channel statemachine.
 */
/*
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): large parts of this handler were dropped by extraction:
 * the collect-queue dequeue loop header is missing (there is a stray
 * "break" and a closing "}" below with no visible loop), and data_space,
 * p_header and skb are used without any visible initialization. Restore
 * from the original source before relying on this fragment.
 */ staticvoid ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
{ struct channel *ch = arg; struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv; struct mpc_group *grp = priv->mpcg; struct sk_buff *skb; int first = 1; int i;
__u32 data_space; unsignedlong duration; struct sk_buff *peekskb; int rc; struct th_header *header; struct pdu *p_header; unsignedlong done_stamp = jiffies;
CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
__func__, ch->trans_skb->len);
CTCM_PR_DBGDATA("%s: pdu header and data for up" " to 32 bytes sent to vtam\n", __func__);
CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
/* account the transferred skb and release our reference on it */
ch->collect_len -= skb->len;
data_space -= skb->len;
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
peekskb = skb_peek(&ch->collect_queue); if (peekskb->len > data_space) break;
i++;
} /* p_header points to the last one we handled */ if (p_header)
p_header->pdu_flag |= PDU_LAST; /* mark it as the last PDU of the block */
/*
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): handler body truncated by extraction — only local
 * declarations are visible.
 */ staticvoid ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
{ struct channel *ch = arg; struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv; struct mpc_group *gptr = priv->mpcg;
/*
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 *
 * NOTE(review): the body below dispatches on the MPC group state to drive
 * XID0/XID7 negotiation. It may have been merged from a neighbouring ATTN
 * handler during extraction (rc and saveflags are declared but unused in
 * the visible body) — verify against the original source.
 */
void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	int rc;				/* NOTE(review): unused in visible body */
	unsigned long saveflags = 0;	/* avoids compiler warning */

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID2INITW:
		/* ok..start yside xid exchanges */
		if (!ch->in_mpcgroup)
			break;
		if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
			fsm_deltimer(&grp->timer);
			fsm_addtimer(&grp->timer,
				MPC_XID_TIMEOUT_VALUE,
				MPCG_EVENT_TIMER, dev);
			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
		} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			/* attn rcvd before xid0 processed via bh */
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID2INITX:
	case MPCG_STATE_XID0IOWAIT:
	case MPCG_STATE_XID0IOWAIX:
		/* attn rcvd before xid0 processed on ch
		   but mid-xid0 processing for group */
		if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
		switch (fsm_getstate(ch->fsm)) {
		case CH_XID7_PENDING:
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
			break;
		case CH_XID7_PENDING2:
			fsm_newstate(ch->fsm, CH_XID7_PENDING3);
			break;
		}
		fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
		break;
	}

	return;
}
/*
 * ctcmpc channel FSM action
 * called from one point in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID0IOWAIT:
		/* vtam wants to be primary. start yside xid exchanges */
		/* only receive one attn-busy at a time so must not    */
		/* change state each time			       */
		grp->changed_side = 1;
		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
		break;
	case MPCG_STATE_XID2INITW:
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		/* process began via call to establish_conn	 */
		/* so must report failure instead of reverting	 */
		/* back to ready-for-xid passive state		 */
		if (grp->estconnfunc)
			goto done;
		/* this attnbusy is NOT the result of xside xid	 */
		/* collisions so yside must have been triggered	 */
		/* by an ATTN that was not intended to start XID */
		/* processing. Revert back to ready-for-xid and	 */
		/* wait for ATTN interrupt to signal xid start	 */
		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
			fsm_newstate(ch->fsm, CH_XID0_PENDING);
			fsm_deltimer(&grp->timer);
			goto done;
		}
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	case MPCG_STATE_XID2INITX:
		/* XID2 was received before ATTN Busy for second
		   channel. Send yside xid for second channel. */
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		fallthrough;
	case MPCG_STATE_XID0IOWAIX:
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
	default:
		/* multiple attn-busy indicates too out-of-sync	     */
		/* and they are certainly not being received as part */
		/* of valid mpc group negotiations..		     */
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	if (grp->changed_side == 1) {
		fsm_deltimer(&grp->timer);
		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
				MPCG_EVENT_TIMER, dev);
	}
	if (ch->in_mpcgroup)
		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	else
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): channel %s not added to group",
			CTCM_FUNTAIL, dev->name, ch->id);
done:
	return;
}
/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 *
 * NOTE(review): the visible body references "wch" and "skb", neither of
 * which is declared here, and the function never closes — code from a
 * sweep-send helper appears to have been merged in and lines dropped by
 * extraction. Restore from the original source.
 */ staticvoid ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
{ struct channel *ch = arg; struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv; struct mpc_group *grp = priv->mpcg;
if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) { /* give the previous IO time to complete */
fsm_addtimer(&wch->sweep_timer,
200, CTC_EVENT_RSWEEP_TIMER, wch); goto done;
}
skb = skb_dequeue(&wch->sweep_queue); if (!skb) goto done;
/*
 * NOTE(review): stray non-code text (a German website disclaimer) was found
 * here — almost certainly a copy/paste or extraction artifact; kept below as
 * a comment so the file remains compilable. Translation: "The information on
 * this website was carefully compiled to the best of our knowledge. However,
 * neither completeness, nor correctness, nor quality of the provided
 * information is guaranteed. Note: the color syntax highlighting and the
 * measurement are still experimental."
 *
 * Original text:
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */