// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2004, 2007
 * Authors:	Belinda Thompson (belindat@us.ibm.com)
 *		Andy Richter (richtera@us.ibm.com)
 *		Peter Tiedemann (ptiedem@de.ibm.com)
 */
/*
 * This module exports functions to be used by CCS:
 *	EXPORT_SYMBOL(ctc_mpc_alloc_channel);
 *	EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
 *	EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
 *	EXPORT_SYMBOL(ctc_mpc_flow_control);
 */
if (sw != 0) { for ( ; rm > 0; rm--, sw++) { if ((sw == 4) || (sw == 12))
strcat(bhex, " "); if (sw == 8)
strcat(bhex, " ");
strcat(bhex, " ");
strcat(basc, " ");
} if (dup != 0) {
scnprintf(tdup, sizeof(tdup), "Duplicate as above to %s", addr);
ctcm_pr_debug(" --- %s ---\n", tdup);
}
ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
addr, boff, bhex, basc);
} else { if (dup >= 1) {
scnprintf(tdup, sizeof(tdup), "Duplicate as above to %s", addr);
ctcm_pr_debug(" --- %s ---\n", tdup);
} if (dup != 0) {
ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
addr, boff, bhex, basc);
}
}
return;
} /* end of ctcmpc_dumpit */ #endif
#ifdef DEBUGDATA /* * Dump header and first 16 bytes of an sk_buff for debugging purposes. * * skb The sk_buff to dump. * offset Offset relative to skb-data, where to start the dump.
*/ void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
{
__u8 *p = skb->data; struct th_header *header; struct pdu *pheader; int bl = skb->len; int i;
if (p == NULL) return;
p += offset;
header = (struct th_header *)p;
ctcm_pr_debug("dump:\n");
ctcm_pr_debug("skb len=%d \n", skb->len); if (skb->len > 2) { switch (header->th_ch_flag) { case TH_HAS_PDU: break; case 0x00: case TH_IS_XID: if ((header->th_blk_flag == TH_DATA_IS_XID) &&
(header->th_is_xid == 0x01)) goto dumpth; case TH_SWEEP_REQ: goto dumpth; case TH_SWEEP_RESP: goto dumpth; default: break;
}
switch (fsm_getstate(grp->fsm)) { case MPCG_STATE_INOP: /* Group is in the process of terminating */
grp->alloc_called = 1; break; case MPCG_STATE_RESET: /* MPC Group will transition to state */ /* MPCG_STATE_XID2INITW iff the minimum number */ /* of 1 read and 1 write channel have successfully*/ /* activated */ /*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/ if (callback)
grp->send_qllc_disc = 1;
fallthrough; case MPCG_STATE_XID0IOWAIT:
fsm_deltimer(&grp->timer);
grp->outstanding_xid2 = 0;
grp->outstanding_xid7 = 0;
grp->outstanding_xid7_p2 = 0;
grp->saved_xid2 = NULL; if (callback)
ctcm_open(dev);
fsm_event(priv->fsm, DEV_EVENT_START, dev); break; case MPCG_STATE_READY: /* XID exchanges completed after PORT was activated */ /* Link station already active */ /* Maybe timing issue...retry callback */
grp->allocchan_callback_retries++; if (grp->allocchan_callback_retries < 4) { if (grp->allochanfunc)
grp->allochanfunc(grp->port_num,
grp->group_max_buflen);
} else { /* there are problems...bail out */ /* there may be a state mismatch so restart */
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
grp->allocchan_callback_retries = 0;
} break;
}
switch (fsm_getstate(grp->fsm)) { case MPCG_STATE_READY: /* XID exchanges completed after PORT was activated */ /* Link station already active */ /* Maybe timing issue...retry callback */
fsm_deltimer(&grp->timer);
grp->estconn_callback_retries++; if (grp->estconn_callback_retries < 4) { if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, 0,
grp->group_max_buflen);
grp->estconnfunc = NULL;
}
} else { /* there are problems...bail out */
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
grp->estconn_callback_retries = 0;
} break; case MPCG_STATE_INOP: case MPCG_STATE_RESET: /* MPC Group is not ready to start XID - min num of */ /* 1 read and 1 write channel have not been acquired*/
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, "%s(%s): REJECTED - inactive channels",
CTCM_FUNTAIL, dev->name); if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, -1, 0);
grp->estconnfunc = NULL;
} break; case MPCG_STATE_XID2INITW: /* alloc channel was called but no XID exchange */ /* has occurred. initiate xside XID exchange */ /* make sure yside XID0 processing has not started */
if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) ||
(fsm_getstate(wch->fsm) > CH_XID0_PENDING)) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, "%s(%s): ABORT - PASSIVE XID",
CTCM_FUNTAIL, dev->name); break;
}
grp->send_qllc_disc = 1;
fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIT);
fsm_deltimer(&grp->timer);
fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
MPCG_EVENT_TIMER, dev);
grp->outstanding_xid7 = 0;
grp->outstanding_xid7_p2 = 0;
grp->saved_xid2 = NULL; if ((rch->in_mpcgroup) &&
(fsm_getstate(rch->fsm) == CH_XID0_PENDING))
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch); else {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, "%s(%s): RX-%s not ready for ACTIVE XID0",
CTCM_FUNTAIL, dev->name, rch->id); if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, -1, 0);
grp->estconnfunc = NULL;
}
fsm_deltimer(&grp->timer); goto done;
} if ((wch->in_mpcgroup) &&
(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch); else {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, "%s(%s): WX-%s not ready for ACTIVE XID0",
CTCM_FUNTAIL, dev->name, wch->id); if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, -1, 0);
grp->estconnfunc = NULL;
}
fsm_deltimer(&grp->timer); goto done;
} break; case MPCG_STATE_XID0IOWAIT: /* already in active XID negotiations */ default: break;
}
mpcg_state = fsm_getstate(grp->fsm); switch (flowc) { case 1: if (mpcg_state == MPCG_STATE_FLOWC) break; if (mpcg_state == MPCG_STATE_READY) { if (grp->flow_off_called == 1)
grp->flow_off_called = 0; else
fsm_newstate(grp->fsm, MPCG_STATE_FLOWC); break;
} break; case 0: if (mpcg_state == MPCG_STATE_FLOWC) {
fsm_newstate(grp->fsm, MPCG_STATE_READY); /* ensure any data that has accumulated */ /* on the io_queue will now be sen t */
tasklet_schedule(&rch->ch_tasklet);
} /* possible race condition */ if (mpcg_state == MPCG_STATE_READY) {
grp->flow_off_called = 1; break;
} break;
}
if (grp == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, "%s(%s): No MPC group",
CTCM_FUNTAIL, dev->name); return;
}
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE, "%s: %s: GROUP TRANSITIONED TO READY, maxbuf = %d\n",
CTCM_FUNTAIL, dev->name, grp->group_max_buflen);
fsm_newstate(grp->fsm, MPCG_STATE_READY);
/* Put up a read on the channel */
ch = priv->channel[CTCM_READ];
ch->pdu_seq = 0;
CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
__func__, ch->pdu_seq);
ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch); /* Put the write channel in idle state */
ch = priv->channel[CTCM_WRITE]; if (ch->collect_len > 0) {
spin_lock(&ch->collect_lock);
ctcm_purge_skb_queue(&ch->collect_queue);
ch->collect_len = 0;
spin_unlock(&ch->collect_lock);
}
ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch);
ctcm_clear_busy(dev);
/* * Unpack a just received skb and hand it over to * upper layers. * special MPC version of unpack_skb. * * ch The channel where this skb has been received. * pskb The received skb.
*/ staticvoid ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{ struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv; struct mpc_group *grp = priv->mpcg; struct pdu *curr_pdu; struct mpcg_info *mpcginfo; struct th_header *header = NULL; struct th_sweep *sweep = NULL; int pdu_last_seen = 0;
__u32 new_len; struct sk_buff *skb; int skblen; int sendrc = 0;
if (likely(header->th_ch_flag == TH_HAS_PDU)) {
CTCM_PR_DBGDATA("%s: came into th_has_pdu\n", __func__); if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) ||
((fsm_getstate(grp->fsm) == MPCG_STATE_READY) &&
(header->th_seq_num != ch->th_seq_num + 1) &&
(ch->th_seq_num != 0))) { /* This is NOT the next segment * * we are not the correct race winner * * go away and let someone else win * * BUT..this only applies if xid negot * * is done *
*/
grp->out_of_sequence += 1;
__skb_push(pskb, TH_HEADER_LENGTH);
skb_queue_tail(&ch->io_queue, pskb);
CTCM_PR_DBGDATA("%s: th_seq_num expect:%08x " "got:%08x\n", __func__,
ch->th_seq_num + 1, header->th_seq_num);
/* * tasklet helper for mpc's skb unpacking. * * ch The channel to work on. * Allow flow control back pressure to occur here. * Throttling back channel can result in excessive * channel inactivity and system deact of channel
*/ void ctcmpc_bh(unsignedlong thischan)
{ struct channel *ch = (struct channel *)thischan; struct sk_buff *skb; struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv; struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n",
dev->name, smp_processor_id(), __func__, ch->id); /* caller has requested driver to throttle back */ while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) &&
(skb = skb_dequeue(&ch->io_queue))) {
ctcmpc_unpack_skb(ch, skb); if (grp->out_of_sequence > 20) { /* assume data loss has occurred if */ /* missing seq_num for extended */ /* period of time */
grp->out_of_sequence = 0;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); break;
} if (skb == skb_peek(&ch->io_queue)) break;
}
CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
__func__, dev->name, ch, ch->id); return;
}
/*
 * MPC Group Station FSM actions
 * CTCM_PROTO_MPC only
 */
/*
 * NOP action for statemachines.
 * Placeholder for FSM transitions that require no work.
 * BUGFIX: original read "staticvoid" (fused token) — invalid C.
 */
static void mpc_action_nop(fsm_instance *fi, int event, void *arg)
{
}
/*
 * Invoked when the device transitions to dev_stopped.
 * MPC will stop each individual channel if a single XID failure
 * occurs, or will initiate all channels be stopped if a GROUP
 * level failure occurs.
 *
 * fi		An instance of an mpc_group fsm.
 * event	The event, just happened.
 * arg		Generic pointer, cast from net_device upon call.
 */
static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv  *priv;
	struct mpc_group  *grp;
	struct channel	  *wch;

	/* BUGFIX: priv and grp were declared but never initialized before
	 * being dereferenced (uninitialized-pointer UB); also "staticvoid"
	 * was a fused token. */
	priv = dev->ml_priv;
	grp  = priv->mpcg;

	grp->channels_terminating = 1;
	grp->saved_state = fsm_getstate(grp->fsm);
	fsm_newstate(grp->fsm, MPCG_STATE_INOP);
	if (grp->saved_state > MPCG_STATE_XID7INITF)
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
			"%s(%s): MPC GROUP INOPERATIVE",
				CTCM_FUNTAIL, dev->name);
	if ((grp->saved_state != MPCG_STATE_RESET) ||
		/* dealloc_channel has been called */
		(grp->port_persist == 0))
		fsm_deltimer(&priv->restart_timer);

	wch = priv->channel[CTCM_WRITE];

	switch (grp->saved_state) {
	case MPCG_STATE_RESET:
	case MPCG_STATE_INOP:
	case MPCG_STATE_XID2INITW:
	case MPCG_STATE_XID0IOWAIT:
	case MPCG_STATE_XID2INITX:
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID0IOWAIX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
	case MPCG_STATE_XID7INITF:
		break;
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
	default:
		/* group was up: disconnect via the write channel's tasklet */
		tasklet_hi_schedule(&wch->ch_disc_tasklet);
	}

	/* reset all XID negotiation bookkeeping for the group */
	grp->xid2_tgnum = 0;
	grp->group_max_buflen = 0;	/* min of all received */
	grp->outstanding_xid2 = 0;
	grp->outstanding_xid7 = 0;
	grp->outstanding_xid7_p2 = 0;
	grp->saved_xid2 = NULL;
	grp->xidnogood = 0;
	grp->changed_side = 0;

	if (grp->send_qllc_disc == 1) {
		grp->send_qllc_disc = 0;
		mpc_send_qllc_discontact(dev);
	}

	/* DO NOT issue DEV_EVENT_STOP directly out of this code */
	/* This can result in INOP of VTAM PU due to halting of  */
	/* outstanding IO which causes a sense to be returned	 */
	/* Only about 3 senses are allowed and then IOS/VTAM will*/
	/* become unreachable without manual intervention	 */
	if ((grp->port_persist == 1) || (grp->alloc_called)) {
		grp->alloc_called = 0;
		fsm_deltimer(&priv->restart_timer);
		fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_RESTART, dev);
		fsm_newstate(grp->fsm, MPCG_STATE_RESET);
		if (grp->saved_state > MPCG_STATE_XID7INITF)
			CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
				"%s(%s): MPC GROUP RECOVERY SCHEDULED",
					CTCM_FUNTAIL, dev->name);
	} else {
		fsm_deltimer(&priv->restart_timer);
		fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev);
		fsm_newstate(grp->fsm, MPCG_STATE_RESET);
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
			"%s(%s): NO MPC GROUP RECOVERY ATTEMPTED",
				CTCM_FUNTAIL, dev->name);
	}
}
/* * Handle mpc group action timeout. * MPC Group Station FSM action * CTCM_PROTO_MPC only * * fi An instance of an mpc_group fsm. * event The event, just happened. * arg Generic pointer, casted from net_device * upon call.
*/ staticvoid mpc_action_timeout(fsm_instance *fi, int event, void *arg)
/* NOTE(review): this block appears truncated by extraction — "staticvoid" is
 * a fused token, priv/grp/wch/rch below are declared but never initialized
 * before being dereferenced, and no closing brace for the function is
 * visible.  Presumably the missing lines assigned priv = dev->ml_priv,
 * grp = priv->mpcg and wch/rch = priv->channel[CTCM_WRITE/CTCM_READ] —
 * TODO confirm against the original source before shipping. */
{ struct net_device *dev = arg; struct ctcm_priv *priv; struct mpc_group *grp; struct channel *wch; struct channel *rch;
/* While both channels still await XID0 there is nothing to do at timeout:
 * keep waiting for the ATTN interrupt.  Any other state is treated as a
 * group-level failure. */
switch (fsm_getstate(grp->fsm)) { case MPCG_STATE_XID2INITW: /* Unless there is outstanding IO on the */ /* channel just return and wait for ATTN */ /* interrupt to begin XID negotiations */ if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
(fsm_getstate(wch->fsm) == CH_XID0_PENDING)) break;
fallthrough; default:
/* Timeout in any other state: declare the MPC group inoperative. */
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
/*
 * MPC Group Station FSM action.
 * CTCM_PROTO_MPC only.
 *
 * Marks the group for a QLLC DISCONTACT and drives it inoperative,
 * provided the channel, its net_device and the private data all exist.
 */
void mpc_action_discontact(fsm_instance *fi, int event, void *arg)
{
	struct mpcg_info  *mpcginfo = arg;
	struct channel	  *ch	    = mpcginfo->ch;
	struct net_device *dev;
	struct ctcm_priv  *priv;
	struct mpc_group  *grp;

	/* guard clauses: bail out quietly if any link in the chain is gone */
	if (ch == NULL)
		return;
	dev = ch->netdev;
	if (dev == NULL)
		return;
	priv = dev->ml_priv;
	if (priv == NULL)
		return;

	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
			"%s: %s: %s\n",
			CTCM_FUNTAIL, dev->name, ch->id);
	grp = priv->mpcg;
	grp->send_qllc_disc = 1;	/* request DISCONTACT on teardown */
	fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
/* * MPC Group Station - not part of FSM * CTCM_PROTO_MPC only * called from add_channel in ctcm_main.c
*/ void mpc_action_send_discontact(unsignedlong thischan)
{ int rc; struct channel *ch = (struct channel *)thischan; unsignedlong saveflags = 0;
/* convert two 32 bit numbers into 1 64 bit for id compare */
our_id = (__u64)priv->xid->xid2_adj_id;
our_id = our_id << 32;
our_id = our_id + priv->xid->xid2_sender_id;
their_id = (__u64)xid->xid2_adj_id;
their_id = their_id << 32;
their_id = their_id + xid->xid2_sender_id; /* lower id assume the xside role */ if (our_id < their_id) {
grp->roll = XSIDE;
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, "%s(%s): WE HAVE LOW ID - TAKE XSIDE",
CTCM_FUNTAIL, ch->id);
} else {
grp->roll = YSIDE;
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, "%s(%s): WE HAVE HIGH ID - TAKE YSIDE",
CTCM_FUNTAIL, ch->id);
}
} else { if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) {
rc = 3; /* XID REJECTED: xid flag byte4 mismatch */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, "%s(%s): xid flag byte4 mismatch",
CTCM_FUNTAIL, ch->id);
} if (xid->xid2_flag2 == 0x40) {
rc = 4; /* XID REJECTED - xid NOGOOD */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, "%s(%s): xid NOGOOD",
CTCM_FUNTAIL, ch->id);
} if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) {
rc = 5; /* XID REJECTED - Adjacent Station ID Mismatch */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, "%s(%s): Adjacent Station ID Mismatch",
CTCM_FUNTAIL, ch->id);
} if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) {
rc = 6; /* XID REJECTED - Sender Address Mismatch */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, "%s(%s): Sender Address Mismatch",
CTCM_FUNTAIL, ch->id);
}
}
done: if (rc) {
dev_warn(&dev->dev, "The XID used in the MPC protocol is not valid, " "rc = %d\n", rc);
priv->xid->xid2_flag2 = 0x40;
grp->saved_xid2->xid2_flag2 = 0x40;
}
return rc;
}
/* * MPC Group Station FSM action * CTCM_PROTO_MPC only
*/ staticvoid mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
{ struct channel *ch = arg; int rc = 0; int gotlock = 0; unsignedlong saveflags = 0; /* avoids compiler warning with
spin_unlock_irqrestore */
/* * skb data-buffer referencing:
*/
ch->trans_skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0; /* result of the previous 3 statements is NOT always * already set after ctcm_checkalloc_buffer * because of possible reuse of the trans_skb
*/
memset(ch->trans_skb->data, 0, 16);
ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data; /* check is main purpose here: */
skb_put(ch->trans_skb, TH_HEADER_LENGTH);
ch->rcvd_xid = (struct xid2 *)skb_tail_pointer(ch->trans_skb); /* check is main purpose here: */
skb_put(ch->trans_skb, XID2_LENGTH);
ch->rcvd_xid_id = skb_tail_pointer(ch->trans_skb); /* cleanup back to startpoint */
ch->trans_skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
if (!in_hardirq()) { /* Such conditional locking is a known problem for * sparse because its static undeterministic.
* Warnings should be ignored here. */
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
gotlock = 1;
}
switch (fsm_getstate(grp->fsm)) { case MPCG_STATE_XID2INITW: case MPCG_STATE_XID2INITX:
ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD; break; case MPCG_STATE_XID0IOWAIT: case MPCG_STATE_XID0IOWAIX:
ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL; break;
}
fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
return;
}
/* * MPC Group Station FSM action * CTCM_PROTO_MPC only
*/ staticvoid mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
{ struct net_device *dev = arg; struct ctcm_priv *priv = dev->ml_priv; struct mpc_group *grp = NULL; int direction; int send = 0;
if (priv)
grp = priv->mpcg; if (grp == NULL) return;
/* * mpc_action helper of an MPC Group Station FSM action * CTCM_PROTO_MPC only
*/ staticint mpc_send_qllc_discontact(struct net_device *dev)
{ struct sk_buff *skb; struct qllc *qllcptr; struct ctcm_priv *priv = dev->ml_priv; struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s: GROUP STATE: %s\n",
__func__, mpcg_state_names[grp->saved_state]);
switch (grp->saved_state) { /* * establish conn callback function is * preferred method to report failure
*/ case MPCG_STATE_XID0IOWAIT: case MPCG_STATE_XID0IOWAIX: case MPCG_STATE_XID7INITI: case MPCG_STATE_XID7INITZ: case MPCG_STATE_XID2INITW: case MPCG_STATE_XID2INITX: case MPCG_STATE_XID7INITW: case MPCG_STATE_XID7INITX: if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, -1, 0);
grp->estconnfunc = NULL; break;
}
fallthrough; case MPCG_STATE_FLOWC: case MPCG_STATE_READY:
grp->send_qllc_disc = 2;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.