/*
 * NOTE(review): both areas use the s390 debug feature with the hex/ascii
 * view; the numeric arguments (8/4, 1) are presumably area size and number
 * of sub-areas — confirm against DEFINE_STATIC_DEBUG_INFO's definition.
 */
/* Debug trace area intended for all entries in abbreviated form. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
&debug_hex_ascii_view);
/* Error trace area intended for full entries relating to failed requests. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);
/* Lock to protect internal data consistency. */ static DEFINE_SPINLOCK(sclp_lock);
/* Mask of events that we can send to the sclp interface. */ static sccb_mask_t sclp_receive_mask;
/* Mask of events that we can receive from the sclp interface. */ static sccb_mask_t sclp_send_mask;
/* List of registered event listeners and senders. */ static LIST_HEAD(sclp_reg_list);
/* List of queued requests. */ static LIST_HEAD(sclp_req_queue);
/* Data for read and init requests. */ staticstruct sclp_req sclp_read_req; staticstruct sclp_req sclp_init_req; staticvoid *sclp_read_sccb; staticstruct init_sccb *sclp_init_sccb;
/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */ int sclp_console_pages = SCLP_CONSOLE_PAGES; /* Flag to indicate if buffer pages are dropped on buffer full condition */ bool sclp_console_drop = true; /* Number of times the console dropped buffer pages */ unsignedlong sclp_console_full;
/* The currently active SCLP command word. */ static sclp_cmdw_t active_cmd;
/* Timer for request retries. */ staticstruct timer_list sclp_request_timer;
/* Timer for queued requests. */ staticstruct timer_list sclp_queue_timer;
/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;
/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;
/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;
/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;
/*
 * NOTE(review): fragment — the function header for this body was lost in
 * extraction. Judging by the force_restart flag and the timer re-arm, this
 * looks like the request-timeout handler; confirm against the upstream
 * source before reconstructing. Code kept byte-identical.
 */
spin_lock_irqsave(&sclp_lock, flags); if (force_restart) { if (sclp_running_state == sclp_running_state_running) { /* Break running state and queue NOP read event request
* to get a defined interface state. */
__sclp_queue_read_req();
sclp_running_state = sclp_running_state_idle;
}
} else {
/* Non-forced path: retry later via the normal busy-interval timer. */
__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
sclp_request_timeout_normal);
}
spin_unlock_irqrestore(&sclp_lock, flags);
/* Kick the queue outside the lock. */
sclp_process_queue();
}
/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Returns 0 when no queued request carries a timeout. Needs to be
 * called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		/* queue_expires == 0 means "no per-request timeout". */
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}
/* * Returns expired request, if any, and removes it from the list.
*/ staticstruct sclp_req *__sclp_req_queue_remove_expired_req(void)
{ unsignedlong flags, now; struct sclp_req *req;
spin_lock_irqsave(&sclp_lock, flags);
now = jiffies; /* Don't need list_for_each_safe because we break out after list_del */
list_for_each_entry(req, &sclp_req_queue, list) { if (!req->queue_expires) continue; if (time_before_eq(req->queue_expires, now)) { if (req->status == SCLP_REQ_QUEUED) {
req->status = SCLP_REQ_QUEUED_TIMEOUT;
list_del(&req->list); goto out;
}
}
}
req = NULL;
out:
spin_unlock_irqrestore(&sclp_lock, flags); return req;
}
/*
 * NOTE(review): corrupted span — two fragments are fused here. Lines up to
 * "} while (req);" are the head of sclp_req_queue_timeout (its tail, which
 * presumably re-armed sclp_queue_timer, is missing). The SRV1/SRV2 lines
 * onward are the tail of a different, service-call-issuing function whose
 * header and local declarations (command, sccb, rc, srvc_count) are missing.
 * Code kept byte-identical; reconstruct against the upstream source.
 */
/* * Timeout handler for queued requests. Removes request from list and * invokes callback. This timer can be set per request in situations where * waiting too long would be harmful to the system, e.g. during SE reboot.
 */ staticvoid sclp_req_queue_timeout(struct timer_list *unused)
{ unsignedlong flags, expires_next; struct sclp_req *req;
do {
req = __sclp_req_queue_remove_expired_req();
if (req) { /* RQTM: Request timed out (a=sccb, b=summary) */
sclp_trace_req(2, "RQTM", req, true);
}
/* Invoke the request's completion callback outside sclp_lock. */
if (req && req->callback)
req->callback(req, req->callback_data);
} while (req);
/* SRV1: Service call about to be issued (a=command, b=sccb address) */
sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);
rc = sclp_service_call(command, sccb);
/* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);
/* Remember the command so the interrupt handler can match it. */
if (rc == 0)
active_cmd = command;
return rc;
}
/*
 * NOTE(review): truncated — only the local declaration survives; the body
 * of __sclp_start_request was lost in extraction. Kept byte-identical.
 */
/* Try to start a request. Return zero if the request was successfully * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */ staticint
__sclp_start_request(struct sclp_req *req)
{ int rc;
/*
 * NOTE(review): truncated — only the local declarations survive; the body
 * of __sclp_find_req was lost in extraction. Kept byte-identical.
 */
/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */ staticinlinestruct sclp_req *
__sclp_find_req(u32 sccb)
{ struct list_head *l; struct sclp_req *req;
/* Convert interval in jiffies to TOD ticks. */ staticinline u64
sclp_tod_from_jiffies(unsignedlong jiffies)
{ return (u64) (jiffies / HZ) << 32;
}
/*
 * NOTE(review): truncated — only the local declarations survive; the body
 * of sclp_sync_wait was lost in extraction. Kept byte-identical.
 */
/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */ void
sclp_sync_wait(void)
{ unsignedlonglong old_tick; struct ctlreg cr0, cr0_sync; unsignedlong flags; static u64 sync_count;
u64 timeout; int irq_context;
/*
 * NOTE(review): truncated — only the local declarations survive; the body
 * of __sclp_get_mask was lost in extraction. Kept byte-identical.
 */
/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */ staticinlinevoid
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{ struct list_head *l; struct sclp_register *t;
/*
 * NOTE(review): truncated — only the local declarations survive; the body
 * of sclp_remove_processed was lost in extraction. Kept byte-identical.
 */
/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */ int
sclp_remove_processed(struct sccb_header *sccb)
{ struct evbuf_header *evbuf; int unprocessed;
u16 remaining;
/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Clear the event masks; must be done without holding sclp_lock. */
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		/* Mask update failed — stay active. */
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);
/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Re-enable the event masks; must be done without holding sclp_lock. */
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		/* Mask update failed — fall back to inactive. */
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);
/* Handler for external interruption used during initialization. Modify
* request state to done. */ staticvoid sclp_check_handler(struct ext_code ext_code, unsignedint param32, unsignedlong param64)
{
u32 finished_sccb;
inc_irq_stat(IRQEXT_SCP);
finished_sccb = param32 & 0xfffffff8; /* Is this the interrupt we are waiting for? */ if (finished_sccb == 0) return; if (finished_sccb != __pa(sclp_init_sccb))
panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
finished_sccb);
spin_lock(&sclp_lock); if (sclp_running_state == sclp_running_state_running) {
sclp_init_req.status = SCLP_REQ_DONE;
sclp_running_state = sclp_running_state_idle;
}
spin_unlock(&sclp_lock);
}
/*
 * NOTE(review): truncated — only the local declaration survives; the body
 * of sclp_check_timeout was lost in extraction. Kept byte-identical.
 */
/* Initial init mask request timed out. Modify request state to failed. */ staticvoid
sclp_check_timeout(struct timer_list *unused)
{ unsignedlong flags;
/*
 * NOTE(review): truncated — only the local declarations survive; the body
 * of sclp_check_interface was lost in extraction. Kept byte-identical.
 */
/* Perform a check of the SCLP interface. Return zero if the interface is * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */ staticint
sclp_check_interface(void)
{ struct init_sccb *sccb; unsignedlong flags; int retry; int rc;
/*
 * NOTE(review): extraction residue — German website disclaimer text, not
 * part of the driver source. Wrapped in a comment so the file stays valid C;
 * original text preserved below:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */