/*
 * Plan 9 from Bell Labs's /usr/web/sources/xen/xen2/9/xen/ctrl_if.c
 *
 * Copyright © 2021 Plan 9 Foundation.
 * Distributed under the MIT License.
 * (Web-distribution header; see 9p.io for the Plan 9 distribution.)
 */


/******************************************************************************
 * ctrl_if.c
 * 
 * Management functions for special interface to the domain controller.
 * 
 * Copyright (c) 2004, K A Fraser
 */
#include	"u.h"
#include	"../port/lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"
#include	"../port/error.h"
#include "xen.h"
#define	MSGOFFSET(s, m)	(ulong)((((s*)0)->m))

#define LOG(a) 
#define SETUPLOG(a)
extern start_info_t xen_start_info;
/*
 * Only used by initial domain which must create its own control-interface
 * event channel. This value is picked up by the user-space domain controller
 * via an ioctl.
 */
int initdom_ctrlif_domcontroller_port = -1;

static int        ctrl_if_evtchn;
static int        ctrl_if_irq;
static Lock ctrl_if_lock;

static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
static CONTROL_RING_IDX ctrl_if_rx_req_cons;

/* Incoming message requests. */
    /* Primary message type -> message handler. */
static ctrl_msg_handler_t ctrl_if_rxmsg_handler[256];
    /* Primary message type -> callback in process context? */
static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
    /* Queue up messages to be handled in process context. */
static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_cons;

/* Incoming message responses: message identifier -> message handler/id. */
static struct {
    ctrl_msg_handler_t fn;
    unsigned long      id;
} ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];

/* we have three procs, two for the kproc and one for any client
  * of a kproc
  */
Rendez kproc_rx_r, kproc_rx_defer_r;

Lock kproc_rx_defer_l;

#define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
#define TX_FULL(_c)   \
    (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)

/*
 * Interrupt handler for the control-interface event channel: drain any
 * pending incoming requests from the shared ring.  Runs at interrupt
 * time, so the work done in ctrl_rx_flush() must not block.
 */
void
ctrl_if_interrupt(Ureg *, void *) {
	/* Forward declaration fixed to match the definition below, which
	 * takes a (currently unused) void* argument; it used to say (void),
	 * a conflicting type. */
	static void ctrl_rx_flush(void *);

	ctrl_rx_flush(0);
}

/*
 * Kick the domain controller's event channel so it notices
 * newly produced ring entries.
 */
static void ctrl_if_notify_controller(void)
{
    notify_via_evtchn(ctrl_if_evtchn);
}

/*
 * Fallback handler for message types nobody registered for:
 * bounce the request straight back as an empty response.
 */
static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long)
{
    msg->length = 0;
    ctrl_if_send_response(msg);
}


/* Caller must hold ctrl_if_lock. */
/* we should do this in a lockless way, using atomic inc, but that's for 
 * later if we even need it
 */
/*
 * Consume transmit-ring responses produced by the domain controller.
 * For each response whose id is not the 0xFF "no handler" sentinel,
 * run the completion handler registered by ctrl_if_send_message_noblock()
 * and then free its slot in ctrl_if_txmsg_id_mapping[].
 */
static void ctrl_tx_flush(void)
{
    control_if_t *ctrl_if = get_ctrl_if();
    ctrl_msg_t   *msg;
	LOG(dp("TX: start ctrl_if %p\n", ctrl_if));
	    while ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
	    {
		LOG(dp("TX:cons 0x%ulx prod 0x%ulx\n", 
				ctrl_if_tx_resp_cons, 
				ctrl_if->tx_resp_prod));
	        msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
	
	        /* Execute the callback handler, if one was specified. */
	        if ( msg->id != 0xFF )
	        {
	            (*ctrl_if_txmsg_id_mapping[msg->id].fn)(
	                msg, ctrl_if_txmsg_id_mapping[msg->id].id);
	            /* Execute, /then/ free the slot (Linux original had an
	             * smp_mb() here; presumably safe uniprocessor — confirm
	             * if this port ever runs SMP). */
	            ctrl_if_txmsg_id_mapping[msg->id].fn = 0;
	        }
	
	        /*
	         * Step over the message in the ring /after/ finishing reading it. As 
	         * soon as the index is updated then the message may get blown away.
	         */
	        ctrl_if_tx_resp_cons++;
	    }

}

/*
 * Sleep predicate for the rx-defer kproc: true when deferred
 * messages are waiting to be delivered.
 */
int
kproc_rx_defer_kick(void *) {
	return ctrl_if_rxmsg_deferred_prod != ctrl_if_rxmsg_deferred_cons;
}

/*
 * Kernel process that delivers "blocking context" control messages.
 * ctrl_rx_flush() queues messages whose handlers may block onto
 * ctrl_if_rxmsg_deferred[]; this proc dequeues them and runs the
 * registered handler in process context, then sleeps until woken.
 */
static void kproc_rx_defer(void *)
{
    ctrl_msg_t *msg;

    LOG(dp("RXDEFER: start\n"));
    while(1) {
	    while ( ctrl_if_rxmsg_deferred_cons != ctrl_if_rxmsg_deferred_prod )
	    {
		/* was control_msg_t: use the same type as the ring slot
		 * being copied, consistent with the rest of this file */
		ctrl_msg_t copymsg;

		ilock(&kproc_rx_defer_l);
	        msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
	            ctrl_if_rxmsg_deferred_cons++)];
		/* Copy out under the lock so the slot may be reused,
		 * then drop the lock before running the handler — it
		 * is allowed to block. */
		copymsg = *msg;
		iunlock(&kproc_rx_defer_l);
	        (*ctrl_if_rxmsg_handler[copymsg.type])(&copymsg, 0);
	    }
		LOG(dp("RXDEFER: sleep\n"));
	    sleep(&kproc_rx_defer_r, kproc_rx_defer_kick, 0);
		LOG(dp("RXDEFER: woken up\n"));
    }
}

/*
 * Sleep predicate for a receive kproc: true when the controller has
 * produced requests we have not yet consumed.
 */
int
kproc_rx_kick(void *) {
	control_if_t *cif = get_ctrl_if();

	return cif->rx_req_prod != ctrl_if_rx_req_cons;
}

/*
 * Drain incoming control-message requests from the shared ring.
 * Called from ctrl_if_interrupt() at interrupt time.  Each request is
 * copied out of the ring; handlers marked "blocking context" are queued
 * for the kproc_rx_defer kernel process, all others run right here at
 * interrupt time.  The commented-out while(1)/sleep wrapper remains
 * from when this ran as its own kproc (see kproc_rx_kick).
 */
static void ctrl_rx_flush(void *)
{
    control_if_t *ctrl_if = get_ctrl_if();
    ctrl_msg_t    msg, *pmsg;
/*
   LOG(dp("RX: start\n"));
	LOG(dp("ctrl_if is %p\n", ctrl_if));
    while(1) {
*/
	    while ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
	    {
		LOG(dp("kproc_rx: something cons is 0x%ulx, proc 0x%ulx\n",
					ctrl_if_rx_req_cons,
						ctrl_if->rx_req_prod);)
	        pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
	        /* copy the fixed-size header first ... */
	        memmove(&msg, pmsg, MSGOFFSET(ctrl_msg_t, msg));
	        if ( msg.length != 0 )
	            /* ... then only as much payload as the header claims.
	             * NOTE(review): length is controller-supplied; presumably
	             * bounded by sizeof msg.msg — confirm against the ring ABI. */
	            memmove(msg.msg, pmsg->msg, msg.length);
	        LOG(dp("kprox_rx :msg.lenght is %d, type %d\n", msg.length, msg.type));
	        if ( synch_test_bit(msg.type, ctrl_if_rxmsg_blocking_context) )
	        {
		   LOG(dp("RX: It's blocking!\n"));
		   /* handler may block: hand the copy to the rx-defer kproc */
		   ilock(&kproc_rx_defer_l);
	            pmsg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
	                ctrl_if_rxmsg_deferred_prod++)];
	            memmove(pmsg, &msg, MSGOFFSET(ctrl_msg_t, msg) + msg.length);
		    iunlock(&kproc_rx_defer_l);
		    wakeup(&kproc_rx_defer_r);
	        }
	        else
	        {
			LOG(dp("RX: not blocking\n"));
	            /* handler declared safe to run at interrupt time */
	            (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
	        }
	    }
/*
	LOG(dp("RX: sleep\n"));
	sleep(&kproc_rx_r, kproc_rx_kick, 0);
	LOG(dp("RX: woken up\n"));
    }
*/
}

/*
 * Sleep predicate for would-be senders: true when the transmit
 * ring has room for another request.
 */
static int
client_tx_kick(void *) {
	control_if_t *cif = get_ctrl_if();

	return !TX_FULL(cif);
}

/*
 * Queue msg on the transmit ring without blocking.  If hnd is non-nil
 * it is recorded (with id) and will be called from ctrl_tx_flush()
 * when the controller's response arrives; msg->id 0xFF means "no
 * handler".  Returns 0 on success, -1 if the ring is full (caller may
 * retry, e.g. via ctrl_if_send_message_block()).
 */
int ctrl_if_send_message_noblock(
    ctrl_msg_t *msg, 
    ctrl_msg_handler_t hnd,
    unsigned long id)
{
    control_if_t *ctrl_if = get_ctrl_if();
    int           i;

    ilock(&ctrl_if_lock);

    if ( TX_FULL(ctrl_if) )
    {
        iunlock(&ctrl_if_lock);
	LOG(dp("nb:TXF\n"));
        return -1;
    }

    msg->id = 0xFF;
    if ( hnd )
    {
        /*
         * Find a free handler slot.  Because the ring is not full,
         * fewer than CONTROL_RING_SIZE responses are outstanding, so a
         * free slot should exist; bound the scan anyway so a broken
         * invariant cannot walk off the end of the array (the old code
         * scanned unbounded).
         */
        for ( i = 0; i < CONTROL_RING_SIZE && ctrl_if_txmsg_id_mapping[i].fn; i++ )
            continue;
        if ( i == CONTROL_RING_SIZE )
        {
            /* should not happen; treat as "ring full" and let caller retry */
            iunlock(&ctrl_if_lock);
            return -1;
        }
        ctrl_if_txmsg_id_mapping[i].fn = hnd;
        ctrl_if_txmsg_id_mapping[i].id = id;
        msg->id = i;
    }

    memmove(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)], 
           msg, sizeof(*msg));
    /* NOTE(review): Linux original issued wmb() here so the controller
     * cannot see the bumped producer index before the message body;
     * presumably safe uniprocessor — confirm if this port goes SMP. */
    ctrl_if->tx_req_prod++;

    iunlock(&ctrl_if_lock);
	LOG(dp("_noblock: call notify_controller\n"));
    ctrl_if_notify_controller();
	LOG(dp("_noblock: done\n"));
    return 0;
}


/*
 * Send msg on the transmit ring, retrying until space is available.
 * Between attempts we reap completed responses (ctrl_tx_flush) and
 * yield to the hypervisor.  The wait_state argument is accepted for
 * interface compatibility and ignored.  Returns the result of the
 * successful ctrl_if_send_message_noblock() call.
 */
int ctrl_if_send_message_block(
    ctrl_msg_t *msg, 
    ctrl_msg_handler_t hnd, 
    unsigned long id,
    long /*wait_state*/)
{
    int rc;

    /* Fast path: ring had room on the first try. */
    rc = ctrl_if_send_message_noblock(msg, hnd, id);
    if ( rc != -1 )
    {
	LOG(dp("ctrl_if_send_message_block: rc %d\n", rc));
	return rc;
    }

    do
    {
	/* Reap any completed responses to free ring slots. */
	ilock(&ctrl_if_lock);
	ctrl_tx_flush();
	iunlock(&ctrl_if_lock);

	rc = ctrl_if_send_message_noblock(msg, hnd, id);
	if ( rc != -1 )
	    break;

	/* Still full: let the hypervisor run the controller for a while. */
	HYPERVISOR_yield();
    } while ( 1 );

	LOG(dp("ctrl_if_send_message_block: rc %d\n", rc));
    return rc;
}

#ifdef NOT
/*
 * Disabled Linux-era code: registered a task-queue callback to run once
 * transmit-ring space became available.  The Plan 9 port has no task
 * queues (queue_task / ctrl_if_tx_tq are undefined here), so this
 * remains compiled out.
 */
int ctrl_if_enqueue_space_callback(struct tq_struct *task)
{
    control_if_t *ctrl_if = get_ctrl_if();

    /* Fast path. */
    if ( !TX_FULL(ctrl_if) )
        return 0;

    (void)queue_task(task, &ctrl_if_tx_tq);

    /*
     * We may race execution of the task queue, so return re-checked status. If
     * the task is not executed despite the ring being non-full then we will
     * certainly return 'not full'.
     */
 //   smp_mb();
    return TX_FULL(ctrl_if);
}
#endif

/*
 * Queue a response on the receive ring and notify the controller.
 * The response may be the original request modified in place (i.e. the
 * ring slot itself); in that case src == dst and no copy is needed.
 */
void ctrl_if_send_response(ctrl_msg_t *msg)
{
    control_if_t *cif = get_ctrl_if();
    ctrl_msg_t   *slot;

    ilock(&ctrl_if_lock);
    slot = &cif->rx_ring[MASK_CONTROL_IDX(cif->rx_resp_prod)];
    if ( slot != msg )
        memmove(slot, msg, sizeof *msg);
    /* NOTE(review): Linux original issued wmb() before bumping the
     * producer index; revisit if this port ever runs SMP. */
    cif->rx_resp_prod++;
    iunlock(&ctrl_if_lock);

    ctrl_if_notify_controller();
}

/*
 * Install hnd as the handler for incoming control messages of the given
 * primary type.  If flags is CALLBACK_IN_BLOCKING_CONTEXT the handler
 * runs in process context (via the rx-defer kproc) instead of at
 * interrupt time.  Returns nonzero on success, 0 if the type already
 * has a non-default handler installed.
 */
int ctrl_if_register_receiver(
    u8 type, 
    ctrl_msg_handler_t hnd, 
    unsigned int flags)
{
    int taken;

    ilock(&ctrl_if_lock);

    taken = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);
    if ( !taken )
    {
        ctrl_if_rxmsg_handler[type] = hnd;
        /* default to interrupt-time delivery ... */
        synch_clear_bit(type, ctrl_if_rxmsg_blocking_context);
        /* ... unless the caller asked for process context */
        if ( flags == CALLBACK_IN_BLOCKING_CONTEXT )
            synch_set_bit(type, ctrl_if_rxmsg_blocking_context);
    }
    else
    {
        dp("Receiver %p already established for control "
               "messages of type %d.\n", ctrl_if_rxmsg_handler[type], type);
    }

    iunlock(&ctrl_if_lock);

    return !taken;
}

/*
 * Remove hnd as the handler for message type `type', restoring the
 * default bounce-back handler.  Complains (but does nothing) if hnd is
 * not the currently registered handler.  Note: unlike the Linux
 * original, this does not wait for an in-flight invocation of hnd to
 * finish.
 */
void ctrl_if_unregister_receiver(u8 type, ctrl_msg_handler_t hnd)
{
    ilock(&ctrl_if_lock);

    if ( ctrl_if_rxmsg_handler[type] == hnd )
        ctrl_if_rxmsg_handler[type] = ctrl_if_rxmsg_default_handler;
    else
        dp("Receiver %p is not registered for control "
               "messages of type %d.\n", hnd, type);

    iunlock(&ctrl_if_lock);
}

#ifdef NOTNOW
/*
 * Disabled: tear down the control-interface IRQ / event-channel binding
 * before suspend.  free_irq and unbind_evtchn_from_irq are Linux-isms;
 * the Plan 9 port has no suspend path, so this stays compiled out.
 */
void ctrl_if_suspend(void)
{
    free_irq(ctrl_if_irq, 0);
    unbind_evtchn_from_irq(ctrl_if_evtchn);
}

#endif
/*
 * (Re)initialise the control interface: reset the ring consumer
 * indices, bind the controller's event channel (taken from the start
 * info page) to an IRQ, enable the interrupt, and register the console
 * message receiver.  Called at boot in place of a true resume path.
 */
void ctrl_if_resume(void)
{
//	extern int bind_evtchn_to_irq(int evtchn);
	extended_start_info_t *esi = (extended_start_info_t *)&xen_start_info;
	extern void xencons_rx_msg(ctrl_msg_t *msg, unsigned long l);
    ctrl_if_tx_resp_cons = 0;
    ctrl_if_rx_req_cons  = 0;


    /* NOTE(review): the debug print below distrusts the start-info
     * event-channel value; confirm which source is authoritative. */
    ctrl_if_evtchn = esi->domain_controller_evtchn;
	dp("evtchn from start info is %d but that's bullshit ...\n", ctrl_if_evtchn);
    ctrl_if_irq    = bind_evtchn_to_irq(ctrl_if_evtchn, 0);
    dp("evtchn is %d, ctrl_if_irq is %d\n", ctrl_if_evtchn, 
				ctrl_if_irq);
	intrenable(ctrl_if_irq, ctrl_if_interrupt, nil, 0, "Control if interrupt");
	SETUPLOG(dp("ctrl if enabled\n"));
	/* console messages arrive over the control interface */
	ctrl_if_register_receiver(CMSG_CONSOLE, xencons_rx_msg, 0);

}

/*
 * One-time initialisation: point every primary message type at the
 * default handler (which bounces an empty response).  Event-channel
 * binding and interrupt setup are deferred to ctrl_if_resume().
 */
void ctrl_if_init(void)
{
    int t;

    for ( t = 0; t < 256; t++ )
        ctrl_if_rxmsg_handler[t] = ctrl_if_rxmsg_default_handler;
}

/*
 * Start the kernel process that delivers deferred (blocking-context)
 * control messages.  The interrupt-time receive path needs no kproc.
 */
void ctrl_if_kproc(void) {
	print(" STARTING kprocs\n");
	kproc("#|kproc_rx_defer", kproc_rx_defer, 0);
	LOG(dp("Done starting kprocs\n"));
}



/*
 * !! The following are DANGEROUS FUNCTIONS !!
 * Use with care [for example, see xencons_force_flush()].
 */

int ctrl_if_transmitter_empty(void)
{
    return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
}

void ctrl_if_discard_responses(void)
{
    ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
}


/*
 * Bell Labs OSI certified — Powered by Plan 9
 * (Return to Plan 9 Home Page)
 * Copyright © 2021 Plan 9 Foundation. All Rights Reserved.
 * Comments to webmaster@9p.io.
 */