///////////////////////////////////////////////////////////////////////////////////
// File     : kernel_init.c
// Date     : 26/05/2012
// Authors  : alain greiner & mohamed karaoui
// Copyright (c) UPMC-LIP6
///////////////////////////////////////////////////////////////////////////////////
// FIXME
// The kernel_init.c file is part of the GIET-VM nano-kernel.
// It contains the kernel entry point for the second phase of system initialisation:
// all processors jump to _kernel_init, but P[0] goes first, because the other
// processors are blocked until P[0] completes the initialisation of the task
// contexts, vobjs and peripherals.
// All processors in this phase have their MMU activated, because each processor P[i]
// must initialise its SP, SR, PTPR and EPC registers with information stored
// in _scheduler[i].
///////////////////////////////////////////////////////////////////////////////////

// The original header names were lost; the include list below is reconstructed
// from the kernel symbols used in this file (assumption).
#include <giet_config.h>
#include <common.h>
#include <mapping_info.h>
#include <mips32_registers.h>
#include <ctx_handler.h>
#include <irq_handler.h>
#include <sys_handler.h>
#include <vm_handler.h>
#include <hwr_mapping.h>
#include <drivers.h>
#include <mwmr_channel.h>
#include <barrier.h>

#define in_kinit __attribute__((section (".kinit")))

///////////////////////////////////////////////////////////////////////////////////
// arrays of pointers on the page tables
// (both physical and virtual addresses)
///////////////////////////////////////////////////////////////////////////////////
__attribute__((section (".kdata"))) unsigned int _kernel_ptabs_paddr[GIET_NB_VSPACE_MAX];
__attribute__((section (".kdata"))) unsigned int _kernel_ptabs_vaddr[GIET_NB_VSPACE_MAX];

///////////////////////////////////////////////////////////////////////////////////
// declarations required to avoid forward references
///////////////////////////////////////////////////////////////////////////////////
void _kernel_ptabs_init(void);
void _kernel_vobjs_init(void);
void _kernel_tasks_init(void);
void _kernel_peripherals_init(void);
void _kernel_interrupt_vector_init(void);
void _kernel_start_all_procs(void);

//////////////////////////////////////////////////////////////////////////////////
// This function is the entry point for the second step of the boot sequence.
//////////////////////////////////////////////////////////////////////////////////
in_kinit void _kernel_init()
{
    // values to be written in registers
    unsigned int sp_value;
    unsigned int sr_value;
    unsigned int ptpr_value;
    unsigned int epc_value;

    unsigned int pid = _procid();

    // only processor 0 executes the system initialisation
    if ( pid == 0 )
    {
        _kernel_ptabs_init();
        _kernel_vobjs_init();       /* must be called after the initialisation of ptabs */
        _kernel_tasks_init();
        _kernel_interrupt_vector_init();
        _kernel_peripherals_init();
        _kernel_start_all_procs();
    }

    // Each processor initialises its SP, SR, PTPR and EPC registers
    // from the values defined in _scheduler[pid], starts its private
    // context-switch timer (if more than one task is allocated),
    // and jumps to user code.
    // It does nothing, and keeps idle, if no task is allocated.
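
    // The code below assumes the following minimal scheduler layout (sketch only:
    // the actual static_scheduler_t is defined in the GIET-VM headers, and
    // CTX_SIZE is a placeholder name for the number of context slots):
    //
    //     typedef struct static_scheduler_s
    //     {
    //         unsigned int tasks;     // number of tasks allocated to the processor
    //         unsigned int current;   // index of the currently scheduled task
    //         unsigned int context[GIET_NB_TASKS_MAX][CTX_SIZE];  // per-task slots
    //     } static_scheduler_t;
    //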
    static_scheduler_t* sched = &_scheduler[pid];

    if ( sched->tasks )             // at least one task allocated
    {
        // initialise registers
        sp_value   = sched->context[0][CTX_SP_ID];
        sr_value   = sched->context[0][CTX_SR_ID];
        ptpr_value = sched->context[0][CTX_PTPR_ID];
        epc_value  = sched->context[0][CTX_EPC_ID];

        // start TICK timer
        if ( sched->tasks > 1 )
        {
            unsigned int cluster_id = pid / NB_PROCS;
            unsigned int proc_id    = pid % NB_PROCS;
            _timer_write( cluster_id, proc_id, TIMER_PERIOD, GIET_TICK_VALUE );
            _timer_write( cluster_id, proc_id, TIMER_MODE  , 0x3 );
        }
    }
    else                            // no task allocated
    {
        _get_lock( &_tty_put_lock );
        _puts("\n No task allocated to processor ");
        _putw( pid );
        _puts(" => keep idle\n");
        _release_lock ( &_tty_put_lock );

        // enable interrupts in kernel mode
        asm volatile ( "li   $26, 0xFF01   \n"
                       "mtc0 $26, $12      \n"
                       ::: "$26" );

        // infinite loop in kernel mode
        while (1) asm volatile("nop");
    }

    asm volatile ( "move $29, %0   \n"      /* SP   <= ctx[CTX_SP_ID]   */
                   "mtc0 %1,  $12  \n"      /* SR   <= ctx[CTX_SR_ID]   */
                   "mtc2 %2,  $0   \n"      /* PTPR <= ctx[CTX_PTPR_ID] */
                   "mtc0 %3,  $14  \n"      /* EPC  <= ctx[CTX_EPC_ID]  */
                   "eret           \n"      /* jump to user code        */
                   "nop            \n"
                   :
                   : "r"(sp_value), "r"(sr_value), "r"(ptpr_value), "r"(epc_value) );

} // end _kernel_init()

//////////////////////////////////////////////////////////////////////////////////
// This function wakes up all processors.
// It should be executed by P[0] when the kernel initialisation is done.
//////////////////////////////////////////////////////////////////////////////////
in_kinit void _kernel_start_all_procs()
{
    mapping_header_t* header = (mapping_header_t*)&seg_mapping_base;

    _puts("\n[INIT] Starting parallel execution at cycle : ");
    _putw( _proctime() );
    _puts("\n");

    header->signature = OUT_MAPPING_SIGNATURE;
}

//////////////////////////////////////////////////////////////////////////////////
// _eret()
// The address of this function is used to initialise the return address (RA)
// in all task contexts (when the task has never been executed).
//////////////////////////////////////////////////////////////////////////////////
in_kinit void _eret()
{
    asm volatile("eret \n"
                 "nop");
}

///////////////////////////////////////////////////////////////////////////////
// This function sets the PTPR register with the page table base address
// of a given vspace: it is used to access user space.
///////////////////////////////////////////////////////////////////////////////
void _set_ptpr(unsigned int vspace_id)
{
    unsigned int ptpr = ((unsigned int)_kernel_ptabs_paddr[vspace_id]) >> 13;
    asm volatile("mtc2 %0, $0"::"r"(ptpr));
}

///////////////////////////////////////////////////////////////////////////////
// This function initialises the _kernel_ptabs_paddr[] and _kernel_ptabs_vaddr[]
// arrays, indexed by the vspace_id and containing the base addresses of all
// page tables. The _kernel_ptabs_paddr[] array is used to initialise the
// task contexts.
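// The physical base address is later converted to the PTPR format (the page
// table base address divided by 8 Kbytes, i.e. paddr >> 13) by _set_ptpr()
// above, and when filling the CTX_PTPR_ID slot of each task context.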
///////////////////////////////////////////////////////////////////////////////
in_kinit void _kernel_ptabs_init()
{
    mapping_header_t*  header = (mapping_header_t*)&seg_mapping_base;
    mapping_vspace_t*  vspace = _get_vspace_base( header );
    mapping_vobj_t*    vobj   = _get_vobj_base( header );

    unsigned int vspace_id;
    unsigned int vobj_id;

    // loop on the vspaces
    for ( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ )
    {
        char ptab_found = 0;

#if INIT_DEBUG_CTX
        _puts("[INIT] --- ptab initialisation in vspace ");
        _puts(vspace[vspace_id].name);
        _puts("\n");
#endif
        // loop on the vobjs to find the page table
        for ( vobj_id = vspace[vspace_id].vobj_offset ;
              vobj_id < (vspace[vspace_id].vobj_offset + vspace[vspace_id].vobjs) ;
              vobj_id++ )
        {
            if ( vobj[vobj_id].type == VOBJ_TYPE_PTAB )
            {
                if ( ptab_found )
                {
                    _puts("\n[INIT ERROR] Only one PTAB allowed per vspace ");
                    _putw( vspace_id );
                    _exit();
                }
                ptab_found = 1;
                _kernel_ptabs_paddr[vspace_id] = vobj[vobj_id].paddr;
                _kernel_ptabs_vaddr[vspace_id] = vobj[vobj_id].vaddr;

#if INIT_DEBUG_CTX
                _puts("[INIT] PTAB address = ");
                _putw(_kernel_ptabs_paddr[vspace_id]);
                _puts("\n");
#endif
            }
        }
        if ( !ptab_found )
        {
            _puts("\n[INIT ERROR] Missing PTAB for vspace ");
            _putw( vspace_id );
            _exit();
        }
    }

    _puts("\n[INIT] Ptabs initialisation completed at cycle : ");
    _putw( _proctime() );
    _puts("\n");

} // end _kernel_ptabs_init()

///////////////////////////////////////////////////////////////////////////////
// This function initialises all private vobjs defined in the vspaces,
// such as MWMR channels, barriers and locks, depending on the vobj type.
// (Most of the vobjs are not known to, and not initialised by, the compiler.)
///////////////////////////////////////////////////////////////////////////////
in_kinit void _kernel_vobjs_init()
{
    mapping_header_t*  header = (mapping_header_t*)&seg_mapping_base;
    mapping_vspace_t*  vspace = _get_vspace_base( header );
    mapping_vobj_t*    vobj   = _get_vobj_base( header );

    unsigned int vspace_id;
    unsigned int vobj_id;

    // loop on the vspaces
    for ( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ )
    {
        char ptab_found = 0;

#if INIT_DEBUG_CTX
        _puts("[INIT] --- vobjs initialisation in vspace ");
        _puts(vspace[vspace_id].name);
        _puts("\n");
#endif
        // loop on the vobjs to get the page table (as in _kernel_ptabs_init)
        for ( vobj_id = vspace[vspace_id].vobj_offset ;
              vobj_id < (vspace[vspace_id].vobj_offset + vspace[vspace_id].vobjs) ;
              vobj_id++ )
        {
            if ( vobj[vobj_id].type == VOBJ_TYPE_PTAB )
            {
                if ( ptab_found )
                {
                    _puts("\n[INIT ERROR] Only one PTAB allowed per vspace ");
                    _putw( vspace_id );
                    _exit();
                }
                ptab_found = 1;
                _kernel_ptabs_paddr[vspace_id] = vobj[vobj_id].paddr;
                _kernel_ptabs_vaddr[vspace_id] = vobj[vobj_id].vaddr;

#if INIT_DEBUG_CTX
                _puts("[INIT] PTAB address = ");
                _putw(_kernel_ptabs_paddr[vspace_id]);
                _puts("\n");
#endif
            }
        }
        if ( !ptab_found )
        {
            _puts("\n[INIT ERROR] Missing PTAB for vspace ");
            _putw( vspace_id );
            _exit();
        }

        /** Set the current vspace PTPR to initialise the vobjs */
        _set_ptpr(vspace_id);

        // loop on the vobjs to initialise them depending on their type
        for ( vobj_id = vspace[vspace_id].vobj_offset ;
              vobj_id < (vspace[vspace_id].vobj_offset + vspace[vspace_id].vobjs) ;
              vobj_id++ )
        {
            switch( vobj[vobj_id].type )
            {
                case VOBJ_TYPE_PTAB:    // page table pointers array
                {
                    break;              // already handled above
                }
                case VOBJ_TYPE_MWMR:    // storage capacity is (vobj.length/4 - 5) words
                {
                    mwmr_channel_t* mwmr = (mwmr_channel_t*)(vobj[vobj_id].vaddr);
                    mwmr->ptw   = 0;
                    mwmr->ptr   = 0;
                    mwmr->sts   = 0;
                    mwmr->depth = (vobj[vobj_id].length >> 2) - 5;
                    mwmr->width = vobj[vobj_id].init;
                    mwmr->lock  = 0;
#if INIT_DEBUG_CTX
                    _puts("[INIT] MWMR channel ");
                    _puts( vobj[vobj_id].name );
                    _puts(" / depth = ");
                    _putw( mwmr->depth );
                    _puts("\n");
#endif
                    break;
                }
                case VOBJ_TYPE_ELF:     // initialisation done by the loader
                {
#if INIT_DEBUG_CTX
                    _puts("[INIT] ELF section ");
                    _puts( vobj[vobj_id].name );
                    _puts(" / length = ");
                    _putw( vobj[vobj_id].length );
                    _puts("\n");
#endif
                    break;
                }
                case VOBJ_TYPE_BARRIER: // init is the number of participants
                {
                    giet_barrier_t* barrier = (giet_barrier_t*)(vobj[vobj_id].vaddr);
                    barrier->count = 0;
                    barrier->init  = vobj[vobj_id].init;
#if INIT_DEBUG_CTX
                    _puts(" BARRIER ");
                    _puts( vobj[vobj_id].name );
                    _puts(" / init_value = ");
                    _putw( barrier->init );
                    _puts("\n");
#endif
                    break;
                }
                case VOBJ_TYPE_LOCK:    // init is "not taken"
                {
                    unsigned int* lock = (unsigned int*)(vobj[vobj_id].vaddr);
                    *lock = 0;
#if INIT_DEBUG_CTX
                    _puts(" LOCK ");
                    _puts( vobj[vobj_id].name );
                    _puts("\n");
#endif
                    break;
                }
                case VOBJ_TYPE_BUFFER:  // nothing to do
                {
#if INIT_DEBUG_CTX
                    _puts(" BUFFER ");
                    _puts( vobj[vobj_id].name );
                    _puts(" / length = ");
                    _putw( vobj[vobj_id].length );
                    _puts("\n");
#endif
                    break;
                }
                default:
                {
                    _puts("\n[INIT ERROR] illegal vobj of name ");
                    _puts( vobj[vobj_id].name );
                    _puts(" in vspace ");
                    _puts( vspace[vspace_id].name );
                    _puts("\n ");
                    _exit();
                }
            } // end switch type
        } // end loop on vobjs
    } // end loop on vspaces

    _puts("\n[INIT] Vobjs initialisation completed at cycle : ");
    _putw( _proctime() );
    _puts("\n");

} // end _kernel_vobjs_init()

///////////////////////////////////////////////////////////////////////////////
// This function maps a given task, defined in a given vspace,
// on the processor allocated in the mapping_info structure,
// and initialises the task context.
// There is one scheduler per processor, and a processor can be shared
// by several applications running in different vspaces.
// Each scheduler handles one private context array.
//
// The following values must be initialised in all task contexts:
// - sp     stack pointer = stack_base + stack_length
// - ra     return address = &_eret
// - epc    start address = start_vector[task->startid]
// - sr     status register = 0xFF13
// - tty    TTY terminal index (global index)
// - fb     FB_DMA channel index (global index)
// - ptpr   page table base address / 8K
// - mode   mmu_mode = 0xF (TLBs and caches activated)
// - ptab   page table virtual address
////////////////////////////////////////////////////////////////////////////////
in_kinit void _task_map( unsigned int task_id,    // global index
                         unsigned int vspace_id,  // global index
                         unsigned int tty_id,     // TTY index
                         unsigned int fbdma_id )  // FBDMA index
{
    mapping_header_t*  header = (mapping_header_t*)&seg_mapping_base;
    mapping_task_t*    task   = _get_task_base(header);
    mapping_vspace_t*  vspace = _get_vspace_base(header);
    mapping_vobj_t*    vobj   = _get_vobj_base( header );

    /** Set the current vspace PTPR before accessing user memory */
    _set_ptpr(vspace_id);

    // values to be initialised in the task context
    unsigned int ra   = (unsigned int)&_eret;
    unsigned int sr   = 0x0000FF13;
    unsigned int tty  = tty_id;
    unsigned int fb   = fbdma_id;
    unsigned int ptpr = _kernel_ptabs_paddr[vspace_id] >> 13;
    unsigned int ptab = _kernel_ptabs_vaddr[vspace_id];
    unsigned int mode = 0xF;
    unsigned int sp;
    unsigned int epc;

    // EPC : get the (virtual) base address of the start_vector containing
    // the start addresses of all tasks defined in the vspace.
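    // (Assumption based on the code below: the "start vector" is the vobj found
    // at index vspace.vobj_offset + vspace.start_offset, holding one user-space
    // entry address per task, indexed by task->startid.)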
    mapping_vobj_t* vobj_data = &vobj[vspace[vspace_id].vobj_offset +
                                      vspace[vspace_id].start_offset];
    unsigned int* start_vector = (unsigned int*)vobj_data->vaddr;
    epc = start_vector[task[task_id].startid];

    // SP : get the vobj containing the stack
    unsigned int vobj_id = task[task_id].vobjlocid + vspace[vspace_id].vobj_offset;
    sp = vobj[vobj_id].vaddr + vobj[vobj_id].length;

    // compute the global processor index
    unsigned int proc_id = task[task_id].clusterid * NB_PROCS + task[task_id].proclocid;

    // compute and check the local task index
    unsigned int ltid = _scheduler[proc_id].tasks;
    if ( ltid >= GIET_NB_TASKS_MAX )
    {
        _puts("\n[INIT ERROR] : too many tasks allocated to processor ");
        _putw( proc_id );
        _puts("\n");
        _exit();
    }

    // update the number of tasks allocated to the scheduler
    _scheduler[proc_id].tasks = ltid + 1;

    // initialise the task context
    _scheduler[proc_id].context[ltid][CTX_SR_ID]    = sr;
    _scheduler[proc_id].context[ltid][CTX_SP_ID]    = sp;
    _scheduler[proc_id].context[ltid][CTX_RA_ID]    = ra;
    _scheduler[proc_id].context[ltid][CTX_EPC_ID]   = epc;
    _scheduler[proc_id].context[ltid][CTX_PTPR_ID]  = ptpr;
    _scheduler[proc_id].context[ltid][CTX_MODE_ID]  = mode;
    _scheduler[proc_id].context[ltid][CTX_TTY_ID]   = tty;
    _scheduler[proc_id].context[ltid][CTX_FBDMA_ID] = fb;
    _scheduler[proc_id].context[ltid][CTX_PTAB_ID]  = ptab;
    _scheduler[proc_id].context[ltid][CTX_TASK_ID]  = task_id;

#if INIT_DEBUG_CTX
    _puts("Task ");
    _puts( task[task_id].name );
    _puts(" allocated to processor ");
    _putw( proc_id );
    _puts(" / ltid = ");
    _putw( ltid );
    _puts("\n");

    _puts(" - SR   = ");
    _putw( sr );
    _puts(" saved at ");
    _putw( (unsigned int)&_scheduler[proc_id].context[ltid][CTX_SR_ID] );
    _puts("\n");
    _puts(" - RA   = ");
    _putw( ra );
    _puts(" saved at ");
    _putw( (unsigned int)&_scheduler[proc_id].context[ltid][CTX_RA_ID] );
    _puts("\n");
    _puts(" - SP   = ");
    _putw( sp );
    _puts(" saved at ");
    _putw( (unsigned int)&_scheduler[proc_id].context[ltid][CTX_SP_ID] );
    _puts("\n");
    _puts(" - EPC  = ");
    _putw( epc );
    _puts(" saved at ");
    _putw( (unsigned int)&_scheduler[proc_id].context[ltid][CTX_EPC_ID] );
    _puts("\n");
    _puts(" - PTPR = ");
    _putw( ptpr << 13 );
    _puts(" saved at ");
    _putw( (unsigned int)&_scheduler[proc_id].context[ltid][CTX_PTPR_ID] );
    _puts("\n");
    _puts(" - TTY  = ");
    _putw( tty );
    _puts(" saved at ");
    _putw( (unsigned int)&_scheduler[proc_id].context[ltid][CTX_TTY_ID] );
    _puts("\n");
    _puts(" - FB   = ");
    _putw( fb );
    _puts(" saved at ");
    _putw( (unsigned int)&_scheduler[proc_id].context[ltid][CTX_FBDMA_ID] );
    _puts("\n");
    _puts(" - PTAB = ");
    _putw( ptab );
    _puts(" saved at ");
    _putw( (unsigned int)&_scheduler[proc_id].context[ltid][CTX_PTAB_ID] );
    _puts("\n");
#endif

} // end _task_map()

///////////////////////////////////////////////////////////////////////////////
// This function initialises all task contexts and processor schedulers.
// It sets the default values for all schedulers (tasks = 0, current = 0).
// Then it scans all tasks in all vspaces to initialise the schedulers
// and the task contexts, as defined in the mapping_info data structure.
// A global TTY index and a global FB channel index are allocated if required.
// TTY[0] is reserved for the kernel.
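// The allocators below therefore start with base_tty_id = 1 (TTY[0] being the
// kernel console), while FB DMA channels are allocated from base_fb_id = 0.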
///////////////////////////////////////////////////////////////////////////////
in_kinit void _kernel_tasks_init()
{
    mapping_header_t*   header  = (mapping_header_t*)&seg_mapping_base;
    mapping_cluster_t*  cluster = _get_cluster_base( header );
    mapping_vspace_t*   vspace  = _get_vspace_base( header );
    mapping_task_t*     task    = _get_task_base( header );

    unsigned int base_tty_id = 1;   // TTY index allocator
    unsigned int base_fb_id  = 0;   // FB channel index allocator

    unsigned int cluster_id;
    unsigned int proc_id;
    unsigned int vspace_id;
    unsigned int task_id;

    // initialise the schedulers (not done by the compiler)
    for ( cluster_id = 0 ; cluster_id < header->clusters ; cluster_id++ )
    {
        for ( proc_id = 0 ; proc_id < cluster[cluster_id].procs ; proc_id++ )
        {
            if ( proc_id >= NB_PROCS )
            {
                _puts("\n[INIT ERROR] The number of processors in cluster ");
                _putw( cluster_id );
                _puts(" is larger than NB_PROCS \n");
                _exit();
            }
            _scheduler[cluster_id*NB_PROCS+proc_id].tasks   = 0;
            _scheduler[cluster_id*NB_PROCS+proc_id].current = 0;
        }
    }

    // loop on the virtual spaces
    for ( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ )
    {

#if INIT_DEBUG_CTX
        _puts("\n[INIT] mapping tasks in vspace ");
        _puts(vspace[vspace_id].name);
        _puts("\n");
#endif
        // loop on the tasks
        for ( task_id = vspace[vspace_id].task_offset ;
              task_id < (vspace[vspace_id].task_offset + vspace[vspace_id].tasks) ;
              task_id++ )
        {
            unsigned int tty_id = 0xFFFFFFFF;
            unsigned int fb_id  = 0xFFFFFFFF;

            if ( task[task_id].use_tty )
            {
                tty_id = base_tty_id;
                base_tty_id++;
            }
            if ( task[task_id].use_fb )
            {
                fb_id = base_fb_id;
                base_fb_id++;
            }
            _task_map( task_id,      // global task index
                       vspace_id,    // vspace index
                       tty_id,       // global tty index
                       fb_id );      // global fbdma index
        } // end loop on tasks
    } // end loop on vspaces

    _puts("\n[INIT] Task Contexts initialisation completed at cycle ");
    _putw( _proctime() );
    _puts("\n");

#if INIT_DEBUG_CTX
    for ( cluster_id = 0 ; cluster_id < header->clusters ; cluster_id++ )
    {
        _puts("\nCluster ");
        _putw( cluster_id );
        _puts("\n");
        for ( proc_id = 0 ; proc_id < cluster[cluster_id].procs ; proc_id++ )
        {
            unsigned int ltid;      // local task index
            unsigned int gtid;      // global task index
            unsigned int pid = cluster_id * NB_PROCS + proc_id;

            _puts(" - processor ");
            _putw( pid );
            _puts("\n");
            for ( ltid = 0 ; ltid < _scheduler[pid].tasks ; ltid++ )
            {
                gtid = _scheduler[pid].context[ltid][CTX_TASK_ID];
                _puts("    task : ");
                _puts( task[gtid].name );
                _puts("\n");
            }
        }
    }
#endif

} // end _kernel_tasks_init()

////////////////////////////////////////////////////////////////////////////////
// This function initialises the external peripherals: the IOB component
// (I/O bridge, containing the IOMMU), the IOC (external disk controller),
// the NIC (external network controller) and the FBDMA (frame buffer DMA
// controller).
////////////////////////////////////////////////////////////////////////////////
in_kinit void _kernel_peripherals_init()
{
    /////////////////////
    // IOC peripheral
    // we simply activate the IOC interrupts...
    if ( NB_IOC )
    {
        unsigned int* ioc_address = (unsigned int*)&seg_ioc_base;
        ioc_address[BLOCK_DEVICE_IRQ_ENABLE] = 1;
    }

    /////////////////////
    // FBDMA peripheral
    // we simply activate the DMA interrupts...
    if ( NB_DMAS )
    {
        unsigned int* dma_address = (unsigned int*)&seg_dma_base;
        dma_address[DMA_IRQ_DISABLE] = 0;
    }

    /////////////////////
    // IOB peripheral
    // must be initialised when the IOMMU is used
    if ( GIET_IOMMU_ACTIVE )
    {
        unsigned int* iob_address = (unsigned int*)&seg_iob_base;

        // define IPI address mapping the IOC interrupt ...TODO...
        // set the IOMMU page table address
        iob_address[IOB_IOMMU_PTPR] = (unsigned int)(&_iommu_ptab);

        // activate the IOMMU
        iob_address[IOB_IOMMU_ACTIVE] = 1;
    }

    _puts("\n[INIT] Peripherals initialisation completed at cycle ");
    _putw( _proctime() );
    _puts("\n");

} // end _kernel_peripherals_init()

////////////////////////////////////////////////////////////////////////////////
// This function initialises the interrupt vector and the ICU mask registers
// for all processors in all clusters.
// It strongly depends on the actual peripheral hardware wiring.
// In this particular version, all clusters are identical, and the number of
// processors per cluster cannot be larger than 8.
// Processor 0 handles all interrupts corresponding to TTYs, DMAs and IOC
// (ICU inputs IRQ[8] to IRQ[31]). Only the 8 TIMER interrupts
// (ICU inputs IRQ[0] to IRQ[7]), used for context switching,
// are distributed to the 8 processors.
////////////////////////////////////////////////////////////////////////////////
in_kinit void _kernel_interrupt_vector_init()
{
    mapping_header_t*  header  = (mapping_header_t*)&seg_mapping_base;
    mapping_cluster_t* cluster = _get_cluster_base( header );

    unsigned int cluster_id;
    unsigned int proc_id;

    // ICU mask values (up to 8 processors per cluster)
    unsigned int icu_mask[8] = { 0xFFFFFF01,
                                 0x00000002,
                                 0x00000004,
                                 0x00000008,
                                 0x00000010,
                                 0x00000020,
                                 0x00000040,
                                 0x00000080 };

    // initialise the ICU mask for each processor in each cluster
    for ( cluster_id = 0 ; cluster_id < header->clusters ; cluster_id++ )
    {
        for ( proc_id = 0 ; proc_id < cluster[cluster_id].procs ; proc_id++ )
        {
            _icu_write( cluster_id, proc_id, ICU_MASK_SET, icu_mask[proc_id] );
        }
    }

    // initialise the interrupt vector
    _interrupt_vector[0]  = &_isr_switch;
    _interrupt_vector[1]  = &_isr_switch;
    _interrupt_vector[2]  = &_isr_switch;
    _interrupt_vector[3]  = &_isr_switch;
    _interrupt_vector[4]  = &_isr_switch;
    _interrupt_vector[5]  = &_isr_switch;
    _interrupt_vector[6]  = &_isr_switch;
    _interrupt_vector[7]  = &_isr_switch;

    _interrupt_vector[8]  = &_isr_dma_0;
    _interrupt_vector[9]  = &_isr_dma_1;
    _interrupt_vector[10] = &_isr_dma_2;
    _interrupt_vector[11] = &_isr_dma_3;
    _interrupt_vector[12] = &_isr_dma_4;
    _interrupt_vector[13] = &_isr_dma_5;
    _interrupt_vector[14] = &_isr_dma_6;
    _interrupt_vector[15] = &_isr_dma_7;

    _interrupt_vector[16] = &_isr_tty_get_0;
    _interrupt_vector[17] = &_isr_tty_get_1;
    _interrupt_vector[18] = &_isr_tty_get_2;
    _interrupt_vector[19] = &_isr_tty_get_3;
    _interrupt_vector[20] = &_isr_tty_get_4;
    _interrupt_vector[21] = &_isr_tty_get_5;
    _interrupt_vector[22] = &_isr_tty_get_6;
    _interrupt_vector[23] = &_isr_tty_get_7;
    _interrupt_vector[24] = &_isr_tty_get_8;
    _interrupt_vector[25] = &_isr_tty_get_9;
    _interrupt_vector[26] = &_isr_tty_get_10;
    _interrupt_vector[27] = &_isr_tty_get_11;
    _interrupt_vector[28] = &_isr_tty_get_12;
    _interrupt_vector[29] = &_isr_tty_get_13;
    _interrupt_vector[30] = &_isr_tty_get_14;
    _interrupt_vector[31] = &_isr_ioc;

    _puts("\n[INIT] Interrupt vector initialisation completed at cycle ");
    _putw( _proctime() );
    _puts("\n");

} // end _kernel_interrupt_vector_init()