//////////////////////////////////////////////////////////////////////////////////
// File     : boot_init.c
// Date     : 01/04/2012
// Author   : alain greiner
// Copyright (c) UPMC-LIP6
///////////////////////////////////////////////////////////////////////////////////
// The boot_init.c file is part of the GIET-VM nano-kernel.
// This code is executed in the boot phase by proc[0] to initialize the
// peripherals and the kernel data structures:
// - page tables for the various vspaces
// - schedulers for processors (including the task contexts and interrupt vectors)
//
// The GIET-VM uses the paged virtual memory and the MAPPING_INFO binary file
// to provide two services:
// 1) classical memory protection, when several independent applications compiled
//    in different virtual spaces are executing on the same hardware platform.
// 2) data placement in NUMA architectures, when we want to control the placement
//    of the software objects (virtual segments) on the physical memory banks.
//
// The MAPPING_INFO binary data structure must be loaded in the seg_boot_mapping
// segment (at address seg_mapping_base).
// This MAPPING_INFO data structure defines both the hardware architecture
// and the mapping:
// - physical segmentation of the physical address space,
// - virtual spaces definition (one multi-task application per vspace),
// - placement of virtual objects (vobj) in the virtual segments (vseg),
// - placement of virtual segments (vseg) in the physical segments (pseg),
// - placement of tasks on the processors.
//
// The page tables are statically built in the boot phase, and they do not
// change during execution. The GIET uses only 4 Kbytes pages.
// As most applications use only a limited number of segments, the number of PT2s
// actually used by a given virtual space is generally smaller than 2048, and is
// computed during the boot phase.
// The max number of virtual spaces (GIET_NB_VSPACE_MAX) is a configuration parameter.
//
// Each page table (one page table per virtual space) is monolithic, and
// contains one PT1 and (GIET_NB_PT2_MAX) PT2s. The PT1 is addressed using the ix1 field
// (11 bits) of the VPN, and the selected PT2 is addressed using the ix2 field (9 bits).
// - PT1[2048] : a first 8K aligned array of unsigned int, indexed by the (ix1) field of VPN.
//   Each entry in the PT1 contains a 32 bits PTD. The MSB bit PTD[31] is
//   the PTD valid bit, and LSB bits PTD[19:0] are the 20 MSB bits of the physical base
//   address of the selected PT2.
//   The PT1 contains 2048 PTD of 4 bytes => 8K bytes.
// - PT2[1024][GIET_NB_PT2_MAX] : an array of arrays of unsigned int.
//   Each PT2[1024] must be 4K aligned, and each entry in a PT2 contains two unsigned int:
//   the first word contains the protection flags, and the second word contains the PPN.
//   Each PT2 contains 512 PTE2 of 8 bytes => 4K bytes.
// The total size of a page table is finally 8K + (GIET_NB_PT2_MAX)*4K bytes.
////////////////////////////////////////////////////////////////////////////////////

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#if !defined(NB_CLUSTERS)
# error The NB_CLUSTERS value must be defined in the 'giet_config.h' file !
#endif

#if !defined(NB_PROCS_MAX)
# error The NB_PROCS_MAX value must be defined in the 'giet_config.h' file !
#endif

#if !defined(GIET_NB_VSPACE_MAX)
# error The GIET_NB_VSPACE_MAX value must be defined in the 'giet_config.h' file !
#endif
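// Worked example of the page table geometry described above (illustrative
// figures only, not used by the boot code): with GIET_NB_PT2_MAX = 16, one
// page table occupies
//     8 Kbytes (PT1) + 16 * 4 Kbytes (PT2s) = 72 Kbytes.
// For a virtual address 0x00403A74 (4 Kbytes pages):
//     vpn = 0x00403A74 >> 12 = 0x00403
//     ix1 = vpn >> 9         = 0x002      (PT1 index, 11 bits)
//     ix2 = vpn & 0x1FF      = 0x003      (PT2 index,  9 bits)
// The corresponding PTE2 occupies the two words PT2[2*ix2] (flags) and
// PT2[2*ix2 + 1] (PPN) in the PT2 selected by PT1[ix1].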
////////////////////////////////////////////////////////////////////////////
// Global variables for boot code
// As both the page tables and the schedulers are physically distributed,
// these global variables are just arrays of pointers.
////////////////////////////////////////////////////////////////////////////

// Page table pointers array
page_table_t*       boot_ptabs_vaddr[GIET_NB_VSPACE_MAX];
page_table_t*       boot_ptabs_paddr[GIET_NB_VSPACE_MAX];

// Scheduler pointers array
static_scheduler_t* boot_schedulers_paddr[NB_CLUSTERS * NB_PROCS_MAX];

// Next free PT2 index array
unsigned int boot_next_free_pt2[GIET_NB_VSPACE_MAX] =
                 { [0 ... GIET_NB_VSPACE_MAX-1] = 0 };

// Max PT2 index
unsigned int boot_max_pt2[GIET_NB_VSPACE_MAX] =
                 { [0 ... GIET_NB_VSPACE_MAX-1] = 0 };

//////////////////////////////////////////////////////////////////////////////
// boot_procid()
//////////////////////////////////////////////////////////////////////////////
inline unsigned int boot_procid()
{
    unsigned int ret;
    asm volatile("mfc0 %0, $15, 1" : "=r"(ret));
    return (ret & 0x3FF);
}

//////////////////////////////////////////////////////////////////////////////
// boot_proctime()
//////////////////////////////////////////////////////////////////////////////
inline unsigned int boot_proctime()
{
    unsigned int ret;
    asm volatile("mfc0 %0, $9" : "=r"(ret));
    return ret;
}

//////////////////////////////////////////////////////////////////////////////
// boot_exit()
//////////////////////////////////////////////////////////////////////////////
void boot_exit()
{
    while(1) asm volatile("nop");
}

//////////////////////////////////////////////////////////////////////////////
// boot_eret()
// The address of this function is used to initialise the return address (RA)
// in all task contexts (when the task has never been executed).
//////////////////////////////////////////////////////////////////////////////
void boot_eret()
{
    asm volatile("eret");
}

//////////////////////////////////////////////////////////////////////////////
// boot_scheduler_set_context()
// This function sets a context slot in a scheduler, after a temporary
// deactivation of the DTLB (because we use the scheduler physical address).
// - gpid   : global processor/scheduler index
// - ltid   : local task index
// - slotid : context slot index
// - value  : value to be written
//////////////////////////////////////////////////////////////////////////////
inline void boot_scheduler_set_context( unsigned int gpid,
                                        unsigned int ltid,
                                        unsigned int slotid,
                                        unsigned int value )
{
    // get scheduler physical address
    static_scheduler_t* psched = boot_schedulers_paddr[gpid];

    // get slot physical address
    unsigned int* pslot = &(psched->context[ltid][slotid]);

    asm volatile ( "li    $26,  0xB        \n"
                   "mtc2  $26,  $1         \n"   /* deactivate DTLB */
                   "sw    %1,   0(%0)      \n"   /* *pslot <= value */
                   "li    $26,  0xF        \n"
                   "mtc2  $26,  $1         \n"   /* activate DTLB   */
                   :
                   : "r"(pslot), "r"(value)
                   : "$26" );
}
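// Note on the MODE values used in the asm sequence above: as the inline
// comments indicate, writing 0xB into CP2 register $1 temporarily disables
// the DTLB (so that the sw instruction uses the physical address held in
// pslot), and writing 0xF restores the full MMU mode set by boot_init().
// Hypothetical usage sketch (illustrative values), as done for each task by
// boot_schedulers_init() further below:
//
//     boot_scheduler_set_context( 3, 0, CTX_RUN_ID, 1 );  // task 0 of scheduler 3 is runnable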
//////////////////////////////////////////////////////////////////////////////
// boot_scheduler_set_itvector()
// This function sets an interrupt vector slot in a scheduler, after a temporary
// deactivation of the DTLB (because we use the scheduler physical address).
// - gpid   : global processor/scheduler index
// - slotid : context slot index
// - value  : value to be written
//////////////////////////////////////////////////////////////////////////////
inline void boot_scheduler_set_itvector( unsigned int gpid,
                                         unsigned int slotid,
                                         unsigned int value )
{
    // get scheduler physical address
    static_scheduler_t* psched = boot_schedulers_paddr[gpid];

    // get slot physical address
    unsigned int* pslot = &(psched->interrupt_vector[slotid]);

    asm volatile ( "li    $26,  0xB        \n"
                   "mtc2  $26,  $1         \n"   /* deactivate DTLB */
                   "sw    %1,   0(%0)      \n"   /* *pslot <= value */
                   "li    $26,  0xF        \n"
                   "mtc2  $26,  $1         \n"   /* activate DTLB   */
                   :
                   : "r"(pslot), "r"(value)
                   : "$26" );
}

//////////////////////////////////////////////////////////////////////////////
// boot_scheduler_get_tasks()
// This function returns the "tasks" field of a scheduler, after a temporary
// deactivation of the DTLB (because we use the scheduler physical address).
// - gpid : global processor/scheduler index
//////////////////////////////////////////////////////////////////////////////
inline unsigned int boot_scheduler_get_tasks( unsigned int gpid )
{
    unsigned int ret;

    // get scheduler physical address
    static_scheduler_t* psched = boot_schedulers_paddr[gpid];

    // get tasks physical address
    unsigned int* ptasks = &(psched->tasks);

    asm volatile ( "li    $26,  0xB        \n"
                   "mtc2  $26,  $1         \n"   /* deactivate DTLB */
                   "lw    %0,   0(%1)      \n"   /* ret <= *ptasks  */
                   "li    $26,  0xF        \n"
                   "mtc2  $26,  $1         \n"   /* activate DTLB   */
                   : "=r"(ret)
                   : "r"(ptasks)
                   : "$26" );
    return ret;
}

//////////////////////////////////////////////////////////////////////////////
// boot_scheduler_set_tasks()
// This function sets the "tasks" field of a scheduler, after a temporary
// deactivation of the DTLB (because we use the scheduler physical address).
// - gpid  : global processor/scheduler index
// - value : value to be written
//////////////////////////////////////////////////////////////////////////////
inline void boot_scheduler_set_tasks( unsigned int gpid,
                                      unsigned int value )
{
    // get scheduler physical address
    static_scheduler_t* psched = boot_schedulers_paddr[gpid];

    // get tasks physical address
    unsigned int* ptasks = &(psched->tasks);

    asm volatile ( "li    $26,  0xB        \n"
                   "mtc2  $26,  $1         \n"   /* deactivate DTLB  */
                   "sw    %1,   0(%0)      \n"   /* *ptasks <= value */
                   "li    $26,  0xF        \n"
                   "mtc2  $26,  $1         \n"   /* activate DTLB    */
                   :
                   : "r"(ptasks), "r"(value)
                   : "$26" );
}

//////////////////////////////////////////////////////////////////////////////
// boot_scheduler_set_current()
// This function sets the "current" field of a scheduler, after a temporary
// deactivation of the DTLB (because we use the scheduler physical address).
// - gpid  : global processor/scheduler index
// - value : value to be written
//////////////////////////////////////////////////////////////////////////////
inline void boot_scheduler_set_current( unsigned int gpid,
                                        unsigned int value )
{
    // get scheduler physical address
    static_scheduler_t* psched = boot_schedulers_paddr[gpid];

    // get current physical address
    unsigned int* pcur = &(psched->current);

    asm volatile ( "li    $26,  0xB        \n"
                   "mtc2  $26,  $1         \n"   /* deactivate DTLB */
                   "sw    %1,   0(%0)      \n"   /* *pcur <= value  */
                   "li    $26,  0xF        \n"
                   "mtc2  $26,  $1         \n"   /* activate DTLB   */
                   :
                   : "r"(pcur), "r"(value)
                   : "$26" );
}

//////////////////////////////////////////////////////////////////////////////
// boot_set_mmu_ptpr()
// This function sets a new value for the MMU PTPR register.
////////////////////////////////////////////////////////////////////////////// inline void boot_set_mmu_ptpr( unsigned int val ) { asm volatile("mtc2 %0, $0" : : "r"(val) ); } ////////////////////////////////////////////////////////////////////////////// // boot_set_mmu_mode() // This function set a new value for the MMU MODE register. ////////////////////////////////////////////////////////////////////////////// inline void boot_set_mmu_mode( unsigned int val ) { asm volatile("mtc2 %0, $1" : : "r"(val) ); } //////////////////////////////////////////////////////////////////////////// // boot_puts() // (it uses TTY0) //////////////////////////////////////////////////////////////////////////// void boot_puts(const char *buffer) { unsigned int* tty_address = (unsigned int*)&seg_tty_base; unsigned int n; for ( n=0; n<100; n++) { if (buffer[n] == 0) break; tty_address[0] = (unsigned int)buffer[n]; } } //////////////////////////////////////////////////////////////////////////// // boot_putw() // (it uses TTY0) //////////////////////////////////////////////////////////////////////////// void boot_putw(unsigned int val) { static const char HexaTab[] = "0123456789ABCDEF"; char buf[11]; unsigned int c; buf[0] = '0'; buf[1] = 'x'; buf[10] = 0; for ( c = 0 ; c < 8 ; c++ ) { buf[9-c] = HexaTab[val&0xF]; val = val >> 4; } boot_puts(buf); } ///////////////////////////////////////////////////////////////////////////// // mapping_info data structure access functions ///////////////////////////////////////////////////////////////////////////// inline mapping_cluster_t* boot_get_cluster_base( mapping_header_t* header ) { return (mapping_cluster_t*) ((char*)header + MAPPING_HEADER_SIZE); } ///////////////////////////////////////////////////////////////////////////// inline mapping_pseg_t* boot_get_pseg_base( mapping_header_t* header ) { return (mapping_pseg_t*) ((char*)header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE*header->clusters); } ///////////////////////////////////////////////////////////////////////////// inline mapping_vspace_t* boot_get_vspace_base( mapping_header_t* header ) { return (mapping_vspace_t*) ((char*)header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE*header->clusters + MAPPING_PSEG_SIZE*header->psegs); } ///////////////////////////////////////////////////////////////////////////// inline mapping_vseg_t* boot_get_vseg_base( mapping_header_t* header ) { return (mapping_vseg_t*) ((char*)header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE*header->clusters + MAPPING_PSEG_SIZE*header->psegs + MAPPING_VSPACE_SIZE*header->vspaces); } ///////////////////////////////////////////////////////////////////////////// inline mapping_vobj_t* boot_get_vobj_base( mapping_header_t* header ) { return (mapping_vobj_t*) ((char*)header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE*header->clusters + MAPPING_PSEG_SIZE*header->psegs + MAPPING_VSPACE_SIZE*header->vspaces + MAPPING_VSEG_SIZE*header->vsegs ); } ///////////////////////////////////////////////////////////////////////////// inline mapping_task_t* boot_get_task_base( mapping_header_t* header ) { return (mapping_task_t*) ((char*)header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE*header->clusters + MAPPING_PSEG_SIZE*header->psegs + MAPPING_VSPACE_SIZE*header->vspaces + MAPPING_VSEG_SIZE*header->vsegs + MAPPING_VOBJ_SIZE*header->vobjs ); } ///////////////////////////////////////////////////////////////////////////// inline mapping_proc_t* boot_get_proc_base( mapping_header_t* header ) { return (mapping_proc_t*) ((char*)header + MAPPING_HEADER_SIZE + 
                                 MAPPING_CLUSTER_SIZE*header->clusters +
                                 MAPPING_PSEG_SIZE*header->psegs +
                                 MAPPING_VSPACE_SIZE*header->vspaces +
                                 MAPPING_VSEG_SIZE*header->vsegs +
                                 MAPPING_VOBJ_SIZE*header->vobjs +
                                 MAPPING_TASK_SIZE*header->tasks );
}
/////////////////////////////////////////////////////////////////////////////
inline mapping_irq_t* boot_get_irq_base( mapping_header_t* header )
{
    return (mapping_irq_t*) ((char*)header +
                             MAPPING_HEADER_SIZE +
                             MAPPING_CLUSTER_SIZE*header->clusters +
                             MAPPING_PSEG_SIZE*header->psegs +
                             MAPPING_VSPACE_SIZE*header->vspaces +
                             MAPPING_VSEG_SIZE*header->vsegs +
                             MAPPING_VOBJ_SIZE*header->vobjs +
                             MAPPING_TASK_SIZE*header->tasks +
                             MAPPING_PROC_SIZE*header->procs );
}
/////////////////////////////////////////////////////////////////////////////
inline mapping_coproc_t* boot_get_coproc_base( mapping_header_t* header )
{
    return (mapping_coproc_t*) ((char*)header +
                                MAPPING_HEADER_SIZE +
                                MAPPING_CLUSTER_SIZE*header->clusters +
                                MAPPING_PSEG_SIZE*header->psegs +
                                MAPPING_VSPACE_SIZE*header->vspaces +
                                MAPPING_VOBJ_SIZE*header->vobjs +
                                MAPPING_VSEG_SIZE*header->vsegs +
                                MAPPING_TASK_SIZE*header->tasks +
                                MAPPING_PROC_SIZE*header->procs +
                                MAPPING_IRQ_SIZE*header->irqs );
}
///////////////////////////////////////////////////////////////////////////////////
inline mapping_cp_port_t* boot_get_cp_port_base( mapping_header_t* header )
{
    return (mapping_cp_port_t*) ((char*)header +
                                 MAPPING_HEADER_SIZE +
                                 MAPPING_CLUSTER_SIZE*header->clusters +
                                 MAPPING_PSEG_SIZE*header->psegs +
                                 MAPPING_VSPACE_SIZE*header->vspaces +
                                 MAPPING_VOBJ_SIZE*header->vobjs +
                                 MAPPING_VSEG_SIZE*header->vsegs +
                                 MAPPING_TASK_SIZE*header->tasks +
                                 MAPPING_PROC_SIZE*header->procs +
                                 MAPPING_IRQ_SIZE*header->irqs +
                                 MAPPING_COPROC_SIZE*header->coprocs );
}
///////////////////////////////////////////////////////////////////////////////////
inline mapping_periph_t* boot_get_periph_base( mapping_header_t* header )
{
    return (mapping_periph_t*) ((char*)header +
                                MAPPING_HEADER_SIZE +
                                MAPPING_CLUSTER_SIZE*header->clusters +
                                MAPPING_PSEG_SIZE*header->psegs +
                                MAPPING_VSPACE_SIZE*header->vspaces +
                                MAPPING_VOBJ_SIZE*header->vobjs +
                                MAPPING_VSEG_SIZE*header->vsegs +
                                MAPPING_TASK_SIZE*header->tasks +
                                MAPPING_PROC_SIZE*header->procs +
                                MAPPING_IRQ_SIZE*header->irqs +
                                MAPPING_COPROC_SIZE*header->coprocs +
                                MAPPING_CP_PORT_SIZE*header->cp_ports );
}

//////////////////////////////////////////////////////////////////////////////
// boot_pseg_get()
// This function returns a pointer to the physical segment
// identified by the pseg index.
//////////////////////////////////////////////////////////////////////////////
mapping_pseg_t* boot_pseg_get( unsigned int seg_id)
{
    mapping_header_t* header = (mapping_header_t*)&seg_mapping_base;
    mapping_pseg_t*   pseg   = boot_get_pseg_base( header );

    // checking argument
    if ( seg_id >= header->psegs )
    {
        boot_puts("\n[BOOT ERROR] : seg_id argument too large\n");
        boot_puts(" in function boot_pseg_get()\n");
        boot_exit();
    }

    return &pseg[seg_id];
} // end boot_pseg_get()
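// Illustrative sketch (commented out, not part of the boot sequence): the
// accessors above are typically combined to walk one section of the mapping,
// for example the private vsegs of the first vspace; boot_vspace_pt_build()
// and boot_pt_init() below follow exactly this pattern.
//
//     mapping_header_t* header = (mapping_header_t*)&seg_mapping_base;
//     mapping_vspace_t* vspace = boot_get_vspace_base( header );
//     mapping_vseg_t*   vseg   = boot_get_vseg_base( header );
//     unsigned int      vseg_id;
//     for ( vseg_id = vspace[0].vseg_offset ;
//           vseg_id < vspace[0].vseg_offset + vspace[0].vsegs ;
//           vseg_id++ )
//     {
//         boot_puts( vseg[vseg_id].name );
//         boot_puts( "\n" );
//     }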
//////////////////////////////////////////////////////////////////////////////
// boot_add_pte()
// This function registers a new PTE in the page table pointed to
// by the vspace_id argument, and updates both PT1 and PT2.
// A new PT2 is used when required.
// As the set of PT2s is implemented as a fixed size array (no dynamic
// allocation), this function checks a possible overflow of the PT2 array.
//////////////////////////////////////////////////////////////////////////////
void boot_add_pte( unsigned int vspace_id,
                   unsigned int vpn,
                   unsigned int flags,
                   unsigned int ppn )
{
    unsigned int  ix1;
    unsigned int  ix2;
    unsigned int  ptba;      // PT2 base address
    unsigned int  pt2_id;    // PT2 index
    unsigned int* pt_flags;  // pointer on the pte_flags = &PT2[2*ix2]
    unsigned int* pt_ppn;    // pointer on the pte_ppn   = &PT2[2*ix2+1]

    ix1 = vpn >> 9;          // 11 bits
    ix2 = vpn & 0x1FF;       //  9 bits

    // check that the boot_max_pt2[vspace_id] has been set
    unsigned int max_pt2 = boot_max_pt2[vspace_id];

    if(max_pt2 == 0)
    {
        boot_puts("\n[BOOT ERROR] No page table found for vspace ");
        boot_putw(vspace_id);
        boot_puts("\n");
        boot_exit();
    }

    // get page table physical address
    page_table_t* pt = boot_ptabs_paddr[vspace_id];

    if ( (pt->pt1[ix1] & PTE_V) == 0 )    // set a new PTD in PT1
    {
        pt2_id = boot_next_free_pt2[vspace_id];
        if ( pt2_id == max_pt2 )
        {
            boot_puts("\n[BOOT ERROR] in boot_add_pte() function\n");
            boot_puts("the length of the ptab vobj is too small\n");
            boot_exit();
        }
        else
        {
            ptba = (unsigned int)pt + PT1_SIZE + PT2_SIZE*pt2_id;
            pt->pt1[ix1] = PTE_V | PTE_T | (ptba >> 12);
            boot_next_free_pt2[vspace_id] = pt2_id + 1;
        }
    }
    else
    {
        ptba = pt->pt1[ix1] << 12;
    }

    // set PTE2 after checking double mapping error
    pt_flags = (unsigned int*)(ptba + 8*ix2);
    pt_ppn   = (unsigned int*)(ptba + 8*ix2 + 4);

    if ( ( *pt_flags & PTE_V) != 0 )    // page already mapped
    {
        boot_puts("\n[BOOT ERROR] in boot_add_pte() function\n");
        boot_puts("page already mapped\n");
        boot_exit();
    }

    // set PTE2
    *pt_flags = flags;
    *pt_ppn   = ppn;
} // end boot_add_pte()
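// Worked example of the PT1 entry (PTD) encoding used above (illustrative
// numbers only): if the PT2 selected for a missing PT1 entry starts at the
// physical address ptba = 0x00442000, then
//     pt->pt1[ix1] = PTE_V | PTE_T | (0x00442000 >> 12)
//                  = PTE_V | PTE_T | 0x00442
// and the reverse decoding  ptba = pt->pt1[ix1] << 12  recovers 0x00442000,
// the PTE_V and PTE_T flag bits being shifted out of the 32-bit word.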
/////////////////////////////////////////////////////////////////////
// This function builds the page table for a given vspace.
// The physical base addresses for all vsegs (global and private)
// must have been previously computed.
/////////////////////////////////////////////////////////////////////
void boot_vspace_pt_build( unsigned int vspace_id )
{
    unsigned int vseg_id;
    unsigned int npages;
    unsigned int ppn;
    unsigned int vpn;
    unsigned int flags;
    unsigned int page_id;

    mapping_header_t* header = (mapping_header_t*)&seg_mapping_base;
    mapping_vspace_t* vspace = boot_get_vspace_base( header );
    mapping_vseg_t*   vseg   = boot_get_vseg_base( header );

    // private segments
    for ( vseg_id = vspace[vspace_id].vseg_offset ;
          vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs) ;
          vseg_id++ )
    {
        vpn    = vseg[vseg_id].vbase >> 12;
        ppn    = vseg[vseg_id].pbase >> 12;
        npages = vseg[vseg_id].length >> 12;
        if ( (vseg[vseg_id].length & 0xFFF) != 0 ) npages++;

        flags = PTE_V;
        if ( vseg[vseg_id].mode & C_MODE_MASK ) flags = flags | PTE_C;
        if ( vseg[vseg_id].mode & X_MODE_MASK ) flags = flags | PTE_X;
        if ( vseg[vseg_id].mode & W_MODE_MASK ) flags = flags | PTE_W;
        if ( vseg[vseg_id].mode & U_MODE_MASK ) flags = flags | PTE_U;

#if BOOT_DEBUG_PT
boot_puts( vseg[vseg_id].name );
boot_puts(" : flags = ");
boot_putw( flags );
boot_puts(" / npages = ");
boot_putw( npages );
boot_puts(" / pbase = ");
boot_putw( vseg[vseg_id].pbase );
boot_puts("\n");
#endif

        // loop on 4K pages
        for ( page_id = 0 ; page_id < npages ; page_id++ )
        {
            boot_add_pte( vspace_id, vpn, flags, ppn );
            vpn++;
            ppn++;
        }
    }

    // global segments
    for ( vseg_id = 0 ; vseg_id < header->globals ; vseg_id++ )
    {
        vpn    = vseg[vseg_id].vbase >> 12;
        ppn    = vseg[vseg_id].pbase >> 12;
        npages = vseg[vseg_id].length >> 12;
        if ( (vseg[vseg_id].length & 0xFFF) != 0 ) npages++;

        flags = PTE_V;
        if ( vseg[vseg_id].mode & C_MODE_MASK ) flags = flags | PTE_C;
        if ( vseg[vseg_id].mode & X_MODE_MASK ) flags = flags | PTE_X;
        if ( vseg[vseg_id].mode & W_MODE_MASK ) flags = flags | PTE_W;
        if ( vseg[vseg_id].mode & U_MODE_MASK ) flags = flags | PTE_U;

#if BOOT_DEBUG_PT
boot_puts( vseg[vseg_id].name );
boot_puts(" / flags = ");
boot_putw( flags );
boot_puts(" / npages = ");
boot_putw( npages );
boot_puts(" / pbase = ");
boot_putw( vseg[vseg_id].pbase );
boot_puts("\n");
#endif

        // loop on 4K pages
        for ( page_id = 0 ; page_id < npages ; page_id++ )
        {
            boot_add_pte( vspace_id, vpn, flags, ppn );
            vpn++;
            ppn++;
        }
    }
} // end boot_vspace_pt_build()

///////////////////////////////////////////////////////////////////////////
// Align the value "toAlign" to the required alignment indicated by
// alignPow2 (the log2 of the alignment).
///////////////////////////////////////////////////////////////////////////
unsigned int align_to( unsigned int toAlign,
                       unsigned int alignPow2)
{
    unsigned int mask = (1 << alignPow2) - 1;
    return ((toAlign + mask ) & ~mask );
}
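// Examples of the helper above (illustrative values):
//     align_to( 0x3A74, 12 ) = 0x4000    (next 4 Kbytes boundary)
//     align_to( 0x4000, 12 ) = 0x4000    (an already aligned value is unchanged)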
///////////////////////////////////////////////////////////////////////////
// This function computes the physical base address for a vseg
// as specified in the mapping info data structure.
// It updates the pbase and the length fields of the vseg.
// It updates the pbase and vbase fields of all vobjs in the vseg.
// It updates the next_base field of the pseg, and checks overflow.
// It updates the boot_ptabs_paddr[] and boot_ptabs_vaddr[] arrays.
// It is a global vseg if vspace_id = (-1).
///////////////////////////////////////////////////////////////////////////
void boot_vseg_map( mapping_vseg_t* vseg,
                    unsigned int    vspace_id )
{
    unsigned int vobj_id;
    unsigned int cur_vaddr;
    unsigned int cur_paddr;

    mapping_header_t* header = (mapping_header_t*)&seg_mapping_base;
    mapping_vobj_t*   vobj   = boot_get_vobj_base( header );

    // get physical segment pointer
    mapping_pseg_t* pseg = boot_pseg_get( vseg->psegid );

    // compute vseg physical base address
    if ( vseg->ident != 0 )        // identity mapping required
    {
        vseg->pbase = vseg->vbase;
    }
    else                           // unconstrained mapping
    {
        vseg->pbase = pseg->next_base;

        // test alignment constraint
        if ( vobj[vseg->vobj_offset].align )
        {
            vseg->pbase = align_to( vseg->pbase, vobj[vseg->vobj_offset].align );
        }
    }

    // loop on vobjs contained in vseg to :
    // (1) compute the length of the vseg,
    // (2) initialise the vaddr and paddr fields of all vobjs,
    // (3) initialise the page table pointers arrays
    cur_vaddr = vseg->vbase;
    cur_paddr = vseg->pbase;

    for( vobj_id = vseg->vobj_offset;
         vobj_id < (vseg->vobj_offset + vseg->vobjs);
         vobj_id++)
    {
        if ( vobj[vobj_id].align )
        {
            cur_paddr = align_to(cur_paddr, vobj[vobj_id].align);
        }

        // set vaddr/paddr for current vobj
        vobj[vobj_id].vaddr = cur_vaddr;
        vobj[vobj_id].paddr = cur_paddr;

        // initialise boot_ptabs_vaddr[] if current vobj is a PTAB
        if ( vobj[vobj_id].type == VOBJ_TYPE_PTAB )
        {
            if(vspace_id == ((unsigned int) -1))    // global vseg
            {
                boot_puts( "\n[BOOT ERROR] in boot_vseg_map() function: " );
                boot_puts( "a PTAB vobj cannot be global" );
                boot_exit();
            }

            // we need at least one PT2 => ( boot_max_pt2[vspace_id] >= 1)
            if(vobj[vobj_id].length < (PT1_SIZE + PT2_SIZE) )
            {
                boot_puts( "\n[BOOT ERROR] in boot_vseg_map() function, " );
                boot_puts("PTAB too small, minimum size is: ");
                boot_putw( PT1_SIZE + PT2_SIZE);
                boot_exit();
            }

            // register both physical and virtual page table address
            boot_ptabs_vaddr[vspace_id] = (page_table_t*)vobj[vobj_id].vaddr;
            boot_ptabs_paddr[vspace_id] = (page_table_t*)vobj[vobj_id].paddr;

            /* computing the number of second level pages */
            boot_max_pt2[vspace_id] = (vobj[vobj_id].length - PT1_SIZE) / PT2_SIZE;
        }

        // set next vaddr/paddr
        cur_vaddr += vobj[vobj_id].length;
        cur_paddr += vobj[vobj_id].length;
    } // end for vobjs

    // set the vseg length
    vseg->length = align_to( (cur_paddr - vseg->pbase), 12);

    // checking pseg overflow
    if ( (vseg->pbase < pseg->base) ||
         ((vseg->pbase + vseg->length) > (pseg->base + pseg->length)) )
    {
        boot_puts("\n[BOOT ERROR] in boot_vseg_map() function\n");
        boot_puts("impossible mapping for virtual segment: ");
        boot_puts( vseg->name );
        boot_puts("\n");
        boot_puts("vseg pbase = ");
        boot_putw( vseg->pbase );
        boot_puts("\n");
        boot_puts("vseg length = ");
        boot_putw( vseg->length );
        boot_puts("\n");
        boot_puts("pseg pbase = ");
        boot_putw( pseg->base );
        boot_puts("\n");
        boot_puts("pseg length = ");
        boot_putw( pseg->length );
        boot_puts("\n");
        boot_exit();
    }

    // set the next_base field in pseg
    if ( vseg->ident == 0 )
        pseg->next_base = vseg->pbase + vseg->length;

#if BOOT_DEBUG_PT
boot_puts( vseg->name );
boot_puts(" : len = ");
boot_putw( vseg->length );
boot_puts(" / vbase = ");
boot_putw( vseg->vbase );
boot_puts(" / pbase = ");
boot_putw( vseg->pbase );
boot_puts("\n");
#endif

} // end boot_vseg_map()

/////////////////////////////////////////////////////////////////////
// This function checks consistency between the mapping_info data
// structure (soft), and the giet_config file (hard).
/////////////////////////////////////////////////////////////////////
void boot_check_mapping()
{
    mapping_header_t*  header  = (mapping_header_t*)&seg_mapping_base;
    mapping_cluster_t* cluster = boot_get_cluster_base( header );
    mapping_periph_t*  periph  = boot_get_periph_base( header );

    // checking mapping availability
    if ( header->signature != IN_MAPPING_SIGNATURE )
    {
        boot_puts("\n[BOOT ERROR] Illegal mapping signature: ");
        boot_putw(header->signature);
        boot_puts("\n");
        boot_exit();
    }

    // checking number of clusters
    if ( header->clusters != NB_CLUSTERS )
    {
        boot_puts("\n[BOOT ERROR] Incoherent NB_CLUSTERS");
        boot_puts("\n - In giet_config, value = ");
        boot_putw ( NB_CLUSTERS );
        boot_puts("\n - In mapping_info, value = ");
        boot_putw ( header->clusters );
        boot_puts("\n");
        boot_exit();
    }

    // checking number of virtual spaces
    if ( header->vspaces > GIET_NB_VSPACE_MAX )
    {
        boot_puts("\n[BOOT ERROR] : number of vspaces > GIET_NB_VSPACE_MAX\n");
        boot_puts("\n");
        boot_exit();
    }

    // checking hardware
    unsigned int periph_id;
    unsigned int cluster_id;
    unsigned int channels;
    unsigned int tty_found = 0;
    unsigned int nic_found = 0;

    for ( cluster_id = 0 ; cluster_id < NB_CLUSTERS ; cluster_id++ )
    {
        // NB_PROCS_MAX
        if ( cluster[cluster_id].procs > NB_PROCS_MAX )
        {
            boot_puts("\n[BOOT ERROR] too many processors in cluster ");
            boot_putw( cluster_id );
            boot_puts(" : procs = ");
            boot_putw ( cluster[cluster_id].procs );
            boot_puts("\n");
            boot_exit();
        }

        for ( periph_id = cluster[cluster_id].periph_offset ;
              periph_id < cluster[cluster_id].periph_offset + cluster[cluster_id].periphs ;
              periph_id++ )
        {
            // NB_TTYS
            if ( periph[periph_id].type == PERIPH_TYPE_TTY )
            {
                if ( tty_found )
                {
                    boot_puts("\n[BOOT ERROR] TTY component should not be replicated\n");
                    boot_exit();
                }
                if ( periph[periph_id].channels > NB_TTYS )
                {
                    boot_puts("\n[BOOT ERROR] Too many TTY terminals in cluster ");
                    boot_putw( cluster_id );
                    boot_puts(" : ttys = ");
                    boot_putw ( periph[periph_id].channels );
                    boot_puts("\n");
                    boot_exit();
                }
                tty_found = 1;
            }
            // NB_NICS
            if ( periph[periph_id].type == PERIPH_TYPE_NIC )
            {
                if ( nic_found )
                {
                    boot_puts("\n[BOOT ERROR] NIC component should not be replicated\n");
                    boot_exit();
                }
                if ( periph[periph_id].channels > NB_NICS )
                {
                    boot_puts("\n[BOOT ERROR] Too many NIC channels in cluster ");
                    boot_putw( cluster_id );
                    boot_puts(" : nics = ");
                    boot_putw ( periph[periph_id].channels );
                    boot_puts("\n");
                    boot_exit();
                }
                nic_found = 1;
            }
            // NB_TIMERS
            if ( periph[periph_id].type == PERIPH_TYPE_TIM )
            {
                if ( periph[periph_id].channels > NB_TIMERS_MAX )
                {
                    boot_puts("\n[BOOT ERROR] Too many user timers in cluster ");
                    boot_putw( cluster_id );
                    boot_puts(" : timers = ");
                    boot_putw ( periph[periph_id].channels );
                    boot_puts("\n");
                    boot_exit();
                }
            }
            // NB_DMAS
            if ( periph[periph_id].type == PERIPH_TYPE_DMA )
            {
                if ( periph[periph_id].channels > NB_DMAS_MAX )
                {
                    boot_puts("\n[BOOT ERROR] Too many DMA channels in cluster ");
                    boot_putw( cluster_id );
                    boot_puts(" : channels = ");
                    boot_putw ( periph[periph_id].channels );
                    boot_puts("\n");
                    boot_exit();
                }
            }
        } // end for periphs
    } // end for clusters
} // end boot_check_mapping()
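// The physical memory allocator initialised below is a simple bump pointer
// per pseg (the next_base field). Worked example (illustrative figures):
// in a cluster with 4 processors whose first RAM pseg starts at 0x01000000,
// boot_psegs_init() sets next_base = 0x01000000 + (4 << 12) = 0x01004000,
// reserving one 4 Kbytes scheduler per processor, and boot_vseg_map() then
// allocates the vsegs of that pseg starting from this address.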
/////////////////////////////////////////////////////////////////////
// This function initialises the physical page allocators
// for all psegs (i.e. the next_base field of the pseg).
// In each cluster containing processors, it reserves space for the
// schedulers in the first RAM pseg found (4k bytes per processor).
/////////////////////////////////////////////////////////////////////
void boot_psegs_init()
{
    mapping_header_t*  header  = (mapping_header_t*)&seg_mapping_base;
    mapping_cluster_t* cluster = boot_get_cluster_base( header );
    mapping_pseg_t*    pseg    = boot_get_pseg_base( header );

    unsigned int cluster_id;
    unsigned int pseg_id;
    unsigned int found;

#if BOOT_DEBUG_PT
boot_puts("\n[BOOT DEBUG] ****** psegs allocators initialisation ******\n");
#endif

    for ( cluster_id = 0 ; cluster_id < header->clusters ; cluster_id++ )
    {
        if ( cluster[cluster_id].procs > NB_PROCS_MAX )
        {
            boot_puts("\n[BOOT ERROR] The number of processors in cluster ");
            boot_putw( cluster_id );
            boot_puts(" is larger than NB_PROCS_MAX \n");
            boot_exit();
        }

        found = 0;
        for ( pseg_id = cluster[cluster_id].pseg_offset ;
              pseg_id < cluster[cluster_id].pseg_offset + cluster[cluster_id].psegs ;
              pseg_id++ )
        {
            unsigned int free = pseg[pseg_id].base;

            if ( (pseg[pseg_id].type == PSEG_TYPE_RAM) && (found == 0) )
            {
                free  = free + (cluster[cluster_id].procs << 12);
                found = 1;
            }
            pseg[pseg_id].next_base = free;

#if BOOT_DEBUG_PT
boot_puts("cluster ");
boot_putw(cluster_id);
boot_puts(" / pseg ");
boot_puts(pseg[pseg_id].name);
boot_puts(" : next_base = ");
boot_putw(pseg[pseg_id].next_base);
boot_puts("\n");
#endif
        }
    }
} // end boot_psegs_init()

/////////////////////////////////////////////////////////////////////
// This function builds the page tables for all virtual spaces
// defined in the mapping_info data structure.
// For each virtual space, it maps both the global vsegs
// (replicated in all vspaces), and the private vsegs.
/////////////////////////////////////////////////////////////////////
void boot_pt_init()
{
    mapping_header_t* header = (mapping_header_t*)&seg_mapping_base;
    mapping_vspace_t* vspace = boot_get_vspace_base( header );
    mapping_vseg_t*   vseg   = boot_get_vseg_base( header );

    unsigned int vspace_id;
    unsigned int vseg_id;

#if BOOT_DEBUG_PT
boot_puts("\n[BOOT DEBUG] ****** mapping global vsegs ******\n");
#endif

    // step 1 : loop on the global vsegs to map them
    for ( vseg_id = 0 ; vseg_id < header->globals ; vseg_id++ )
    {
        boot_vseg_map( &vseg[vseg_id], ((unsigned int)(-1)) );
    }

    // step 2 : loop on the vspaces to map private vsegs
    for ( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ )
    {

#if BOOT_DEBUG_PT
boot_puts("\n[BOOT DEBUG] ****** mapping private vsegs in vspace ");
boot_puts(vspace[vspace_id].name);
boot_puts(" ******\n");
#endif

        for ( vseg_id = vspace[vspace_id].vseg_offset ;
              vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs) ;
              vseg_id++ )
        {
            boot_vseg_map( &vseg[vseg_id], vspace_id );
        }
    }

    // step 3 : loop on the vspaces to build the page tables
    for ( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ )
    {

#if BOOT_DEBUG_PT
boot_puts("\n[BOOT DEBUG] ****** building page table for vspace ");
boot_puts(vspace[vspace_id].name);
boot_puts(" ******\n");
#endif

        boot_vspace_pt_build( vspace_id );

#if BOOT_DEBUG_PT
boot_puts("\n>>> page table physical address = ");
boot_putw((unsigned int)boot_ptabs_paddr[vspace_id]);
boot_puts(", page table number of PT2 = ");
boot_putw((unsigned int)boot_max_pt2[vspace_id]);
boot_puts("\n");
#endif
    }
} // end boot_pt_init()
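// Note on the (>> 13) shift used below and in boot_schedulers_init() and
// boot_init(): a page table starts with its 8 Kbytes aligned PT1, so the
// MMU PTPR register holds the physical base address divided by 8K.
// Worked example (illustrative value): a page table located at the physical
// address 0x00800000 gives PTPR = 0x00800000 >> 13 = 0x400.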
///////////////////////////////////////////////////////////////////////////////
// This function initializes all private vobjs defined in the vspaces,
// such as mwmr channels, barriers and locks, because these vobjs
// are not known by the compiler, and cannot be initialised by it.
///////////////////////////////////////////////////////////////////////////////
void boot_vobjs_init()
{
    mapping_header_t* header = (mapping_header_t*)&seg_mapping_base;
    mapping_vspace_t* vspace = boot_get_vspace_base( header );
    mapping_vobj_t*   vobj   = boot_get_vobj_base( header );

    unsigned int vspace_id;
    unsigned int vobj_id;

    // loop on the vspaces
    for ( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ )
    {

#if BOOT_DEBUG_VOBJS
boot_puts("\n[BOOT DEBUG] ****** vobjs initialisation in vspace ");
boot_puts(vspace[vspace_id].name);
boot_puts(" ******\n");
#endif

        // We must set the PTPR depending on the vspace, because the addresses
        // of mwmr channels, barriers and locks are defined in virtual space.
        boot_set_mmu_ptpr( (unsigned int)boot_ptabs_paddr[vspace_id] >> 13 );

        unsigned int ptab_found = 0;

        // loop on the vobjs
        for(vobj_id= vspace[vspace_id].vobj_offset;
            vobj_id < (vspace[vspace_id].vobj_offset+ vspace[vspace_id].vobjs);
            vobj_id++)
        {
            switch( vobj[vobj_id].type )
            {
                case VOBJ_TYPE_MWMR:    // storage capacity is (vobj.length/4 - 6) words
                {
                    mwmr_channel_t* mwmr = (mwmr_channel_t*)(vobj[vobj_id].vaddr);
                    mwmr->ptw   = 0;
                    mwmr->ptr   = 0;
                    mwmr->sts   = 0;
                    mwmr->width = vobj[vobj_id].init;
                    mwmr->depth = (vobj[vobj_id].length>>2) - 6;
                    mwmr->lock  = 0;

#if BOOT_DEBUG_VOBJS
boot_puts("MWMR : ");
boot_puts( vobj[vobj_id].name);
boot_puts(" / depth = ");
boot_putw( mwmr->depth );
boot_puts(" / width = ");
boot_putw( mwmr->width );
boot_puts("\n");
#endif
                    break;
                }
                case VOBJ_TYPE_ELF:    // initialisation done by the loader
                {

#if BOOT_DEBUG_VOBJS
boot_puts("ELF : ");
boot_puts( vobj[vobj_id].name);
boot_puts(" / length = ");
boot_putw( vobj[vobj_id].length );
boot_puts("\n");
#endif
                    break;
                }
                case VOBJ_TYPE_BLOB:    // initialisation done by the loader
                {

#if BOOT_DEBUG_VOBJS
boot_puts("BLOB : ");
boot_puts( vobj[vobj_id].name);
boot_puts(" / length = ");
boot_putw( vobj[vobj_id].length );
boot_puts("\n");
#endif
                    break;
                }
                case VOBJ_TYPE_BARRIER:    // init is the number of participants
                {
                    giet_barrier_t* barrier = (giet_barrier_t*)(vobj[vobj_id].vaddr);
                    barrier->count = 0;
                    barrier->init  = vobj[vobj_id].init;

#if BOOT_DEBUG_VOBJS
boot_puts("BARRIER : ");
boot_puts( vobj[vobj_id].name);
boot_puts(" / init_value = ");
boot_putw( barrier->init );
boot_puts("\n");
#endif
                    break;
                }
                case VOBJ_TYPE_LOCK:    // init is "not taken"
                {
                    unsigned int* lock = (unsigned int*)(vobj[vobj_id].vaddr);
                    *lock = 0;

#if BOOT_DEBUG_VOBJS
boot_puts("LOCK : ");
boot_puts( vobj[vobj_id].name);
boot_puts("\n");
#endif
                    break;
                }
                case VOBJ_TYPE_BUFFER:    // nothing to initialise
                {

#if BOOT_DEBUG_VOBJS
boot_puts("BUFFER : ");
boot_puts( vobj[vobj_id].name);
boot_puts(" / length = ");
boot_putw( vobj[vobj_id].length );
boot_puts("\n");
#endif
                    break;
                }
                case VOBJ_TYPE_PTAB:    // nothing to initialise
                {
                    ptab_found = 1;

#if BOOT_DEBUG_VOBJS
boot_puts("PTAB : ");
boot_puts( vobj[vobj_id].name);
boot_puts(" / length = ");
boot_putw( vobj[vobj_id].length );
boot_puts("\n");
#endif
                    break;
                }
                default:
                {
                    boot_puts("\n[INIT ERROR] illegal vobj of name ");
                    boot_puts(vobj[vobj_id].name);
                    boot_puts(" / in vspace = ");
                    boot_puts(vspace[vspace_id].name);
                    boot_puts("\n ");
                    boot_exit();
                }
            } // end switch type
        } // end loop on vobjs

        if( ptab_found == 0 )
        {
            boot_puts("\n[INIT ERROR] Missing PTAB for vspace ");
            boot_putw( vspace_id );
            boot_exit();
        }
    } // end loop on vspaces
} // end boot_vobjs_init()

////////////////////////////////////////////////////////////////////////////////
// This function initializes the peripherals and coprocessors, as specified
// in the mapping_info data structure, such as the IOB component
// (I/O bridge, containing the IOMMU), the IOC (external disk controller),
// the NIC (external network controller), and the FBDMA (frame buffer controller).
////////////////////////////////////////////////////////////////////////////////
void boot_peripherals_init()
{
    mapping_header_t*  header  = (mapping_header_t*)&seg_mapping_base;
    mapping_cluster_t* cluster = boot_get_cluster_base( header );
    mapping_periph_t*  periph  = boot_get_periph_base( header );
    mapping_pseg_t*    pseg    = boot_get_pseg_base( header );

    unsigned int cluster_id;
    unsigned int periph_id;
    unsigned int coproc_id;
    unsigned int channel_id;

    for ( cluster_id = 0 ; cluster_id < header->clusters ; cluster_id++ )
    {

#if BOOT_DEBUG_PERI
boot_puts("\n[BOOT DEBUG] ****** peripheral initialisation in cluster ");
boot_putw( cluster_id );
boot_puts(" ******\n");
#endif

        for ( periph_id = cluster[cluster_id].periph_offset ;
              periph_id < cluster[cluster_id].periph_offset + cluster[cluster_id].periphs ;
              periph_id++ )
        {
            unsigned int  type      = periph[periph_id].type;
            unsigned int  channels  = periph[periph_id].channels;
            unsigned int  pseg_id   = periph[periph_id].psegid;
            unsigned int* pseg_base = (unsigned int*)pseg[pseg_id].base;

            //////// vci_block_device component
            if ( type == PERIPH_TYPE_IOC )
            {
                // activate interrupts
                pseg_base[BLOCK_DEVICE_IRQ_ENABLE] = 1;

#if BOOT_DEBUG_PERI
boot_puts("- IOC initialised : ");
boot_putw( channels );
boot_puts(" channels\n");
#endif
            }
            //////// vci_multi_dma component
            else if ( type == PERIPH_TYPE_DMA )
            {
                for ( channel_id = 0 ; channel_id < channels ; channel_id++ )
                {
                    // activate interrupts
                    pseg_base[DMA_IRQ_DISABLE + channel_id*DMA_SPAN] = 0;
                }

#if BOOT_DEBUG_PERI
boot_puts("- DMA initialised : ");
boot_putw( channels );
boot_puts(" channels\n");
#endif
            }
            //////// vci_multi_nic component
            else if ( type == PERIPH_TYPE_NIC )
            {
                for ( channel_id = 0 ; channel_id < channels ; channel_id++ )
                {
                    // TODO
                }

#if BOOT_DEBUG_PERI
boot_puts("- NIC initialised : ");
boot_putw( channels );
boot_puts(" channels\n");
#endif
            }
            //////// vci_io_bridge component
            else if ( (type == PERIPH_TYPE_IOB) && GIET_IOMMU_ACTIVE )
            {
                // get the iommu page table physical address
                // TODO

                // define IPI address mapping the IOC interrupt
                // TODO

                // set IOMMU page table address
                // pseg_base[IOB_IOMMU_PTPR] = ptab_pbase;

                // activate IOMMU
                // pseg_base[IOB_IOMMU_ACTIVE] = 1;

#if BOOT_DEBUG_PERI
boot_puts("- IOB initialised : ");
boot_putw( channels );
boot_puts(" channels\n");
#endif
            }
        } // end for periphs

        for ( coproc_id = cluster[cluster_id].coproc_offset ;
              coproc_id < cluster[cluster_id].coproc_offset + cluster[cluster_id].coprocs ;
              coproc_id++ )
        {
            // TODO
        } // end for coprocs
    } // end for clusters
} // end boot_peripherals_init()

///////////////////////////////////////////////////////////////////////////////
// This function initialises all processor schedulers.
// This is done by processor 0, and the MMU must be activated.
// It initialises the boot_schedulers_paddr[gpid] pointers array.
// Finally, it scans all tasks in all vspaces to initialise the task contexts,
// as specified in the mapping_info data structure.
// For each task, a TTY channel, a TIMER channel, a FBDMA channel, and a NIC
// channel can be allocated if required.
/////////////////////////////////////////////////////////////////////////////// void boot_schedulers_init() { mapping_header_t* header = (mapping_header_t*)&seg_mapping_base; mapping_cluster_t* cluster = boot_get_cluster_base( header ); mapping_pseg_t* pseg = boot_get_pseg_base( header ); mapping_vspace_t* vspace = boot_get_vspace_base( header ); mapping_task_t* task = boot_get_task_base( header ); mapping_vobj_t* vobj = boot_get_vobj_base( header ); mapping_proc_t* proc = boot_get_proc_base( header ); mapping_irq_t* irq = boot_get_irq_base( header ); unsigned int alloc_tty_channel; // TTY channel allocator unsigned int alloc_nic_channel; // NIC channel allocator unsigned int alloc_fbdma_channel[NB_CLUSTERS]; // FBDMA channel allocators unsigned int alloc_timer_channel[NB_CLUSTERS]; // user TIMER allocators unsigned int cluster_id; // cluster global index unsigned int proc_id; // processor global index unsigned int irq_id; // irq global index unsigned int pseg_id; // pseg global index unsigned int vspace_id; // vspace global index unsigned int task_id; // task global index; // Step 0 : TTY, NIC, TIMERS and DMA channels allocators initialisation // global_id = cluster_id*NB_*_MAX + loc_id // - TTY[0] is reserved for the kernel // - In all clusters the first NB_PROCS_MAX timers // are reserved for the kernel (context switch) alloc_tty_channel = 1; alloc_nic_channel = 0; for ( cluster_id = 0 ; cluster_id < header->clusters ; cluster_id++ ) { alloc_fbdma_channel[cluster_id] = 0; alloc_timer_channel[cluster_id] = NB_PROCS_MAX; } // Step 1 : loop on the clusters and on the processors // - initialise the boot_schedulers_paddr[] pointers array // - initialise the interrupt vectors for each processor. for ( cluster_id = 0 ; cluster_id < header->clusters ; cluster_id++ ) { #if BOOT_DEBUG_SCHED boot_puts("\n[BOOT DEBUG] Initialise schedulers / IT vector in cluster "); boot_putw( cluster_id ); boot_puts("\n"); #endif unsigned int found = 0; unsigned int pseg_pbase; // pseg base address unsigned int lpid; // processor local index // get the physical base address of the first PSEG_TYPE_RAM pseg in cluster for ( pseg_id = cluster[cluster_id].pseg_offset ; pseg_id < cluster[cluster_id].pseg_offset + cluster[cluster_id].psegs ; pseg_id++ ) { if ( pseg[pseg_id].type == PSEG_TYPE_RAM ) { pseg_pbase = pseg[pseg_id].base; found = 1; break; } } if ( (cluster[cluster_id].procs > 0) && (found == 0) ) { boot_puts("\n[BOOT ERROR] Missing RAM pseg in cluster "); boot_putw( cluster_id ); boot_puts("\n"); boot_exit(); } // 4 Kbytes per scheduler for ( lpid = 0 ; lpid < cluster[cluster_id].procs ; lpid++ ) { boot_schedulers_paddr[cluster_id*NB_PROCS_MAX + lpid] = (static_scheduler_t*)( pseg_pbase + (lpid<<12) ); } for ( proc_id = cluster[cluster_id].proc_offset ; proc_id < cluster[cluster_id].proc_offset + cluster[cluster_id].procs ; proc_id++ ) { #if BOOT_DEBUG_SCHED boot_puts("\nProc "); boot_putw( proc_id ); boot_puts(" : scheduler pbase = "); boot_putw( pseg_pbase + (proc_id<<12) ); boot_puts("\n"); #endif // initialise the "tasks" variable in scheduler boot_scheduler_set_tasks( proc_id , 0 ); // initialise the interrupt_vector with ISR_DEFAULT unsigned int slot; for ( slot = 0 ; slot < 32 ; slot++) { boot_scheduler_set_itvector( proc_id, slot, 0); } // scan the IRQs actually allocated to current processor for ( irq_id = proc[proc_id].irq_offset ; irq_id < proc[proc_id].irq_offset + proc[proc_id].irqs ; irq_id++ ) { unsigned int type = irq[irq_id].type; unsigned int icu_id = irq[irq_id].icuid; unsigned int isr_id = 
irq[irq_id].isr; unsigned int channel = irq[irq_id].channel; unsigned int value = isr_id | (type<<8) | (channel<<16); boot_scheduler_set_itvector( proc_id, icu_id, value ); #if BOOT_DEBUG_SCHED boot_puts("- IRQ : icu = "); boot_putw( icu_id ); boot_puts(" / type = "); boot_putw( type ); boot_puts(" / isr = "); boot_putw( isr_id ); boot_puts(" / channel = "); boot_putw( channel ); boot_puts("\n"); #endif } } // end for procs } // end for clusters // Step 2 : loop on the vspaces and the tasks // to initialise the schedulers and the task contexts. for ( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ ) { #if BOOT_DEBUG_SCHED boot_puts("\n[BOOT DEBUG] Initialise schedulers / task contexts for vspace "); boot_puts(vspace[vspace_id].name); boot_puts("\n"); #endif // We must set the PTPR depending on the vspace, because the start_vector // and the stack address are defined in virtual space. boot_set_mmu_ptpr( (unsigned int)boot_ptabs_paddr[vspace_id] >> 13 ); // loop on the tasks in vspace (task_id is the global index) for ( task_id = vspace[vspace_id].task_offset ; task_id < (vspace[vspace_id].task_offset + vspace[vspace_id].tasks) ; task_id++ ) { // ctx_ra : the return address is &boot_eret() unsigned int ctx_ra = (unsigned int)&boot_eret; // ctx_sr : value required before an eret instruction unsigned int ctx_sr = 0x0000FF13; // ctx_ptpr : page table physical base address (shifted by 13 bit) unsigned int ctx_ptpr = (unsigned int)boot_ptabs_paddr[vspace_id] >> 13; // ctx_ptab : page_table virtual base address unsigned int ctx_ptab = (unsigned int)boot_ptabs_vaddr[vspace_id]; // ctx_tty : terminal global index provided by a global allocator unsigned int ctx_tty = 0xFFFFFFFF; if ( task[task_id].use_tty ) { if ( alloc_tty_channel >= NB_TTYS ) { boot_puts("\n[BOOT ERROR] TTY index too large for task "); boot_puts( task[task_id].name ); boot_puts(" in vspace "); boot_puts( vspace[vspace_id].name ); boot_puts("\n"); boot_exit(); } ctx_tty = alloc_tty_channel; alloc_tty_channel++; } // ctx_nic : NIC channel global index provided by a global allocator unsigned int ctx_nic = 0xFFFFFFFF; if ( task[task_id].use_nic ) { if ( alloc_nic_channel >= NB_NICS ) { boot_puts("\n[BOOT ERROR] NIC channel index too large for task "); boot_puts( task[task_id].name ); boot_puts(" in vspace "); boot_puts( vspace[vspace_id].name ); boot_puts("\n"); boot_exit(); } ctx_nic = alloc_nic_channel; alloc_nic_channel++; } // ctx_timer : user TIMER global index provided by a cluster allocator unsigned int ctx_timer = 0xFFFFFFFF; if ( task[task_id].use_timer ) { unsigned int cluster_id = task[task_id].clusterid; if ( alloc_timer_channel[cluster_id] >= NB_TIMERS_MAX ) { boot_puts("\n[BOOT ERROR] local TIMER index too large for task "); boot_puts( task[task_id].name ); boot_puts(" in vspace "); boot_puts( vspace[vspace_id].name ); boot_puts("\n"); boot_exit(); } ctx_timer = cluster_id*NB_TIMERS_MAX + alloc_timer_channel[cluster_id]; alloc_timer_channel[cluster_id]++; } // ctx_fbdma : DMA global index provided by a cluster allocator unsigned int ctx_fbdma = 0xFFFFFFFF; if ( task[task_id].use_fbdma ) { unsigned int cluster_id = task[task_id].clusterid; if ( alloc_fbdma_channel[cluster_id] >= NB_DMAS_MAX ) { boot_puts("\n[BOOT ERROR] local FBDMA index too large for task "); boot_puts( task[task_id].name ); boot_puts(" in vspace "); boot_puts( vspace[vspace_id].name ); boot_puts("\n"); boot_exit(); } ctx_fbdma = cluster_id*NB_DMAS_MAX + alloc_fbdma_channel[cluster_id]; alloc_fbdma_channel[cluster_id]++; } // ctx_epc : Get the 
virtual address of the start function mapping_vobj_t* pvobj = &vobj[vspace[vspace_id].vobj_offset + vspace[vspace_id].start_offset]; unsigned int* start_vector_vbase = (unsigned int*)pvobj->vaddr; unsigned int ctx_epc = start_vector_vbase[task[task_id].startid]; // ctx_sp : Get the vobj containing the stack unsigned int vobj_id = task[task_id].vobjlocid + vspace[vspace_id].vobj_offset; unsigned int ctx_sp = vobj[vobj_id].vaddr + vobj[vobj_id].length; // compute gpid = global processor index unsigned int gpid = task[task_id].clusterid*NB_PROCS_MAX + task[task_id].proclocid; // In the code below, we access the scheduler with specific access // functions, because we only have the physical address of the scheduler, // and these functions must temporary desactivate the DTLB... // get local task index in scheduler[gpid] unsigned int ltid = boot_scheduler_get_tasks( gpid ); if ( ltid >= IDLE_TASK_INDEX ) { boot_puts("\n[BOOT ERROR] : "); boot_putw( ltid ); boot_puts(" tasks allocated to processor "); boot_putw( gpid ); boot_puts(" / max is 15\n"); boot_exit(); } // update the "tasks" field in scheduler[gpid] boot_scheduler_set_tasks( gpid, ltid + 1); // update the "current" field in scheduler[gpid] boot_scheduler_set_current( gpid, 0 ); // initializes the task context in scheduler[gpid] boot_scheduler_set_context( gpid, ltid, CTX_SR_ID , ctx_sr ); boot_scheduler_set_context( gpid, ltid, CTX_SP_ID , ctx_sp ); boot_scheduler_set_context( gpid, ltid, CTX_RA_ID , ctx_ra ); boot_scheduler_set_context( gpid, ltid, CTX_EPC_ID , ctx_epc ); boot_scheduler_set_context( gpid, ltid, CTX_PTPR_ID , ctx_ptpr ); boot_scheduler_set_context( gpid, ltid, CTX_TTY_ID , ctx_tty ); boot_scheduler_set_context( gpid, ltid, CTX_FBDMA_ID , ctx_fbdma ); boot_scheduler_set_context( gpid, ltid, CTX_NIC_ID , ctx_nic ); boot_scheduler_set_context( gpid, ltid, CTX_TIMER_ID , ctx_timer ); boot_scheduler_set_context( gpid, ltid, CTX_PTAB_ID , ctx_ptab ); boot_scheduler_set_context( gpid, ltid, CTX_LTID_ID , ltid ); boot_scheduler_set_context( gpid, ltid, CTX_VSID_ID , vspace_id ); boot_scheduler_set_context( gpid, ltid, CTX_RUN_ID , 1 ); #if BOOT_DEBUG_SCHED boot_puts("\nTask "); boot_puts( task[task_id].name ); boot_puts(" allocated to processor "); boot_putw( gpid ); boot_puts(" - ctx[LTID] = "); boot_putw( ltid ); boot_puts("\n"); boot_puts(" - ctx[SR] = "); boot_putw( ctx_sr ); boot_puts("\n"); boot_puts(" - ctx[SR] = "); boot_putw( ctx_sp ); boot_puts("\n"); boot_puts(" - ctx[RA] = "); boot_putw( ctx_ra ); boot_puts("\n"); boot_puts(" - ctx[EPC] = "); boot_putw( ctx_epc ); boot_puts("\n"); boot_puts(" - ctx[PTPR] = "); boot_putw( ctx_ptpr ); boot_puts("\n"); boot_puts(" - ctx[TTY] = "); boot_putw( ctx_tty ); boot_puts("\n"); boot_puts(" - ctx[NIC] = "); boot_putw( ctx_nic ); boot_puts("\n"); boot_puts(" - ctx[TIMER] = "); boot_putw( ctx_timer ); boot_puts("\n"); boot_puts(" - ctx[FBDMA] = "); boot_putw( ctx_fbdma ); boot_puts("\n"); boot_puts(" - ctx[PTAB] = "); boot_putw( ctx_ptab ); boot_puts("\n"); boot_puts(" - ctx[VSID] = "); boot_putw( vspace_id ); boot_puts("\n"); #endif } // end loop on tasks } // end loop on vspaces } // end boot_schedulers_init() ////////////////////////////////////////////////////////////////////////////////// // This function is executed by P[0] to wakeup all processors. 
////////////////////////////////////////////////////////////////////////////////// void boot_start_all_procs() { mapping_header_t* header = (mapping_header_t*)&seg_mapping_base; header->signature = OUT_MAPPING_SIGNATURE; } ///////////////////////////////////////////////////////////////////// // This function is the entry point of the initialisation procedure ///////////////////////////////////////////////////////////////////// void boot_init() { // mapping_info checking boot_check_mapping(); boot_puts("\n[BOOT] Mapping check completed at cycle "); boot_putw( boot_proctime() ); boot_puts("\n"); // pseg allocators initialisation boot_psegs_init(); boot_puts("\n[BOOT] Pseg allocators initialisation completed at cycle "); boot_putw( boot_proctime() ); boot_puts("\n"); // peripherals initialisation boot_peripherals_init(); boot_puts("\n[BOOT] Peripherals initialisation completed at cycle "); boot_putw( boot_proctime() ); boot_puts("\n"); // page table building boot_pt_init(); boot_puts("\n[BOOT] Page Tables initialisation completed at cycle "); boot_putw( boot_proctime() ); boot_puts("\n"); // mmu activation boot_set_mmu_ptpr( (unsigned int)boot_ptabs_paddr[0] >> 13 ); boot_set_mmu_mode( 0xF ); boot_puts("\n[BOOT] MMU activation completed at cycle "); boot_putw( boot_proctime() ); boot_puts("\n"); // vobjs initialisation boot_vobjs_init(); boot_puts("\n[BOOT] Vobjs initialisation completed at cycle : "); boot_putw( boot_proctime() ); boot_puts("\n"); // schedulers initialisation boot_schedulers_init(); boot_puts("\n[BOOT] Schedulers initialisation completed at cycle "); boot_putw( boot_proctime() ); boot_puts("\n"); // start all processors boot_start_all_procs(); } // end boot_init() // Local Variables: // tab-width: 4 // c-basic-offset: 4 // c-file-offsets:((innamespace . 0)(inline-open . 0)) // indent-tabs-mode: nil // End: // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=4:softtabstop=4