///////////////////////////////////////////////////////////////////////////////////
// File     : boot_init.c
// Date     : 01/04/2012
// Author   : alain greiner
// Copyright (c) UPMC-LIP6
///////////////////////////////////////////////////////////////////////////////////
// The boot_init.c file is part of the GIET-VM nano-kernel.
// This code is executed in the boot phase by proc[0] to initialize the
// peripherals and the kernel data structures:
// - page tables for the various vspaces
// - schedulers for the processors (including the task contexts and interrupt vectors)
//
// This nano-kernel has been written for the MIPS32 processor.
// The virtual addresses are 32 bits and use the (unsigned int) type, but the
// physical addresses can have up to 40 bits, and use the (unsigned long long) type.
//
// The GIET-VM uses paged virtual memory and the MAPPING_INFO binary file
// to provide two services:
// 1) classical memory protection, when several independent applications compiled
//    in different virtual spaces are executing on the same hardware platform.
// 2) data placement in NUMA architectures, when we want to control the placement
//    of the software objects (virtual segments) on the physical memory banks.
//
// The MAPPING_INFO binary data structure must be loaded in the seg_boot_mapping
// segment (at address seg_mapping_base).
// This MAPPING_INFO data structure defines
// - the hardware architecture: number of clusters, number of processors,
//   size of the memory segments, and peripherals in each cluster.
// - the structure of the various multi-threaded software applications:
//   number of tasks, communication channels.
// - the mapping: placement of virtual objects (vobj) in the virtual segments (vseg),
//   placement of virtual segments (vseg) in the physical segments (pseg), and
//   placement of software tasks on the processors.
//
// The page tables are statically built in the boot phase, and they do not
// change during execution. The GIET uses only 4 Kbytes pages.
// As most applications use only a limited number of segments, the number of PT2s
// actually used by a given virtual space is generally smaller than 2048, and is
// computed during the boot phase.
// The max number of virtual spaces (GIET_NB_VSPACE_MAX) is a configuration parameter.
//
// Each page table (one page table per virtual space) is monolithic, and contains
// one PT1 and up to (GIET_NB_PT2_MAX) PT2s. The PT1 is indexed by the ix1 field
// (11 bits) of the VPN, and the selected PT2 is indexed by the ix2 field (9 bits).
// - PT1[2048] : a first 8K aligned array of unsigned int, indexed by the ix1 field
//   of the VPN. Each entry in the PT1 contains a 32 bits PTD. The MSB bit PTD[31]
//   is the PTD valid bit, and the LSB bits PTD[27:0] contain the 28 MSB bits of the
//   physical base address of the selected PT2 (which is 4K aligned).
//   The PT1 contains 2048 PTD of 4 bytes => 8K bytes.
// - PT2[1024][GIET_NB_PT2_MAX] : an array of arrays of unsigned int.
//   Each PT2[1024] must be 4K aligned; each entry in a PT2 contains two unsigned int:
//   the first word contains the protection flags, and the second word contains the PPN.
//   Each PT2 contains 512 PTE2 of 8 bytes => 4K bytes.
// The total size of a page table is finally = 8K + (GIET_NB_PT2_MAX)*4K bytes.
////////////////////////////////////////////////////////////////////////////////////

// NOTE: the header names below are inferred from the data structures and macros
// used in this file.
#include <giet_config.h>
#include <mapping_info.h>
#include <hwr_mapping.h>
#include <mwmr_channel.h>
#include <barrier.h>
#include <memspace.h>
#include <irq_handler.h>
#include <ctx_handler.h>
#include <vm_handler.h>
#include <mips32_registers.h>

#if !defined(NB_CLUSTERS)
# error The NB_CLUSTERS value must be defined in the 'giet_config.h' file !
#endif #if !defined(NB_PROCS_MAX) # error The NB_PROCS_MAX value must be defined in the 'giet_config.h' file ! #endif #if !defined(GIET_NB_VSPACE_MAX) # error The GIET_NB_VSPACE_MAX value must be defined in the 'giet_config.h' file ! #endif //////////////////////////////////////////////////////////////////////////// // Global variables for boot code // Both the page tables for the various virtual spaces, and the schedulers // for the processors are physically distributed on the clusters. // These global variables are just arrays of pointers. //////////////////////////////////////////////////////////////////////////// // Page table addresses arrays __attribute__((section (".wdata"))) paddr_t boot_ptabs_paddr[GIET_NB_VSPACE_MAX]; __attribute__((section (".wdata"))) unsigned int boot_ptabs_vaddr[GIET_NB_VSPACE_MAX]; // Scheduler pointers array (virtual addresses) __attribute__((section (".wdata"))) static_scheduler_t* boot_schedulers[NB_CLUSTERS * NB_PROCS_MAX]; // Next free PT2 index array __attribute__((section (".wdata"))) unsigned int boot_next_free_pt2[GIET_NB_VSPACE_MAX] = { [0 ... GIET_NB_VSPACE_MAX - 1] = 0 }; // Max PT2 index __attribute__((section (".wdata"))) unsigned int boot_max_pt2[GIET_NB_VSPACE_MAX] = { [0 ... GIET_NB_VSPACE_MAX - 1] = 0 }; ////////////////////////////////////////////////////////////////////////////// // boot_procid() ////////////////////////////////////////////////////////////////////////////// inline unsigned int boot_procid() { unsigned int ret; asm volatile ("mfc0 %0, $15, 1":"=r" (ret)); return (ret & 0x3FF); } ////////////////////////////////////////////////////////////////////////////// // boot_proctime() ////////////////////////////////////////////////////////////////////////////// inline unsigned int boot_proctime() { unsigned int ret; asm volatile ("mfc0 %0, $9":"=r" (ret)); return ret; } ////////////////////////////////////////////////////////////////////////////// // boot_exit() ////////////////////////////////////////////////////////////////////////////// void boot_exit() { while (1) { asm volatile ("nop"); } } ////////////////////////////////////////////////////////////////////////////// // boot_eret() // The address of this function is used to initialise the return address (RA) // in all task contexts (when the task has never been executed. /////////////////////////////////"///////////////////////////////////////////// void boot_eret() { asm volatile ("eret"); } //////////////////////////////////////////////////////////////////////////// // boot_physical_read() // This function makes a physical read access to a 32 bits word in memory, // after a temporary DTLB de-activation and paddr extension. //////////////////////////////////////////////////////////////////////////// unsigned int boot_physical_read(paddr_t paddr) { unsigned int value; unsigned int lsb = (unsigned int) paddr; unsigned int msb = (unsigned int) (paddr >> 32); asm volatile( "mfc2 $2, $1 \n" /* $2 <= MMU_MODE */ "andi $3, $2, 0xb \n" "mtc2 $3, $1 \n" /* DTLB off */ "mtc2 %2, $24 \n" /* PADDR_EXT <= msb */ "lw %0, 0(%1) \n" /* value <= *paddr */ "mtc2 $0, $24 \n" /* PADDR_EXT <= 0 */ "mtc2 $2, $1 \n" /* restore MMU_MODE */ : "=r" (value) : "r" (lsb), "r" (msb) : "$2", "$3"); return value; } //////////////////////////////////////////////////////////////////////////// // boot_physical_write() // This function makes a physical write access to a 32 bits word in memory, // after a temporary DTLB de-activation and paddr extension. 
//////////////////////////////////////////////////////////////////////////// void boot_physical_write(paddr_t paddr, unsigned int value) { unsigned int lsb = (unsigned int)paddr; unsigned int msb = (unsigned int)(paddr >> 32); asm volatile( "mfc2 $2, $1 \n" /* $2 <= MMU_MODE */ "andi $3, $2, 0xb \n" "mtc2 $3, $1 \n" /* DTLB off */ "mtc2 %2, $24 \n" /* PADDR_EXT <= msb */ "sw %0, 0(%1) \n" /* *paddr <= value */ "mtc2 $0, $24 \n" /* PADDR_EXT <= 0 */ "mtc2 $2, $1 \n" /* restore MMU_MODE */ : : "r" (value), "r" (lsb), "r" (msb) : "$2", "$3"); } ////////////////////////////////////////////////////////////////////////////// // boot_set_mmu_ptpr() // This function set a new value for the MMU PTPR register. ////////////////////////////////////////////////////////////////////////////// inline void boot_set_mmu_ptpr(unsigned int val) { asm volatile ("mtc2 %0, $0"::"r" (val)); } ////////////////////////////////////////////////////////////////////////////// // boot_set_mmu_mode() // This function set a new value for the MMU MODE register. ////////////////////////////////////////////////////////////////////////////// inline void boot_set_mmu_mode(unsigned int val) { asm volatile ("mtc2 %0, $1"::"r" (val)); } //////////////////////////////////////////////////////////////////////////// // boot_puts() // display a string on TTY0 //////////////////////////////////////////////////////////////////////////// void boot_puts(const char * buffer) { unsigned int *tty_address = (unsigned int *) &seg_tty_base; unsigned int n; for (n = 0; n < 100; n++) { if (buffer[n] == 0) break; tty_address[TTY_WRITE] = (unsigned int) buffer[n]; } } //////////////////////////////////////////////////////////////////////////// // boot_putx() // display a 32 bits unsigned int as an hexadecimal string on TTY0 //////////////////////////////////////////////////////////////////////////// void boot_putx(unsigned int val) { static const char HexaTab[] = "0123456789ABCDEF"; char buf[11]; unsigned int c; buf[0] = '0'; buf[1] = 'x'; buf[10] = 0; for (c = 0; c < 8; c++) { buf[9 - c] = HexaTab[val & 0xF]; val = val >> 4; } boot_puts(buf); } //////////////////////////////////////////////////////////////////////////// // boot_putl() // display a 64 bits unsigned long as an hexadecimal string on TTY0 //////////////////////////////////////////////////////////////////////////// void boot_putl(paddr_t val) { static const char HexaTab[] = "0123456789ABCDEF"; char buf[19]; unsigned int c; buf[0] = '0'; buf[1] = 'x'; buf[18] = 0; for (c = 0; c < 16; c++) { buf[17 - c] = HexaTab[(unsigned int)val & 0xF]; val = val >> 4; } boot_puts(buf); } //////////////////////////////////////////////////////////////////////////// // boot_putd() // display a 32 bits unsigned int as a decimal string on TTY0 //////////////////////////////////////////////////////////////////////////// void boot_putd(unsigned int val) { static const char DecTab[] = "0123456789"; char buf[11]; unsigned int i; unsigned int first; buf[10] = 0; for (i = 0; i < 10; i++) { if ((val != 0) || (i == 0)) { buf[9 - i] = DecTab[val % 10]; first = 9 - i; } else { break; } val /= 10; } boot_puts(&buf[first]); } ///////////////////////////////////////////////////////////////////////////// // mapping_info data structure access functions ///////////////////////////////////////////////////////////////////////////// inline mapping_cluster_t *boot_get_cluster_base(mapping_header_t * header) { return (mapping_cluster_t *) ((char *) header + MAPPING_HEADER_SIZE); } 
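////////////////////////////////////////////////////////////////////////////
// boot_mapping_summary()
// Illustrative helper (a sketch, never called by the boot sequence): shows
// how the TTY functions above can be combined with the global counters of
// the mapping header; these are the same fields used by the mapping
// accessors defined in this section.
////////////////////////////////////////////////////////////////////////////
__attribute__((unused)) static void boot_mapping_summary(mapping_header_t * header)
{
    boot_puts("\n[BOOT] mapping : clusters = "); boot_putd(header->clusters);
    boot_puts(" / psegs = ");                    boot_putd(header->psegs);
    boot_puts(" / vspaces = ");                  boot_putd(header->vspaces);
    boot_puts(" / vsegs = ");                    boot_putd(header->vsegs);
    boot_puts(" / vobjs = ");                    boot_putd(header->vobjs);
    boot_puts(" / tasks = ");                    boot_putd(header->tasks);
    boot_puts("\n");
}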
///////////////////////////////////////////////////////////////////////////// inline mapping_pseg_t *boot_get_pseg_base(mapping_header_t * header) { return (mapping_pseg_t *) ((char *) header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE * header->clusters); } ///////////////////////////////////////////////////////////////////////////// inline mapping_vspace_t *boot_get_vspace_base(mapping_header_t * header) { return (mapping_vspace_t *) ((char *) header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE * header->clusters + MAPPING_PSEG_SIZE * header->psegs); } ///////////////////////////////////////////////////////////////////////////// inline mapping_vseg_t *boot_get_vseg_base(mapping_header_t * header) { return (mapping_vseg_t *) ((char *) header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE * header->clusters + MAPPING_PSEG_SIZE * header->psegs + MAPPING_VSPACE_SIZE * header->vspaces); } ///////////////////////////////////////////////////////////////////////////// inline mapping_vobj_t *boot_get_vobj_base(mapping_header_t * header) { return (mapping_vobj_t *) ((char *) header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE * header->clusters + MAPPING_PSEG_SIZE * header->psegs + MAPPING_VSPACE_SIZE * header->vspaces + MAPPING_VSEG_SIZE * header->vsegs); } ///////////////////////////////////////////////////////////////////////////// inline mapping_task_t *boot_get_task_base(mapping_header_t * header) { return (mapping_task_t *) ((char *) header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE * header->clusters + MAPPING_PSEG_SIZE * header->psegs + MAPPING_VSPACE_SIZE * header->vspaces + MAPPING_VSEG_SIZE * header->vsegs + MAPPING_VOBJ_SIZE * header->vobjs); } ///////////////////////////////////////////////////////////////////////////// inline mapping_proc_t *boot_get_proc_base(mapping_header_t * header) { return (mapping_proc_t *) ((char *) header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE * header->clusters + MAPPING_PSEG_SIZE * header->psegs + MAPPING_VSPACE_SIZE * header->vspaces + MAPPING_VSEG_SIZE * header->vsegs + MAPPING_VOBJ_SIZE * header->vobjs + MAPPING_TASK_SIZE * header->tasks); } ///////////////////////////////////////////////////////////////////////////// inline mapping_irq_t *boot_get_irq_base(mapping_header_t * header) { return (mapping_irq_t *) ((char *) header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE * header->clusters + MAPPING_PSEG_SIZE * header->psegs + MAPPING_VSPACE_SIZE * header->vspaces + MAPPING_VSEG_SIZE * header->vsegs + MAPPING_VOBJ_SIZE * header->vobjs + MAPPING_TASK_SIZE * header->tasks + MAPPING_PROC_SIZE * header->procs); } ///////////////////////////////////////////////////////////////////////////// inline mapping_coproc_t *boot_get_coproc_base(mapping_header_t * header) { return (mapping_coproc_t *) ((char *) header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE * header->clusters + MAPPING_PSEG_SIZE * header->psegs + MAPPING_VSPACE_SIZE * header->vspaces + MAPPING_VOBJ_SIZE * header->vobjs + MAPPING_VSEG_SIZE * header->vsegs + MAPPING_TASK_SIZE * header->tasks + MAPPING_PROC_SIZE * header->procs + MAPPING_IRQ_SIZE * header->irqs); } /////////////////////////////////////////////////////////////////////////////////// inline mapping_cp_port_t *boot_get_cp_port_base(mapping_header_t * header) { return (mapping_cp_port_t *) ((char *) header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE * header->clusters + MAPPING_PSEG_SIZE * header->psegs + MAPPING_VSPACE_SIZE * header->vspaces + MAPPING_VOBJ_SIZE * header->vobjs + MAPPING_VSEG_SIZE * header->vsegs + 
MAPPING_TASK_SIZE * header->tasks + MAPPING_PROC_SIZE * header->procs + MAPPING_IRQ_SIZE * header->irqs + MAPPING_COPROC_SIZE * header->coprocs); } /////////////////////////////////////////////////////////////////////////////////// inline mapping_periph_t *boot_get_periph_base(mapping_header_t * header) { return (mapping_periph_t *) ((char *) header + MAPPING_HEADER_SIZE + MAPPING_CLUSTER_SIZE * header->clusters + MAPPING_PSEG_SIZE * header->psegs + MAPPING_VSPACE_SIZE * header->vspaces + MAPPING_VOBJ_SIZE * header->vobjs + MAPPING_VSEG_SIZE * header->vsegs + MAPPING_TASK_SIZE * header->tasks + MAPPING_PROC_SIZE * header->procs + MAPPING_IRQ_SIZE * header->irqs + MAPPING_COPROC_SIZE * header->coprocs + MAPPING_CP_PORT_SIZE * header->cp_ports); } ////////////////////////////////////////////////////////////////////////////// // boot_pseg_get() // This function returns the pointer on a physical segment // identified by the pseg index. ////////////////////////////////////////////////////////////////////////////// mapping_pseg_t *boot_pseg_get(unsigned int seg_id) { mapping_header_t * header = (mapping_header_t *) & seg_mapping_base; mapping_pseg_t * pseg = boot_get_pseg_base(header); // checking argument if (seg_id >= header->psegs) { boot_puts("\n[BOOT ERROR] : seg_id argument too large\n"); boot_puts(" in function boot_pseg_get()\n"); boot_exit(); } return &pseg[seg_id]; } ////////////////////////////////////////////////////////////////////////////// // boot_add_pte() // This function registers a new PTE in the page table defined // by the vspace_id argument, and updates both PT1 and PT2. // A new PT2 is used when required. // As the set of PT2s is implemented as a fixed size array (no dynamic // allocation), this function checks a possible overflow of the PT2 array. 
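////////////////////////////////////////////////////////////////////////////
// Worked example (illustration of the PT1/PT2 indexing used below):
// for the virtual address 0x00403000, the VPN is 0x00403000 >> 12 = 0x403.
// - ix1 = 0x403 >> 9    = 2  => the PTD is read at   pt1_pbase + 4*2
// - ix2 = 0x403 & 0x1FF = 3  => the PTE2 is written at pt2_pbase + 8*3
// If the PTD is invalid, a new PT2 is allocated at
//   pt2_pbase = pt1_pbase + PT1_SIZE + PT2_SIZE * pt2_id
// and the PTD stored in PT1 is PTE_V | PTE_T | (pt2_pbase >> 12).
// The PTE2 itself is two words: the flags word, then the PPN word.
////////////////////////////////////////////////////////////////////////////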
//////////////////////////////////////////////////////////////////////////////
void boot_add_pte(unsigned int vspace_id,
                  unsigned int vpn,
                  unsigned int flags,
                  unsigned int ppn,
                  unsigned int verbose)
{
    unsigned int ix1;
    unsigned int ix2;
    paddr_t      pt1_pbase;     // PT1 physical base address
    paddr_t      pt2_pbase;     // PT2 physical base address
    paddr_t      pte_paddr;     // PTE physical address
    unsigned int pt2_id;        // PT2 index
    unsigned int ptd;           // PTD : entry in PT1
    unsigned int max_pt2;       // max number of PT2s for a given vspace

    ix1 = vpn >> 9;             // 11 bits
    ix2 = vpn & 0x1FF;          // 9 bits

    // check that the boot_max_pt2[vspace_id] has been set
    max_pt2 = boot_max_pt2[vspace_id];
    if (max_pt2 == 0)
    {
        boot_puts("Undefined page table for vspace ");
        boot_putd(vspace_id);
        boot_puts("\n");
        boot_exit();
    }

    // get page table physical base address
    pt1_pbase = boot_ptabs_paddr[vspace_id];

    // get ptd in PT1
    ptd = boot_physical_read(pt1_pbase + 4 * ix1);

    if ((ptd & PTE_V) == 0)     // invalid PTD: compute PT2 base address,
                                // and set a new PTD in PT1
    {
        pt2_id = boot_next_free_pt2[vspace_id];
        if (pt2_id == max_pt2)
        {
            boot_puts("\n[BOOT ERROR] in boot_add_pte() function\n");
            boot_puts("the length of the ptab vobj is too small\n");
            boot_exit();
        }
        else
        {
            pt2_pbase = pt1_pbase + PT1_SIZE + PT2_SIZE * pt2_id;
            ptd = PTE_V | PTE_T | (unsigned int) (pt2_pbase >> 12);
            boot_physical_write(pt1_pbase + 4 * ix1, ptd);
            boot_next_free_pt2[vspace_id] = pt2_id + 1;
        }
    }
    else                        // valid PTD: compute PT2 base address
    {
        pt2_pbase = ((paddr_t)(ptd & 0x0FFFFFFF)) << 12;
    }

    // set PTE in PT2 : flags & PPN in two 32 bits words
    pte_paddr = pt2_pbase + 8 * ix2;
    boot_physical_write(pte_paddr    , flags);
    boot_physical_write(pte_paddr + 4, ppn);

    if (verbose)
    {
        boot_puts(" / pt1_pbase = "); boot_putl(pt1_pbase);
        boot_puts(" / ptd = ");       boot_putl(ptd);
        boot_puts(" / pt2_pbase = "); boot_putl(pt2_pbase);
        boot_puts(" / pte_paddr = "); boot_putl(pte_paddr);
        boot_puts(" / ppn = ");       boot_putx(ppn);
        boot_puts("/\n");
    }
} // end boot_add_pte()

/////////////////////////////////////////////////////////////////////
// This function builds the page table for a given vspace.
// The physical base addresses for all vsegs (global and private)
// must have been previously computed and stored in the mapping
// by boot_vseg_map().
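////////////////////////////////////////////////////////////////////////////
// Example (illustration of the loop below): a vseg with length = 0x2340
// bytes covers npages = 3 pages (0x2340 >> 12 = 2, plus one page for the
// 0x340 remainder). If its mode allows C, W and U accesses, the PTE flags
// are PTE_V | PTE_C | PTE_W | PTE_U, and boot_add_pte() is called once per
// page with consecutive vpn / ppn values.
////////////////////////////////////////////////////////////////////////////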
///////////////////////////////////////////////////////////////////// void boot_vspace_pt_build(unsigned int vspace_id) { unsigned int vseg_id; unsigned int npages; unsigned int ppn; unsigned int vpn; unsigned int flags; unsigned int page_id; unsigned int verbose = 0; // can be used to activate trace in add_pte() mapping_header_t * header = (mapping_header_t *) & seg_mapping_base; mapping_vspace_t * vspace = boot_get_vspace_base(header); mapping_vseg_t * vseg = boot_get_vseg_base(header); // private segments for (vseg_id = vspace[vspace_id].vseg_offset; vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs); vseg_id++) { vpn = vseg[vseg_id].vbase >> 12; ppn = (unsigned int) (vseg[vseg_id].pbase >> 12); npages = vseg[vseg_id].length >> 12; if ((vseg[vseg_id].length & 0xFFF) != 0) npages++; flags = PTE_V; if (vseg[vseg_id].mode & C_MODE_MASK) flags = flags | PTE_C; if (vseg[vseg_id].mode & X_MODE_MASK) flags = flags | PTE_X; if (vseg[vseg_id].mode & W_MODE_MASK) flags = flags | PTE_W; if (vseg[vseg_id].mode & U_MODE_MASK) flags = flags | PTE_U; #if BOOT_DEBUG_PT boot_puts(vseg[vseg_id].name); boot_puts(" : flags = "); boot_putx(flags); boot_puts(" / npages = "); boot_putd(npages); boot_puts(" / pbase = "); boot_putl(vseg[vseg_id].pbase); boot_puts("\n"); #endif // loop on 4K pages for (page_id = 0; page_id < npages; page_id++) { boot_add_pte(vspace_id, vpn, flags, ppn, verbose); vpn++; ppn++; } } // global segments for (vseg_id = 0; vseg_id < header->globals; vseg_id++) { vpn = vseg[vseg_id].vbase >> 12; ppn = (unsigned int)(vseg[vseg_id].pbase >> 12); npages = vseg[vseg_id].length >> 12; if ((vseg[vseg_id].length & 0xFFF) != 0) npages++; flags = PTE_V; if (vseg[vseg_id].mode & C_MODE_MASK) flags = flags | PTE_C; if (vseg[vseg_id].mode & X_MODE_MASK) flags = flags | PTE_X; if (vseg[vseg_id].mode & W_MODE_MASK) flags = flags | PTE_W; if (vseg[vseg_id].mode & U_MODE_MASK) flags = flags | PTE_U; #if BOOT_DEBUG_PT boot_puts(vseg[vseg_id].name); boot_puts(" : flags = "); boot_putx(flags); boot_puts(" / npages = "); boot_putd(npages); boot_puts(" / pbase = "); boot_putl(vseg[vseg_id].pbase); boot_puts("\n"); #endif // loop on 4K pages for (page_id = 0; page_id < npages; page_id++) { boot_add_pte(vspace_id, vpn, flags, ppn, verbose); vpn++; ppn++; } } } // end boot_vspace_pt_build() /////////////////////////////////////////////////////////////////////////// // Align the value of paddr or vaddr to the required alignement, // defined by alignPow2 == L2(alignement). /////////////////////////////////////////////////////////////////////////// paddr_t paddr_align_to(paddr_t paddr, unsigned int alignPow2) { paddr_t mask = (1 << alignPow2) - 1; return ((paddr + mask) & ~mask); } unsigned int vaddr_align_to(unsigned int vaddr, unsigned int alignPow2) { unsigned int mask = (1 << alignPow2) - 1; return ((vaddr + mask) & ~mask); } /////////////////////////////////////////////////////////////////////////// // This function computes the physical base address for a vseg // as specified in the mapping info data structure. // It updates the pbase and the length fields of the vseg. // It updates the pbase and vbase fields of all vobjs in the vseg. // It updates the next_base field of the pseg, and checks overflow. // It updates the boot_ptabs_paddr[] and boot_ptabs_vaddr[] arrays. // It is a global vseg if vspace_id = (-1). 
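////////////////////////////////////////////////////////////////////////////
// Example (illustration of the alignment functions above): with
// alignPow2 = 12 the mask is 0xFFF, so paddr_align_to(0x301234, 12) returns
// 0x302000, and an already aligned address is unchanged. boot_vseg_map()
// below uses this to honor the 'align' attribute of the first vobj of a
// vseg, and of each vobj inside the vseg.
////////////////////////////////////////////////////////////////////////////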
/////////////////////////////////////////////////////////////////////////// void boot_vseg_map(mapping_vseg_t * vseg, unsigned int vspace_id) { unsigned int vobj_id; unsigned int cur_vaddr; paddr_t cur_paddr; unsigned int offset; mapping_header_t * header = (mapping_header_t *) & seg_mapping_base; mapping_vobj_t * vobj = boot_get_vobj_base(header); // get physical segment pointer mapping_pseg_t* pseg = boot_pseg_get(vseg->psegid); // compute vseg physical base address if (vseg->ident != 0) // identity mapping required { vseg->pbase = vseg->vbase; } else // unconstrained mapping { vseg->pbase = pseg->next_base; // test alignment constraint if (vobj[vseg->vobj_offset].align) { vseg->pbase = paddr_align_to(vseg->pbase, vobj[vseg->vobj_offset].align); } } // loop on vobjs contained in vseg to : // (1) computes the length of the vseg, // (2) initialize the vaddr and paddr fields of all vobjs, // (3) initialize the page table pointers arrays cur_vaddr = vseg->vbase; cur_paddr = vseg->pbase; for (vobj_id = vseg->vobj_offset; vobj_id < (vseg->vobj_offset + vseg->vobjs); vobj_id++) { if (vobj[vobj_id].align) { cur_paddr = paddr_align_to(cur_paddr, vobj[vobj_id].align); cur_vaddr = vaddr_align_to(cur_vaddr, vobj[vobj_id].align); } // set vaddr/paddr for current vobj vobj[vobj_id].vaddr = cur_vaddr; vobj[vobj_id].paddr = cur_paddr; // initialize boot_ptabs_vaddr[] & boot_ptabs-paddr[] if PTAB if (vobj[vobj_id].type == VOBJ_TYPE_PTAB) { if (vspace_id == ((unsigned int) -1)) // global vseg { boot_puts("\n[BOOT ERROR] in boot_vseg_map() function: "); boot_puts("a PTAB vobj cannot be global"); boot_exit(); } // we need at least one PT2 if (vobj[vobj_id].length < (PT1_SIZE + PT2_SIZE)) { boot_puts("\n[BOOT ERROR] in boot_vseg_map() function, "); boot_puts("PTAB too small, minumum size is: "); boot_putx(PT1_SIZE + PT2_SIZE); boot_exit(); } // register both physical and virtual page table address boot_ptabs_vaddr[vspace_id] = vobj[vobj_id].vaddr; boot_ptabs_paddr[vspace_id] = vobj[vobj_id].paddr; // reset all valid bits in PT1 for ( offset = 0 ; offset < 8192 ; offset = offset + 4) { boot_physical_write(cur_paddr + offset, 0); } // computing the number of second level pages boot_max_pt2[vspace_id] = (vobj[vobj_id].length - PT1_SIZE) / PT2_SIZE; } // set next vaddr/paddr cur_vaddr = cur_vaddr + vobj[vobj_id].length; cur_paddr = cur_paddr + vobj[vobj_id].length; } // end for vobjs //set the vseg length vseg->length = vaddr_align_to((unsigned int)(cur_paddr - vseg->pbase), 12); // checking pseg overflow if ((vseg->pbase < pseg->base) || ((vseg->pbase + vseg->length) > (pseg->base + pseg->length))) { boot_puts("\n[BOOT ERROR] in boot_vseg_map() function\n"); boot_puts("impossible mapping for virtual segment: "); boot_puts(vseg->name); boot_puts("\n"); boot_puts("vseg pbase = "); boot_putl(vseg->pbase); boot_puts("\n"); boot_puts("vseg length = "); boot_putx(vseg->length); boot_puts("\n"); boot_puts("pseg pbase = "); boot_putl(pseg->base); boot_puts("\n"); boot_puts("pseg length = "); boot_putl(pseg->length); boot_puts("\n"); boot_exit(); } #if BOOT_DEBUG_PT boot_puts(vseg->name); boot_puts(" : len = "); boot_putx(vseg->length); boot_puts(" / vbase = "); boot_putx(vseg->vbase); boot_puts(" / pbase = "); boot_putl(vseg->pbase); boot_puts("\n"); #endif // set the next_base field in pseg when it's a RAM if ( pseg->type == PSEG_TYPE_RAM ) { pseg->next_base = vseg->pbase + vseg->length; } } // end boot_vseg_map() ///////////////////////////////////////////////////////////////////// // This function checks consistence 
between the mapping_info data
// structure (soft), and the giet_config file (hard).
/////////////////////////////////////////////////////////////////////
void boot_check_mapping()
{
    mapping_header_t *  header  = (mapping_header_t *) & seg_mapping_base;
    mapping_cluster_t * cluster = boot_get_cluster_base(header);
    mapping_periph_t *  periph  = boot_get_periph_base(header);

    // checking mapping availability
    if (header->signature != IN_MAPPING_SIGNATURE)
    {
        boot_puts("\n[BOOT ERROR] Illegal mapping signature: ");
        boot_putx(header->signature);
        boot_puts("\n");
        boot_exit();
    }

    // checking number of clusters
    if (header->clusters != NB_CLUSTERS)
    {
        boot_puts("\n[BOOT ERROR] Incoherent NB_CLUSTERS");
        boot_puts("\n - In giet_config,  value = ");
        boot_putd(NB_CLUSTERS);
        boot_puts("\n - In mapping_info, value = ");
        boot_putd(header->clusters);
        boot_puts("\n");
        boot_exit();
    }

    // checking number of virtual spaces
    if (header->vspaces > GIET_NB_VSPACE_MAX)
    {
        boot_puts("\n[BOOT ERROR] : number of vspaces > GIET_NB_VSPACE_MAX\n");
        boot_puts("\n");
        boot_exit();
    }

    // checking hardware
    unsigned int periph_id;
    unsigned int cluster_id;
    unsigned int tty_found = 0;
    unsigned int nic_found = 0;
    for (cluster_id = 0; cluster_id < NB_CLUSTERS; cluster_id++)
    {
        // NB_PROCS_MAX
        if (cluster[cluster_id].procs > NB_PROCS_MAX)
        {
            boot_puts("\n[BOOT ERROR] too many processors in cluster ");
            boot_putd(cluster_id);
            boot_puts(" : procs = ");
            boot_putd(cluster[cluster_id].procs);
            boot_puts("\n");
            boot_exit();
        }

        for (periph_id = cluster[cluster_id].periph_offset;
             periph_id < cluster[cluster_id].periph_offset + cluster[cluster_id].periphs;
             periph_id++)
        {
            // NB_TTY_CHANNELS
            if (periph[periph_id].type == PERIPH_TYPE_TTY)
            {
                if (tty_found)
                {
                    boot_puts("\n[BOOT ERROR] TTY component should not be replicated\n");
                    boot_exit();
                }
                if (periph[periph_id].channels > NB_TTY_CHANNELS)
                {
                    boot_puts("\n[BOOT ERROR] Wrong NB_TTY_CHANNELS in cluster ");
                    boot_putd(cluster_id);
                    boot_puts(" : ttys = ");
                    boot_putd(periph[periph_id].channels);
                    boot_puts("\n");
                    boot_exit();
                }
                tty_found = 1;
            }
            // NB_NIC_CHANNELS
            if (periph[periph_id].type == PERIPH_TYPE_NIC)
            {
                if (nic_found)
                {
                    boot_puts("\n[BOOT ERROR] NIC component should not be replicated\n");
                    boot_exit();
                }
                if (periph[periph_id].channels != NB_NIC_CHANNELS)
                {
                    boot_puts("\n[BOOT ERROR] Wrong NB_NIC_CHANNELS in cluster ");
                    boot_putd(cluster_id);
                    boot_puts(" : nics = ");
                    boot_putd(periph[periph_id].channels);
                    boot_puts("\n");
                    boot_exit();
                }
                nic_found = 1;
            }
            // NB_TIM_CHANNELS
            if (periph[periph_id].type == PERIPH_TYPE_TIM)
            {
                if (periph[periph_id].channels > NB_TIM_CHANNELS)
                {
                    boot_puts("\n[BOOT ERROR] Too many user timers in cluster ");
                    boot_putd(cluster_id);
                    boot_puts(" : timers = ");
                    boot_putd(periph[periph_id].channels);
                    boot_puts("\n");
                    boot_exit();
                }
            }
            // NB_DMA_CHANNELS
            if (periph[periph_id].type == PERIPH_TYPE_DMA)
            {
                if (periph[periph_id].channels != NB_DMA_CHANNELS)
                {
                    boot_puts("\n[BOOT ERROR] Wrong NB_DMA_CHANNELS in cluster ");
                    boot_putd(cluster_id);
                    boot_puts(" : channels = ");
                    boot_putd(periph[periph_id].channels);
                    boot_puts(" - NB_DMA_CHANNELS : ");
                    boot_putd(NB_DMA_CHANNELS);
                    boot_puts("\n");
                    boot_exit();
                }
            }
        } // end for periphs
    } // end for clusters
} // end boot_check_mapping()

/////////////////////////////////////////////////////////////////////
// This function initialises the physical memory allocator of each
// pseg (i.e. the next_base field of the pseg).
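////////////////////////////////////////////////////////////////////////////
// Example (illustration of the next_base allocator): after boot_psegs_init()
// the next_base of a pseg equals its base; each non-identity vseg placed by
// boot_vseg_map() takes pbase = next_base (possibly rounded up first by
// paddr_align_to()) and next_base is advanced by the vseg length. A RAM pseg
// with base = 0x01000000 that receives a 0x10000 bytes vseg therefore ends
// up with next_base = 0x01010000.
////////////////////////////////////////////////////////////////////////////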
///////////////////////////////////////////////////////////////////// void boot_psegs_init() { mapping_header_t* header = (mapping_header_t *) &seg_mapping_base; mapping_cluster_t* cluster = boot_get_cluster_base(header); mapping_pseg_t* pseg = boot_get_pseg_base(header); unsigned int cluster_id; unsigned int pseg_id; #if BOOT_DEBUG_PT boot_puts ("\n[BOOT DEBUG] ****** psegs allocators initialisation ******\n"); #endif for (cluster_id = 0; cluster_id < header->clusters; cluster_id++) { if (cluster[cluster_id].procs > NB_PROCS_MAX) { boot_puts("\n[BOOT ERROR] The number of processors in cluster "); boot_putd(cluster_id); boot_puts(" is larger than NB_PROCS_MAX \n"); boot_exit(); } for (pseg_id = cluster[cluster_id].pseg_offset; pseg_id < cluster[cluster_id].pseg_offset + cluster[cluster_id].psegs; pseg_id++) { pseg[pseg_id].next_base = pseg[pseg_id].base; #if BOOT_DEBUG_PT boot_puts("cluster "); boot_putd(cluster_id); boot_puts(" / pseg "); boot_puts(pseg[pseg_id].name); boot_puts(" : next_base = "); boot_putl(pseg[pseg_id].next_base); boot_puts("\n"); #endif } } } // end boot_psegs_init() ///////////////////////////////////////////////////////////////////// // This function builds the page tables for all virtual spaces // defined in the mapping_info data structure, in three steps: // - step 1 : It computes the physical base address for global vsegs // and for all associated vobjs. // - step 2 : It computes the physical base address for all private // vsegs and all vobjs in each virtual space. // - step 3 : It actually fill the page table for each vspace. ///////////////////////////////////////////////////////////////////// void boot_pt_init() { mapping_header_t * header = (mapping_header_t *) &seg_mapping_base; mapping_vspace_t * vspace = boot_get_vspace_base(header); mapping_vseg_t * vseg = boot_get_vseg_base(header); unsigned int vspace_id; unsigned int vseg_id; #if BOOT_DEBUG_PT boot_puts("\n[BOOT DEBUG] ****** mapping global vsegs ******\n"); #endif // step 1 : loop on virtual spaces to map global vsegs for (vseg_id = 0; vseg_id < header->globals; vseg_id++) { boot_vseg_map(&vseg[vseg_id], ((unsigned int) (-1))); } // step 2 : loop on virtual vspaces to map private vsegs for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++) { #if BOOT_DEBUG_PT boot_puts("\n[BOOT DEBUG] ****** mapping private vsegs in vspace "); boot_puts(vspace[vspace_id].name); boot_puts(" ******\n"); #endif for (vseg_id = vspace[vspace_id].vseg_offset; vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs); vseg_id++) { boot_vseg_map(&vseg[vseg_id], vspace_id); } } // step 3 : loop on the vspaces to build the page tables for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++) { #if BOOT_DEBUG_PT boot_puts("\n[BOOT DEBUG] ****** building page table for vspace "); boot_puts(vspace[vspace_id].name); boot_puts(" ******\n"); #endif boot_vspace_pt_build(vspace_id); #if BOOT_DEBUG_PT boot_puts("\n>>> page table physical address = "); boot_putl(boot_ptabs_paddr[vspace_id]); boot_puts(", number of PT2 = "); boot_putd((unsigned int) boot_max_pt2[vspace_id]); boot_puts("\n"); #endif } } // end boot_pt_init() /////////////////////////////////////////////////////////////////////////////// // This function initializes all private vobjs defined in the vspaces, // such as mwmr channels, barriers and locks, because these vobjs // are not known, and not initialized by the compiler. // Warning : The MMU is supposed to be activated... 
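////////////////////////////////////////////////////////////////////////////
// Initialisation rules applied by the switch below (summary):
// - MWMR     : fifo pointers and status reset, width = init attribute,
//              depth computed from the vobj length
// - BARRIER  : count and init set to the init attribute (nb of participants)
// - LOCK     : reset to 0 (not taken)
// - CONST    : the word is set to the init attribute
// - MEMSPACE : header filled with the buffer address and size
// - ELF / BLOB / BUFFER / PTAB : nothing to initialise here
////////////////////////////////////////////////////////////////////////////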
/////////////////////////////////////////////////////////////////////////////// void boot_vobjs_init() { mapping_header_t* header = (mapping_header_t *) & seg_mapping_base; mapping_vspace_t* vspace = boot_get_vspace_base(header); mapping_vobj_t* vobj = boot_get_vobj_base(header); unsigned int vspace_id; unsigned int vobj_id; // loop on the vspaces for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++) { #if BOOT_DEBUG_VOBJS boot_puts("\n[BOOT DEBUG] ****** vobjs initialisation in vspace "); boot_puts(vspace[vspace_id].name); boot_puts(" ******\n"); #endif unsigned int ptab_found = 0; // loop on the vobjs for (vobj_id = vspace[vspace_id].vobj_offset; vobj_id < (vspace[vspace_id].vobj_offset + vspace[vspace_id].vobjs); vobj_id++) { switch (vobj[vobj_id].type) { case VOBJ_TYPE_MWMR: // storage capacity is (vobj.length/4 - 5) words { mwmr_channel_t* mwmr = (mwmr_channel_t *) (vobj[vobj_id].vaddr); mwmr->ptw = 0; mwmr->ptr = 0; mwmr->sts = 0; mwmr->width = vobj[vobj_id].init; mwmr->depth = (vobj[vobj_id].length >> 2) - 6; mwmr->lock = 0; #if BOOT_DEBUG_VOBJS boot_puts("MWMR : "); boot_puts(vobj[vobj_id].name); boot_puts(" / depth = "); boot_putd(mwmr->depth); boot_puts(" / width = "); boot_putd(mwmr->width); boot_puts("\n"); #endif break; } case VOBJ_TYPE_ELF: // initialisation done by the loader { #if BOOT_DEBUG_VOBJS boot_puts("ELF : "); boot_puts(vobj[vobj_id].name); boot_puts(" / length = "); boot_putx(vobj[vobj_id].length); boot_puts("\n"); #endif break; } case VOBJ_TYPE_BLOB: // initialisation done by the loader { #if BOOT_DEBUG_VOBJS boot_puts("BLOB : "); boot_puts(vobj[vobj_id].name); boot_puts(" / length = "); boot_putx(vobj[vobj_id].length); boot_puts("\n"); #endif break; } case VOBJ_TYPE_BARRIER: // init is the number of participants { giet_barrier_t* barrier = (giet_barrier_t *) (vobj[vobj_id].vaddr); barrier->count = vobj[vobj_id].init; barrier->init = vobj[vobj_id].init; #if BOOT_DEBUG_VOBJS boot_puts("BARRIER : "); boot_puts(vobj[vobj_id].name); boot_puts(" / init_value = "); boot_putd(barrier->init); boot_puts("\n"); #endif break; } case VOBJ_TYPE_LOCK: // init value is "not taken" { unsigned int* lock = (unsigned int *) (vobj[vobj_id].vaddr); *lock = 0; #if BOOT_DEBUG_VOBJS boot_puts("LOCK : "); boot_puts(vobj[vobj_id].name); boot_puts("\n"); #endif break; } case VOBJ_TYPE_BUFFER: // nothing to initialise { #if BOOT_DEBUG_VOBJS boot_puts("BUFFER : "); boot_puts(vobj[vobj_id].name); boot_puts(" / paddr = "); boot_putl(vobj[vobj_id].paddr); boot_puts(" / length = "); boot_putx(vobj[vobj_id].length); boot_puts("\n"); #endif break; } case VOBJ_TYPE_MEMSPACE: { giet_memspace_t* memspace = (giet_memspace_t *) vobj[vobj_id].vaddr; memspace->buffer = (void *) vobj[vobj_id].vaddr + 8; memspace->size = vobj[vobj_id].length - 8; #if BOOT_DEBUG_VOBJS boot_puts("MEMSPACE : "); boot_puts(vobj[vobj_id].name); boot_puts(" / vaddr = "); boot_putx(vobj[vobj_id].vaddr); boot_puts(" / length = "); boot_putx(vobj[vobj_id].length); boot_puts(" / buffer = "); boot_putx((unsigned int)memspace->buffer); boot_puts(" / size = "); boot_putx(memspace->size); boot_puts("\n"); #endif break; } case VOBJ_TYPE_PTAB: // nothing to initialize { ptab_found = 1; #if BOOT_DEBUG_VOBJS boot_puts("PTAB : "); boot_puts(vobj[vobj_id].name); boot_puts(" / length = "); boot_putx(vobj[vobj_id].length); boot_puts("\n"); #endif break; } case VOBJ_TYPE_CONST: { unsigned int* addr = (unsigned int *) vobj[vobj_id].vaddr; *addr = vobj[vobj_id].init; #if BOOT_DEBUG_VOBJS boot_puts("CONST : "); boot_puts(vobj[vobj_id].name); 
boot_puts(" / paddr = ");
                boot_putl(vobj[vobj_id].paddr);
                boot_puts(" / init = ");
                boot_putx(*addr);
                boot_puts("\n");
#endif
                break;
            }
            default:
            {
                boot_puts("\n[BOOT ERROR] illegal vobj type: ");
                boot_putd(vobj[vobj_id].type);
                boot_puts("\n");
                boot_exit();
            }
            } // end switch type
        } // end loop on vobjs

        if (ptab_found == 0)
        {
            boot_puts("\n[BOOT ERROR] Missing PTAB for vspace ");
            boot_putd(vspace_id);
            boot_exit();
        }
    } // end loop on vspaces
} // end boot_vobjs_init()

////////////////////////////////////////////////////////////////////////////////
// This function initializes one MWMR controller channel.
// - coproc_pbase  : physical base address of the coproc configuration segment
// - channel_pbase : physical base address of the MWMR channel segment
// Warning : the channel physical base address should fit on 32 bits, as the
// MWMR controller configuration registers are 32 bits.
// TODO : introduce a MWMR_CONFIG_PADDR_EXT register in the MWMR coprocessor
// to support addresses > 32 bits and remove this limitation...
///////////////////////////////////////////////////////////////////////////////
void mwmr_hw_init(paddr_t                coproc_pbase,
                  enum mwmrPortDirection way,
                  unsigned int           no,
                  paddr_t                channel_pbase)
{
    if ((channel_pbase >> 32) != 0)
    {
        boot_puts("\n[BOOT ERROR] MWMR controller does not support address > 32 bits\n");
        boot_exit();
    }

    unsigned int lsb = (unsigned int) channel_pbase;
    // unsigned int msb = (unsigned int)(channel_pbase>>32);

    unsigned int depth = boot_physical_read(channel_pbase + 16);
    unsigned int width = boot_physical_read(channel_pbase + 20);

    boot_physical_write(coproc_pbase + MWMR_CONFIG_FIFO_WAY * 4,    way);
    boot_physical_write(coproc_pbase + MWMR_CONFIG_FIFO_NO * 4,     no);
    boot_physical_write(coproc_pbase + MWMR_CONFIG_WIDTH * 4,       width);
    boot_physical_write(coproc_pbase + MWMR_CONFIG_DEPTH * 4,       depth);
    boot_physical_write(coproc_pbase + MWMR_CONFIG_STATUS_ADDR * 4, lsb);
    boot_physical_write(coproc_pbase + MWMR_CONFIG_BUFFER_ADDR * 4, lsb + 24);
    // boot_physical_write(coproc_pbase + MWMR_CONFIG_PADDR_EXT * 4, msb);
    boot_physical_write(coproc_pbase + MWMR_CONFIG_RUNNING * 4,     1);
}

////////////////////////////////////////////////////////////////////////////////
// This function initializes the peripherals and coprocessors, as specified
// in the mapping_info file.
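////////////////////////////////////////////////////////////////////////////
// Example (illustration of the register addressing used below): for the
// multi DMA component, the registers of channel 1 start at
// pbase + 1 * DMA_SPAN * 4, so its interrupt is disabled by writing 0 at
// pbase + (1 * DMA_SPAN + DMA_IRQ_DISABLE) * 4 with boot_physical_write().
////////////////////////////////////////////////////////////////////////////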
//////////////////////////////////////////////////////////////////////////////// void boot_peripherals_init() { mapping_header_t * header = (mapping_header_t *) & seg_mapping_base; mapping_cluster_t * cluster = boot_get_cluster_base(header); mapping_periph_t * periph = boot_get_periph_base(header); mapping_pseg_t * pseg = boot_get_pseg_base(header); mapping_vobj_t * vobj = boot_get_vobj_base(header); mapping_vspace_t * vspace = boot_get_vspace_base(header); mapping_coproc_t * coproc = boot_get_coproc_base(header); mapping_cp_port_t * cp_port = boot_get_cp_port_base(header); unsigned int cluster_id; unsigned int periph_id; unsigned int coproc_id; unsigned int cp_port_id; unsigned int channel_id; for (cluster_id = 0; cluster_id < header->clusters; cluster_id++) { #if BOOT_DEBUG_PERI boot_puts("\n[BOOT DEBUG] ****** peripherals initialisation in cluster "); boot_putd(cluster_id); boot_puts(" ******\n"); #endif for (periph_id = cluster[cluster_id].periph_offset; periph_id < cluster[cluster_id].periph_offset + cluster[cluster_id].periphs; periph_id++) { unsigned int type = periph[periph_id].type; unsigned int channels = periph[periph_id].channels; unsigned int pseg_id = periph[periph_id].psegid; paddr_t pbase = pseg[pseg_id].base; #if BOOT_DEBUG_PERI boot_puts("- peripheral type : "); boot_putd(type); boot_puts(" / pbase = "); boot_putl(pbase); boot_puts(" / channels = "); boot_putd(channels); boot_puts("\n"); #endif switch (type) { case PERIPH_TYPE_IOC: // vci_block_device component { paddr_t paddr = pbase + BLOCK_DEVICE_IRQ_ENABLE*4; boot_physical_write(paddr, 1); #if BOOT_DEBUG_PERI boot_puts("- IOC initialised\n"); #endif } break; case PERIPH_TYPE_DMA: // vci_multi_dma component for (channel_id = 0; channel_id < channels; channel_id++) { paddr_t paddr = pbase + (channel_id*DMA_SPAN + DMA_IRQ_DISABLE) * 4; boot_physical_write(paddr, 0); } #if BOOT_DEBUG_PERI boot_puts("- DMA initialised\n"); #endif break; case PERIPH_TYPE_NIC: // vci_multi_nic component for (channel_id = 0; channel_id < channels; channel_id++) { // TODO } #if BOOT_DEBUG_PERI boot_puts("- NIC initialised\n"); #endif break; case PERIPH_TYPE_TTY: // vci_multi_tty component #if BOOT_DEBUG_PERI boot_puts("- TTY initialised\n"); #endif break; case PERIPH_TYPE_IOB: // vci_io_bridge component if (IOMMU_ACTIVE) { // TODO // get the iommu page table physical address // define IPI address mapping the IOC interrupt // set IOMMU page table address // pseg_base[IOB_IOMMU_PTPR] = ptab_pbase; // activate IOMMU // pseg_base[IOB_IOMMU_ACTIVE] = 1; } #if BOOT_DEBUG_PERI boot_puts("- IOB initialised\n"); #endif break; } // end switch periph type } // end for periphs #if BOOT_DEBUG_PERI boot_puts("\n[BOOT DEBUG] ****** coprocessors initialisation in cluster "); boot_putd(cluster_id); boot_puts(" ******\n"); #endif for (coproc_id = cluster[cluster_id].coproc_offset; coproc_id < cluster[cluster_id].coproc_offset + cluster[cluster_id].coprocs; coproc_id++) { unsigned no_fifo_to = 0; //FIXME: should the map.xml define the order? 
unsigned no_fifo_from = 0;

            // get physical base address for the MWMR controller
            paddr_t coproc_pbase = pseg[coproc[coproc_id].psegid].base;

#if BOOT_DEBUG_PERI
            boot_puts("- coprocessor name : ");
            boot_puts(coproc[coproc_id].name);
            boot_puts(" / nb ports = ");
            boot_putd((unsigned int) coproc[coproc_id].ports);
            boot_puts("\n");
#endif
            for (cp_port_id = coproc[coproc_id].port_offset;
                 cp_port_id < coproc[coproc_id].port_offset + coproc[coproc_id].ports;
                 cp_port_id++)
            {
                unsigned int vspace_id = cp_port[cp_port_id].vspaceid;
                unsigned int vobj_id   = cp_port[cp_port_id].mwmr_vobjid +
                                         vspace[vspace_id].vobj_offset;

                // get MWMR channel base address
                paddr_t channel_pbase = vobj[vobj_id].paddr;

                if (cp_port[cp_port_id].direction == PORT_TO_COPROC)
                {
#if BOOT_DEBUG_PERI
                    boot_puts(" port direction: PORT_TO_COPROC");
#endif
                    mwmr_hw_init(coproc_pbase, PORT_TO_COPROC, no_fifo_to, channel_pbase);
                    no_fifo_to++;
                }
                else
                {
#if BOOT_DEBUG_PERI
                    boot_puts(" port direction: PORT_FROM_COPROC");
#endif
                    mwmr_hw_init(coproc_pbase, PORT_FROM_COPROC, no_fifo_from, channel_pbase);
                    no_fifo_from++;
                }
#if BOOT_DEBUG_PERI
                boot_puts(", with mwmr: ");
                boot_puts(vobj[vobj_id].name);
                boot_puts(" of vspace: ");
                boot_puts(vspace[vspace_id].name);
#endif
            } // end for cp_ports
        } // end for coprocs
    } // end for clusters
} // end boot_peripherals_init()

///////////////////////////////////////////////////////////////////////////////
// This function returns in the vbase and length buffers the virtual base
// address and the length of the segment allocated to the schedulers array
// in the cluster defined by the cluster_id argument.
///////////////////////////////////////////////////////////////////////////////
void boot_get_sched_vaddr( unsigned int  cluster_id,
                           unsigned int* vbase,
                           unsigned int* length )
{
    mapping_header_t* header = (mapping_header_t *) & seg_mapping_base;
    mapping_vobj_t*   vobj   = boot_get_vobj_base(header);
    mapping_vseg_t*   vseg   = boot_get_vseg_base(header);
    mapping_pseg_t*   pseg   = boot_get_pseg_base(header);

    unsigned int vseg_id;
    unsigned int found = 0;

    for ( vseg_id = 0 ; (vseg_id < header->vsegs) && (found == 0) ; vseg_id++ )
    {
        if ( (vobj[vseg[vseg_id].vobj_offset].type == VOBJ_TYPE_SCHED) &&
             (pseg[vseg[vseg_id].psegid].cluster == cluster_id ) )
        {
            *vbase  = vseg[vseg_id].vbase;
            *length = vobj[vseg[vseg_id].vobj_offset].length;
            found = 1;
        }
    }
    if ( found == 0 )
    {
        boot_puts("\n[BOOT ERROR] No vobj of type SCHED in cluster ");
        boot_putd(cluster_id);
        boot_puts("\n");
        boot_exit();
    }
} // end boot_get_sched_vaddr()

///////////////////////////////////////////////////////////////////////////////
// This function initialises all processor schedulers.
// This is done by processor 0, and the MMU must be activated.
// It initialises the boot_schedulers[gpid] pointers array.
// Finally, it scans all tasks in all vspaces to initialise the task contexts,
// as specified in the mapping_info data structure.
// For each task, TTY, NIC, CMA, IOC, TIMER and DMA channels are allocated
// if required.
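////////////////////////////////////////////////////////////////////////////
// Example (illustration of the indexing used below): with NB_PROCS_MAX = 4,
// the processor with lpid = 2 in cluster 3 has gpid = 3*4 + 2 = 14, and its
// scheduler occupies the 4 Kbytes slot at sched_vbase + (2<<12) in the
// cluster schedulers segment. Each interrupt_vector entry is encoded as
//   isr_id | (type << 8) | (channel << 16)
////////////////////////////////////////////////////////////////////////////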
/////////////////////////////////////////////////////////////////////////////// void boot_schedulers_init() { mapping_header_t* header = (mapping_header_t *) & seg_mapping_base; mapping_cluster_t* cluster = boot_get_cluster_base(header); mapping_vspace_t* vspace = boot_get_vspace_base(header); mapping_task_t* task = boot_get_task_base(header); mapping_vobj_t* vobj = boot_get_vobj_base(header); mapping_proc_t* proc = boot_get_proc_base(header); mapping_irq_t* irq = boot_get_irq_base(header); unsigned int cluster_id; // cluster index in mapping_info unsigned int proc_id; // processor index in mapping_info unsigned int irq_id; // irq index in mapping_info unsigned int vspace_id; // vspace index in mapping_info unsigned int task_id; // task index in mapping_info unsigned int alloc_tty_channel = 1; // TTY channel allocator unsigned int alloc_nic_channel = 0; // NIC channel allocator unsigned int alloc_cma_channel = 0; // CMA channel allocator unsigned int alloc_ioc_channel = 0; // IOC channel allocator unsigned int alloc_dma_channel[NB_CLUSTERS]; // DMA channel allocators unsigned int alloc_tim_channel[NB_CLUSTERS]; // user TIMER allocators ///////////////////////////////////////////////////////////////////////// // Step 1 : loop on the clusters and on the processors // to initialize the schedulers[] array of pointers and // the interrupt vectors. // Implementation note: // We need to use both proc_id to scan the mapping info structure, // and lpid to access the schedulers array. // - the boot_schedulers[] array of pointers can contain "holes", because // it is indexed by the global pid = cluster_id*NB_PROCS_MAX + ltid // - the mapping info array of processors is contiguous, it is indexed // by proc_id, and use an offset specific in each cluster. for (cluster_id = 0; cluster_id < header->clusters; cluster_id++) { #if BOOT_DEBUG_SCHED boot_puts("\n[BOOT DEBUG] Initialise schedulers in cluster "); boot_putd(cluster_id); boot_puts("\n"); #endif // TTY, NIC, CMA, IOC, TIM and DMA channels allocators // - TTY[0] is reserved for the kernel // - In all clusters the first NB_PROCS_MAX timers // are reserved for the kernel (context switch) alloc_dma_channel[cluster_id] = 0; alloc_tim_channel[cluster_id] = NB_PROCS_MAX; unsigned int lpid; // processor local index in cluster unsigned int sched_vbase; // schedulers segment virtual base address unsigned int sched_length; // schedulers segment length unsigned int nprocs; // number of processors in cluster nprocs = cluster[cluster_id].procs; // checking processors number if ( nprocs > NB_PROCS_MAX ) { boot_puts("\n[BOOT ERROR] Too much processors in cluster "); boot_putd(cluster_id); boot_puts("\n"); boot_exit(); } // get scheduler array virtual base address for cluster_id boot_get_sched_vaddr( cluster_id, &sched_vbase, &sched_length ); // each processor scheduler requires 4 Kbytes if ( sched_length < (nprocs<<12) ) { boot_puts("\n[BOOT ERROR] Schedulers segment too small in cluster "); boot_putd(cluster_id); boot_puts("\n"); boot_exit(); } for ( proc_id = cluster[cluster_id].proc_offset, lpid = 0 ; proc_id < cluster[cluster_id].proc_offset + cluster[cluster_id].procs; proc_id++, lpid++ ) { // set the schedulers pointers array boot_schedulers[cluster_id * NB_PROCS_MAX + lpid] = (static_scheduler_t*)( sched_vbase + (lpid<<12) ); #if BOOT_DEBUG_SCHED boot_puts("\nProc "); boot_putd(lpid); boot_puts(" : scheduler virtual base address = "); boot_putx( sched_vbase + (lpid<<12) ); boot_puts("\n"); #endif // current processor scheduler pointer : psched 
static_scheduler_t* psched = (static_scheduler_t*)(sched_vbase+(lpid<<12)); // initialise the "tasks" variable psched->tasks = 0; // initialise the interrupt_vector with ISR_DEFAULT unsigned int slot; for (slot = 0; slot < 32; slot++) psched->interrupt_vector[slot] = 0; // scan the IRQs actually allocated to current processor for (irq_id = proc[proc_id].irq_offset; irq_id < proc[proc_id].irq_offset + proc[proc_id].irqs; irq_id++) { unsigned int type = irq[irq_id].type; unsigned int icu_id = irq[irq_id].icuid; unsigned int isr_id = irq[irq_id].isr; unsigned int channel = irq[irq_id].channel; unsigned int value = isr_id | (type << 8) | (channel << 16); psched->interrupt_vector[icu_id] = value; #if BOOT_DEBUG_SCHED boot_puts("- IRQ : icu = "); boot_putd(icu_id); boot_puts(" / type = "); boot_putd(type); boot_puts(" / isr = "); boot_putd(isr_id); boot_puts(" / channel = "); boot_putd(channel); boot_puts(" => vector_entry = "); boot_putx( value ); boot_puts("\n"); #endif } } // end for procs } // end for clusters /////////////////////////////////////////////////////////////////// // Step 2 : loop on the vspaces and the tasks // to initialise the schedulers and the task contexts. // Implementation note: // This function initialises the task context for all tasks. // For each processor, the scheduler virtual base address // is written in the CP0_SCHED register in reset.S for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++) { #if BOOT_DEBUG_SCHED boot_puts("\n[BOOT DEBUG] Initialise task contexts for vspace "); boot_puts(vspace[vspace_id].name); boot_puts("\n"); #endif // We must set the PTPR depending on the vspace, because the start_vector // and the stack address are defined in virtual space. boot_set_mmu_ptpr( (unsigned int)(boot_ptabs_paddr[vspace_id] >> 13) ); // loop on the tasks in vspace (task_id is the global index) for (task_id = vspace[vspace_id].task_offset; task_id < (vspace[vspace_id].task_offset + vspace[vspace_id].tasks); task_id++) { // compute gpid (global processor index) and scheduler base address unsigned int gpid = task[task_id].clusterid * NB_PROCS_MAX + task[task_id].proclocid; static_scheduler_t* psched = boot_schedulers[gpid]; // ctx_ra : the return address is &boot_eret() unsigned int ctx_ra = (unsigned int) &boot_eret; // ctx_sr : value required before an eret instruction unsigned int ctx_sr = 0x0000FF13; // ctx_ptpr : page table physical base address (shifted by 13 bit) unsigned int ctx_ptpr = (unsigned int)(boot_ptabs_paddr[vspace_id] >> 13); // ctx_ptab : page_table virtual base address unsigned int ctx_ptab = boot_ptabs_vaddr[vspace_id]; // ctx_tty : terminal global index provided by the global allocator unsigned int ctx_tty = 0xFFFFFFFF; if (task[task_id].use_tty) { if (alloc_tty_channel >= NB_TTY_CHANNELS) { boot_puts("\n[BOOT ERROR] TTY index too large for task "); boot_puts(task[task_id].name); boot_puts(" in vspace "); boot_puts(vspace[vspace_id].name); boot_puts("\n"); boot_exit(); } ctx_tty = alloc_tty_channel; alloc_tty_channel++; } // ctx_nic : NIC channel global index provided by the global allocator unsigned int ctx_nic = 0xFFFFFFFF; if (task[task_id].use_nic) { if (alloc_nic_channel >= NB_NIC_CHANNELS) { boot_puts("\n[BOOT ERROR] NIC channel index too large for task "); boot_puts(task[task_id].name); boot_puts(" in vspace "); boot_puts(vspace[vspace_id].name); boot_puts("\n"); boot_exit(); } ctx_nic = alloc_nic_channel; alloc_nic_channel++; } // ctx_cma : CMA channel global index provided by the global allocator unsigned int ctx_cma = 
0xFFFFFFFF; if (task[task_id].use_cma) { if (alloc_cma_channel >= NB_CMA_CHANNELS) { boot_puts("\n[BOOT ERROR] CMA channel index too large for task "); boot_puts(task[task_id].name); boot_puts(" in vspace "); boot_puts(vspace[vspace_id].name); boot_puts("\n"); boot_exit(); } ctx_cma = alloc_cma_channel; alloc_cma_channel++; } // ctx_ioc : IOC channel global index provided by the global allocator unsigned int ctx_ioc = 0xFFFFFFFF; if (task[task_id].use_ioc) { if (alloc_ioc_channel >= NB_IOC_CHANNELS) { boot_puts("\n[BOOT ERROR] IOC channel index too large for task "); boot_puts(task[task_id].name); boot_puts(" in vspace "); boot_puts(vspace[vspace_id].name); boot_puts("\n"); boot_exit(); } ctx_ioc = alloc_ioc_channel; alloc_ioc_channel++; } // ctx_tim : TIMER local channel index provided by the cluster allocator unsigned int ctx_tim = 0xFFFFFFFF; if (task[task_id].use_tim) { unsigned int cluster_id = task[task_id].clusterid; if ( alloc_tim_channel[cluster_id] >= NB_TIM_CHANNELS ) { boot_puts("\n[BOOT ERROR] local TIMER index too large for task "); boot_puts(task[task_id].name); boot_puts(" in vspace "); boot_puts(vspace[vspace_id].name); boot_puts("\n"); boot_exit(); } // checking that there is a well defined ISR_TIMER installed unsigned int found = 0; for ( irq_id = 0 ; irq_id < 32 ; irq_id++ ) { unsigned int entry = psched->interrupt_vector[irq_id]; unsigned int isr = entry & 0x000000FF; unsigned int channel = entry>>16; if ( (isr == ISR_TIMER) && (channel == alloc_tim_channel[cluster_id]) ) { found = 1; ctx_tim = alloc_tim_channel[cluster_id]; alloc_tim_channel[cluster_id]++; break; } } if (!found) { boot_puts("\n[BOOT ERROR] No ISR_TIMER installed for task "); boot_puts(task[task_id].name); boot_puts(" in vspace "); boot_puts(vspace[vspace_id].name); boot_puts("\n"); boot_exit(); } } // ctx_dma : the local channel index is defined by the cluster allocator // but the ctx_dma value is a global index unsigned int ctx_dma = 0xFFFFFFFF; if ( task[task_id].use_dma ) { unsigned int cluster_id = task[task_id].clusterid; if (alloc_dma_channel[cluster_id] >= NB_DMA_CHANNELS) { boot_puts("\n[BOOT ERROR] local DMA index too large for task "); boot_puts(task[task_id].name); boot_puts(" in vspace "); boot_puts(vspace[vspace_id].name); boot_puts("\n"); boot_exit(); } ctx_dma = cluster_id * NB_DMA_CHANNELS + alloc_dma_channel[cluster_id]; alloc_dma_channel[cluster_id]++; } // ctx_epc : Get the virtual address of the start function mapping_vobj_t* pvobj = &vobj[vspace[vspace_id].vobj_offset + vspace[vspace_id].start_offset]; unsigned int* start_vector_vbase = (unsigned int *) pvobj->vaddr; unsigned int ctx_epc = start_vector_vbase[task[task_id].startid]; // ctx_sp : Get the vobj containing the stack unsigned int vobj_id = task[task_id].stack_vobjid + vspace[vspace_id].vobj_offset; unsigned int ctx_sp = vobj[vobj_id].vaddr + vobj[vobj_id].length; // get local task index in scheduler unsigned int ltid = psched->tasks; if (ltid >= IDLE_TASK_INDEX) { boot_puts("\n[BOOT ERROR] : "); boot_putd(ltid); boot_puts(" tasks allocated to processor "); boot_putd(gpid); boot_puts(" / max is 15\n"); boot_exit(); } // update the "tasks" field in scheduler psched->tasks = ltid + 1; // update the "current" field in scheduler psched->current = 0; // initializes the task context in scheduler psched->context[ltid][CTX_SR_ID] = ctx_sr; psched->context[ltid][CTX_SP_ID] = ctx_sp; psched->context[ltid][CTX_RA_ID] = ctx_ra; psched->context[ltid][CTX_EPC_ID] = ctx_epc; psched->context[ltid][CTX_PTPR_ID] = ctx_ptpr; 
psched->context[ltid][CTX_TTY_ID]  = ctx_tty;
            psched->context[ltid][CTX_CMA_ID]  = ctx_cma;
            psched->context[ltid][CTX_IOC_ID]  = ctx_ioc;
            psched->context[ltid][CTX_NIC_ID]  = ctx_nic;
            psched->context[ltid][CTX_TIM_ID]  = ctx_tim;
            psched->context[ltid][CTX_DMA_ID]  = ctx_dma;
            psched->context[ltid][CTX_PTAB_ID] = ctx_ptab;
            psched->context[ltid][CTX_LTID_ID] = ltid;
            psched->context[ltid][CTX_GTID_ID] = task_id;
            psched->context[ltid][CTX_VSID_ID] = vspace_id;
            psched->context[ltid][CTX_RUN_ID]  = 1;

#if BOOT_DEBUG_SCHED
            boot_puts("\nTask ");
            boot_puts(task[task_id].name);
            boot_puts(" (");
            boot_putd(task_id);
            boot_puts(") allocated to processor ");
            boot_putd(gpid);
            boot_puts("\n - ctx[LTID] = "); boot_putd(ltid);
            boot_puts("\n - ctx[SR]   = "); boot_putx(ctx_sr);
            boot_puts("\n - ctx[SP]   = "); boot_putx(ctx_sp);
            boot_puts("\n - ctx[RA]   = "); boot_putx(ctx_ra);
            boot_puts("\n - ctx[EPC]  = "); boot_putx(ctx_epc);
            boot_puts("\n - ctx[PTPR] = "); boot_putx(ctx_ptpr);
            boot_puts("\n - ctx[TTY]  = "); boot_putd(ctx_tty);
            boot_puts("\n - ctx[NIC]  = "); boot_putd(ctx_nic);
            boot_puts("\n - ctx[CMA]  = "); boot_putd(ctx_cma);
            boot_puts("\n - ctx[IOC]  = "); boot_putd(ctx_ioc);
            boot_puts("\n - ctx[TIM]  = "); boot_putd(ctx_tim);
            boot_puts("\n - ctx[DMA]  = "); boot_putd(ctx_dma);
            boot_puts("\n - ctx[PTAB] = "); boot_putx(ctx_ptab);
            boot_puts("\n - ctx[GTID] = "); boot_putd(task_id);
            boot_puts("\n - ctx[VSID] = "); boot_putd(vspace_id);
            boot_puts("\n");
#endif
        } // end loop on tasks
    } // end loop on vspaces
} // end boot_schedulers_init()

//////////////////////////////////////////////////////////////////////////////////
// This function is executed by P[0] to wake up all processors.
//////////////////////////////////////////////////////////////////////////////////
void boot_start_all_procs()
{
    mapping_header_t * header = (mapping_header_t *) &seg_mapping_base;
    header->signature = OUT_MAPPING_SIGNATURE;
}

/////////////////////////////////////////////////////////////////////
// This function is the entry point of the initialisation procedure
/////////////////////////////////////////////////////////////////////
void boot_init()
{
    // mapping_info checking
    boot_check_mapping();
    boot_puts("\n[BOOT] Mapping check completed at cycle ");
    boot_putd(boot_proctime());
    boot_puts("\n");

    // pseg allocators initialisation
    boot_psegs_init();
    boot_puts("\n[BOOT] Pseg allocators initialisation completed at cycle ");
    boot_putd(boot_proctime());
    boot_puts("\n");

    // page table building
    boot_pt_init();
    boot_puts("\n[BOOT] Page Tables initialisation completed at cycle ");
    boot_putd(boot_proctime());
    boot_puts("\n");

    // mmu activation (with page table [0])
    boot_set_mmu_ptpr((unsigned int) (boot_ptabs_paddr[0] >> 13));
    boot_set_mmu_mode(0xF);
    boot_puts("\n[BOOT] Proc 0 MMU activation at cycle ");
    boot_putd(boot_proctime());
    boot_puts("\n");

    // vobjs initialisation
    boot_vobjs_init();
    boot_puts("\n[BOOT] Vobjs initialisation completed at cycle ");
    boot_putd(boot_proctime());
    boot_puts("\n");

    // peripherals initialisation
    boot_peripherals_init();
    boot_puts("\n[BOOT] Peripherals initialisation completed at cycle ");
    boot_putd(boot_proctime());
    boot_puts("\n");

    // schedulers initialisation
    boot_schedulers_init();
    boot_puts("\n[BOOT] Schedulers initialisation completed at cycle ");
    boot_putd(boot_proctime());
    boot_puts("\n");

    // start all processors
    boot_start_all_procs();

} // end boot_init()

// Local Variables:
// tab-width: 4
// c-basic-offset: 4
// c-file-offsets:((innamespace . 0)(inline-open . 0))
// indent-tabs-mode: nil
// End:

// vim: filetype=c:expandtab:shiftwidth=4:tabstop=4:softtabstop=4