Changeset 819 for soft/giet_vm

- Timestamp: Apr 28, 2016, 1:12:26 PM (9 years ago)
- Location: soft/giet_vm
- Files: 2 edited
soft/giet_vm/giet_boot/boot.c (r791 → r819)

      // The three flags (Local, Remote and Dirty) are set to 1
      // to avoid hardware update for these flags, because GIET_VM
-     // does use these flags.
+     // does not use these flags.
      unsigned int flags = 0;
      if (vseg->mode & C_MODE_MASK) flags |= PTE_C;
…
      if (vseg->mode & W_MODE_MASK) flags |= PTE_W;
      if (vseg->mode & U_MODE_MASK) flags |= PTE_U;
-     if ( global ) flags |= PTE_G;
      flags |= PTE_L;
      flags |= PTE_R;
      flags |= PTE_D;
+
+ #if GIET_USE_MMU_GLOBAL_FLAG
+     if ( global ) flags |= PTE_G;
+ #endif

      // compute VPN, PPN and number of pages (big or small)
…
  // idle_thread context, the HWI / PTI / WTI interrupt vectors,
  // and the XCU HWI / PTI / WTI masks.
  // - In Step 2, it scan all threads in all vspaces to complete the threads contexts,
  //   initialisation as specified in the mapping_info data structure,
  //   and set the CP0_SCHED register.
  ////////////////////////////////////////////////////////////////////////////////////
  void boot_scheduler_init( unsigned int x,
                            unsigned int y )
  {
…
      mapping_irq_t* irq = _get_irq_base(header);

      unsigned int periph_id;
      unsigned int irq_id;
      unsigned int vspace_id;
      unsigned int vseg_id;
      unsigned int thread_id;

      unsigned int sched_vbase;     // schedulers array vbase address
      unsigned int sched_length;    // schedulers array length
      static_scheduler_t* psched;   // pointer on processor scheduler

      unsigned int cluster_id = (x * Y_SIZE) + y;
      unsigned int cluster_xy = (x << Y_WIDTH) + y;
      unsigned int nprocs = cluster[cluster_id].procs;
      unsigned int lpid;

      if ( nprocs > 8 )
      {
…

      ////////////////////////////////////////////////////////////////////////////////
      // Step 1 : - initialize the schedulers[] array of pointers,
      //          - initialize the "threads" and "current variables.
      //          - initialise the idle_thread context.
      //          - initialize the HWI, PTI and WTI interrupt vectors.
      //          - initialize the XCU masks for HWI / WTI / PTI interrupts.
      //
      // The general policy for interrupts routing is the following:
      // - the local HWI are statically allocatedted to local processors.
      // - the nprocs first PTI are allocated for TICK (one per processor).
…
      unsigned int wti_mask[8] = {0,0,0,0,0,0,0,0};

      // scan local peripherals to get and check local XCU
      mapping_periph_t* xcu = NULL;
      unsigned int min = cluster[cluster_id].periph_offset ;
…
      for ( periph_id = min ; periph_id < max ; periph_id++ )
      {
          if( periph[periph_id].type == PERIPH_TYPE_XCU )
          {
              xcu = &periph[periph_id];
…
              }
          }
      }

      if ( xcu == NULL )
      {
          _printf("\n[BOOT ERROR] missing XCU in cluster[%d,%d]\n", x , y );
          _exit();
…

      // HWI interrupt vector definition
      // scan HWI connected to local XCU
      // for round-robin allocation to local processors
      lpid = 0;
…
          hwi_mask[lpid] = hwi_mask[lpid] | (1<<srcid);

          lpid = (lpid + 1) % nprocs;
      } // end for irqs
…
      }

      // set the XCU masks for HWI / WTI / PTI interrupts
      for ( lpid = 0 ; lpid < nprocs ; lpid++ )
      {
          unsigned int channel = lpid * IRQ_PER_PROCESSOR;

          _xcu_set_mask( cluster_xy, channel, hwi_mask[lpid], IRQ_TYPE_HWI );
          _xcu_set_mask( cluster_xy, channel, wti_mask[lpid], IRQ_TYPE_WTI );
          _xcu_set_mask( cluster_xy, channel, pti_mask[lpid], IRQ_TYPE_PTI );
…
      ///////////////////////////////////////////////////////////////////////////////
      // Step 2 : Initialise the threads context. The context of a thread placed
      //          on processor P must be stored in the scheduler of P.
      //          For each vspace, this require two nested loops: loop on the threads,
      //          and loop on the local processors in cluster[x,y].
      //          We complete the scheduler when the required placement matches
      //          the local processor.
      ///////////////////////////////////////////////////////////////////////////////

      for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++)
      {
          // We must set the PTPR depending on the vspace, because the start_vector
          // and the stack address are defined in virtual space.
          _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id][x][y] >> 13) );
…
          for (thread_id = vspace[vspace_id].thread_offset;
               thread_id < (vspace[vspace_id].thread_offset + vspace[vspace_id].threads);
               thread_id++)
          {
              // get the required thread placement coordinates [x,y,p]
              unsigned int req_x = cluster[thread[thread_id].clusterid].x;
              unsigned int req_y = cluster[thread[thread_id].clusterid].y;
              unsigned int req_p = thread[thread_id].proclocid;
+
+             // skip this thread if it is allocated to another cluster
+             if ( ( req_x != x ) || ( req_y != y ) ) continue;
+
+             if ( req_p >= NB_PROCS_MAX )
+             {
+                 _printf("\n[BOOT ERROR] Bad allocation of thread %s from vspace %s\n",
+                         thread[thread_id].name,
+                         vspace[vspace_id].name);
+                 _exit();
+             }

              // ctx_norun : two conditions to activate a thread
…

              // ctx_entry : Get the virtual address of the memory location containing
              // the thread entry point : the start_vector is stored by GCC in the
              // seg_data segment, and we must wait the application.elf loading to get
              // the entry point value...
              vseg_id = vspace[vspace_id].start_vseg_id;
              unsigned int ctx_entry = vseg[vseg_id].vbase + (thread[thread_id].startid)*4;

              // ctx_sp : Get the vseg containing the stack
              // allocate 16 slots (64 bytes) for possible arguments.
              vseg_id = thread[thread_id].stack_vseg_id;
              unsigned int ctx_sp = vseg[vseg_id].vbase + vseg[vseg_id].length - 64;

-             // loop on the local processors
-             for ( lpid = 0 ; lpid < nprocs ; lpid++ )
-             {
-                 if ( (x == req_x) && (y == req_y) && (req_p == lpid) ) // fit
-                 {
              // pointer on selected scheduler
-             psched = _schedulers[x][y][lpid];
+             psched = _schedulers[x][y][req_p];

              // ltid : compute local thread index in scheduler
              unsigned int ltid = psched->threads;

              // update the threads field in scheduler:
              psched->threads = ltid + 1;

              // ctx_trdid : compute pthread global identifier
-             unsigned int ctx_trdid = x << 24 | y<<16 | lpid<<8 | ltid;
+             unsigned int ctx_trdid = (x<<24) | (y<<16) | (req_p<<8) | ltid;

              // initializes the thread context
              psched->context[ltid].slot[CTX_CR_ID] = 0;
              psched->context[ltid].slot[CTX_SR_ID] = GIET_SR_INIT_VALUE;
              psched->context[ltid].slot[CTX_SP_ID] = ctx_sp;
              psched->context[ltid].slot[CTX_EPC_ID] = ctx_entry;
              psched->context[ltid].slot[CTX_ENTRY_ID] = ctx_entry;
              psched->context[ltid].slot[CTX_PTPR_ID] = ctx_ptpr;
              psched->context[ltid].slot[CTX_PTAB_ID] = ctx_ptab;
              psched->context[ltid].slot[CTX_NPT2_ID] = ctx_npt2;
              psched->context[ltid].slot[CTX_LTID_ID] = ltid;
              psched->context[ltid].slot[CTX_TRDID_ID] = ctx_trdid;
              psched->context[ltid].slot[CTX_VSID_ID] = vspace_id;
              psched->context[ltid].slot[CTX_NORUN_ID] = ctx_norun;
              psched->context[ltid].slot[CTX_SIGS_ID] = 0;
              psched->context[ltid].slot[CTX_LOCKS_ID] = 0;

              psched->context[ltid].slot[CTX_TTY_ID] = 0xFFFFFFFF;
              psched->context[ltid].slot[CTX_CMA_FB_ID] = 0xFFFFFFFF;
              psched->context[ltid].slot[CTX_CMA_RX_ID] = 0xFFFFFFFF;
              psched->context[ltid].slot[CTX_CMA_TX_ID] = 0xFFFFFFFF;
              psched->context[ltid].slot[CTX_NIC_RX_ID] = 0xFFFFFFFF;
              psched->context[ltid].slot[CTX_NIC_TX_ID] = 0xFFFFFFFF;
              psched->context[ltid].slot[CTX_TIM_ID] = 0xFFFFFFFF;
              psched->context[ltid].slot[CTX_HBA_ID] = 0xFFFFFFFF;

              // update thread ltid field in the mapping
              thread[thread_id].ltid = ltid;

  #if BOOT_DEBUG_SCHED
…
                      thread[thread_id].name,
                      vspace[vspace_id].name,
-                     x, y, lpid,
+                     x, y, req_p,
                      psched->context[ltid].slot[CTX_LTID_ID],
                      psched->context[ltid].slot[CTX_TRDID_ID],
…
                      psched->context[ltid].slot[CTX_SIGS_ID] );
  #endif
-                 } // end if FIT
-             } // end for loop on local procs
          } // end loop on threads
      } // end loop on vspaces
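The key functional change in boot.c is easiest to see in isolation: with the new configuration flag left at its default value of 0, the preprocessor removes the PTE_G assignment entirely, so global vsegs are no longer marked global in their page table entries. The following minimal, self-contained sketch reproduces the r819 flag computation; the PTE_* bit values are placeholders chosen for illustration, not the real GIET_VM encodings.

    #include <stdio.h>

    #define GIET_USE_MMU_GLOBAL_FLAG 0   /* default value introduced in giet_config.h */

    /* placeholder bit positions, for illustration only */
    #define PTE_G 0x01   /* Global */
    #define PTE_L 0x02   /* Local  */
    #define PTE_R 0x04   /* Remote */
    #define PTE_D 0x08   /* Dirty  */

    /* compute the PTE flags for a (possibly global) vseg, as r819 does */
    static unsigned int pte_flags( unsigned int global )
    {
        unsigned int flags = 0;
        flags |= PTE_L;
        flags |= PTE_R;
        flags |= PTE_D;
    #if GIET_USE_MMU_GLOBAL_FLAG
        if ( global ) flags |= PTE_G;
    #else
        (void)global;                    /* PTE_G is never set by default */
    #endif
        return flags;
    }

    int main( void )
    {
        printf("flags = 0x%x\n", pte_flags( 1 ));   /* prints 0xe with the default config */
        return 0;
    }

Rebuilding with GIET_USE_MMU_GLOBAL_FLAG set to 1 prints 0xf instead, since PTE_G is then OR-ed in for global vsegs.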
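The other substantive change replaces the old "loop on local processors until the placement fits" search by a direct access through req_p, preceded by an early continue for threads placed in another cluster and a bounds check that turns an out-of-range proclocid into an explicit boot error instead of a silently skipped thread. The CTX_TRDID_ID slot written here packs the thread placement into one 32-bit word; the sketch below shows the packing used in r819 together with the matching field extraction (the helper name pack_trdid is illustrative, not a GIET_VM function).

    #include <stdio.h>

    /* pack a thread global identifier as boot_scheduler_init() does in r819:
     * 8 bits each for cluster x, cluster y, local processor and local thread index */
    static unsigned int pack_trdid( unsigned int x, unsigned int y,
                                    unsigned int p, unsigned int ltid )
    {
        return (x << 24) | (y << 16) | (p << 8) | ltid;
    }

    int main( void )
    {
        unsigned int trdid = pack_trdid( 1, 2, 3, 4 );
        printf("trdid = 0x%08x : x=%u y=%u p=%u ltid=%u\n", trdid,
               (trdid >> 24) & 0xFF,    /* cluster x coordinate  */
               (trdid >> 16) & 0xFF,    /* cluster y coordinate  */
               (trdid >>  8) & 0xFF,    /* local processor index */
                trdid        & 0xFF );  /* local thread index    */
        return 0;
    }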
soft/giet_vm/giet_config.h (r816 → r819)

  #define GIET_SR_INIT_VALUE        0x2000FF13 /* SR initial value (before eret) */
  #define GIET_USE_HARD_FLOAT       0          /* hard float supported */
+ #define GIET_USE_MMU_GLOBAL_FLAG  0          /* enable the use of the PTE_G flag */

  #endif
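Since the new constant defaults to 0, r819 changes the default behaviour: unless a platform configuration turns the flag back on, the boot loader stops setting PTE_G altogether. Re-enabling the optimisation is a one-line edit of giet_config.h (followed, presumably, by a rebuild of the boot loader):

    #define GIET_USE_MMU_GLOBAL_FLAG 1   /* mark global vsegs with PTE_G again */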