////////////////////////////////////////////////////////////////////////////////////
// File    : init.c
// Date    : 26/05/2012
// Authors : alain greiner & mohamed karaoui
// Copyright (c) UPMC-LIP6
////////////////////////////////////////////////////////////////////////////////////
// The init.c file is part of the GIET nano-kernel.
// This code can be used in the second phase of the boot to initialise the kernel
// structures and to launch one or several multi-task applications on a many-core
// hardware architecture.
// All processors at this phase should have their MMU activated; this activation is
// done by the boot phase, which is responsible for constructing all the page tables.
////////////////////////////////////////////////////////////////////////////////////
14 | |
---|
15 | #include <common.h> |
---|
16 | #include <ctx_handler.h> |
---|
17 | #include <sys_handler.h> |
---|
18 | #include <mapping_info.h> |
---|
19 | #include <giet_config.h> |
---|
20 | #include <mips32_registers.h> |
---|
21 | #include <irq_handler.h> |
---|
22 | #include <hwr_mapping.h> |
---|
23 | #include <mwmr.h> |
---|
24 | |
---|
// Place the kernel-init code in the dedicated ".kinit" section, so the linker
// script can locate it separately from the resident kernel code.
#define in_kinit __attribute__((section (".kinit")))
26 | |
---|
// Page table base (physical) address for each virtual space.
// Filled by _tcg_init() from the PTAB vobj built during the boot phase.
unsigned int _ptabs[GIET_NB_VSPACE_MAX];

// forward declarations (defined below in this file)
void _tcg_init();
void _peri_init();
extern void _task_init();
32 | |
---|
33 | in_kinit void _init() |
---|
34 | { |
---|
35 | _puts("\n KERNEL INIT! \n "); |
---|
36 | |
---|
37 | // building tasks contexts |
---|
38 | _tcg_init(); |
---|
39 | _puts("\n[INIT] Task Contexts completed at cycle "); |
---|
40 | _putw( _proctime() ); |
---|
41 | _puts("\n"); |
---|
42 | |
---|
43 | // Initialize peripherals |
---|
44 | _peri_init(); |
---|
45 | _puts("\n[INIT] Peripherals completed at cycle "); |
---|
46 | _putw( _proctime() ); |
---|
47 | _puts("\n"); |
---|
48 | |
---|
49 | //wakeup all other processor |
---|
50 | mapping_header_t* header = (mapping_header_t*)&seg_mapping_base; |
---|
51 | header->signature = OUT_MAPPING_SIGNATURE; |
---|
52 | } |
---|
53 | |
---|
////////////////////////////////////////////////////////////////////////////
// _eret()
// Wrapper around the MIPS32 "eret" instruction: returns from exception
// mode, jumping to the address held in the EPC register.
// Its address is installed as the RA value of every task context (see
// _task_map), so the first context restore enters the task through an
// exception return.
////////////////////////////////////////////////////////////////////////////
in_kinit void _eret()
{
    asm volatile("eret \n"
                 "nop");
}
62 | |
---|
63 | /////////////////////////////////////////////////////////////////////////////// |
---|
64 | // This function initialises the task context for a given vspace. |
---|
65 | // There is a private context array for each vspace, indexed by the |
---|
66 | // (task_id, proc_id) composite index. |
---|
67 | // The following values are written in the task context: |
---|
68 | // - SP stack pointer = stack_base + stack_length |
---|
69 | // - RA return address = &_eret |
---|
70 | // - EPC start address = start_vector[task->startid] |
---|
71 | // - SR status register = OxFF13 |
---|
72 | // - TTY TTY index = base_tty_id + tty_local_id |
---|
73 | // - PTPR page table base address / 8K |
---|
74 | // - MODE mmu_mode = 0xF (TLBs and caches activated) |
---|
75 | // It statically allocates the task to the proper scheduler |
---|
76 | // (one scheduler per processor). |
---|
77 | //////////////////////////////////////////////////////////////////////////////// |
---|
78 | in_kinit void _task_map( unsigned int task_id, // global index |
---|
79 | unsigned int vspace_id, // global index |
---|
80 | unsigned int base_tty_id, |
---|
81 | unsigned int* start_vector ) |
---|
82 | { |
---|
83 | mapping_header_t* header = (mapping_header_t*)&seg_mapping_base; |
---|
84 | |
---|
85 | mapping_vseg_t* vseg = _get_vseg_base(header); |
---|
86 | mapping_task_t* task = _get_task_base(header); |
---|
87 | mapping_vspace_t* vspace = _get_vspace_base(header); |
---|
88 | |
---|
89 | unsigned int vseg_id; |
---|
90 | unsigned int loc_id; |
---|
91 | unsigned int proc_id; |
---|
92 | |
---|
93 | unsigned int sp; |
---|
94 | unsigned int ra = (unsigned int)&_eret; |
---|
95 | |
---|
96 | #if INIT_DEBUG_CTX |
---|
97 | _puts("\n[INIT] : task start vector "); |
---|
98 | _putw((unsigned int)start_vector); |
---|
99 | _puts("\n[INIT] : task startid "); |
---|
100 | _putw(task[task_id].startid); |
---|
101 | #endif |
---|
102 | unsigned int epc = start_vector[task[task_id].startid]; |
---|
103 | unsigned int tty = base_tty_id + task[task_id].ttylocid; |
---|
104 | unsigned int sr = 0x0000FF13; |
---|
105 | unsigned int mode = 0xF; |
---|
106 | |
---|
107 | unsigned int ptpr = ((unsigned int)_ptabs[vspace_id]) >> 13; |
---|
108 | |
---|
109 | // check values |
---|
110 | if ( task[task_id].proclocid >= NB_PROCS ) |
---|
111 | { |
---|
112 | _puts("\n[INIT ERROR] : processor index too large for task "); |
---|
113 | _puts( task[task_id].name ); |
---|
114 | _puts(" in vspace "); |
---|
115 | _puts( vspace[vspace_id].name ); |
---|
116 | _puts("\n"); |
---|
117 | _exit(); |
---|
118 | } |
---|
119 | if ( task[task_id].clusterid >= NB_CLUSTERS ) |
---|
120 | { |
---|
121 | _puts("\n[INIT ERROR] : cluster index too large for task "); |
---|
122 | _puts( task[task_id].name ); |
---|
123 | _puts(" in vspace "); |
---|
124 | _puts( vspace[vspace_id].name ); |
---|
125 | _puts("\n"); |
---|
126 | _exit(); |
---|
127 | } |
---|
128 | if ( task[task_id].vobjlocid >= vspace->vsegs ) |
---|
129 | { |
---|
130 | _puts("\n[INIT ERROR] : vseg index too large for task "); |
---|
131 | _puts( task[task_id].name ); |
---|
132 | _puts(" in vspace "); |
---|
133 | _puts( vspace[vspace_id].name ); |
---|
134 | _puts("\n"); |
---|
135 | _exit(); |
---|
136 | } |
---|
137 | if ( task[task_id].startid >= vspace->tasks ) |
---|
138 | { |
---|
139 | _puts("\n[INIT ERROR] : start index too large for task "); |
---|
140 | _puts( task[task_id].name ); |
---|
141 | _puts(" in vspace "); |
---|
142 | _puts( vspace[vspace_id].name ); |
---|
143 | _puts("\n"); |
---|
144 | _exit(); |
---|
145 | } |
---|
146 | if ( tty >= NB_TTYS ) |
---|
147 | { |
---|
148 | _puts("\n[INIT ERROR] : TTY index too large for task "); |
---|
149 | _puts( task[task_id].name ); |
---|
150 | _puts(" in vspace "); |
---|
151 | _puts( vspace[vspace_id].name ); |
---|
152 | _puts("\n"); |
---|
153 | _exit(); |
---|
154 | } |
---|
155 | |
---|
156 | // get stack pointer value |
---|
157 | vseg_id = task[task_id].vobjlocid + vspace[vspace_id].vobj_offset; |
---|
158 | sp = vseg[vseg_id].vbase + vseg[vseg_id].length; |
---|
159 | |
---|
160 | // compute global processor index |
---|
161 | proc_id = task[task_id].clusterid * NB_PROCS + task[task_id].proclocid; |
---|
162 | |
---|
163 | // check local task index |
---|
164 | loc_id = _scheduler[proc_id].tasks; |
---|
165 | if ( loc_id >= GIET_NB_TASKS_MAX ) |
---|
166 | { |
---|
167 | _puts("\n[INIT ERROR] : too much tasks allocated to processor "); |
---|
168 | _putw( proc_id ); |
---|
169 | _puts("\n"); |
---|
170 | _exit(); |
---|
171 | } |
---|
172 | |
---|
173 | // update number of tasks allocated to scheduler |
---|
174 | _scheduler[proc_id].tasks = loc_id + 1; |
---|
175 | |
---|
176 | // initializes the task context |
---|
177 | _scheduler[proc_id].context[loc_id][CTX_SR_ID] = sr; |
---|
178 | _scheduler[proc_id].context[loc_id][CTX_SP_ID] = sp; |
---|
179 | _scheduler[proc_id].context[loc_id][CTX_RA_ID] = ra; |
---|
180 | _scheduler[proc_id].context[loc_id][CTX_EPC_ID] = epc; |
---|
181 | _scheduler[proc_id].context[loc_id][CTX_TTY_ID] = tty; |
---|
182 | _scheduler[proc_id].context[loc_id][CTX_PTPR_ID] = ptpr; |
---|
183 | _scheduler[proc_id].context[loc_id][CTX_MODE_ID] = mode; |
---|
184 | |
---|
185 | #if INIT_DEBUG_CTX |
---|
186 | _puts("Task "); |
---|
187 | _puts( task[task_id].name ); |
---|
188 | _puts(" allocated to processor "); |
---|
189 | _putw( proc_id ); |
---|
190 | _puts(" / loc_id = "); |
---|
191 | _putw( loc_id ); |
---|
192 | _puts("\n"); |
---|
193 | |
---|
194 | _puts(" - SR = "); |
---|
195 | _putw( sr ); |
---|
196 | _puts(" saved at "); |
---|
197 | _putw( (unsigned int)&_scheduler[proc_id].context[loc_id][CTX_SR_ID] ); |
---|
198 | _puts("\n"); |
---|
199 | |
---|
200 | _puts(" - RA = "); |
---|
201 | _putw( ra ); |
---|
202 | _puts(" saved at "); |
---|
203 | _putw( (unsigned int)&_scheduler[proc_id].context[loc_id][CTX_RA_ID] ); |
---|
204 | _puts("\n"); |
---|
205 | |
---|
206 | _puts(" - SP = "); |
---|
207 | _putw( sp ); |
---|
208 | _puts(" saved at "); |
---|
209 | _putw( (unsigned int)&_scheduler[proc_id].context[loc_id][CTX_SP_ID] ); |
---|
210 | _puts("\n"); |
---|
211 | |
---|
212 | _puts(" - EPC = "); |
---|
213 | _putw( epc ); |
---|
214 | _puts(" saved at "); |
---|
215 | _putw( (unsigned int)&_scheduler[proc_id].context[loc_id][CTX_EPC_ID] ); |
---|
216 | _puts("\n"); |
---|
217 | |
---|
218 | _puts(" - TTY = "); |
---|
219 | _putw( tty ); |
---|
220 | _puts(" saved at "); |
---|
221 | _putw( (unsigned int)&_scheduler[proc_id].context[loc_id][CTX_TTY_ID] ); |
---|
222 | _puts("\n"); |
---|
223 | |
---|
224 | _puts(" - PTPR = "); |
---|
225 | _putw( ptpr<<13 ); |
---|
226 | _puts(" saved at "); |
---|
227 | _putw( (unsigned int)&_scheduler[proc_id].context[loc_id][CTX_PTPR_ID] ); |
---|
228 | _puts("\n"); |
---|
229 | |
---|
230 | _puts(" - MODE = "); |
---|
231 | _putw( mode ); |
---|
232 | _puts(" saved at "); |
---|
233 | _putw( (unsigned int)&_scheduler[proc_id].context[loc_id][CTX_MODE_ID] ); |
---|
234 | _puts("\n"); |
---|
235 | #endif |
---|
236 | |
---|
237 | } // end _task_map() |
---|
238 | |
---|
239 | /////////////////////////////////////////////////////////////////////////// |
---|
240 | // Initialise vobjs |
---|
241 | // parm: |
---|
242 | // vobj: the vobj to initialise |
---|
243 | // region_id: the vspace in wich the vobj is located or the global space(-1). |
---|
244 | /////////////////////////////////////////////////////////////////////////// |
---|
245 | void initialise_vobj(mapping_vobj_t* vobj, unsigned int region_id) |
---|
246 | { |
---|
247 | mwmr_channel_t* mwmr; |
---|
248 | switch(vobj->type) |
---|
249 | { |
---|
250 | |
---|
251 | case PTAB: |
---|
252 | break; |
---|
253 | |
---|
254 | case MWMR: |
---|
255 | // initializes MWMR channel if vseg is a MWMR |
---|
256 | // the channel storage capacity is (vobj->length/4 - 5) words |
---|
257 | mwmr = (mwmr_channel_t*)(vobj->vaddr); |
---|
258 | mwmr->ptw = 0; |
---|
259 | mwmr->ptr = 0; |
---|
260 | mwmr->sts = 0; |
---|
261 | mwmr->depth = (vobj->length>>2) - 5; |
---|
262 | mwmr->lock = 0; |
---|
263 | |
---|
264 | #if INIT_DEBUG_CTX |
---|
265 | _puts(" MWMR channel name = "); |
---|
266 | _puts( vobj->name); |
---|
267 | _puts(" MWMR channel depth = "); |
---|
268 | _putw( mwmr->depth ); |
---|
269 | _puts("\n"); |
---|
270 | #endif |
---|
271 | break; |
---|
272 | case ELF: |
---|
273 | break; |
---|
274 | case PERI: |
---|
275 | break; |
---|
276 | case BARRIER: |
---|
277 | case BUFFER: |
---|
278 | case LOCK: |
---|
279 | break;//TODO |
---|
280 | default: |
---|
281 | _puts("Unknown Ressource of type: "); |
---|
282 | _putw(vobj->type); |
---|
283 | _puts("Unknown Ressource name: "); |
---|
284 | _puts(vobj->name); |
---|
285 | _puts("\n "); |
---|
286 | _exit(); |
---|
287 | } |
---|
288 | } |
---|
289 | |
---|
290 | |
---|
291 | /////////////////////////////////////////////////////////////////////////////// |
---|
292 | // |
---|
293 | /////////////////////////////////////////////////////////////////////////////// |
---|
294 | void _set_ptpr(unsigned int vspace_id) |
---|
295 | { |
---|
296 | unsigned int ptpr = ((unsigned int)_ptabs[vspace_id]) >> 13; |
---|
297 | asm volatile("mtc2 %0, $0"::"r"(ptpr)); |
---|
298 | } |
---|
299 | |
---|
300 | /////////////////////////////////////////////////////////////////////////////// |
---|
301 | // This function sets the schedulers default values for all processors |
---|
302 | // (tasks <= 0, and current <= 0). |
---|
303 | // Then it scan all tasks (in all vspaces) to initialise the schedulers, |
---|
304 | // the tasks contexts, as defined in the mapping_info data structure. |
---|
305 | // A global TTY index is allocated to each task, as specified in the mapping. |
---|
306 | // TTY0 is reserved for the kernel. |
---|
307 | /////////////////////////////////////////////////////////////////////////////// |
---|
308 | in_kinit void _tcg_init() |
---|
309 | { |
---|
310 | mapping_header_t* header = (mapping_header_t*)&seg_mapping_base; |
---|
311 | mapping_cluster_t* cluster = _get_cluster_base( header ); |
---|
312 | mapping_vspace_t* vspace = _get_vspace_base( header ); |
---|
313 | mapping_vobj_t* vobj = _get_vobj_base( header ); |
---|
314 | |
---|
315 | |
---|
316 | unsigned int* start_vector_base; |
---|
317 | |
---|
318 | unsigned int base_tty_id = 1; // TTY allocator |
---|
319 | |
---|
320 | unsigned int cluster_id; |
---|
321 | unsigned int proc_id; |
---|
322 | unsigned int vspace_id; |
---|
323 | unsigned int vobj_id; |
---|
324 | unsigned int task_id; |
---|
325 | |
---|
326 | _puts("\n SCHEDULLER "); |
---|
327 | _putw((unsigned int)_scheduler); |
---|
328 | _puts("\n"); |
---|
329 | |
---|
330 | // initialise the schedulers (not done by the compiler/loader) |
---|
331 | for ( cluster_id = 0 ; cluster_id < header->clusters ; cluster_id++ ) |
---|
332 | { |
---|
333 | for ( proc_id = 0 ; proc_id < cluster[cluster_id].procs ; proc_id++ ) |
---|
334 | { |
---|
335 | if ( proc_id >= NB_PROCS ) |
---|
336 | { |
---|
337 | _puts("\n[INIT ERROR] The number of processors in cluster "); |
---|
338 | _putw( cluster_id ); |
---|
339 | _puts(" is larger than NB_PROCS \n"); |
---|
340 | _exit(); |
---|
341 | } |
---|
342 | _scheduler[cluster_id*NB_PROCS+proc_id].tasks = 0; |
---|
343 | _scheduler[cluster_id*NB_PROCS+proc_id].current = 0; |
---|
344 | } |
---|
345 | } |
---|
346 | |
---|
347 | // loop on the virtual spaces and set the ptpr |
---|
348 | for ( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ ) |
---|
349 | { |
---|
350 | char found = 0; |
---|
351 | //initialise all vobjs |
---|
352 | for(vobj_id= vspace[vspace_id].vobj_offset; |
---|
353 | vobj_id < (vspace[vspace_id].vobj_offset+ vspace[vspace_id].vobjs); |
---|
354 | vobj_id++) |
---|
355 | { |
---|
356 | if(vobj[vobj_id].type == PTAB) |
---|
357 | { |
---|
358 | found = 1; |
---|
359 | //ptabs allready buided by the boot |
---|
360 | _ptabs[vspace_id] = (unsigned int) vobj[vobj_id].paddr; |
---|
361 | _puts("ptabs for vspace " ); |
---|
362 | _putw(vspace_id); |
---|
363 | _puts(" address: ") ; |
---|
364 | _putw(_ptabs[vspace_id]); |
---|
365 | _puts("\n"); |
---|
366 | } |
---|
367 | } |
---|
368 | |
---|
369 | if(!found) |
---|
370 | { |
---|
371 | _puts("\n[INIT ERROR] Forget to set a PTAB for vspace "); |
---|
372 | _putw( vspace_id ); |
---|
373 | _puts(" ?\n"); |
---|
374 | _exit(); |
---|
375 | } |
---|
376 | } |
---|
377 | |
---|
378 | // main loop on the virtual spaces |
---|
379 | for ( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ ) |
---|
380 | { |
---|
381 | _set_ptpr(vspace_id); |
---|
382 | #if INIT_DEBUG_CTX |
---|
383 | _puts("\n******* mapping tasks and channels in vspace "); |
---|
384 | _puts(vspace[vspace_id].name); |
---|
385 | _puts(" ********\n"); |
---|
386 | #endif |
---|
387 | //initialise all vobjs |
---|
388 | for(vobj_id= vspace[vspace_id].vobj_offset; |
---|
389 | vobj_id < (vspace[vspace_id].vobj_offset+ vspace[vspace_id].vobjs); |
---|
390 | vobj_id++) |
---|
391 | { |
---|
392 | initialise_vobj(&vobj[vobj_id], vspace_id); |
---|
393 | } |
---|
394 | |
---|
395 | |
---|
396 | // Get the physical address of the start_vector for the vspace. |
---|
397 | // The start_vector is stored at the beginning of the seg_data segment, |
---|
398 | // and contains the start addresses for all tasks defined in a vspace. |
---|
399 | // The seg_data segment must be the first vseg defined in |
---|
400 | // the mapping_info data structure. |
---|
401 | mapping_vobj_t* vobj_data = &vobj[vspace[vspace_id].vobj_offset + vspace[vspace_id].funcs_offset]; |
---|
402 | #if INIT_DEBUG_CTX |
---|
403 | _puts("\n[INIT] : vobj_data name "); |
---|
404 | _puts(vobj_data->name); |
---|
405 | _puts("\n[INIT] : vobj_data vaddrr "); |
---|
406 | _putw(vobj_data->vaddr); |
---|
407 | _puts("\n[INIT] : vobj_data addrr "); |
---|
408 | _putw(vobj_data->paddr); |
---|
409 | #endif |
---|
410 | start_vector_base = (unsigned int*)vobj_data->vaddr; |
---|
411 | |
---|
412 | // map tasks |
---|
413 | for ( task_id = vspace[vspace_id].task_offset ; |
---|
414 | task_id < (vspace[vspace_id].task_offset + vspace[vspace_id].tasks) ; |
---|
415 | task_id++ ) |
---|
416 | { |
---|
417 | _task_map( task_id, |
---|
418 | vspace_id, |
---|
419 | base_tty_id, |
---|
420 | start_vector_base ); |
---|
421 | } |
---|
422 | |
---|
423 | // increment TTY allocator |
---|
424 | base_tty_id = base_tty_id + vspace[vspace_id].ttys; |
---|
425 | } |
---|
426 | } // end _tc_init() |
---|
427 | |
---|
428 | //////////////////////////////////////////////////////////////////////////////// |
---|
429 | // _peri_init() |
---|
430 | // This generic function initializes the interrupt vector, the ICU masks, |
---|
431 | // and the timers for the context switch. |
---|
432 | // The hardware parameters are NB_CLUSTERS, NB_PROCS, NB_TIMERS, NB_DMAS |
---|
433 | // CLUSTER_SPAN, seg_icu_base, seg_timer_base. |
---|
434 | // The number of processor per cluster cannot be larger than 8. |
---|
435 | // The total number of TTYs cannot be larger than 15. |
---|
436 | // The NB_TIMERS, NB_DMAS & NB_PROCS parameters must be equal. |
---|
437 | //////////////////////////////////////////////////////////////////////////////// |
---|
438 | in_kinit void _peri_init() |
---|
439 | { |
---|
440 | mapping_header_t* header = (mapping_header_t*)&seg_mapping_base; |
---|
441 | mapping_cluster_t* cluster = _get_cluster_base( header ); |
---|
442 | |
---|
443 | unsigned int cluster_id; |
---|
444 | |
---|
445 | if ( NB_TIMERS != NB_PROCS ) |
---|
446 | { |
---|
447 | _puts("\n[INIT ERROR] NB_TIMERS != NB_PROCS\n"); |
---|
448 | _exit(); |
---|
449 | } |
---|
450 | if ( NB_DMAS != NB_PROCS ) |
---|
451 | { |
---|
452 | _puts("\n[INIT ERROR] NB_DMAS != NB_PROCS\n"); |
---|
453 | _exit(); |
---|
454 | } |
---|
455 | |
---|
456 | // interrupt vector initialisation |
---|
457 | |
---|
458 | _interrupt_vector[0] = &_isr_ioc; |
---|
459 | |
---|
460 | _interrupt_vector[1] = &_isr_tty_get_0; |
---|
461 | _interrupt_vector[2] = &_isr_tty_get_1; |
---|
462 | _interrupt_vector[3] = &_isr_tty_get_2; |
---|
463 | _interrupt_vector[4] = &_isr_tty_get_3; |
---|
464 | _interrupt_vector[5] = &_isr_tty_get_4; |
---|
465 | _interrupt_vector[6] = &_isr_tty_get_5; |
---|
466 | _interrupt_vector[7] = &_isr_tty_get_6; |
---|
467 | _interrupt_vector[8] = &_isr_tty_get_7; |
---|
468 | _interrupt_vector[9] = &_isr_tty_get_8; |
---|
469 | _interrupt_vector[10] = &_isr_tty_get_9; |
---|
470 | _interrupt_vector[11] = &_isr_tty_get_10; |
---|
471 | _interrupt_vector[12] = &_isr_tty_get_11; |
---|
472 | _interrupt_vector[13] = &_isr_tty_get_12; |
---|
473 | _interrupt_vector[14] = &_isr_tty_get_13; |
---|
474 | _interrupt_vector[14] = &_isr_tty_get_14; |
---|
475 | |
---|
476 | |
---|
477 | _interrupt_vector[16] = &_isr_switch; |
---|
478 | _interrupt_vector[17] = &_isr_dma; |
---|
479 | _interrupt_vector[18] = &_isr_switch; |
---|
480 | _interrupt_vector[19] = &_isr_dma; |
---|
481 | _interrupt_vector[20] = &_isr_switch; |
---|
482 | _interrupt_vector[21] = &_isr_dma; |
---|
483 | _interrupt_vector[22] = &_isr_switch; |
---|
484 | _interrupt_vector[23] = &_isr_dma; |
---|
485 | _interrupt_vector[24] = &_isr_switch; |
---|
486 | _interrupt_vector[25] = &_isr_dma; |
---|
487 | _interrupt_vector[26] = &_isr_switch; |
---|
488 | _interrupt_vector[27] = &_isr_dma; |
---|
489 | _interrupt_vector[28] = &_isr_switch; |
---|
490 | _interrupt_vector[29] = &_isr_dma; |
---|
491 | _interrupt_vector[30] = &_isr_switch; |
---|
492 | _interrupt_vector[31] = &_isr_dma; |
---|
493 | |
---|
494 | |
---|
495 | // ICU MASKs and TIMERS initialisation |
---|
496 | |
---|
497 | volatile unsigned int* icu = (unsigned int*)&seg_icu_base; |
---|
498 | volatile unsigned int* timer = (unsigned int*)&seg_timer_base; |
---|
499 | |
---|
500 | for ( cluster_id = 0 ; cluster_id < header->clusters ; cluster_id++ ) |
---|
501 | { |
---|
502 | if ( cluster[cluster_id].procs == 0 ) break; |
---|
503 | |
---|
504 | icu[ICU_MASK_SET + 0*ICU_SPAN] = 0x000380FF; // ICU_MASK for proc 0 |
---|
505 | if ( _scheduler[cluster_id*NB_PROCS + 0].tasks > 1 ) |
---|
506 | { |
---|
507 | timer[TIMER_PERIOD + 0*TIMER_SPAN] = GIET_TICK_VALUE; |
---|
508 | timer[TIMER_MODE + 0*TIMER_SPAN] = 0x3; |
---|
509 | } |
---|
510 | |
---|
511 | if ( cluster[cluster_id].procs == 1 ) break; |
---|
512 | |
---|
513 | icu[ICU_MASK_SET + 1*ICU_SPAN] = 0x000C0000; // ICU_MASK for proc 1 |
---|
514 | if ( _scheduler[cluster_id*NB_PROCS + 1].tasks > 1 ) |
---|
515 | { |
---|
516 | timer[TIMER_PERIOD + 1*TIMER_SPAN] = GIET_TICK_VALUE; |
---|
517 | timer[TIMER_MODE + 1*TIMER_SPAN] = 0x3; |
---|
518 | } |
---|
519 | |
---|
520 | if ( cluster[cluster_id].procs == 2 ) break; |
---|
521 | |
---|
522 | icu[ICU_MASK_SET + 2*ICU_SPAN] = 0x00300000; // ICU_MASK for proc 2 |
---|
523 | if ( _scheduler[cluster_id*NB_PROCS + 2].tasks > 1 ) |
---|
524 | { |
---|
525 | timer[TIMER_PERIOD + 2*TIMER_SPAN] = GIET_TICK_VALUE; |
---|
526 | timer[TIMER_MODE + 2*TIMER_SPAN] = 0x3; |
---|
527 | } |
---|
528 | |
---|
529 | if ( cluster[cluster_id].procs == 3 ) break; |
---|
530 | |
---|
531 | icu[ICU_MASK_SET + 3*ICU_SPAN] = 0x00C00000; // ICU_MASK for proc 3 |
---|
532 | if ( _scheduler[cluster_id*NB_PROCS + 3].tasks > 1 ) |
---|
533 | { |
---|
534 | timer[TIMER_PERIOD + 3*TIMER_SPAN] = GIET_TICK_VALUE; |
---|
535 | timer[TIMER_MODE + 3*TIMER_SPAN] = 0x3; |
---|
536 | } |
---|
537 | |
---|
538 | if ( cluster[cluster_id].procs == 4 ) break; |
---|
539 | icu[ICU_MASK_SET + 4*ICU_SPAN] = 0x03000000; // ICU_MASK for proc 4 |
---|
540 | |
---|
541 | if ( _scheduler[cluster_id*NB_PROCS + 4].tasks > 1 ) |
---|
542 | { |
---|
543 | timer[TIMER_PERIOD + 4*TIMER_SPAN] = GIET_TICK_VALUE; |
---|
544 | timer[TIMER_MODE + 4*TIMER_SPAN] = 0x3; |
---|
545 | } |
---|
546 | |
---|
547 | if ( cluster[cluster_id].procs == 5 ) break; |
---|
548 | |
---|
549 | icu[ICU_MASK_SET + 5*ICU_SPAN] = 0x0C000000; // ICU_MASK for proc 5 |
---|
550 | if ( _scheduler[cluster_id*NB_PROCS + 5].tasks > 1 ) |
---|
551 | { |
---|
552 | timer[TIMER_PERIOD + 5*TIMER_SPAN] = GIET_TICK_VALUE; |
---|
553 | timer[TIMER_MODE + 5*TIMER_SPAN] = 0x3; |
---|
554 | } |
---|
555 | |
---|
556 | if ( cluster[cluster_id].procs == 6 ) break; |
---|
557 | |
---|
558 | icu[ICU_MASK_SET + 6*ICU_SPAN] = 0x30000000; // ICU_MASK for proc 6 |
---|
559 | if ( _scheduler[cluster_id*NB_PROCS + 6].tasks > 1 ) |
---|
560 | { |
---|
561 | timer[TIMER_PERIOD + 6*TIMER_SPAN] = GIET_TICK_VALUE; |
---|
562 | timer[TIMER_MODE + 6*TIMER_SPAN] = 0x3; |
---|
563 | } |
---|
564 | |
---|
565 | if ( cluster[cluster_id].procs == 7 ) break; |
---|
566 | |
---|
567 | icu[ICU_MASK_SET + 7*ICU_SPAN] = 0xC0000000; // ICU_MASK for proc 7 |
---|
568 | if ( _scheduler[cluster_id*NB_PROCS + 7].tasks > 1 ) |
---|
569 | { |
---|
570 | timer[TIMER_PERIOD + 7*TIMER_SPAN] = GIET_TICK_VALUE; |
---|
571 | timer[TIMER_MODE + 7*TIMER_SPAN] = 0x3; |
---|
572 | } |
---|
573 | |
---|
574 | if ( cluster[cluster_id].procs > 8 ) |
---|
575 | { |
---|
576 | _puts("\n[INIT ERROR] The number of processors per cluster\n"); |
---|
577 | _puts(" cannot be larger than 8\n"); |
---|
578 | _exit(); |
---|
579 | } |
---|
580 | icu = icu + (CLUSTER_SPAN>>2); |
---|
581 | timer = timer + (CLUSTER_SPAN>>2); |
---|
582 | } |
---|
583 | } // end _peri_init() |
---|