source: trunk/kernel/kern/cluster.c @ 9

Last change on this file since 9 was 5, checked in by alain, 8 years ago

Introduce the chdev_t structure in place of the device_t structure.

/*
 * cluster.c - Cluster-Manager related operations
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Mohamed Lamine Karaoui (2015)
 *          Alain Greiner (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <almos_config.h>
#include <hal_types.h>
#include <hal_atomic.h>
#include <hal_special.h>
#include <printk.h>
#include <errno.h>
#include <spinlock.h>
#include <core.h>
#include <scheduler.h>
#include <list.h>
#include <cluster.h>
#include <sysfs.h>
#include <boot_info.h>
#include <bits.h>
#include <ppm.h>
#include <thread.h>
#include <kmem.h>
#include <process.h>
#include <dqdt.h>

///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

process_t process_zero;     // allocated in kernel_init.c file

//////////////////////////////////
void cluster_sysfs_register(void)
{
    // TODO
}

/////////////////////////////////////////////////
error_t cluster_init( struct boot_info_s * info )
{
    lpid_t      lpid;     // local process index
    lid_t       lid;      // local core index

    cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters
    cluster->paddr_width     = info->paddr_width;
    cluster->x_width         = info->x_width;
    cluster->y_width         = info->y_width;
    cluster->x_size          = info->x_size;
    cluster->y_size          = info->y_size;
    cluster->io_cxy          = info->io_cxy;

    // initialize cluster local parameters
    cluster->cores_nr        = info->cores_nr;
    cluster->cores_in_kernel = info->cores_nr;   // all cores start in kernel mode

    // initialize the lock protecting the embedded KCM allocator
    spinlock_init( &cluster->kcm_lock );

    // initialize DQDT
    cluster->dqdt_root_level = dqdt_init( info->x_size,
                                          info->y_size,
                                          info->y_width );
    cluster->threads_var = 0;
    cluster->pages_var   = 0;

    // initialize embedded PPM
    ppm_init( &cluster->ppm,
              info->pages_nr,
              info->pages_offset );

    // initialize embedded KHM
    khm_init( &cluster->khm );

    // initialize embedded KCM
    kcm_init( &cluster->kcm , KMEM_KCM );

    // initialize all core descriptors
    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        core_init( &cluster->core_tbl[lid],    // target core descriptor
                   lid,                        // local core index
                   info->core[lid].gid );      // gid from boot_info_t
    }

    // initialize RPC fifo
    rpc_fifo_init( &cluster->rpc_fifo );

    // initialize pref_tbl[] in process manager
    spinlock_init( &cluster->pmgr.pref_lock );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialize local_list in process manager
    spinlock_init( &cluster->pmgr.local_lock );
    list_root_init( &cluster->pmgr.local_root );
    cluster->pmgr.local_nr = 0;

    // initialize copies_lists in process manager
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
    }

    hal_wbflush();

    // wait until all clusters are initialized,
    // on the barrier located in the io_cxy cluster
    remote_barrier( XPTR( cluster->io_cxy , &cluster->barrier ) ,
                    cluster->x_size * cluster->y_size );

    return 0;
}  // end cluster_init()
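
///////////////////////////////////////////////////////////////////////////////////////////
// Illustrative note (not compiled): an extended pointer XPTR( cxy , ptr ) packs a
// cluster identifier and a local pointer into a single 64-bit value, which is the
// idiom used throughout this file for remote accesses. The encoding below is a
// hypothetical sketch, not the actual definition from the hal_types interface.
///////////////////////////////////////////////////////////////////////////////////////////

#if 0
// hypothetical layout: cxy in the upper 32 bits, local pointer in the lower 32 bits
#define XPTR( cxy , ptr )  ( ((xptr_t)(cxy) << 32) | (xptr_t)(intptr_t)(ptr) )
#endif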

////////////////////////////////////////
bool_t cluster_is_undefined( cxy_t cxy )
{
    cluster_t * cluster = LOCAL_CLUSTER;

    uint32_t y_width = cluster->y_width;

    uint32_t x = cxy >> y_width;
    uint32_t y = cxy & ((1 << y_width) - 1);

    if( x >= cluster->x_size ) return true;
    if( y >= cluster->y_size ) return true;

    return false;
}
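
///////////////////////////////////////////////////////////////////////////////////////////
// The decomposition above implies that a cluster identifier packs the (x,y) mesh
// coordinates as cxy = (x << y_width) | y. The helper below is a hypothetical,
// non-compiled sketch illustrating this inverse composition; it is not part of
// the kernel API.
///////////////////////////////////////////////////////////////////////////////////////////

#if 0
static inline cxy_t cxy_from_coordinates( uint32_t x , uint32_t y )
{
    uint32_t y_width = LOCAL_CLUSTER->y_width;   // bits reserved for the y coordinate
    return (cxy_t)( (x << y_width) | y );
}
#endif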

////////////////////////////////////////////////////////////////////////////////////
//  Cores related functions
////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////
void cluster_core_kernel_enter()
{
    cluster_t * cluster = LOCAL_CLUSTER;
    hal_atomic_inc( &cluster->cores_in_kernel );
}

///////////////////////////////
void cluster_core_kernel_exit()
{
    cluster_t * cluster = LOCAL_CLUSTER;
    hal_atomic_dec( &cluster->cores_in_kernel );
}

/////////////////////////////////
lid_t cluster_select_local_core()
{
    uint32_t min = 100;   // must be larger than any actual core usage value
    lid_t    sel = 0;
    lid_t    lid;

    cluster_t * cluster = LOCAL_CLUSTER;

    // scan all local cores and select the one with the lowest usage
    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        if( cluster->core_tbl[lid].usage < min )
        {
            min = cluster->core_tbl[lid].usage;
            sel = lid;
        }
    }
    return sel;
}
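
///////////////////////////////////////////////////////////////////////////////////////////
// Hypothetical usage sketch (not compiled): a caller placing a new thread would
// typically select the least loaded local core, then target its descriptor.
///////////////////////////////////////////////////////////////////////////////////////////

#if 0
    lid_t    lid  = cluster_select_local_core();      // least loaded local core
    core_t * core = &LOCAL_CLUSTER->core_tbl[lid];    // its local descriptor
#endif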

////////////////////////////////////////////////////////////////////////////////////
//  Process management related functions
////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////
xptr_t cluster_get_reference_process_from_pid( pid_t pid )
{
    xptr_t xp;   // extended pointer on process descriptor

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t  owner_cxy = CXY_FROM_PID( pid );
    lpid_t lpid      = LPID_FROM_PID( pid );

    // check PID validity
    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )
    {
        printk("\n[PANIC] in %s : illegal PID\n", __FUNCTION__ );
        hal_core_sleep();
    }

    if( local_cxy == owner_cxy )   // local cluster is the owner cluster
    {
        xp = cluster->pmgr.pref_tbl[lpid];
    }
    else                           // use hal_remote_lwd() to access the owner cluster
    {
        xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
    }

    return xp;
}
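
///////////////////////////////////////////////////////////////////////////////////////////
// Hypothetical usage sketch (not compiled): the returned extended pointer can be
// split into a cluster identifier and a local pointer before any remote access
// (assuming the GET_CXY / GET_PTR macros from the hal_types interface).
///////////////////////////////////////////////////////////////////////////////////////////

#if 0
    xptr_t      ref_xp  = cluster_get_reference_process_from_pid( pid );
    cxy_t       ref_cxy = GET_CXY( ref_xp );               // owner cluster identifier
    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );  // local pointer in that cluster
#endif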

////////////////////////////////////////////////
error_t cluster_pid_alloc( xptr_t    process_xp,
                           pid_t   * pid )
{
    error_t     error;
    lpid_t      lpid;
    bool_t      found;

    pmgr_t    * pm = &LOCAL_CLUSTER->pmgr;

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // search an empty slot
    found = false;
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        if( pm->pref_tbl[lpid] == XPTR_NULL )
        {
            found = true;
            break;
        }
    }

    if( found )
    {
        // register process in pref_tbl[]
        pm->pref_tbl[lpid] = process_xp;
        pm->pref_nr++;

        // return the PID built from the local cluster and the allocated lpid
        *pid = PID( local_cxy , lpid );

        error = 0;
    }
    else
    {
        error = EAGAIN;
    }

    // release the process manager lock
    spinlock_unlock( &pm->pref_lock );

    return error;

}  // end cluster_pid_alloc()
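
///////////////////////////////////////////////////////////////////////////////////////////
// Hypothetical usage sketch (not compiled): a process creation path would allocate
// a PID in the owner cluster, and release it on an error path or at destruction.
// The "process" variable below is an assumed local process descriptor.
///////////////////////////////////////////////////////////////////////////////////////////

#if 0
    pid_t  pid;
    xptr_t process_xp = XPTR( local_cxy , process );

    if( cluster_pid_alloc( process_xp , &pid ) )   // non-zero means no free slot
    {
        return EAGAIN;
    }

    // ... if creation fails later ...
    cluster_pid_release( pid );
#endif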

/////////////////////////////////////
void cluster_pid_release( pid_t pid )
{
    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    lpid_t lpid       = LPID_FROM_PID( pid );

    // check pid argument
    if( (lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER) || (owner_cxy != local_cxy) )
    {
        printk("\n[PANIC] in %s : illegal PID\n", __FUNCTION__ );
        hal_core_sleep();
    }

    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // remove process from pref_tbl[]
    pm->pref_tbl[lpid] = XPTR_NULL;
    pm->pref_nr--;

    // release the process manager lock
    spinlock_unlock( &pm->pref_lock );

}  // end cluster_pid_release()

///////////////////////////////////////////////////////////
process_t * cluster_get_local_process_from_pid( pid_t pid )
{
    process_t    * ret     = NULL;
    list_entry_t * root    = &LOCAL_CLUSTER->pmgr.local_root;
    list_entry_t * iter;
    process_t    * process;

    LIST_FOREACH( root , iter )
    {
        process = LIST_ELEMENT( iter , process_t , local_list );
        if( process->pid == pid )
        {
            ret = process;
            break;
        }
    }
    return ret;

}  // end cluster_get_local_process_from_pid()

//////////////////////////////////////////////////////
void cluster_process_local_link( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get lock protecting the process manager local list
    spinlock_lock( &pm->local_lock );

    list_add_first( &pm->local_root , &process->local_list );
    pm->local_nr++;

    // release lock protecting the process manager local list
    spinlock_unlock( &pm->local_lock );
}

////////////////////////////////////////////////////////
void cluster_process_local_unlink( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get lock protecting the process manager local list
    spinlock_lock( &pm->local_lock );

    list_unlink( &process->local_list );
    pm->local_nr--;

    // release lock protecting the process manager local list
    spinlock_unlock( &pm->local_lock );
}

///////////////////////////////////////////////////////
void cluster_process_copies_link( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // build extended pointer on lock protecting copies_list[lpid]
    // (the lock address itself, not its content read with hal_remote_lwd)
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // build extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );

    // build extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // take lock protecting copies_list[lpid]
    remote_spinlock_lock( copies_lock );

    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock( copies_lock );
}

/////////////////////////////////////////////////////////
void cluster_process_copies_unlink( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // build extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // build extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // take lock protecting copies_list[lpid]
    remote_spinlock_lock( copies_lock );

    xlist_unlink( copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock( copies_lock );
}
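
///////////////////////////////////////////////////////////////////////////////////////////
// Hypothetical traversal sketch (not compiled): scanning all copies of a process,
// assuming the XLIST_FOREACH / XLIST_ELEMENT macros from the xlist interface.
// The copies_list lock must be taken, as above, before walking the list.
///////////////////////////////////////////////////////////////////////////////////////////

#if 0
    xptr_t root = XPTR( owner_cxy , &pm->copies_root[lpid] );
    xptr_t iter;

    XLIST_FOREACH( root , iter )
    {
        xptr_t process_xp = XLIST_ELEMENT( iter , process_t , copies_list );
        // ... act on each process copy through remote accesses ...
    }
#endif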

////////////////////////////////////////////////////////////////////////////////////////
// TODO It seems that the only thing this kernel thread does at each wakeup is to
// update the DQDT and go back to sleep... Do we really need a thread for that? [AG]
//////////////////////////////////////////////////////////////////////////////////////////

#if 0
void * cluster_manager_thread( void * arg )
{
        register struct dqdt_cluster_s * root;
        register struct cluster_s      * root_home;

        register uint32_t                tm_start;
        register uint32_t                tm_end;
        register uint32_t                cpu_id;
        struct cluster_s               * cluster;
        struct thread_s                * this;
        struct event_s                   event;
        struct alarm_info_s              info;
        register uint32_t                cntr;
        register bool_t                  isRootMgr;
        register uint32_t                period;

        cpu_enable_all_irq( NULL );

        cluster   = arg;
        this      = CURRENT_THREAD;
        cpu_id    = cpu_get_id();
        root      = dqdt_root;
        root_home = dqdt_root->home;
        isRootMgr = (cluster == root_home) ? true : false;
        cntr      = 0;
        period    = (isRootMgr) ?
                CONFIG_DQDT_ROOTMGR_PERIOD * MSEC_PER_TICK :
                CONFIG_DQDT_MGR_PERIOD * MSEC_PER_TICK;

        event_set_senderId(&event, this);
        event_set_priority(&event, E_CHR);
        event_set_handler(&event, &manager_alarm_event_handler);

        info.event = &event;
        thread_preempt_disable(CURRENT_THREAD);

        // infinite loop
        while(1)
        {
                tm_start = cpu_time_stamp();
                dqdt_update();
                tm_end   = cpu_time_stamp();

                if(isRootMgr)
                {
                        if((cntr % 10) == 0)
                        {
                                printk(INFO, "INFO: cpu %d, DQDT update ended [ %u - %u ]\n",
                                       cpu_id,
                                       tm_end,
                                       tm_end - tm_start);

                                dqdt_print_summary(root);
                        }
                }

                alarm_wait( &info , period );
                sched_sleep(this);
                cntr ++;
        }

        return NULL;
}  // end cluster_manager_thread()

//////////////////////////////////////////
EVENT_HANDLER(manager_alarm_event_handler)
{
        struct thread_s *manager;

        manager = event_get_senderId(event);

        thread_preempt_disable(CURRENT_THREAD);

        //printk(INFO, "%s: cpu %d [%u]\n", __FUNCTION__, cpu_get_id(), cpu_time_stamp());

        sched_wakeup(manager);

        thread_preempt_enable(CURRENT_THREAD);

        return 0;
}

///////////////////////////////////////////////
EVENT_HANDLER(cluster_key_create_event_handler)
{
        struct cluster_s *cluster;
        struct thread_s *sender;
        ckey_t *ckey;
        uint32_t key;

        sender  = event_get_senderId(event);
        ckey    = event_get_argument(event);
        cluster = current_cluster;
        key     = cluster->next_key;

        while((key < CLUSTER_TOTAL_KEYS_NR) && (cluster->keys_tbl[key] != NULL))
                key ++;

        if(key < CLUSTER_TOTAL_KEYS_NR)
        {
                ckey->val = key;
                cluster->keys_tbl[key] = (void *) 0x1; // Reserved
                cluster->next_key = key;
                event_set_error(event, 0);
        }
        else
                event_set_error(event, ENOSPC);

        sched_wakeup(sender);
        return 0;
}

///////////////////////////////////////////////
EVENT_HANDLER(cluster_key_delete_event_handler)
{
        struct cluster_s *cluster;
        struct thread_s *sender;
        ckey_t *ckey;
        uint32_t key;

        sender  = event_get_senderId(event);
        ckey    = event_get_argument(event);
        cluster = current_cluster;
        key     = ckey->val;

        if(key < cluster->next_key)
                cluster->next_key = key;

        cluster->keys_tbl[key] = NULL;
        event_set_error(event, 0);

        sched_wakeup(sender);
        return 0;
}

#define _CKEY_CREATE  0x0
#define _CKEY_DELETE  0x1

error_t cluster_do_key_op(ckey_t *key, uint32_t op)
{
        struct event_s event;
        struct thread_s *this;
        struct cluster_s *cluster;
        struct cpu_s *cpu;

        this = CURRENT_THREAD;

        event_set_priority(&event, E_FUNC);
        event_set_senderId(&event, this);
        event_set_argument(&event, key);

        if(op == _CKEY_CREATE)
                event_set_handler(&event, cluster_key_create_event_handler);
        else
                event_set_handler(&event, cluster_key_delete_event_handler);

        cluster = current_cluster;
        cpu     = cluster->bscluster->bscpu;
        event_send(&event, &cpu->re_listner);

        sched_sleep(this);

        return event_get_error(&event);
}

error_t cluster_key_create(ckey_t *key)
{
        return cluster_do_key_op(key, _CKEY_CREATE);
}

error_t cluster_key_delete(ckey_t *key)
{
        return cluster_do_key_op(key, _CKEY_DELETE);
}

void* cluster_getspecific(ckey_t *key)
{
        struct cluster_s *cluster;

        cluster = current_cluster;
        return cluster->keys_tbl[key->val];
}

void  cluster_setspecific(ckey_t *key, void *val)
{
        struct cluster_s *cluster;

        cluster = current_cluster;
        cluster->keys_tbl[key->val] = val;
}
#endif