source: trunk/kernel/kern/cluster.c @ 86

/*
 * cluster.c - Cluster-Manager related operations
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Mohamed Lamine Karaoui (2015)
 *          Alain Greiner (2016,2017)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_atomic.h>
#include <hal_special.h>
#include <hal_ppm.h>
#include <printk.h>
#include <errno.h>
#include <spinlock.h>
#include <core.h>
#include <scheduler.h>
#include <list.h>
#include <cluster.h>
#include <boot_info.h>
#include <bits.h>
#include <ppm.h>
#include <thread.h>
#include <kmem.h>
#include <process.h>
#include <dqdt.h>

///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern process_t process_zero;     // allocated in kernel_init.c file



//////////////////////////////////
void cluster_sysfs_register(void)
{
        // TODO
}

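///////////////////////////////////////////////////////////////////////////////////////////
// This function initializes the local cluster manager from the boot_info_t structure:
// cluster global and local parameters, the DQDT, the embedded PPM / KHM / KCM allocators,
// the core descriptors, the RPC FIFO, and the process manager (pref_tbl[], local process
// list, and copies lists). It is presumably called once per cluster during kernel
// initialization (assumption: the exact call site, likely in kernel_init.c, is not shown here).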
/////////////////////////////////////////////////
error_t cluster_init( struct boot_info_s * info )
{
    error_t     error;
    lpid_t      lpid;     // local process index
    lid_t       lid;      // local core index

        cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters
        cluster->paddr_width     = info->paddr_width;
        cluster->x_width         = info->x_width;
        cluster->y_width         = info->y_width;
        cluster->x_size          = info->x_size;
        cluster->y_size          = info->y_size;
        cluster->io_cxy          = info->io_cxy;

    // initialize cluster local parameters
        cluster->cores_nr        = info->cores_nr;
    cluster->cores_in_kernel = info->cores_nr; // all cores start in kernel mode

    // initialize the lock protecting the embedded KCM allocator
        spinlock_init( &cluster->kcm_lock );

    cluster_dmsg("\n[INFO] %s for cluster %x enters\n",
                 __FUNCTION__ , local_cxy );

    // initialize DQDT
    cluster->dqdt_root_level = dqdt_init( info->x_size,
                                          info->y_size,
                                          info->y_width );
    cluster->threads_var = 0;
    cluster->pages_var   = 0;

    // initialize embedded PPM
        error = hal_ppm_init( info );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
               __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

    cluster_dmsg("\n[INFO] %s : PPM initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_time_stamp() );

    // initialize embedded KHM
        khm_init( &cluster->khm );

    cluster_dmsg("\n[INFO] %s : KHM initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_time_stamp() );

    // initialize embedded KCM
        kcm_init( &cluster->kcm , KMEM_KCM );

    cluster_dmsg("\n[INFO] %s : KCM initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_time_stamp() );

    // initialize all core descriptors
        for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
        {
                core_init( &cluster->core_tbl[lid],    // target core descriptor
                       lid,                        // local core index
                       info->core[lid].gid );      // gid from boot_info_t
        }

    cluster_dmsg("\n[INFO] %s : cores initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_time_stamp() );

    // initialize RPC FIFO
        rpc_fifo_init( &cluster->rpc_fifo );

    cluster_dmsg("\n[INFO] %s : RPC FIFO initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_time_stamp() );

    // initialize pref_tbl[] in process manager
        spinlock_init( &cluster->pmgr.pref_lock );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialize local_list in process manager
        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    cluster->pmgr.local_nr = 0;

    // initialize copies_lists in process manager
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
    }

    cluster_dmsg("\n[INFO] %s : Process Manager initialized in cluster %x at cycle %d\n",
                 __FUNCTION__ , local_cxy , hal_time_stamp() );

    hal_wbflush();

        return 0;
} // end cluster_init()

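///////////////////////////////////////////////////////////////////////////////////////////
// This predicate returns true when the cluster identifier <cxy> does not match an existing
// cluster in the mesh: cxy is decoded as x = cxy >> y_width and y = cxy & ((1<<y_width)-1),
// and the result is true if x >= x_size or y >= y_size.
// Worked example: with y_width == 4, cxy == 0x23 decodes to x == 2 and y == 3.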
////////////////////////////////////////
bool_t cluster_is_undefined( cxy_t cxy )
{
    cluster_t * cluster = LOCAL_CLUSTER;

    uint32_t y_width = cluster->y_width;

    uint32_t x = cxy >> y_width;
    uint32_t y = cxy & ((1<<y_width)-1);

    if( x >= cluster->x_size ) return true;
    if( y >= cluster->y_size ) return true;

    return false;
}

////////////////////////////////////////////////////////////////////////////////////
//  Cores related functions
////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////
void cluster_core_kernel_enter()
{
    cluster_t * cluster = LOCAL_CLUSTER;
        hal_atomic_add( &cluster->cores_in_kernel , 1 );
}

///////////////////////////////
void cluster_core_kernel_exit()
{
    cluster_t * cluster = LOCAL_CLUSTER;
        hal_atomic_add( &cluster->cores_in_kernel , -1 );
}

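///////////////////////////////////////////////////////////////////////////////////////////
// This function scans the local core_tbl[] and returns the local index (lid) of the core
// with the lowest usage. Note that only usage values strictly below the initial threshold
// (100) can be selected; otherwise core 0 is returned by default.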
/////////////////////////////////
lid_t cluster_select_local_core()
{
    uint32_t min = 100;
    lid_t    sel = 0;
    lid_t    lid;

    cluster_t * cluster = LOCAL_CLUSTER;

    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        if( cluster->core_tbl[lid].usage < min )
        {
            min = cluster->core_tbl[lid].usage;
            sel = lid;
        }
    }
    return sel;
}

////////////////////////////////////////////////////////////////////////////////////
//  Process management related functions
////////////////////////////////////////////////////////////////////////////////////

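///////////////////////////////////////////////////////////////////////////////////////////
// This function returns an extended pointer on the reference process descriptor for <pid>:
// it reads pref_tbl[lpid] locally when the local cluster is the owner cluster, and with a
// remote access otherwise. It returns XPTR_NULL for an invalid lpid.
// Illustrative usage sketch (assumption, not taken from this file):
//     xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
//     if( ref_xp == XPTR_NULL ) return EINVAL;
//     process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );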
//////////////////////////////////////////////////////////
xptr_t cluster_get_reference_process_from_pid( pid_t pid )
{
    xptr_t ref_xp;   // extended pointer on reference process descriptor

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t  owner_cxy = CXY_FROM_PID( pid );
    lpid_t lpid      = LPID_FROM_PID( pid );

    // check valid PID
    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;

    if( local_cxy == owner_cxy )   // local cluster is owner cluster
    {
        ref_xp = cluster->pmgr.pref_tbl[lpid];
    }
    else                           // use a remote_lwd to access owner cluster
    {
        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
    }

    return ref_xp;
}

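///////////////////////////////////////////////////////////////////////////////////////////
// This function allocates a new PID in the local (owner) cluster: it searches a free slot
// in pref_tbl[], registers the <process_xp> extended pointer in this slot, and builds the
// PID from the local cluster identifier and the slot index. It returns EAGAIN when
// pref_tbl[] is full. Illustrative usage sketch (assumption, not taken from this file):
//     pid_t pid;
//     if( cluster_pid_alloc( XPTR( local_cxy , process ) , &pid ) ) return EAGAIN;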
////////////////////////////////////////////////
error_t cluster_pid_alloc( xptr_t    process_xp,
                           pid_t   * pid )
{
    error_t     error;
    lpid_t      lpid;
    bool_t      found;

    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // search an empty slot
    found = false;
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        if( pm->pref_tbl[lpid] == XPTR_NULL )
        {
            found = true;
            break;
        }
    }

    if( found )
    {
        // register process in pref_tbl[]
        pm->pref_tbl[lpid] = process_xp;
        pm->pref_nr++;

        // return PID to caller
        *pid = PID( local_cxy , lpid );

        error = 0;
    }
    else
    {
        error = EAGAIN;
    }

    // release the process manager lock
    spinlock_unlock( &pm->pref_lock );

    return error;

} // end cluster_pid_alloc()

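///////////////////////////////////////////////////////////////////////////////////////////
// This function releases the pref_tbl[] slot associated to <pid>. It must be called in the
// owner cluster, and panics on an illegal PID (bad lpid or wrong owner cluster).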
/////////////////////////////////////
void cluster_pid_release( pid_t pid )
{
    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    lpid_t lpid       = LPID_FROM_PID( pid );

    // check pid argument
    if( (lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER) || (owner_cxy != local_cxy) )
    {
        printk("\n[PANIC] in %s : illegal PID\n", __FUNCTION__ );
        hal_core_sleep();
    }

    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // remove process from pref_tbl[]
    pm->pref_tbl[lpid] = XPTR_NULL;
    pm->pref_nr--;

    // release the process manager lock
    spinlock_unlock( &pm->pref_lock );

} // end cluster_pid_release()

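///////////////////////////////////////////////////////////////////////////////////////////
// This function scans the local list of process descriptors and returns a local pointer
// on the copy matching <pid>, or NULL when no copy of this process exists in the local
// cluster.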
///////////////////////////////////////////////////////////
process_t * cluster_get_local_process_from_pid( pid_t pid )
{
    xptr_t         process_xp;
    process_t    * process_ptr;
    xptr_t         root_xp;
    xptr_t         iter_xp;
    bool_t         found;

    found   = false;
    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );

    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_ptr = (process_t *)GET_PTR( process_xp );
        if( process_ptr->pid == pid )
        {
            found = true;
            break;
        }
    }

    if( found ) return process_ptr;
    else        return NULL;

}  // end cluster_get_local_process_from_pid()

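///////////////////////////////////////////////////////////////////////////////////////////
// The two functions below add / remove a local process descriptor to / from the local_list
// rooted in the local process manager, under the protection of the local_lock remote
// spinlock.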
//////////////////////////////////////////////////////
void cluster_process_local_link( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get lock protecting the process manager local list
    remote_spinlock_lock( XPTR( local_cxy , &pm->local_lock ) );

    xlist_add_first( XPTR( local_cxy , &pm->local_root ),
                     XPTR( local_cxy , &process->local_list ) );
    pm->local_nr++;

    // release lock protecting the process manager local list
    remote_spinlock_unlock( XPTR( local_cxy , &pm->local_lock ) );
}

////////////////////////////////////////////////////////
void cluster_process_local_unlink( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get lock protecting the process manager local list
    remote_spinlock_lock( XPTR( local_cxy , &pm->local_lock ) );

    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    pm->local_nr--;

    // release lock protecting the process manager local list
    remote_spinlock_unlock( XPTR( local_cxy , &pm->local_lock ) );
}

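///////////////////////////////////////////////////////////////////////////////////////////
// The two functions below add / remove the local process copy to / from the
// copies_list[lpid] rooted in the owner cluster (identified by the process PID), using
// remote accesses for the list root, the lock, and the copies_nr[] counter.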
///////////////////////////////////////////////////////
void cluster_process_copies_link( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = hal_remote_lwd( XPTR( owner_cxy , &pm->copies_lock[lpid] ) );

    // get extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = hal_remote_lwd( XPTR( owner_cxy , &pm->copies_root[lpid] ) );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock( copies_lock );

    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock( copies_lock );
}

/////////////////////////////////////////////////////////
void cluster_process_copies_unlink( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = hal_remote_lwd( XPTR( owner_cxy , &pm->copies_lock[lpid] ) );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock( copies_lock );

    xlist_unlink( copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock( copies_lock );
}

////////////////////////////////////////////////////////////////////////////////////////
// TODO It seems that the only thing this kernel thread does at each wake-up is to
// update the DQDT before going back to sleep... Do we really need a thread for that? [AG]
//////////////////////////////////////////////////////////////////////////////////////////

#if 0
void * cluster_manager_thread( void * arg )
{
        register struct dqdt_cluster_s * root;
        register struct cluster_s      * root_home;

        register uint32_t                tm_start;
        register uint32_t                tm_end;
        register uint32_t                cpu_id;
        struct cluster_s               * cluster;
        struct thread_s                * this;
        struct event_s                   event;
        struct alarm_info_s              info;
        register uint32_t                cntr;
        register bool_t                  isRootMgr;
        register uint32_t                period;

        cpu_enable_all_irq( NULL );

        cluster   = arg;
        this      = CURRENT_THREAD;
        cpu_id    = cpu_get_id();
        root      = dqdt_root;
        root_home = dqdt_root->home;
        isRootMgr = (cluster == root_home) ? true : false;
        cntr      = 0;
        period    = (isRootMgr) ?
                CONFIG_DQDT_ROOTMGR_PERIOD * MSEC_PER_TICK :
                CONFIG_DQDT_MGR_PERIOD * MSEC_PER_TICK;

        event_set_senderId(&event, this);
        event_set_priority(&event, E_CHR);
        event_set_handler(&event, &manager_alarm_event_handler);

        info.event = &event;
        thread_preempt_disable(CURRENT_THREAD);

    // infinite loop
        while(1)
        {
                tm_start = cpu_time_stamp();
                dqdt_update();
                tm_end   = cpu_time_stamp();

                if(isRootMgr)
                {
                        if((cntr % 10) == 0)
                        {
                                printk(INFO, "INFO: cpu %d, DQDT update ended [ %u - %u ]\n",
                                       cpu_id,
                                       tm_end,
                                       tm_end - tm_start);

                                dqdt_print_summary(root);
                        }
                }

                alarm_wait( &info , period );
                sched_sleep(this);
                cntr ++;
        }

        return NULL;
} // end cluster_manager_thread()

//////////////////////////////////////////
EVENT_HANDLER(manager_alarm_event_handler)
{
        struct thread_s *manager;

        manager = event_get_senderId(event);

        thread_preempt_disable(CURRENT_THREAD);

        //printk(INFO, "%s: cpu %d [%u]\n", __FUNCTION__, cpu_get_id(), cpu_time_stamp());

        sched_wakeup(manager);

        thread_preempt_enable(CURRENT_THREAD);

        return 0;
}

///////////////////////////////////////////////
EVENT_HANDLER(cluster_key_create_event_handler)
{
        struct cluster_s *cluster;
        struct thread_s *sender;
        ckey_t *ckey;
        uint32_t key;

        sender  = event_get_senderId(event);
        ckey    = event_get_argument(event);
        cluster = current_cluster;
        key     = cluster->next_key;

        while((key < CLUSTER_TOTAL_KEYS_NR) && (cluster->keys_tbl[key] != NULL))
                key ++;

        if(key < CLUSTER_TOTAL_KEYS_NR)
        {
                ckey->val = key;
                cluster->keys_tbl[key] = (void *) 0x1; // Reserved
                cluster->next_key = key;
                event_set_error(event, 0);
        }
        else
                event_set_error(event, ENOSPC);

        sched_wakeup(sender);
        return 0;
}

///////////////////////////////////////////////
EVENT_HANDLER(cluster_key_delete_event_handler)
{
        struct cluster_s *cluster;
        struct thread_s *sender;
        ckey_t *ckey;
        uint32_t key;

        sender  = event_get_senderId(event);
        ckey    = event_get_argument(event);
        cluster = current_cluster;
        key     = ckey->val;

        if(key < cluster->next_key)
                cluster->next_key = key;

        cluster->keys_tbl[key] = NULL;
        event_set_error(event, 0);

        sched_wakeup(sender);
        return 0;
}

#define _CKEY_CREATE  0x0
#define _CKEY_DELETE  0x1

error_t cluster_do_key_op(ckey_t *key, uint32_t op)
{
        struct event_s event;
        struct thread_s *this;
        struct cluster_s *cluster;
        struct cpu_s *cpu;

        this = CURRENT_THREAD;

        event_set_priority(&event, E_FUNC);
        event_set_senderId(&event, this);
        event_set_argument(&event, key);

        if(op == _CKEY_CREATE)
                event_set_handler(&event, cluster_key_create_event_handler);
        else
                event_set_handler(&event, cluster_key_delete_event_handler);

        cluster = current_cluster;
        cpu     = cluster->bscluster->bscpu;
        event_send(&event, &cpu->re_listner);

        sched_sleep(this);

        return event_get_error(&event);
}

error_t cluster_key_create(ckey_t *key)
{
        return cluster_do_key_op(key, _CKEY_CREATE);
}

error_t cluster_key_delete(ckey_t *key)
{
        return cluster_do_key_op(key, _CKEY_DELETE);
}

void* cluster_getspecific(ckey_t *key)
{
        struct cluster_s *cluster;

        cluster = current_cluster;
        return cluster->keys_tbl[key->val];
}

void  cluster_setspecific(ckey_t *key, void *val)
{
        struct cluster_s *cluster;

        cluster = current_cluster;
        cluster->keys_tbl[key->val] = val;
}
#endif