source: trunk/kernel/kern/cluster.c @ 40

/*
 * cluster.c - Cluster-Manager related operations
 *
 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
 *         Mohamed Lamine Karaoui (2015)
 *         Alain Greiner (2016,2017)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_atomic.h>
#include <hal_special.h>
#include <printk.h>
#include <errno.h>
#include <spinlock.h>
#include <core.h>
#include <scheduler.h>
#include <list.h>
#include <cluster.h>
#include <boot_info.h>
#include <bits.h>
#include <ppm.h>
#include <thread.h>
#include <kmem.h>
#include <process.h>
#include <dqdt.h>

// TODO #include <sysfs.h>

///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern process_t process_zero;     // allocated in kernel_init.c file



//////////////////////////////////
void cluster_sysfs_register(void)
{
    // TODO
}

/////////////////////////////////////////////////
error_t cluster_init( struct boot_info_s * info )
{
    lpid_t      lpid;     // local process index
    lid_t       lid;      // local core index

    cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters
    cluster->paddr_width     = info->paddr_width;
    cluster->x_width         = info->x_width;
    cluster->y_width         = info->y_width;
    cluster->x_size          = info->x_size;
    cluster->y_size          = info->y_size;
    cluster->io_cxy          = info->io_cxy;

    // initialize cluster local parameters
    cluster->cores_nr        = info->cores_nr;
    cluster->cores_in_kernel = info->cores_nr;   // all cores start in kernel mode

    // initialize the lock protecting the embedded KCM allocator
    spinlock_init( &cluster->kcm_lock );

    // initialize the DQDT
    cluster->dqdt_root_level = dqdt_init( info->x_size,
                                          info->y_size,
                                          info->y_width );
    cluster->threads_var = 0;
    cluster->pages_var   = 0;

    // initialize the embedded PPM
    ppm_init( &cluster->ppm,
              info->pages_nr,
              info->pages_offset );

    // initialize the embedded KHM
    khm_init( &cluster->khm );

    // initialize the embedded KCM
    kcm_init( &cluster->kcm , KMEM_KCM );

    // initialize all core descriptors
    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        core_init( &cluster->core_tbl[lid],    // target core descriptor
                   lid,                        // local core index
                   info->core[lid].gid );      // gid from boot_info_t
    }

    // initialize the RPC fifo
    rpc_fifo_init( &cluster->rpc_fifo );

    // initialize pref_tbl[] in the process manager
    spinlock_init( &cluster->pmgr.pref_lock );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialize local_list in the process manager
    remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    cluster->pmgr.local_nr = 0;

    // initialize copies_lists in the process manager
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
    }

    hal_wbflush();

    return 0;
}  // end cluster_init()

////////////////////////////////////////
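// Descriptive comment (added, derived from the code below): this function checks the
// coordinates encoded in the <cxy> cluster identifier, assuming cxy = (x << y_width) | y
// as decoded below. It returns true when the decoded (x,y) coordinates fall outside the
// x_size * y_size mesh, and false otherwise.
// Example: with y_width == 4, cxy == 0x15 decodes to x == 1 and y == 5.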
bool_t cluster_is_undefined( cxy_t cxy )
{
    cluster_t * cluster = LOCAL_CLUSTER;

    uint32_t y_width = cluster->y_width;

    uint32_t x = cxy >> y_width;
    uint32_t y = cxy & ((1<<y_width)-1);

    if( x >= cluster->x_size ) return true;
    if( y >= cluster->y_size ) return true;

    return false;
}

////////////////////////////////////////////////////////////////////////////////////
//  Cores related functions
////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////
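// Descriptive comment (added): the two functions below atomically increment / decrement
// the cores_in_kernel counter in the local cluster descriptor. They are presumably
// called by each local core when it enters / exits the kernel.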
void cluster_core_kernel_enter()
{
    cluster_t * cluster = LOCAL_CLUSTER;
    hal_atomic_add( &cluster->cores_in_kernel , 1 );
}

///////////////////////////////
void cluster_core_kernel_exit()
{
    cluster_t * cluster = LOCAL_CLUSTER;
    hal_atomic_add( &cluster->cores_in_kernel , -1 );
}

/////////////////////////////////
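// Descriptive comment (added, derived from the code below): this function scans the
// local core_tbl[] and returns the local index of the core with the lowest usage value
// (the first one when several cores have the same usage). Note that a core whose usage
// is not below the initial threshold (100) is never selected, so index 0 is returned
// by default in that case.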
lid_t cluster_select_local_core()
{
    uint32_t min = 100;
    lid_t    sel = 0;
    lid_t    lid;

    cluster_t * cluster = LOCAL_CLUSTER;

    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        if( cluster->core_tbl[lid].usage < min )
        {
            min = cluster->core_tbl[lid].usage;
            sel = lid;
        }
    }
    return sel;
}

////////////////////////////////////////////////////////////////////////////////////
//  Process management related functions
////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////
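// Descriptive comment (added, derived from the code below): this function returns an
// extended pointer on the reference process descriptor identified by its PID. The
// pref_tbl[] entry is read in the owner cluster, directly when the owner cluster is
// the local cluster, or with a remote access (hal_remote_lwd) otherwise. It returns
// XPTR_NULL when the LPID is out of range or when no process is registered for this
// PID in the owner cluster.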
xptr_t cluster_get_reference_process_from_pid( pid_t pid )
{
    xptr_t ref_xp;   // extended pointer on reference process descriptor

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t  owner_cxy = CXY_FROM_PID( pid );
    lpid_t lpid      = LPID_FROM_PID( pid );

    // check valid PID
    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;

    if( local_cxy == owner_cxy )   // local cluster is owner cluster
    {
        ref_xp = cluster->pmgr.pref_tbl[lpid];
    }
    else                           // use a remote_lwd to access owner cluster
    {
        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
    }

    return ref_xp;
}

////////////////////////////////////////////////
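// Descriptive comment (added, derived from the code below): this function allocates a
// free slot in the local pref_tbl[], registers the <process_xp> extended pointer in it,
// and builds the global PID from the local cluster identifier and the allocated slot
// index, using the PID( cxy , lpid ) macro. It returns EAGAIN when all
// CONFIG_MAX_PROCESS_PER_CLUSTER slots are already used.
// Hypothetical caller sketch (not part of this file):
//     pid_t pid;
//     if( cluster_pid_alloc( XPTR( local_cxy , process ) , &pid ) ) { /* no free slot */ }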
error_t cluster_pid_alloc( xptr_t    process_xp,
                           pid_t   * pid )
{
    error_t     error;
    lpid_t      lpid;
    bool_t      found;

    pmgr_t    * pm = &LOCAL_CLUSTER->pmgr;

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // search an empty slot
    found = false;
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        if( pm->pref_tbl[lpid] == XPTR_NULL )
        {
            found = true;
            break;
        }
    }

    if( found )
    {
        // register process in pref_tbl[]
        pm->pref_tbl[lpid] = process_xp;
        pm->pref_nr++;

        // return the global PID
        *pid = PID( local_cxy , lpid );

        error = 0;
    }
    else
    {
        error = EAGAIN;
    }

    // release the process manager lock
    spinlock_unlock( &pm->pref_lock );

    return error;

}  // end cluster_pid_alloc()

/////////////////////////////////////
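// Descriptive comment (added, derived from the code below): this function releases the
// pref_tbl[] slot associated to the <pid> argument. It must be called in the owner
// cluster: it panics (and puts the calling core to sleep) when the owner cluster is not
// the local cluster or when the LPID is out of range.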
void cluster_pid_release( pid_t pid )
{
    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    lpid_t lpid       = LPID_FROM_PID( pid );

    // check pid argument
    if( (lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER) || (owner_cxy != local_cxy) )
    {
        printk("\n[PANIC] in %s : illegal PID\n", __FUNCTION__ );
        hal_core_sleep();
    }

    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // remove process from pref_tbl[]
    pm->pref_tbl[lpid] = XPTR_NULL;
    pm->pref_nr--;

    // release the process manager lock
    spinlock_unlock( &pm->pref_lock );

}  // end cluster_pid_release()

///////////////////////////////////////////////////////////
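// Descriptive comment (added, derived from the code below): this function scans the
// local_list of process descriptors rooted in the local process manager, and returns a
// local pointer on the process descriptor whose PID matches the <pid> argument, or NULL
// when no copy of this process exists in the local cluster.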
process_t * cluster_get_local_process_from_pid( pid_t pid )
{
    xptr_t         process_xp;
    process_t    * process_ptr;
    xptr_t         root_xp;
    xptr_t         iter_xp;
    bool_t         found;

    found   = false;
    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );

    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_ptr = (process_t *)GET_PTR( process_xp );
        if( process_ptr->pid == pid )
        {
            found = true;
            break;
        }
    }

    if( found ) return process_ptr;
    else        return NULL;

}  // end cluster_get_local_process_from_pid()

//////////////////////////////////////////////////////
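// Descriptive comment (added): the two functions below register / unregister a process
// descriptor in / from the local_list rooted in the local process manager, taking the
// local_lock remote spinlock to protect concurrent accesses.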
void cluster_process_local_link( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get lock protecting the process manager local list
    remote_spinlock_lock( XPTR( local_cxy , &pm->local_lock ) );

    xlist_add_first( XPTR( local_cxy , &pm->local_root ),
                     XPTR( local_cxy , &process->local_list ) );
    pm->local_nr++;

    // release lock protecting the process manager local list
    remote_spinlock_unlock( XPTR( local_cxy , &pm->local_lock ) );
}

////////////////////////////////////////////////////////
void cluster_process_local_unlink( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get lock protecting the process manager local list
    remote_spinlock_lock( XPTR( local_cxy , &pm->local_lock ) );

    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    pm->local_nr--;

    // release lock protecting the process manager local list
    remote_spinlock_unlock( XPTR( local_cxy , &pm->local_lock ) );
}

///////////////////////////////////////////////////////
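// Descriptive comment (added, derived from the code below): the two functions below
// link / unlink the local process descriptor copy in / from the copies_list associated
// to its LPID. The list root, the lock protecting it, and the copies counter are all
// located in the owner cluster, and are therefore accessed with remote primitives.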
void cluster_process_copies_link( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = hal_remote_lwd( XPTR( owner_cxy , &pm->copies_lock[lpid] ) );

    // get extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = hal_remote_lwd( XPTR( owner_cxy , &pm->copies_root[lpid] ) );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock( copies_lock );

    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock( copies_lock );
}

/////////////////////////////////////////////////////////
void cluster_process_copies_unlink( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = hal_remote_lwd( XPTR( owner_cxy , &pm->copies_lock[lpid] ) );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock( copies_lock );

    xlist_unlink( copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock( copies_lock );
}

////////////////////////////////////////////////////////////////////////////////////////
// TODO It seems that the only thing this kernel thread does on each wake-up is
// update the DQDT and go back to sleep... Do we really need a dedicated thread? [AG]
//////////////////////////////////////////////////////////////////////////////////////////

#if 0
void * cluster_manager_thread( void * arg )
{
    register struct dqdt_cluster_s * root;
    register struct cluster_s      * root_home;

    register uint32_t                tm_start;
    register uint32_t                tm_end;
    register uint32_t                cpu_id;
    struct cluster_s               * cluster;
    struct thread_s                * this;
    struct event_s                   event;
    struct alarm_info_s              info;
    register uint32_t                cntr;
    register bool_t                  isRootMgr;
    register uint32_t                period;

    cpu_enable_all_irq( NULL );

    cluster   = arg;
    this      = CURRENT_THREAD;
    cpu_id    = cpu_get_id();
    root      = dqdt_root;
    root_home = dqdt_root->home;
    isRootMgr = (cluster == root_home) ? true : false;
    cntr      = 0;
    period    = (isRootMgr) ?
        CONFIG_DQDT_ROOTMGR_PERIOD * MSEC_PER_TICK :
        CONFIG_DQDT_MGR_PERIOD * MSEC_PER_TICK;

    event_set_senderId(&event, this);
    event_set_priority(&event, E_CHR);
    event_set_handler(&event, &manager_alarm_event_handler);

    info.event = &event;
    thread_preempt_disable(CURRENT_THREAD);

    // infinite loop
    while(1)
    {
        tm_start = cpu_time_stamp();
        dqdt_update();
        tm_end   = cpu_time_stamp();

        if(isRootMgr)
        {
            if((cntr % 10) == 0)
            {
                printk(INFO, "INFO: cpu %d, DQDT update ended [ %u - %u ]\n",
                       cpu_id,
                       tm_end,
                       tm_end - tm_start);

                dqdt_print_summary(root);
            }
        }

        alarm_wait( &info , period );
        sched_sleep(this);
        cntr ++;
    }

    return NULL;
} // end cluster_manager_thread()

//////////////////////////////////////////
EVENT_HANDLER(manager_alarm_event_handler)
{
    struct thread_s *manager;

    manager = event_get_senderId(event);

    thread_preempt_disable(CURRENT_THREAD);

    //printk(INFO, "%s: cpu %d [%u]\n", __FUNCTION__, cpu_get_id(), cpu_time_stamp());

    sched_wakeup(manager);

    thread_preempt_enable(CURRENT_THREAD);

    return 0;
}

///////////////////////////////////////////////
EVENT_HANDLER(cluster_key_create_event_handler)
{
    struct cluster_s *cluster;
    struct thread_s *sender;
    ckey_t *ckey;
    uint32_t key;

    sender  = event_get_senderId(event);
    ckey    = event_get_argument(event);
    cluster = current_cluster;
    key     = cluster->next_key;

    while((key < CLUSTER_TOTAL_KEYS_NR) && (cluster->keys_tbl[key] != NULL))
        key ++;

    if(key < CLUSTER_TOTAL_KEYS_NR)
    {
        ckey->val = key;
        cluster->keys_tbl[key] = (void *) 0x1; // Reserved
        cluster->next_key = key;
        event_set_error(event, 0);
    }
    else
        event_set_error(event, ENOSPC);

    sched_wakeup(sender);
    return 0;
}

///////////////////////////////////////////////
EVENT_HANDLER(cluster_key_delete_event_handler)
{
    struct cluster_s *cluster;
    struct thread_s *sender;
    ckey_t *ckey;
    uint32_t key;

    sender  = event_get_senderId(event);
    ckey    = event_get_argument(event);
    cluster = current_cluster;
    key     = ckey->val;

    if(key < cluster->next_key)
        cluster->next_key = key;

    cluster->keys_tbl[key] = NULL;
    event_set_error(event, 0);

    sched_wakeup(sender);
    return 0;
}

#define _CKEY_CREATE  0x0
#define _CKEY_DELETE  0x1

error_t cluster_do_key_op(ckey_t *key, uint32_t op)
{
    struct event_s event;
    struct thread_s *this;
    struct cluster_s *cluster;
    struct cpu_s *cpu;

    this = CURRENT_THREAD;

    event_set_priority(&event, E_FUNC);
    event_set_senderId(&event, this);
    event_set_argument(&event, key);

    if(op == _CKEY_CREATE)
        event_set_handler(&event, cluster_key_create_event_handler);
    else
        event_set_handler(&event, cluster_key_delete_event_handler);

    cluster = current_cluster;
    cpu     = cluster->bscluster->bscpu;
    event_send(&event, &cpu->re_listner);

    sched_sleep(this);

    return event_get_error(&event);
}

error_t cluster_key_create(ckey_t *key)
{
    return cluster_do_key_op(key, _CKEY_CREATE);
}

error_t cluster_key_delete(ckey_t *key)
{
    return cluster_do_key_op(key, _CKEY_DELETE);
}

void* cluster_getspecific(ckey_t *key)
{
    struct cluster_s *cluster;

    cluster = current_cluster;
    return cluster->keys_tbl[key->val];
}

void  cluster_setspecific(ckey_t *key, void *val)
{
    struct cluster_s *cluster;

    cluster = current_cluster;
    cluster->keys_tbl[key->val] = val;
}
#endif