source: trunk/kernel/libk/user_dir.c @ 665

Last change on this file since 665 was 641, checked in by alain, 5 years ago
  • Fix several bugs.
  • Introduce the "stat" command in KSH.

This almos-mkh version successfully executed the FFT application
(65536 complex points) on the TSAR architecture from 1 to 64 cores.

File size: 17.9 KB
RevLine 
[613]1/*
2 * user_dir.c - kernel DIR related operations implementation.
3 *
[629]4 * Authors   Alain   Greiner (2016,2017,2018,2019)
[613]5 *
6 * Copyright (c) UPMC Sorbonne Universites
7 *
8 * This file is part of ALMOS-MKH.
9 *
10 * ALMOS-MKH is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2.0 of the License.
13 *
14 * ALMOS-MKH is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
21 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
24#include <kernel_config.h>
25#include <hal_kernel_types.h>
26#include <hal_irqmask.h>
27#include <hal_remote.h>
28#include <thread.h>
29#include <xlist.h>
30#include <scheduler.h>
31#include <remote_queuelock.h>
32#include <user_dir.h>
33
34
35/////////////////////////////////////////////
36xptr_t user_dir_from_ident( intptr_t  ident )
37{
38    // get pointer on local process_descriptor
39    process_t * process = CURRENT_THREAD->process;
40
41    // get pointers on reference process
42    xptr_t      ref_xp  = process->ref_xp;
43    cxy_t       ref_cxy = GET_CXY( ref_xp );
44    process_t * ref_ptr = GET_PTR( ref_xp );
45
46    // get extended pointers on open directories list and lock 
47    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->dir_root );
48    xptr_t lock_xp = XPTR( ref_cxy , &ref_ptr->dir_lock );
49
50    // get lock protecting open directories list
51    remote_queuelock_acquire( lock_xp );
52 
53    // scan reference process dir list
54    xptr_t           iter_xp;
55    xptr_t           dir_xp;
56    cxy_t            dir_cxy;
57    user_dir_t     * dir_ptr;
58    intptr_t         current;
59    bool_t           found = false;
60           
61    XLIST_FOREACH( root_xp , iter_xp )
62    {
63        dir_xp  = XLIST_ELEMENT( iter_xp , user_dir_t , list );
64        dir_cxy = GET_CXY( dir_xp );
65        dir_ptr = GET_PTR( dir_xp );
66        current = (intptr_t)hal_remote_lpt( XPTR( dir_cxy , &dir_ptr->ident ) );   
67        if( ident == current )
68        {
69            found = true;
70            break;
71        }
72    }
73
74    // release lock protecting open directories list
75    remote_queuelock_release( lock_xp );
76 
77    if( found == false )  return XPTR_NULL;
78    else                  return dir_xp;
79
80}  // end user_dir_from_ident()
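
// Illustrative usage sketch (not part of the repository file): a hypothetical
// getdents-like handler could use user_dir_from_ident() to recover the remote
// user_dir_t descriptor from the user-space identifier, then read its fields
// with the remote access primitives already used in this file:
//
//     xptr_t dir_xp = user_dir_from_ident( ident );
//     if( dir_xp == XPTR_NULL ) return -1;        // no open DIR matches this ident
//
//     cxy_t        dir_cxy = GET_CXY( dir_xp );
//     user_dir_t * dir_ptr = GET_PTR( dir_xp );
//     uint32_t     current = hal_remote_l32( XPTR( dir_cxy , &dir_ptr->current ) );
//     uint32_t     entries = hal_remote_l32( XPTR( dir_cxy , &dir_ptr->entries ) );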
81
[614]82//////////////////////////////////////////////////
83user_dir_t * user_dir_create( vfs_inode_t * inode,
84                              xptr_t        ref_xp )
[613]85{ 
86    user_dir_t    * dir;               // local pointer on created user_dir_t
87    vseg_t        * vseg;              // local pointer on dirent array vseg
88    uint32_t        vseg_size;         // size of vseg in bytes
89    process_t     * ref_ptr;           // local pointer on reference process
90    cxy_t           ref_cxy;           // reference process cluster identifier
[614]91    pid_t           ref_pid;           // reference process PID
[613]92    xptr_t          gpt_xp;            // extended pointer on reference process GPT
[629]93    uint32_t        attr;              // attributes for all GPT entries
[613]94    uint32_t        dirents_per_page;  // number of dirent descriptors per page
95    page_t        * page;              // local pointer on page descriptor
96    struct dirent * base;              // local pointer on physical page base
97    uint32_t        total_dirents;     // total number of dirents in dirent array
98    uint32_t        total_pages;       // total number of pages for dirent array
[629]99    vpn_t           vpn_base;          // first page in dirent array vseg
100    vpn_t           vpn;               // current page in dirent array vseg
[613]101    ppn_t           ppn;               // ppn of currently allocated physical page
102    uint32_t        entries;           // number of dirents actually copied in one page
103    uint32_t        first_entry;       // index of first dirent to copy in dirent array
104    bool_t          done;              // last entry found and copied when true
105    list_entry_t    root;              // root of temporary list of allocated pages
106    uint32_t        page_id;           // page index in list of physical pages
107    kmem_req_t      req;               // kmem request descriptor
[629]108    ppn_t           fake_ppn;          // unused, but required by hal_gpt_lock_pte()
109    uint32_t        fake_attr;         // unused, but required by hal_gpt_lock_pte()
[613]110    error_t         error;
111
[629]112    // get cluster, local pointer, and pid of reference process
[614]113    ref_cxy = GET_CXY( ref_xp );
114    ref_ptr = GET_PTR( ref_xp );
115    ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) );
[613]116
117#if DEBUG_USER_DIR
118uint32_t cycle = (uint32_t)hal_get_cycles();
119thread_t * this = CURRENT_THREAD;
120if( cycle > DEBUG_USER_DIR )
[614]121printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) and process %x / cycle %d\n",
122__FUNCTION__, this->process->pid, this->trdid, local_cxy, inode, ref_pid, cycle );
[613]123#endif
124
125// check dirent size
[635]126assert( ( sizeof(struct dirent) == 64), "sizeof(dirent) must be 64\n");
[613]127
128    // compute number of dirent per page
129    dirents_per_page = CONFIG_PPM_PAGE_SIZE >> 6;
130   
131    // initialise temporary list of pages
132    list_root_init( &root );
133
134    // allocate memory for a local user_dir descriptor
[635]135    req.type  = KMEM_KCM;
136    req.order = bits_log2( sizeof(user_dir_t) );
137    req.flags = AF_ZERO | AF_KERNEL;
[613]138    dir       = kmem_alloc( &req );
139
140    if( dir == NULL )
141    {
142        printk("\n[ERROR] in %s : cannot allocate user_dir_t in cluster %x\n",
143        __FUNCTION__, local_cxy );
144        return NULL;
145    }
146
[641]147    // Build and initialize the dirent array as a list of pages.
[613]148    // For each iteration in this while loop:
149    // - allocate one 4 Kbytes physical page (64 dirent slots)
150    // - call the relevant FS specific function to scan the directory mapper,
151    //   and copy up to 64 entries in the page.
152    // - register the page in a temporary list using the embedded page list_entry
153    // - exit when the last entry has been found (done == true).
154
155    // initialize loops variables
156    done          = false;
157    total_dirents = 0;
158    total_pages   = 0;
159    first_entry   = 0;
160
161    while( done == false )  // loop on physical pages
162    {
163        // allocate one physical page
[635]164        req.type  = KMEM_PPM;
165        req.order = 0;
[613]166        req.flags = AF_ZERO;
[635]167        base      = kmem_alloc( &req );
[613]168
[635]169        if( base == NULL )
[613]170        {
171            printk("\n[ERROR] in %s : cannot allocate page in cluster %x\n",
172            __FUNCTION__, ref_cxy );
173            goto user_dir_create_failure;
174        }
175
[641]176        // call the relevant FS specific function to copy dirents in page
[613]177        error = vfs_fs_get_user_dir( inode,
178                                     base,
179                                     dirents_per_page,
180                                     first_entry,
181                                     false,        // don't create missing inodes
182                                     &entries,
183                                     &done );
184        if( error )
185        {
186            printk("\n[ERROR] in %s : cannot initialise dirent array in cluster %x\n",
187            __FUNCTION__, ref_cxy );
188            goto user_dir_create_failure;
189        }
190
191        // increment number of written dirents
192        total_dirents += entries;
193
[635]194        // get page descriptor pointer from base
195        page = GET_PTR( ppm_base2page( XPTR( local_cxy , base ) ) );
196
[613]197        // register page in temporary list
198        list_add_last( &root , &page->list ); 
199        total_pages++; 
200
201        // set first_entry for next iteration
202        first_entry = total_dirents;
203
204    } // end while
205       
[614]206#if DEBUG_USER_DIR
207if( cycle > DEBUG_USER_DIR )
208printk("\n[%s] thread[%x,%x] initialised dirent array / %d entries / cycle %d\n",
209__FUNCTION__, this->process->pid, this->trdid, total_dirents, cycle );
210#endif
211
[641]212    // compute required vseg size
[613]213    vseg_size = total_dirents << 6;
214
215    // create an ANON vseg and register it in reference process VSL
216    if( local_cxy == ref_cxy )
217    {
[614]218        vseg = vmm_create_vseg( ref_ptr,
[613]219                                VSEG_TYPE_ANON,
220                                0,                      // vseg base (unused)
221                                vseg_size,
222                                0,                      // file offset (unused)
223                                0,                      // file_size (unused)
224                                XPTR_NULL,              // mapper (unused)
[614]225                                local_cxy );
[613]226    }
227    else
228    {
229        rpc_vmm_create_vseg_client( ref_cxy,
230                                    ref_ptr,
231                                    VSEG_TYPE_ANON,
232                                    0,                     // vseg base (unused)
233                                    vseg_size,
234                                    0,                     // file offset (unused)
235                                    0,                     // file size (unused)
236                                    XPTR_NULL,             // mapper (unused)
[614]237                                    local_cxy,
[613]238                                    &vseg ); 
239    }
[614]240
[613]241    if( vseg == NULL )
242    {
[614]243        printk("\n[ERROR] in %s : cannot create vseg for user_dir in cluster %x\n",
[613]244        __FUNCTION__, ref_cxy);
245        goto user_dir_create_failure;
246    }
247
[614]248#if DEBUG_USER_DIR
[613]249if( cycle > DEBUG_USER_DIR )
250printk("\n[%s] thread[%x,%x] allocated vseg ANON / base %x / size %x\n",
[614]251__FUNCTION__, this->process->pid, this->trdid, vseg->min, vseg->max - vseg->min );
[613]252#endif
253
254// check vseg size
255assert( (total_pages == hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_size ) ) ),
[641]256"inconsistent vseg size for dirent array" );
[613]257
[629]258    // build extended pointer on reference process GPT
[613]259    gpt_xp         = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
260
[629]261    // build PTE attributes
262    attr = GPT_MAPPED   |
263           GPT_SMALL    |
264           GPT_READABLE |
265           GPT_CACHABLE |
266           GPT_USER     ;
267
[613]268    // get first vpn from vseg descriptor
[629]269    vpn_base = hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_base ) );
[613]270
271    // scan the list of allocated physical pages to map
[629]272    // all physical pages in the reference process GPT
[613]273    page_id = 0;
274    while( list_is_empty( &root ) == false )
275    {
276        // get pointer on first page descriptor
277        page = LIST_FIRST( &root , page_t , list );
278
279        // compute ppn
280        ppn = ppm_page2ppn( XPTR( local_cxy , page ) );
[629]281
282        // compute vpn
283        vpn = vpn_base + page_id;
[613]284       
[629]285        // lock the PTE (and create PT2 if required)
286        error = hal_gpt_lock_pte( gpt_xp,
287                                  vpn,
288                                  &fake_attr,
289                                  &fake_ppn );
[613]290        if( error )
291        {
292            printk("\n[ERROR] in %s : cannot map vpn %x in GPT\n",
[629]293            __FUNCTION__, vpn );
[619]294
295            // delete the vseg
[641]296            intptr_t base = (intptr_t)hal_remote_lpt( XPTR( ref_cxy , &vseg->min ) );
297            rpc_vmm_remove_vseg_client( ref_cxy, ref_pid, base );
[640]298         
[613]299            // release the user_dir descriptor
[635]300            req.type = KMEM_KCM;
[613]301            req.ptr  = dir;
302            kmem_free( &req );
303            return NULL;
304        }
305
[629]306        // set PTE in GPT                         
307        hal_gpt_set_pte( gpt_xp,
308                         vpn,
309                         attr,
310                         ppn );
311
[614]312#if DEBUG_USER_DIR
[613]313if( cycle > DEBUG_USER_DIR )
314printk("\n[%s] thread[%x,%x] mapped vpn %x to ppn %x\n",
[614]315__FUNCTION__, this->process->pid, this->trdid, vpn, ppn );
[613]316#endif
317
318        // remove the page from temporary list
319        list_unlink( &page->list );
320
321        page_id++;
322
323    }  // end map loop
324
325// check number of pages
326assert( (page_id == total_pages) , "inconsistent page count\n" );
327
328    // initialise user_dir_t structure
329    dir->current = 0;
330    dir->entries = total_dirents;
[629]331    dir->ident   = (intptr_t)(vpn_base << CONFIG_PPM_PAGE_SHIFT);
[613]332
333    // build extended pointers on root and lock of user_dir xlist in ref process
334    xptr_t root_xp  = XPTR( ref_cxy , &ref_ptr->dir_root );
335    xptr_t lock_xp  = XPTR( ref_cxy , &ref_ptr->dir_lock );
336
337    // build extended pointer on list field in user_dir structure
338    xptr_t entry_xp = XPTR( local_cxy , &dir->list );
339
340    // get lock protecting open directories list
341    remote_queuelock_acquire( lock_xp );
342
343    // register user_dir_t in reference process 
344    xlist_add_first( root_xp , entry_xp );
345
346    // release lock protecting open directories list
347    remote_queuelock_release( lock_xp );
348
349#if DEBUG_USER_DIR
350cycle = (uint32_t)hal_get_cycles();
351if( cycle > DEBUG_USER_DIR )
352printk("\n[%s] thread[%x,%x] created user_dir (%x,%x) / %d entries / cycle %d\n",
[614]353__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, total_dirents, cycle );
[613]354#endif
355
356    return dir;
357
358user_dir_create_failure:
359
360    // release local user_dir_t structure
[635]361    req.type = KMEM_KCM;
[613]362    req.ptr  = dir;
363    kmem_free( &req );
364
365    // release local physical pages
366    while( list_is_empty( &root ) == false )
367    {
368        page = LIST_FIRST( &root , page_t , list );
[635]369
370        // get base from page descriptor pointer
371        base = GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) );
372 
373        req.type  = KMEM_PPM;
374        req.ptr   = base;
[613]375        kmem_free( &req );
376    }
377
378    return NULL;
379
380}  // end user_dir_create()
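
// Illustrative usage sketch (not part of the repository file): a hypothetical
// opendir-like call site. The surrounding error handling is an assumption; only
// user_dir_create() and the user_dir_t fields are defined by this file.
//
//     user_dir_t * dir = user_dir_create( inode , ref_xp );
//     if( dir == NULL ) return -1;
//
//     // dir->ident is the user-space virtual base address of the dirent array
//     // (vpn_base << CONFIG_PPM_PAGE_SHIFT); it is the opaque value returned to
//     // user code, and the value later matched by user_dir_from_ident().
//     intptr_t ident = dir->ident;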
381
[614]382////////////////////////////////////////
383void user_dir_destroy( user_dir_t * dir,
384                       xptr_t       ref_xp )
[613]385{
[614]386    thread_t     * this;       // local pointer on calling thread
[613]387    cluster_t    * cluster;    // local pointer on local cluster
388    intptr_t       ident;      // user pointer on dirent array
[614]389    pid_t          ref_pid;    // reference process PID
[613]390    cxy_t          ref_cxy;    // reference process cluster identifier
391    process_t    * ref_ptr;    // local pointer on reference process
392    xptr_t         root_xp;    // root of xlist
393    xptr_t         lock_xp;    // extended pointer on lock protecting xlist
394    xptr_t         iter_xp;    // iterator in xlist
395    reg_t          save_sr;    // for critical section
396    cxy_t          owner_cxy;  // owner process cluster
397    lpid_t         lpid;       // process local index
398    rpc_desc_t     rpc;        // rpc descriptor
[619]399    uint32_t       responses;  // response counter
[613]400     
401    this    = CURRENT_THREAD;
402    cluster = LOCAL_CLUSTER;
403
[614]404    // get cluster, local pointer, and PID of reference user process
405    ref_cxy = GET_CXY( ref_xp );
406    ref_ptr = GET_PTR( ref_xp );
407    ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) );
408
[613]409#if DEBUG_USER_DIR
410uint32_t cycle = (uint32_t)hal_get_cycles();
411if( cycle > DEBUG_USER_DIR )
[614]412printk("\n[%s] thread[%x,%x] enter for user_dir (%x,%x) and process %x / cycle %d\n",
[633]413__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, ref_pid, cycle );
[613]414#endif
415
416    // get user pointer on dirent array
417    ident = dir->ident;
418
419    // build extended pointer on lock protecting open directories list
420    lock_xp = XPTR( ref_cxy , &ref_ptr->dir_lock );
421
422    // get lock protecting open directories list
423    remote_queuelock_acquire( lock_xp );
424
425    // remove dir from reference process xlist
426    xlist_unlink( XPTR( local_cxy , &dir->list ) );
427
428    // release lock protecting open directories list
429    remote_queuelock_release( lock_xp );
430
431    // To delete all copies of the vseg containing the dirent array, the client thread
432    // sends parallel RPCs to all clusters containing a copy of the reference process
433    // (including the local cluster). It blocks and deschedules when all RPCs have been
434    // sent, to wait for all RPC responses, and is unblocked by the last RPC server thread.
435    // It allocates a shared RPC descriptor on its stack, because all parallel
[641]436    // server threads use the same input arguments, and there is no out argument.
[613]437
438    // get owner cluster identifier and process lpid
[614]439    owner_cxy = CXY_FROM_PID( ref_pid );
440    lpid      = LPID_FROM_PID( ref_pid );
[613]441
442    // get root of list of copies and lock from owner cluster
443    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
444    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
445
446    // mask IRQs
447    hal_disable_irq( &save_sr);
448
449    // client thread blocks itself
450    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
451
[619]452    // initialize responses counter
453    responses = 0;
454
455    // initialize a shared RPC descriptor
456    rpc.rsp       = &responses;
[641]457    rpc.blocking  = false;                  // non blocking behaviour for rpc_send()
[640]458    rpc.index     = RPC_VMM_REMOVE_VSEG;
[613]459    rpc.thread    = this;
460    rpc.lid       = this->core->lid;
[614]461    rpc.args[0]   = ref_pid;
[613]462    rpc.args[1]   = ident;
463
464    // take the lock protecting process copies
465    remote_queuelock_acquire( lock_xp );
466
467    // scan list of process copies
468    XLIST_FOREACH( root_xp , iter_xp )
469    {
470        // get extended pointer and cluster of process
471        xptr_t      process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
472        cxy_t       process_cxy = GET_CXY( process_xp );
473
474        // atomically increment responses counter
[619]475        hal_atomic_add( &responses , 1 );
[613]476
[641]477#if (DEBUG_USER_DIR & 1)
478uint32_t cycle = (uint32_t)hal_get_cycles();
479if( cycle > DEBUG_USER_DIR )
480printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
481__FUNCTION__, this->process->pid, this->trdid, process_cxy );
482#endif
483
[619]484        // send RPC to target cluster 
485        rpc_send( process_cxy , &rpc );
486    }
[613]487
488    // release the lock protecting process copies
489    remote_queuelock_release( lock_xp );
490
491    // client thread deschedule
[619]492    sched_yield("blocked on rpc_vmm_remove_vseg");
[613]493 
494    // restore IRQs
495    hal_restore_irq( save_sr);
496
497    // release local user_dir_t structure
498    kmem_req_t  req;
[635]499    req.type = KMEM_KCM;
[613]500    req.ptr  = dir;
501    kmem_free( &req );
502
503#if DEBUG_USER_DIR
504cycle = (uint32_t)hal_get_cycles();
505if( cycle > DEBUG_USER_DIR )
506printk("\n[%s] thread[%x,%x] deleted user_dir (%x,%x) / cycle %d\n",
[633]507__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, cycle );
[613]508#endif
509
510}  // end user_dir_destroy()
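
// Illustrative usage sketch (not part of the repository file): a hypothetical
// closedir-like sequence combining the two functions above. user_dir_destroy()
// takes a local pointer and frees it with a local kmem request, so it is
// expected to run in the cluster that contains the user_dir_t structure.
//
//     xptr_t dir_xp = user_dir_from_ident( ident );
//     if( dir_xp != XPTR_NULL )
//     {
//         user_dir_destroy( GET_PTR( dir_xp ) , ref_xp );
//     }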