source: trunk/kernel/libk/user_dir.c @ 683

Last change on this file since 683 was 683, checked in by alain, 3 years ago

All modifications required to support the <tcp_chat> application
including error recovery in case of packet loss.

File size: 18.3 KB
Line 
1/*
2 * user_dir.c - kernel DIR related operations implementation.
3 *
4 * Authors   Alain   Greiner (2016,2017,2018,2019,2020)
5 *
6 * Copyright (c) UPMC Sorbonne Universites
7 *
8 * This file is part of ALMOS-MKH.
9 *
10 * ALMOS-MKH is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2.0 of the License.
13 *
14 * ALMOS-MKH is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
21 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
24#include <kernel_config.h>
25#include <hal_kernel_types.h>
26#include <hal_irqmask.h>
27#include <hal_remote.h>
28#include <thread.h>
29#include <xlist.h>
30#include <scheduler.h>
31#include <remote_queuelock.h>
32#include <user_dir.h>
33
34
35/////////////////////////////////////////////
36xptr_t user_dir_from_ident( intptr_t  ident )
37{
38    // get pointer on local process_descriptor
39    process_t * process = CURRENT_THREAD->process;
40
41    // get pointers on reference process
42    xptr_t      ref_xp  = process->ref_xp;
43    cxy_t       ref_cxy = GET_CXY( ref_xp );
44    process_t * ref_ptr = GET_PTR( ref_xp );
45
46    // get extended pointers on open directories list and lock 
47    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->dir_root );
48    xptr_t lock_xp = XPTR( ref_cxy , &ref_ptr->dir_lock );
49
50    // get lock protecting open directories list
51    remote_queuelock_acquire( lock_xp );
52 
53    // scan reference process dir list
54    xptr_t           iter_xp;
55    xptr_t           dir_xp;
56    cxy_t            dir_cxy;
57    user_dir_t     * dir_ptr;
58    intptr_t         current;
59    bool_t           found = false;
60           
61    XLIST_FOREACH( root_xp , iter_xp )
62    {
63        dir_xp  = XLIST_ELEMENT( iter_xp , user_dir_t , list );
64        dir_cxy = GET_CXY( dir_xp );
65        dir_ptr = GET_PTR( dir_xp );
66        current = (intptr_t)hal_remote_lpt( XPTR( dir_cxy , &dir_ptr->ident ) );   
67        if( ident == current )
68        {
69            found = true;
70            break;
71        }
72    }
73
74    // relese lock protecting open directories list
75    remote_queuelock_release( lock_xp );
76 
77    if( found == false )  return XPTR_NULL;
78    else                  return dir_xp;
79
80}  // end user_dir_from_ident()
81
82//////////////////////////////////////////////////
83user_dir_t * user_dir_create( vfs_inode_t * inode,
84                              xptr_t        ref_xp )
85{ 
86    user_dir_t    * dir;               // local pointer on created user_dir_t
87    vseg_t        * vseg;              // local pointer on dirent array vseg
88    uint32_t        vseg_size;         // size of vseg in bytes
89    process_t     * ref_ptr;           // local pointer on reference process
90    cxy_t           ref_cxy;           // reference process cluster identifier
91    pid_t           ref_pid;           // reference process PID
92    xptr_t          gpt_xp;            // extended pointer on reference process GPT
93    uint32_t        attr;              // attributes for all GPT entries
94    uint32_t        dirents_per_page;  // number of dirent descriptors per page
95    page_t        * page;              // local pointer on page descriptor
96    struct dirent * base;              // local pointer on physical page base
97    uint32_t        total_dirents;     // total number of dirents in dirent array
98    uint32_t        total_pages;       // total number of pages for dirent array
99    vpn_t           vpn_base;          // first page in dirent array vseg
100    vpn_t           vpn;               // current page in dirent array vseg
101    ppn_t           ppn;               // ppn of currently allocated physical page
102    uint32_t        entries;           // number of dirent actually comied in one page
103    uint32_t        first_entry;       // index of first dentry to copy in dirent array
104    bool_t          done;              // last entry found and copied when true
105    list_entry_t    root;              // root of temporary list of allocated pages
106    uint32_t        page_id;           // page index in list of physical pages
107    ppn_t           fake_ppn;          // unused, but required by hal_gptlock_pte()
108    uint32_t        fake_attr;         // unused, but required by hal_gptlock_pte()
109    error_t         error;
110
111#if DEBUG_USER_DIR_CREATE || DEBUG_USER_DIR_ERROR
112uint32_t   cycle = (uint32_t)hal_get_cycles();
113thread_t * this  = CURRENT_THREAD;
114#endif
115
116    // get cluster, local pointer, and pid of reference process
117    ref_cxy = GET_CXY( ref_xp );
118    ref_ptr = GET_PTR( ref_xp );
119    ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) );
120
121#if DEBUG_USER_DIR_CREATE
122if( DEBUG_USER_DIR_CREATE < cycle )
123printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) and process %x / cycle %d\n",
124__FUNCTION__, this->process->pid, this->trdid, local_cxy, inode, ref_pid, cycle );
125#endif
126
127// check dirent size
128assert( __FUNCTION__, ( sizeof(struct dirent) == 64), "sizeof(dirent) must be 64\n");
129
130    // compute number of dirent per page
131    dirents_per_page = CONFIG_PPM_PAGE_SIZE >> 6;
132   
133    // initialise temporary list of pages
134    list_root_init( &root );
135
136    // allocate memory for a local user_dir descriptor
137    dir = kmem_alloc( bits_log2(sizeof(user_dir_t)) , AF_ZERO );
138
139    if( dir == NULL )
140    {
141
142#if DEBUG_USER_DIR_ERROR
143printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate user_dir_t in cluster %x / cycle %d\n",
144__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
145#endif
146        return NULL;
147    }
148
149    // First loop to build and initialize the dirent array
150    // as a temporary list of pages. For each iteration :
151    // - allocate one physical 4 Kbytes (64 dirent slots)
152    // - call the relevant FS specific function to scan the directory mapper,
153    //   and copy up to 64 entries in the page.
154    // - register the page in a temporary list using the embedded page list_entry
155    // - exit when the last entry has been found (done == true).
156
157    // initialize loops variables
158    done          = false;
159    total_dirents = 0;
160    total_pages   = 0;
161    first_entry   = 0;
162
163    while( done == false )  // loop on physical pages
164    {
165        // allocate one physical page
166        base = kmem_alloc( CONFIG_PPM_PAGE_ORDER , AF_ZERO );
167
168        if( base == NULL )
169        {
170
171#if DEBUG_USER_DIR_ERROR
172printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate page in cluster %x / cycle %d\n",
173__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
174#endif
175            goto user_dir_create_failure;
176        }
177
178        // call the relevant FS specific function to copy dirents in page
179        error = vfs_fs_get_user_dir( inode,
180                                     base,
181                                     dirents_per_page,
182                                     first_entry,
183                                     false,        // don't create missing inodes
184                                     &entries,
185                                     &done );
186        if( error )
187        {
188
189#if DEBUG_USER_DIR_ERROR
190printk("\n[ERROR] in %s : thread[%x,%x] cannot initialize dirent array in cluster %x / cycle %d\n",
191__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
192#endif
193            goto user_dir_create_failure;
194        }
195
196        // increment number of written dirents
197        total_dirents += entries;
198
199        // get page descriptor pointer from base
200        page = GET_PTR( ppm_base2page( XPTR( local_cxy , base ) ) );
201
202        // register page in temporary list
203        list_add_last( &root , &page->list ); 
204        total_pages++; 
205
206        // set first_entry for next iteration
207        first_entry = total_dirents;
208
209    } // end while
210       
211#if DEBUG_USER_DIR_CREATE
212if( DEBUG_USER_DIR_CREATE < cycle )
213printk("\n[%s] thread[%x,%x] initialised dirent array / %d entries\n",
214__FUNCTION__, this->process->pid, this->trdid, total_dirents, cycle );
215#endif
216
217    // compute required vseg size
218    vseg_size = total_dirents << 6;
219
220    // create an ANON vseg and register it in reference process VSL
221    if( local_cxy == ref_cxy )
222    {
223        vseg = vmm_create_vseg( ref_ptr,
224                                VSEG_TYPE_ANON,
225                                0,                      // vseg base (unused)
226                                vseg_size,
227                                0,                      // file offset (unused)
228                                0,                      // file_size (unused)
229                                XPTR_NULL,              // mapper (unused)
230                                local_cxy );
231    }
232    else
233    {
234        rpc_vmm_create_vseg_client( ref_cxy,
235                                    ref_ptr,
236                                    VSEG_TYPE_ANON,
237                                    0,                     // vseg base (unused)
238                                    vseg_size,
239                                    0,                     // file offset (unused)
240                                    0,                     // file size (unused)
241                                    XPTR_NULL,             // mapper (unused)
242                                    local_cxy,
243                                    &vseg ); 
244    }
245
246    if( vseg == NULL )
247    {
248
249#if DEBUG_USER_DIR_ERROR
250printk("\n[ERROR] in %s : thread[%x,%x] cannot create vseg in cluster %x / cycle %d\n",
251__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
252#endif
253        goto user_dir_create_failure;
254    }
255
256#if DEBUG_USER_DIR_CREATE
257if( DEBUG_USER_DIR_CREATE < cycle )
258printk("\n[%s] thread[%x,%x] allocated vseg ANON / base %x / size %x\n",
259__FUNCTION__, this->process->pid, this->trdid, vseg->min, vseg->max - vseg->min );
260#endif
261
262// check vseg size
263assert( __FUNCTION__, (total_pages == hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_size ) ) ),
264"unconsistent vseg size for dirent array " );
265
266    // build extended pointer on reference process GPT
267    gpt_xp         = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
268
269    // build PTE attributes
270    attr = GPT_MAPPED   |
271           GPT_SMALL    |
272           GPT_READABLE |
273           GPT_CACHABLE |
274           GPT_USER     ;
275
276    // get first vpn from vseg descriptor
277    vpn_base = hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_base ) );
278
279    // Second loop on the allocated physical pages to map
280    // all physical pages in the reference process GPT
281    // The pages are mapped in the user process GPT, but
282    // are removed from the temporary list
283
284    page_id = 0;
285
286    while( list_is_empty( &root ) == false )
287    {
288        // get pointer on first page descriptor
289        page = LIST_FIRST( &root , page_t , list );
290
291        // compute ppn
292        ppn = ppm_page2ppn( XPTR( local_cxy , page ) );
293
294        // compute vpn
295        vpn = vpn_base + page_id;
296       
297        // lock the PTE (and create PT2 if required)
298        error = hal_gpt_lock_pte( gpt_xp,
299                                  vpn,
300                                  &fake_attr,
301                                  &fake_ppn );
302        if( error )
303        {
304
305#if DEBUG_USER_DIR_ERROR
306printk("\n[ERROR] in %s : thread[%x,%x] cannot map vpn %x in cluster %x / cycle %d\n",
307__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, cycle );
308#endif
309            // delete the vseg
310            intptr_t base = (intptr_t)hal_remote_lpt( XPTR( ref_cxy , &vseg->min ) );
311            rpc_vmm_remove_vseg_client( ref_cxy, ref_pid, base );
312         
313            // release the user_dir descriptor
314            kmem_free( dir , bits_log2(sizeof(user_dir_t)) );
315            return NULL;
316        }
317
318        // set PTE in GPT                         
319        hal_gpt_set_pte( gpt_xp,
320                         vpn,
321                         attr,
322                         ppn );
323
324#if DEBUG_USER_DIR_CREATE
325if( DEBUG_USER_DIR_CREATE < cycle )
326printk("\n[%s] thread[%x,%x] mapped vpn %x to ppn %x\n",
327__FUNCTION__, this->process->pid, this->trdid, vpn + page_id, ppn );
328#endif
329
330        // remove the page from temporary list
331        list_unlink( &page->list );
332
333        page_id++;
334
335    }  // end map loop
336
337// check number of pages
338assert( __FUNCTION__, (page_id == total_pages) , "unconsistent pages number\n" );
339
340    // initialise user_dir_t structure
341    dir->current = 0;
342    dir->entries = total_dirents;
343    dir->ident   = (intptr_t)(vpn_base << CONFIG_PPM_PAGE_ORDER);
344
345    // build extended pointers on root and lock of user_dir xlist in ref process
346    xptr_t root_xp  = XPTR( ref_cxy , &ref_ptr->dir_root );
347    xptr_t lock_xp  = XPTR( ref_cxy , &ref_ptr->dir_lock );
348
349    // build extended pointer on list field in user_dir structure
350    xptr_t entry_xp = XPTR( local_cxy , &dir->list );
351
352    // get lock protecting open directories list
353    remote_queuelock_acquire( lock_xp );
354
355    // register user_dir_t in reference process 
356    xlist_add_first( root_xp , entry_xp );
357
358    // release lock protecting  open directorie list
359    remote_queuelock_release( lock_xp );
360
361#if DEBUG_USER_DIR_CREATE
362if( DEBUG_USER_DIR_CREATE < cycle )
363printk("\n[%s] thread[%x,%x] created user_dir (%x,%x) / %d entries / cycle %d\n",
364__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, total_dirents, cycle );
365#endif
366
367    return dir;
368
369user_dir_create_failure:
370
371    // release user_dir_t structure
372    kmem_free( dir , bits_log2(sizeof(user_dir_t)) );
373
374    // release physical pages
375    while( list_is_empty( &root ) == false )
376    {
377        // get page descriptor
378        page = LIST_FIRST( &root , page_t , list );
379
380        // get base from page descriptor pointer
381        base = GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) );
382 
383        // release the page
384        kmem_free( base , CONFIG_PPM_PAGE_ORDER );
385    }
386
387    return NULL;
388
389}  // end user_dir_create()
390
391////////////////////////////////////////
392void user_dir_destroy( user_dir_t * dir,
393                       xptr_t       ref_xp )
394{
395    thread_t     * this;       // local pointer on calling thread
396    cluster_t    * cluster;    // local pointer on local cluster
397    intptr_t       ident;      // user pointer on dirent array
398    xptr_t         ref_pid;    // reference process PID
399    cxy_t          ref_cxy;    // reference process cluster identifier
400    process_t    * ref_ptr;    // local pointer on reference process
401    xptr_t         root_xp;    // root of xlist
402    xptr_t         lock_xp;    // extended pointer on lock protecting xlist
403    xptr_t         iter_xp;    // iteratot in xlist
404    reg_t          save_sr;    // for critical section
405    cxy_t          owner_cxy;  // owner process cluster
406    lpid_t         lpid;       // process local index
407    rpc_desc_t     rpc;        // rpc descriptor
408    uint32_t       responses;  // response counter
409     
410    this    = CURRENT_THREAD;
411    cluster = LOCAL_CLUSTER;
412
413#if DEBUG_USER_DIR_DESTROY
414uint32_t cycle = (uint32_t)hal_get_cycles();
415#endif
416
417    // get cluster, local pointer, and PID of reference user process
418    ref_cxy = GET_CXY( ref_xp );
419    ref_ptr = GET_PTR( ref_xp );
420    ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) );
421
422#if DEBUG_USER_DIR_DESTROY
423if( DEBUG_USER_DIR_DESTROY < cycle )
424printk("\n[%s] thread[%x,%x] enter for user_dir (%x,%x) and process %x / cycle %d\n",
425__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, ref_pid, cycle );
426#endif
427
428    // get user pointer on dirent array
429    ident = dir->ident;
430
431    // build extended pointer on lock protecting open directories list
432    lock_xp = XPTR( ref_cxy , &ref_ptr->dir_lock );
433
434    // get lock protecting open directories list
435    remote_queuelock_acquire( lock_xp );
436
437    // remove dir from reference process xlist
438    xlist_unlink( XPTR( local_cxy , &dir->list ) );
439
440    // release lock protecting open directories list
441    remote_queuelock_release( lock_xp );
442
443    // To delete all copies of the vseg containing the dirent array, the client thread
444    // send parallel RPCs to all clusters containing a client process copy (including
445    // the local cluster). It blocks and deschedules when all RPCs have been sent,
446    // to wait all RPC responses, and will be unblocked by the last RPC server thread.
447    // It allocates a - shared - RPC descriptor in the stack,  because all parallel
448    // server threads use the same input arguments, and there is no out argument.
449
450    // get owner cluster identifier and process lpid
451    owner_cxy = CXY_FROM_PID( ref_pid );
452    lpid      = LPID_FROM_PID( ref_pid );
453
454    // get root of list of copies and lock from owner cluster
455    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
456    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
457
458    // mask IRQs
459    hal_disable_irq( &save_sr);
460
461    // client thread blocks itself
462    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
463
464    // initialize responses counter
465    responses = 0;
466
467    // initialize a shared RPC descriptor
468    rpc.rsp       = &responses;
469    rpc.blocking  = false;                  // non blocking behaviour for rpc_send()
470    rpc.index     = RPC_VMM_REMOVE_VSEG;
471    rpc.thread    = this;
472    rpc.lid       = this->core->lid;
473    rpc.args[0]   = ref_pid;
474    rpc.args[1]   = ident;
475
476    // take the lock protecting process copies
477    remote_queuelock_acquire( lock_xp );
478
479    // scan list of process copies
480    XLIST_FOREACH( root_xp , iter_xp )
481    {
482        // get extended pointer and cluster of process
483        xptr_t      process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
484        cxy_t       process_cxy = GET_CXY( process_xp );
485
486        // atomically increment responses counter
487        hal_atomic_add( &responses , 1 );
488
489#if (DEBUG_USER_DIR_DESTROY & 1)
490if(  DEBUG_USER_DIR_DESTROY < cycle )
491printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
492__FUNCTION__, this->process->pid, this->trdid, process_cxy );
493#endif
494
495        // send RPC to target cluster 
496        rpc_send( process_cxy , &rpc );
497    }
498
499    // release the lock protecting process copies
500    remote_queuelock_release( lock_xp );
501
502    // client thread deschedule
503    sched_yield("blocked on rpc_vmm_delete_vseg");
504 
505    // restore IRQs
506    hal_restore_irq( save_sr);
507
508    // release local user_dir_t structure
509    kmem_free( dir , bits_log2(sizeof(user_dir_t)) );
510
511#if DEBUG_USER_DIR_DESTROY
512cycle = (uint32_t)hal_get_cycles();
513if( DEBUG_USER_DIR_DESTROY < cycle )
514printk("\n[%s] thread[%x,%x] deleted user_dir (%x,%x) / cycle %d\n",
515__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, cycle );
516#endif
517
518}  // end user_dir_destroy()
Note: See TracBrowser for help on using the repository browser.