source: trunk/kernel/libk/user_dir.c @ 613

Last change on this file since 613 was 613, checked in by alain, 5 years ago
  • introduce the sys_rename.c file to support the mv command.
  • introduce the user_dir.c file to support the ls command.
File size: 17.4 KB
RevLine 
[613]1/*
2 * user_dir.c - kernel DIR related operations implementation.
3 *
4 * Authors   Alain   Greiner (2016,2017,2018)
5 *
6 * Copyright (c) UPMC Sorbonne Universites
7 *
8 * This file is part of ALMOS-MKH.
9 *
10 * ALMOS-MKH is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2.0 of the License.
13 *
14 * ALMOS-MKH is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
21 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
24#include <kernel_config.h>
25#include <hal_kernel_types.h>
26#include <hal_irqmask.h>
27#include <hal_remote.h>
28#include <thread.h>
29#include <xlist.h>
30#include <scheduler.h>
31#include <remote_queuelock.h>
32#include <user_dir.h>
33
34
35/////////////////////////////////////////////
36xptr_t user_dir_from_ident( intptr_t  ident )
37{
38    // get pointer on local process_descriptor
39    process_t * process = CURRENT_THREAD->process;
40
41    // get pointers on reference process
42    xptr_t      ref_xp  = process->ref_xp;
43    cxy_t       ref_cxy = GET_CXY( ref_xp );
44    process_t * ref_ptr = GET_PTR( ref_xp );
45
46    // get extended pointers on open directories list and lock 
47    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->dir_root );
48    xptr_t lock_xp = XPTR( ref_cxy , &ref_ptr->dir_lock );
49
50    // get lock protecting open directories list
51    remote_queuelock_acquire( lock_xp );
52 
53    // scan reference process dir list
54    xptr_t           iter_xp;
55    xptr_t           dir_xp;
56    cxy_t            dir_cxy;
57    user_dir_t     * dir_ptr;
58    intptr_t         current;
59    bool_t           found = false;
60           
61    XLIST_FOREACH( root_xp , iter_xp )
62    {
63        dir_xp  = XLIST_ELEMENT( iter_xp , user_dir_t , list );
64        dir_cxy = GET_CXY( dir_xp );
65        dir_ptr = GET_PTR( dir_xp );
66        current = (intptr_t)hal_remote_lpt( XPTR( dir_cxy , &dir_ptr->ident ) );   
67        if( ident == current )
68        {
69            found = true;
70            break;
71        }
72    }
73
74    // relese lock protecting open directories list
75    remote_queuelock_release( lock_xp );
76 
77    if( found == false )  return XPTR_NULL;
78    else                  return dir_xp;
79
80}  // end user_dir_from_ident()
81
///////////////////////////////////////////////////
// Create a user-space "dirent array" for the directory identified by <inode>:
// allocate a user_dir_t descriptor, fill a list of physical pages with dirent
// entries copied from the directory mapper, map these pages in the reference
// process user space (ANON vseg), and register the descriptor in the reference
// process list of open directories.
// Returns a local pointer on the user_dir_t descriptor / NULL on failure.
///////////////////////////////////////////////////
user_dir_t * user_dir_create( vfs_inode_t * inode )
{ 
    user_dir_t    * dir;               // local pointer on created user_dir_t
    vseg_t        * vseg;              // local pointer on dirent array vseg
    uint32_t        vseg_size;         // size of vseg in bytes
    process_t     * process;           // local pointer on calling process
    xptr_t          ref_xp;            // extended pointer on reference process
    process_t     * ref_ptr;           // local pointer on reference process
    cxy_t           ref_cxy;           // reference process cluster identifier
    xptr_t          gpt_xp;            // extended pointer on reference process GPT
    uint32_t        gpt_attributes;    // attributes for all mapped gpt entries
    uint32_t        dirents_per_page;  // number of dirent descriptors per page
    xptr_t          page_xp;           // extended pointer on page descriptor 
    page_t        * page;              // local pointer on page descriptor
    xptr_t          base_xp;           // extended pointer on physical page base
    struct dirent * base;              // local pointer on physical page base
    uint32_t        total_dirents;     // total number of dirents in dirent array
    uint32_t        total_pages;       // total number of pages for dirent array
    vpn_t           vpn;               // first page in dirent array vseg
    ppn_t           ppn;               // ppn of currently allocated physical page
    uint32_t        entries;           // number of dirent actually copied in one page
    uint32_t        first_entry;       // index of first dentry to copy in dirent array
    bool_t          done;              // last entry found and copied when true
    list_entry_t    root;              // root of temporary list of allocated pages
    uint32_t        page_id;           // page index in list of physical pages
    kmem_req_t      req;               // kmem request descriptor
    error_t         error;

    // get pointer on local process descriptor
    process = CURRENT_THREAD->process;

#if DEBUG_USER_DIR
uint32_t cycle = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
if( cycle > DEBUG_USER_DIR )
printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) / cycle %d\n",
__FUNCTION__, process->pid, this->trdid, local_cxy, inode, cycle );
#endif

// check dirent size : the "<< 6" and "vseg_size" computations below rely on it
assert( ( sizeof(struct dirent) == 64), "sizeof(dirent) != 64\n");

    // compute number of dirent per page (page_size / 64)
    dirents_per_page = CONFIG_PPM_PAGE_SIZE >> 6;
   
    // initialise temporary list of pages
    list_root_init( &root );

    // get pointers on reference process
    ref_xp  = process->ref_xp;
    ref_cxy = GET_CXY( ref_xp );
    ref_ptr = GET_PTR( ref_xp );

    // allocate memory for a local user_dir descriptor
    req.type  = KMEM_DIR;
    req.flags = AF_ZERO;
    dir       = kmem_alloc( &req );

    if( dir == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate user_dir_t in cluster %x\n",
        __FUNCTION__, local_cxy );
        return NULL;
    }

    // Build an initialize the dirent array as a list of physical pages.
    // For each iteration in this while loop:
    // - allocate one physical 4 Kbytes (64 dirent slots)
    // - call the relevant FS specific function to scan the directory mapper,
    //   and copy up to 64 entries in the page.
    // - register the page in a temporary list using the embedded page list_entry
    // - exit when the last entry has been found (done == true).

    // initialize loops variables
    done          = false;
    total_dirents = 0;
    total_pages   = 0;
    first_entry   = 0;

    while( done == false )  // loop on physical pages
    {
        // allocate one physical page
        req.type  = KMEM_PAGE;
        req.size  = 0;
        req.flags = AF_ZERO;
        page      = kmem_alloc( &req );

        if( page == NULL )
        {
            printk("\n[ERROR] in %s : cannot allocate page in cluster %x\n",
            __FUNCTION__, ref_cxy );
            goto user_dir_create_failure;
        }

        // get pointer on page base (array of dirents)
        page_xp  = XPTR( local_cxy , page );
        base_xp  = ppm_page2base( page_xp );
        base     = GET_PTR( base_xp );

        // call the relevant FS specific function to copy up to 64 dirents in page
        error = vfs_fs_get_user_dir( inode,
                                     base,
                                     dirents_per_page,
                                     first_entry,
                                     false,        // don't create missing inodes
                                     &entries,
                                     &done );
        if( error )
        {
            printk("\n[ERROR] in %s : cannot initialise dirent array in cluster %x\n",
            __FUNCTION__, ref_cxy );
            goto user_dir_create_failure;
        }

        // increment number of written dirents
        total_dirents += entries;

        // register page in temporary list
        list_add_last( &root , &page->list ); 
        total_pages++; 

        // set first_entry for next iteration
        first_entry = total_dirents;

    } // end while
       
    // compute required vseg size for a 64 bytes dirent
    vseg_size = total_dirents << 6;

    // create an ANON vseg and register it in reference process VSL
    // (local call when the reference process is local, RPC otherwise)
    if( local_cxy == ref_cxy )
    {
        vseg = vmm_create_vseg( process,
                                VSEG_TYPE_ANON,
                                0,                      // vseg base (unused)
                                vseg_size,
                                0,                      // file offset (unused)
                                0,                      // file_size (unused)
                                XPTR_NULL,              // mapper (unused)
                                ref_cxy );
    }
    else
    {
        rpc_vmm_create_vseg_client( ref_cxy,
                                    ref_ptr,
                                    VSEG_TYPE_ANON,
                                    0,                     // vseg base (unused)
                                    vseg_size,
                                    0,                     // file offset (unused)
                                    0,                     // file size (unused)
                                    XPTR_NULL,             // mapper (unused)
                                    ref_cxy,
                                    &vseg ); 
    }
    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s : cannot create vseg for DIR in cluster %x\n",
        __FUNCTION__, ref_cxy);
        goto user_dir_create_failure;
    }

#if (DEBUG_USER_DIR & 1)
if( cycle > DEBUG_USER_DIR )
printk("\n[%s] thread[%x,%x] allocated vseg ANON / base %x / size %x\n",
__FUNCTION__, process->pid, this->trdid, vseg->min, vseg->max - vseg->min );
#endif

// check vseg size
assert( (total_pages == hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_size ) ) ),
"unconsistent vseg size for dirent array" );

    // build extended pointer on reference process GPT, PTE attributes and ppn
    gpt_xp         = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
    gpt_attributes = GPT_MAPPED   |
                     GPT_SMALL    |
                     GPT_READABLE |
                     GPT_CACHABLE |
                     GPT_USER     ;

    // get first vpn from vseg descriptor
    vpn = hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_base ) );

    // scan the list of allocated physical pages to map
    // all physical pages in the in the reference process GPT
    page_id = 0;
    while( list_is_empty( &root ) == false )
    {
        // get pointer on first page descriptor
        page = LIST_FIRST( &root , page_t , list );

        // compute ppn
        ppn = ppm_page2ppn( XPTR( local_cxy , page ) );
       
        error = hal_gpt_set_pte( gpt_xp,
                                 vpn + page_id,
                                 gpt_attributes,
                                 ppn );
        if( error )
        {
            printk("\n[ERROR] in %s : cannot map vpn %x in GPT\n",
            __FUNCTION__, (vpn + page_id) );
            // use the non blocking RPC to delete the remote vseg
            // NOTE(review): pages still linked in the temporary list are not
            // released on this path, unlike the user_dir_create_failure path
            // below — looks like a leak; confirm against vseg deletion semantics.
            rpc_desc_t     desc;
            desc.index     = RPC_VMM_DELETE_VSEG;
            desc.responses = 1;
            desc.thread    = CURRENT_THREAD;
            desc.lid       = CURRENT_THREAD->core->lid;
            desc.blocking  = true;
            desc.args[0]   = process->pid;
            desc.args[1]   = vpn << CONFIG_PPM_PAGE_SHIFT;
            rpc_vmm_delete_vseg_client( ref_cxy , &desc );
            // release the user_dir descriptor
            req.type = KMEM_DIR;
            req.ptr  = dir;
            kmem_free( &req );
            return NULL;
        }

#if (DEBUG_USER_DIR & 1)
if( cycle > DEBUG_USER_DIR )
printk("\n[%s] thread[%x,%x] mapped vpn %x to ppn %x\n",
__FUNCTION__, process->pid, this->trdid, vpn + page_id, ppn );
#endif

        // remove the page from temporary list
        list_unlink( &page->list );

        page_id++;

    }  // end map loop

// check number of pages
assert( (page_id == total_pages) , "unconsistent pages number\n" );

    // initialise user_dir_t structure
    // the "ident" is the user-space base address of the dirent array
    dir->current = 0;
    dir->entries = total_dirents;
    dir->ident   = (intptr_t)(vpn << CONFIG_PPM_PAGE_SHIFT);

    // build extended pointers on root and lock of user_dir xlist in ref process
    xptr_t root_xp  = XPTR( ref_cxy , &ref_ptr->dir_root );
    xptr_t lock_xp  = XPTR( ref_cxy , &ref_ptr->dir_lock );

    // build extended pointer on list field in user_dir structure
    xptr_t entry_xp = XPTR( local_cxy , &dir->list );

    // get lock protecting open directories list
    remote_queuelock_acquire( lock_xp );

    // register user_dir_t in reference process 
    xlist_add_first( root_xp , entry_xp );

    // release lock protecting open directories list
    remote_queuelock_release( lock_xp );

#if DEBUG_USER_DIR
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_USER_DIR )
printk("\n[%s] thread[%x,%x] created user_dir (%x,%x) / %d entries / cycle %d\n",
__FUNCTION__, process->pid, this->trdid, local_cxy, dir, total_dirents, cycle );
#endif

    return dir;

user_dir_create_failure:

    // release local user_dir_t structure
    req.type = KMEM_DIR;
    req.ptr  = dir;
    kmem_free( &req );

    // release local physical pages
    while( list_is_empty( &root ) == false )
    {
        page = LIST_FIRST( &root , page_t , list );
        req.type  = KMEM_PAGE;
        req.ptr   = page;
        kmem_free( &req );
    }

    return NULL;

}  // end user_dir_create()
366
/////////////////////////////////////////
// Destroy the user_dir_t descriptor identified by <dir>: remove it from the
// reference process list of open directories, delete all copies of the vseg
// containing the dirent array (parallel RPCs to all clusters holding a copy
// of the client process), and release the user_dir_t descriptor itself.
/////////////////////////////////////////
void user_dir_destroy( user_dir_t * dir )
{
    process_t    * process;    // local pointer on client process
    thread_t     * this;       // local pointer on client thread
    cluster_t    * cluster;    // local pointer on local cluster
    intptr_t       ident;      // user pointer on dirent array
    xptr_t         ref_xp;     // extended pointer on reference process
    cxy_t          ref_cxy;    // reference process cluster identifier
    process_t    * ref_ptr;    // local pointer on reference process
    xptr_t         root_xp;    // root of xlist
    xptr_t         lock_xp;    // extended pointer on lock protecting xlist
    xptr_t         iter_xp;    // iterator in xlist
    reg_t          save_sr;    // for critical section
    pid_t          pid;        // client process identifier
    cxy_t          owner_cxy;  // owner process cluster
    lpid_t         lpid;       // process local index
    rpc_desc_t     rpc;        // rpc descriptor
     
    // get pointers on client process & thread
    this    = CURRENT_THREAD;
    process = this->process;
    cluster = LOCAL_CLUSTER;

#if DEBUG_USER_DIR
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_USER_DIR )
printk("\n[%s] thread[%x,%x] enter for user_dir (%x,%x) / cycle %d\n",
__FUNCTION__, process->pid, this->trdid, local_cxy, dir, cycle );
#endif

    // get user pointer on dirent array (used as vseg base by the delete RPC)
    ident = dir->ident;

    // get pointers on reference process
    ref_xp  = process->ref_xp;
    ref_cxy = GET_CXY( ref_xp );
    ref_ptr = GET_PTR( ref_xp );

    // build extended pointer on lock protecting open directories list
    lock_xp = XPTR( ref_cxy , &ref_ptr->dir_lock );

    // get lock protecting open directories list
    remote_queuelock_acquire( lock_xp );

    // remove dir from reference process xlist
    xlist_unlink( XPTR( local_cxy , &dir->list ) );

    // release lock protecting open directories list
    remote_queuelock_release( lock_xp );

    // To delete all copies of the vseg containing the dirent array, the client thread
    // send parallel RPCs to all clusters containing a client process copy (including
    // the local cluster). It blocks and deschedules when all RPCs have been sent,
    // to wait all RPC responses, and will be unblocked by the last RPC server thread.
    // It allocates a - shared - RPC descriptor in the stack,  because all parallel
    // server threads use the same input arguments, and the same response field.

    // get owner cluster identifier and process lpid
    pid       = process->pid;
    owner_cxy = CXY_FROM_PID( pid );
    lpid      = LPID_FROM_PID( pid );

    // get root of list of copies and lock from owner cluster
    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

    // mask IRQs
    hal_disable_irq( &save_sr);

    // client thread blocks itself
    // (must be done BEFORE sending the RPCs, so that the last server
    //  thread cannot unblock it before it is actually blocked)
    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );

    // initialize RPC descriptor shared fields
    rpc.responses = 0;
    rpc.blocking  = false;
    rpc.index     = RPC_VMM_DELETE_VSEG;
    rpc.thread    = this;
    rpc.lid       = this->core->lid;
    rpc.args[0]   = process->pid;
    rpc.args[1]   = ident;

    // take the lock protecting process copies
    remote_queuelock_acquire( lock_xp );

    // scan list of process copies
    XLIST_FOREACH( root_xp , iter_xp )
    {
        // get extended pointer and cluster of process
        xptr_t      process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        cxy_t       process_cxy = GET_CXY( process_xp );

        // atomically increment responses counter
        // (one expected response per target cluster)
        hal_atomic_add( (void *)&rpc.responses , 1 );

        // call RPC 
        rpc_vmm_delete_vseg_client( process_cxy , &rpc );

    }  // end list of copies

    // release the lock protecting process copies
    remote_queuelock_release( lock_xp );

    // client thread deschedule ; it will be unblocked by the last RPC server
    sched_yield("blocked on rpc_vmm_unmap_vseg");
 
    // restore IRQs
    hal_restore_irq( save_sr);

    // release local user_dir_t structure
    kmem_req_t  req;
    req.type = KMEM_DIR;
    req.ptr  = dir;
    kmem_free( &req );

#if DEBUG_USER_DIR
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_USER_DIR )
printk("\n[%s] thread[%x,%x] deleted user_dir (%x,%x) / cycle %d\n",
__FUNCTION__, process->pid, this->trdid, local_cxy, dir, cycle );
#endif

}  // end user_dir_destroy()
Note: See TracBrowser for help on using the repository browser.