source: soft/giet_vm/giet_drivers/ioc_driver.c @ 407

Last change on this file since 407 was 350, checked in by alain, 11 years ago

Introducing two modifications regarding the locks protecting
exclusive access to the BDV and TTY peripheral channels:

  • use the giet_lock_t type to have only one lock per cache line.
  • store these locks in seg_kernel_data or in seg_kernel_uncdata, depending on the GIET_NO_HARD_CC configuration parameter, to have cacheable locks when it is possible.
File size: 16.1 KB
///////////////////////////////////////////////////////////////////////////////////
// File       : ioc_driver.c
// Date       : 23/05/2013
// Author     : alain greiner
// Maintainer : cesar fuguet
// Copyright (c) UPMC-LIP6
///////////////////////////////////////////////////////////////////////////////////
// The ioc_driver.c and ioc_driver.h files are part of the GIET-VM kernel.
//
// This abstract driver defines a generic API, supporting various physical
// block device controllers, including:
// - vci_block_device : single channel                              => bdv_driver
// - vci_ahci         : multi channels                              => hba_driver
// - sd_card          : single channel                              => sdc_driver
// - ramdisk          : single channel memory mapped virtual disk   => rdk_driver
//
// Only one block-device type can exist in the architecture; it must be
// defined by one of the following configuration variables in the hard_config.h
// file: USE_IOC_BDV, USE_IOC_SPI, USE_IOC_HBA, USE_IOC_RDK.
//
// Any physical driver xxx must provide the following API:
// - _xxx_init()
// - _xxx_read()
// - _xxx_write()
// - _xxx_get_status()
// - _xxx_get_block_size()
// The "channel" parameter is not transmitted to single channel devices.
//
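// For instance, the vci_block_device controller implements this API with the
// _bdv_init(), _bdv_read(), _bdv_write(), _bdv_get_status() and
// _bdv_get_block_size() functions, called below by the generic functions
// (BDV being a single channel device, no "channel" argument is transmitted).
//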
// The _ioc_read() and _ioc_write() functions are always blocking for
// the calling user program.
//
// These functions compute the physical address of the memory buffer before
// calling the proper physical device. They can be called in 3 modes:
//
// - In BOOT mode, these functions use the buffer virtual address
//   as a physical address if the MMU is not activated.
//   They make a V2P translation if the MMU is activated.
//   This mode is used to load the map.bin file (before MMU activation),
//   or to load the various .elf files (after MMU activation).
//
// - In KERNEL mode, these functions make a V2P translation to
//   compute the buffer physical address.
//   There is no checking of the user access rights to the memory buffer.
//   This mode must be used for an "open" system call.
//
// - In USER mode, these functions make a V2P translation to
//   compute the buffer physical address.
//   The user access rights to the memory buffer are checked.
//   This mode must be used for a "read" or "write" system call.
//
// The IOMMU can be activated or not:
//
// 1) When the IOMMU is used, a fixed size 2 Mbytes vseg is allocated to
// the IOC peripheral, in the I/O virtual space, and the user buffer is
// dynamically remapped in the IOMMU page table. The corresponding entry
// in the IOMMU PT1 is defined by the kernel _ioc_iommu_ix1 variable.
// The number of PT2 entries (i.e. the number of pages to be unmapped after
// the transfer) is dynamically computed and stored in the kernel
// _ioc_iommu_npages variable. It cannot be larger than 512.
// The user buffer is unmapped by the _ioc_completed() function when
// the transfer is completed.
//
// 2) If the IOMMU is not used, we check that the user buffer is mapped to a
// contiguous physical buffer (this is generally true because the user space
// page tables are statically constructed to use contiguous physical memory).
//
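// As a purely illustrative example, for a user buffer mapped at virtual
// address 0x00401100 (page offset 0x100):
// - with the IOMMU, if the IOC vseg uses the PT1 entry _ioc_iommu_ix1 = 5,
//   the address transmitted to the peripheral is the I/O space virtual
//   address (5 << 21) | 0x100 = 0x00A00100;
// - without the IOMMU, if the first physical page is ppn_first = 0x12345,
//   it is the physical address (0x12345 << 12) | 0x100 = 0x12345100.
//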
// Finally, the memory buffer must fulfill the following conditions:
// - The buffer must be word aligned,
// - The buffer must be mapped in user space for a user access,
// - The buffer must be writable in case of a (to_mem) access,
// - The total number of physical pages occupied by the user buffer cannot
//   be larger than 512 pages if the IOMMU is activated,
// - All physical pages occupied by the user buffer must be contiguous
//   if the IOMMU is not activated.
// An error code is returned if these conditions are not satisfied.
//
// The SEG_IOC_BASE virtual base address must be defined in hard_config.h,
// as it is used by the BDV, HBA and SPI drivers.
//
// If the RAMDISK is used, an extra memory segment with virtual base address
// SEG_RDK_BASE, used by the RDK driver, must also be defined in hard_config.h
// (an illustrative configuration is given below).
///////////////////////////////////////////////////////////////////////////////////
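// As an illustration only (the values and the selected controller are
// arbitrary), a hard_config.h file for an architecture using the
// vci_block_device controller could contain the following definitions:
//
//     #define SEG_IOC_BASE  0xB3000000
//     #define USE_IOB       0
//     #define USE_IOC_BDV   1
//     #define USE_IOC_SPI   0
//     #define USE_IOC_HBA   0
//     #define USE_IOC_RDK   0
//
// and the GIET_USE_IOMMU parameter must be defined in giet_config.h.
///////////////////////////////////////////////////////////////////////////////////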
// Implementation note:
// In order to share the code, the two _ioc_read() and _ioc_write() functions
// call the same _ioc_access() function.
///////////////////////////////////////////////////////////////////////////////////
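///////////////////////////////////////////////////////////////////////////////////
// Usage example (purely illustrative, not compiled): a "read" system call
// transferring 8 blocks of 512 bytes from the disk to a word aligned user
// buffer could call the driver as follows (the user_buf virtual address and
// the lba value are assumed to come from the system call arguments):
//
//     unsigned int error = _ioc_read( 0,              // channel (0 if HBA unused)
//                                     IOC_USER_MODE,  // user access rights checked
//                                     lba,            // index of the first block
//                                     user_buf,       // buffer virtual address
//                                     8 );            // number of blocks
//     if ( error ) _printf("[GIET ERROR] cannot read from block device\n");
///////////////////////////////////////////////////////////////////////////////////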

#include <giet_config.h>
#include <ioc_driver.h>
#include <bdv_driver.h>
#include <hba_driver.h>
#include <sdc_driver.h>
#include <rdk_driver.h>
#include <utils.h>
#include <tty_driver.h>
#include <iob_driver.h>
#include <ctx_handler.h>
#include <mmc_driver.h>
#include <vmem.h>

#if !defined( SEG_IOC_BASE )
# error: You must define SEG_IOC_BASE in the hard_config.h file
#endif

#if !defined( USE_IOB )
# error: You must define USE_IOB in the hard_config.h file
#endif

#if !defined(GIET_USE_IOMMU)
# error: You must define GIET_USE_IOMMU in the giet_config.h file
#endif

#if (USE_IOC_BDV + USE_IOC_SPI + USE_IOC_HBA + USE_IOC_RDK) != 1
# error: You must use only one IOC controller type (BDV or SPI or HBA or RDK)
#endif

#if USE_IOC_BDV
# include <bdv_driver.h>
#endif

#if USE_IOC_SPI
# include <sdc_driver.h>
#endif

#if USE_IOC_HBA
# include <hba_driver.h>
#endif

#if USE_IOC_RDK
# include <rdk_driver.h>
#endif

///////////////////////////////////////////////////////////////////////////////
// IOC global variables
///////////////////////////////////////////////////////////////////////////////

#define in_unckdata __attribute__((section (".unckdata")))

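// _ioc_iommu_ix1    : IOMMU PT1 index of the 2 Mbytes vseg allocated to the IOC.
// _ioc_iommu_npages : number of IOMMU PT2 entries (pages) to be unmapped when
//                     the transfer is completed.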
in_unckdata volatile unsigned int _ioc_iommu_ix1 = 0;
in_unckdata volatile unsigned int _ioc_iommu_npages;

///////////////////////////////////////////////////////////////////////////////
// This function transfers data between a memory buffer and the block device.
// The buffer length is (count*block_size) bytes.
// Arguments are:
// - to_mem     : from external storage to memory when non 0.
// - channel    : channel index (only used by multi channels controllers).
// - mode       : BOOT_PA / BOOT_VA / KERNEL / USER
// - lba        : first block index on the external storage.
// - buf_vaddr  : virtual base address of the memory buffer.
// - count      : number of blocks to be transferred.
// Returns 0 on success, > 0 on error.
///////////////////////////////////////////////////////////////////////////////
static unsigned int _ioc_access( unsigned int to_mem,
                                 unsigned int channel,
                                 unsigned int mode,
                                 unsigned int lba,
                                 unsigned int buf_vaddr,
                                 unsigned int count)
{

#if GIET_DEBUG_IOC_DRIVER
unsigned int procid  = _get_procid();
unsigned int cid     = procid / NB_PROCS_MAX;
unsigned int lpid    = procid % NB_PROCS_MAX;
unsigned int x       = cid >> Y_WIDTH;
unsigned int y       = cid & ((1<<Y_WIDTH) - 1);

_printf("\n[IOC DEBUG] Processor[%d,%d,%d] enters _ioc_access() at cycle %d\n"
        " - channel  = %d\n"
        " - mode     = %d\n"
        " - vaddr    = %x\n"
        " - sectors  = %d\n"
        " - lba      = %x\n",
        x, y, lpid, _get_proctime(), channel, mode, buf_vaddr, count, lba );
#endif

    unsigned int error;            // return value
    unsigned int pt_vbase;         // page table vbase address
    unsigned int vpn_min;          // first virtual page index covering buffer
    unsigned int vpn_max;          // last virtual page index covering buffer
    unsigned int vpn;              // current virtual page index
    unsigned int ppn;              // physical page number
    unsigned int flags;            // page protection flags
    unsigned int ix2;              // page index in the IOMMU PT2 page table
    unsigned int ppn_first = 0;    // first physical page number for user buffer
    unsigned int buf_xaddr = 0;    // user buffer virtual address in IO space (if IOMMU)
    paddr_t      buf_paddr = 0;    // user buffer physical address (if no IOMMU)

    // check buffer alignment
    if ((unsigned int) buf_vaddr & 0x3)
    {
        _printf("\n[GIET ERROR] in _ioc_access() : buffer not word aligned\n");
        _exit();
    }

    // check channel
    if ( (USE_IOC_HBA == 0) && (channel > 0) )
    {
        _printf("\n[GIET ERROR] in _ioc_access() : channel must be 0 when HBA not used\n");
        _exit();
    }

    unsigned int length = count << 9;  // count * 512 bytes

    // compute the memory buffer physical address
    if ( (mode == IOC_BOOT_MODE) && ((_get_mmu_mode() & 0x4) == 0) )  // identity mapping
    {
        buf_paddr = (paddr_t)buf_vaddr;
    }
    else                                                              // V2P translation required
    {
        // get page table virtual address
        pt_vbase = _get_context_slot(CTX_PTAB_ID);
        vpn_min  = buf_vaddr >> 12;
        vpn_max  = (buf_vaddr + length - 1) >> 12;

        // loop on all virtual pages covering the user buffer
        for (vpn = vpn_min, ix2 = 0 ; vpn <= vpn_max ; vpn++, ix2++ )
        {
            // get ppn and flags for each vpn
            unsigned int ko = _v2p_translate( (page_table_t*)pt_vbase,
                                              vpn,
                                              &ppn,
                                              &flags);
            // check access rights
            if ( ko )
            {
                _printf("\n[GIET ERROR] in _ioc_access() : buffer unmapped\n");
                return 1;
            }

            if ( (mode == IOC_USER_MODE) && ((flags & PTE_U) == 0) )
            {
                _printf("\n[GIET ERROR] in _ioc_access() : buffer not user accessible\n");
                return 1;
            }

            if ( ((flags & PTE_W) == 0 ) && to_mem )
            {
                _printf("\n[GIET ERROR] in _ioc_access() : buffer not writable\n");
                return 1;
            }

            // save first ppn value
            if (ix2 == 0) ppn_first = ppn;

#if GIET_USE_IOMMU

            // check buffer length < 2 Mbytes (512 pages)
            if (ix2 > 511)
            {
                _printf("\n[GIET ERROR] in _ioc_access() : user buffer > 2 Mbytes\n");
                return 1;
            }
            // map the physical page in IOMMU page table
            _iommu_add_pte2( _ioc_iommu_ix1,    // PT1 index
                             ix2,               // PT2 index
                             ppn,               // Physical page number
                             flags );           // Protection flags

            // compute user buffer virtual address in IO space
            buf_xaddr = (_ioc_iommu_ix1) << 21 | (buf_vaddr & 0xFFF);

#else

            // check that physical pages are contiguous
            if ((ppn - ppn_first) != ix2)
            {
                _printf("[GIET ERROR] in _ioc_access() : split physical buffer\n");
                return 1;
            }

            // compute user buffer physical address
            buf_paddr = (((paddr_t)ppn_first) << 12) | (buf_vaddr & 0xFFF);
#endif

        } // end for vpn
    }

#if GIET_USE_IOMMU

    // register the number of pages to be unmapped in IOMMU
    _ioc_iommu_npages = (vpn_max - vpn_min) + 1;

#endif

    if ( to_mem ) // memory write : invalidate data caches
    {
        // L1 cache
        if ( GIET_NO_HARD_CC ) _dcache_buf_invalidate((void *) buf_vaddr, length);

        // L2 cache (only if IOB used)
        if ( USE_IOB ) _mmc_inval( buf_paddr, length );
    }
    else         // memory read : update data caches
    {
        // L1 cache : nothing to do for L1 write-through

        // L2 cache (only if IOB used)
        if ( USE_IOB ) _mmc_sync( buf_paddr, length );
    }

    if ( GIET_USE_IOMMU ) buf_paddr = (paddr_t) buf_xaddr;

    ///////////////////////////////////////////
    // select the proper physical device
    ///////////////////////////////////////////

#if   ( USE_IOC_BDV )
    if (to_mem) error = _bdv_read ( mode, lba, buf_paddr, count);
    else        error = _bdv_write( mode, lba, buf_paddr, count);
#elif ( USE_IOC_SPI )
    if (to_mem) error = _sdc_read (mode, lba, buf_paddr, count);
    else        error = _sdc_write(mode, lba, buf_paddr, count);
#elif ( USE_IOC_HBA )
    if (to_mem) error = _hba_read (channel, mode, lba, buf_paddr, count);
    else        error = _hba_write(channel, mode, lba, buf_paddr, count);
#elif ( USE_IOC_RDK )
    if (to_mem) error = _rdk_read (lba, buf_vaddr, count);
    else        error = _rdk_write(lba, buf_vaddr, count);
#endif

    return error;
} // end _ioc_access()

///////////////////////////////////////////////////////////////////////////////
// This function checks the block size and deactivates the IOC interrupts.
// Returns 0 on success.
///////////////////////////////////////////////////////////////////////////////
unsigned int _ioc_init( unsigned int channel )
{

#if   ( USE_IOC_BDV )

    return _bdv_init();

#elif ( USE_IOC_SPI )

    return _sdc_init();

#elif ( USE_IOC_HBA )

    return _hba_init( channel );

#elif ( USE_IOC_RDK )

    return _rdk_init();

#endif

}

///////////////////////////////////////////////////////////////////////////////
// Transfer data from the block device to a memory buffer.
// - mode     : BOOT_PA / BOOT_VA / KERNEL / USER
// - lba      : first block index on the block device
// - buffer   : base address of the memory buffer (must be word aligned)
// - count    : number of blocks to be transferred.
// Returns 0 on success, > 0 on error.
///////////////////////////////////////////////////////////////////////////////
unsigned int _ioc_read( unsigned int channel,
                        unsigned int mode,
                        unsigned int lba,
                        void*        buffer,
                        unsigned int count)
{
    return _ioc_access( 1,        // read access
                        channel,
                        mode,
                        lba,
                        (unsigned int) buffer,
                        count );
}

///////////////////////////////////////////////////////////////////////////////
// Transfer data from a memory buffer to the block device.
// - mode     : BOOT_PA / BOOT_VA / KERNEL / USER
// - lba      : first block index on the block device
// - buffer   : base address of the memory buffer (must be word aligned)
// - count    : number of blocks to be transferred.
// Returns 0 on success, > 0 on error.
///////////////////////////////////////////////////////////////////////////////
unsigned int _ioc_write( unsigned int channel,
                         unsigned int mode,
                         unsigned int lba,
                         const void*  buffer,
                         unsigned int count )
{
    return _ioc_access( 0,        // write access
                        channel,
                        mode,
                        lba,
                        (unsigned int) buffer,
                        count );
}

///////////////////////////////////////////////////////////////////////////////
// This function returns the transfer status, and acknowledges the IRQ
// if the IOC controller is not busy.
// Returns 0 on success, > 0 on error.
///////////////////////////////////////////////////////////////////////////////
unsigned int _ioc_get_status( unsigned int  channel )
{

#if   ( USE_IOC_BDV )

    return _bdv_get_status( );

#elif ( USE_IOC_SPI )

    return _sdc_get_status( );

#elif ( USE_IOC_HBA )

    return _hba_get_status( channel );

#elif ( USE_IOC_RDK )

    _printf("[GIET ERROR] _ioc_get_status() should not be called");
    _printf(" when RAMDISK is used...\n");
    _exit();

    return 0;

#endif

}

///////////////////////////////////////////////////////////////////////////////
// This function returns the block_size with which the IOC has been configured.
///////////////////////////////////////////////////////////////////////////////
unsigned int _ioc_get_block_size()
{

#if   ( USE_IOC_BDV )

    return _bdv_get_block_size();

#elif ( USE_IOC_SPI )

    return _sdc_get_block_size();

#elif ( USE_IOC_HBA )

    return _hba_get_block_size();

#elif ( USE_IOC_RDK )

    return 512;

#endif

}


// Local Variables:
// tab-width: 4
// c-basic-offset: 4
// c-file-offsets:((innamespace . 0)(inline-open . 0))
// indent-tabs-mode: nil
// End:
// vim: filetype=c:expandtab:shiftwidth=4:tabstop=4:softtabstop=4