source: soft/giet_vm/sys/drivers.c @ 253

Last change on this file since 253 was 253, checked in by alain, 11 years ago

1/ introducing support to display images on the frame buffer
with the vci_chbuf_dma (in stdio.c and drivers.c)
2/ introducing support for the mem_cache configuration segment,
as the memory cache is considered as another addressable peripheral type
(in drivers.c)
3/ Introducing the new "increment" parameter in the mapping header.
This parameter defines the virtual address increment for the vsegs
associated with the replicated peripherals (ICU, XICU, MDMA, TIMER, MMC).
This parameter is mandatory, and all map.xml files in the "mappings"
directory have been updated.

1///////////////////////////////////////////////////////////////////////////////////
2// File     : drivers.c
3// Date     : 23/05/2013
4// Author   : alain greiner
5// Copyright (c) UPMC-LIP6
6///////////////////////////////////////////////////////////////////////////////////
7// The drivers.c and drivers.h files are part of the GIET-VM kernel.
8//
9// They contain the drivers for the peripherals available in the SoCLib library:
10// - vci_multi_tty
11// - vci_multi_timer
12// - vci_multi_dma
13// - vci_multi_icu
14// - vci_xicu
15// - vci_gcd
16// - vci_frame_buffer
17// - vci_block_device
18// - vci_multi_nic
19// - vci_chbuf_dma
20//
21// For the peripherals replicated in each cluster (ICU, TIMER, XCU, DMA, MMC),
22// the corresponding (virtual) base addresses must be completed by an offset
23// depending on the cluster index.
24//
25// The following global parameters must be defined in the hard_config.h file:
26// - NB_CLUSTERS   
27// - NB_PROCS_MAX 
28// - NB_TIM_CHANNELS   
29// - NB_DMA_CHANNELS     
30// - NB_TTY_CHANNELS, NB_IOC_CHANNELS, NB_NIC_CHANNELS, NB_CMA_CHANNELS, USE_XICU, USE_IOB
31//
32// The following virtual base addresses must be defined in the giet_vsegs.ld file:
33// - seg_icu_base
34// - seg_xcu_base
35// - seg_tim_base
36// - seg_dma_base
37// - seg_tty_base
38// - seg_gcd_base
39// - seg_fbf_base
40// - seg_ioc_base
41// - seg_nic_base
42// - seg_cma_base
43// - seg_iob_base
44// - seg_mmc_base
45// - vseg_cluster_increment
46///////////////////////////////////////////////////////////////////////////////////
47
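///////////////////////////////////////////////////////////////////////////////////
// Illustrative only (not generated from an actual map.xml): a minimal sketch of
// the hard_config.h fragment expected by the checks below, assuming a hypothetical
// platform with 4 clusters of 4 processors. The real values are produced from the
// mapping description.
//
//     #define NB_CLUSTERS       4
//     #define NB_PROCS_MAX      4
//     #define NB_TIM_CHANNELS   1
//     #define NB_DMA_CHANNELS   1
//     #define NB_TTY_CHANNELS   8
//     #define NB_IOC_CHANNELS   1
//     #define NB_NIC_CHANNELS   1
//     #define NB_CMA_CHANNELS   1
//     #define USE_XICU          1
//     #define USE_IOB           0
///////////////////////////////////////////////////////////////////////////////////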
48#include <vm_handler.h>
49#include <sys_handler.h>
50#include <giet_config.h>
51#include <drivers.h>
52#include <common.h>
53#include <hwr_mapping.h>
54#include <mips32_registers.h>
55#include <ctx_handler.h>
56
57#if !defined(NB_CLUSTERS)
58# error: You must define NB_CLUSTERS in the hard_config.h file
59#endif
60
61#if (NB_CLUSTERS > 256)
62# error: NB_CLUSTERS cannot be larger than 256!
63#endif
64
65#if !defined(NB_PROCS_MAX)
66# error: You must define NB_PROCS_MAX in the hard_config.h file
67#endif
68
69#if (NB_PROCS_MAX > 8)
70# error: NB_PROCS_MAX cannot be larger than 8!
71#endif
72
73#if !defined(GIET_USE_IOMMU)
74# error: You must define GIET_USE_IOMMU in the giet_config.h file
75#endif
76
77#if !defined(NB_TTY_CHANNELS)
78# error: You must define NB_TTY_CHANNELS in the hard_config.h file
79#endif
80
81#if (NB_TTY_CHANNELS < 1)
82# error: NB_TTY_CHANNELS cannot be smaller than 1!
83#endif
84
85#if !defined(NB_DMA_CHANNELS)
86# error: You must define NB_DMA_CHANNELS in the hard_config.h file
87#endif
88
89#if (NB_DMA_CHANNELS > 8)
90# error: NB_DMA_CHANNELS cannot be larger than 8!
91#endif
92
93#if !defined(NB_TIM_CHANNELS)
94#define NB_TIM_CHANNELS 0
95#endif
96
97#if ( (NB_TIM_CHANNELS + NB_PROCS_MAX) > 32 )
98# error: NB_TIM_CHANNELS + NB_PROCS_MAX cannot be larger than 32
99#endif
100
101#if !defined(NB_IOC_CHANNELS)
102# error: You must define NB_IOC_CHANNELS in the hard_config.h file
103#endif
104
105#if ( NB_IOC_CHANNELS > 8 )
106# error: NB_IOC_CHANNELS cannot be larger than 8
107#endif
108
109#if !defined(NB_NIC_CHANNELS)
110# error: You must define NB_NIC_CHANNELS in the hard_config.h file
111#endif
112
113#if ( NB_NIC_CHANNELS > 8 )
114# error: NB_NIC_CHANNELS cannot be larger than 8
115#endif
116
117#if !defined(NB_CMA_CHANNELS)
118# error: You must define NB_CMA_CHANNELS in the hard_config.h file
119#endif
120
121#if ( NB_CMA_CHANNELS > 8 )
122# error: NB_CMA_CHANNELS cannot be larger than 8
123#endif
124
125#if !defined( USE_XICU )
126# error: You must define USE_XICU in the hard_config.h file
127#endif
128
129#if !defined( USE_IOB )
130# error: You must define USE_IOB in the hard_config.h file
131#endif
132
133
134#define in_unckdata __attribute__((section (".unckdata")))
135
136//////////////////////////////////////////////////////////////////////////////
137//     Timers driver
138//////////////////////////////////////////////////////////////////////////////
139// This peripheral is replicated in all clusters.
140// The timers can be implemented in a vci_timer component or in a vci_xicu
141// component (depending on the USE_XICU parameter).
142// There is one timer (or xicu) component per cluster.
143// There are two types of timers:
144// - "system" timers : one per processor, used for context switch.
145//   local_id in [0, NB_PROCS_MAX-1],
146// - "user" timers : requested by the task in the mapping_info data structure.
147//   For each user timer, the timer_id is stored in the context of the task.
148// The global index is cluster_id * (NB_PROCS_MAX+NB_TIM_CHANNELS) + local_id
149//////////////////////////////////////////////////////////////////////////////
150// The (virtual) base address of the associated segment is:
151//
152//       timer_address = seg_tim_base + cluster_id * vseg_cluster_increment
153//   or  timer_address = seg_xcu_base + cluster_id * vseg_cluster_increment
154//
155////////////////////////////////////////////////////////////////////////////////
156
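//////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not used by the kernel code below): the global timer
// index and the per-cluster base address described above. The helper names
// _timer_example_global_index() and _timer_example_base() are hypothetical.
//////////////////////////////////////////////////////////////////////////////
static __attribute__((unused))
unsigned int _timer_example_global_index( unsigned int cluster_id,
                                          unsigned int local_id )
{
    return cluster_id * (NB_PROCS_MAX + NB_TIM_CHANNELS) + local_id;
}
static __attribute__((unused))
unsigned int * _timer_example_base( unsigned int cluster_id )
{
#if USE_XICU
    return (unsigned int *) ((unsigned int)&seg_xcu_base +
                             (cluster_id * (unsigned int)&vseg_cluster_increment));
#else
    return (unsigned int *) ((unsigned int)&seg_tim_base +
                             (cluster_id * (unsigned int)&vseg_cluster_increment));
#endif
}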
157// User Timer signaling variables
158
159#if (NB_TIM_CHANNELS > 0)
160in_unckdata volatile unsigned char _user_timer_event[NB_CLUSTERS * NB_TIM_CHANNELS] 
161                            = { [0 ... ((NB_CLUSTERS * NB_TIM_CHANNELS) - 1)] = 0 };
162#endif
163
164//////////////////////////////////////////////////////////////////////////////
165//     _timer_start()
166// This function activates a timer in the vci_timer (or vci_xicu) component
167// by writing the period value in the proper register.
168// It can be used either by the kernel to initialise a "system" timer,
169// or by a task (through a system call) to configure a "user" timer.
170// Returns 0 if success, > 0 if error.
171//////////////////////////////////////////////////////////////////////////////
172unsigned int _timer_start( unsigned int cluster_id, 
173                           unsigned int local_id, 
174                           unsigned int period) 
175{
176    // parameters checking
177    if (cluster_id >= NB_CLUSTERS)  return 1;
178    if (local_id >= NB_TIM_CHANNELS)  return 2;
179
180#if USE_XICU
181    unsigned int* timer_address = (unsigned int *) ((unsigned int)&seg_xcu_base + 
182                                  (cluster_id * (unsigned int)&vseg_cluster_increment));
183
184    timer_address[XICU_REG(XICU_PTI_PER, local_id)] = period;
185#else
186    unsigned int* timer_address = (unsigned int *) ((unsigned int)&seg_tim_base + 
187                                  (cluster_id * (unsigned int)&vseg_cluster_increment));
188
189    timer_address[local_id * TIMER_SPAN + TIMER_PERIOD] = period;
190    timer_address[local_id * TIMER_SPAN + TIMER_MODE] = 0x3;
191#endif
192    return 0;
193}
194
195//////////////////////////////////////////////////////////////////////////////
196//     _timer_stop()
197// This function deactivates a timer in the vci_timer (or vci_xicu) component
198// by writing in the proper register.
199// Returns 0 if success, > 0 if error.
200//////////////////////////////////////////////////////////////////////////////
201unsigned int _timer_stop( unsigned int cluster_id, 
202                          unsigned int local_id) 
203{
204    // parameters checking
205    if (cluster_id >= NB_CLUSTERS)  return 1;
206    if (local_id >= NB_TIM_CHANNELS)  return 2;
207
208#if USE_XICU
209    unsigned int * timer_address = (unsigned int *) ((unsigned int)&seg_xcu_base + 
210                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
211
212    timer_address[XICU_REG(XICU_PTI_PER, local_id)] = 0;
213#else
214    unsigned int* timer_address = (unsigned int *) ((unsigned int)&seg_tim_base + 
215                                  (cluster_id * (unsigned int)&vseg_cluster_increment));
216
217    timer_address[local_id * TIMER_SPAN + TIMER_MODE] = 0;
218#endif
219    return 0;
220}
221
222//////////////////////////////////////////////////////////////////////////////
223//     _timer_reset_irq()
224// This function acknowledges a timer interrupt in the vci_timer (or vci_xicu)
225// component by reading/writing the proper register.
226// It can be used either by the isr_switch() for a "system" timer,
227// or by the _isr_timer() for a "user" timer.
228// Returns 0 if success, > 0 if error.
229//////////////////////////////////////////////////////////////////////////////
230unsigned int _timer_reset_irq( unsigned int cluster_id, 
231                               unsigned int local_id ) 
232{
233    // parameters checking
234    if (cluster_id >= NB_CLUSTERS)  return 1;
235    if (local_id >= NB_TIM_CHANNELS)  return 2;
236
237#if USE_XICU
238    unsigned int * timer_address = (unsigned int *) ((unsigned int)&seg_xcu_base +
239                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
240
241    unsigned int bloup = timer_address[XICU_REG(XICU_PTI_ACK, local_id)]; // read acknowledges the IRQ
242    bloup++; // keep the compiler from warning about the unused variable
243#else
244    unsigned int * timer_address = (unsigned int *) ((unsigned int)&seg_tim_base + 
245                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
246
247    timer_address[local_id * TIMER_SPAN + TIMER_RESETIRQ] = 0;
248#endif
249    return 0;
250}
251
252///////////////////////////////////////////////////////////////////////
253// _timer_reset_irq_cpt()
254///////////////////////////////////////////////////////////////////////
255// This function resets the period at the end of which
256// an interrupt is sent. To do so, it re-writes the period
257// in the proper register, which causes the counter to restart.
258// The period value is read from the same (TIMER_PERIOD) register,
259// which is why the code appears to do nothing useful (it reads a value
260// from a register and writes the same value back into that register).
261// This function is called during a context switch (user or preemptive).
262///////////////////////////////////////////////////////////////////////
263unsigned int _timer_reset_irq_cpt( unsigned int cluster_id, 
264                                   unsigned int local_id) {
265    // parameters checking
266    if (cluster_id >= NB_CLUSTERS) {
267        return 1;
268    }
269    if (local_id >= NB_TIM_CHANNELS) {
270        return 2;
271    }
272
273#if USE_XICU
274    unsigned int * timer_address = (unsigned int *) ((unsigned int) &seg_xcu_base + 
275                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
276
277    unsigned int timer_period = timer_address[XICU_REG(XICU_PTI_PER, local_id)];
278
279    // write 0 first: if the timer is currently running, re-writing the period
280    // alone does not reset the corresponding timer counter
281    timer_address[XICU_REG(XICU_PTI_PER, local_id)] = 0;
282    timer_address[XICU_REG(XICU_PTI_PER, local_id)] = timer_period;
283#else
284    // We suppose that the TIMER_MODE register value is 0x3
285    unsigned int * timer_address = (unsigned int *) ((unsigned int)&seg_tim_base + 
286                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
287
288    unsigned int timer_period = timer_address[local_id * TIMER_SPAN + TIMER_PERIOD];
289
290    timer_address[local_id * TIMER_SPAN + TIMER_PERIOD] = timer_period;
291#endif
292
293    return 0;
294}
295
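///////////////////////////////////////////////////////////////////////
// Illustrative sketch (hypothetical helper, not called by the kernel):
// the typical sequence performed on a "system" timer interrupt, i.e.
// acknowledge the IRQ, then restart the period counter.
///////////////////////////////////////////////////////////////////////
static __attribute__((unused))
unsigned int _timer_example_ack_and_restart( unsigned int cluster_id,
                                             unsigned int local_id )
{
    if ( _timer_reset_irq( cluster_id, local_id ) ) return 1;
    return _timer_reset_irq_cpt( cluster_id, local_id );
}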
296/////////////////////////////////////////////////////////////////////////////////
297//     VciMultiTty driver
298/////////////////////////////////////////////////////////////////////////////////
299// There is only one multi_tty controller in the architecture.
300// The total number of TTYs is defined by the configuration parameter NB_TTY_CHANNELS.
301// The "system" terminal is TTY[0].
302// The "user" TTYs are allocated to applications by the GIET in the boot phase,
303// as defined in the mapping_info data structure. The corresponding tty_id must
304// be stored in the context of the task by the boot code.
305// The TTY address is : seg_tty_base + tty_id*TTY_SPAN
306/////////////////////////////////////////////////////////////////////////////////
307
308// TTY variables
309in_unckdata volatile unsigned char _tty_get_buf[NB_TTY_CHANNELS];
310in_unckdata volatile unsigned char _tty_get_full[NB_TTY_CHANNELS] 
311                                     = { [0 ... NB_TTY_CHANNELS - 1] = 0 };
312in_unckdata unsigned int _tty_put_lock = 0;  // protect kernel TTY[0]
313
314////////////////////////////////////////////////////////////////////////////////
315//      _tty_error()
316////////////////////////////////////////////////////////////////////////////////
317void _tty_error(unsigned int tty_id, unsigned int task_id) 
318{
319    unsigned int proc_id = _procid();
320
321    _get_lock(&_tty_put_lock);
322    if (tty_id == 0xFFFFFFFF) _puts("\n[GIET ERROR] no TTY assigned to the task ");
323    else                      _puts("\n[GIET ERROR] TTY index too large for task ");
324    _putd(task_id);
325    _puts(" on processor ");
326    _putd(proc_id);
327    _puts("\n");
328    _release_lock(&_tty_put_lock);
329}
330
331
332/////////////////////////////////////////////////////////////////////////////////
333//      _tty_write()
334// Write one or several characters directly from a fixed-length user buffer to
335// the TTY_WRITE register of the TTY controller.
336// It doesn't use the TTY_PUT_IRQ interrupt and the associated kernel buffer.
337// This is a non blocking call: it tests the TTY_STATUS register, and stops
338// the transfer as soon as the TTY_STATUS[WRITE] bit is set.
339// The function returns  the number of characters that have been written.
340/////////////////////////////////////////////////////////////////////////////////
341unsigned int _tty_write(const char * buffer, 
342                        unsigned int length) 
343{
344    unsigned int nwritten;
345    unsigned int tty_id = _get_context_slot(CTX_TTY_ID);
346    unsigned int* tty_address = (unsigned int *) &seg_tty_base;
347
348    for (nwritten = 0; nwritten < length; nwritten++) 
349    {
350        // check tty's status
351        if ((tty_address[tty_id * TTY_SPAN + TTY_STATUS] & 0x2) == 0x2) break;
352        tty_address[tty_id * TTY_SPAN + TTY_WRITE] = (unsigned int) buffer[nwritten];
353    }
354    return nwritten;
355}
356
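/////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (hypothetical helper): since _tty_write() is non-blocking
// and may accept fewer characters than requested, a caller that wants the whole
// string displayed can simply retry on the remaining characters.
/////////////////////////////////////////////////////////////////////////////////
static __attribute__((unused))
void _tty_example_write_all( const char * buffer,
                             unsigned int length )
{
    unsigned int sent = 0;
    while ( sent < length ) sent += _tty_write( buffer + sent, length - sent );
}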
357//////////////////////////////////////////////////////////////////////////////
358//      _tty_read()
359// This non-blocking function uses the TTY_GET_IRQ[tty_id] interrupt and
360// the associated kernel buffer, that has been written by the ISR.
361// It gets the TTY terminal index from the context of the current task.
362// It fetches one single character from the _tty_get_buf[tty_id] kernel
363// buffer, writes this character to the user buffer, and resets the
364// _tty_get_full[tty_id] buffer.
365// The length argument is not used.
366// Returns 0 if the kernel buffer is empty, 1 if the buffer is full.
367//////////////////////////////////////////////////////////////////////////////
368unsigned int _tty_read(char * buffer, 
369                       unsigned int length) 
370{
371    unsigned int tty_id = _get_context_slot(CTX_TTY_ID);
372
373    if (_tty_get_full[tty_id] == 0) 
374    {
375        return 0;
376    }
377    else 
378    {
379        *buffer = _tty_get_buf[tty_id];
380        _tty_get_full[tty_id] = 0;
381        return 1;
382    }
383}
384
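//////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (hypothetical helper): a blocking "get one character"
// built on top of the non-blocking _tty_read(), busy-waiting until the ISR
// has filled the kernel buffer.
//////////////////////////////////////////////////////////////////////////////
static __attribute__((unused))
void _tty_example_getc( char * buffer )
{
    while ( _tty_read( buffer, 1 ) == 0 ) { asm volatile("nop"); }
}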
385////////////////////////////////////////////////////////////////////////////////
386//     _tty_get_char()
387// This function is used by the _isr_tty to read a character from the TTY
388// terminal defined by the tty_id argument. The character is stored
389// in the requested buffer, and the IRQ is acknowledged.
390// Returns 0 if success, 1 if tty_id too large.
391////////////////////////////////////////////////////////////////////////////////
392unsigned int _tty_get_char(unsigned int tty_id, 
393                           unsigned char * buffer) 
394{
395    // checking argument
396    if (tty_id >= NB_TTY_CHANNELS) { return 1; }
397
398    // compute terminal base address
399    unsigned int * tty_address = (unsigned int *) &seg_tty_base; 
400
401    *buffer = (unsigned char) tty_address[tty_id * TTY_SPAN + TTY_READ];
402    return 0;
403}
404
405
406////////////////////////////////////////////////////////////////////////////////
407//     VciMultiIcu or VciXicu driver
408////////////////////////////////////////////////////////////////////////////////
409// This hardware component is replicated in all clusters.
410// There is one vci_multi_icu (or vci_xicu) component per cluster,
411// and the number of ICU channels is equal to NB_PROCS_MAX,
412// because there is one private interrupt controller per processor.
413////////////////////////////////////////////////////////////////////////////////
414// The (virtual) base address of the associated segment is:
415//
416//       icu_address = seg_icu_base + cluster_id * vseg_cluster_increment
417//  or   icu_address = seg_xcu_base + cluster_id * vseg_cluster_increment
418//
419////////////////////////////////////////////////////////////////////////////////
420
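////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (hypothetical helper): how an interrupt handler could
// obtain the highest priority active interrupt for the local processor,
// assuming the usual GIET encoding proc_id = cluster_id * NB_PROCS_MAX + local_id.
////////////////////////////////////////////////////////////////////////////////
static __attribute__((unused))
unsigned int _icu_example_get_local_index( unsigned int * irq_index )
{
    unsigned int proc_id    = _procid();
    unsigned int cluster_id = proc_id / NB_PROCS_MAX;
    unsigned int local_id   = proc_id % NB_PROCS_MAX;
    return _icu_get_index( cluster_id, local_id, irq_index );
}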
421////////////////////////////////////////////////////////////////////////////////
422//     _icu_set_mask()
423// This function can be used with both the vci_xicu & vci_multi_icu components.
424// It sets the mask register for the ICU channel identified by the cluster index
425// and the processor index: bits written as '1' are set / bits written as '0' are not modified.
426// Returns 0 if success, > 0 if error.
427////////////////////////////////////////////////////////////////////////////////
428unsigned int _icu_set_mask( unsigned int cluster_id,
429                            unsigned int proc_id,
430                            unsigned int value,
431                            unsigned int is_PTI) 
432{
433    // parameters checking
434    if (cluster_id >= NB_CLUSTERS) return 1; 
435    if (proc_id >= NB_PROCS_MAX)   return 1; 
436
437#if USE_XICU
438    unsigned int * icu_address = (unsigned int *) ((unsigned int)&seg_xcu_base + 
439                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
440    if (is_PTI) 
441    {
442        icu_address[XICU_REG(XICU_MSK_PTI_ENABLE, proc_id)] = value;
443    }
444    else 
445    {
446        icu_address[XICU_REG(XICU_MSK_HWI_ENABLE, proc_id)] = value;
447    }
448#else
449    unsigned int * icu_address = (unsigned int *) ((unsigned int)&seg_icu_base + 
450                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
451
452    icu_address[proc_id * ICU_SPAN + ICU_MASK_SET] = value; 
453#endif
454    return 0;
455}
456
457////////////////////////////////////////////////////////////////////////////////
458//     _icu_get_index()
459// This function can be used with both the vci_xicu & vci_multi_icu components.
460// It returns the index of the highest priority (smallest index) active interrupt.
461// The ICU channel is identified by the cluster index and the processor index.
462// Returns 0 if success, > 0 if error.
463////////////////////////////////////////////////////////////////////////////////
464unsigned int _icu_get_index( unsigned int cluster_id, 
465                             unsigned int proc_id, 
466                             unsigned int * buffer) 
467{
468    // parameters checking
469    if (cluster_id >= NB_CLUSTERS)  return 1;
470    if (proc_id >= NB_PROCS_MAX)    return 1;
471
472#if USE_XICU
473    unsigned int * icu_address = (unsigned int *) ((unsigned int)&seg_xcu_base + 
474                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
475
476    unsigned int prio = icu_address[XICU_REG(XICU_PRIO, proc_id)];
477    unsigned int pti_ok = (prio & 0x00000001);
478    unsigned int hwi_ok = (prio & 0x00000002);
479    unsigned int swi_ok = (prio & 0x00000004);
480    unsigned int pti_id = (prio & 0x00001F00) >> 8;
481    unsigned int hwi_id = (prio & 0x001F0000) >> 16;
482    unsigned int swi_id = (prio & 0x1F000000) >> 24;
483    if      (pti_ok) { *buffer = pti_id; }
484    else if (hwi_ok) { *buffer = hwi_id; }
485    else if (swi_ok) { *buffer = swi_id; }
486    else             { *buffer = 32; }
487#else
488    unsigned int * icu_address = (unsigned int *) ((unsigned int)&seg_icu_base + 
489                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
490
491    *buffer = icu_address[proc_id * ICU_SPAN + ICU_IT_VECTOR]; 
492#endif
493    return 0;
494}
495
496////////////////////////////////////////////////////////////////////////////////
497//     VciGcd driver
498////////////////////////////////////////////////////////////////////////////////
499// The Greatest Common Divider (GCD) is a -very- simple hardware coprocessor
500// computing the GCD of two 32-bit integers.
501// It has no DMA capability.
502////////////////////////////////////////////////////////////////////////////////
503
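////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (hypothetical helper): a complete GCD computation using
// the two access functions below. The register indices (GCD_OPA, GCD_OPB,
// GCD_START, GCD_STATUS) and the completion protocol (status non-zero while
// the coprocessor is running, result returned in the OPA register) are
// assumptions about the vci_gcd programming model, not guaranteed by this file.
////////////////////////////////////////////////////////////////////////////////
static __attribute__((unused))
unsigned int _gcd_example_compute( unsigned int opa,
                                   unsigned int opb,
                                   unsigned int * result )
{
    unsigned int status;
    if ( _gcd_write( GCD_OPA, opa ) )  return 1;   // first operand
    if ( _gcd_write( GCD_OPB, opb ) )  return 1;   // second operand
    if ( _gcd_write( GCD_START, 0 ) )  return 1;   // start the computation
    do { _gcd_read( GCD_STATUS, &status ); } while ( status ); // assumed: non-zero while busy
    return _gcd_read( GCD_OPA, result );           // assumed: result in OPA
}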
504////////////////////////////////////////////////////////////////////////////////
505//     _gcd_write()
506// Write a 32-bit word to a memory-mapped register of the GCD coprocessor.
507// Returns 0 if success, > 0 if error.
508////////////////////////////////////////////////////////////////////////////////
509unsigned int _gcd_write( unsigned int register_index, 
510                         unsigned int value) 
511{
512    // parameters checking
513    if (register_index >= GCD_END)  return 1; 
514
515    unsigned int * gcd_address = (unsigned int *) &seg_gcd_base;
516
517    gcd_address[register_index] = value; // write word
518    return 0;
519}
520
521
522////////////////////////////////////////////////////////////////////////////////
523//     _gcd_read()
524// Read a 32-bit word from a memory-mapped register of the GCD coprocessor.
525// Returns 0 if success, > 0 if error.
526////////////////////////////////////////////////////////////////////////////////
527unsigned int _gcd_read( unsigned int register_index, 
528                        unsigned int * buffer ) 
529{
530    // parameters checking
531    if (register_index >= GCD_END)  return 1;
532
533    unsigned int * gcd_address = (unsigned int *) &seg_gcd_base;
534
535    *buffer = gcd_address[register_index]; // read word
536    return 0;
537}
538
539////////////////////////////////////////////////////////////////////////////////
540// VciBlockDevice driver
541////////////////////////////////////////////////////////////////////////////////
542// The VciBlockDevice is a single channel external storage controller.
543//
544// The IOMMU can be activated or not:
545//
546// 1) When the IOMMU is used, a fixed size 2Mbytes vseg is allocated to
547// the IOC peripheral, in the I/O virtual space, and the user buffer is
548// dynamically remapped in the IOMMU page table. The corresponding entry
549// in the IOMMU PT1 is defined by the kernel _ioc_iommu_ix1 variable.
550// The number of PT2 entries (i.e. the number of mapped pages) is
551// dynamically computed and stored in the kernel _ioc_iommu_npages
552// variable. It cannot be larger than 512.
553// The user buffer is unmapped by the _ioc_completed() function when
554// the transfer is completed.
555//
556// 2) If the IOMMU is not used, we check that the user buffer is mapped to a
557// contiguous physical buffer (this is generally true because the user space
558// page tables are statically constructed to use contiguous physical memory).
559//
560// Finally, the memory buffer must fulfill the following conditions:
561// - The user buffer must be word aligned,
562// - The user buffer must be mapped in user address space,
563// - The user buffer must be writable in case of (to_mem) access,
564// - The total number of physical pages occupied by the user buffer cannot
565//   be larger than 512 pages if the IOMMU is activated,
566// - All physical pages occupied by the user buffer must be contiguous
567//   if the IOMMU is not activated.
568// An error code is returned if these conditions are not verified.
569//
570// As the IOC component can be used by several programs running in parallel,
571// the _ioc_lock variable guarantees exclusive access to the device. The
572// _ioc_read() and _ioc_write() functions use atomic LL/SC to get the lock
573// and set _ioc_lock to a non-zero value. The _ioc_write() and _ioc_read()
574// functions are blocking, polling the _ioc_lock variable until the device is
575// available.
576// When the transfer is completed, the ISR routine activated by the IOC IRQ
577// sets the _ioc_done variable to a non-zero value. Possible address errors
578// detected by the IOC peripheral are reported by the ISR in the _ioc_status
579// variable.
580// The _ioc_completed() function polls the _ioc_done variable, waiting for
581// transfer completion. When completion is signaled, the _ioc_completed()
582// function resets the _ioc_done variable to zero, and releases the _ioc_lock
583// variable.
584//
585// In a multi-processing environment, this polling policy should be replaced by
586// a descheduling policy for the requesting process.
587///////////////////////////////////////////////////////////////////////////////
588
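///////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (hypothetical helper): the typical calling sequence for
// a blocking read of "count" blocks, as described above: start the transfer
// with _ioc_read(), then poll for completion with _ioc_completed().
///////////////////////////////////////////////////////////////////////////////
static __attribute__((unused))
unsigned int _ioc_example_load( unsigned int lba,
                                void *       buffer,
                                unsigned int count )
{
    if ( _ioc_read( lba, buffer, count ) ) return 1;  // start the transfer
    return _ioc_completed();                          // busy-wait for completion
}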
589// IOC global variables
590in_unckdata volatile unsigned int _ioc_status = 0;
591in_unckdata volatile unsigned int _ioc_done = 0;
592in_unckdata unsigned int _ioc_lock = 0;
593in_unckdata unsigned int _ioc_iommu_ix1 = 0;
594in_unckdata unsigned int _ioc_iommu_npages; 
595
596///////////////////////////////////////////////////////////////////////////////
597//      _ioc_access()
598// This function transfers data between a memory buffer and the block device.
599// The buffer length is (count * block_size) bytes.
600// Arguments are:
601// - to_mem     : from external storage to memory when non 0
602// - lba        : first block index on the external storage.
603// - user_vaddr : virtual base address of the memory buffer.
604// - count      : number of blocks to be transfered.
605// Returns 0 if success, > 0 if error.
606///////////////////////////////////////////////////////////////////////////////
607unsigned int _ioc_access( unsigned int to_mem,
608                          unsigned int lba,
609                          unsigned int user_vaddr,
610                          unsigned int count) 
611{
612    unsigned int user_vpn_min;     // first virtual page index in user space
613    unsigned int user_vpn_max;     // last virtual page index in user space
614    unsigned int vpn;              // current virtual page index in user space
615    unsigned int ppn;              // physical page number
616    unsigned int flags;            // page protection flags
617    unsigned int ix2;              // page index in IOMMU PT1 page table
618    unsigned int ppn_first;        // first physical page number for user buffer
619    unsigned int buf_xaddr = 0;    // user buffer virtual address in IO space (if IOMMU)
620    paddr_t      buf_paddr = 0;    // user buffer physical address (if no IOMMU),
621
622    // check buffer alignment
623    if ((unsigned int) user_vaddr & 0x3)
624    {
625        _get_lock(&_tty_put_lock);
626        _puts("[GIET ERROR] in _ioc_access() : user buffer not word aligned\n");
627        _release_lock(&_tty_put_lock);
628        return 1; 
629    }
630
631    unsigned int * ioc_address = (unsigned int *) &seg_ioc_base ;
632
633    unsigned int block_size = ioc_address[BLOCK_DEVICE_BLOCK_SIZE];
634    unsigned int length = count * block_size;
635
636    // get user space page table virtual address
637    unsigned int user_pt_vbase = _get_context_slot(CTX_PTAB_ID);
638
639    user_vpn_min = user_vaddr >> 12;
640    user_vpn_max = (user_vaddr + length - 1) >> 12;
641
642    // loop on all virtual pages covering the user buffer
643    for (vpn = user_vpn_min, ix2 = 0 ; 
644         vpn <= user_vpn_max ; 
645         vpn++, ix2++ ) 
646    {
647        // get ppn and flags for each vpn
648        unsigned int ko = _v2p_translate((page_table_t *) user_pt_vbase,
649                                          vpn,
650                                          &ppn,
651                                          &flags);
652        // check access rights
653        if (ko)
654        {
655            _get_lock(&_tty_put_lock);
656            _puts("[GIET ERROR] in _ioc_access() : user buffer unmapped\n");
657            _release_lock(&_tty_put_lock);
658            return 1; 
659        }
660        if ((flags & PTE_U) == 0) 
661        {
662            _get_lock(&_tty_put_lock);
663            _puts("[GIET ERROR] in _ioc_access() : user buffer not in user space\n");
664            _release_lock(&_tty_put_lock);
665            return 1; 
666        }
667        if (((flags & PTE_W) == 0 ) && to_mem)
668        {
669            _get_lock(&_tty_put_lock);
670            _puts("[GIET ERROR] in _ioc_access() : user buffer not writable\n");
671            _release_lock(&_tty_put_lock);
672            return 1; 
673        }
674
675        // save first ppn value
676        if (ix2 == 0) ppn_first = ppn;
677
678        if ( GIET_USE_IOMMU && USE_IOB ) // user buffer remapped in the I/O space
679        {
680            // check buffer length < 2 Mbytes
681            if (ix2 > 511) 
682            {
683                _get_lock(&_tty_put_lock);
684                _puts("[GIET ERROR] in _ioc_access() : user buffer > 2 Mbytes\n");
685                _release_lock(&_tty_put_lock);
686                return 1; 
687            }
688
689            // map the physical page in IOMMU page table
690            _iommu_add_pte2( _ioc_iommu_ix1,    // PT1 index
691                             ix2,               // PT2 index
692                             ppn,               // Physical page number   
693                             flags);            // Protection flags
694
695            // compute user buffer virtual address in IO space
696            buf_xaddr = (_ioc_iommu_ix1) << 21 | (user_vaddr & 0xFFF);
697        }
698        else            // No IOMMU
699        {
700            // check that physical pages are contiguous
701            if ((ppn - ppn_first) != ix2) 
702            {
703                _get_lock(&_tty_put_lock);
704                _puts("[GIET ERROR] in _ioc_access() : split physical user buffer\n");
705                _release_lock(&_tty_put_lock);
706                return 1; 
707            }
708
709            // compute user buffer physical address
710            buf_paddr = (((paddr_t)ppn_first) << 12) | (user_vaddr & 0xFFF);
711        }
712    } // end for vpn
713
714    // register the number of pages to be unmapped
715    _ioc_iommu_npages = (user_vpn_max - user_vpn_min) + 1;
716
717    // invalidate local data cache in case of memory write
718    if (to_mem) _dcache_buf_invalidate((void *) user_vaddr, length);
719
720#if GIET_DEBUG_IOC_DRIVER
721_get_lock(&_tty_put_lock);
722_puts("\n[GIET DEBUG]  IOC_ACCESS at cycle ");
723_putd( _proctime() );
724_puts("\n - proc_id         = ");
725_putd( _procid() );
726_puts("\n - ioc_vbase       = ");
727_putx( (unsigned int)ioc_address );
728_puts("\n - psched_vbase    = ");
729_putx( (unsigned int)_get_sched() );
730_puts("\n - pt_vbase        = ");
731_putx( user_pt_vbase );
732_puts("\n - user_buf_vbase  = ");
733_putx( user_vaddr );
734_puts("\n - user_buf_length = ");
735_putx( length );
736_puts("\n - user_buf_paddr  = ");
737_putl( buf_paddr );
738_puts("\n - user_buf_xaddr  = ");
739_putx( buf_xaddr );
740_puts("\n");
741_release_lock(&_tty_put_lock);
742#endif
743
744    // Invalidate L2 cache if IO Bridge is used
745    if ( to_mem && USE_IOB ) _memc_inval( buf_paddr, length );
746   
747    // get the lock on ioc device
748    _get_lock(&_ioc_lock);
749
750    // peripheral configuration 
751    if ( GIET_USE_IOMMU && USE_IOB ) 
752    {
753        ioc_address[BLOCK_DEVICE_BUFFER] = buf_xaddr;
754    }
755    else
756    {
757        ioc_address[BLOCK_DEVICE_BUFFER]     = (unsigned int)buf_paddr;
758        ioc_address[BLOCK_DEVICE_BUFFER_EXT] = (unsigned int)(buf_paddr>>32);
759    }
760    ioc_address[BLOCK_DEVICE_COUNT] = count;
761    ioc_address[BLOCK_DEVICE_LBA] = lba;
762    if (to_mem == 0) 
763    {
764        ioc_address[BLOCK_DEVICE_OP] = BLOCK_DEVICE_WRITE;
765    }
766    else 
767    {
768        ioc_address[BLOCK_DEVICE_OP] = BLOCK_DEVICE_READ;
769    }
770    return 0;
771}
772
773/////////////////////////////////////////////////////////////////////////////////
774// _ioc_completed()
775//
776// This function checks completion of an I/O transfer and reports errors.
777// As it is a blocking call, the processor is stalled.
778// If the virtual memory is activated, the pages mapped in the I/O virtual
779// space are unmapped, and the IOB TLB is cleared.
780// Returns 0 if success, > 0 if error.
781/////////////////////////////////////////////////////////////////////////////////
782unsigned int _ioc_completed() 
783{
784    unsigned int ret;
785    unsigned int ix2;
786
787    // busy waiting
788    while (_ioc_done == 0) { asm volatile("nop"); }
789
790#if GIET_DEBUG_IOC_DRIVER
791_get_lock(&_tty_put_lock);
792_puts("\n[GIET DEBUG]  IOC_COMPLETED at cycle ");
793_putd( _proctime() );
794_puts("\n - proc_id         = ");
795_putd( _procid() );
796_puts("\n");
797_release_lock(&_tty_put_lock);
798#endif
799
800    // unmap the buffer from IOMMU page table if IOMMU is activated
801    if ( GIET_USE_IOMMU && USE_IOB ) 
802    {
803        unsigned int * iob_address = (unsigned int *) &seg_iob_base;
804
805        for (ix2 = 0; ix2 < _ioc_iommu_npages; ix2++) 
806        {
807            // unmap the page in IOMMU page table
808            _iommu_inval_pte2(
809                    _ioc_iommu_ix1, // PT1 index
810                    ix2 );          // PT2 index
811
812            // clear IOMMU TLB
813            iob_address[IOB_INVAL_PTE] = (_ioc_iommu_ix1 << 21) | (ix2 << 12); 
814        }
815    }
816
817    // test IOC status
818    if ((_ioc_status != BLOCK_DEVICE_READ_SUCCESS)
819            && (_ioc_status != BLOCK_DEVICE_WRITE_SUCCESS)) ret = 1; // error
820    else                                                    ret = 0; // success
821
822    // reset synchronization variables
823    _ioc_done = 0;
824    asm volatile("sync");
825    _ioc_lock = 0;
826
827    return ret;
828}
829
830
831///////////////////////////////////////////////////////////////////////////////
832//     _ioc_read()
833// Transfer data from the block device to a memory buffer in user space.
834// - lba    : first block index on the block device
835// - buffer : base address of the memory buffer (must be word aligned)
836// - count  : number of blocks to be transfered.
837// Returns 0 if success, > 0 if error.
838///////////////////////////////////////////////////////////////////////////////
839unsigned int _ioc_read( unsigned int lba, 
840                        void * buffer, 
841                        unsigned int count) 
842{
843    return _ioc_access(
844            1,        // read access
845            lba,
846            (unsigned int) buffer,
847            count);
848}
849
850
851///////////////////////////////////////////////////////////////////////////////
852//     _ioc_write()
853// Transfer data from a memory buffer in user space to the block device.
854// - lba    : first block index on the block device
855// - buffer : base address of the memory buffer (must be word aligned)
856// - count  : number of blocks to be transfered.
857// Returns 0 if success, > 0 if error.
858///////////////////////////////////////////////////////////////////////////////
859unsigned int _ioc_write( unsigned int lba, 
860                         const void * buffer, 
861                         unsigned int count) 
862{
863    return _ioc_access(
864            0, // write access
865            lba,
866            (unsigned int) buffer,
867            count);
868}
869
870
871///////////////////////////////////////////////////////////////////////////////
872//     _ioc_get_status()
873// This function returns the transfer status and acknowledges the IRQ.
874// Returns 0 if success, > 0 if error.
875///////////////////////////////////////////////////////////////////////////////
876unsigned int _ioc_get_status(unsigned int * status) 
877{
878    // get IOC base address
879    unsigned int * ioc_address = (unsigned int *) &seg_ioc_base;
880
881    *status = ioc_address[BLOCK_DEVICE_STATUS]; // read status & reset IRQ
882    return 0;
883}
884
885
886///////////////////////////////////////////////////////////////////////////////
887//     _ioc_get_block_size()
888// This function returns the block_size with which the IOC has been configured.
889///////////////////////////////////////////////////////////////////////////////
890unsigned int _ioc_get_block_size() 
891{
892    // get IOC base address
893    unsigned int * ioc_address = (unsigned int *) &seg_ioc_base;
894   
895    return  ioc_address[BLOCK_DEVICE_BLOCK_SIZE];
896}
897
898
899//////////////////////////////////////////////////////////////////////////////////
900// VciMultiDma driver
901//////////////////////////////////////////////////////////////////////////////////
902// The DMA controllers are physically distributed in the clusters.
903// There are (NB_CLUSTERS * NB_DMA_CHANNELS) channels, indexed by a global index:
904//        dma_id = cluster_id * NB_DMA_CHANNELS + loc_id
905//
906// As a DMA channel is a private resource allocated to a task,
907// there is no lock protecting exclusive access to the channel.
908// The signaling between the OS and the DMA uses the _dma_done[dma_id]
909// synchronisation variables (set by the ISR, and reset by the OS).
910// The transfer status is copied by the ISR in the _dma_status[dma_id] variables.
911//////////////////////////////////////////////////////////////////////////////////
912// The (virtual) base address of the associated segment is:
913//
914//       dma_address = seg_dma_base + cluster_id * vseg_cluster_increment
915//
916////////////////////////////////////////////////////////////////////////////////
917
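//////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (hypothetical helper): base address of the DMA channel
// identified by the global index dma_id = cluster_id * NB_DMA_CHANNELS + loc_id,
// following the address formula given above.
//////////////////////////////////////////////////////////////////////////////////
#if NB_DMA_CHANNELS > 0
static __attribute__((unused))
unsigned int * _dma_example_channel_base( unsigned int dma_id )
{
    unsigned int cluster_id = dma_id / NB_DMA_CHANNELS;
    unsigned int channel_id = dma_id % NB_DMA_CHANNELS;
    unsigned int * dma_base = (unsigned int *) ((unsigned int)&seg_dma_base +
                              (cluster_id * (unsigned int)&vseg_cluster_increment));
    return dma_base + (channel_id * DMA_SPAN);
}
#endif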
918#if NB_DMA_CHANNELS > 0
919
920// in_unckdata unsigned int            _dma_lock[NB_DMA_CHANNELS * NB_CLUSTERS]
921// = { [0 ... (NB_DMA_CHANNELS * NB_CLUSTERS) - 1] = 0 };
922
923in_unckdata volatile unsigned int    _dma_done[NB_DMA_CHANNELS * NB_CLUSTERS] 
924        = { [0 ... (NB_DMA_CHANNELS * NB_CLUSTERS) - 1] = 0 };
925in_unckdata volatile unsigned int _dma_status[NB_DMA_CHANNELS * NB_CLUSTERS];
926in_unckdata unsigned int _dma_iommu_ix1 = 1;
927in_unckdata unsigned int _dma_iommu_npages[NB_DMA_CHANNELS * NB_CLUSTERS];
928#endif
929
930//////////////////////////////////////////////////////////////////////////////////
931// _dma_reset_irq()
932//////////////////////////////////////////////////////////////////////////////////
933unsigned int _dma_reset_irq( unsigned int cluster_id, 
934                             unsigned int channel_id) 
935{
936#if NB_DMA_CHANNELS > 0
937    // parameters checking
938    if (cluster_id >= NB_CLUSTERS)  return 1;
939    if (channel_id >= NB_DMA_CHANNELS)  return 1; 
940
941    // compute DMA base address
942    unsigned int * dma_address = (unsigned int *) ((unsigned int)&seg_dma_base + 
943                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
944
945    dma_address[channel_id * DMA_SPAN + DMA_RESET] = 0;           
946    return 0;
947#else
948    return -1;
949#endif
950}
951
952//////////////////////////////////////////////////////////////////////////////////
953// _dma_get_status()
954//////////////////////////////////////////////////////////////////////////////////
955unsigned int _dma_get_status( unsigned int cluster_id, 
956                              unsigned int channel_id, 
957                              unsigned int * status) 
958{
959#if NB_DMA_CHANNELS > 0
960    // parameters checking
961    if (cluster_id >= NB_CLUSTERS)  return 1;
962    if (channel_id >= NB_DMA_CHANNELS)  return 1;
963
964    // compute DMA base address
965    unsigned int * dma_address = (unsigned int *) ((unsigned int)&seg_dma_base + 
966                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
967
968    *status = dma_address[channel_id * DMA_SPAN + DMA_LEN];
969    return 0;
970#else
971    return -1;
972#endif
973}
974
975//////////////////////////////////////////////////////////////////////////////////
976// _dma_transfer()
977// Transfer data between a user buffer and a device buffer using DMA.
978// Only one device type is supported: Frame Buffer (dev_type == 0)
979// Arguments are:
980// - dev_type     : device type.
981// - to_user      : from  device buffer to user buffer when true.
982// - offset       : offset (in bytes) in the device buffer.
983// - user_vaddr   : virtual base address of the user buffer.
984// - length       : number of bytes to be transfered.
985//
986// The cluster_id and channel_id are obtained from task context (CTX_DMA_ID).
987// The user buffer must be mapped in user address space and word-aligned.
988// The user buffer length must be multiple of 4 bytes.
989// We compute the physical base addresses for both the device buffer
990// and the user buffer before programming the DMA transfer.
991// The GIET being fully static, we don't need to split the transfer in 4 Kbytes
992// pages, because the user buffer is contiguous in physical space.
993// Returns 0 if success, > 0 if error.
994//////////////////////////////////////////////////////////////////////////////////
995unsigned int _dma_transfer( unsigned int dev_type,
996                            unsigned int to_user,
997                            unsigned int offset,
998                            unsigned int user_vaddr,
999                            unsigned int length ) 
1000{
1001#if NB_DMA_CHANNELS > 0
1002    unsigned int ko;           // unsuccessful V2P translation
1003    unsigned int device_vbase; // device buffer vbase address
1004    unsigned int flags;        // protection flags
1005    unsigned int ppn;          // physical page number
1006    paddr_t      user_pbase;   // user buffer pbase address
1007    paddr_t      device_pbase; // device buffer pbase address
1008
1009    // check user buffer address and length alignment
1010    if ((user_vaddr & 0x3) || (length & 0x3)) 
1011    {
1012        _get_lock(&_tty_put_lock);
1013        _puts("\n[GIET ERROR] in _dma_transfer : user buffer not word aligned\n");
1014        _release_lock(&_tty_put_lock);
1015        return 1;
1016    }
1017
1018    // get DMA channel and compute DMA vbase address
1019    unsigned int dma_id      = _get_context_slot(CTX_DMA_ID);
1020    if ( dma_id == 0xFFFFFFFF )
1021    {
1022        _get_lock(&_tty_put_lock);
1023        _puts("\n[GIET ERROR] in _dma_transfer : no DMA channel allocated\n");
1024        _release_lock(&_tty_put_lock);
1025        return 1;
1026    }
1027    unsigned int cluster_id  = dma_id / NB_DMA_CHANNELS;
1028    unsigned int channel_id  = dma_id % NB_DMA_CHANNELS;
1029    unsigned int * dma_vbase = (unsigned int *) ((unsigned int)&seg_dma_base + 
1030                               (cluster_id * (unsigned int)&vseg_cluster_increment));
1031    // get page table address
1032    unsigned int user_ptab = _get_context_slot(CTX_PTAB_ID);
1033
1034    // get device buffer virtual address, depending on peripheral type
1035    if (dev_type == 0) 
1036    {
1037        device_vbase = (unsigned int) &seg_fbf_base + offset;
1038    }
1039    else 
1040    {
1041        _get_lock(&_tty_put_lock);
1042        _puts("\n[GIET ERROR] in _dma_transfer : device type not supported\n");
1043        _release_lock(&_tty_put_lock);
1044        return 1;
1045    }
1046
1047    // get device buffer physical address
1048    ko = _v2p_translate( (page_table_t*) user_ptab, 
1049                         (device_vbase >> 12), 
1050                         &ppn, 
1051                         &flags );
1052    if (ko) 
1053    {
1054        _get_lock(&_tty_put_lock);
1055        _puts("\n[GIET ERROR] in _dma_transfer : device buffer unmapped\n");
1056        _release_lock(&_tty_put_lock);
1057        return 1;
1058    }
1059    device_pbase = ((paddr_t)ppn << 12) | (device_vbase & 0x00000FFF);
1060
1061    // Compute user buffer physical address
1062    ko = _v2p_translate( (page_table_t*) user_ptab, 
1063                         (user_vaddr >> 12), 
1064                         &ppn, 
1065                         &flags );
1066    if (ko) 
1067    {
1068        _get_lock(&_tty_put_lock);
1069        _puts("\n[GIET ERROR] in _dma_transfer() : user buffer unmapped\n");
1070        _release_lock(&_tty_put_lock);
1071        return 1;
1072    } 
1073    if ((flags & PTE_U) == 0) 
1074    {
1075        _get_lock(&_tty_put_lock);
1076        _puts("[GIET ERROR] in _dma_transfer() : user buffer not in user space\n");
1077        _release_lock(&_tty_put_lock);
1078        return 1; 
1079    }
1080    if (((flags & PTE_W) == 0 ) && to_user) 
1081    {
1082        _get_lock(&_tty_put_lock);
1083        _puts("\n[GIET ERROR] in _dma_transfer() : user buffer not writable\n");
1084        _release_lock(&_tty_put_lock);
1085        return 1;
1086    }
1087    user_pbase = (((paddr_t)ppn) << 12) | (user_vaddr & 0x00000FFF);
1088
1089/*  This is a draft for IOMMU support
1090
1091    // loop on all virtual pages covering the user buffer
1092    unsigned int user_vpn_min = user_vaddr >> 12;
1093    unsigned int user_vpn_max = (user_vaddr + length - 1) >> 12;
1094    unsigned int ix2          = 0;
1095    unsigned int ix1          = _dma_iommu_ix1 + dma_id;
1096
1097    for ( vpn = user_vpn_min ; vpn <= user_vpn_max ; vpn++ )
1098    {
1099    // get ppn and flags for each vpn
1100    unsigned int ko = _v2p_translate( (page_table_t*)user_pt_vbase,
1101    vpn,
1102    &ppn,
1103    &flags );
1104
1105    // check access rights
1106    if ( ko )                                 return 3;     // unmapped
1107    if ( (flags & PTE_U) == 0 )               return 4;     // not in user space
1108    if ( ( (flags & PTE_W) == 0 ) && to_user ) return 5;     // not writable
1109
1110    // save first ppn value
1111    if ( ix2 == 0 ) ppn_first = ppn;
1112
1113    if ( GIET_USE_IOMMU && USE_IOB )    // user buffer remapped in the I/0 space
1114    {
1115    // check buffer length < 2 Mbytes
1116    if ( ix2 > 511 ) return 2;
1117
1118    // map the physical page in IOMMU page table
1119    _iommu_add_pte2( ix1,        // PT1 index
1120    ix2,        // PT2 index
1121    ppn,        // physical page number
1122    flags );    // protection flags
1123    }
1124    else            // no IOMMU : check that physical pages are contiguous
1125    {
1126    if ( (ppn - ppn_first) != ix2 )       return 6;     // split physical buffer 
1127    }
1128
1129    // increment page index
1130    ix2++;
1131    } // end for vpn
1132
1133    // register the number of pages to be unmapped if iommu activated
1134    _dma_iommu_npages[dma_id] = (user_vpn_max - user_vpn_min) + 1;
1135
1136*/
1137
1138    // invalidate data cache in case of memory write
1139    if (to_user) _dcache_buf_invalidate((void *) user_vaddr, length);
1140
1141// get the lock
1142//  _get_lock(&_dma_lock[dma_id]);
1143
1144#if GIET_DEBUG_DMA_DRIVER
1145_get_lock(&_tty_put_lock);
1146_puts("\n[GIET DEBUG] DMA TRANSFER at cycle ");
1147_putd( _proctime() );
1148_puts("\n - cluster_id       = ");
1149_putx( cluster_id );
1150_puts("\n - channel_id       = ");
1151_putx( channel_id );
1152_puts("\n - dma_vbase        = ");
1153_putx( (unsigned int)dma_vbase );
1154_puts("\n - device_buf_vbase = ");
1155_putx( device_vbase );
1156_puts("\n - device_buf_pbase = ");
1157_putl( device_pbase );
1158_puts("\n - user_buf_vbase   = ");
1159_putx( user_vaddr );
1160_puts("\n - user_buf_pbase   = ");
1161_putl( user_pbase );
1162_puts("\n");
1163_release_lock(&_tty_put_lock);
1164#endif
1165
1166    // DMA configuration
1167    if (to_user) 
1168    {
1169        dma_vbase[channel_id * DMA_SPAN + DMA_SRC]     = (unsigned int)(device_pbase);
1170        dma_vbase[channel_id * DMA_SPAN + DMA_SRC_EXT] = (unsigned int)(device_pbase>>32);
1171        dma_vbase[channel_id * DMA_SPAN + DMA_DST]     = (unsigned int)(user_pbase);
1172        dma_vbase[channel_id * DMA_SPAN + DMA_DST_EXT] = (unsigned int)(user_pbase>>32);
1173    }
1174    else 
1175    {
1176        dma_vbase[channel_id * DMA_SPAN + DMA_SRC]     = (unsigned int)(user_pbase);
1177        dma_vbase[channel_id * DMA_SPAN + DMA_SRC_EXT] = (unsigned int)(user_pbase>>32);
1178        dma_vbase[channel_id * DMA_SPAN + DMA_DST]     = (unsigned int)(device_pbase);
1179        dma_vbase[channel_id * DMA_SPAN + DMA_DST_EXT] = (unsigned int)(device_pbase>>32);
1180    }
1181    dma_vbase[channel_id * DMA_SPAN + DMA_LEN] = (unsigned int) length;
1182
1183    return 0;
1184
1185#else // NB_DMA_CHANNELS == 0
1186    _get_lock(&_tty_put_lock);
1187    _puts("\n[GIET ERROR] in _dma_transfer() : NB_DMA_CHANNELS == 0");
1188    _release_lock(&_tty_put_lock);
1189    return 1;
1190#endif
1191
1192}  // end _dma_transfer() 
1193
1194//////////////////////////////////////////////////////////////////////////////////
1195// _dma_completed()
1196// This function checks completion of a DMA transfer to or from a peripheral
1197// device (Frame Buffer or Multi-Nic).
1198// As it is a blocking call, the processor is busy waiting.
1199// Returns 0 if success, > 0 if error
1200// (1 == read error / 2 == DMA idle error / 3 == write error)
1201//////////////////////////////////////////////////////////////////////////////////
1202unsigned int _dma_completed() 
1203{
1204#if NB_DMA_CHANNELS > 0
1205    unsigned int dma_id  = _get_context_slot(CTX_DMA_ID);
1206    unsigned int dma_ret;
1207
1208    // busy waiting with a pseudo random delay between bus access
1209    while (_dma_done[dma_id] == 0) 
1210    {
1211        unsigned int delay = (( _proctime() ^ _procid() << 4) & 0x3F) + 1;
1212        asm volatile(
1213                "move  $3,   %0                 \n"
1214                "loop_nic_completed:            \n"
1215                "addi  $3,   $3, -1             \n"
1216                "bnez  $3,   loop_nic_completed \n"
1217                "nop                            \n"
1218                :
1219                : "r" (delay)
1220                : "$3"); 
1221    }
1222
1223#if GIET_DEBUG_DMA_DRIVER
1224_get_lock(&_tty_put_lock);
1225_puts("\n[GIET DEBUG] DMA COMPLETED at cycle ");
1226_putd( _proctime() );
1227_puts("\n - cluster_id       = ");
1228_putx( dma_id/NB_DMA_CHANNELS );
1229_puts("\n - channel_id       = ");
1230_putx( dma_id%NB_DMA_CHANNELS );
1231_puts("\n");
1232_release_lock(&_tty_put_lock);
1233#endif
1234
1235    // reset synchronization variables
1236    _dma_done[dma_id] = 0;
1237    dma_ret = _dma_status[dma_id];
1238    asm volatile("sync\n");
1239
1240//    _dma_lock[dma_id] = 0;
1241
1242    return dma_ret;
1243
1244#else // NB_DMA_CHANNELS == 0
1245    return -1;
1246#endif
1247
1248}  // end _dma_completed
1249
1250
1251//////////////////////////////////////////////////////////////////////////////////
1252//     VciFrameBuffer driver
1253//////////////////////////////////////////////////////////////////////////////////
1254// There are three methods to access the VciFrameBuffer device:
1255// 
1256// 1) The _fb_sync_write() and _fb_sync_read() functions use a memcpy strategy
1257// to implement the transfer between a data buffer (user space) and the frame
1258// buffer (kernel space). They are blocking until completion of the transfer.
1259//
1260// 2) The _fb_dma_write(), _fb_dma_read() and _fb_mdma_completed() functions use
1261// the VciMultiDma components (distributed in the clusters) to transfer data
1262// between the user buffer and the frame buffer.
1263// A DMA channel is allocated to the task requesting it in the mapping_info,
1264// and stored in the task context.
1265//
1266// 3) The _fb_cma_init(), _fb_cma_write() and _fb_cma_stop() functions use
1267// the VciChbufDma component (non replicated) to transfer a flow of images from
1268// a user space chained buffer (two buffers) to the frame buffer.
1269// A CMA channel is allocated to the task requesting it in the mapping_info,
1270// and stored in the task context.
1271//////////////////////////////////////////////////////////////////////////////////
1272
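//////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (hypothetical helper): displaying one image with method 2
// above, i.e. start a DMA transfer towards the frame buffer, then busy-wait
// for its completion.
//////////////////////////////////////////////////////////////////////////////////
static __attribute__((unused))
unsigned int _fb_example_display( const void * image,
                                  unsigned int length )
{
    if ( _fb_dma_write( 0, image, length ) ) return 1;  // offset 0 in the frame buffer
    return _fb_dma_completed();                         // poll until the DMA is done
}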
1273//////////////////////////////////////////////////////////////////////////////////
1274// _fb_sync_write()
1275// Transfer data from a memory buffer to the frame_buffer device using a memcpy.
1276// - offset : offset (in bytes) in the frame buffer.
1277// - buffer : base address of the memory buffer.
1278// - length : number of bytes to be transfered.
1279//////////////////////////////////////////////////////////////////////////////////
1280
1281unsigned int _fb_sync_write(unsigned int offset, 
1282                            const void * buffer, 
1283                            unsigned int length) 
1284{
1285    unsigned char * fb_address = (unsigned char *) &seg_fbf_base + offset;
1286    memcpy((void *) fb_address, (void *) buffer, length);
1287    return 0;
1288}
1289
1290
1291//////////////////////////////////////////////////////////////////////////////////
1292// _fb_sync_read()
1293// Transfer data from the frame_buffer device to a memory buffer using a memcpy.
1294// - offset : offset (in bytes) in the frame buffer.
1295// - buffer : base address of the memory buffer.
1296// - length : number of bytes to be transfered.
1297//////////////////////////////////////////////////////////////////////////////////
1298unsigned int _fb_sync_read( unsigned int   offset, 
1299                            const void*    buffer, 
1300                            unsigned int   length) 
1301{
1302    unsigned char* fb_address = (unsigned char *) &seg_fbf_base + offset;
1303    memcpy((void *) buffer, (void *) fb_address, length);
1304    return 0;
1305}
1306
1307
1308//////////////////////////////////////////////////////////////////////////////////
1309// _fb_dma_write()
1310// Transfer data from a memory buffer to the frame_buffer device using  DMA.
1311// - offset : offset (in bytes) in the frame buffer.
1312// - buffer : base address of the memory buffer.
1313// - length : number of bytes to be transfered.
1314// Returns 0 if success, > 0 if error.
1315//////////////////////////////////////////////////////////////////////////////////
1316unsigned int _fb_dma_write( unsigned int   offset, 
1317                            const void*    buffer, 
1318                            unsigned int   length) 
1319{
1320    return _dma_transfer( 0,             // frame buffer
1321                          0,             // write
1322                          offset,
1323                          (unsigned int) buffer,
1324                          length );
1325}
1326//////////////////////////////////////////////////////////////////////////////////
1327// _fb_dma_read()
1328// Transfer data from the frame_buffer device to a memory buffer using  DMA.
1329// - offset : offset (in bytes) in the frame buffer.
1330// - buffer : virtual base address of the user buffer.
1331// - length : buffer size (number of bytes)
1332// Returns 0 if success, > 0 if error.
1333//////////////////////////////////////////////////////////////////////////////////
1334unsigned int _fb_dma_read( unsigned int   offset, 
1335                           const void*    buffer, 
1336                           unsigned int   length ) 
1337{
1338    return _dma_transfer( 0,    // frame buffer
1339                          1,    // read
1340                          offset,
1341                          (unsigned int) buffer,
1342                          length );
1343}
1344//////////////////////////////////////////////////////////////////////////////////
1345// _fb_dma_completed()
1346// This function checks completion of a DMA transfer to or from the frame buffer.
1347// As it is a blocking call, the processor is busy waiting.
1348// Returns 0 if success, > 0 if error
1349// (1 == read error / 2 == DMA idle error / 3 == write error)
1350//////////////////////////////////////////////////////////////////////////////////
1351unsigned int _fb_dma_completed() 
1352{
1353    return _dma_completed();
1354}
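
// Minimal usage sketch (not part of the driver, guarded out of the build): displaying
// a buffer with the DMA path. The buffer address and size are placeholders; the call
// to _fb_dma_completed() busy-waits until the DMA channel reports success or an error.
#if 0
static unsigned int example_fb_dma_display( const void*  user_buffer,
                                            unsigned int size )
{
    if ( _fb_dma_write( 0, user_buffer, size ) ) return 1;   // start DMA at frame buffer offset 0
    return _fb_dma_completed();                              // blocking wait: 0 == success
}
#endif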
1355
1356// This structure contains two chbuf descriptors that can be used by
1357// the VciChbufDma component to transfer a flow of images:
1358// - The SRC chbuf descriptor contains two slots (two user buffers)
1359// - The DST chbuf descriptor contains only one slot (frame buffer)
1360typedef struct fb_cma_channel_s
1361{
1362    paddr_t buf0;   // physical address + status for user buffer 0
1363    paddr_t buf1;   // physical address + status for user buffer 1
1364    paddr_t fbf;    // physical address + status for frame buffer
1365} fb_cma_channel_t;
1366
1367in_unckdata volatile fb_cma_channel_t _fb_cma_channel[NB_CMA_CHANNELS];
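
// The sketch below (hypothetical helpers, guarded out of the build) makes the status
// convention used by _fb_cma_write() explicit: bit 63 of each chbuf entry is the
// full/empty flag, and the remaining bits hold the buffer physical address.
#if 0
#define CHBUF_STATUS_MASK  0x8000000000000000ULL
#define CHBUF_PADDR_MASK   0x7FFFFFFFFFFFFFFFULL

static inline paddr_t      chbuf_set_full ( paddr_t entry ) { return entry | CHBUF_STATUS_MASK; }
static inline paddr_t      chbuf_set_empty( paddr_t entry ) { return entry & CHBUF_PADDR_MASK;  }
static inline unsigned int chbuf_is_full  ( paddr_t entry ) { return (unsigned int)(entry >> 63); }
#endif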
1368
1369//////////////////////////////////////////////////////////////////////////////////
1370// _fb_cma_init()
1371// This function does two things:
1372// 1) Initialises the SRC chbuf descriptor (two buffers), and the DST
1373//    chbuf descriptor (one single frame buffer), after translating 
1374//    virtual addresses to physical addresses, and checking access rights.
1375// 2) Starts the CMA hardware channel, that will permanently try to display
1376//    images as soon as the SRC buffers are filled.
1377// Arguments are:
1378// - vbase0 : virtual base address of the first user buffer.
1379// - vbase1 : virtual base address of the second user buffer.
1380// - length : user buffer size (number of bytes)
1381// Returns 0 if success, > 0 if error
1382//////////////////////////////////////////////////////////////////////////////////
1383unsigned int _fb_cma_init( const void*  vbase0,
1384                           const void*  vbase1,
1385                           unsigned int length ) 
1386{
1387#if NB_CMA_CHANNELS > 0
1388
1389    unsigned int  channel_id;       // CMA channel index
1390    unsigned int  user_ptab;        // page table virtual address
1391    unsigned int  ko;               // unsuccessful V2P translation
1392    unsigned int  vpn;              // virtual page number
1393    unsigned int  flags;            // protection flags
1394    unsigned int  ppn;              // physical page number
1395    paddr_t       src_chbuf_pbase;  // physical address for SRC chbuf descriptor
1396    paddr_t       dst_chbuf_pbase;  // physical address for DST chbuf descriptor
1397
1398    // get CMA channel index
1399    channel_id = _get_context_slot(CTX_CMA_ID);
1400    if ( channel_id >= NB_CMA_CHANNELS )
1401    {
1402        _get_lock(&_tty_put_lock);
1403        _puts("\n[GIET ERROR] in _fb_cma_init() : CMA channel index too large\n");
1404        _release_lock(&_tty_put_lock);
1405        return 1;
1406    }
1407
1408    // check user buffer virtual addresses and length alignment
1409    if ( ((unsigned int)vbase0 & 0x3) || ((unsigned int)vbase1 & 0x3) || (length & 0x3) ) 
1410    {
1411        _get_lock(&_tty_put_lock);
1412        _puts("\n[GIET ERROR] in _fb_cma_init() : user buffer not word aligned\n");
1413        _release_lock(&_tty_put_lock);
1414        return 1;
1415    }
1416
1417    // get page table virtual address
1418    user_ptab = _get_context_slot(CTX_PTAB_ID);
1419
1420    // compute and register the frame buffer physical address
1421    // (the frame buffer virtual address is seg_fbf_base)
1423    vpn = ((unsigned int)&seg_fbf_base) >> 12;
1424    ko = _v2p_translate( (page_table_t*) user_ptab, 
1425                         vpn, 
1426                         &ppn, 
1427                         &flags );
1428    if (ko) 
1429    {
1430        _get_lock(&_tty_put_lock);
1431        _puts("\n[GIET ERROR] in _fb_cma_init() : frame buffer unmapped\n");
1432        _release_lock(&_tty_put_lock);
1433        return 1;
1434    }
1435    _fb_cma_channel[channel_id].fbf = ((paddr_t)ppn << 12) | ((unsigned int)&seg_fbf_base & 0x00000FFF);
1436
1437    // Compute and register first user buffer physical address
1438    vpn = (unsigned int)vbase0 >> 12; 
1439    ko = _v2p_translate( (page_table_t*) user_ptab, 
1440                         vpn, 
1441                         &ppn, 
1442                         &flags );
1443    if (ko) 
1444    {
1445        _get_lock(&_tty_put_lock);
1446        _puts("\n[GIET ERROR] in _fb_cma_init() : user buffer 0 unmapped\n");
1447        _release_lock(&_tty_put_lock);
1448        return 1;
1449    } 
1450    if ((flags & PTE_U) == 0) 
1451    {
1452        _get_lock(&_tty_put_lock);
1453        _puts("[GIET ERROR] in _fb_cma_init() : user buffer 0 not in user space\n");
1454        _release_lock(&_tty_put_lock);
1455        return 1; 
1456    }
1457    _fb_cma_channel[channel_id].buf0 = ((paddr_t)ppn << 12) | ((unsigned int)vbase0 & 0x00000FFF);
1458
1459    // Compute and register second user buffer physical address
1460    vpn = (unsigned int)vbase1 >> 12; 
1461    ko = _v2p_translate( (page_table_t*) user_ptab, 
1462                         vpn, 
1463                         &ppn, 
1464                         &flags );
1465    if (ko) 
1466    {
1467        _get_lock(&_tty_put_lock);
1468        _puts("\n[GIET ERROR] in _fb_cma_init() : user buffer 1 unmapped\n");
1469        _release_lock(&_tty_put_lock);
1470        return 1;
1471    } 
1472    if ((flags & PTE_U) == 0) 
1473    {
1474        _get_lock(&_tty_put_lock);
1475        _puts("[GIET ERROR] in _fb_cma_init() : user buffer 1 not in user space\n");
1476        _release_lock(&_tty_put_lock);
1477        return 1; 
1478    }
1479    _fb_cma_channel[channel_id].buf1 = ((paddr_t)ppn << 12) | ((unsigned int)vbase1 & 0x00000FFF);
1480
1481    // Compute physical address of the SRC chbuf descriptor
1482    vpn = ((unsigned int)(&_fb_cma_channel[channel_id].buf0)) >> 12;
1483    ko = _v2p_translate( (page_table_t*) user_ptab, 
1484                         vpn,
1485                         &ppn, 
1486                         &flags );
1487    if (ko) 
1488    {
1489        _get_lock(&_tty_put_lock);
1490        _puts("\n[GIET ERROR] in _fb_cma_init() : SRC chbuf descriptor unmapped\n");
1491        _release_lock(&_tty_put_lock);
1492        return 1;
1493    } 
1494    src_chbuf_pbase = (((paddr_t)ppn) << 12) | ((unsigned int)&_fb_cma_channel[channel_id].buf0 & 0x00000FFF);
1495
1496    // Compute physical address of the DST chbuf descriptor
1497    vpn = ((unsigned int)(&_fb_cma_channel[channel_id].fbf)) >> 12;
1498    ko = _v2p_translate( (page_table_t*) user_ptab, 
1499                         vpn,
1500                         &ppn, 
1501                         &flags );
1502    if (ko) 
1503    {
1504        _get_lock(&_tty_put_lock);
1505        _puts("\n[GIET ERROR] in _fb_cma_init() : DST chbuf descriptor unmapped\n");
1506        _release_lock(&_tty_put_lock);
1507        return 1;
1508    } 
1509    dst_chbuf_pbase = (((paddr_t)ppn) << 12) | ((unsigned int)&_fb_cma_channel[channel_id].fbf & 0x00000FFF);
1510
1511    // CMA channel activation
1512    unsigned int* cma_vbase = (unsigned int *)&seg_cma_base;
1513    unsigned int  offset     = channel_id * CHBUF_CHANNEL_SPAN;
1514
1515    cma_vbase[offset + CHBUF_SRC_DESC]  = (unsigned int)(src_chbuf_pbase & 0xFFFFFFFF);
1516    cma_vbase[offset + CHBUF_SRC_EXT]   = (unsigned int)(src_chbuf_pbase >> 32);
1517    cma_vbase[offset + CHBUF_SRC_NBUFS] = 2;
1518    cma_vbase[offset + CHBUF_DST_DESC]  = (unsigned int)(dst_chbuf_pbase & 0xFFFFFFFF);
1519    cma_vbase[offset + CHBUF_DST_EXT]   = (unsigned int)(dst_chbuf_pbase >> 32);
1520    cma_vbase[offset + CHBUF_DST_NBUFS] = 1;
1521    cma_vbase[offset + CHBUF_BUF_SIZE]  = length;
1522    cma_vbase[offset + CHBUF_PERIOD]    = 300;
1523    cma_vbase[offset + CHBUF_RUN]       = 1;
1524
1525    return 0;
1526
1527#else
1528
1529    _get_lock(&_tty_put_lock);
1530    _puts("\n[GIET ERROR] in _fb_cma_init() : no CMA channel allocated\n");
1531    _release_lock(&_tty_put_lock);
1532
1533    return 1;
1534#endif
1535}
1536//////////////////////////////////////////////////////////////////////////////////
1537// _fb_cma_write()
1538// This function updates the status of the SRC and DST chbuf descriptors
1539// to allow the CMA component to transfer another buffer.
1540// - buffer_id : user buffer index (0 => buf0 / not 0 => buf1)
1541// Returns 0 if success, > 0 if error
1542//////////////////////////////////////////////////////////////////////////////////
1543unsigned int _fb_cma_write( unsigned int buffer_id )
1544{
1545#if NB_CMA_CHANNELS > 0
1546
1547    // get CMA channel index
1548    unsigned int channel_id = _get_context_slot(CTX_CMA_ID);
1549    if ( channel_id >= NB_CMA_CHANNELS )
1550    {
1551        _get_lock(&_tty_put_lock);
1552        _puts("\n[GIET ERROR] in _fb_cma_write() : CMA channel index too large\n");
1553        _release_lock(&_tty_put_lock);
1554        return 1;
1555    }
1556    // set SRC full
1557    if ( buffer_id == 0 ) _fb_cma_channel[channel_id].buf0 |= 0x8000000000000000ULL;
1558    else                  _fb_cma_channel[channel_id].buf1 |= 0x8000000000000000ULL;
1559
1560    // set DST empty
1561    _fb_cma_channel[channel_id].fbf &= 0x7FFFFFFFFFFFFFFFULL;
1566    return 0;
1567
1568#else
1569
1570    _get_lock(&_tty_put_lock);
1571    _puts("\n[GIET ERROR] in _fb_cma_write() : no CMA channel allocated\n");
1572    _release_lock(&_tty_put_lock);
1573    return 1;
1574
1575#endif
1576}
1577//////////////////////////////////////////////////////////////////////////////////
1578// _fb_cma_stop()
1579// This function deactivates the CMA channel allocated to the calling task.
1580// Returns 0 if success, > 0 if error
1581//////////////////////////////////////////////////////////////////////////////////
1582unsigned int _fb_cma_stop( unsigned int buffer_id )
1583{
1584#if NB_CMA_CHANNELS > 0
1585
1586    // get CMA channel allocated
1587    unsigned int channel_id = _get_context_slot(CTX_CMA_ID);
1588    if ( channel_id >= NB_CMA_CHANNELS )
1589    {
1590        _get_lock(&_tty_put_lock);
1591        _puts("\n[GIET ERROR] in _fb_cma_stop() : CMA channel index too large\n");
1592        _release_lock(&_tty_put_lock);
1593        return 1;
1594    }
1595    // CMA channel deactivation
1596    unsigned int* cma_vbase = (unsigned int *)&seg_cma_base;
1597    unsigned int  offset     = channel_id * CHBUF_CHANNEL_SPAN;
1598    cma_vbase[offset + CHBUF_RUN] = 0;
1599    return 0;
1600
1601#else
1602
1603    _get_lock(&_tty_put_lock);
1604    _puts("\n[GIET ERROR] in _fb_cma_stop() : no CMA channel allocated\n");
1605    _release_lock(&_tty_put_lock);
1606    return 1;
1607
1608#endif
1609}
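
// Minimal usage sketch (not part of the driver, guarded out of the build): a typical
// double-buffered display loop built on _fb_cma_init(), _fb_cma_write() and
// _fb_cma_stop(). draw_next_image() is a hypothetical application routine. A real
// application must also wait until the CMA hardware has consumed a buffer (its status
// bit returns to empty) before drawing into it again.
#if 0
static void example_fb_cma_loop( void*        buf0,
                                 void*        buf1,
                                 unsigned int size,
                                 unsigned int nb_images )
{
    unsigned int i;
    if ( _fb_cma_init( buf0, buf1, size ) ) return;   // build chbuf descriptors, start channel
    for ( i = 0 ; i < nb_images ; i++ )
    {
        // draw_next_image( (i & 1) ? buf1 : buf0 );  // hypothetical: fill the current buffer
        _fb_cma_write( i & 1 );                       // mark SRC buffer full / DST buffer empty
    }
    _fb_cma_stop( 0 );                                // deactivate the CMA channel
}
#endif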
1610   
1611//////////////////////////////////////////////////////////////////////////////////
1612//     VciMultiNic driver
1613//////////////////////////////////////////////////////////////////////////////////
1614// The VciMultiNic device can be accessed directly by software with memcpy(),
1615// or it can be accessed through a multi-channel CMA component:
1616// 
1617// The '_nic_sync_write' and '_nic_sync_read' functions use a memcpy strategy to
1618// implement the transfer between a data buffer (user space) and the NIC
1619// buffer (kernel space). They are blocking until completion of the transfer.
1620//
1621// The _nic_cma_rx_init(), _nic_cma_tx_init() and _nic_cma_stop() functions use the
1622// VciChbufDma component to transfer a flow of packets from the NIC RX hard chbuf
1623// (two containers) to a user RX chbuf (two containers), and another flow of packets
1624// from a user TX chbuf (two containers) to the NIC TX chbuf (two containers).
1625// One NIC channel and two CMA channels must be allocated to the task
1626// in the mapping_info data structure.
1627//////////////////////////////////////////////////////////////////////////////////
1628
1629//////////////////////////////////////////////////////////////////////////////////
1630// _nic_sync_write()
1631// Transfer data from a memory buffer to the NIC device using a memcpy.
1632// - buffer : base address of the memory buffer.
1633// - length : number of bytes to be transferred.
1634//////////////////////////////////////////////////////////////////////////////////
1635unsigned int _nic_sync_write( const void*    buffer,
1636                              unsigned int   length ) 
1637{
1638    // To be defined
1639    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1640    // memcpy((void *) nic_address, (void *) buffer, length);
1641    return 0;
1642}
1643//////////////////////////////////////////////////////////////////////////////////
1644// _nic_sync_read()
1645// Transfer data from the NIC device to a memory buffer using a memcpy.
1646// - buffer : base address of the memory buffer.
1647// - length : number of bytes to be transferred.
1648//////////////////////////////////////////////////////////////////////////////////
1649unsigned int _nic_sync_read( const void*    buffer, 
1650                             unsigned int   length ) 
1651{
1652    // To be defined
1653    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1654    // memcpy((void *) buffer, (void *) nic_address, length);
1655    return 0;
1656}
1657//////////////////////////////////////////////////////////////////////////////////
1658// _nic_cma_rx_init()
1659// Returns 0 if success, > 0 if error.
1660//////////////////////////////////////////////////////////////////////////////////
1661unsigned int _nic_cma_rx_init( const void*  buf0,
1662                               const void*  buf1,
1663                               unsigned int length ) 
1664{
1665    // to be defined
1666    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1667    return 0;
1668}
1669//////////////////////////////////////////////////////////////////////////////////
1670// _nic_cma_tx_init()
1671// Returns 0 if success, > 0 if error.
1672//////////////////////////////////////////////////////////////////////////////////
1673unsigned int _nic_cma_tx_init( const void*  buf0,
1674                               const void*  buf1,
1675                               unsigned int length ) 
1676{
1677    // to be defined
1678    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1679    return 0;
1680}
1680//////////////////////////////////////////////////////////////////////////////////
1681// _nic_cma_stop()
1682// Returns 0 if success, > 0 if error.
1683//////////////////////////////////////////////////////////////////////////////////
1684unsigned int _nic_cma_stop()
1685{
1686    // to be defined
1687    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1688    return 0;
1689}
1690
1691
1692//////////////////////////////////////////////////////////////////////////////////
1693//     VciMemCache driver
1694//////////////////////////////////////////////////////////////////////////////////
1695// The VciMemCache device can be accessed through a configuration interface,
1696// as a set of uncached, memory-mapped registers.
1697///////////////////////////////////////////////////////////////////////////////////
1698// The (virtual) base address of the associated segment is:
1699//
1700//       mmc_address = seg_mmc_base + cluster_id * vseg_cluster_increment
1701//
1702////////////////////////////////////////////////////////////////////////////////
1703
1704///////////////////////////////////////////////////////////////////////////////////
1705// _memc_inval()
1706// This function invalidates all cache lines covering a memory buffer defined
1707// by its physical base address and length.
1708// The buffer address MSBs are used to compute the cluster index.
1709///////////////////////////////////////////////////////////////////////////////////
1710void _memc_inval( paddr_t      buf_paddr,
1711                  unsigned int buf_length )
1712{
1713    unsigned int cluster_id    = (unsigned int)((buf_paddr>>32)/(256/NB_CLUSTERS));
1714
1715    unsigned int * mmc_address = (unsigned int *) ((unsigned int)&seg_mmc_base + 
1716                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
1717
1718    // get the lock protecting exclusive access to MEMC
1719    while ( mmc_address[MEMC_LOCK] ) { asm volatile("nop"); }
1720
1721    // write inval arguments
1722    mmc_address[MEMC_ADDR_LO]    = (unsigned int)buf_paddr;
1723    mmc_address[MEMC_ADDR_HI]    = (unsigned int)(buf_paddr>>32);
1724    mmc_address[MEMC_BUF_LENGTH] = buf_length;
1725    mmc_address[MEMC_CMD_TYPE]   = MEMC_CMD_INVAL;
1726
1727    // release the lock protecting MEMC
1728    mmc_address[MEMC_LOCK] = 0;
1729}
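
// Minimal usage sketch (not part of the driver, guarded out of the build): after a
// peripheral has written a buffer through physical addresses (bypassing the coherence
// protocol), the kernel translates the buffer virtual address and invalidates the
// corresponding lines in the memory cache of the owning cluster. For simplicity the
// sketch assumes the buffer does not cross a page boundary.
#if 0
static void example_memc_inval_buffer( unsigned int vaddr,
                                       unsigned int length )
{
    unsigned int ptab = _get_context_slot(CTX_PTAB_ID);   // page table of the calling task
    unsigned int vpn  = vaddr >> 12;
    unsigned int ppn;
    unsigned int flags;
    if ( _v2p_translate( (page_table_t*)ptab, vpn, &ppn, &flags ) ) return;
    _memc_inval( ((paddr_t)ppn << 12) | (vaddr & 0xFFF), length );
}
#endif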
1730
1731///////////////////////////////////////////////////////////////////////////////////
1732// _heap_info()
1733// This function returns the information (virtual base address and size) associated
1734// with the heap of the calling task. It uses the global task index (CTX_GTID_ID,
1735// unique for each GIET task) and the vspace index (CTX_VSID_ID) of the task context.
1736///////////////////////////////////////////////////////////////////////////////////
1737unsigned int _heap_info( unsigned int* vaddr, 
1738                         unsigned int* size ) 
1739{
1740    mapping_header_t * header  = (mapping_header_t *) (&seg_mapping_base);
1741    mapping_task_t * tasks     = _get_task_base(header);
1742    mapping_vobj_t * vobjs     = _get_vobj_base(header);
1743    mapping_vspace_t * vspaces = _get_vspace_base(header);
1744
1745    unsigned int taskid        = _get_context_slot(CTX_GTID_ID);
1746    unsigned int vspaceid      = _get_context_slot(CTX_VSID_ID);
1747
1748    int heap_local_vobjid      = tasks[taskid].heap_vobjid;
1749    if (heap_local_vobjid != -1) 
1750    {
1751        unsigned int vobjheapid = heap_local_vobjid + vspaces[vspaceid].vobj_offset;
1752        *vaddr                  = vobjs[vobjheapid].vaddr;
1753        *size                   = vobjs[vobjheapid].length;
1754        return 0;
1755    }
1756    else 
1757    {
1758        *vaddr = 0;
1759        *size = 0;
1760        return 0;
1761    }
1762}
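
// Minimal usage sketch (not part of the driver, guarded out of the build): retrieving
// the heap descriptor of the calling task. A (vaddr == 0, size == 0) result means that
// no heap vobj was defined for this task in the mapping.
#if 0
static void example_heap_info( void )
{
    unsigned int heap_vaddr;
    unsigned int heap_size;
    _heap_info( &heap_vaddr, &heap_size );
    if ( heap_size == 0 ) _puts("[GIET] no heap defined for this task\n");
}
#endif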
1763
1764// Local Variables:
1765// tab-width: 4
1766// c-basic-offset: 4
1767// c-file-offsets:((innamespace . 0)(inline-open . 0))
1768// indent-tabs-mode: nil
1769// End:
1770// vim: filetype=c:expandtab:shiftwidth=4:tabstop=4:softtabstop=4
1771