source: soft/giet_vm/sys/drivers.c @ 256

Last change on this file since 256 was 255, checked in by meunier, 11 years ago
  • Added a syscall and some user functions to manipulate the Simulation Helper
  • Changed the way the Vseg -> Pseg mapping is made during the boot to better utilize the address space (+ adaptation of the algorithm in memo)
  • Fixed a bug in boot_init (vobj_init): the vobj initialization could only be made for the first application (ptpr was not changed)
File size: 71.0 KB
1///////////////////////////////////////////////////////////////////////////////////
2// File     : drivers.c
3// Date     : 23/05/2013
4// Author   : alain greiner
5// Copyright (c) UPMC-LIP6
6///////////////////////////////////////////////////////////////////////////////////
7// The drivers.c and drivers.h files are part of the GIET-VM kernel.
8//
9// They contain the drivers for the peripherals available in the SoCLib library:
10// - vci_multi_tty
11// - vci_multi_timer
12// - vci_multi_dma
13// - vci_multi_icu
14// - vci_xicu
15// - vci_gcd
16// - vci_frame_buffer
17// - vci_block_device
18// - vci_multi_nic
19// - vci_chbuf_dma
20//
21// For the peripherals replicated in each cluster (ICU, TIMER, XCU, DMA, MMC),
22// the corresponding (virtual) base addresses must be completed by an offset
23// depending on the cluster index.
24//
25// The following global parameters must be defined in the hard_config.h file:
26// - NB_CLUSTERS   
27// - NB_PROCS_MAX 
28// - NB_TIM_CHANNELS   
29// - NB_DMA_CHANNELS     
30// - NB_TTY_CHANNELS
31//
32// The following virtual base addresses must be defined in the giet_vsegs.ld file:
33// - seg_icu_base
34// - seg_xcu_base
35// - seg_tim_base
36// - seg_dma_base
37// - seg_tty_base
38// - seg_gcd_base
39// - seg_fbf_base
40// - seg_ioc_base
41// - seg_nic_base
42// - seg_cma_base
43// - seg_iob_base
44// - seg_mmc_base
45// - vseg_cluster_increment
46///////////////////////////////////////////////////////////////////////////////////
47
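///////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file): one hypothetical set of
// hard_config.h values that satisfies the configuration checks below. The
// numbers are examples only, not the actual platform configuration.
//
//      #define NB_CLUSTERS        4
//      #define NB_PROCS_MAX       4
//      #define NB_TTY_CHANNELS    8
//      #define NB_DMA_CHANNELS    1
//      #define NB_TIM_CHANNELS    4
//      #define NB_IOC_CHANNELS    1
//      #define NB_NIC_CHANNELS    1
//      #define NB_CMA_CHANNELS    1
//      #define USE_XICU           1
//      #define USE_IOB            0
//
// GIET_USE_IOMMU is expected in giet_config.h, and the seg_*_base /
// vseg_cluster_increment symbols in giet_vsegs.ld, as stated above.
///////////////////////////////////////////////////////////////////////////////////
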
48#include <vm_handler.h>
49#include <sys_handler.h>
50#include <giet_config.h>
51#include <drivers.h>
52#include <common.h>
53#include <hwr_mapping.h>
54#include <mips32_registers.h>
55#include <ctx_handler.h>
56
57#if !defined(NB_CLUSTERS)
58# error: You must define NB_CLUSTERS in the hard_config.h file
59#endif
60
61#if (NB_CLUSTERS > 256)
62# error: NB_CLUSTERS cannot be larger than 256!
63#endif
64
65#if !defined(NB_PROCS_MAX)
66# error: You must define NB_PROCS_MAX in the hard_config.h file
67#endif
68
69#if (NB_PROCS_MAX > 8)
70# error: NB_PROCS_MAX cannot be larger than 8!
71#endif
72
73#if !defined(GIET_USE_IOMMU)
74# error: You must define GIET_USE_IOMMU in the giet_config.h file
75#endif
76
77#if !defined(NB_TTY_CHANNELS)
78# error: You must define NB_TTY_CHANNELS in the hard_config.h file
79#endif
80
81#if (NB_TTY_CHANNELS < 1)
82# error: NB_TTY_CHANNELS cannot be smaller than 1!
83#endif
84
85#if !defined(NB_DMA_CHANNELS)
86# error: You must define NB_DMA_CHANNELS in the hard_config.h file
87#endif
88
89#if (NB_DMA_CHANNELS > 8)
90# error: NB_DMA_CHANNELS cannot be larger than 8!
91#endif
92
93#if !defined(NB_TIM_CHANNELS)
94#define NB_TIM_CHANNELS 0
95#endif
96
97#if ( (NB_TIM_CHANNELS + NB_PROCS_MAX) > 32 )
98# error: NB_TIM_CHANNELS + NB_PROCS_MAX cannot be larger than 32
99#endif
100
101#if !defined(NB_IOC_CHANNELS)
102# error: You must define NB_IOC_CHANNELS in the hard_config.h file
103#endif
104
105#if ( NB_IOC_CHANNELS > 8 )
106# error: NB_IOC_CHANNELS cannot be larger than 8
107#endif
108
109#if !defined(NB_NIC_CHANNELS)
110# error: You must define NB_NIC_CHANNELS in the hard_config.h file
111#endif
112
113#if ( NB_NIC_CHANNELS > 8 )
114# error: NB_NIC_CHANNELS cannot be larger than 8
115#endif
116
117#if !defined(NB_CMA_CHANNELS)
118# error: You must define NB_CMA_CHANNELS in the hard_config.h file
119#endif
120
121#if ( NB_CMA_CHANNELS > 8 )
122# error: NB_CMA_CHANNELS cannot be larger than 8
123#endif
124
125#if !defined( USE_XICU )
126# error: You must define USE_XICU in the hard_config.h file
127#endif
128
129#if !defined( USE_IOB )
130# error: You must define USE_IOB in the hard_config.h file
131#endif
132
133
134#define in_unckdata __attribute__((section (".unckdata")))
135
136//////////////////////////////////////////////////////////////////////////////
137//     Timers driver
138//////////////////////////////////////////////////////////////////////////////
139// This peripheral is replicated in all clusters.
140// The timers can be implemented in a vci_timer component or in a vci_xicu
141// component (depending on the USE_XICU parameter).
142// There is one timer (or xicu) component per cluster.
143// There are two types of timers:
144// - "system" timers : one per processor, used for context switch.
145//   local_id in [0, NB_PROCS_MAX-1],
146// - "user" timers : requested by the task in the mapping_info data structure.
147//   For each user timer, the timer_id is stored in the context of the task.
148// The global index is cluster_id * (NB_PROCS_MAX+NB_TIM_CHANNELS) + local_id
149//////////////////////////////////////////////////////////////////////////////
150// The (virtual) base address of the associated segment is:
151//
152//       timer_address = seg_tim_base + cluster_id * vseg_cluster_increment
153//   or  timer_address = seg_xcu_base + cluster_id * vseg_cluster_increment
154//
155////////////////////////////////////////////////////////////////////////////////
156
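//////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original driver): recovering the
// (cluster_id, local_id) pair and the per-cluster virtual base address from
// the global timer index defined above. The helper name is hypothetical, and
// it assumes the seg_*_base / vseg_cluster_increment linker symbols are
// visible here (they are used the same way by the driver functions below).
//////////////////////////////////////////////////////////////////////////////
static inline unsigned int * _timer_example_vbase( unsigned int   global_id,
                                                   unsigned int * local_id )
{
    unsigned int cluster_id = global_id / (NB_PROCS_MAX + NB_TIM_CHANNELS);
    *local_id               = global_id % (NB_PROCS_MAX + NB_TIM_CHANNELS);

#if USE_XICU
    return (unsigned int *) ((unsigned int) &seg_xcu_base +
                             (cluster_id * (unsigned int) &vseg_cluster_increment));
#else
    return (unsigned int *) ((unsigned int) &seg_tim_base +
                             (cluster_id * (unsigned int) &vseg_cluster_increment));
#endif
}
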
157// User Timer signaling variables
158
159#if (NB_TIM_CHANNELS > 0)
160in_unckdata volatile unsigned char _user_timer_event[NB_CLUSTERS * NB_TIM_CHANNELS] 
161                            = { [0 ... ((NB_CLUSTERS * NB_TIM_CHANNELS) - 1)] = 0 };
162#endif
163
164//////////////////////////////////////////////////////////////////////////////
165//     _timer_start()
166// This function activates a timer in the vci_timer (or vci_xicu) component
167// by writing in the proper register the period value.
168// It can be used both by the kernel to initialise a "system" timer,
169// and by a task (through a system call) to configure a "user" timer.
170// Returns 0 if success, > 0 if error.
171//////////////////////////////////////////////////////////////////////////////
172unsigned int _timer_start( unsigned int cluster_id, 
173                           unsigned int local_id, 
174                           unsigned int period) 
175{
176    // parameters checking
177    if (cluster_id >= NB_CLUSTERS)  return 1;
178    if (local_id >= NB_TIM_CHANNELS)  return 2;
179
180#if USE_XICU
181    unsigned int* timer_address = (unsigned int *) ((unsigned int)&seg_xcu_base + 
182                                  (cluster_id * (unsigned int)&vseg_cluster_increment));
183
184    timer_address[XICU_REG(XICU_PTI_PER, local_id)] = period;
185#else
186    unsigned int* timer_address = (unsigned int *) ((unsigned int)&seg_tim_base + 
187                                  (cluster_id * (unsigned int)&vseg_cluster_increment));
188
189    timer_address[local_id * TIMER_SPAN + TIMER_PERIOD] = period;
190    timer_address[local_id * TIMER_SPAN + TIMER_MODE] = 0x3;
191#endif
192    return 0;
193}
194
195//////////////////////////////////////////////////////////////////////////////
196//     _timer_stop()
197// This function deactivates a timer in the vci_timer (or vci_xicu) component
198// by writing in the proper register.
199// Returns 0 if success, > 0 if error.
200//////////////////////////////////////////////////////////////////////////////
201unsigned int _timer_stop( unsigned int cluster_id, 
202                          unsigned int local_id) 
203{
204    // parameters checking
205    if (cluster_id >= NB_CLUSTERS)  return 1;
206    if (local_id >= NB_TIM_CHANNELS)  return 2;
207
208#if USE_XICU
209    unsigned int * timer_address = (unsigned int *) ((unsigned int)&seg_xcu_base + 
210                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
211
212    timer_address[XICU_REG(XICU_PTI_PER, local_id)] = 0;
213#else
214    unsigned int* timer_address = (unsigned int *) ((unsigned int)&seg_tim_base + 
215                                  (cluster_id * (unsigned int)&vseg_cluster_increment));
216
217    timer_address[local_id * TIMER_SPAN + TIMER_MODE] = 0;
218#endif
219    return 0;
220}
221
222//////////////////////////////////////////////////////////////////////////////
223//     _timer_reset_irq()
224// This function acknowledges a timer interrupt in the vci_timer (or vci_xicu)
225// component by reading/writing in the proper register.
226// It can be used both by isr_switch() for a "system" timer,
227// and by _isr_timer() for a "user" timer.
228// Returns 0 if success, > 0 if error.
229//////////////////////////////////////////////////////////////////////////////
230unsigned int _timer_reset_irq( unsigned int cluster_id, 
231                               unsigned int local_id ) 
232{
233    // parameters checking
234    if (cluster_id >= NB_CLUSTERS)  return 1;
235    if (local_id >= NB_TIM_CHANNELS)  return 2;
236
237#if USE_XICU
238    unsigned int * timer_address = (unsigned int *) ((unsigned int)&seg_xcu_base +
239                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
240
241    unsigned int bloup = timer_address[XICU_REG(XICU_PTI_ACK, local_id)];
242    bloup++; // to avoid a warning
243#else
244    unsigned int * timer_address = (unsigned int *) ((unsigned int)&seg_tim_base + 
245                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
246
247    timer_address[local_id * TIMER_SPAN + TIMER_RESETIRQ] = 0;
248#endif
249    return 0;
250}
251
252///////////////////////////////////////////////////////////////////////
253// _timer_reset_irq_cpt()
254///////////////////////////////////////////////////////////////////////
255// This function restarts the period at the end of which
256// an interrupt is sent: re-writing the period in the proper
257// register causes the countdown to restart.
258// The period value is read from the same (TIMER_PERIOD) register,
259// which is why the code appears to do nothing useful (it reads a
260// value from a register and writes it back to the same register).
261// This function is called during a context switch (user or preemptive).
262///////////////////////////////////////////////////////////////////////
263unsigned int _timer_reset_irq_cpt( unsigned int cluster_id, 
264                                   unsigned int local_id) {
265    // parameters checking
266    if (cluster_id >= NB_CLUSTERS) {
267        return 1;
268    }
269    if (local_id >= NB_TIM_CHANNELS) {
270        return 2;
271    }
272
273#if USE_XICU
274    unsigned int * timer_address = (unsigned int *) ((unsigned int) &seg_xcu_base + 
275                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
276
277    unsigned int timer_period = timer_address[XICU_REG(XICU_PTI_PER, local_id)];
278
279    // we write 0 first because, if the timer is currently running,
280    // simply re-writing the period does not reset the corresponding counter
281    timer_address[XICU_REG(XICU_PTI_PER, local_id)] = 0;
282    timer_address[XICU_REG(XICU_PTI_PER, local_id)] = timer_period;
283#else
284    // We suppose that the TIMER_MODE register value is 0x3
285    unsigned int * timer_address = (unsigned int *) ((unsigned int)&seg_tim_base + 
286                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
287
288    unsigned int timer_period = timer_address[local_id * TIMER_SPAN + TIMER_PERIOD];
289
290    timer_address[local_id * TIMER_SPAN + TIMER_PERIOD] = timer_period;
291#endif
292
293    return 0;
294}
295
296/////////////////////////////////////////////////////////////////////////////////
297//     VciMultiTty driver
298/////////////////////////////////////////////////////////////////////////////////
299// There is only one multi_tty controller in the architecture.
300// The total number of TTYs is defined by the configuration parameter NB_TTY_CHANNELS.
301// The "system" terminal is TTY[0].
302// The "user" TTYs are allocated to applications by the GIET in the boot phase,
303// as defined in the mapping_info data structure. The corresponding tty_id must
304// be stored in the context of the task by the boot code.
305// The TTY address is : seg_tty_base + tty_id*TTY_SPAN
306/////////////////////////////////////////////////////////////////////////////////
307
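//////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original driver), assuming the
// _tty_read() / _tty_write() prototypes are visible (e.g. from drivers.h,
// included above): a kernel-side helper that echoes any pending character on
// the calling task's terminal. The helper name is hypothetical.
//////////////////////////////////////////////////////////////////////////////
static inline void _tty_example_echo()
{
    char c;

    // _tty_read() is non-blocking: it returns 1 only when the ISR has filled
    // the _tty_get_buf[] kernel buffer for this task's terminal.
    if ( _tty_read( &c, 1 ) == 1 )
    {
        _tty_write( &c, 1 );
    }
}
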
308// TTY variables
309in_unckdata volatile unsigned char _tty_get_buf[NB_TTY_CHANNELS];
310in_unckdata volatile unsigned char _tty_get_full[NB_TTY_CHANNELS] 
311                                     = { [0 ... NB_TTY_CHANNELS - 1] = 0 };
312in_unckdata unsigned int _tty_put_lock = 0;  // protect kernel TTY[0]
313
314////////////////////////////////////////////////////////////////////////////////
315//      _tty_error()
316////////////////////////////////////////////////////////////////////////////////
317void _tty_error(unsigned int tty_id, unsigned int task_id) 
318{
319    unsigned int proc_id = _procid();
320
321    _get_lock(&_tty_put_lock);
322    if (tty_id == 0xFFFFFFFF) _puts("\n[GIET ERROR] no TTY assigned to the task ");
323    else                      _puts("\n[GIET ERROR] TTY index too large for task ");
324    _putd(task_id);
325    _puts(" on processor ");
326    _putd(proc_id);
327    _puts("\n");
328    _release_lock(&_tty_put_lock);
329}
330
331
332/////////////////////////////////////////////////////////////////////////////////
333//      _tty_write()
334// Write one or several characters directly from a fixed-length user buffer to
335// the TTY_WRITE register of the TTY controller.
336// It doesn't use the TTY_PUT_IRQ interrupt and the associated kernel buffer.
337// This is a non-blocking call: it tests the TTY_STATUS register, and stops
338// the transfer as soon as the TTY_STATUS[WRITE] bit is set.
339// The function returns the number of characters that have been written.
340/////////////////////////////////////////////////////////////////////////////////
341unsigned int _tty_write(const char * buffer, 
342                        unsigned int length) 
343{
344    unsigned int nwritten;
345    unsigned int tty_id = _get_context_slot(CTX_TTY_ID);
346    unsigned int* tty_address = (unsigned int *) &seg_tty_base;
347
348    for (nwritten = 0; nwritten < length; nwritten++) 
349    {
350        // check tty's status
351        if ((tty_address[tty_id * TTY_SPAN + TTY_STATUS] & 0x2) == 0x2) break;
352        tty_address[tty_id * TTY_SPAN + TTY_WRITE] = (unsigned int) buffer[nwritten];
353    }
354    return nwritten;
355}
356
357//////////////////////////////////////////////////////////////////////////////
358//      _tty_read()
359// This non-blocking function uses the TTY_GET_IRQ[tty_id] interrupt and
360// the associated kernel buffer, which has been written by the ISR.
361// It gets the TTY terminal index from the context of the current task.
362// It fetches one single character from the _tty_get_buf[tty_id] kernel
363// buffer, writes this character to the user buffer, and resets the
364// _tty_get_full[tty_id] buffer.
365// The length argument is not used.
366// Returns 0 if the kernel buffer is empty, 1 if the buffer is full.
367//////////////////////////////////////////////////////////////////////////////
368unsigned int _tty_read(char * buffer, 
369                       unsigned int length) 
370{
371    unsigned int tty_id = _get_context_slot(CTX_TTY_ID);
372
373    if (_tty_get_full[tty_id] == 0) 
374    {
375        return 0;
376    }
377    else 
378    {
379        *buffer = _tty_get_buf[tty_id];
380        _tty_get_full[tty_id] = 0;
381        return 1;
382    }
383}
384
385////////////////////////////////////////////////////////////////////////////////
386//     _tty_get_char()
387// This function is used by the _isr_tty to read a character in the TTY
388// terminal defined by the tty_id argument. The character is stored
389// in the requested buffer, and the IRQ is acknowledged.
390// Returns 0 if success, 1 if tty_id too large.
391////////////////////////////////////////////////////////////////////////////////
392unsigned int _tty_get_char(unsigned int tty_id, 
393                           unsigned char * buffer) 
394{
395    // checking argument
396    if (tty_id >= NB_TTY_CHANNELS) { return 1; }
397
398    // compute terminal base address
399    unsigned int * tty_address = (unsigned int *) &seg_tty_base; 
400
401    *buffer = (unsigned char) tty_address[tty_id * TTY_SPAN + TTY_READ];
402    return 0;
403}
404
405
406////////////////////////////////////////////////////////////////////////////////
407//     VciMultiIcu or VciXicu driver
408////////////////////////////////////////////////////////////////////////////////
409// This hardware component is replicated in all clusters.
410// There is one vci_multi_icu (or vci_xicu) component per cluster,
411// and the number of ICU channels is equal to NB_PROCS_MAX,
412// because there is one private interrupt controller per processor.
413////////////////////////////////////////////////////////////////////////////////
414// The (virtual) base address of the associated segment is:
415//
416//       icu_address = seg_icu_base + cluster_id * vseg_cluster_increment
417//  or   icu_address = seg_xcu_base + cluster_id * vseg_cluster_increment
418//
419////////////////////////////////////////////////////////////////////////////////
420
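//////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original driver): how an
// interrupt dispatcher could use _icu_get_index() (defined below, prototype
// assumed visible from drivers.h) to obtain the highest priority pending
// interrupt for the local processor. The helper name, and the assumption
// that procid = cluster_id * NB_PROCS_MAX + local processor index, are
// illustrative only.
//////////////////////////////////////////////////////////////////////////////
static inline unsigned int _icu_example_get_irq()
{
    unsigned int procid     = _procid();
    unsigned int cluster_id = procid / NB_PROCS_MAX;
    unsigned int proc_id    = procid % NB_PROCS_MAX;
    unsigned int irq_index  = 32;    // 32 means "no active interrupt" (XICU case)

    _icu_get_index( cluster_id, proc_id, &irq_index );
    return irq_index;
}
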
421////////////////////////////////////////////////////////////////////////////////
422//     _icu_set_mask()
423// This function can be used with both the vci_xicu & vci_multi_icu components.
424// It sets the mask register for the ICU channel identified by the cluster index
425// and the processor index: '1' bits are set / '0' bits are left unmodified.
426// Returns 0 if success, > 0 if error.
427////////////////////////////////////////////////////////////////////////////////
428unsigned int _icu_set_mask( unsigned int cluster_id,
429                            unsigned int proc_id,
430                            unsigned int value,
431                            unsigned int is_PTI) 
432{
433    // parameters checking
434    if (cluster_id >= NB_CLUSTERS) return 1; 
435    if (proc_id >= NB_PROCS_MAX)   return 1; 
436
437#if USE_XICU
438    unsigned int * icu_address = (unsigned int *) ((unsigned int)&seg_xcu_base + 
439                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
440    if (is_PTI) 
441    {
442        icu_address[XICU_REG(XICU_MSK_PTI_ENABLE, proc_id)] = value;
443    }
444    else 
445    {
446        icu_address[XICU_REG(XICU_MSK_HWI_ENABLE, proc_id)] = value;
447    }
448#else
449    unsigned int * icu_address = (unsigned int *) ((unsigned int)&seg_icu_base + 
450                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
451
452    icu_address[proc_id * ICU_SPAN + ICU_MASK_SET] = value; 
453#endif
454    return 0;
455}
456
457////////////////////////////////////////////////////////////////////////////////
458//     _icu_get_index()
459// This function can be used with both the vci_xicu & vci_multi_icu components.
460// It returns the index of the highest priority (smallest index) active interrupt.
461// The ICU channel is identified by the cluster index and the processor index.
462// Returns 0 if success, > 0 if error.
463////////////////////////////////////////////////////////////////////////////////
464unsigned int _icu_get_index( unsigned int cluster_id, 
465                             unsigned int proc_id, 
466                             unsigned int * buffer) 
467{
468    // parameters checking
469    if (cluster_id >= NB_CLUSTERS)  return 1;
470    if (proc_id >= NB_PROCS_MAX)    return 1;
471
472#if USE_XICU
473    unsigned int * icu_address = (unsigned int *) ((unsigned int)&seg_xcu_base + 
474                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
475
476    unsigned int prio = icu_address[XICU_REG(XICU_PRIO, proc_id)];
477    unsigned int pti_ok = (prio & 0x00000001);
478    unsigned int hwi_ok = (prio & 0x00000002);
479    unsigned int swi_ok = (prio & 0x00000004);
480    unsigned int pti_id = (prio & 0x00001F00) >> 8;
481    unsigned int hwi_id = (prio & 0x001F0000) >> 16;
482    unsigned int swi_id = (prio & 0x1F000000) >> 24;
483    if      (pti_ok) { *buffer = pti_id; }
484    else if (hwi_ok) { *buffer = hwi_id; }
485    else if (swi_ok) { *buffer = swi_id; }
486    else             { *buffer = 32; }
487#else
488    unsigned int * icu_address = (unsigned int *) ((unsigned int)&seg_icu_base + 
489                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
490
491    *buffer = icu_address[proc_id * ICU_SPAN + ICU_IT_VECTOR]; 
492#endif
493    return 0;
494}
495
496////////////////////////////////////////////////////////////////////////////////
497//     VciGcd driver
498////////////////////////////////////////////////////////////////////////////////
499// The GCD (Greatest Common Divisor) peripheral is a -very- simple hardware
500// coprocessor computing the GCD of two 32-bit integers.
501// It has no DMA capability.
502////////////////////////////////////////////////////////////////////////////////
503
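//////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original driver): the intended
// calling pattern for the two accessors below (prototypes assumed visible
// from drivers.h). The register indexes (GCD_OPA, GCD_OPB, GCD_START,
// GCD_STATUS) and the convention "result available in GCD_OPA when
// GCD_STATUS reads zero" are assumptions about the SoCLib component, given
// here only as an example.
//////////////////////////////////////////////////////////////////////////////
static inline unsigned int _gcd_example( unsigned int opa,
                                         unsigned int opb )
{
    unsigned int status = 1;
    unsigned int result = 0;

    _gcd_write( GCD_OPA,   opa );   // first operand
    _gcd_write( GCD_OPB,   opb );   // second operand
    _gcd_write( GCD_START, 1   );   // start the computation

    while ( status ) _gcd_read( GCD_STATUS, &status );   // busy waiting

    _gcd_read( GCD_OPA, &result );  // read the result
    return result;
}
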
504////////////////////////////////////////////////////////////////////////////////
505//     _gcd_write()
506// Write a 32-bit word to a memory-mapped register of the GCD coprocessor.
507// Returns 0 if success, > 0 if error.
508////////////////////////////////////////////////////////////////////////////////
509unsigned int _gcd_write( unsigned int register_index, 
510                         unsigned int value) 
511{
512    // parameters checking
513    if (register_index >= GCD_END)  return 1; 
514
515    unsigned int * gcd_address = (unsigned int *) &seg_gcd_base;
516
517    gcd_address[register_index] = value; // write word
518    return 0;
519}
520
521
522////////////////////////////////////////////////////////////////////////////////
523//     _gcd_read()
524// Read a 32-bit word from a memory-mapped register of the GCD coprocessor.
525// Returns 0 if success, > 0 if error.
526////////////////////////////////////////////////////////////////////////////////
527unsigned int _gcd_read( unsigned int register_index, 
528                        unsigned int * buffer ) 
529{
530    // parameters checking
531    if (register_index >= GCD_END)  return 1;
532
533    unsigned int * gcd_address = (unsigned int *) &seg_gcd_base;
534
535    *buffer = gcd_address[register_index]; // read word
536    return 0;
537}
538
539////////////////////////////////////////////////////////////////////////////////
540// VciBlockDevice driver
541////////////////////////////////////////////////////////////////////////////////
542// The VciBlockDevice is a single-channel external storage controller.
543//
544// The IOMMU can be activated or not:
545//
546// 1) When the IOMMU is used, a fixed size 2Mbytes vseg is allocated to
547// the IOC peripheral, in the I/O virtual space, and the user buffer is
548// dynamically remapped in the IOMMU page table. The corresponding entry
549// in the IOMMU PT1 is defined by the kernel _ioc_iommu_ix1 variable.
550// The number of PT2 entries (i.e. the number of pages to be unmapped) is
551// dynamically computed and stored in the kernel _ioc_iommu_npages variable.
552// It cannot be larger than 512.
553// The user buffer is unmapped by the _ioc_completed() function when
554// the transfer is completed.
555//
556// 2) If the IOMMU is not used, we check that the user buffer is mapped to a
557// contiguous physical buffer (this is generally true because the user space
558// page tables are statically constructed to use contiguous physical memory).
559//
560// Finally, the memory buffer must fulfill the following conditions:
561// - The user buffer must be word aligned,
562// - The user buffer must be mapped in user address space,
563// - The user buffer must be writable in case of (to_mem) access,
564// - The total number of physical pages occupied by the user buffer cannot
565//   be larger than 512 pages if the IOMMU is activated,
566// - All physical pages occupied by the user buffer must be contiguous
567//   if the IOMMU is not activated.
568// An error code is returned if these conditions are not verified.
569//
570// As the IOC component can be used by several programs running in parallel,
571// the _ioc_lock variable guarantees exclusive access to the device. The
572// _ioc_read() and _ioc_write() functions use atomic LL/SC to get the lock
573// and set _ioc_lock to a non-zero value. The _ioc_write() and _ioc_read()
574// functions are blocking, polling the _ioc_lock variable until the device is
575// available.
576// When the transfer is completed, the ISR routine activated by the IOC IRQ
577// sets the _ioc_done variable to a non-zero value. Possible address errors
578// detected by the IOC peripheral are reported by the ISR in the _ioc_status
579// variable.
580// The _ioc_completed() function polls the _ioc_done variable, waiting for
581// transfer completion. When the completion is signaled, the _ioc_completed()
582// function resets the _ioc_done variable to zero, and releases the _ioc_lock
583// variable.
584//
585// In a multi-processing environment, this polling policy should be replaced by
586// a descheduling policy for the requesting process.
587///////////////////////////////////////////////////////////////////////////////
588
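//////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original driver): the typical
// calling sequence for a block read, as described above (prototypes assumed
// visible from drivers.h). The helper name is hypothetical.
//////////////////////////////////////////////////////////////////////////////
static inline unsigned int _ioc_example_read( unsigned int lba,
                                              void *       buf,
                                              unsigned int count )
{
    // launch the transfer: takes the _ioc_lock and configures the peripheral
    if ( _ioc_read( lba, buf, count ) ) return 1;

    // wait for the ISR to signal completion, then release the _ioc_lock
    return _ioc_completed();
}
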
589// IOC global variables
590in_unckdata volatile unsigned int _ioc_status = 0;
591in_unckdata volatile unsigned int _ioc_done = 0;
592in_unckdata unsigned int _ioc_lock = 0;
593in_unckdata unsigned int _ioc_iommu_ix1 = 0;
594in_unckdata unsigned int _ioc_iommu_npages; 
595
596///////////////////////////////////////////////////////////////////////////////
597//      _ioc_access()
598// This function transfers data between a memory buffer and the block device.
599// The buffer length is (count*block_size) bytes.
600// Arguments are:
601// - to_mem     : from external storage to memory when non 0
602// - lba        : first block index on the external storage.
603// - user_vaddr : virtual base address of the memory buffer.
604// - count      : number of blocks to be transfered.
605// Returns 0 if success, > 0 if error.
606///////////////////////////////////////////////////////////////////////////////
607unsigned int _ioc_access( unsigned int to_mem,
608                          unsigned int lba,
609                          unsigned int user_vaddr,
610                          unsigned int count) 
611{
612    unsigned int user_vpn_min;     // first virtual page index in user space
613    unsigned int user_vpn_max;     // last virtual page index in user space
614    unsigned int vpn;              // current virtual page index in user space
615    unsigned int ppn;              // physical page number
616    unsigned int flags;            // page protection flags
617    unsigned int ix2;              // page index in the IOMMU PT2 page table
618    unsigned int ppn_first;        // first physical page number for user buffer
619    unsigned int buf_xaddr = 0;    // user buffer virtual address in IO space (if IOMMU)
620    paddr_t      buf_paddr = 0;    // user buffer physical address (if no IOMMU),
621
622    // check buffer alignment
623    if ((unsigned int) user_vaddr & 0x3)
624    {
625        _get_lock(&_tty_put_lock);
626        _puts("[GIET ERROR] in _ioc_access() : user buffer not word aligned\n");
627        _release_lock(&_tty_put_lock);
628        return 1; 
629    }
630
631    unsigned int * ioc_address = (unsigned int *) &seg_ioc_base ;
632
633    unsigned int block_size = ioc_address[BLOCK_DEVICE_BLOCK_SIZE];
634    unsigned int length = count * block_size;
635
636    // get user space page table virtual address
637    unsigned int user_pt_vbase = _get_context_slot(CTX_PTAB_ID);
638
639    user_vpn_min = user_vaddr >> 12;
640    user_vpn_max = (user_vaddr + length - 1) >> 12;
641
642    // loop on all virtual pages covering the user buffer
643    for (vpn = user_vpn_min, ix2 = 0 ; 
644         vpn <= user_vpn_max ; 
645         vpn++, ix2++ ) 
646    {
647        // get ppn and flags for each vpn
648        unsigned int ko = _v2p_translate((page_table_t *) user_pt_vbase,
649                                          vpn,
650                                          &ppn,
651                                          &flags);
652        // check access rights
653        if (ko)
654        {
655            _get_lock(&_tty_put_lock);
656            _puts("[GIET ERROR] in _ioc_access() : user buffer unmapped\n");
657            _release_lock(&_tty_put_lock);
658            return 1; 
659        }
660        if ((flags & PTE_U) == 0) 
661        {
662            _get_lock(&_tty_put_lock);
663            _puts("[GIET ERROR] in _ioc_access() : user buffer not in user space\n");
664            _release_lock(&_tty_put_lock);
665            return 1; 
666        }
667        if (((flags & PTE_W) == 0 ) && to_mem)
668        {
669            _get_lock(&_tty_put_lock);
670            _puts("[GIET ERROR] in _ioc_access() : user buffer not writable\n");
671            _release_lock(&_tty_put_lock);
672            return 1; 
673        }
674
675        // save first ppn value
676        if (ix2 == 0) ppn_first = ppn;
677
678        if ( GIET_USE_IOMMU && USE_IOB ) // user buffer remapped in the I/O space
679        {
680            // check buffer length < 2 Mbytes
681            if (ix2 > 511) 
682            {
683                _get_lock(&_tty_put_lock);
684                _puts("[GIET ERROR] in _ioc_access() : user buffer > 2 Mbytes\n");
685                _release_lock(&_tty_put_lock);
686                return 1; 
687            }
688
689            // map the physical page in IOMMU page table
690            _iommu_add_pte2( _ioc_iommu_ix1,    // PT1 index
691                             ix2,               // PT2 index
692                             ppn,               // Physical page number   
693                             flags);            // Protection flags
694
695            // compute user buffer virtual address in IO space
696            buf_xaddr = (_ioc_iommu_ix1) << 21 | (user_vaddr & 0xFFF);
697        }
698        else            // No IOMMU
699        {
700            // check that physical pages are contiguous
701            if ((ppn - ppn_first) != ix2) 
702            {
703                _get_lock(&_tty_put_lock);
704                _puts("[GIET ERROR] in _ioc_access() : split physical user buffer\n");
705                _release_lock(&_tty_put_lock);
706                return 1; 
707            }
708
709            // compute user buffer physical address
710            buf_paddr = (((paddr_t)ppn_first) << 12) | (user_vaddr & 0xFFF);
711        }
712    } // end for vpn
713
714    // register the number of pages to be unmapped
715    _ioc_iommu_npages = (user_vpn_max - user_vpn_min) + 1;
716
717    // If no IOB, invalidate L1 cache in case of memory write
718    if ( to_mem && (USE_IOB == 0) ) _dcache_buf_invalidate((void *) user_vaddr, length);
719
720#if GIET_DEBUG_IOC_DRIVER
721_get_lock(&_tty_put_lock);
722_puts("\n[GIET DEBUG]  IOC_ACCESS at cycle ");
723_putd( _proctime() );
724_puts("\n - proc_id         = ");
725_putd( _procid() );
726_puts("\n - ioc_vbase       = ");
727_putx( (unsigned int)ioc_address );
728_puts("\n - psched_vbase    = ");
729_putx( (unsigned int)_get_sched() );
730_puts("\n - pt_vbase        = ");
731_putx( user_pt_vbase );
732_puts("\n - user_buf_vbase  = ");
733_putx( user_vaddr );
734_puts("\n - user_buf_length = ");
735_putx( length );
736_puts("\n - user_buf_paddr  = ");
737_putl( buf_paddr );
738_puts("\n - user_buf_xaddr  = ");
739_putx( buf_xaddr );
740_puts("\n");
741_release_lock(&_tty_put_lock);
742#endif
743
744    // If IOB, invalidate L2 cache in case of memory write
745    if ( to_mem && USE_IOB ) _memc_inval( buf_paddr, length );
746   
747    // get the lock on ioc device
748    _get_lock(&_ioc_lock);
749
750    // peripheral configuration 
751    if ( GIET_USE_IOMMU && USE_IOB ) 
752    {
753        ioc_address[BLOCK_DEVICE_BUFFER] = buf_xaddr;
754    }
755    else
756    {
757        ioc_address[BLOCK_DEVICE_BUFFER]     = (unsigned int)buf_paddr;
758        ioc_address[BLOCK_DEVICE_BUFFER_EXT] = (unsigned int)(buf_paddr>>32);
759    }
760    ioc_address[BLOCK_DEVICE_COUNT] = count;
761    ioc_address[BLOCK_DEVICE_LBA] = lba;
762    if (to_mem == 0) 
763    {
764        ioc_address[BLOCK_DEVICE_OP] = BLOCK_DEVICE_WRITE;
765    }
766    else 
767    {
768        ioc_address[BLOCK_DEVICE_OP] = BLOCK_DEVICE_READ;
769    }
770    return 0;
771}
772
773/////////////////////////////////////////////////////////////////////////////////
774// _ioc_completed()
775//
776// This function checks completion of an I/O transfer and reports errors.
777// As it is a blocking call, the processor is stalled.
778// If the virtual memory is activated, the pages mapped in the I/O virtual
779// space are unmapped, and the IOB TLB is cleared.
780// Returns 0 if success, > 0 if error.
781/////////////////////////////////////////////////////////////////////////////////
782unsigned int _ioc_completed() 
783{
784    unsigned int ret;
785    unsigned int ix2;
786
787    // busy waiting
788    while (_ioc_done == 0) { asm volatile("nop"); }
789
790#if GIET_DEBUG_IOC_DRIVER
791_get_lock(&_tty_put_lock);
792_puts("\n[GIET DEBUG]  IOC_COMPLETED at cycle ");
793_putd( _proctime() );
794_puts("\n - proc_id         = ");
795_putd( _procid() );
796_puts("\n");
797_release_lock(&_tty_put_lock);
798#endif
799
800    // unmap the buffer from IOMMU page table if IOMMU is activated
801    if ( GIET_USE_IOMMU && USE_IOB ) 
802    {
803        unsigned int * iob_address = (unsigned int *) &seg_iob_base;
804
805        for (ix2 = 0; ix2 < _ioc_iommu_npages; ix2++) 
806        {
807            // unmap the page in IOMMU page table
808            _iommu_inval_pte2(
809                    _ioc_iommu_ix1, // PT1 index
810                    ix2 );          // PT2 index
811
812            // clear IOMMU TLB
813            iob_address[IOB_INVAL_PTE] = (_ioc_iommu_ix1 << 21) | (ix2 << 12); 
814        }
815    }
816
817    // test IOC status
818    if ((_ioc_status != BLOCK_DEVICE_READ_SUCCESS)
819            && (_ioc_status != BLOCK_DEVICE_WRITE_SUCCESS)) ret = 1; // error
820    else                                                    ret = 0; // success
821
822    // reset synchronization variables
823    _ioc_done = 0;
824    asm volatile("sync");
825    _ioc_lock = 0;
826
827    return ret;
828}
829
830
831///////////////////////////////////////////////////////////////////////////////
832//     _ioc_read()
833// Transfer data from the block device to a memory buffer in user space.
834// - lba    : first block index on the block device
835// - buffer : base address of the memory buffer (must be word aligned)
836// - count  : number of blocks to be transfered.
837// Returns 0 if success, > 0 if error.
838///////////////////////////////////////////////////////////////////////////////
839unsigned int _ioc_read( unsigned int lba, 
840                        void * buffer, 
841                        unsigned int count) 
842{
843    return _ioc_access(
844            1,        // read access
845            lba,
846            (unsigned int) buffer,
847            count);
848}
849
850
851///////////////////////////////////////////////////////////////////////////////
852//     _ioc_write()
853// Transfer data from a memory buffer in user space to the block device.
854// - lba    : first block index on the block device
855// - buffer : base address of the memory buffer (must be word aligned)
856// - count  : number of blocks to be transfered.
857// Returns 0 if success, > 0 if error.
858///////////////////////////////////////////////////////////////////////////////
859unsigned int _ioc_write( unsigned int lba, 
860                         const void * buffer, 
861                         unsigned int count) 
862{
863    return _ioc_access(
864            0, // write access
865            lba,
866            (unsigned int) buffer,
867            count);
868}
869
870
871///////////////////////////////////////////////////////////////////////////////
872//     _ioc_get_status()
873// This function returns the transfer status, and acknowledges the IRQ.
874// Returns 0 if success, > 0 if error.
875///////////////////////////////////////////////////////////////////////////////
876unsigned int _ioc_get_status(unsigned int * status) 
877{
878    // get IOC base address
879    unsigned int * ioc_address = (unsigned int *) &seg_ioc_base;
880
881    *status = ioc_address[BLOCK_DEVICE_STATUS]; // read status & reset IRQ
882    return 0;
883}
884
885
886///////////////////////////////////////////////////////////////////////////////
887//     _ioc_get_block_size()
888// This function returns the block_size with which the IOC has been configured.
889///////////////////////////////////////////////////////////////////////////////
890unsigned int _ioc_get_block_size() 
891{
892    // get IOC base address
893    unsigned int * ioc_address = (unsigned int *) &seg_ioc_base;
894   
895    return  ioc_address[BLOCK_DEVICE_BLOCK_SIZE];
896}
897
898
899//////////////////////////////////////////////////////////////////////////////////
900// VciMultiDma driver
901//////////////////////////////////////////////////////////////////////////////////
902// The DMA controllers are physically distributed in the clusters.
903// There are (NB_CLUSTERS * NB_DMA_CHANNELS) channels, indexed by a global index:
904//        dma_id = cluster_id * NB_DMA_CHANNELS + loc_id
905//
906// As a DMA channel is a private resource allocated to a task,
907// there is no lock protecting exclusive access to the channel.
908// The signaling between the OS and the DMA uses the _dma_done[dma_id]
909// synchronisation variables (set by the ISR, and reset by the OS).
910// The transfer status is copied by the ISR in the _dma_status[dma_id] variables.
911//////////////////////////////////////////////////////////////////////////////////
912// The (virtual) base address of the associated segment is:
913//
914//       dma_address = seg_dma_base + cluster_id * vseg_cluster_increment
915//
916////////////////////////////////////////////////////////////////////////////////
917
918#if NB_DMA_CHANNELS > 0
919
920// in_unckdata unsigned int            _dma_lock[NB_DMA_CHANNELS * NB_CLUSTERS]
921// = { [0 ... (NB_DMA_CHANNELS * NB_CLUSTERS) - 1] = 0 };
922
923in_unckdata volatile unsigned int    _dma_done[NB_DMA_CHANNELS * NB_CLUSTERS] 
924        = { [0 ... (NB_DMA_CHANNELS * NB_CLUSTERS) - 1] = 0 };
925in_unckdata volatile unsigned int _dma_status[NB_DMA_CHANNELS * NB_CLUSTERS];
926in_unckdata unsigned int _dma_iommu_ix1 = 1;
927in_unckdata unsigned int _dma_iommu_npages[NB_DMA_CHANNELS * NB_CLUSTERS];
928#endif
929
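//////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original driver): the ISR side of the
// _dma_done[] / _dma_status[] handshake described above. The real DMA ISR is
// not in this file; this hypothetical helper only restates the protocol,
// using the driver functions below (prototypes assumed from drivers.h).
//////////////////////////////////////////////////////////////////////////////
#if NB_DMA_CHANNELS > 0
static inline void _dma_example_isr( unsigned int cluster_id,
                                     unsigned int channel_id )
{
    unsigned int dma_id = cluster_id * NB_DMA_CHANNELS + channel_id;
    unsigned int status;

    // read the channel status, then acknowledge the IRQ
    _dma_get_status( cluster_id, channel_id, &status );
    _dma_reset_irq( cluster_id, channel_id );

    // signal completion to the task polling in _dma_completed()
    _dma_status[dma_id] = status;
    asm volatile("sync");
    _dma_done[dma_id] = 1;
}
#endif
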
930//////////////////////////////////////////////////////////////////////////////////
931// _dma_reset_irq()
932//////////////////////////////////////////////////////////////////////////////////
933unsigned int _dma_reset_irq( unsigned int cluster_id, 
934                             unsigned int channel_id) 
935{
936#if NB_DMA_CHANNELS > 0
937    // parameters checking
938    if (cluster_id >= NB_CLUSTERS)  return 1;
939    if (channel_id >= NB_DMA_CHANNELS)  return 1; 
940
941    // compute DMA base address
942    unsigned int * dma_address = (unsigned int *) ((unsigned int)&seg_dma_base + 
943                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
944
945    dma_address[channel_id * DMA_SPAN + DMA_RESET] = 0;           
946    return 0;
947#else
948    return -1;
949#endif
950}
951
952//////////////////////////////////////////////////////////////////////////////////
953// _dma_get_status()
954//////////////////////////////////////////////////////////////////////////////////
955unsigned int _dma_get_status( unsigned int cluster_id, 
956                              unsigned int channel_id, 
957                              unsigned int * status) 
958{
959#if NB_DMA_CHANNELS > 0
960    // parameters checking
961    if (cluster_id >= NB_CLUSTERS)  return 1;
962    if (channel_id >= NB_DMA_CHANNELS)  return 1;
963
964    // compute DMA base address
965    unsigned int * dma_address = (unsigned int *) ((unsigned int)&seg_dma_base + 
966                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
967
968    *status = dma_address[channel_id * DMA_SPAN + DMA_LEN];
969    return 0;
970#else
971    return -1;
972#endif
973}
974
975//////////////////////////////////////////////////////////////////////////////////
976// _dma_transfer()
977// Transfer data between a user buffer and a device buffer using DMA.
978// Only one device type is supported: Frame Buffer (dev_type == 0)
979// Arguments are:
980// - dev_type     : device type.
981// - to_user      : from  device buffer to user buffer when true.
982// - offset       : offset (in bytes) in the device buffer.
983// - user_vaddr   : virtual base address of the user buffer.
984// - length       : number of bytes to be transfered.
985//
986// The cluster_id and channel_id are obtained from task context (CTX_DMA_ID).
987// The user buffer must be mapped in user address space and word-aligned.
988// The user buffer length must be multiple of 4 bytes.
989// We compute the physical base addresses for both the device buffer
990// and the user buffer before programming the DMA transfer.
991// The GIET being fully static, we don't need to split the transfer in 4 Kbytes
992// pages, because the user buffer is contiguous in physical space.
993// Returns 0 if success, > 0 if error.
994//////////////////////////////////////////////////////////////////////////////////
995unsigned int _dma_transfer( unsigned int dev_type,
996                            unsigned int to_user,
997                            unsigned int offset,
998                            unsigned int user_vaddr,
999                            unsigned int length ) 
1000{
1001#if NB_DMA_CHANNELS > 0
1002    unsigned int ko;           // unsuccessful V2P translation
1003    unsigned int device_vbase; // device buffer vbase address
1004    unsigned int flags;        // protection flags
1005    unsigned int ppn;          // physical page number
1006    paddr_t      user_pbase;   // user buffer pbase address
1007    paddr_t      device_pbase; // frame buffer pbase address
1008
1009    // check user buffer address and length alignment
1010    if ((user_vaddr & 0x3) || (length & 0x3)) 
1011    {
1012        _get_lock(&_tty_put_lock);
1013        _puts("\n[GIET ERROR] in _dma_transfer : user buffer not word aligned\n");
1014        _release_lock(&_tty_put_lock);
1015        return 1;
1016    }
1017
1018    // get DMA channel and compute DMA vbase address
1019    unsigned int dma_id      = _get_context_slot(CTX_DMA_ID);
1020    if ( dma_id == 0xFFFFFFFF )
1021    {
1022        _get_lock(&_tty_put_lock);
1023        _puts("\n[GIET ERROR] in _dma_transfer : no DMA channel allocated\n");
1024        _release_lock(&_tty_put_lock);
1025        return 1;
1026    }
1027    unsigned int cluster_id  = dma_id / NB_DMA_CHANNELS;
1028    unsigned int channel_id  = dma_id % NB_DMA_CHANNELS;
1029    unsigned int * dma_vbase = (unsigned int *) ((unsigned int)&seg_dma_base + 
1030                               (cluster_id * (unsigned int)&vseg_cluster_increment));
1031    // get page table address
1032    unsigned int user_ptab = _get_context_slot(CTX_PTAB_ID);
1033
1034    // get device buffer virtual address, depending on peripheral type
1035    if (dev_type == 0) 
1036    {
1037        device_vbase = (unsigned int) &seg_fbf_base + offset;
1038    }
1039    else 
1040    {
1041        _get_lock(&_tty_put_lock);
1042        _puts("\n[GIET ERROR] in _dma_transfer : device type not supported\n");
1043        _release_lock(&_tty_put_lock);
1044        return 1;
1045    }
1046
1047    // get device buffer physical address
1048    ko = _v2p_translate( (page_table_t*) user_ptab, 
1049                         (device_vbase >> 12), 
1050                         &ppn, 
1051                         &flags );
1052    if (ko) 
1053    {
1054        _get_lock(&_tty_put_lock);
1055        _puts("\n[GIET ERROR] in _dma_transfer : device buffer unmapped\n");
1056        _release_lock(&_tty_put_lock);
1057        return 1;
1058    }
1059    device_pbase = ((paddr_t)ppn << 12) | (device_vbase & 0x00000FFF);
1060
1061    // Compute user buffer physical address
1062    ko = _v2p_translate( (page_table_t*) user_ptab, 
1063                         (user_vaddr >> 12), 
1064                         &ppn, 
1065                         &flags );
1066    if (ko) 
1067    {
1068        _get_lock(&_tty_put_lock);
1069        _puts("\n[GIET ERROR] in _dma_transfer() : user buffer unmapped\n");
1070        _release_lock(&_tty_put_lock);
1071        return 1;
1072    } 
1073    if ((flags & PTE_U) == 0) 
1074    {
1075        _get_lock(&_tty_put_lock);
1076        _puts("[GIET ERROR] in _dma_transfer() : user buffer not in user space\n");
1077        _release_lock(&_tty_put_lock);
1078        return 1; 
1079    }
1080    if (((flags & PTE_W) == 0 ) && to_user) 
1081    {
1082        _get_lock(&_tty_put_lock);
1083        _puts("\n[GIET ERROR] in _dma_transfer() : user buffer not writable\n");
1084        _release_lock(&_tty_put_lock);
1085        return 1;
1086    }
1087    user_pbase = (((paddr_t)ppn) << 12) | (user_vaddr & 0x00000FFF);
1088
1089/*  This is a draft for IOMMU support
1090
1091    // loop on all virtual pages covering the user buffer
1092    unsigned int user_vpn_min = user_vaddr >> 12;
1093    unsigned int user_vpn_max = (user_vaddr + length - 1) >> 12;
1094    unsigned int ix2          = 0;
1095    unsigned int ix1          = _dma_iommu_ix1 + dma_id;
1096
1097    for ( vpn = user_vpn_min ; vpn <= user_vpn_max ; vpn++ )
1098    {
1099    // get ppn and flags for each vpn
1100    unsigned int ko = _v2p_translate( (page_table_t*)user_pt_vbase,
1101    vpn,
1102    &ppn,
1103    &flags );
1104
1105    // check access rights
1106    if ( ko )                                 return 3;     // unmapped
1107    if ( (flags & PTE_U) == 0 )               return 4;     // not in user space
1108    if ( ( (flags & PTE_W) == 0 ) && to_user ) return 5;     // not writable
1109
1110    // save first ppn value
1111    if ( ix2 == 0 ) ppn_first = ppn;
1112
1113    if ( GIET_USE_IOMMU && USE_IOB )    // user buffer remapped in the I/0 space
1114    {
1115    // check buffer length < 2 Mbytes
1116    if ( ix2 > 511 ) return 2;
1117
1118    // map the physical page in IOMMU page table
1119    _iommu_add_pte2( ix1,        // PT1 index
1120    ix2,        // PT2 index
1121    ppn,        // physical page number
1122    flags );    // protection flags
1123    }
1124    else            // no IOMMU : check that physical pages are contiguous
1125    {
1126    if ( (ppn - ppn_first) != ix2 )       return 6;     // split physical buffer 
1127    }
1128
1129    // increment page index
1130    ix2++;
1131    } // end for vpn
1132
1133    // register the number of pages to be unmapped if iommu activated
1134    _dma_iommu_npages[dma_id] = (user_vpn_max - user_vpn_min) + 1;
1135
1136*/
1137
1138    // invalidate data cache in case of memory write
1139    if (to_user) _dcache_buf_invalidate((void *) user_vaddr, length);
1140
1141// get the lock
1142//  _get_lock(&_dma_lock[dma_id]);
1143
1144#if GIET_DEBUG_DMA_DRIVER
1145_get_lock(&_tty_put_lock);
1146_puts("\n[GIET DEBUG] DMA TRANSFER at cycle ");
1147_putd( _proctime() );
1148_puts("\n - cluster_id       = ");
1149_putx( cluster_id );
1150_puts("\n - channel_id       = ");
1151_putx( channel_id );
1152_puts("\n - dma_vbase        = ");
1153_putx( (unsigned int)dma_vbase );
1154_puts("\n - device_buf_vbase = ");
1155_putx( device_vbase );
1156_puts("\n - device_buf_pbase = ");
1157_putl( device_pbase );
1158_puts("\n - user_buf_vbase   = ");
1159_putx( user_vaddr );
1160_puts("\n - user_buf_pbase   = ");
1161_putl( user_pbase );
1162_puts("\n");
1163_release_lock(&_tty_put_lock);
1164#endif
1165
1166    // DMA configuration
1167    if (to_user) 
1168    {
1169        dma_vbase[channel_id * DMA_SPAN + DMA_SRC]     = (unsigned int)(device_pbase);
1170        dma_vbase[channel_id * DMA_SPAN + DMA_SRC_EXT] = (unsigned int)(device_pbase>>32);
1171        dma_vbase[channel_id * DMA_SPAN + DMA_DST]     = (unsigned int)(user_pbase);
1172        dma_vbase[channel_id * DMA_SPAN + DMA_DST_EXT] = (unsigned int)(user_pbase>>32);
1173    }
1174    else 
1175    {
1176        dma_vbase[channel_id * DMA_SPAN + DMA_SRC]     = (unsigned int)(user_pbase);
1177        dma_vbase[channel_id * DMA_SPAN + DMA_SRC_EXT] = (unsigned int)(user_pbase>>32);
1178        dma_vbase[channel_id * DMA_SPAN + DMA_DST]     = (unsigned int)(device_pbase);
1179        dma_vbase[channel_id * DMA_SPAN + DMA_DST_EXT] = (unsigned int)(device_pbase>>32);
1180    }
1181    dma_vbase[channel_id * DMA_SPAN + DMA_LEN] = (unsigned int) length;
1182
1183    return 0;
1184
1185#else // NB_DMA_CHANNELS == 0
1186    _get_lock(&_tty_put_lock);
1187    _puts("\n[GIET ERROR] in _dma_transfer() : NB_DMA_CHANNELS == 0");
1188    _release_lock(&_tty_put_lock);
1189    return 1;
1190#endif
1191
1192}  // end _dma_transfer() 
1193
1194//////////////////////////////////////////////////////////////////////////////////
1195// _dma_completed()
1196// This function checks completion of a DMA transfer to or from a peripheral
1197// device (Frame Buffer or Multi-Nic).
1198// As it is a blocking call, the processor is busy waiting.
1199// Returns 0 if success, > 0 if error
1200// (1 == read error / 2 == DMA idle error / 3 == write error)
1201//////////////////////////////////////////////////////////////////////////////////
1202unsigned int _dma_completed() 
1203{
1204#if NB_DMA_CHANNELS > 0
1205    unsigned int dma_id  = _get_context_slot(CTX_DMA_ID);
1206    unsigned int dma_ret;
1207
1208    // busy waiting with a pseudo-random delay between bus accesses
1209    while (_dma_done[dma_id] == 0) 
1210    {
1211        unsigned int delay = (( _proctime() ^ _procid() << 4) & 0x3F) + 1;
1212        asm volatile(
1213                "move  $3,   %0                 \n"
1214                "loop_nic_completed:            \n"
1215                "addi  $3,   $3, -1             \n"
1216                "bnez  $3,   loop_nic_completed \n"
1217                "nop                            \n"
1218                :
1219                : "r" (delay)
1220                : "$3"); 
1221    }
1222
1223#if GIET_DEBUG_DMA_DRIVER
1224_get_lock(&_tty_put_lock);
1225_puts("\n[GIET DEBUG] DMA COMPLETED at cycle ");
1226_putd( _proctime() );
1227_puts("\n - cluster_id       = ");
1228_putx( dma_id/NB_DMA_CHANNELS );
1229_puts("\n - channel_id       = ");
1230_putx( dma_id%NB_DMA_CHANNELS );
1231_puts("\n");
1232_release_lock(&_tty_put_lock);
1233#endif
1234
1235    // reset synchronization variables
1236    _dma_done[dma_id] = 0;
1237    dma_ret = _dma_status[dma_id];
1238    asm volatile("sync\n");
1239
1240//    _dma_lock[dma_id] = 0;
1241
1242    return dma_ret;
1243
1244#else // NB_DMA_CHANNELS == 0
1245    return -1;
1246#endif
1247
1248}  // end _dma_completed
1249
1250
1251//////////////////////////////////////////////////////////////////////////////////
1252//     VciFrameBuffer driver
1253//////////////////////////////////////////////////////////////////////////////////
1254// There are three methods to access the VciFrameBuffer device:
1255// 
1256// 1) The _fb_sync_write() and _fb_sync_read() functions use a memcpy strategy
1257// to implement the transfer between a data buffer (user space) and the frame
1258// buffer (kernel space). They are blocking until completion of the transfer.
1259//
1260// 2) The _fb_dma_write(), _fb_dma_read() and _fb_dma_completed() functions use
1261// the VciMultiDma components (distributed in the clusters) to transfer data
1262// between the user buffer and the frame buffer.
1263// A DMA channel is allocated to the task requesting it in the mapping_info,
1264// and stored in the task context.
1265//
1266// 3) The _fb_cma_init(), _fb_cma_write() and _fb_cma_stop() functions use
1267// the VciChbufDma component (non replicated) to transfer a flow of images from
1268// a user-space chained buffer (two buffers) to the frame buffer.
1269// A CMA channel must be allocated to the task requesting it in the mapping_info,
1270// and stored in the task context.
1271//////////////////////////////////////////////////////////////////////////////////
1272
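//////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original driver): the first two
// access methods listed above, for a hypothetical image held in a user
// buffer (prototypes assumed visible from drivers.h). The helper name and
// the image size are arbitrary examples.
//////////////////////////////////////////////////////////////////////////////
static inline unsigned int _fb_example_display( const void * image )
{
    unsigned int length = 128 * 128;    // hypothetical 128x128, 1 byte/pixel

    // method 1 : blocking memcpy into the frame buffer
    _fb_sync_write( 0, image, length );

    // method 2 : DMA transfer, then busy wait for completion
    if ( _fb_dma_write( 0, image, length ) ) return 1;
    return _fb_dma_completed();
}
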
1273//////////////////////////////////////////////////////////////////////////////////
1274// _fb_sync_write()
1275// Transfer data from a memory buffer to the frame_buffer device using a memcpy.
1276// - offset : offset (in bytes) in the frame buffer.
1277// - buffer : base address of the memory buffer.
1278// - length : number of bytes to be transfered.
1279//////////////////////////////////////////////////////////////////////////////////
1280
1281unsigned int _fb_sync_write(unsigned int offset, 
1282                            const void * buffer, 
1283                            unsigned int length) 
1284{
1285    unsigned char * fb_address = (unsigned char *) &seg_fbf_base + offset;
1286    memcpy((void *) fb_address, (void *) buffer, length);
1287    return 0;
1288}
1289
1290
1291//////////////////////////////////////////////////////////////////////////////////
1292// _fb_sync_read()
1293// Transfer data from the frame_buffer device to a memory buffer using a memcpy.
1294// - offset : offset (in bytes) in the frame buffer.
1295// - buffer : base address of the memory buffer.
1296// - length : number of bytes to be transfered.
1297//////////////////////////////////////////////////////////////////////////////////
1298unsigned int _fb_sync_read( unsigned int   offset, 
1299                            const void*    buffer, 
1300                            unsigned int   length) 
1301{
1302    unsigned char* fb_address = (unsigned char *) &seg_fbf_base + offset;
1303    memcpy((void *) buffer, (void *) fb_address, length);
1304    return 0;
1305}
1306
1307
1308//////////////////////////////////////////////////////////////////////////////////
1309// _fb_dma_write()
1310// Transfer data from a memory buffer to the frame_buffer device using  DMA.
1311// - offset : offset (in bytes) in the frame buffer.
1312// - buffer : base address of the memory buffer.
1313// - length : number of bytes to be transfered.
1314// Returns 0 if success, > 0 if error.
1315//////////////////////////////////////////////////////////////////////////////////
1316unsigned int _fb_dma_write( unsigned int   offset, 
1317                            const void*    buffer, 
1318                            unsigned int   length) 
1319{
1320    return _dma_transfer( 0,             // frame buffer
1321                          0,             // write
1322                          offset,
1323                          (unsigned int) buffer,
1324                          length );
1325}
1326//////////////////////////////////////////////////////////////////////////////////
1327// _fb_dma_read()
1328// Transfer data from the frame_buffer device to a memory buffer using  DMA.
1329// - offset : offset (in bytes) in the frame buffer.
1330// - buffer : virtual base address of the user buffer.
1331// - length : buffer size (number of bytes)
1332// Returns 0 if success, > 0 if error.
1333//////////////////////////////////////////////////////////////////////////////////
1334unsigned int _fb_dma_read( unsigned int   offset, 
1335                           const void*    buffer, 
1336                           unsigned int   length ) 
1337{
1338    return _dma_transfer( 0,    // frame buffer
1339                          1,    // read
1340                          offset,
1341                          (unsigned int) buffer,
1342                          length );
1343}
1344//////////////////////////////////////////////////////////////////////////////////
1345// _fb_dma_completed()
1346// This function checks completion of a DMA transfer to or from the frame buffer.
1347// As it is a blocking call, the processor is busy waiting.
1348// Returns 0 if success, > 0 if error
1349// (1 == read error / 2 == DMA idle error / 3 == write error)
1350//////////////////////////////////////////////////////////////////////////////////
1351unsigned int _fb_dma_completed() 
1352{
1353    return _dma_completed();
1354}
1355
1356// This structure contains two chbuf descriptors that can be used by
1357// the VciChbufDma component to transfer a flow of images:
1358// - The SRC chbuf descriptor contains two slots (two user buffers)
1359// - The DST chbuf descriptor contains only one slot (frame buffer)
1360
1361typedef struct fb_cma_channel_s
1362{
1363    paddr_t       buf0;     // physical address + status for user buffer 0
1364    paddr_t       buf1;     // physical address + status for user buffer 1
1365    paddr_t       fbf;      // physical address + status for frame buffer
1366    unsigned int  length;   // buffer length (number of bytes)
1367    unsigned int  padding;  // unused (just to have channel size = 32 bytes)
1368} fb_cma_channel_t;
1369
1370in_unckdata volatile fb_cma_channel_t _fb_cma_channel[NB_CMA_CHANNELS] __attribute__((aligned(64)));
1371in_unckdata volatile paddr_t          _fb_cma_desc_paddr[NB_CMA_CHANNELS];
1372
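//////////////////////////////////////////////////////////////////////////////////
// Each 64-bit chbuf descriptor entry packs a physical buffer address with a
// status flag in the most significant bit (1 = full, 0 = empty), as done in
// _fb_cma_write() below. The sketch (not compiled) only illustrates this
// encoding; the helper names are assumptions.
//////////////////////////////////////////////////////////////////////////////////
#if 0
static paddr_t example_chbuf_set_full ( paddr_t entry ) { return entry | 0x8000000000000000ULL; }
static paddr_t example_chbuf_set_empty( paddr_t entry ) { return entry & 0x7FFFFFFFFFFFFFFFULL; }
#endif
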
1373//////////////////////////////////////////////////////////////////////////////////
1374// _fb_cma_init()
1375// This function uses the _fb_cma_channel[] and _fb_cma_desc_paddr[] arrays,
1376// both indexed by the channel index. Each _fb_cma_channel[] entry contains one
1377// fb_cma_channel structure (defining the SRC and DST chbuf descriptors).
1378// The function does four things:
1379//
1380// 1) computes the physical addresses of the two source user buffers and of
1381//    the destination frame buffer. It initialises the channel descriptor
1382//    _fb_cma_channel[i], containing the SRC chbuf descriptor (two buffers),
1383//    the DST chbuf descriptor (one single frame buffer), and the buffer length.
1384//
1385// 2) computes the physical address of the channel descriptor and registers it
1386//    in _fb_cma_desc_paddr[i].
1387//   
1388// 3) makes a SYNC request to the L2 cache for the channel descriptor, because the
1389//    channel descriptor is directly accessed in XRAM by the CMA component.
1390//
1391// 4) Starts the CMA hardware channel, which will poll the channel descriptor
1392//    to transfer a user buffer to the frame buffer as soon as the source
1393//    user buffer is marked valid.
1394//
1395// Arguments are:
1396// - vbase0 : virtual base address of the first user buffer.
1397// - vbase1 : virtual base address of the second user buffer.
1398// - length : user buffer size (number of bytes)
1399// Returns 0 if success, > 0 if error
1400//////////////////////////////////////////////////////////////////////////////////
1401unsigned int _fb_cma_init( const void*  vbase0,
1402                           const void*  vbase1,
1403                           unsigned int length ) 
1404{
1405#if NB_CMA_CHANNELS > 0
1406
1407    unsigned int  channel_id;          // CMA channel index
1408    unsigned int  user_ptab;           // page table virtual address
1409    unsigned int  ko;                  // unsuccessful V2P translation
1410    unsigned int  vaddr;               // virtual address
1411    unsigned int  flags;               // protection flags
1412    unsigned int  ppn;                 // physical page number
1413    paddr_t       channel_pbase;       // physical address of channel descriptor
1414
1415    // get CMA channel index
1416    channel_id = _get_context_slot(CTX_CMA_ID);
1417    if ( channel_id >= NB_CMA_CHANNELS )
1418    {
1419        _get_lock(&_tty_put_lock);
1420        _puts("\n[GIET ERROR] in _fb_cma_init() : CMA channel index too large\n");
1421        _release_lock(&_tty_put_lock);
1422        return 1;
1423    }
1424
1425    // checking size for channel descriptor
1426    if ( sizeof(fb_cma_channel_t) != 32 )
1427    {
1428        _get_lock(&_tty_put_lock);
1429        _puts("\n[GIET ERROR] in _fb_cma_init() : bad fb_cma_channel size\n");
1430        _release_lock(&_tty_put_lock);
1431        return 1;
1432    }
1433
1434    // checking channel descriptor alignment (32 bytes)
1435    if ( (unsigned int)(&_fb_cma_channel[channel_id]) & 0x1F ) 
1436    {
1437        _get_lock(&_tty_put_lock);
1438        _puts("\n[GIET ERROR] in _fb_cma_init() : bad fb_cma_channel alignment\n");
1439        _release_lock(&_tty_put_lock);
1440        return 1;
1441    }
1442
1443    // checking user buffer virtual addresses and length alignment
1444    if ( ((unsigned int)vbase0 & 0x3) || ((unsigned int)vbase1 & 0x3) || (length & 0x3) ) 
1445    {
1446        _get_lock(&_tty_put_lock);
1447        _puts("\n[GIET ERROR] in _fb_cma_init() : user buffer not word aligned\n");
1448        _release_lock(&_tty_put_lock);
1449        return 1;
1450    }
1451
1452    // get page table virtual address
1453    user_ptab = _get_context_slot(CTX_PTAB_ID);
1454
1455    // compute and register frame buffer physical address
1456    vaddr = ((unsigned int)&seg_fbf_base);
1457    ko    = _v2p_translate( (page_table_t*) user_ptab, 
1458                         (vaddr >> 12),
1459                         &ppn, 
1460                         &flags );
1461    if (ko) 
1462    {
1463        _get_lock(&_tty_put_lock);
1464        _puts("\n[GIET ERROR] in _fb_cma_init() : frame buffer unmapped\n");
1465        _release_lock(&_tty_put_lock);
1466        return 1;
1467    }
1468    _fb_cma_channel[channel_id].fbf = ((paddr_t)ppn << 12) | (vaddr & 0x00000FFF);
1469
1470    // Compute and register first user buffer physical address
1471    vaddr = (unsigned int)vbase0; 
1472    ko = _v2p_translate( (page_table_t*) user_ptab, 
1473                         (vaddr >> 12),
1474                         &ppn, 
1475                         &flags );
1476    if (ko) 
1477    {
1478        _get_lock(&_tty_put_lock);
1479        _puts("\n[GIET ERROR] in _fb_cma_init() : user buffer 0 unmapped\n");
1480        _release_lock(&_tty_put_lock);
1481        return 1;
1482    } 
1483    if ((flags & PTE_U) == 0) 
1484    {
1485        _get_lock(&_tty_put_lock);
1486        _puts("[GIET ERROR] in _fb_cma_init() : user buffer 0 not in user space\n");
1487        _release_lock(&_tty_put_lock);
1488        return 1; 
1489    }
1490    _fb_cma_channel[channel_id].buf0 = ((paddr_t)ppn << 12) | (vaddr & 0x00000FFF);
1491
1492    // Compute and register second user buffer physical address
1493    vaddr = (unsigned int)vbase1; 
1494    ko = _v2p_translate( (page_table_t*) user_ptab, 
1495                         (vaddr >> 12),
1496                         &ppn, 
1497                         &flags );
1498    if (ko) 
1499    {
1500        _get_lock(&_tty_put_lock);
1501        _puts("\n[GIET ERROR] in _fb_cma_init() : user buffer 1 unmapped\n");
1502        _release_lock(&_tty_put_lock);
1503        return 1;
1504    } 
1505    if ((flags & PTE_U) == 0) 
1506    {
1507        _get_lock(&_tty_put_lock);
1508        _puts("[GIET ERROR] in _fb_cma_init() : user buffer 1 not in user space\n");
1509        _release_lock(&_tty_put_lock);
1510        return 1; 
1511    }
1512    _fb_cma_channel[channel_id].buf1 = ((paddr_t)ppn << 12) | (vaddr & 0x00000FFF);
1513
1514    // register buffer length in channel descriptor
1515    _fb_cma_channel[channel_id].length = length;
1516
1517    // Compute and register the physical address of the channel descriptor
1518    vaddr = (unsigned int)(&_fb_cma_channel[channel_id]);
1519    ko = _v2p_translate( (page_table_t*) user_ptab, 
1520                         (vaddr >> 12),
1521                         &ppn, 
1522                         &flags );
1523    if (ko) 
1524    {
1525        _get_lock(&_tty_put_lock);
1526        _puts("\n[GIET ERROR] in _fb_cma_init() : channel descriptor unmapped\n");
1527        _release_lock(&_tty_put_lock);
1528        return 1;
1529    } 
1530    channel_pbase = (((paddr_t)ppn) << 12) | (vaddr & 0x00000FFF);
1531    _fb_cma_desc_paddr[channel_id] = channel_pbase;
1532
1533#if GIET_DEBUG_CMA_DRIVER
1534_puts("\n");
1535_puts("- fbf       pbase = ");
1536_putl( _fb_cma_channel[channel_id].fbf );
1537_puts("\n");
1538_puts("- buf0      pbase = ");
1539_putl( _fb_cma_channel[channel_id].buf0 );
1540_puts("\n");
1541_puts("- buf1      pbase = ");
1542_putl( _fb_cma_channel[channel_id].buf1 );
1543_puts("\n");
1544_puts("- channel   pbase = ");
1545_putl( channel_pbase );
1546_puts("\n");
1547#endif
1548
1549    // SYNC request for channel descriptor
1550    _memc_sync( channel_pbase, 32 );
1551
1552    // CMA channel activation
1553    unsigned int* cma_vbase = (unsigned int *)&seg_cma_base;
1554    unsigned int  offset     = channel_id * CHBUF_CHANNEL_SPAN;
1555
1556    cma_vbase[offset + CHBUF_SRC_DESC]  = (unsigned int)(channel_pbase & 0xFFFFFFFF);
1557    cma_vbase[offset + CHBUF_SRC_EXT]   = (unsigned int)(channel_pbase >> 32);
1558    cma_vbase[offset + CHBUF_SRC_NBUFS] = 2;
1559    cma_vbase[offset + CHBUF_DST_DESC]  = (unsigned int)(channel_pbase & 0xFFFFFFFF) + 16;
1560    cma_vbase[offset + CHBUF_DST_EXT]   = (unsigned int)(channel_pbase >> 32);
1561    cma_vbase[offset + CHBUF_DST_NBUFS] = 1;
1562    cma_vbase[offset + CHBUF_BUF_SIZE]  = length;
1563    cma_vbase[offset + CHBUF_PERIOD]    = 300;
1564    cma_vbase[offset + CHBUF_RUN]       = 1;
1565
1566    return 0;
1567
1568#else
1569
1570    _get_lock(&_tty_put_lock);
1571    _puts("\n[GIET ERROR] in _fb_cma_init() : no CMA channel allocated\n");
1572    _release_lock(&_tty_put_lock);
1573
1574    return 1;
1575#endif
1576}
1577//////////////////////////////////////////////////////////////////////////////////
1578// _fb_cma_write()
1579// This function makes a SYNC request for the source user buffer.
1580// Then it updates the status of the SRC and DST chbuf descriptors, to allow
1581// the CMA component to transfer the source user buffer to the destination
1582// frame buffer, and makes a SYNC request for the channel descriptor.
1583//
1584// - buffer_id : user buffer index (0 => buf0 / not 0 => buf1)
1585// Returns 0 if success, > 0 if error
1586//////////////////////////////////////////////////////////////////////////////////
1587unsigned int _fb_cma_write( unsigned int buffer_id )
1588{
1589#if NB_CMA_CHANNELS > 0
1590
1591    paddr_t         buf_paddr;
1592    unsigned int    buf_length;
1593
1594    // get CMA channel index
1595    unsigned int channel_id = _get_context_slot(CTX_CMA_ID);
1596
1597    // SYNC request for the source user buffer
1598    if ( buffer_id == 0 )  buf_paddr = _fb_cma_channel[channel_id].buf0;
1599    else                   buf_paddr = _fb_cma_channel[channel_id].buf1;
1600    buf_length = _fb_cma_channel[channel_id].length;
1601    _memc_sync( buf_paddr, buf_length );
1602
1603    // set SRC full
1604    if ( buffer_id == 0 )
1605        _fb_cma_channel[channel_id].buf0 = buf_paddr | 0x8000000000000000ULL;
1606    else
1607        _fb_cma_channel[channel_id].buf1 = buf_paddr | 0x8000000000000000ULL;
1608
1609    // set DST empty
1610    _fb_cma_channel[channel_id].fbf  = _fb_cma_channel[channel_id].fbf
1611                                       & 0x7FFFFFFFFFFFFFFFULL;
1612
1613    // SYNC request for the channel descriptor
1614    buf_paddr  = _fb_cma_desc_paddr[channel_id];
1615    buf_length = 32;
1616    _memc_sync( buf_paddr, buf_length );
1617
1618    return 0;
1619
1620#else
1621
1622    _get_lock(&_tty_put_lock);
1623    _puts("\n[GIET ERROR] in _fb_cma_write() : no CMA channel allocated\n");
1624    _release_lock(&_tty_put_lock);
1625    return 1;
1626
1627#endif
1628}
1629//////////////////////////////////////////////////////////////////////////////////
1630// _fb_cma_stop()
1631// This function deactivates the CMA channel allocated to the calling task.
1632// Returns 0 if success, > 0 if error
1633//////////////////////////////////////////////////////////////////////////////////
1634unsigned int _fb_cma_stop( unsigned int buffer_id )
1635{
1636#if NB_CMA_CHANNELS > 0
1637
1638    // get CMA channel allocated
1639    unsigned int channel_id = _get_context_slot(CTX_CMA_ID);
1640
1641    // CMA channel deactivation
1642    unsigned int* cma_vbase = (unsigned int *)&seg_cma_base;
1643    unsigned int  offset     = channel_id * CHBUF_CHANNEL_SPAN;
1644    cma_vbase[offset + CHBUF_RUN] = 0;
1645    return 0;
1646
1647#else
1648
1649    _get_lock(&_tty_put_lock);
1650    _puts("\n[GIET ERROR] in _fb_cma_stop() : no CMA channel allocated\n");
1651    _release_lock(&_tty_put_lock);
1652    return 1;
1653
1654#endif
1655}
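
//////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not compiled): a possible double-buffered display loop
// built on _fb_cma_init(), _fb_cma_write() and _fb_cma_stop(). The buffer
// names, the frame size and the number of frames are assumptions; in real use
// the two buffers belong to the user application (they must be mapped with
// the PTE_U flag, as checked by _fb_cma_init()).
//////////////////////////////////////////////////////////////////////////////////
#if 0
#define EXAMPLE_FRAME_SIZE  (128 * 128)            // hypothetical frame size (bytes)

static unsigned char example_buf0[EXAMPLE_FRAME_SIZE] __attribute__((aligned(64)));
static unsigned char example_buf1[EXAMPLE_FRAME_SIZE] __attribute__((aligned(64)));

static unsigned int example_fb_cma_usage( unsigned int nframes )
{
    unsigned int frame;

    // register the two user buffers and start the CMA channel
    if ( _fb_cma_init( example_buf0, example_buf1, EXAMPLE_FRAME_SIZE ) ) return 1;

    for ( frame = 0 ; frame < nframes ; frame++ )
    {
        unsigned int id = frame & 0x1;             // alternate between buf0 and buf1

        // ... fill example_buf0 (id == 0) or example_buf1 (id == 1) here ...

        if ( _fb_cma_write( id ) ) return 1;       // hand the buffer to the CMA engine
    }

    // deactivate the CMA channel
    return _fb_cma_stop( 0 );
}
#endif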
1656   
1657//////////////////////////////////////////////////////////////////////////////////
1658//     VciMultiNic driver
1659//////////////////////////////////////////////////////////////////////////////////
1660// The VciMultiNic device can be accessed directly by software with memcpy(),
1661// or it can be accessed through a multi-channels CMA component:
1662// 
1663// The '_nic_sync_write' and '_nic_sync_read' functions use a memcpy strategy to
1664// implement the transfer between a data buffer (user space) and the NIC
1665// buffer (kernel space). They are blocking until completion of the transfer.
1666//
1667// The _nic_cma_init() and _nic_cma_stop() functions use the VciChbufDma component
1668// to transfer a flow of packets from the NIC RX hard chbuf (two containers)
1669// to a user RX chbuf (two containers), and to transfer another flow of packets
1670// from a user TX chbuf (two containers) to the NIC TX chbuf (two containers).
1671// One NIC channel and two CMA channels must be allocated to the task
1672// in the mapping_info data structure.
1673//////////////////////////////////////////////////////////////////////////////////
1674
1675//////////////////////////////////////////////////////////////////////////////////
1676// _nic_sync_write()
1677// Transfer data from a memory buffer to the NIC device using a memcpy.
1678// - buffer : base address of the memory buffer.
1679// - length : number of bytes to be transferred.
1680//////////////////////////////////////////////////////////////////////////////////
1681unsigned int _nic_sync_write( const void*    buffer,
1682                              unsigned int   length ) 
1683{
1684    // To be defined
1685    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1686    // memcpy((void *) nic_address, (void *) buffer, length);
1687    return 0;
1688}
1689//////////////////////////////////////////////////////////////////////////////////
1690// _nic_sync_read()
1691// Transfer data from the NIC device to a memory buffer using a memcpy.
1692// - buffer : base address of the memory buffer.
1693// - length : number of bytes to be transferred.
1694//////////////////////////////////////////////////////////////////////////////////
1695unsigned int _nic_sync_read( const void*    buffer, 
1696                             unsigned int   length ) 
1697{
1698    // To be defined
1699    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1700    // memcpy((void *) buffer, (void *) nic_address, length);
1701    return 0;
1702}
1703//////////////////////////////////////////////////////////////////////////////////
1704// _nic_cma_rx_init()
1705// Returns 0 if success, > 0 if error.
1706//////////////////////////////////////////////////////////////////////////////////
1707unsigned int _nic_cma_rx_init( const void*  buf0,
1708                               const void*  buf1,
1709                               unsigned int length ) 
1710{
1711    // to be defined
1712    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1713    return 0;
1714}
1715//////////////////////////////////////////////////////////////////////////////////
1716// _nic_cma_tx_init()
1717// Returns 0 if success, > 0 if error.
1718//////////////////////////////////////////////////////////////////////////////////
1719unsigned int _nic_cma_tx_init( const void*  buf0,
1720                               const void*  buf1,
1721                               unsigned int length ) 
1722{
1723    // to be defined
1724    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1725    return 0;
1726}//////////////////////////////////////////////////////////////////////////////////
1727// _nic_cma_stop()
1728// Returns 0 if success, > 0 if error.
1729//////////////////////////////////////////////////////////////////////////////////
1730unsigned int _nic_cma_stop()
1731{
1732    // to be defined
1733    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1734    return 0;
1735}
1736
1737
1738//////////////////////////////////////////////////////////////////////////////////
1739//     VciMemCache driver
1740//////////////////////////////////////////////////////////////////////////////////
1741// The VciMemCache device can be accessed through a configuration interface,
1742// as a set of uncached, memory-mapped registers.
1743///////////////////////////////////////////////////////////////////////////////////
1744// The (virtual) base address of the associated segment is:
1745//
1746//       mmc_address = seg_mmc_base + cluster_id * vseg_cluster_increment
1747//
1748////////////////////////////////////////////////////////////////////////////////
1749
1750///////////////////////////////////////////////////////////////////////////////////
1751// _memc_inval()
1752// This function invalidates all cache lines covering a memory buffer defined
1753// by its physical base address and length.
1754// The buffer address MSBs are used to compute the cluster index.
1755///////////////////////////////////////////////////////////////////////////////////
1756void _memc_inval( paddr_t      buf_paddr,
1757                  unsigned int buf_length )
1758{
1759    unsigned int cluster_id    = (unsigned int)((buf_paddr>>32)/(256/NB_CLUSTERS));
1760
1761    unsigned int * mmc_address = (unsigned int *) ((unsigned int)&seg_mmc_base + 
1762                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
1763
1764    // get the lock protecting exclusive access to MEMC
1765    while ( mmc_address[MEMC_LOCK] ) { asm volatile("nop"); }
1766
1767    // write inval arguments
1768    mmc_address[MEMC_ADDR_LO]    = (unsigned int)buf_paddr;
1769    mmc_address[MEMC_ADDR_HI]    = (unsigned int)(buf_paddr>>32);
1770    mmc_address[MEMC_BUF_LENGTH] = buf_length;
1771    mmc_address[MEMC_CMD_TYPE]   = MEMC_CMD_INVAL;
1772
1773    // release the lock protecting MEMC
1774    mmc_address[MEMC_LOCK] = 0;
1775}
1776///////////////////////////////////////////////////////////////////////////////////
1777// _memc_sync()
1778// This function copies to external RAM all cache lines covering a memory buffer
1779// defined by its physical base address and length, if they are dirty.
1780// The buffer address MSBs are used to compute the cluster index.
1781///////////////////////////////////////////////////////////////////////////////////
1782void _memc_sync( paddr_t      buf_paddr,
1783                 unsigned int buf_length )
1784{
1785    unsigned int cluster_id    = (unsigned int)((buf_paddr>>32)/(256/NB_CLUSTERS));
1786
1787    unsigned int * mmc_address = (unsigned int *) ((unsigned int)&seg_mmc_base + 
1788                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
1789
1790    // get the lock protecting exclusive access to MEMC
1791    while ( mmc_address[MEMC_LOCK] ) { asm volatile("nop"); }
1792
1793    // write sync arguments
1794    mmc_address[MEMC_ADDR_LO]    = (unsigned int)buf_paddr;
1795    mmc_address[MEMC_ADDR_HI]    = (unsigned int)(buf_paddr>>32);
1796    mmc_address[MEMC_BUF_LENGTH] = buf_length;
1797    mmc_address[MEMC_CMD_TYPE]   = MEMC_CMD_SYNC;
1798
1799    // release the lock protecting MEMC
1800    mmc_address[MEMC_LOCK] = 0;
1801}
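
///////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not compiled): the usual pairing of the two functions
// above around a DMA transaction. The argument names are assumptions made for
// this example only.
///////////////////////////////////////////////////////////////////////////////////
#if 0
static void example_memc_usage( paddr_t src_paddr, paddr_t dst_paddr, unsigned int length )
{
    // before a peripheral reads the source buffer: push dirty lines to external RAM
    _memc_sync( src_paddr, length );

    // ... start the DMA transfer and wait for its completion here ...

    // after a peripheral has written the destination buffer: discard stale cached lines
    _memc_inval( dst_paddr, length );
}
#endif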
1802
1803///////////////////////////////////////////////////////////////////////////////////
1804// _heap_info()
1805// This function returns the information associated with a heap (vaddr and size).
1806// It uses the global task index (CTX_GTID_ID, unique for each giet task) and the
1807// vspace index (CTX_VSID_ID) defined in the task context.
1808///////////////////////////////////////////////////////////////////////////////////
1809unsigned int _heap_info( unsigned int* vaddr, 
1810                         unsigned int* size ) 
1811{
1812    mapping_header_t * header  = (mapping_header_t *) (&seg_mapping_base);
1813    mapping_task_t * tasks     = _get_task_base(header);
1814    mapping_vobj_t * vobjs     = _get_vobj_base(header);
1815    mapping_vspace_t * vspaces = _get_vspace_base(header);
1816
1817    unsigned int taskid        = _get_context_slot(CTX_GTID_ID);
1818    unsigned int vspaceid      = _get_context_slot(CTX_VSID_ID);
1819
1820    int heap_local_vobjid      = tasks[taskid].heap_vobjid;
1821    if (heap_local_vobjid != -1) 
1822    {
1823        unsigned int vobjheapid = heap_local_vobjid + vspaces[vspaceid].vobj_offset;
1824        *vaddr                  = vobjs[vobjheapid].vaddr;
1825        *size                   = vobjs[vobjheapid].length;
1826        return 0;
1827    }
1828    else 
1829    {
1830        *vaddr = 0;
1831        *size = 0;
1832        return 0;
1833    }
1834}
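
///////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not compiled): retrieving the heap attributes of the
// calling task with _heap_info(). The variable names are assumptions made for
// this example only.
///////////////////////////////////////////////////////////////////////////////////
#if 0
static void example_heap_info_usage( void )
{
    unsigned int heap_vaddr;
    unsigned int heap_size;

    _heap_info( &heap_vaddr, &heap_size );

    if ( heap_size == 0 )
    {
        // the calling task has no private heap vobj in the mapping
    }
}
#endif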
1835
1836
1837////////////////////////////////////////////////////////////////////////////////
1838// _sim_helper_access()
1839// Accesses the Simulation Helper Component
1840// If the access targets a writable register (except SIMHELPER_PAUSE_SIM),
1841// the function should never return, since the simulation stops first.
1842// If the access targets a readable register, it returns 0 on success,
1843// 1 on failure, and writes the read value at address retval.
1844////////////////////////////////////////////////////////////////////////////////
1845unsigned int _sim_helper_access(unsigned int register_index,
1846                                unsigned int value,
1847                                unsigned int * retval) {
1848    unsigned int * sim_helper_address = (unsigned int *) &seg_sim_base;
1849   
1850    if (register_index == SIMHELPER_SC_STOP ||
1851        register_index == SIMHELPER_END_WITH_RETVAL ||
1852        register_index == SIMHELPER_EXCEPT_WITH_VAL ||
1853        register_index == SIMHELPER_PAUSE_SIM ||
1854        register_index == SIMHELPER_SIGINT) {
1855        sim_helper_address[register_index] = value;
1856    }
1857    else if (register_index == SIMHELPER_CYCLES) {
1858        *retval = sim_helper_address[register_index];
1859    }
1860    else {
1861        _get_lock(&_tty_put_lock);
1862        _puts("\n[GIET ERROR] in _sim_helper_access() : access to unmapped register\n");
1863        _release_lock(&_tty_put_lock);
1864        return 1;
1865    }
1866
1867    return 0;
1868}
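
////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not compiled): reading the simulated cycle count and
// stopping the simulation through _sim_helper_access(). The register indexes
// come from the function above; the variable names are assumptions.
////////////////////////////////////////////////////////////////////////////////
#if 0
static void example_sim_helper_usage( void )
{
    unsigned int cycles;

    // read the current simulation cycle count
    _sim_helper_access( SIMHELPER_CYCLES, 0, &cycles );

    // stop the simulation (this call should not return)
    _sim_helper_access( SIMHELPER_SC_STOP, 0, (unsigned int *) 0 );
}
#endif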
1869
1870
1871
1872// Local Variables:
1873// tab-width: 4
1874// c-basic-offset: 4
1875// c-file-offsets:((innamespace . 0)(inline-open . 0))
1876// indent-tabs-mode: nil
1877// End:
1878// vim: filetype=c:expandtab:shiftwidth=4:tabstop=4:softtabstop=4
1879