source: soft/giet_vm/sys/drivers.c @ 255

Last change on this file since 255 was 255, checked in by meunier, 11 years ago
  • Added a syscall and some user functions to manipulate the Simulation Helper
  • Changed the way the Vseg -> Pseg mapping is made during the boot to better utilize the address space (+ adaptation of the algorithm in memo)
  • Fixed a bug in boot_init (vobj_init): the vobj initialization could only be made for the first application (ptpr was not changed)
File size: 71.0 KB
RevLine 
[158]1///////////////////////////////////////////////////////////////////////////////////
2// File     : drivers.c
[238]3// Date     : 23/05/2013
[158]4// Author   : alain greiner
5// Copyright (c) UPMC-LIP6
6///////////////////////////////////////////////////////////////////////////////////
[253]7// The drivers.c and drivers.h files are part of the GIET-VM kernel.
8//
[158]9// They contain the drivers for the peripherals available in the SoCLib library:
10// - vci_multi_tty
11// - vci_multi_timer
12// - vci_multi_dma
13// - vci_multi_icu
[253]14// - vci_xicu
[158]15// - vci_gcd
16// - vci_frame_buffer
17// - vci_block_device
[253]18// - vci_multi_nic
19// - vci_chbuf_dma
[158]20//
[253]21// For the peripherals replicated in each cluster (ICU, TIMER, XCU, DMA, MMC),
[238]22// the corresponding (virtual) base addresses must be incremented by an offset
23// depending on the cluster index.
24//
25// The following global parameters must be defined in the hard_config.h file:
[189]26// - NB_CLUSTERS   
27// - NB_PROCS_MAX 
[238]28// - NB_TIM_CHANNELS   
29// - NB_DMA_CHANNELS     
30// - NB_TTY_CHANNELS
[158]31//
[218]32// The following virtual base addresses must be defined in the giet_vsegs.ld file:
[158]33// - seg_icu_base
[253]34// - seg_xcu_base
[203]35// - seg_tim_base
[238]36// - seg_dma_base
[158]37// - seg_tty_base
38// - seg_gcd_base
[203]39// - seg_fbf_base
[158]40// - seg_ioc_base
[218]41// - seg_nic_base
[238]42// - seg_cma_base
43// - seg_iob_base
[249]44// - seg_mmc_base
[253]45// - vseg_cluster_increment
[158]46///////////////////////////////////////////////////////////////////////////////////
47
[166]48#include <vm_handler.h>
[158]49#include <sys_handler.h>
50#include <giet_config.h>
51#include <drivers.h>
52#include <common.h>
53#include <hwr_mapping.h>
54#include <mips32_registers.h>
55#include <ctx_handler.h>
56
57#if !defined(NB_CLUSTERS)
[238]58# error: You must define NB_CLUSTERS in the hard_config.h file
[158]59#endif
[189]60
[249]61#if (NB_CLUSTERS > 256)
62# error: NB_CLUSTERS cannot be larger than 256!
63#endif
64
[189]65#if !defined(NB_PROCS_MAX)
[238]66# error: You must define NB_PROCS_MAX in the hard_config.h file
[189]67#endif
68
69#if (NB_PROCS_MAX > 8)
70# error: NB_PROCS_MAX cannot be larger than 8!
71#endif
72
[249]73#if !defined(GIET_USE_IOMMU)
74# error: You must define GIET_USE_IOMMU in the giet_config.h file
75#endif
76
[238]77#if !defined(NB_TTY_CHANNELS)
78# error: You must define NB_TTY_CHANNELS in the hard_config.h file
[158]79#endif
80
[238]81#if (NB_TTY_CHANNELS < 1)
82# error: NB_TTY_CHANNELS cannot be smaller than 1!
[165]83#endif
84
[238]85#if !defined(NB_DMA_CHANNELS)
86# error: You must define NB_DMA_CHANNELS in the hard_config.h file
[165]87#endif
88
[238]89#if (NB_DMA_CHANNELS > 8)
90# error: NB_DMA_CHANNELS cannot be larger than 8!
[165]91#endif
92
[238]93#if !defined(NB_TIM_CHANNELS)
94#define NB_TIM_CHANNELS 0
[189]95#endif
[165]96
[238]97#if ( (NB_TIM_CHANNELS + NB_PROCS_MAX) > 32 )
98# error: NB_TIM_CHANNELS + NB_PROCS_MAX cannot be larger than 32
[189]99#endif
[158]100
[238]101#if !defined(NB_IOC_CHANNELS)
102# error: You must define NB_IOC_CHANNELS in the hard_config.h file
[189]103#endif
[158]104
[238]105#if ( NB_IOC_CHANNELS > 8 )
106# error: NB_IOC_CHANNELS cannot be larger than 8
107#endif
108
109#if !defined(NB_NIC_CHANNELS)
110# error: You must define NB_NIC_CHANNELS in the hard_config.h file
111#endif
112
113#if ( NB_NIC_CHANNELS > 8 )
114# error: NB_NIC_CHANNELS cannot be larger than 8
115#endif
116
117#if !defined(NB_CMA_CHANNELS)
118# error: You must define NB_CMA_CHANNELS in the hard_config.h file
119#endif
120
121#if ( NB_CMA_CHANNELS > 8 )
122# error: NB_CMA_CHANNELS cannot be larger than 8
123#endif
124
[215]125#if !defined( USE_XICU )
[238]126# error: You must define USE_XICU in the hard_config.h file
[215]127#endif
[158]128
[249]129#if !defined( USE_IOB )
130# error: You must define USE_IOB in the hard_config.h file
[215]131#endif
132
133
[189]134#define in_unckdata __attribute__((section (".unckdata")))
[169]135
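///////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original driver code): how the (virtual)
// base address of a replicated peripheral is computed from the linker symbols
// declared in the giet_vsegs.ld file. These symbols are linker labels, so their
// addresses (not their values) carry the information; their extern declarations
// are assumed to be provided by the included headers. The helper name below is
// arbitrary: the drivers in this file use the same pattern inline for
// seg_tim_base, seg_xcu_base, seg_icu_base and seg_dma_base.
///////////////////////////////////////////////////////////////////////////////////
static inline unsigned int * _cluster_vbase( unsigned int seg_base,
                                             unsigned int cluster_id )
{
    // add one vseg_cluster_increment per cluster to the cluster 0 base address
    return (unsigned int *) ( seg_base +
                              (cluster_id * (unsigned int) &vseg_cluster_increment) );
}
// possible use:
//     unsigned int * timer = _cluster_vbase( (unsigned int) &seg_tim_base, cluster_id );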
[158]136//////////////////////////////////////////////////////////////////////////////
[228]137//     Timers driver
[158]138//////////////////////////////////////////////////////////////////////////////
[238]139// This peripheral is replicated in all clusters.
[203]140// The timers can be implemented in a vci_timer component or in a vci_xicu
[215]141// component (depending on the USE_XICU parameter).
[203]142// There is one timer (or xicu) component per cluster.
[189]143// There are two types of timers:
144// - "system" timers : one per processor, used for context switch.
145//   local_id in [0, NB_PROCS_MAX-1],
146// - "user" timers : requested by the task in the mapping_info data structure.
[203]147//   For each user timer, the timer_id is stored in the context of the task.
[238]148// The global index is cluster_id * (NB_PROCS_MAX+NB_TIM_CHANNELS) + local_id
[158]149//////////////////////////////////////////////////////////////////////////////
[238]150// The (virtual) base address of the associated segment is:
151//
[253]152//       timer_address = seg_tim_base + cluster_id * vseg_cluster_increment
153//   or  timer_address = seg_xcu_base + cluster_id * vseg_cluster_increment
[238]154//
155////////////////////////////////////////////////////////////////////////////////
[158]156
[189]157// User Timer signaling variables
158
[238]159#if (NB_TIM_CHANNELS > 0)
160in_unckdata volatile unsigned char _user_timer_event[NB_CLUSTERS * NB_TIM_CHANNELS] 
161                            = { [0 ... ((NB_CLUSTERS * NB_TIM_CHANNELS) - 1)] = 0 };
[189]162#endif
163
[158]164//////////////////////////////////////////////////////////////////////////////
[203]165//     _timer_start()
166// This function activates a timer in the vci_timer (or vci_xicu) component
167// by writing the period value in the proper register.
168// It can be used either by the kernel to initialise a "system" timer,
[189]169// or by a task (through a system call) to configure a "user" timer.
[158]170// Returns 0 if success, > 0 if error.
171//////////////////////////////////////////////////////////////////////////////
[238]172unsigned int _timer_start( unsigned int cluster_id, 
173                           unsigned int local_id, 
174                           unsigned int period) 
175{
[165]176    // parameters checking
[238]177    if (cluster_id >= NB_CLUSTERS)  return 1;
178    if (local_id >= NB_TIM_CHANNELS)  return 2;
[158]179
[215]180#if USE_XICU
[253]181    unsigned int* timer_address = (unsigned int *) ((unsigned int)&seg_xcu_base + 
182                                  (cluster_id * (unsigned int)&vseg_cluster_increment));
[158]183
[203]184    timer_address[XICU_REG(XICU_PTI_PER, local_id)] = period;
[189]185#else
[253]186    unsigned int* timer_address = (unsigned int *) ((unsigned int)&seg_tim_base + 
187                                  (cluster_id * (unsigned int)&vseg_cluster_increment));
[189]188
[203]189    timer_address[local_id * TIMER_SPAN + TIMER_PERIOD] = period;
[228]190    timer_address[local_id * TIMER_SPAN + TIMER_MODE] = 0x3;
[189]191#endif
[158]192    return 0;
193}
[228]194
[189]195//////////////////////////////////////////////////////////////////////////////
[203]196//     _timer_stop()
197// This function deactivates a timer in the vci_timer (or vci_xicu) component
198// by writing in the proper register.
[189]199// Returns 0 if success, > 0 if error.
200//////////////////////////////////////////////////////////////////////////////
[238]201unsigned int _timer_stop( unsigned int cluster_id, 
202                          unsigned int local_id) 
203{
[203]204    // parameters checking
[238]205    if (cluster_id >= NB_CLUSTERS)  return 1;
206    if (local_id >= NB_TIM_CHANNELS)  return 2;
[158]207
[215]208#if USE_XICU
[253]209    unsigned int * timer_address = (unsigned int *) ((unsigned int)&seg_xcu_base + 
210                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
[203]211
212    timer_address[XICU_REG(XICU_PTI_PER, local_id)] = 0;
213#else
[253]214    unsigned int* timer_address = (unsigned int *) ((unsigned int)&seg_tim_base + 
215                                  (cluster_id * (unsigned int)&vseg_cluster_increment));
[238]216
[203]217    timer_address[local_id * TIMER_SPAN + TIMER_MODE] = 0;
218#endif
219    return 0;
[189]220}
[228]221
[158]222//////////////////////////////////////////////////////////////////////////////
[203]223//     _timer_reset_irq()
224// This function acknowledges a timer interrupt in the vci_timer (or vci_xicu)
[204]225// component by reading/writing the proper register.
[203]226// It can be used either by the isr_switch() for a "system" timer,
227// or by the _isr_timer() for a "user" timer.
[158]228// Returns 0 if success, > 0 if error.
229//////////////////////////////////////////////////////////////////////////////
[238]230unsigned int _timer_reset_irq( unsigned int cluster_id, 
231                               unsigned int local_id ) 
232{
[203]233    // parameters checking
[238]234    if (cluster_id >= NB_CLUSTERS)  return 1;
235    if (local_id >= NB_TIM_CHANNELS)  return 2;
[158]236
[215]237#if USE_XICU
[253]238    unsigned int * timer_address = (unsigned int *) ((unsigned int)&seg_xcu_base +
239                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
[203]240
241    unsigned int bloup = timer_address[XICU_REG(XICU_PTI_ACK, local_id)];
[228]242    bloup++; // to avoid a warning
[203]243#else
[253]244    unsigned int * timer_address = (unsigned int *) ((unsigned int)&seg_tim_base + 
245                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
[203]246
247    timer_address[local_id * TIMER_SPAN + TIMER_RESETIRQ] = 0;
248#endif
249    return 0;
[158]250}
251
[246]252///////////////////////////////////////////////////////////////////////
[232]253// _timer_reset_irq_cpt()
[246]254///////////////////////////////////////////////////////////////////////
255// This function resets the period at the end of which
256// an interrupt is sent. To do so, we re-write the period
[253]257// in the proper register, which causes the count to restart.
[246]258// The period value is read from the same (TIMER_PERIOD) register,
259// which is why the code appears to do nothing useful (it reads a value
260// from a register and writes the same value back to the same register).
261// This function is called during a context switch (user or preemptive).
262///////////////////////////////////////////////////////////////////////
[253]263unsigned int _timer_reset_irq_cpt( unsigned int cluster_id, 
264                                   unsigned int local_id) {
[246]265    // parameters checking
266    if (cluster_id >= NB_CLUSTERS) {
267        return 1;
268    }
269    if (local_id >= NB_TIM_CHANNELS) {
270        return 2;
271    }
[232]272
[246]273#if USE_XICU
[253]274    unsigned int * timer_address = (unsigned int *) ((unsigned int) &seg_xcu_base + 
275                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
276
[246]277    unsigned int timer_period = timer_address[XICU_REG(XICU_PTI_PER, local_id)];
[232]278
[253]279    // We write 0 first because, if the timer is currently running,
280    // re-writing the period alone does not reset the corresponding counter
[246]281    timer_address[XICU_REG(XICU_PTI_PER, local_id)] = 0;
282    timer_address[XICU_REG(XICU_PTI_PER, local_id)] = timer_period;
283#else
284    // We suppose that the TIMER_MODE register value is 0x3
[253]285    unsigned int * timer_address = (unsigned int *) ((unsigned int)&seg_tim_base + 
286                                   (cluster_id * (unsigned int)&vseg_cluster_increment));
287
[246]288    unsigned int timer_period = timer_address[local_id * TIMER_SPAN + TIMER_PERIOD];
289
290    timer_address[local_id * TIMER_SPAN + TIMER_PERIOD] = timer_period;
291#endif
292
293    return 0;
294}
295
[158]296/////////////////////////////////////////////////////////////////////////////////
[228]297//     VciMultiTty driver
[158]298/////////////////////////////////////////////////////////////////////////////////
[189]299// There is only one multi_tty controller in the architecture.
[238]300// The total number of TTYs is defined by the configuration parameter NB_TTY_CHANNELS.
[189]301// The "system" terminal is TTY[0].
302// The "user" TTYs are allocated to applications by the GIET in the boot phase,
303// as defined in the mapping_info data structure. The corresponding tty_id must
304// be stored in the context of the task by the boot code.
305// The TTY address is : seg_tty_base + tty_id*TTY_SPAN
306/////////////////////////////////////////////////////////////////////////////////
[158]307
[189]308// TTY variables
[238]309in_unckdata volatile unsigned char _tty_get_buf[NB_TTY_CHANNELS];
310in_unckdata volatile unsigned char _tty_get_full[NB_TTY_CHANNELS] 
311                                     = { [0 ... NB_TTY_CHANNELS - 1] = 0 };
[228]312in_unckdata unsigned int _tty_put_lock = 0;  // protect kernel TTY[0]
[189]313
314////////////////////////////////////////////////////////////////////////////////
315//      _tty_error()
316////////////////////////////////////////////////////////////////////////////////
[238]317void _tty_error(unsigned int tty_id, unsigned int task_id) 
318{
[189]319    unsigned int proc_id = _procid();
320
321    _get_lock(&_tty_put_lock);
[249]322    if (tty_id == 0xFFFFFFFF) _puts("\n[GIET ERROR] no TTY assigned to the task ");
323    else                      _puts("\n[GIET ERROR] TTY index too large for task ");
[228]324    _putd(task_id);
[189]325    _puts(" on processor ");
[228]326    _putd(proc_id);
[189]327    _puts("\n");
328    _release_lock(&_tty_put_lock);
329}
[228]330
331
[189]332/////////////////////////////////////////////////////////////////////////////////
333//      _tty_write()
[158]334// Write one or several characters directly from a fixed-length user buffer to
335// the TTY_WRITE register of the TTY controller.
336// It doesn't use the TTY_PUT_IRQ interrupt and the associated kernel buffer.
337// This is a non blocking call: it tests the TTY_STATUS register, and stops
338// the transfer as soon as the TTY_STATUS[WRITE] bit is set.
339// The function returns  the number of characters that have been written.
[189]340/////////////////////////////////////////////////////////////////////////////////
[238]341unsigned int _tty_write(const char * buffer, 
342                        unsigned int length) 
343{
[228]344    unsigned int nwritten;
[238]345    unsigned int tty_id = _get_context_slot(CTX_TTY_ID);
346    unsigned int* tty_address = (unsigned int *) &seg_tty_base;
[158]347
[238]348    for (nwritten = 0; nwritten < length; nwritten++) 
349    {
[165]350        // check tty's status
[238]351        if ((tty_address[tty_id * TTY_SPAN + TTY_STATUS] & 0x2) == 0x2) break;
352        tty_address[tty_id * TTY_SPAN + TTY_WRITE] = (unsigned int) buffer[nwritten];
[158]353    }
354    return nwritten;
355}
[228]356
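///////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original driver code): writing a
// zero-terminated string on the terminal allocated to the calling task.
// Because _tty_write() is non-blocking and may accept only part of the buffer,
// the caller simply retries until all characters have been written.
// The example_* name is arbitrary.
///////////////////////////////////////////////////////////////////////////////
void example_tty_puts( const char * string )
{
    unsigned int length = 0;
    unsigned int sent   = 0;

    while ( string[length] != 0 ) length++;

    while ( sent < length )
    {
        sent += _tty_write( &string[sent], length - sent );
    }
}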
[158]357//////////////////////////////////////////////////////////////////////////////
[204]358//      _tty_read()
[158]359// This non-blocking function uses the TTY_GET_IRQ[tty_id] interrupt and
[165]360// the associated kernel buffer, which has been written by the ISR.
[204]361// It gets the TTY terminal index from the context of the current task.
[158]362// It fetches one single character from the _tty_get_buf[tty_id] kernel
363// buffer, writes this character to the user buffer, and resets the
364// _tty_get_full[tty_id] buffer.
[204]365// The length argument is not used.
[158]366// Returns 0 if the kernel buffer is empty, 1 if the buffer is full.
367//////////////////////////////////////////////////////////////////////////////
[238]368unsigned int _tty_read(char * buffer, 
369                       unsigned int length) 
370{
371    unsigned int tty_id = _get_context_slot(CTX_TTY_ID);
[158]372
[238]373    if (_tty_get_full[tty_id] == 0) 
374    {
[189]375        return 0;
376    }
[238]377    else 
378    {
[158]379        *buffer = _tty_get_buf[tty_id];
380        _tty_get_full[tty_id] = 0;
[189]381        return 1;
[158]382    }
[228]383}
384
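///////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original driver code): a blocking
// "get character" service built on top of the non-blocking _tty_read().
// It polls until the TTY ISR has filled the kernel buffer associated to the
// terminal of the calling task. The example_* name is arbitrary.
///////////////////////////////////////////////////////////////////////////////
char example_tty_getc()
{
    char byte;

    // _tty_read() returns 0 as long as the kernel buffer is empty
    while ( _tty_read( &byte, 1 ) == 0 ) { asm volatile("nop"); }

    return byte;
}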
[158]385////////////////////////////////////////////////////////////////////////////////
[204]386//     _tty_get_char()
387// This function is used by the _isr_tty to read a character in the TTY
388// terminal defined by the tty_id argument. The character is stored
389// in the requested buffer, and the IRQ is acknowledged.
390// Returns 0 if success, 1 if tty_id too large.
[158]391////////////////////////////////////////////////////////////////////////////////
[238]392unsigned int _tty_get_char(unsigned int tty_id, 
393                           unsigned char * buffer) 
394{
[204]395    // checking argument
[238]396    if (tty_id >= NB_TTY_CHANNELS) { return 1; }
[199]397
[204]398    // compute terminal base address
[228]399    unsigned int * tty_address = (unsigned int *) &seg_tty_base; 
[158]400
[228]401    *buffer = (unsigned char) tty_address[tty_id * TTY_SPAN + TTY_READ];
[204]402    return 0;
[158]403}
404
[228]405
[158]406////////////////////////////////////////////////////////////////////////////////
[238]407//     VciMultiIcu or VciXicu driver
[158]408////////////////////////////////////////////////////////////////////////////////
[238]409// This hardware component is replicated in all clusters.
[203]410// There is one vci_multi_icu (or vci_xicu) component per cluster,
[253]411// and the number of ICU channels is equal to NB_PROCS_MAX,
[238]412// because there is one private interrupt controller per processor.
[158]413////////////////////////////////////////////////////////////////////////////////
[238]414// The (virtual) base address of the associated segment is:
415//
[253]416//       icu_address = seg_icu_base + cluster_id * vseg_cluster_increment
417//  or   icu_address = seg_xcu_base + cluster_id * vseg_cluster_increment
[238]418//
419////////////////////////////////////////////////////////////////////////////////
[158]420
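////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original driver code), assuming
// the _icu_set_mask() / _icu_get_index() functions defined below are exported
// by drivers.h: enabling a set of HWI lines for the ICU channel of a given
// processor, then asking for the highest priority active interrupt.
// The example_* name is arbitrary.
////////////////////////////////////////////////////////////////////////////////
unsigned int example_icu_poll( unsigned int cluster_id,
                               unsigned int proc_id,
                               unsigned int hwi_mask )
{
    unsigned int irq_index;

    // enable the requested HWI lines (is_PTI == 0) for this ICU channel
    if ( _icu_set_mask( cluster_id, proc_id, hwi_mask, 0 ) ) return 0xFFFFFFFF;

    // get the index of the highest priority active interrupt
    // (in the XICU case, the value 32 means "no active interrupt")
    if ( _icu_get_index( cluster_id, proc_id, &irq_index ) ) return 0xFFFFFFFF;

    return irq_index;
}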
421////////////////////////////////////////////////////////////////////////////////
[203]422//     _icu_set_mask()
423// This function can be used with both the vci_xicu & vci_multi_icu components.
424// It sets the mask register for the ICU channel identified by the cluster index
425// and the processor index: all '1' bits are set / all '0' bits are not modified.
[158]426// Returns 0 if success, > 0 if error.
427////////////////////////////////////////////////////////////////////////////////
[238]428unsigned int _icu_set_mask( unsigned int cluster_id,
429                            unsigned int proc_id,
430                            unsigned int value,
[253]431                            unsigned int is_PTI) 
[238]432{
[203]433    // parameters checking
[238]434    if (cluster_id >= NB_CLUSTERS) return 1; 
435    if (proc_id >= NB_PROCS_MAX)   return 1; 
[203]436
[215]437#if USE_XICU
[253]438    unsigned int * icu_address = (unsigned int *) ((unsigned int)&seg_xcu_base + 
439                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
440    if (is_PTI) 
[238]441    {
[228]442        icu_address[XICU_REG(XICU_MSK_PTI_ENABLE, proc_id)] = value;
443    }
[238]444    else 
445    {
[228]446        icu_address[XICU_REG(XICU_MSK_HWI_ENABLE, proc_id)] = value;
447    }
[189]448#else
[253]449    unsigned int * icu_address = (unsigned int *) ((unsigned int)&seg_icu_base + 
450                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
451
[203]452    icu_address[proc_id * ICU_SPAN + ICU_MASK_SET] = value; 
453#endif
[158]454    return 0;
455}
[228]456
[158]457////////////////////////////////////////////////////////////////////////////////
[203]458//     _icu_get_index()
459// This function can be used with both the vci_xicu & vci_multi_icu components.
460// It returns the index of the highest priority (smallest index) active interrupt.
461// The ICU channel is identified by the cluster index and the processor index.
[158]462// Returns 0 if success, > 0 if error.
463////////////////////////////////////////////////////////////////////////////////
[238]464unsigned int _icu_get_index( unsigned int cluster_id, 
465                             unsigned int proc_id, 
466                             unsigned int * buffer) 
467{
[203]468    // parameters checking
[238]469    if (cluster_id >= NB_CLUSTERS)  return 1;
470    if (proc_id >= NB_PROCS_MAX)    return 1;
[203]471
[215]472#if USE_XICU
[253]473    unsigned int * icu_address = (unsigned int *) ((unsigned int)&seg_xcu_base + 
474                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
475
[228]476    unsigned int prio = icu_address[XICU_REG(XICU_PRIO, proc_id)];
[203]477    unsigned int pti_ok = (prio & 0x00000001);
478    unsigned int hwi_ok = (prio & 0x00000002);
479    unsigned int swi_ok = (prio & 0x00000004);
480    unsigned int pti_id = (prio & 0x00001F00) >> 8;
481    unsigned int hwi_id = (prio & 0x001F0000) >> 16;
482    unsigned int swi_id = (prio & 0x1F000000) >> 24;
[238]483    if      (pti_ok) { *buffer = pti_id; }
484    else if (hwi_ok) { *buffer = hwi_id; }
485    else if (swi_ok) { *buffer = swi_id; }
486    else             { *buffer = 32; }
[189]487#else
[253]488    unsigned int * icu_address = (unsigned int *) ((unsigned int)&seg_icu_base + 
489                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
490
[203]491    *buffer = icu_address[proc_id * ICU_SPAN + ICU_IT_VECTOR]; 
492#endif
[158]493    return 0;
494}
495
496////////////////////////////////////////////////////////////////////////////////
[228]497//     VciGcd driver
[158]498////////////////////////////////////////////////////////////////////////////////
499// The Greatest Common Divisor (GCD) component is a very simple hardware
[165]500// coprocessor computing the GCD of two 32-bit integers.
[158]501// It has no DMA capability.
502////////////////////////////////////////////////////////////////////////////////
503
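////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original driver code), assuming:
// - the _gcd_write() / _gcd_read() functions below are exported by drivers.h,
// - the register indices GCD_OPA, GCD_OPB, GCD_START and GCD_STATUS are
//   defined in hwr_mapping.h (only GCD_END is referenced in this file),
// - writing GCD_START launches the computation, a non-zero GCD_STATUS means
//   "busy", and the result is read back from GCD_OPA.
// The example_* name is arbitrary.
////////////////////////////////////////////////////////////////////////////////
unsigned int example_gcd_compute( unsigned int opa,
                                  unsigned int opb,
                                  unsigned int * result )
{
    unsigned int status;

    // load the two operands and start the computation
    if ( _gcd_write( GCD_OPA, opa ) )  return 1;
    if ( _gcd_write( GCD_OPB, opb ) )  return 1;
    if ( _gcd_write( GCD_START, 1 ) )  return 1;

    // busy waiting until the coprocessor is idle
    do
    {
        if ( _gcd_read( GCD_STATUS, &status ) ) return 1;
    } while ( status != 0 );

    // read back the result
    return _gcd_read( GCD_OPA, result );
}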
504////////////////////////////////////////////////////////////////////////////////
[189]505//     _gcd_write()
[158]506// Write a 32-bit word in a memory mapped register of the GCD coprocessor.
507// Returns 0 if success, > 0 if error.
508////////////////////////////////////////////////////////////////////////////////
[238]509unsigned int _gcd_write( unsigned int register_index, 
510                         unsigned int value) 
511{
[165]512    // parameters checking
[238]513    if (register_index >= GCD_END)  return 1; 
[158]514
[228]515    unsigned int * gcd_address = (unsigned int *) &seg_gcd_base;
[165]516
517    gcd_address[register_index] = value; // write word
[158]518    return 0;
519}
[228]520
521
[158]522////////////////////////////////////////////////////////////////////////////////
[189]523//     _gcd_read()
[158]524// Read a 32-bit word in a memory mapped register of the GCD coprocessor.
525// Returns 0 if success, > 0 if error.
526////////////////////////////////////////////////////////////////////////////////
[238]527unsigned int _gcd_read( unsigned int register_index, 
528                        unsigned int * buffer ) 
529{
[165]530    // parameters checking
[238]531    if (register_index >= GCD_END)  return 1;
[158]532
[228]533    unsigned int * gcd_address = (unsigned int *) &seg_gcd_base;
[165]534
535    *buffer = gcd_address[register_index]; // read word
[158]536    return 0;
537}
538
539////////////////////////////////////////////////////////////////////////////////
540// VciBlockDevice driver
541////////////////////////////////////////////////////////////////////////////////
[165]542// The VciBlockDevice is a single channel external storage controller.
[166]543//
544// The IOMMU can be activated or not:
545//
546// 1) When the IOMMU is used, a fixed size 2Mbytes vseg is allocated to
547// the IOC peripheral, in the I/O virtual space, and the user buffer is
548// dynamically remapped in the IOMMU page table. The corresponding entry
549// in the IOMMU PT1 is defined by the kernel _ioc_iommu_ix1 variable.
550// The number of pages to be unmapped (i.e. the number of PT2 entries) is
551// dynamically computed and stored in the kernel _ioc_iommu_npages variable.
552// It cannot be larger than 512.
553// The user buffer is unmapped by the _ioc_completed() function when
554// the transfer is completed.
555//
[166]556// 2) If the IOMMU is not used, we check that the user buffer is mapped to a
557// contiguous physical buffer (this is generally true because the user space
558// page tables are statically constructed to use contiguous physical memory).
559//
560// Finally, the memory buffer must fulfill the following conditions:
561// - The user buffer must be word aligned,
562// - The user buffer must be mapped in user address space,
563// - The user buffer must be writable in case of (to_mem) access,
564// - The total number of physical pages occupied by the user buffer cannot
565//   be larger than 512 pages if the IOMMU is activated,
566// - All physical pages occupied by the user buffer must be contiguous
567//   if the IOMMU is not activated.
568// An error code is returned if these conditions are not verified.
569//
[158]570// As the IOC component can be used by several programs running in parallel,
571// the _ioc_lock variable guarantees exclusive access to the device.  The
572// _ioc_read() and _ioc_write() functions use atomic LL/SC to get the lock,
573// and set _ioc_lock to a non-zero value.  The _ioc_write() and _ioc_read()
574// functions are blocking, polling the _ioc_lock variable until the device is
575// available.
576// When the transfer is completed, the ISR routine activated by the IOC IRQ
577// sets the _ioc_done variable to a non-zero value. Possible address errors
578// detected by the IOC peripheral are reported by the ISR in the _ioc_status
579// variable.
580// The _ioc_completed() function is polling the _ioc_done variable, waiting for
[166]581// transfer completion. When the completion is signaled, the _ioc_completed()
[158]582// function resets the _ioc_done variable to zero, and releases the _ioc_lock
583// variable.
584//
585// In a multi-processing environment, this polling policy should be replaced by
586// a descheduling policy for the requesting process.
587///////////////////////////////////////////////////////////////////////////////
588
[189]589// IOC global variables
[228]590in_unckdata volatile unsigned int _ioc_status = 0;
591in_unckdata volatile unsigned int _ioc_done = 0;
592in_unckdata unsigned int _ioc_lock = 0;
593in_unckdata unsigned int _ioc_iommu_ix1 = 0;
594in_unckdata unsigned int _ioc_iommu_npages; 
[158]595
596///////////////////////////////////////////////////////////////////////////////
[189]597//      _ioc_access()
[166]598// This function transfers data between a memory buffer and the block device.
599// The buffer length is (count*block_size) bytes.
600// Arguments are:
601// - to_mem     : from external storage to memory when non 0
602// - lba        : first block index on the external storage.
603// - user_vaddr : virtual base address of the memory buffer.
604// - count      : number of blocks to be transfered.
[158]605// Returns 0 if success, > 0 if error.
606///////////////////////////////////////////////////////////////////////////////
[238]607unsigned int _ioc_access( unsigned int to_mem,
608                          unsigned int lba,
609                          unsigned int user_vaddr,
610                          unsigned int count) 
611{
612    unsigned int user_vpn_min;     // first virtual page index in user space
613    unsigned int user_vpn_max;     // last virtual page index in user space
614    unsigned int vpn;              // current virtual page index in user space
615    unsigned int ppn;              // physical page number
616    unsigned int flags;            // page protection flags
617    unsigned int ix2;              // page index in IOMMU PT1 page table
618    unsigned int ppn_first;        // first physical page number for user buffer
619    unsigned int buf_xaddr = 0;    // user buffer virtual address in IO space (if IOMMU)
620    paddr_t      buf_paddr = 0;    // user buffer physical address (if no IOMMU),
[246]621
[166]622    // check buffer alignment
[238]623    if ((unsigned int) user_vaddr & 0x3)
624    {
625        _get_lock(&_tty_put_lock);
626        _puts("[GIET ERROR] in _ioc_access() : user buffer not word aligned\n");
627        _release_lock(&_tty_put_lock);
628        return 1; 
[228]629    }
[158]630
[228]631    unsigned int * ioc_address = (unsigned int *) &seg_ioc_base ;
[204]632
[228]633    unsigned int block_size = ioc_address[BLOCK_DEVICE_BLOCK_SIZE];
634    unsigned int length = count * block_size;
[158]635
[167]636    // get user space page table virtual address
[238]637    unsigned int user_pt_vbase = _get_context_slot(CTX_PTAB_ID);
[228]638
[166]639    user_vpn_min = user_vaddr >> 12;
640    user_vpn_max = (user_vaddr + length - 1) >> 12;
[158]641
[166]642    // loop on all virtual pages covering the user buffer
[238]643    for (vpn = user_vpn_min, ix2 = 0 ; 
644         vpn <= user_vpn_max ; 
645         vpn++, ix2++ ) 
646    {
[166]647        // get ppn and flags for each vpn
[246]648        unsigned int ko = _v2p_translate((page_table_t *) user_pt_vbase,
649                                          vpn,
650                                          &ppn,
651                                          &flags);
[166]652        // check access rights
[246]653        if (ko)
[238]654        {
655            _get_lock(&_tty_put_lock);
656            _puts("[GIET ERROR] in _ioc_access() : user buffer unmapped\n");
657            _release_lock(&_tty_put_lock);
658            return 1; 
[228]659        }
[238]660        if ((flags & PTE_U) == 0) 
661        {
662            _get_lock(&_tty_put_lock);
663            _puts("[GIET ERROR] in _ioc_access() : user buffer not in user space\n");
664            _release_lock(&_tty_put_lock);
665            return 1; 
[228]666        }
[238]667        if (((flags & PTE_W) == 0 ) && to_mem)
668        {
669            _get_lock(&_tty_put_lock);
670            _puts("[GIET ERROR] in _ioc_access() : user buffer not writable\n");
671            _release_lock(&_tty_put_lock);
672            return 1; 
[228]673        }
[158]674
[166]675        // save first ppn value
[238]676        if (ix2 == 0) ppn_first = ppn;
[158]677
[249]678        if ( GIET_USE_IOMMU && USE_IOB ) // user buffer remapped in the I/O space
[238]679        {
[166]680            // check buffer length < 2 Mbytes
[238]681            if (ix2 > 511) 
682            {
683                _get_lock(&_tty_put_lock);
684                _puts("[GIET ERROR] in _ioc_access() : user buffer > 2 Mbytes\n");
685                _release_lock(&_tty_put_lock);
686                return 1; 
[228]687            }
[158]688
[166]689            // map the physical page in IOMMU page table
[238]690            _iommu_add_pte2( _ioc_iommu_ix1,    // PT1 index
691                             ix2,               // PT2 index
692                             ppn,               // Physical page number   
693                             flags);            // Protection flags
694
695            // compute user buffer virtual address in IO space
696            buf_xaddr = (_ioc_iommu_ix1) << 21 | (user_vaddr & 0xFFF);
[166]697        }
[238]698        else            // No IOMMU
699        {
700            // check that physical pages are contiguous
701            if ((ppn - ppn_first) != ix2) 
702            {
703                _get_lock(&_tty_put_lock);
704                _puts("[GIET ERROR] in _ioc_access() : split physical user buffer\n");
705                _release_lock(&_tty_put_lock);
706                return 1; 
[228]707            }
[238]708
709            // compute user buffer physical address
710            buf_paddr = (((paddr_t)ppn_first) << 12) | (user_vaddr & 0xFFF);
[166]711        }
712    } // end for vpn
[158]713
[166]714    // register the number of pages to be unmapped
715    _ioc_iommu_npages = (user_vpn_max - user_vpn_min) + 1;
[158]716
[254]717    // If no IOB, invalidate L1 cache in case of memory write
718    if ( to_mem && (USE_IOB == 0) ) _dcache_buf_invalidate((void *) user_vaddr, length);
[158]719
[238]720#if GIET_DEBUG_IOC_DRIVER
721_get_lock(&_tty_put_lock);
722_puts("\n[GIET DEBUG]  IOC_ACCESS at cycle ");
723_putd( _proctime() );
724_puts("\n - proc_id         = ");
725_putd( _procid() );
726_puts("\n - ioc_vbase       = ");
727_putx( (unsigned int)ioc_address );
728_puts("\n - psched_vbase    = ");
729_putx( (unsigned int)_get_sched() );
730_puts("\n - pt_vbase        = ");
731_putx( user_pt_vbase );
732_puts("\n - user_buf_vbase  = ");
733_putx( user_vaddr );
734_puts("\n - user_buf_length = ");
735_putx( length );
736_puts("\n - user_buf_paddr  = ");
737_putl( buf_paddr );
738_puts("\n - user_buf_xaddr  = ");
739_putx( buf_xaddr );
740_puts("\n");
741_release_lock(&_tty_put_lock);
742#endif
[166]743
[254]744    // If IOB, invalidate L2 cache in case of memory write
[249]745    if ( to_mem && USE_IOB ) _memc_inval( buf_paddr, length );
746   
[166]747    // get the lock on ioc device
[228]748    _get_lock(&_ioc_lock);
[158]749
[166]750    // peripheral configuration 
[249]751    if ( GIET_USE_IOMMU && USE_IOB ) 
[238]752    {
753        ioc_address[BLOCK_DEVICE_BUFFER] = buf_xaddr;
754    }
755    else
756    {
757        ioc_address[BLOCK_DEVICE_BUFFER]     = (unsigned int)buf_paddr;
758        ioc_address[BLOCK_DEVICE_BUFFER_EXT] = (unsigned int)(buf_paddr>>32);
759    }
[228]760    ioc_address[BLOCK_DEVICE_COUNT] = count;
761    ioc_address[BLOCK_DEVICE_LBA] = lba;
[238]762    if (to_mem == 0) 
763    {
[228]764        ioc_address[BLOCK_DEVICE_OP] = BLOCK_DEVICE_WRITE;
765    }
[238]766    else 
767    {
[228]768        ioc_address[BLOCK_DEVICE_OP] = BLOCK_DEVICE_READ;
769    }
[158]770    return 0;
771}
[228]772
[158]773/////////////////////////////////////////////////////////////////////////////////
774// _ioc_completed()
775//
776// This function checks completion of an I/O transfer and reports errors.
[166]777// As it is a blocking call, the processor is stalled.
778// If the virtual memory is activated, the pages mapped in the I/O virtual
779// space are unmapped, and the IOB TLB is cleared.
[158]780// Returns 0 if success, > 0 if error.
781/////////////////////////////////////////////////////////////////////////////////
[238]782unsigned int _ioc_completed() 
783{
[228]784    unsigned int ret;
785    unsigned int ix2;
[158]786
[166]787    // busy waiting
[238]788    while (_ioc_done == 0) { asm volatile("nop"); }
[158]789
[238]790#if GIET_DEBUG_IOC_DRIVER
791_get_lock(&_tty_put_lock);
792_puts("\n[GIET DEBUG]  IOC_COMPLETED at cycle ");
793_putd( _proctime() );
794_puts("\n - proc_id         = ");
795_putd( _procid() );
796_puts("\n");
797_release_lock(&_tty_put_lock);
798#endif
799
[166]800    // unmap the buffer from IOMMU page table if IOMMU is activated
[249]801    if ( GIET_USE_IOMMU && USE_IOB ) 
[238]802    {
[228]803        unsigned int * iob_address = (unsigned int *) &seg_iob_base;
[166]804
[238]805        for (ix2 = 0; ix2 < _ioc_iommu_npages; ix2++) 
806        {
[166]807            // unmap the page in IOMMU page table
[228]808            _iommu_inval_pte2(
809                    _ioc_iommu_ix1, // PT1 index
810                    ix2 );          // PT2 index
[166]811
812            // clear IOMMU TLB
[169]813            iob_address[IOB_INVAL_PTE] = (_ioc_iommu_ix1 << 21) | (ix2 << 12); 
[166]814        }
815    }
816
817    // test IOC status
[158]818    if ((_ioc_status != BLOCK_DEVICE_READ_SUCCESS)
[238]819            && (_ioc_status != BLOCK_DEVICE_WRITE_SUCCESS)) ret = 1; // error
820    else                                                    ret = 0; // success
[158]821
[166]822    // reset synchronization variables
[223]823    _ioc_done = 0;
[228]824    asm volatile("sync");
[223]825    _ioc_lock = 0;
[158]826
827    return ret;
828}
[228]829
830
[166]831///////////////////////////////////////////////////////////////////////////////
[189]832//     _ioc_read()
[166]833// Transfer data from the block device to a memory buffer in user space.
834// - lba    : first block index on the block device
835// - buffer : base address of the memory buffer (must be word aligned)
836// - count  : number of blocks to be transfered.
837// Returns 0 if success, > 0 if error.
838///////////////////////////////////////////////////////////////////////////////
[238]839unsigned int _ioc_read( unsigned int lba, 
840                        void * buffer, 
841                        unsigned int count) 
842{
[228]843    return _ioc_access(
844            1,        // read access
845            lba,
846            (unsigned int) buffer,
847            count);
[166]848}
[228]849
850
[166]851///////////////////////////////////////////////////////////////////////////////
[189]852//     _ioc_write()
[166]853// Transfer data from a memory buffer in user space to the block device.
854// - lba    : first block index on the block device
855// - buffer : base address of the memory buffer (must be word aligned)
856// - count  : number of blocks to be transfered.
857// Returns 0 if success, > 0 if error.
858///////////////////////////////////////////////////////////////////////////////
[238]859unsigned int _ioc_write( unsigned int lba, 
860                         const void * buffer, 
861                         unsigned int count) 
862{
[228]863    return _ioc_access(
864            0, // write access
865            lba,
866            (unsigned int) buffer,
867            count);
[166]868}
[228]869
870
[204]871///////////////////////////////////////////////////////////////////////////////
872//     _ioc_get_status()
873// This function returns the transfer status, and acknowledges the IRQ.
874// Returns 0 if success, > 0 if error.
875///////////////////////////////////////////////////////////////////////////////
[238]876unsigned int _ioc_get_status(unsigned int * status) 
877{
[204]878    // get IOC base address
[228]879    unsigned int * ioc_address = (unsigned int *) &seg_ioc_base;
[166]880
[204]881    *status = ioc_address[BLOCK_DEVICE_STATUS]; // read status & reset IRQ
882    return 0;
883}
884
[228]885
[237]886///////////////////////////////////////////////////////////////////////////////
887//     _ioc_get_block_size()
888// This function returns the block_size with which the IOC has been configured.
889///////////////////////////////////////////////////////////////////////////////
[238]890unsigned int _ioc_get_block_size() 
891{
[237]892    // get IOC base address
893    unsigned int * ioc_address = (unsigned int *) &seg_ioc_base;
894   
895    return  ioc_address[BLOCK_DEVICE_BLOCK_SIZE];
896}
897
898
[158]899//////////////////////////////////////////////////////////////////////////////////
[189]900// VciMultiDma driver
901//////////////////////////////////////////////////////////////////////////////////
902// The DMA controllers are physically distributed in the clusters.
[238]903// There are (NB_CLUSTERS * NB_DMA_CHANNELS) channels, indexed by a global index:
904//        dma_id = cluster_id * NB_DMA_CHANNELS + loc_id
[189]905//
[238]906// As a DMA channel is a private resource allocated to a task,
907// there is no lock protecting exclusive access to the channel.
[189]908// The signaling between the OS and the DMA uses the _dma_done[dma_id]
909// synchronization variables (set by the ISR, and reset by the OS).
910// The transfer status is copied by the ISR in the _dma_status[dma_id] variables.
[238]911//////////////////////////////////////////////////////////////////////////////////
912// The (virtual) base address of the associated segment is:
[189]913//
[253]914//       dma_address = seg_dma_base + cluster_id * vseg_cluster_increment
[238]915//
916////////////////////////////////////////////////////////////////////////////////
[189]917
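//////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original driver code), assuming
// the _dma_transfer() / _dma_completed() functions defined below are exported
// by drivers.h: a complete DMA transaction from a user buffer to the frame
// buffer. _dma_transfer() only programs the channel allocated to the calling
// task (CTX_DMA_ID); _dma_completed() then busy waits until the DMA ISR sets
// the _dma_done[] variable and returns the transfer status. The _fb_dma_write()
// and _fb_dma_completed() functions below wrap this same sequence.
// The example_* name is arbitrary.
//////////////////////////////////////////////////////////////////////////////////
unsigned int example_dma_to_frame_buffer( unsigned int offset,
                                          const void * user_buffer,
                                          unsigned int length )
{
    // program the DMA channel: frame buffer (dev_type == 0), memory -> device
    if ( _dma_transfer( 0,                           // dev_type : frame buffer
                        0,                           // to_user  : 0 (write)
                        offset,                      // offset in frame buffer
                        (unsigned int) user_buffer,  // user buffer vbase
                        length ) ) return 1;         // length in bytes

    // wait for completion and return the DMA status (0 == success)
    return _dma_completed();
}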
[238]918#if NB_DMA_CHANNELS > 0
[189]919
[238]920// in_unckdata unsigned int            _dma_lock[NB_DMA_CHANNELS * NB_CLUSTERS]
921// = { [0 ... (NB_DMA_CHANNELS * NB_CLUSTERS) - 1] = 0 };
[189]922
[238]923in_unckdata volatile unsigned int    _dma_done[NB_DMA_CHANNELS * NB_CLUSTERS] 
924        = { [0 ... (NB_DMA_CHANNELS * NB_CLUSTERS) - 1] = 0 };
925in_unckdata volatile unsigned int _dma_status[NB_DMA_CHANNELS * NB_CLUSTERS];
[228]926in_unckdata unsigned int _dma_iommu_ix1 = 1;
[238]927in_unckdata unsigned int _dma_iommu_npages[NB_DMA_CHANNELS * NB_CLUSTERS];
[213]928#endif
[189]929
930//////////////////////////////////////////////////////////////////////////////////
[204]931// _dma_reset_irq()
932//////////////////////////////////////////////////////////////////////////////////
[238]933unsigned int _dma_reset_irq( unsigned int cluster_id, 
934                             unsigned int channel_id) 
935{
936#if NB_DMA_CHANNELS > 0
[204]937    // parameters checking
[238]938    if (cluster_id >= NB_CLUSTERS)  return 1;
939    if (channel_id >= NB_DMA_CHANNELS)  return 1; 
[204]940
941    // compute DMA base address
[253]942    unsigned int * dma_address = (unsigned int *) ((unsigned int)&seg_dma_base + 
943                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
[204]944
[228]945    dma_address[channel_id * DMA_SPAN + DMA_RESET] = 0;           
[204]946    return 0;
[213]947#else
948    return -1;
949#endif
[204]950}
[218]951
[204]952//////////////////////////////////////////////////////////////////////////////////
953// _dma_get_status()
954//////////////////////////////////////////////////////////////////////////////////
[238]955unsigned int _dma_get_status( unsigned int cluster_id, 
956                              unsigned int channel_id, 
957                              unsigned int * status) 
958{
959#if NB_DMA_CHANNELS > 0
[204]960    // parameters checking
[238]961    if (cluster_id >= NB_CLUSTERS)  return 1;
962    if (channel_id >= NB_DMA_CHANNELS)  return 1;
[204]963
964    // compute DMA base address
[253]965    unsigned int * dma_address = (unsigned int *) ((unsigned int)&seg_dma_base + 
966                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
[207]967
[228]968    *status = dma_address[channel_id * DMA_SPAN + DMA_LEN];
[204]969    return 0;
[213]970#else
971    return -1;
972#endif
[204]973}
974
975//////////////////////////////////////////////////////////////////////////////////
[218]976// _dma_transfer()
977// Transfer data between a user buffer and a device buffer using DMA.
[238]978// Only one device type is supported: Frame Buffer (dev_type == 0)
[218]979// Arguments are:
980// - dev_type     : device type.
981// - to_user      : from  device buffer to user buffer when true.
982// - offset       : offset (in bytes) in the device buffer.
983// - user_vaddr   : virtual base address of the user buffer.
984// - length       : number of bytes to be transfered.
985//
[238]986// The cluster_id and channel_id are obtained from task context (CTX_DMA_ID).
[207]987// The user buffer must be mapped in user address space and word-aligned.
[169]988// The user buffer length must be a multiple of 4 bytes.
[238]989// We compute the physical base addresses for both the device buffer
[189]990// and the user buffer before programming the DMA transfer.
[207]991// The GIET being fully static, we don't need to split the transfer in 4 Kbytes
[189]992// pages, because the user buffer is contiguous in physical space.
[158]993// Returns 0 if success, > 0 if error.
994//////////////////////////////////////////////////////////////////////////////////
[238]995unsigned int _dma_transfer( unsigned int dev_type,
996                            unsigned int to_user,
997                            unsigned int offset,
998                            unsigned int user_vaddr,
999                            unsigned int length ) 
1000{
1001#if NB_DMA_CHANNELS > 0
[228]1002    unsigned int ko;           // unsuccessful V2P translation
[238]1003    unsigned int device_vbase; // device buffer vbase address
[228]1004    unsigned int flags;        // protection flags
1005    unsigned int ppn;          // physical page number
[238]1006    paddr_t      user_pbase;   // user buffer pbase address
1007    paddr_t      device_pbase; // device buffer pbase address
[158]1008
[189]1009    // check user buffer address and length alignment
[238]1010    if ((user_vaddr & 0x3) || (length & 0x3)) 
1011    {
[203]1012        _get_lock(&_tty_put_lock);
[218]1013        _puts("\n[GIET ERROR] in _dma_transfer : user buffer not word aligned\n");
[203]1014        _release_lock(&_tty_put_lock);
[189]1015        return 1;
1016    }
[169]1017
[218]1018    // get DMA channel and compute DMA vbase address
[238]1019    unsigned int dma_id      = _get_context_slot(CTX_DMA_ID);
1020    if ( dma_id == 0xFFFFFFFF )
1021    {
1022        _get_lock(&_tty_put_lock);
1023        _puts("\n[GIET ERROR] in _dma_transfer : no DMA channel allocated\n");
1024        _release_lock(&_tty_put_lock);
1025        return 1;
1026    }
1027    unsigned int cluster_id  = dma_id / NB_DMA_CHANNELS;
1028    unsigned int channel_id  = dma_id % NB_DMA_CHANNELS;
[253]1029    unsigned int * dma_vbase = (unsigned int *) ((unsigned int)&seg_dma_base + 
1030                               (cluster_id * (unsigned int)&vseg_cluster_increment));
[218]1031    // get page table address
[238]1032    unsigned int user_ptab = _get_context_slot(CTX_PTAB_ID);
[169]1033
[238]1034    // get device buffer virtual address, depending on peripheral type
1035    if (dev_type == 0) 
1036    {
1037        device_vbase = (unsigned int) &seg_fbf_base + offset;
[228]1038    }
[238]1039    else 
1040    {
1041        _get_lock(&_tty_put_lock);
1042        _puts("\n[GIET ERROR] in _dma_transfer : device type not supported\n");
1043        _release_lock(&_tty_put_lock);
1044        return 1;
[228]1045    }
[189]1046
[218]1047    // get device buffer physical address
[238]1048    ko = _v2p_translate( (page_table_t*) user_ptab, 
1049                         (device_vbase >> 12), 
1050                         &ppn, 
1051                         &flags );
1052    if (ko) 
1053    {
[203]1054        _get_lock(&_tty_put_lock);
[218]1055        _puts("\n[GIET ERROR] in _dma_transfer : device buffer unmapped\n");
[203]1056        _release_lock(&_tty_put_lock);
[238]1057        return 1;
[189]1058    }
[238]1059    device_pbase = ((paddr_t)ppn << 12) | (device_vbase & 0x00000FFF);
[189]1060
[218]1061    // Compute user buffer physical address
[238]1062    ko = _v2p_translate( (page_table_t*) user_ptab, 
1063                         (user_vaddr >> 12), 
1064                         &ppn, 
1065                         &flags );
1066    if (ko) 
1067    {
[203]1068        _get_lock(&_tty_put_lock);
[218]1069        _puts("\n[GIET ERROR] in _dma_transfer() : user buffer unmapped\n");
[203]1070        _release_lock(&_tty_put_lock);
[238]1071        return 1;
[189]1072    } 
[238]1073    if ((flags & PTE_U) == 0) 
1074    {
[203]1075        _get_lock(&_tty_put_lock);
[218]1076        _puts("[GIET ERROR] in _dma_transfer() : user buffer not in user space\n");
[203]1077        _release_lock(&_tty_put_lock);
[238]1078        return 1; 
[189]1079    }
[238]1080    if (((flags & PTE_W) == 0 ) && to_user) 
1081    {
[203]1082        _get_lock(&_tty_put_lock);
[218]1083        _puts("\n[GIET ERROR] in _dma_transfer() : user buffer not writable\n");
[203]1084        _release_lock(&_tty_put_lock);
[238]1085        return 1;
[189]1086    }
[238]1087    user_pbase = (((paddr_t)ppn) << 12) | (user_vaddr & 0x00000FFF);
[189]1088
[238]1089/*  This is a draft for IOMMU support
[228]1090
[189]1091    // loop on all virtual pages covering the user buffer
[169]1092    unsigned int user_vpn_min = user_vaddr >> 12;
1093    unsigned int user_vpn_max = (user_vaddr + length - 1) >> 12;
1094    unsigned int ix2          = 0;
1095    unsigned int ix1          = _dma_iommu_ix1 + dma_id;
[158]1096
[169]1097    for ( vpn = user_vpn_min ; vpn <= user_vpn_max ; vpn++ )
1098    {
[228]1099    // get ppn and flags for each vpn
1100    unsigned int ko = _v2p_translate( (page_table_t*)user_pt_vbase,
1101    vpn,
1102    &ppn,
1103    &flags );
[158]1104
[228]1105    // check access rights
1106    if ( ko )                                 return 3;     // unmapped
1107    if ( (flags & PTE_U) == 0 )               return 4;     // not in user space
1108    if ( ( (flags & PTE_W) == 0 ) && to_user ) return 5;     // not writable
[158]1109
[228]1110    // save first ppn value
1111    if ( ix2 == 0 ) ppn_first = ppn;
[169]1112
[249]1113    if ( GIET_USE_IOMMU && USE_IOB )    // user buffer remapped in the I/O space
[228]1114    {
1115    // check buffer length < 2 Mbytes
1116    if ( ix2 > 511 ) return 2;
[169]1117
[228]1118    // map the physical page in IOMMU page table
1119    _iommu_add_pte2( ix1,        // PT1 index
1120    ix2,        // PT2 index
1121    ppn,        // physical page number
1122    flags );    // protection flags
1123    }
1124    else            // no IOMMU : check that physical pages are contiguous
1125    {
1126    if ( (ppn - ppn_first) != ix2 )       return 6;     // split physical buffer 
1127    }
[169]1128
[228]1129    // increment page index
1130    ix2++;
[169]1131    } // end for vpn
1132
[189]1133    // register the number of pages to be unmapped if iommu activated
[169]1134    _dma_iommu_npages[dma_id] = (user_vpn_max - user_vpn_min) + 1;
1135
[189]1136*/
[204]1137
[169]1138    // invalidate data cache in case of memory write
[238]1139    if (to_user) _dcache_buf_invalidate((void *) user_vaddr, length);
[228]1140
[238]1141// get the lock
1142//  _get_lock(&_dma_lock[dma_id]);
[169]1143
[238]1144#if GIET_DEBUG_DMA_DRIVER
1145_get_lock(&_tty_put_lock);
1146_puts("\n[GIET DEBUG] DMA TRANSFER at cycle ");
1147_putd( _proctime() );
1148_puts("\n - cluster_id       = ");
1149_putx( cluster_id );
1150_puts("\n - channel_id       = ");
1151_putx( channel_id );
1152_puts("\n - dma_vbase        = ");
1153_putx( (unsigned int)dma_vbase );
1154_puts("\n - device_buf_vbase = ");
1155_putx( device_vbase );
1156_puts("\n - device_buf_pbase = ");
1157_putl( device_pbase );
1158_puts("\n - user_buf_vbase   = ");
1159_putx( user_vaddr );
1160_puts("\n - user_buf_pbase   = ");
1161_putl( user_pbase );
1162_puts("\n");
1163_release_lock(&_tty_put_lock);
1164#endif
1165
[169]1166    // DMA configuration
[238]1167    if (to_user) 
1168    {
1169        dma_vbase[channel_id * DMA_SPAN + DMA_SRC]     = (unsigned int)(device_pbase);
1170        dma_vbase[channel_id * DMA_SPAN + DMA_SRC_EXT] = (unsigned int)(device_pbase>>32);
1171        dma_vbase[channel_id * DMA_SPAN + DMA_DST]     = (unsigned int)(user_pbase);
1172        dma_vbase[channel_id * DMA_SPAN + DMA_DST_EXT] = (unsigned int)(user_pbase>>32);
[169]1173    }
[238]1174    else 
1175    {
1176        dma_vbase[channel_id * DMA_SPAN + DMA_SRC]     = (unsigned int)(user_pbase);
1177        dma_vbase[channel_id * DMA_SPAN + DMA_SRC_EXT] = (unsigned int)(user_pbase>>32);
1178        dma_vbase[channel_id * DMA_SPAN + DMA_DST]     = (unsigned int)(device_pbase);
1179        dma_vbase[channel_id * DMA_SPAN + DMA_DST_EXT] = (unsigned int)(device_pbase>>32);
[169]1180    }
[238]1181    dma_vbase[channel_id * DMA_SPAN + DMA_LEN] = (unsigned int) length;
[228]1182
[158]1183    return 0;
[238]1184
1185#else // NB_DMA_CHANNELS == 0
1186    _get_lock(&_tty_put_lock);
1187    _puts("\n[GIET ERROR] in _dma_transfer() : NB_DMA_CHANNELS == 0");
1188    _release_lock(&_tty_put_lock);
1189    return 1;
[213]1190#endif
[238]1191
[218]1192}  // end _dma_transfer() 
1193
[169]1194//////////////////////////////////////////////////////////////////////////////////
[218]1195// _dma_completed()
1196// This function checks completion of a DMA transfer to or from a peripheral
1197// device (Frame Buffer or Multi-Nic).
1198// As it is a blocking call, the processor is busy waiting.
1199// Returns 0 if success, > 0 if error
1200// (1 == read error / 2 == DMA idle error / 3 == write error)
1201//////////////////////////////////////////////////////////////////////////////////
[238]1202unsigned int _dma_completed() 
1203{
1204#if NB_DMA_CHANNELS > 0
1205    unsigned int dma_id  = _get_context_slot(CTX_DMA_ID);
[228]1206    unsigned int dma_ret;
[218]1207
1208    // busy waiting with a pseudo-random delay between bus accesses
[238]1209    while (_dma_done[dma_id] == 0) 
1210    {
[228]1211        unsigned int delay = (( _proctime() ^ _procid() << 4) & 0x3F) + 1;
1212        asm volatile(
1213                "move  $3,   %0                 \n"
1214                "loop_nic_completed:            \n"
1215                "addi  $3,   $3, -1             \n"
1216                "bnez  $3,   loop_nic_completed \n"
1217                "nop                            \n"
1218                :
1219                : "r" (delay)
1220                : "$3"); 
[218]1221    }
[228]1222
[238]1223#if GIET_DEBUG_DMA_DRIVER
1224_get_lock(&_tty_put_lock);
1225_puts("\n[GIET DEBUG] DMA COMPLETED at cycle ");
1226_putd( _proctime() );
1227_puts("\n - cluster_id       = ");
1228_putx( dma_id/NB_DMA_CHANNELS );
1229_puts("\n - channel_id       = ");
1230_putx( dma_id%NB_DMA_CHANNELS );
1231_puts("\n");
1232_release_lock(&_tty_put_lock);
1233#endif
[218]1234
1235    // reset synchronization variables
[223]1236    _dma_done[dma_id] = 0;
[225]1237    dma_ret = _dma_status[dma_id];
1238    asm volatile("sync\n");
[218]1239
[238]1240//    _dma_lock[dma_id] = 0;
1241
[223]1242    return dma_ret;
[218]1243
[238]1244#else // NB_DMA_CHANNELS == 0
[218]1245    return -1;
1246#endif
[238]1247
[218]1248}  // end _dma_completed
1249
[238]1250
[218]1251//////////////////////////////////////////////////////////////////////////////////
[228]1252//     VciFrameBuffer driver
[218]1253//////////////////////////////////////////////////////////////////////////////////
[253]1254// There are three methods to access the VciFrameBuffer device:
[218]1255// 
[253]1256// 1) The _fb_sync_write() and _fb_sync_read() functions use a memcpy strategy
1257// to implement the transfer between a data buffer (user space) and the frame
[218]1258// buffer (kernel space). They are blocking until completion of the transfer.
1259//
[253]1260// 2) The _fb_dma_write(), _fb_dma_read() and _fb_dma_completed() functions use
1261// the VciMultiDma components (distributed in the clusters) to transfer data
1262// between the user buffer and the frame buffer.
1263// A DMA channel is allocated to the task requesting it in the mapping_info,
1264// and stored in the task context.
1265//
1266// 3) The _fb_cma_init(), _fb_cma_write() and _fb_cma_stop() functions use
1267// the VciChbufDma component (non replicated) to transfer a flow of images from
1268// a user space chained buffer (two buffers) to the frame buffer.
[254]1269// A CMA channel must be allocated to the task requesting it in the mapping_info,
[253]1270// and stored in the task context.
[218]1271//////////////////////////////////////////////////////////////////////////////////
1272
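//////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original driver code), assuming
// the _fb_sync_write() / _fb_dma_write() / _fb_dma_completed() functions
// defined below are exported by drivers.h: displaying one image with either
// method 1 (synchronous memcpy) or method 2 (DMA). With the DMA method, the
// task must wait for completion before reusing the image buffer.
// The example_* name is arbitrary.
//////////////////////////////////////////////////////////////////////////////////
unsigned int example_fb_display( const void * image,
                                 unsigned int size,
                                 unsigned int use_dma )
{
    if ( use_dma == 0 )
    {
        // method 1: blocking memcpy into the frame buffer (offset 0)
        return _fb_sync_write( 0, image, size );
    }
    else
    {
        // method 2: program the DMA channel allocated to the task ...
        if ( _fb_dma_write( 0, image, size ) ) return 1;

        // ... and busy wait until the transfer completes
        return _fb_dma_completed();
    }
}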
1273//////////////////////////////////////////////////////////////////////////////////
1274// _fb_sync_write()
1275// Transfer data from a memory buffer to the frame_buffer device using a memcpy.
1276// - offset : offset (in bytes) in the frame buffer.
1277// - buffer : base address of the memory buffer.
1278// - length : number of bytes to be transfered.
1279//////////////////////////////////////////////////////////////////////////////////
[246]1280
1281unsigned int _fb_sync_write(unsigned int offset, 
1282                            const void * buffer, 
1283                            unsigned int length) 
[238]1284{
[246]1285    unsigned char * fb_address = (unsigned char *) &seg_fbf_base + offset;
[228]1286    memcpy((void *) fb_address, (void *) buffer, length);
[218]1287    return 0;
1288}
1289
[228]1290
[218]1291//////////////////////////////////////////////////////////////////////////////////
1292// _fb_sync_read()
1293// Transfer data from the frame_buffer device to a memory buffer using a memcpy.
1294// - offset : offset (in bytes) in the frame buffer.
1295// - buffer : base address of the memory buffer.
1296// - length : number of bytes to be transfered.
1297//////////////////////////////////////////////////////////////////////////////////
[238]1298unsigned int _fb_sync_read( unsigned int   offset, 
1299                            const void*    buffer, 
1300                            unsigned int   length) 
1301{
1302    unsigned char* fb_address = (unsigned char *) &seg_fbf_base + offset;
[228]1303    memcpy((void *) buffer, (void *) fb_address, length);
[218]1304    return 0;
1305}
1306
[228]1307
[218]1308//////////////////////////////////////////////////////////////////////////////////
[253]1309// _fb_dma_write()
[169]1310// Transfer data from a memory buffer to the frame_buffer device using  DMA.
1311// - offset : offset (in bytes) in the frame buffer.
1312// - buffer : base address of the memory buffer.
1313// - length : number of bytes to be transfered.
1314// Returns 0 if success, > 0 if error.
1315//////////////////////////////////////////////////////////////////////////////////
[253]1316unsigned int _fb_dma_write( unsigned int   offset, 
1317                            const void*    buffer, 
1318                            unsigned int   length) 
[238]1319{
1320    return _dma_transfer( 0,             // frame buffer
1321                          0,             // write
1322                          offset,
1323                          (unsigned int) buffer,
1324                          length );
[158]1325}
1326//////////////////////////////////////////////////////////////////////////////////
[253]1327// _fb_dma_read()
[169]1328// Transfer data from the frame_buffer device to a memory buffer using  DMA.
[158]1329// - offset : offset (in bytes) in the frame buffer.
[253]1330// - buffer : virtual base address of the user buffer.
1331// - length : buffer size (number of bytes)
[158]1332// Returns 0 if success, > 0 if error.
1333//////////////////////////////////////////////////////////////////////////////////
[253]1334unsigned int _fb_dma_read( unsigned int   offset, 
1335                           const void*    buffer, 
1336                           unsigned int   length ) 
[238]1337{
1338    return _dma_transfer( 0,    // frame buffer
1339                          1,    // read
1340                          offset,
1341                          (unsigned int) buffer,
1342                          length );
[158]1343}
1344//////////////////////////////////////////////////////////////////////////////////
1345// _fb_dma_completed()
1346// This function checks completion of a DMA transfer to or from the frame buffer.
[169]1347// As it is a blocking call, the processor is busy waiting.
1348// Returns 0 if success, > 0 if error
1349// (1 == read error / 2 == DMA idle error / 3 == write error)
[158]1350//////////////////////////////////////////////////////////////////////////////////
[253]1351unsigned int _fb_dma_completed() 
[238]1352{
[218]1353    return _dma_completed();
1354}
[158]1355
[253]1356// This structure contains two chbuf descriptors that can be used by
1357// the VciChbufDma component to transfer a flow of images:
1358// - The SRC chbuf descriptor contains two slots (two user buffers)
1359// - The DST chbuf descriptor contains only one slot (frame buffer)
[254]1360
[253]1361typedef struct fb_cma_channel_s
1362{
[254]1363    paddr_t       buf0;     // physical address + status for user buffer 0
1364    paddr_t       buf1;     // physical address + status for user buffer 1
1365    paddr_t       fbf;      // physical address + status for frame buffer
1366    unsigned int  length;   // buffer length (number of bytes)
1367    unsigned int  padding;  // unused (just to have channel size = 32 bytes)
[253]1368} fb_cma_channel_t;
1369
[254]1370in_unckdata volatile fb_cma_channel_t _fb_cma_channel[NB_CMA_CHANNELS] __attribute__((aligned(64)));
1371in_unckdata volatile paddr_t          _fb_cma_desc_paddr[NB_CMA_CHANNELS];
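
// Illustrative helpers (not part of the original driver, and not used below):
// in each paddr_t slot of the channel descriptor, the most significant bit
// carries the buffer status (1 = full, 0 = empty), as handled explicitly in
// _fb_cma_write() with the 0x8000000000000000ULL mask.
__attribute__((unused))
static inline paddr_t _fb_cma_set_full( paddr_t slot )
{
    return slot | 0x8000000000000000ULL;     // set the status bit (buffer full)
}
__attribute__((unused))
static inline paddr_t _fb_cma_set_empty( paddr_t slot )
{
    return slot & 0x7FFFFFFFFFFFFFFFULL;     // clear the status bit (buffer empty)
}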
[253]1372
[218]1373//////////////////////////////////////////////////////////////////////////////////
[253]1374// _fb_cma_init()
[254]1375// This function uses the _fb_cma_channel[] and _fb_cma_desc_paddr[] arrays,
1376// both indexed by the channel index. Each _fb_cma_channel[] entry contains
1377// one fb_cma_channel structure (defining the SRC and DST chbuf descriptors).
1378// This function does four things:
1379//
1380// 1) computes the physical addresses for the two source user buffers and for
1381//    the destination frame buffer. It initialises the channel descriptor
1382//    _fb_cma_channel[i], containing the SRC chbuf descriptor (two buffers),
1383//    the DST chbuf descriptor (one single frame buffer), and the buffer length.
1384//
1385// 2) computes the physical address for the channel descriptor and registers it
1386//    in the _fb_cma_desc_paddr[i].
1387//   
1388// 3) makes a SYNC request to L2 cache for the channel descriptor, because the
1389//    channel descriptor is directly accessed in XRAM by the CMA component.
1390//
1391// 4) Starts the CMA hardware channel, which will poll the channel descriptor
1392//    to transfer a user buffer to the frame buffer as soon as the source
1393//    user buffer is marked valid.
1394//
[253]1395// Arguments are:
1396// - vbase0 : virtual base address of the first user buffer.
1397// - vbase1 : virtual base address of the second user buffer.
1398// - length : user buffer size (number of bytes)
1399// Returns 0 if success, > 0 if error
1400//////////////////////////////////////////////////////////////////////////////////
1401unsigned int _fb_cma_init( const void*  vbase0,
1402                           const void*  vbase1,
1403                           unsigned int length ) 
1404{
1405#if NB_CMA_CHANNELS > 0
1406
[254]1407    unsigned int  channel_id;          // CMA channel index
1408    unsigned int  user_ptab;           // page table virtual address
1409    unsigned int  ko;                  // unsuccessful V2P translation
1410    unsigned int  vaddr;               // virtual address
1411    unsigned int  flags;               // protection flags
1412    unsigned int  ppn;                 // physical page number
1413    paddr_t       channel_pbase;       // physical address of channel descriptor
[253]1414
1415    // get CMA channel index
1416    channel_id = _get_context_slot(CTX_CMA_ID);
1417    if ( channel_id >= NB_CMA_CHANNELS )
1418    {
1419        _get_lock(&_tty_put_lock);
1420        _puts("\n[GIET ERROR] in _fb_cma_init() : CMA channel index too large\n");
1421        _release_lock(&_tty_put_lock);
1422        return 1;
1423    }
1424
[254]1425    // checking size for channel descriptor
1426    if ( sizeof(fb_cma_channel_t) != 32 )
1427    {
1428        _get_lock(&_tty_put_lock);
1429        _puts("\n[GIET ERROR] in _fb_cma_init() : bad fb_cma_channel size\n");
1430        _release_lock(&_tty_put_lock);
1431        return 1;
1432    }
1433
1434    // checking channel descriptor alignment (32 bytes)
1435    if ( (unsigned int)(&_fb_cma_channel[channel_id]) & 0x1F ) 
1436    {
1437        _get_lock(&_tty_put_lock);
1438        _puts("\n[GIET ERROR] in _fb_cma_init() : bad fb_cma_channel alignment\n");
1439        _release_lock(&_tty_put_lock);
1440        return 1;
1441    }
1442
1443    // checking user buffer virtual addresses and length alignment
[253]1444    if ( ((unsigned int)vbase0 & 0x3) || ((unsigned int)vbase1 & 0x3) || (length & 0x3) ) 
1445    {
1446        _get_lock(&_tty_put_lock);
1447        _puts("\n[GIET ERROR] in _fb_cma_init() : user buffer not word aligned\n");
1448        _release_lock(&_tty_put_lock);
1449        return 1;
1450    }
1451
1452    // get page table virtual address
1453    user_ptab = _get_context_slot(CTX_PTAB_ID);
1454
1455    // compute and register frame buffer physical address
[254]1456    vaddr = ((unsigned int)&seg_fbf_base);
1457    ko    = _v2p_translate( (page_table_t*) user_ptab, 
1458                         (vaddr >> 12),
[253]1459                         &ppn, 
1460                         &flags );
1461    if (ko) 
1462    {
1463        _get_lock(&_tty_put_lock);
1464        _puts("\n[GIET ERROR] in _fb_cma_init() : frame buffer unmapped\n");
1465        _release_lock(&_tty_put_lock);
1466        return 1;
1467    }
[254]1468    _fb_cma_channel[channel_id].fbf = ((paddr_t)ppn << 12) | (vaddr & 0x00000FFF);
[253]1469
1470    // Compute and register first user buffer physical address
[254]1471    vaddr = (unsigned int)vbase0; 
[253]1472    ko = _v2p_translate( (page_table_t*) user_ptab, 
[254]1473                         (vaddr >> 12),
[253]1474                         &ppn, 
1475                         &flags );
1476    if (ko) 
1477    {
1478        _get_lock(&_tty_put_lock);
1479        _puts("\n[GIET ERROR] in _fb_cma_init() : user buffer 0 unmapped\n");
1480        _release_lock(&_tty_put_lock);
1481        return 1;
1482    } 
1483    if ((flags & PTE_U) == 0) 
1484    {
1485        _get_lock(&_tty_put_lock);
1486        _puts("[GIET ERROR] in _fb_cma_init() : user buffer 0 not in user space\n");
1487        _release_lock(&_tty_put_lock);
1488        return 1; 
1489    }
[254]1490    _fb_cma_channel[channel_id].buf0 = ((paddr_t)ppn << 12) | (vaddr & 0x00000FFF);
[253]1491
1492    // Compute and register second user buffer physical address
[254]1493    vaddr = (unsigned int)vbase1; 
[253]1494    ko = _v2p_translate( (page_table_t*) user_ptab, 
[254]1495                         (vaddr >> 12),
[253]1496                         &ppn, 
1497                         &flags );
1498    if (ko) 
1499    {
1500        _get_lock(&_tty_put_lock);
1501        _puts("\n[GIET ERROR] in _fb_cma_init() : user buffer 1 unmapped\n");
1502        _release_lock(&_tty_put_lock);
1503        return 1;
1504    } 
1505    if ((flags & PTE_U) == 0) 
1506    {
1507        _get_lock(&_tty_put_lock);
1508        _puts("[GIET ERROR] in _fb_cma_init() : user buffer 1 not in user space\n");
1509        _release_lock(&_tty_put_lock);
1510        return 1; 
1511    }
[254]1512    _fb_cma_channel[channel_id].buf1 = ((paddr_t)ppn << 12) | (vaddr & 0x00000FFF);
[253]1513
[254]1514    // register buffer length in channel descriptor
1515    _fb_cma_channel[channel_id].length = length;
[253]1516
[254]1517    // Compute and register physical address of the channel descriptor
1518    vaddr = (unsigned int)(&_fb_cma_channel[channel_id]);
[253]1519    ko = _v2p_translate( (page_table_t*) user_ptab, 
[254]1520                         (vaddr >> 12),
[253]1521                         &ppn, 
1522                         &flags );
1523    if (ko) 
1524    {
1525        _get_lock(&_tty_put_lock);
[254]1526        _puts("\n[GIET ERROR] in _fb_cma_init() : channel descriptor unmapped\n");
[253]1527        _release_lock(&_tty_put_lock);
1528        return 1;
1529    } 
[254]1530    channel_pbase = (((paddr_t)ppn) << 12) | (vaddr & 0x00000FFF);
1531    _fb_cma_desc_paddr[channel_id] = channel_pbase;
[253]1532
[254]1533#if GIET_DEBUG_CMA_DRIVER
1534_puts("\n");
1535_puts("- fbf       pbase = ");
1536_putl( _fb_cma_channel[channel_id].fbf );
1537_puts("\n");
1538_puts("- buf0      pbase = ");
1539_putl( _fb_cma_channel[channel_id].buf0 );
1540_puts("\n");
1541_puts("- buf1      pbase = ");
1542_putl( _fb_cma_channel[channel_id].buf1 );
1543_puts("\n");
1544_puts("- channel   pbase = ");
1545_putl( channel_pbase );
1546_puts("\n");
1547#endif
1548
1549    // SYNC request for channel descriptor
1550    _memc_sync( channel_pbase, 32 );
1551
[253]1552    // CMA channel activation
1553    unsigned int* cma_vbase = (unsigned int *)&seg_cma_base;
1554    unsigned int  offset     = channel_id * CHBUF_CHANNEL_SPAN;
1555
[254]1556    cma_vbase[offset + CHBUF_SRC_DESC]  = (unsigned int)(channel_pbase & 0xFFFFFFFF);
1557    cma_vbase[offset + CHBUF_SRC_EXT]   = (unsigned int)(channel_pbase >> 32);
[253]1558    cma_vbase[offset + CHBUF_SRC_NBUFS] = 2;
[254]1559    cma_vbase[offset + CHBUF_DST_DESC]  = (unsigned int)(channel_pbase & 0xFFFFFFFF) + 16;
1560    cma_vbase[offset + CHBUF_DST_EXT]   = (unsigned int)(channel_pbase >> 32);
[253]1561    cma_vbase[offset + CHBUF_DST_NBUFS] = 1;
1562    cma_vbase[offset + CHBUF_BUF_SIZE]  = length;
1563    cma_vbase[offset + CHBUF_PERIOD]    = 300;
1564    cma_vbase[offset + CHBUF_RUN]       = 1;
1565
1566    return 0;
1567
1568#else
1569
1570    _get_lock(&_tty_put_lock);
1571    _puts("\n[GIET ERROR] in _fb_cma_init() : no CMA channel allocated\n");
1572    _release_lock(&_tty_put_lock);
1573
1574    return 1;
1575#endif
1576}
1577//////////////////////////////////////////////////////////////////////////////////
1578// _fb_cma_write()
[254]1579// This function makes a SYNC request for the source user buffer.
1580// Then it updates the status of the SRC and DST chbuf descriptors, to allow
1581// the CMA component to transfer the source user buffer to the destination
1582// frame buffer, and makes a SYNC request for the channel descriptor.
1583//
[253]1584// - buffer_id : user buffer index (0 => buf0 / not 0 => buf1)
1585// Returns 0 if success, > 0 if error
1586//////////////////////////////////////////////////////////////////////////////////
1587unsigned int _fb_cma_write( unsigned int buffer_id )
1588{
1589#if NB_CMA_CHANNELS > 0
1590
[254]1591    paddr_t         buf_paddr;
1592    unsigned int    buf_length;
1593
[253]1594    // get CMA channel index
1595    unsigned int channel_id = _get_context_slot(CTX_CMA_ID);
[254]1596
1597    // SYNC request for the source user buffer
1598    if ( buffer_id == 0 )  buf_paddr = _fb_cma_channel[channel_id].buf0;
1599    else                   buf_paddr = _fb_cma_channel[channel_id].buf1;
1600    buf_length = _fb_cma_channel[channel_id].length;
1601    _memc_sync( buf_paddr, buf_length );
1602
[253]1603    // set SRC full
1604    if ( buffer_id == 0 )
[254]1605    _fb_cma_channel[channel_id].buf0 = buf_paddr | 0x8000000000000000ULL;
[253]1606    else
[254]1607    _fb_cma_channel[channel_id].buf1 = buf_paddr | 0x8000000000000000ULL;
1608
[253]1609    // set DST empty
1610    _fb_cma_channel[channel_id].fbf  = _fb_cma_channel[channel_id].fbf
1611                                       & 0x7FFFFFFFFFFFFFFFULL;
[254]1612
1613    // SYNC request for the channel descriptor
1614    buf_paddr  = _fb_cma_desc_paddr[channel_id];
1615    buf_length = 32;
1616    _memc_sync( buf_paddr, buf_length );
1617
[253]1618    return 0;
1619
1620#else
1621
1622    _get_lock(&_tty_put_lock);
1623    _puts("\n[GIET ERROR] in _fb_cma_channel() : no CMA channel allocated\n");
1624    _release_lock(&_tty_put_lock);
1625    return 1;
1626
1627#endif
1628}
1629//////////////////////////////////////////////////////////////////////////////////
1630// _fb_cma_stop()
1631// This function deactivates the CMA channel allocated to the calling task.
1632// Returns 0 if success, > 0 if error
1633//////////////////////////////////////////////////////////////////////////////////
1634unsigned int _fb_cma_stop( unsigned int buffer_id )
1635{
1636#if NB_CMA_CHANNELS > 0
1637
1638    // get CMA channel allocated
1639    unsigned int channel_id = _get_context_slot(CTX_CMA_ID);
[254]1640
[253]1641    // CMA channel deactivation
1642    unsigned int* cma_vbase = (unsigned int *)&seg_cma_base;
1643    unsigned int  offset     = channel_id * CHBUF_CHANNEL_SPAN;
1644    cma_vbase[offset + CHBUF_RUN] = 0;
1645    return 0;
1646
1647#else
1648
1649    _get_lock(&_tty_put_lock);
1650    _puts("\n[GIET ERROR] in _fb_cma_stop() : no CMA channel allocated\n");
1651    _release_lock(&_tty_put_lock);
1652    return 1;
1653
1654#endif
1655}
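
// Illustrative sketch (not part of the original driver, and not referenced by
// the kernel): a minimal double-buffering loop built on the three CMA functions
// above. The two user buffers, their length and the number of images are
// assumptions of this example; the application is expected to fill the current
// buffer before marking it full with _fb_cma_write().
__attribute__((unused))
static unsigned int _fb_example_cma_loop( const void*  buf0,
                                          const void*  buf1,
                                          unsigned int length,
                                          unsigned int nb_images )
{
    unsigned int i;

    // register both user buffers and activate the hardware channel
    if ( _fb_cma_init( buf0, buf1, length ) ) return 1;

    // alternately mark buf0 (even images) and buf1 (odd images) as full;
    // the CMA engine copies each buffer to the frame buffer as soon as
    // its status bit is set
    for ( i = 0 ; i < nb_images ; i++ )
    {
        if ( _fb_cma_write( i & 0x1 ) ) return 1;
    }

    // deactivate the channel
    return _fb_cma_stop( 0 );
}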
1656   
1657//////////////////////////////////////////////////////////////////////////////////
[228]1658//     VciMultiNic driver
[218]1659//////////////////////////////////////////////////////////////////////////////////
1660// The VciMultiNic device can be accessed directly by software with memcpy(),
[253]1661// or it can be accessed through a multi-channel CMA component:
[218]1662// 
1663// The '_nic_sync_write' and '_nic_sync_read' functions use a memcpy strategy to
1664// implement the transfer between a data buffer (user space) and the NIC
1665// buffer (kernel space). They are blocking until completion of the transfer.
1666//
[253]1667// The _nic_cma_init() and _nic_cma_stop() functions use the VciChbufDma component
1668// to transfer a flow of packets from the NIC RX hard chbuf (two containers)
1669// to a user RX chbuf (two containers), and to transfer another flow of packets
1670// from a user TX chbuf (two containers) to the NIC TX chbuf (two containers).
1671// One NIC channel and two CMA channels must be allocated to the task
1672// in the mapping_info data structure.
[218]1673//////////////////////////////////////////////////////////////////////////////////
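
// Illustrative sketch (not part of the original driver): intended usage of the
// CMA based NIC interface once the functions below are implemented, assuming
// the usual prototypes from drivers.h. The four container buffers and the
// container size are assumptions of this example.
__attribute__((unused))
static unsigned int _nic_example_cma_start( const void*  rx0,
                                            const void*  rx1,
                                            const void*  tx0,
                                            const void*  tx1,
                                            unsigned int container_size )
{
    // start the RX flow : NIC RX hard chbuf -> user RX chbuf
    if ( _nic_cma_rx_init( rx0, rx1, container_size ) ) return 1;

    // start the TX flow : user TX chbuf -> NIC TX hard chbuf
    if ( _nic_cma_tx_init( tx0, tx1, container_size ) ) return 1;

    return 0;
}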
[204]1674
[218]1675//////////////////////////////////////////////////////////////////////////////////
1676// _nic_sync_write()
1677// Transfer data from a memory buffer to the NIC device using a memcpy.
1678// - buffer : base address of the memory buffer.
1679// - length : number of bytes to be transfered.
1680//////////////////////////////////////////////////////////////////////////////////
[253]1681unsigned int _nic_sync_write( const void*    buffer,
[238]1682                              unsigned int   length ) 
1683{
[253]1684    // To be defined
1685    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1686    // memcpy((void *) nic_address, (void *) buffer, length);
[218]1687    return 0;
1688}
1689//////////////////////////////////////////////////////////////////////////////////
1690// _nic_sync_read()
1691// Transfer data from the NIC device to a memory buffer using a memcpy.
1692// - buffer : base address of the memory buffer.
1693// - length : number of bytes to be transfered.
1694//////////////////////////////////////////////////////////////////////////////////
[253]1695unsigned int _nic_sync_read( const void*    buffer, 
1696                             unsigned int   length ) 
1697{
1698    // To be defined
1699    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1700    // memcpy((void *) buffer, (void *) nic_address, length);
[218]1701    return 0;
1702}
1703//////////////////////////////////////////////////////////////////////////////////
[253]1704// _nic_cma_rx_init()
[218]1705// Returns 0 if success, > 0 if error.
1706//////////////////////////////////////////////////////////////////////////////////
[253]1707unsigned int _nic_cma_rx_init( const void*  buf0,
1708                               const void*  buf1,
1709                               unsigned int length ) 
1710{
1711    // to be defined
1712    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1713    return 0;
[218]1714}
1715//////////////////////////////////////////////////////////////////////////////////
[253]1716// _nic_cma_tx_init()
[218]1717// Returns 0 if success, > 0 if error.
1718//////////////////////////////////////////////////////////////////////////////////
[253]1719unsigned int _nic_cma_tx_init( const void*  buf0,
1720                               const void*  buf1,
1721                               unsigned int length ) 
1722{
1723    // to be defined
1724    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1725    return 0;
1726}
//////////////////////////////////////////////////////////////////////////////////
1727// _nic_cma_stop()
1728// Returns 0 if success, > 0 if error.
[218]1729//////////////////////////////////////////////////////////////////////////////////
[253]1730unsigned int _nic_cma_stop()
[238]1731{
[253]1732    // to be defined
1733    // unsigned char* nic_address = (unsigned char *) &seg_nic_base;
1734    return 0;
[158]1735}
1736
[253]1737
[249]1738//////////////////////////////////////////////////////////////////////////////////
1739//     VciMemCache driver
1740//////////////////////////////////////////////////////////////////////////////////
1741// The VciMemCache device can be accessed through a configuration interface,
1742// as a set of uncached, memory-mapped registers.
[232]1743///////////////////////////////////////////////////////////////////////////////////
[253]1744// The (virtual) base address of the associated segment is:
1745//
1746//       mmc_address = seg_mmc_base + cluster_id * vseg_cluster_increment
1747//
1748////////////////////////////////////////////////////////////////////////////////
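
// Illustrative helper (not part of the original driver, and not used below):
// it factorises the base address computation that _memc_inval() and _memc_sync()
// both perform, following the formula given above.
__attribute__((unused))
static inline unsigned int* _memc_get_base( unsigned int cluster_id )
{
    return (unsigned int*)((unsigned int)&seg_mmc_base +
                           (cluster_id * (unsigned int)&vseg_cluster_increment));
}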
[249]1749
1750///////////////////////////////////////////////////////////////////////////////////
1751// _memc_inval()
[254]1752// This function invalidates all cache lines covering a memory buffer defined
1753// by the physical base address and the length.
1754// The buffer address MSBs are used to compute the cluster index.
1755///////////////////////////////////////////////////////////////////////////////////
1756void _memc_inval( paddr_t      buf_paddr,
1757                  unsigned int buf_length )
1758{
1759    unsigned int cluster_id    = (unsigned int)((buf_paddr>>32)/(256/NB_CLUSTERS));
1760
[253]1761    unsigned int * mmc_address = (unsigned int *) ((unsigned int)&seg_mmc_base + 
1762                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
[249]1763
1764    // get the lock protecting exclusive access to MEMC
1765    while ( mmc_address[MEMC_LOCK] ) { asm volatile("nop"); }
1766
1767    // write inval arguments
1768    mmc_address[MEMC_ADDR_LO]    = (unsigned int)buf_paddr;
1769    mmc_address[MEMC_ADDR_HI]    = (unsigned int)(buf_paddr>>32);
1770    mmc_address[MEMC_BUF_LENGTH] = buf_length;
1771    mmc_address[MEMC_CMD_TYPE]   = MEMC_CMD_INVAL;
1772
1773    // release the lock protecting MEMC
1774    mmc_address[MEMC_LOCK] = 0;
1775}
[254]1776///////////////////////////////////////////////////////////////////////////////////
1777// _memc_sync()
1778// This function copies to external RAM all cache lines covering a memory buffer
1779// defined by the physical base address and the length, if they are dirty.
1780// The buffer address MSBs are used to compute the cluster index.
1781///////////////////////////////////////////////////////////////////////////////////
1782void _memc_sync( paddr_t      buf_paddr,
1783                 unsigned int buf_length )
1784{
1785    unsigned int cluster_id    = (unsigned int)((buf_paddr>>32)/(256/NB_CLUSTERS));
[253]1786
[254]1787    unsigned int * mmc_address = (unsigned int *) ((unsigned int)&seg_mmc_base + 
1788                                 (cluster_id * (unsigned int)&vseg_cluster_increment));
1789
1790    // get the lock protecting exclusive access to MEMC
1791    while ( mmc_address[MEMC_LOCK] ) { asm volatile("nop"); }
1792
1793    // write sync arguments
1794    mmc_address[MEMC_ADDR_LO]    = (unsigned int)buf_paddr;
1795    mmc_address[MEMC_ADDR_HI]    = (unsigned int)(buf_paddr>>32);
1796    mmc_address[MEMC_BUF_LENGTH] = buf_length;
1797    mmc_address[MEMC_CMD_TYPE]   = MEMC_CMD_SYNC;
1798
1799    // release the lock protecting MEMC
1800    mmc_address[MEMC_LOCK] = 0;
1801}
1802
[249]1803///////////////////////////////////////////////////////////////////////////////////
[232]1804// _heap_info()
1805// This function returns the information associated with a heap (vaddr and size).
[238]1806// It uses the global task index (CTX_GTID_ID, unique for each giet task) and the
1807// vspace index (CTX_VSID_ID) defined in the task context.
[232]1808///////////////////////////////////////////////////////////////////////////////////
[238]1809unsigned int _heap_info( unsigned int* vaddr, 
1810                         unsigned int* size ) 
1811{
[232]1812    mapping_header_t * header  = (mapping_header_t *) (&seg_mapping_base);
1813    mapping_task_t * tasks     = _get_task_base(header);
1814    mapping_vobj_t * vobjs     = _get_vobj_base(header);
1815    mapping_vspace_t * vspaces = _get_vspace_base(header);
[238]1816
1817    unsigned int taskid        = _get_context_slot(CTX_GTID_ID);
1818    unsigned int vspaceid      = _get_context_slot(CTX_VSID_ID);
1819
[232]1820    int heap_local_vobjid      = tasks[taskid].heap_vobjid;
[238]1821    if (heap_local_vobjid != -1) 
1822    {
[232]1823        unsigned int vobjheapid = heap_local_vobjid + vspaces[vspaceid].vobj_offset;
1824        *vaddr                  = vobjs[vobjheapid].vaddr;
1825        *size                   = vobjs[vobjheapid].length;
1826        return 0;
1827    }
[238]1828    else 
1829    {
[232]1830        *vaddr = 0;
1831        *size = 0;
1832        return 0;
1833    }
1834}
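
// Illustrative sketch (not part of the original driver, and not referenced by
// the kernel): querying the private heap of the calling task; a zero size
// means that no heap vobj is defined for this task in the mapping.
__attribute__((unused))
static unsigned int _heap_example_size()
{
    unsigned int heap_vaddr;
    unsigned int heap_size;
    _heap_info( &heap_vaddr, &heap_size );
    return heap_size;
}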
1835
[255]1836
1837////////////////////////////////////////////////////////////////////////////////
1838// _sim_helper_access()
1839// Accesses the Simulation Helper Component
1840// If the access is on a writable register (except SIMHELPER_PAUSE_SIM),
1841// the function should never return since the simulation will stop before
1842// If the access is on a readable register, returns 0 on success, 1 on failure,
1843// and writes the return value at address retval
1844////////////////////////////////////////////////////////////////////////////////
1845unsigned int _sim_helper_access(unsigned int register_index,
1846                                unsigned int value,
1847                                unsigned int * retval) {
1848    unsigned int * sim_helper_address = (unsigned int *) &seg_sim_base;
1849   
1850    if (register_index == SIMHELPER_SC_STOP ||
1851        register_index == SIMHELPER_END_WITH_RETVAL ||
1852        register_index == SIMHELPER_EXCEPT_WITH_VAL ||
1853        register_index == SIMHELPER_PAUSE_SIM ||
1854        register_index == SIMHELPER_SIGINT) {
1855        sim_helper_address[register_index] = value;
1856    }
1857    else if (register_index == SIMHELPER_CYCLES) {
1858        *retval = sim_helper_address[register_index];
1859    }
1860    else {
1861        _get_lock(&_tty_put_lock);
1862        _puts("\n[GIET ERROR] in _sim_helper_access() : access to unmapped register\n");
1863        _release_lock(&_tty_put_lock);
1864        return -1;
1865    }
1866
1867    return 0;
1868}
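
// Illustrative sketch (not part of the original driver, and not referenced by
// the kernel): reading the simulated cycle count through the helper, using the
// only readable register handled above (SIMHELPER_CYCLES).
__attribute__((unused))
static unsigned int _sim_helper_example_cycles()
{
    unsigned int cycles = 0;
    if ( _sim_helper_access( SIMHELPER_CYCLES, 0, &cycles ) ) return 0;
    return cycles;
}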
1869
1870
1871
[228]1872// Local Variables:
1873// tab-width: 4
1874// c-basic-offset: 4
1875// c-file-offsets:((innamespace . 0)(inline-open . 0))
1876// indent-tabs-mode: nil
1877// End:
1878// vim: filetype=c:expandtab:shiftwidth=4:tabstop=4:softtabstop=4
1879