Changeset 440
- Timestamp: May 3, 2018, 5:51:22 PM
- Location: trunk
- Files: 68 edited
trunk/Makefile
r439 → r440

  # Default values for hardware parameters.
- # These parameters should be defined in the 'params.mk' file.
+ # These parameters should be defined in the 'params-hard.mk' file.
  ARCH      ?= /users/alain/soc/tsar-trunk-svn-2013/platforms/tsar_generic_iob
  X_SIZE    ?= 2
…
  FBF_WIDTH ?= 256
  IOC_TYPE  ?= IOC_BDV
+
  # Checking hardware platform definition.
  ifeq ($(wildcard $(ARCH)),)
…
  # Rules that don't build target files
  # always out-of-date, need to be regenerated everytime they are called
  .PHONY: compile \
          hard_config.h \
          dirs \
          list \
          extract \
          fsck \
          clean \
          build_libs \
          build-disk \
          $(BOOTLOADER_PATH)/build/boot.elf \
          kernel/build/kernel.elf \
          user/init/build/init.elf \
          user/ksh/build/ksh.elf \
-         user/pgcd/build/pgcd.elf \
-         user/sort/build/sort.elf \
+         user/pgdc/build/pgcd.elf \
+         user/hello/build/hello.elf \
+         user/sort/build/sort.elf
…
  # Virtual disk path
…
  # The Mtools used to build the FAT32 disk image perfom a few sanity checks, to
  # make sure that the disk is indeed an MS-DOS disk. However, the size of the
- # disk image used by ALMOS-VM is not MS-DOS compliant.
+ # disk image used by ALMOS-MKH is not MS-DOS compliant.
  # Setting this variable prevents these checks.
  MTOOLS_SKIP_CHECK := 1

- # Rule to generate boot.elf, kernel.elf, ksh.elf, sort.elf and update virtual disk.
+ # Rule to generate boot.elf, kernel.elf, all user.elf files, and update virtual disk.
  compile: dirs \
          build_disk \
          hard_config.h \
          build_libs \
          $(BOOTLOADER_PATH)/build/boot.elf \
          kernel/build/kernel.elf \
          user/init/build/init.elf \
          user/ksh/build/ksh.elf \
-         user/sort/build/sort.elf \
-         user/pgcd/build/pgcd.elf \
+         user/pgcd/build/pgcd.elf \
+         user/hello/build/hello.elf \
+         user/sort/build/sort.elf \
          list

- # Rule to create the build directories.
+ # Rule to create the hdd directory
  dirs:
  	@mkdir -p hdd
…
  	$(MAKE) -C user/sort clean
  	$(MAKE) -C user/pgcd clean
+ 	$(MAKE) -C user/hello clean
  	$(MAKE) -C $(HAL_ARCH) clean
…
  	mdir -/ -b -i $(DISK_IMAGE) ::/

+ ############################################
+ # Rules to generate the user level libraries
  build_libs: build_hal
  	$(MAKE) -C $(LIBC_PATH)
…
  	dd if=$@ of=$(DISK_IMAGE) seek=2 conv=notrunc

+ #####################################################################
+ # Rule to generate boot.elf and place it in sector #2 of virtual disk
  build_hal:
  	$(MAKE) -C $(HAL_ARCH)
…
  	mcopy -o -i $(DISK_IMAGE) $@ ::/bin/kernel

- ###############################################################
- # Rules to generate various user.elf and copy on virtual disk
+ #####################################################
+ # Rules to generate user.elf and copy on virtual disk
  user/init/build/init.elf: build_libs
  	$(MAKE) -C user/init
…
  	$(MAKE) -C user/ksh
  	mcopy -o -i $(DISK_IMAGE) $@ ::/bin/user
- user/pdcg/build/pdcg.elf: build_libs
- 	$(MAKE) -C user/pdcg
+ user/pgcd/build/pgcd.elf: build_libs
+ 	$(MAKE) -C user/pgcd
+ 	mcopy -o -i $(DISK_IMAGE) $@ ::/bin/user
+ user/hello/build/hello.elf: build_libs
+ 	$(MAKE) -C user/hello
  	mcopy -o -i $(DISK_IMAGE) $@ ::/bin/user
  user/sort/build/sort.elf: build_libs
trunk/hal/tsar_mips32/Makefile
r439 → r440

  CORE_OBJS = $(HAL_ARCH)/build/core/hal_special.o \
              $(HAL_ARCH)/build/core/hal_context.o \
              $(HAL_ARCH)/build/core/hal_atomic.o \
              $(HAL_ARCH)/build/core/hal_remote.o \
              $(HAL_ARCH)/build/core/hal_uspace.o \
              $(HAL_ARCH)/build/core/hal_irqmask.o \
              $(HAL_ARCH)/build/core/hal_gpt.o \
              $(HAL_ARCH)/build/core/hal_ppm.o \
              $(HAL_ARCH)/build/core/hal_vmm.o \
              $(HAL_ARCH)/build/core/hal_exception.o \
              $(HAL_ARCH)/build/core/hal_interrupt.o \
              $(HAL_ARCH)/build/core/hal_syscall.o \
              $(HAL_ARCH)/build/core/hal_drivers.o \
              $(HAL_ARCH)/build/core/hal_kentry.o \
              $(HAL_ARCH)/build/core/hal_switch.o \
              $(HAL_ARCH)/build/core/hal_user.o

- HAL_INCLUDE = -I$(KERNEL) \
-               -I$(HAL_ARCH)/drivers \
-               -I$(HAL)/generic
-               -I$(HAL_ARCH)/core \
-               -I../../tools/arch_info
-               -I$(KERNEL)/kern \
-               -I$(KERNEL)/mm \
-               -I$(KERNEL)/fs \
-               -I$(KERNEL)/syscalls \
-               -I$(KERNEL)/devices \
+ HAL_INCLUDE = -I$(KERNEL) \
+               -I$(HAL_ARCH)/drivers \
+               -I$(HAL)/generic \
+               -I$(HAL_ARCH)/core \
+               -I../../tools/arch_info \
+               -I$(KERNEL)/kern \
+               -I$(KERNEL)/mm \
+               -I$(KERNEL)/fs \
+               -I$(KERNEL)/syscalls \
+               -I$(KERNEL)/devices \
                -I$(KERNEL)/libk

+ ##############################
  # Rule to generate .o for HAL.
  compile: dirs $(CORE_OBJS) $(DRIVERS_OBJS)

+ ########################################
  # Rule to create the build directories.
  dirs:
…
  ##############################
  # rules to compile the drivers
- $(HAL_ARCH)/build/drivers/%.o: $(HAL_ARCH)/drivers/%.c \
-                                $(HAL_ARCH)/drivers/%.h \
-                                $(KERNEL)/kernel_config.h
+ $(HAL_ARCH)/build/drivers/%.o: $(HAL_ARCH)/drivers/%.c \
+                                $(HAL_ARCH)/drivers/%.h \
+                                $(KERNEL)/kernel_config.h \
                                 $(HAL_ARCH)/core/hal_types.h
  	$(CC) $(HAL_INCLUDE) $(CFLAGS) -c -o $@ $<

  ######################################
  # Rules to generate kernel/hal objects
- $(HAL_ARCH)/build/core/%.o: $(HAL_ARCH)/core/%.c \
-                             $(HAL)/generic/%.h
-                             $(KERNEL)/kernel_config.h
+ $(HAL_ARCH)/build/core/%.o: $(HAL_ARCH)/core/%.c \
+                             $(HAL)/generic/%.h \
+                             $(KERNEL)/kernel_config.h \
                              $(HAL_ARCH)/core/hal_types.h
  	$(CC) $(HAL_INCLUDE) $(CFLAGS) -c -o $@ $<

  $(HAL_ARCH)/build/core/hal_kentry.o: $(HAL_ARCH)/core/hal_kentry.S \
                                       $(HAL_ARCH)/core/hal_kentry.h \
                                       $(KERNEL)/kernel_config.h \
                                       $(HAL_ARCH)/core/hal_types.h
  	$(CC) $(HAL_INCLUDE) $(CFLAGS) -c -o $@ $<

  $(HAL_ARCH)/build/core/hal_switch.o: $(HAL_ARCH)/core/hal_switch.S \
                                       $(HAL)/generic/hal_switch.h
  	$(CC) $(HAL_INCLUDE) $(CFLAGS) -c -o $@ $<
  	$(DU) -D $@ > $@.txt
trunk/hal/tsar_mips32/core/hal_exception.c
r438 → r440

  #include <core.h>
  #include <syscalls.h>
+ #include <shared_syscalls.h>
  #include <remote_spinlock.h>
  #include <hal_kentry.h>
…
  //////////////////////////////////////////////////////////////////////////////////////////
  // @ this   : pointer on faulty thread descriptor.
+ // @ excPC  :
  // @ is_ins : IBE if true / DBE if false.
  // @ return EXCP_NON_FATAL / EXCP_USER_ERROR / EXCP_KERNEL_PANIC
…
      // try to map the unmapped PTE
      error = vmm_handle_page_fault( process,
-                                    bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );  // vpn
+                                    bad_vaddr >> CONFIG_PPM_PAGE_SHIFT,   // vpn
+                                    false );                              // not a COW
      if( error )
      {
…
      {
          // try to allocate and copy the page
-         error = vmm_handle_cow( process,
-                                 bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );
+         error = vmm_handle_page_fault( process,
+                                        bad_vaddr >> CONFIG_PPM_PAGE_SHIFT,  // vpn
+                                        true );                              // COW
          if( error )
          {
…
      default:   // this is a kernel error => panic
      {
-         assert( false , __FUNCTION__ , "thread %x / epc %x / %s / vaddr = %x\n",
-                 this, excPC, hal_mmu_exception_str(excp_code) , bad_vaddr );
+         assert( false , __FUNCTION__ ,
+                 "thread %x / core[%x,%d] / epc %x / vaddr %x / cycle %d\n",
+                 this, local_cxy, this->core->lid, excPC, bad_vaddr, (uint32_t)hal_get_cycles() );

          return EXCP_KERNEL_PANIC;
…
      hal_exception_dump( this , uzone , error );

-     sys_kill( this->process->pid , SIGKILL );
+     sys_exit( EXIT_FAILURE );
  }
  else if( error == EXCP_KERNEL_PANIC )   // kernel error => kernel panic
…
      assert( false , __FUNCTION__ , "thread %x in process %x on core [%x,%d]",
-             this->trdid, this->process->pid , local_cxy , this->core->lid );
+             this , this->process->pid , local_cxy , this->core->lid );
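The notable change in this file is that the separate vmm_handle_cow() entry point disappears: vmm_handle_page_fault() now takes an explicit boolean to distinguish a copy-on-write fault from an unmapped page. A minimal self-contained sketch of that dispatch follows; the types, fault codes and stub handler below are illustrative, not the actual ALMOS-MKH API.

/* Sketch of the r440 fault dispatch: one handler, an explicit is_cow flag. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12              /* CONFIG_PPM_PAGE_SHIFT for TSAR: 4 Kbytes pages */

typedef enum { FAULT_UNMAPPED, FAULT_WRITE_PROTECTED } fault_t;

/* stand-in for vmm_handle_page_fault( process , vpn , is_cow ) */
static int handle_page_fault(uint32_t vpn, bool is_cow)
{
    printf("vpn %u : %s\n", vpn, is_cow ? "copy-on-write" : "map missing page");
    return 0;                      /* 0 stands for EXCP_NON_FATAL here */
}

static int mmu_exception(uint32_t bad_vaddr, fault_t code)
{
    uint32_t vpn = bad_vaddr >> PAGE_SHIFT;
    /* unmapped PTE -> demand paging ; write on a mapped page -> COW */
    return handle_page_fault(vpn, code == FAULT_WRITE_PROTECTED);
}

int main(void)
{
    mmu_exception(0x00403A10, FAULT_UNMAPPED);         /* vpn 1027, demand paging */
    mmu_exception(0x00403A10, FAULT_WRITE_PROTECTED);  /* same vpn, COW path      */
    return 0;
}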
trunk/hal/tsar_mips32/core/hal_gpt.c
r438 → r440

      // check page size
      assert( (CONFIG_PPM_PAGE_SIZE == 4096) , __FUNCTION__ ,
-             "for TSAR, the page must be 4 Kbytes\n" );
+             "for TSAR, the page size must be 4 Kbytes\n" );

      // allocates 2 physical pages for PT1
trunk/hal/tsar_mips32/drivers/soclib_bdv.c
r438 → r440

      // get client thread cluster and local pointer
      cxy_t      th_cxy = GET_CXY( th_xp );
-     thread_t * th_ptr = (thread_t *)GET_PTR( th_xp );
+     thread_t * th_ptr = GET_PTR( th_xp );

      // get command arguments and extended pointer on IOC device
…
      // get IOC device cluster and local pointer
      cxy_t     ioc_cxy = GET_CXY( ioc_xp );
-     chdev_t * ioc_ptr = (chdev_t *)GET_PTR( ioc_xp );
-
-     // get extended pointer on SOCLIB-BDV peripheral
-     xptr_t bdv_xp = hal_remote_lw( XPTR( ioc_cxy , &ioc_ptr->base ) );
-
-     // get SOCLIB_BDV device cluster and local pointer
-     cxy_t      bdv_cxy = GET_CXY( bdv_xp );
-     uint32_t * bdv_ptr = (uint32_t *)GET_PTR( bdv_xp );
+     chdev_t * ioc_ptr = GET_PTR( ioc_xp );
+
+     // get cluster and pointers for SOCLIB-BDV peripheral segment base
+     xptr_t     seg_xp  = (xptr_t)hal_remote_lwd( XPTR( ioc_cxy , &ioc_ptr->base ) );
+     cxy_t      seg_cxy = GET_CXY( seg_xp );
+     uint32_t * seg_ptr = GET_PTR( seg_xp );

      // split buffer address in two 32 bits words
…
      // set SOCLIB_BDV registers to start one I/O operation
-     hal_remote_sw( XPTR( bdv_cxy , bdv_ptr + BDV_IRQ_ENABLE_REG ) , 1 );
-     hal_remote_sw( XPTR( bdv_cxy , bdv_ptr + BDV_BUFFER_REG )     , buf_lsb );
-     hal_remote_sw( XPTR( bdv_cxy , bdv_ptr + BDV_BUFFER_EXT_REG ) , buf_msb );
-     hal_remote_sw( XPTR( bdv_cxy , bdv_ptr + BDV_LBA_REG )        , lba );
-     hal_remote_sw( XPTR( bdv_cxy , bdv_ptr + BDV_COUNT_REG )      , count );
-     hal_remote_sw( XPTR( bdv_cxy , bdv_ptr + BDV_OP_REG )         , op );
+     hal_remote_sw( XPTR( seg_cxy , seg_ptr + BDV_IRQ_ENABLE_REG ) , 1 );
+     hal_remote_sw( XPTR( seg_cxy , seg_ptr + BDV_BUFFER_REG )     , buf_lsb );
+     hal_remote_sw( XPTR( seg_cxy , seg_ptr + BDV_BUFFER_EXT_REG ) , buf_msb );
+     hal_remote_sw( XPTR( seg_cxy , seg_ptr + BDV_LBA_REG )        , lba );
+     hal_remote_sw( XPTR( seg_cxy , seg_ptr + BDV_COUNT_REG )      , count );
+     hal_remote_sw( XPTR( seg_cxy , seg_ptr + BDV_OP_REG )         , op );

      // waiting policy depends on the command type
…
          while (1)
          {
-             status = hal_remote_lw( XPTR( bdv_cxy , bdv_ptr + BDV_STATUS_REG ) );
+             status = hal_remote_lw( XPTR( seg_cxy , seg_ptr + BDV_STATUS_REG ) );

              if( status == BDV_READ_SUCCESS )  // successfully completed
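This driver programs a 64-bit buffer address into two 32-bit registers (BDV_BUFFER_REG and BDV_BUFFER_EXT_REG). A quick self-contained check of the split-and-recombine arithmetic, independent of the SoCLib hardware; the 40-bit TSAR-style address value below is just an example:

/* Split a 64-bit address into the two 32-bit words a BDV-style device expects. */
#include <stdint.h>
#include <assert.h>
#include <stdio.h>

int main(void)
{
    uint64_t buf = 0x000000AB12345678ULL;   /* example 40-bit physical address */

    uint32_t lsb = (uint32_t)buf;           /* would go to BDV_BUFFER_REG      */
    uint32_t msb = (uint32_t)(buf >> 32);   /* would go to BDV_BUFFER_EXT_REG  */

    /* the device reassembles the address the same way */
    uint64_t rebuilt = ((uint64_t)msb << 32) | lsb;
    assert(rebuilt == buf);
    printf("lsb=0x%08X msb=0x%08X ok\n", lsb, msb);
    return 0;
}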
trunk/hal/tsar_mips32/drivers/soclib_hba.c
r437 → r440

      // get client thread cluster and local pointer
      cxy_t      th_cxy = GET_CXY( th_xp );
-     thread_t * th_ptr = (thread_t *)GET_PTR( th_xp );
+     thread_t * th_ptr = GET_PTR( th_xp );

      // get command arguments and extended pointer on IOC device
…
      // get IOC device cluster and local pointer
      cxy_t     dev_cxy = GET_CXY( dev_xp );
-     chdev_t * dev_ptr = (chdev_t *)GET_PTR( dev_xp );
-
-     // get extended pointer on SOCLIB-HBA peripheral
-     xptr_t hba_xp = hal_remote_lw( XPTR( dev_cxy , &dev_ptr->base ) );
-
-     // get SOCLIB_HBA device cluster and local pointer
+     chdev_t * dev_ptr = GET_PTR( dev_xp );
+
+     // get cluster and pointers for SOCLIB-HBA peripheral segment base
+     xptr_t     hba_xp  = (xptr_t)hal_remote_lwd( XPTR( dev_cxy , &dev_ptr->base ) );
      cxy_t      hba_cxy = GET_CXY( hba_xp );
-     uint32_t * hba_ptr = (uint32_t *)GET_PTR( hba_xp );
+     uint32_t * hba_ptr = GET_PTR( hba_xp );

      // try to register the I/O operation in a free slot
trunk/hal/tsar_mips32/drivers/soclib_mmc.c
r279 → r440

      xptr_t     dev_xp;     // extended pointer on MMC device
      uint32_t   type;       // MMC command : type
-     uint64_t   buf_paddr;  // MMC command : buffer physical address
+     void     * buf_ptr;    // MMC command : buffer pointer
      uint32_t   buf_size;   // MMC command : buffer size
      uint32_t   reg_index;  // MMC command : register index in MMC peripheral
…
      // get client thread cluster and local pointer
      cxy_t      th_cxy = GET_CXY( th_xp );
-     thread_t * th_ptr = (thread_t *)GET_PTR( th_xp );
+     thread_t * th_ptr = GET_PTR( th_xp );

      // get command type and extended pointer on MMC device
…
      // get MMC device cluster and local pointer
      cxy_t     dev_cxy = GET_CXY( dev_xp );
-     chdev_t * dev_ptr = (chdev_t *)GET_PTR( dev_xp );
+     chdev_t * dev_ptr = GET_PTR( dev_xp );

-     // get extended pointer on SOCLIB-MMC peripheral
-     xptr_t mmc_xp = hal_remote_lw( XPTR( dev_cxy , &dev_ptr->base ) );
-
-     // get SOCLIB_MMC peripheral cluster and local pointer
-     cxy_t      mmc_cxy = GET_CXY( mmc_xp );
-     uint32_t * mmc_ptr = (uint32_t *)GET_PTR( mmc_xp );
+     // get cluster and pointers for SOCLIB_MMC peripheral segment base
+     xptr_t     seg_xp  = (xptr_t)hal_remote_lwd( XPTR( dev_cxy , &dev_ptr->base ) );
+     cxy_t      seg_cxy = GET_CXY( seg_xp );
+     uint32_t * seg_ptr = GET_PTR( seg_xp );

      if( (type == MMC_CC_INVAL) || (type == MMC_CC_SYNC) )
      {
-         // get buffer paddr
-         buf_paddr = hal_remote_lwd( XPTR( th_cxy , &th_ptr->mmc_cmd.buf_paddr ) );
-
-         // split buffer paddr in two 32 bits words
-         uint32_t buf_lo = (uint32_t)( buf_paddr );
-         uint32_t buf_hi = (uint32_t)( buf_paddr>>32 );
-
-         // get buffer size
-         buf_size = hal_remote_lw( XPTR( th_cxy , &th_ptr->mmc_cmd.buf_size ) );
-
-         // get command type
-         uint32_t cc_cmd;
+         // get buffer pointer and size
+         buf_ptr  = hal_remote_lpt( XPTR( th_cxy , &th_ptr->mmc_cmd.buf_ptr ) );
+         buf_size = hal_remote_lw ( XPTR( th_cxy , &th_ptr->mmc_cmd.buf_size ) );
+
+         // set command type
+         uint32_t cc_cmd;
          if( type == MMC_CC_INVAL ) cc_cmd = SOCLIB_MMC_CC_INVAL;
          else                       cc_cmd = SOCLIB_MMC_CC_SYNC;

          // set SOCLIB_MMC registers to start INVAL/SYNC operation
-         hal_remote_sw( XPTR( mmc_cxy , mmc_ptr + SOCLIB_MMC_ADDR_LO )    , buf_lo );
-         hal_remote_sw( XPTR( mmc_cxy , mmc_ptr + SOCLIB_MMC_ADDR_HI )    , buf_hi );
-         hal_remote_sw( XPTR( mmc_cxy , mmc_ptr + SOCLIB_MMC_BUF_LENGTH ) , buf_size );
-         hal_remote_sw( XPTR( mmc_cxy , mmc_ptr + SOCLIB_MMC_CMD_TYPE )   , cc_cmd );
+         hal_remote_sw( XPTR( seg_cxy , seg_ptr + SOCLIB_MMC_ADDR_LO )    , (uint32_t)buf_ptr );
+         hal_remote_sw( XPTR( seg_cxy , seg_ptr + SOCLIB_MMC_ADDR_HI )    , (uint32_t)dev_cxy );
+         hal_remote_sw( XPTR( seg_cxy , seg_ptr + SOCLIB_MMC_BUF_LENGTH ) , buf_size );
+         hal_remote_sw( XPTR( seg_cxy , seg_ptr + SOCLIB_MMC_CMD_TYPE )   , cc_cmd );
      }
      else   // (type == MMC_GET_ERROR) or (type == MMC_SET_ERROR) or (type == MMC_GET_INSTRU)
…
          if( (type == MMC_GET_ERROR) || (type == MMC_GET_INSTRU) )
          {
-             *reg_ptr = hal_remote_lw( XPTR( mmc_cxy , mmc_ptr + reg_index ) );
+             *reg_ptr = hal_remote_lw( XPTR( seg_cxy , seg_ptr + reg_index ) );
          }
          else   // type == MMC_SET_ERROR
          {
-             hal_remote_sw( XPTR( mmc_cxy , mmc_ptr + reg_index ) , *reg_ptr );
+             hal_remote_sw( XPTR( seg_cxy , seg_ptr + reg_index ) , *reg_ptr );
          }
      }
trunk/hal/tsar_mips32/drivers/soclib_pic.c
r438 → r440

      // in TSAR : XCU output [4*lid] is connected to core [lid]
      hal_remote_sw( XPTR( src_chdev_cxy ,
-         &seg_xcu_ptr[ (XCU_MSK_HWI_ENABLE << 5) | (lid<<4) ] ) , (1 << irq_id) );
+         &seg_xcu_ptr[ (XCU_MSK_HWI_ENABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
  }
  else if( irq_type == SOCLIB_TYPE_WTI )
…
      // in TSAR : XCU output [4*lid] is connected to core [lid]
      hal_remote_sw( XPTR( src_chdev_cxy ,
-         &seg_xcu_ptr[ (XCU_MSK_WTI_ENABLE << 5) | (lid<<4) ] ) , (1 << irq_id) );
+         &seg_xcu_ptr[ (XCU_MSK_WTI_ENABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
  }
  else
…
      // in TSAR : XCU output [4*lid] is connected to core [lid]
      hal_remote_sw( XPTR( src_chdev_cxy ,
-         &seg_xcu_ptr[ (XCU_MSK_WTI_DISABLE << 5) | (lid<<4) ] ) , (1 << irq_id) );
+         &seg_xcu_ptr[ (XCU_MSK_WTI_DISABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
  }
  else
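Why (lid<<2) rather than (lid<<4): seg_xcu_ptr is a uint32_t*, so the index counts 32-bit words, and the comment "XCU output [4*lid] is connected to core [lid]" implies a per-core stride of 4 words, i.e. lid<<2 as a word index. A small self-contained sketch of the offset computation; the register-map details and the function code value are assumptions for illustration, not taken from the diff:

/* Compute XCU mask-register word indices and byte offsets per core. */
#include <stdint.h>
#include <stdio.h>

#define XCU_MSK_HWI_ENABLE 2   /* illustrative function code */

int main(void)
{
    for (uint32_t lid = 0; lid < 4; lid++)
    {
        uint32_t word_index  = (XCU_MSK_HWI_ENABLE << 5) | (lid << 2);  /* 4 words per core */
        uint32_t byte_offset = word_index * (uint32_t)sizeof(uint32_t);
        printf("core %u : word index %u / byte offset 0x%X\n",
               lid, word_index, byte_offset);
    }
    return 0;
}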
trunk/hal/tsar_mips32/drivers/soclib_tty.c
r438 → r440

  #endif

- // get TXT device pointers
+ // get TXT device cluster and pointers
  xptr_t    dev_xp  = (xptr_t)hal_remote_lwd( XPTR( th_cxy , &th_ptr->txt_cmd.dev_xp ) );
  cxy_t     dev_cxy = GET_CXY( dev_xp );
  chdev_t * dev_ptr = GET_PTR( dev_xp );

- // get extended pointer on SOCLIB_TTY base segment
+ // get cluster and pointers for SOCLIB_TTY peripheral base segment
  xptr_t tty_xp = (xptr_t)hal_remote_lwd( XPTR( dev_cxy , &dev_ptr->base ) );
-
- // get SOCLIB_TTY base segment cluster and local pointer
  cxy_t      tty_cxy = GET_CXY( tty_xp );
  uint32_t * tty_ptr = GET_PTR( tty_xp );
…
  owner_pid = hal_remote_lw( XPTR( owner_cxy , &owner_ptr->pid ) );

- // block owner process only if it is not a KSH
+ // block owner process only if it is not INIT or KSH
  if( process_get_ppid( owner_xp ) > 1 )
  {
…
  if( byte == 0x03 )
  {
-     // get pointers on TXT owner process in owner cluster
+     // get pointer on TXT owner process in owner cluster
      owner_xp = process_txt_get_owner( channel );
…
      process_txt_detach( owner_xp );

-     // mark for delete all processes in all clusters, but the main
+     // mark for delete all threads in all clusters, but the main
      process_sigaction( owner_pid , DELETE_ALL_THREADS );
trunk/kernel/Makefile
r439 → r440

  endif

- #We choose drivers and hal file we need to link with kernel.elf
+ #We choose drivers and hal file to be linked with kernel.elf
  ifeq ($(ARCH_NAME), tsar_mips32)
+
  DRIVERS_OBJS = $(HAL_ARCH)/build/drivers/soclib_tty.o \
                 $(HAL_ARCH)/build/drivers/soclib_bdv.o \
                 $(HAL_ARCH)/build/drivers/soclib_hba.o \
                 $(HAL_ARCH)/build/drivers/soclib_mmc.o \
                 $(HAL_ARCH)/build/drivers/soclib_pic.o \
                 $(HAL_ARCH)/build/drivers/soclib_nic.o \
                 $(HAL_ARCH)/build/drivers/soclib_dma.o \
                 $(HAL_ARCH)/build/drivers/soclib_iob.o

- CORE_OBJS = $(HAL_ARCH)/build/core/hal_special.o \
-             $(HAL_ARCH)/build/core/hal_context.o \
-             $(HAL_ARCH)/build/core/hal_atomic.o \
-             $(HAL_ARCH)/build/core/hal_remote.o \
-             $(HAL_ARCH)/build/core/hal_uspace.o \
-             $(HAL_ARCH)/build/core/hal_irqmask.o \
-             $(HAL_ARCH)/build/core/hal_gpt.o \
-             $(HAL_ARCH)/build/core/hal_ppm.o \
-             $(HAL_ARCH)/build/core/hal_vmm.o \
-             $(HAL_ARCH)/build/core/hal_exception.o \
-             $(HAL_ARCH)/build/core/hal_interrupt.o \
-             $(HAL_ARCH)/build/core/hal_syscall.o \
-             $(HAL_ARCH)/build/core/hal_drivers.o \
-             $(HAL_ARCH)/build/core/hal_kentry.o \
-             $(HAL_ARCH)/build/core/hal_switch.o
+ HAL_OBJS = $(HAL_ARCH)/build/core/hal_special.o \
+            $(HAL_ARCH)/build/core/hal_context.o \
+            $(HAL_ARCH)/build/core/hal_atomic.o \
+            $(HAL_ARCH)/build/core/hal_remote.o \
+            $(HAL_ARCH)/build/core/hal_uspace.o \
+            $(HAL_ARCH)/build/core/hal_irqmask.o \
+            $(HAL_ARCH)/build/core/hal_gpt.o \
+            $(HAL_ARCH)/build/core/hal_ppm.o \
+            $(HAL_ARCH)/build/core/hal_vmm.o \
+            $(HAL_ARCH)/build/core/hal_kentry.o \
+            $(HAL_ARCH)/build/core/hal_switch.o \
+            $(HAL_ARCH)/build/core/hal_syscall.o \
+            $(HAL_ARCH)/build/core/hal_exception.o \
+            $(HAL_ARCH)/build/core/hal_interrupt.o \
+            $(HAL_ARCH)/build/core/hal_drivers.o
  endif
…
  DRIVERS_OBJS = $(HAL_ARCH)/build/drivers/ioc_ata.o \
                 $(HAL_ARCH)/build/drivers/pic_apic.o \
                 $(HAL_ARCH)/build/drivers/txt_rs232.o

- CORE_OBJS = \
-             $(HAL_ARCH)/build/core/hal_boot.o \
+ HAL_OBJS = $(HAL_ARCH)/build/core/hal_boot.o \
             $(HAL_ARCH)/build/core/hal_smpboot.o \
             $(HAL_ARCH)/build/core/hal_init.o \
             $(HAL_ARCH)/build/core/hal_cpu.o \
             $(HAL_ARCH)/build/core/hal_kentry.o \
             $(HAL_ARCH)/build/core/hal_acpi.o \
             $(HAL_ARCH)/build/core/hal_apic.o \
             $(HAL_ARCH)/build/core/x86_printf.o \
             $(HAL_ARCH)/build/core/hal_drivers.o \
             $(HAL_ARCH)/build/core/hal_special.o \
             $(HAL_ARCH)/build/core/hal_context.o \
             $(HAL_ARCH)/build/core/hal_atomic.o \
             $(HAL_ARCH)/build/core/hal_remote.o \
             $(HAL_ARCH)/build/core/hal_uspace.o \
             $(HAL_ARCH)/build/core/hal_irqmask.o \
             $(HAL_ARCH)/build/core/hal_gpt.o \
             $(HAL_ARCH)/build/core/hal_ppm.o \
             $(HAL_ARCH)/build/core/hal_exception.o \
             $(HAL_ARCH)/build/core/hal_interrupt.o \
             $(HAL_ARCH)/build/core/hal_syscall.o
  endif
…
  # List of directories to be searched for included files
  # when compiling for kernel.elf generation
- KERNEL_INCLUDE = -I. \
-                  -Ikern \
-                  -Idevices \
-                  -Isyscalls \
-                  -I$(HAL_ARCH)/drivers \
-                  -Isyscalls \
-                  -Ilibk \
-                  -Imm \
-                  -Ifs \
-                  -I../tools/arch_info
-                  -I$(HAL)/generic
+ KERNEL_INCLUDE = -I. \
+                  -Ikern \
+                  -Idevices \
+                  -Isyscalls \
+                  -I$(HAL_ARCH)/drivers \
+                  -Isyscalls \
+                  -Ilibk \
+                  -Imm \
+                  -Ifs \
+                  -I../tools/arch_info \
+                  -I$(HAL)/generic \
                   -I$(HAL_ARCH)/core \
                   -I..
…
  #######################################
  # Rules to generate kernel/kern objects
  build/kern/%.o: kern/%.c \
                  kern/%.h \
                  kernel_config.h \
                  $(HAL_ARCH)/core/hal_types.h
  	$(CC) $(KERNEL_INCLUDE) $(CFLAGS) -c -o $@ $<
  (the same prerequisite-list cleanup is applied to the kernel/devices,
   kernel/mm, kernel/libk, kernel/syscalls and kernel/fs rules)
…
  ##############################
  # Rule to generate kernel.elf
- # TODO the syscalls grouped in SYS_OBJS_2 must be introduced
- build/kernel.elf: $(KERN_OBJS) \
-                   $(CORE_OBJS) \
-                   $(DEV_OBJS) \
-                   $(MM_OBJS) \
-                   $(LIBK_OBJS) \
-                   $(DRIVERS_OBJS) \
-                   $(VFS_OBJS) \
-                   $(SYS_OBJS_0) \
-                   $(SYS_OBJS_1) \
-                   $(SYS_OBJS_2) \
-                   $(SYS_OBJS_3) \
-                   $(SYS_OBJS_4)
+ build/kernel.elf: $(KERN_OBJS) \
+                   $(HAL_OBJS_0) \
+                   $(HAL_OBJS_1) \
+                   $(DEV_OBJS) \
+                   $(MM_OBJS) \
+                   $(LIBK_OBJS) \
+                   $(DRIVERS_OBJS) \
+                   $(VFS_OBJS) \
+                   $(SYS_OBJS_0) \
+                   $(SYS_OBJS_1) \
+                   $(SYS_OBJS_2) \
+                   $(SYS_OBJS_3) \
+                   $(SYS_OBJS_4) \
+                   $(HAL_ARCH)/kernel.ld
  	$(LD) -o $@ -T $(HAL_ARCH)/kernel.ld $(LIBGCC) \
- 	$(KERN_OBJS) $(CORE_OBJS) $(DEV_OBJS) $(MM_OBJS) \
- 	$(LIBK_OBJS) $(DRIVERS_OBJS) $(VFS_OBJS) \
- 	$(SYS_OBJS_0) $(SYS_OBJS_1) $(SYS_OBJS_2) \
+ 	$(KERN_OBJS) $(HAL_OBJS) $(DEV_OBJS) $(MM_OBJS) \
+ 	$(LIBK_OBJS) $(DRIVERS_OBJS) $(VFS_OBJS) \
+ 	$(SYS_OBJS_0) $(SYS_OBJS_1) $(SYS_OBJS_2) \
  	$(SYS_OBJS_3) $(SYS_OBJS_4) -lgcc
  	$(DU) -D $@ > $@.txt
trunk/kernel/devices/dev_fbf.c
r438 → r440

  //////////////////////////////////////////////////////////////////////////////////
  // This static function is called by dev_fbf_read() & dev_fbf_write() functions.
- // It builds and registers the command in the calling thread descriptor, after
- // translation of buffer virtual address to physical address.
- // Then, it registers the calling thead in the relevant DMA chdev waiting queue.
+ // It builds and registers the command in the calling thread descriptor.
+ // Then, it registers the calling thread in the relevant DMA chdev waiting queue.
  // Finally it blocks on the THREAD_BLOCKED_DEV condition and deschedule.
  ////////////////////////////////////i/////////////////////////////////////////////
…
                            uint32_t offset )
  {
-     error_t   error;
-     paddr_t   buf_paddr;
-
-     thread_t * this = CURRENT_THREAD;   // pointer on client thread
-
-     // Get buffer physical address
-     error = vmm_v2p_translate( CONFIG_KERNEL_IDENTITY_MAP , buffer , &buf_paddr );
-
-     // check buffer is mapped
-     assert( (error == 0) , __FUNCTION__ ,
-     "cannot translate vaddr = %p in process %x\n", buffer, this->process->pid );

      // get extended pointer on FBF chdev descriptor
…
      // compute extended pointers on frame buffer and memory buffer
-     xptr_t mem_buf_xp = XPTR( local_cxy , (void *)(intptr_t)buf_paddr );
+     xptr_t mem_buf_xp = XPTR( local_cxy , buffer );
      xptr_t fbf_buf_xp = base + offset;
trunk/kernel/devices/dev_ioc.c
r438 → r440

  // This static function is called by dev_ioc_read() & dev_ioc_write() functions.
  // It builds and registers the command in the calling thread descriptor.
- // Then, it registers the calling thead in chdev waiting queue.
+ // Then, it registers the calling thead in IOC chdev waiting queue.
  // Finally it blocks on the THREAD_BLOCKED_IO condition and deschedule.
  ////////////////////////////////////i/////////////////////////////////////////////
trunk/kernel/devices/dev_mmc.c
r438 → r440

  /////////////////////////////////////////////////////////////////////////////
- // This static function is called by all MMC device functions.
+ // This static function is called by all MMC device access functions.
  // It makes some checking, takes the lock granting exclusive
  // access to MMC peripheral, call the driver to execute the command
…
      // get MMC device cluster identifier & local pointer
      cxy_t     dev_cxy = GET_CXY( dev_xp );
-     chdev_t * dev_ptr = (chdev_t *)GET_PTR( dev_xp );
+     chdev_t * dev_ptr = GET_PTR( dev_xp );

      // get driver command function pointer from MMC device descriptor
…
      // get calling thread local pointer
-     thread_t * this= CURRENT_THREAD;
+     thread_t * this = CURRENT_THREAD;

  #if DEBUG_DEV_MMC
…
      "buffer not aligned on cache line" );

-     // get buffer physical address
-     paddr_t buf_paddr;
-     error = vmm_v2p_translate( CONFIG_KERNEL_IDENTITY_MAP , buf_ptr , &buf_paddr );
-
-     assert( (error == 0) , __FUNCTION__ , "cannot get buffer paddr" );
-
      // store command arguments in thread descriptor
      this->mmc_cmd.dev_xp   = chdev_dir.mmc[buf_cxy];
      this->mmc_cmd.type     = MMC_CC_INVAL;
-     this->mmc_cmd.buf_paddr = buf_paddr;
+     this->mmc_cmd.buf_ptr  = buf_ptr;
      this->mmc_cmd.buf_size = buf_size;
…
      error_t error;

-     // get calling thread local pointer
-     thread_t * this  = CURRENT_THREAD;
+     thread_t * this = CURRENT_THREAD;

  #if DEBUG_DEV_MMC
…
      "buffer not aligned on cache line" );

-     // get buffer physical address
-     paddr_t buf_paddr;
-     error = vmm_v2p_translate( CONFIG_KERNEL_IDENTITY_MAP , buf_ptr , &buf_paddr );
-
-     assert( (error == 0) , __FUNCTION__ , "cannot get buffer paddr" );
-
      // store command arguments in thread descriptor
      this->mmc_cmd.dev_xp   = chdev_dir.mmc[buf_cxy];
      this->mmc_cmd.type     = MMC_CC_SYNC;
-     this->mmc_cmd.buf_paddr = buf_paddr;
+     this->mmc_cmd.buf_ptr  = buf_ptr;
      this->mmc_cmd.buf_size = buf_size;
trunk/kernel/devices/dev_mmc.h
r437 → r440

      xptr_t     dev_xp;     /*! extended pointer on target MMC device descriptor        */
      uint32_t   type;       /*! CC_INVAL / CC_SYNC / GET_ERROR / SET_ERROR / GET_INSTRU */
-     paddr_t    buf_paddr;  /*! physical address of memory buffer (used by INVAL/SYNC)  */
-     uint32_t   buf_size;   /*! buffer size in bytes (used by INVAL/SYNC)               */
+     void     * buf_ptr;    /*! local pointer on memory buffer    (used by INVAL/SYNC)  */
+     uint32_t   buf_size;   /*! memory buffer size (bytes)        (used by INVAL/SYNC)  */
      uint32_t   reg_index;  /*! register index in MMC peripheral  (used by SET/GET)     */
      uint32_t * reg_ptr;    /*! local pointer on src/dst buffer   (used by SET/GET)     */
trunk/kernel/fs/fatfs.c
r438 → r440

  //////////////////////////////////////////////////////////////////////////////////////////
- // This function returns the LBA of the first sector of a FAT cluster.
+ // This static function returns the LBA of the first sector of a FAT cluster.
  // This function can be called by any thread running in any cluster.
  //////////////////////////////////////////////////////////////////////////////////////////
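For reference, the standard FAT32 computation such a function typically performs; the constant names below are illustrative, not the fatfs.c field names:

/* Map a FAT32 cluster number to the LBA of its first sector. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t data_begin_lba      = 2048;  /* first sector of the data region */
    uint32_t sectors_per_cluster = 8;

    uint32_t cluster = 5;                 /* FAT cluster numbering starts at 2 */
    uint32_t lba = data_begin_lba + (cluster - 2) * sectors_per_cluster;

    printf("cluster %u starts at LBA %u\n", cluster, lba);  /* LBA 2072 */
    return 0;
}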
trunk/kernel/kern/chdev.c
r438 → r440

  {
      thread_t * server_ptr;   // local pointer on server thread associated to chdev
+     xptr_t     server_xp;    // extended pointer on server thread
      core_t   * core_ptr;     // local pointer on core running the server thread
      uint32_t   lid;          // core running the server thread local index
…
      thread_t * this = CURRENT_THREAD;

-     // get device descriptor cluster and local pointer
+     // get chdev cluster and local pointer
      cxy_t     chdev_cxy = GET_CXY( chdev_xp );
-     chdev_t * chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
+     chdev_t * chdev_ptr = GET_PTR( chdev_xp );
+
+     // get local and extended pointers on server thread
+     server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
+     server_xp  = XPTR( chdev_cxy , server_ptr );
+
+     // get local pointer on core running the server thread
+     core_ptr = (core_t *)hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->core ) );
+
+     // get server core local index
+     lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) );

  #if (DEBUG_CHDEV_CMD_RX || DEBUG_CHDEV_CMD_TX)
…
  #endif

-     // build extended pointers on client thread xlist and device root
-     xptr_t list_xp = XPTR( local_cxy , &this->wait_list );
-     xptr_t root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root );
-
-     // get local pointer on server thread
-     server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
-
-     // build extended pointer on chdev lock protecting queue
+     // build extended pointer on client thread xlist
+     xptr_t list_xp = XPTR( local_cxy , &this->wait_list );
+
+     // build extended pointer on chdev waiting queue root
+     xptr_t root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root );
+
+     // build extended pointer on server thread blocked state
+     xptr_t blocked_xp = XPTR( chdev_cxy , &server_ptr->blocked );
+
+     // build extended pointer on lock protecting chdev waiting queue
      lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock );

-     // get local pointer on core running the server thread
-     core_ptr = (core_t *)hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->core ) );
-
-     // get core local index
-     lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) );
-
-     // compute server core != thread core
-     different = (lid != this->core->lid) || (local_cxy != chdev_cxy);
-
-     // enter critical section to make atomic :
-     // (1) client blocking
-     // (2) client registration in server queue
-     // (3) IPI to force server scheduling
-     // (4) descheduling
-     // ... in this order
+     // critical section for the following sequence:
+     // (1) take the lock protecting waiting queue
+     // (2) block the client thread
+     // (3) unblock the server thread if required
+     // (4) register client thread in server queue
+     // (5) send IPI to force server scheduling
+     // (6) release the lock protecting waiting queue
+     // (7) deschedule
+
+     // enter critical section
      hal_disable_irq( &save_sr );
+
+     // take the lock
+     remote_spinlock_lock( lock_xp );

      // block current thread
      thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );

+     if( hal_remote_lw( blocked_xp ) & THREAD_BLOCKED_IDLE )
+         thread_unblock( server_xp , THREAD_BLOCKED_IDLE );
+
      // register client thread in waiting queue
-     remote_spinlock_lock( lock_xp );
      xlist_add_last( root_xp , list_xp );
-     remote_spinlock_unlock( lock_xp );
-
-     // send IPI to core running the server thread if required
+
+     // send IPI to core running the server thread when server != client
+     different = (lid != this->core->lid) || (local_cxy != chdev_cxy);
      if( different ) dev_pic_send_ipi( chdev_cxy , lid );

+     // release lock
+     remote_spinlock_unlock( lock_xp );
+
      // deschedule
      assert( thread_can_yield( this ) , __FUNCTION__ , "illegal sched_yield\n" );
…
          remote_spinlock_unlock( lock_xp );

+         // block
+         thread_block( XPTR( local_cxy , server ) , THREAD_BLOCKED_IDLE );
+
          // deschedule
+         assert( thread_can_yield( server ) , __FUNCTION__ , "illegal sched_yield\n" );
          sched_yield("I/O queue empty");
      }
      else   // waiting queue not empty
      {
+         // get extended pointer on first client thread
+         client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
+
+         // get client thread cluster and local pointer
+         client_cxy = GET_CXY( client_xp );
+         client_ptr = GET_PTR( client_xp );
+
+         // remove this first client thread from waiting queue
+         xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) );
+
          // release lock
          remote_spinlock_unlock( lock_xp );
-
-         // get extended pointer on first client thread
-         client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
-
-         // get client thread cluster, local pointer, and identifier
-         client_cxy = GET_CXY( client_xp );
-         client_ptr = (thread_t *)GET_PTR( client_xp );

  #if DEBUG_CHDEV_SERVER_RX
…
          chdev->cmd( client_xp );

-         // remove the client thread from waiting queue
-         remote_spinlock_lock( lock_xp );
-         xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) );
-         remote_spinlock_unlock( lock_xp );
-
          // unblock client thread
          thread_unblock( client_xp , THREAD_BLOCKED_IO );
…
      chdev_t * chdev_ptr;

+     assert( (file_xp != XPTR_NULL) , __FUNCTION__,
+             "file_xp == XPTR_NULL\n" );
+
      // get cluster and local pointer on remote file descriptor
      // associated inode and chdev are stored in same cluster as the file desc.
…
      assert( (inode_type == INODE_TYPE_DEV) , __FUNCTION__ ,
-             "inode type %d is not INODE_TYPE_DEV", inode_type );
+             "inode type %d is not INODE_TYPE_DEV\n", inode_type );

      // get chdev local pointer from inode extension
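The reordering above is the substantive fix: the client now blocks itself, wakes the server if it is idle, and registers its request all while holding the queue lock, so the server can never miss a request posted concurrently with its own "queue empty" check. Below is a hedged user-space analogue of that ordering, using POSIX threads rather than the kernel's remote spinlocks and scheduler; all names are illustrative, not ALMOS-MKH APIs.

/* User-space analogue of the chdev client/server handshake under one lock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool queued  = false;   /* stands for the xlist entry in wait_root */
static bool blocked = false;   /* stands for the THREAD_BLOCKED_IO bit    */

static void *server(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!queued) pthread_cond_wait(&cond, &lock);  /* sleep while queue empty */
    queued = false;                                   /* dequeue under the lock  */
    pthread_mutex_unlock(&lock);

    /* ... the I/O command would be executed here ... */

    pthread_mutex_lock(&lock);
    blocked = false;                                  /* unblock the client */
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, server, NULL);

    pthread_mutex_lock(&lock);               /* (1) take the lock            */
    blocked = true;                          /* (2) block the client first,  */
    queued  = true;                          /* (4) then register request,   */
    pthread_cond_broadcast(&cond);           /* (5) then wake the server,    */
    while (blocked)                          /* (7) then "deschedule";       */
        pthread_cond_wait(&cond, &lock);     /* wait releases (6) the lock   */
    pthread_mutex_unlock(&lock);

    pthread_join(tid, NULL);
    printf("command completed\n");
    return 0;
}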
trunk/kernel/kern/chdev.h
r428 → r440

   * independant) Channel Device descriptor (in brief "chdev").
   * ALMOS-MKH supports multi-channels peripherals, and defines one separated chdev
-  * descriptor for each channel (and for each RX/TX direction for the NIC device).
+  * descriptor for each channel (and for each RX/TX direction for the NIC and TXT devices).
   * Each chdev contains a waiting queue, registering the "client threads" requests,
   * and an associated "server thread", handling these requests.
trunk/kernel/kern/cluster.c
r438 → r440

  #endif

-     // initialises RPC fifo
-     local_fifo_init( &cluster->rpc_fifo );
-     cluster->rpc_threads = 0;
+     // initialises RPC FIFOs
+     for( lid = 0 ; lid < cluster->cores_nr; lid++ )
+     {
+         local_fifo_init( &cluster->rpc_fifo[lid] );
+         cluster->rpc_threads[lid] = 0;
+     }

  #if( DEBUG_CLUSTER_INIT & 1 )
…
  lid_t cluster_select_local_core()
  {
-     uint32_t min = 100;
-     lid_t    sel = 0;
-     lid_t    lid;
+     uint32_t      min = 1000;
+     lid_t         sel = 0;
+     uint32_t      nthreads;
+     lid_t         lid;
+     scheduler_t * sched;

      cluster_t * cluster = LOCAL_CLUSTER;
…
      for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
      {
-         if( cluster->core_tbl[lid].usage < min )
+         sched    = &cluster->core_tbl[lid].scheduler;
+         nthreads = sched->u_threads_nr + sched->k_threads_nr;
+
+         if( nthreads < min )
          {
-             min = cluster->core_tbl[lid].usage;
+             min = nthreads;
              sel = lid;
          }
…
      bool_t found;

+ #if DEBUG_CLUSTER_PID_ALLOC
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( DEBUG_CLUSTER_PID_ALLOC < cycle )
+ printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
+ #endif
+
      pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
…
      }

+ #if DEBUG_CLUSTER_PID_ALLOC
+ cycle = (uint32_t)hal_get_cycles();
+ if( DEBUG_CLUSTER_PID_ALLOC < cycle )
+ printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
+ #endif
+
  } // end cluster_pid_alloc()

  //////////////////////////////////////////
  void cluster_pid_release( pid_t pid )
  {
+
+ #if DEBUG_CLUSTER_PID_RELEASE
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( DEBUG_CLUSTER_PID_RELEASE < cycle )
+ printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
+ #endif
+
      cxy_t  owner_cxy = CXY_FROM_PID( pid );
      lpid_t lpid      = LPID_FROM_PID( pid );

      pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

-     // check pid argument
-     assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER) && (owner_cxy == local_cxy) ,
-     __FUNCTION__ , "illegal PID" );
+     // check lpid
+     assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER), __FUNCTION__ ,
+     "illegal LPID = %d" , lpid );
+
+     // check owner cluster
+     assert( (owner_cxy == local_cxy) , __FUNCTION__ ,
+     "local_cluster %x != owner_cluster %x" , local_cxy , owner_cxy );

      // get the process manager lock
…
      // release the processs_manager lock
      spinlock_unlock( &pm->pref_lock );

+ #if DEBUG_CLUSTER_PID_RELEASE
+ cycle = (uint32_t)hal_get_cycles();
+ if( DEBUG_CLUSTER_PID_RELEASE < cycle )
+ printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
+ #endif
+
  } // end cluster_pid_release()
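cluster_select_local_core() now picks the core whose scheduler holds the fewest threads, instead of the old "usage" counter. A self-contained sketch of that argmin scan; the scheduler struct and the values below are illustrative:

/* Select the least-loaded core: smallest u_threads_nr + k_threads_nr. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t u_threads_nr; uint32_t k_threads_nr; } scheduler_t;

int main(void)
{
    scheduler_t core_sched[4] = { {3,2}, {1,1}, {4,0}, {0,2} };

    uint32_t min = 1000;   /* same sentinel as the r440 code */
    uint32_t sel = 0;

    for (uint32_t lid = 0; lid < 4; lid++)
    {
        uint32_t nthreads = core_sched[lid].u_threads_nr
                          + core_sched[lid].k_threads_nr;
        if (nthreads < min) { min = nthreads; sel = lid; }
    }
    printf("selected core %u with %u threads\n", sel, min);  /* core 1, 2 threads */
    return 0;
}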
trunk/kernel/kern/cluster.h
r438 → r440

  typedef struct cluster_s
  {
      spinlock_t      kcm_lock;          /*! local, protect creation of KCM allocators */

      // global parameters
      uint32_t        paddr_width;       /*! numer of bits in physical address         */
      uint32_t        x_width;           /*! number of bits to code x_size (can be 0)  */
      uint32_t        y_width;           /*! number of bits to code y_size (can be 0)  */
      uint32_t        x_size;            /*! number of clusters in a row (can be 1)    */
      uint32_t        y_size;            /*! number of clusters in a column (can be 1) */
      cxy_t           io_cxy;            /*! io cluster identifier                     */
      uint32_t        dqdt_root_level;   /*! index of root node in dqdt_tbl[]          */
      uint32_t        nb_txt_channels;   /*! number of TXT channels                    */
      uint32_t        nb_nic_channels;   /*! number of NIC channels                    */
      uint32_t        nb_ioc_channels;   /*! number of IOC channels                    */
      uint32_t        nb_fbf_channels;   /*! number of FBF channels                    */

      // local parameters
      uint32_t        cores_nr;          /*! actual number of cores in cluster         */
      uint32_t        ram_size;          /*! physical memory size                      */
      uint32_t        ram_base;          /*! physical memory base (local address)      */

      core_t          core_tbl[CONFIG_MAX_LOCAL_CORES];   /*! embedded cores           */

      list_entry_t    dev_root;          /*! root of list of devices in cluster        */

      // memory allocators
      ppm_t           ppm;               /*! embedded kernel page manager              */
      khm_t           khm;               /*! embedded kernel heap manager              */
-     kcm_t           kcm;               /*! embedded kernel cache manager (for KCMs)  */
+     kcm_t           kcm;               /*! embedded kernel KCMs manager              */

      kcm_t         * kcm_tbl[KMEM_TYPES_NR];             /*! pointers on allocated KCMs */

      // RPC
-     remote_fifo_t   rpc_fifo;          /*! RPC fifo (one per cluster)                */
-     uint32_t        rpc_threads;       /*! current number of RPC threads in cluster  */
+     remote_fifo_t   rpc_fifo[CONFIG_MAX_LOCAL_CORES];    /*! one RPC FIFO per core   */
+     uint32_t        rpc_threads[CONFIG_MAX_LOCAL_CORES]; /*! RPC threads per core    */

      // DQDT
-     dqdt_node_t     dqdt_tbl[CONFIG_DQDT_LEVELS_NR];    /*! embedded DQDT nodes in cluster */
+     dqdt_node_t     dqdt_tbl[CONFIG_DQDT_LEVELS_NR];    /*! embedded DQDT nodes      */

      // Local process manager
      pmgr_t          pmgr;              /*! embedded process manager                  */

      void          * pic_extend;        /*! PIC implementation specific extension     */
  }
  cluster_t;
trunk/kernel/kern/kernel_init.c
r438 → r440

      dev_pic_enable_timer( CONFIG_SCHED_TICK_MS_PERIOD );

+ #if DEBUG_KERNEL_INIT
+ printk("\n[DBG] %s : thread %x on core[%x,%d] jumps to thread_idle_func() / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , local_cxy , core_lid , (uint32_t)hal_get_cycles() );
+ #endif
+
      // each core jump to thread_idle_func
      thread_idle_func();
trunk/kernel/kern/process.c
r438 → r440

      char rx_path[40];
      char tx_path[40];
+     xptr_t file_xp;
      xptr_t chdev_xp;
      chdev_t * chdev_ptr;
…
      assert( (stdin_id == 0) , __FUNCTION__ , "stdin index must be 0" );

+ #if (DEBUG_PROCESS_REFERENCE_INIT & 1)
+ cycle = (uint32_t)hal_get_cycles();
+ if( DEBUG_PROCESS_REFERENCE_INIT )
+ printk("\n[DBG] %s : thread %x / stdin open for process %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , pid , cycle );
+ #endif
+
      // create stdout pseudo file
      error = vfs_open( process,
…
      assert( (stdout_id == 1) , __FUNCTION__ , "stdout index must be 1" );

+ #if (DEBUG_PROCESS_REFERENCE_INIT & 1)
+ cycle = (uint32_t)hal_get_cycles();
+ if( DEBUG_PROCESS_REFERENCE_INIT )
+ printk("\n[DBG] %s : thread %x / stdout open for process %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , pid , cycle );
+ #endif
+
      // create stderr pseudo file
      error = vfs_open( process,
…
      assert( (stderr_id == 2) , __FUNCTION__ , "stderr index must be 2" );

+ #if (DEBUG_PROCESS_REFERENCE_INIT & 1)
+ cycle = (uint32_t)hal_get_cycles();
+ if( DEBUG_PROCESS_REFERENCE_INIT )
+ printk("\n[DBG] %s : thread %x / stderr open for process %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , pid , cycle );
+ #endif
+
  }
  else   // normal user process
  {
+     // get extended pointer on stdin pseudo file in model process
+     file_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy , &model_ptr->fd_array.array[0] ) );
+
      // get extended pointer on model process TXT chdev
-     chdev_xp = chdev_from_file( model_ptr->fd_array.array[0] );
+     chdev_xp = chdev_from_file( file_xp );

      // get cluster and local pointer on chdev
…
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_PROCESS_DESTROY )
- printk("\n[DBG] %s : thread %x enter to destroy process %x (pid = %x) / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD , process, pid, cycle );
+ printk("\n[DBG] %s : thread %x enter in cluster %x / pid %x / process %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , pid , process , cycle );
  #endif
…
      }

-     // release the process PID to cluster manager
-     cluster_pid_release( pid );
+     // release the process PID to cluster manager if owner cluster
+     if( CXY_FROM_PID( pid ) == local_cxy ) cluster_pid_release( pid );

      // FIXME close all open files and update dirty [AG]
…
      XLIST_FOREACH( root_xp , iter_xp )
      {
+         // atomically increment responses counter
+         hal_atomic_add( (void *)&rpc.responses , 1 );
+
+         process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
+         process_cxy = GET_CXY( process_xp );

  #if DEBUG_PROCESS_SIGACTION
…
  __FUNCTION__ , process_action_str( action_type ) , pid , process_cxy );
  #endif
-         // atomically increment responses counter
-         hal_atomic_add( (void *)&rpc.responses , 1 );
-
-         process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
-         process_cxy = GET_CXY( process_xp );
-
          // call RPC in target cluster
          rpc_process_sigaction_client( process_cxy , &rpc );
…
      hal_restore_irq( save_sr);

-     // client deschedule : will be unblocked by the last RPC server thread
+     // client thread deschedule : will be unblocked by the last RPC server thread
      sched_yield("blocked on rpc_process_sigaction");
…
  /////////////////////////////////////////////////
- void process_block_threads( process_t * process )
+ void process_block_threads( process_t * process,
+                             xptr_t      client_xp )
  {
      thread_t * target;   // pointer on target thread
…
      spinlock_lock( &process->th_lock );

-     // loop to block all threads but the main thread
+     // loop on target process local threads
      // we use both "ltid" and "count" because it can exist "holes" in th_tbl
      for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
…
              count++;

-             // main thread should not be deleted
-             if( (ltid != 0) || (owner_cxy != local_cxy) )
+             // main thread and client thread should not be blocked
+             if( ((ltid != 0) || (owner_cxy != local_cxy)) &&  // not main thread
+                 (client_xp) != XPTR( local_cxy , target ) )   // not client thread
              {
                  // set the global blocked bit in target thread descriptor.
…
  } // end process_block_threads()

- //////////////////////////////////////////////////
- void process_unblock_threads( process_t * process )
- {
-     thread_t * target;    // pointer on target thead
+ ///////////////////////////////////////////////
+ void process_delete_threads( process_t * process,
+                              xptr_t      client_xp )
+ {
      thread_t * this;      // pointer on calling thread
+     thread_t * target;    // local pointer on target thread
+     xptr_t     target_xp; // extended pointer on target thread
+     cxy_t      owner_cxy; // owner process cluster
      uint32_t   ltid;      // index in process th_tbl
-     uint32_t   count;     // requests counter
+     uint32_t   count;     // threads counter

      // get calling thread pointer
      this = CURRENT_THREAD;
+
+     // get target process owner cluster
+     owner_cxy = CXY_FROM_PID( process->pid );

  #if DEBUG_PROCESS_SIGACTION
…
      spinlock_lock( &process->th_lock );

-     // loop on process threads to unblock all threads
+     // loop on target process local threads
      // we use both "ltid" and "count" because it can exist "holes" in th_tbl
      for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
      {
          target = process->th_tbl[ltid];

-         if( target != NULL )    // thread found
+         if( target != NULL )    // valid thread
          {
              count++;
-
-             // reset the global blocked bit in target thread descriptor.
-             thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
+             target_xp = XPTR( local_cxy , target );
+
+             // main thread and client thread should not be blocked
+             if( ((ltid != 0) || (owner_cxy != local_cxy)) &&  // not main thread
+                 (client_xp) != target_xp )                    // not client thread
+             {
+                 // mark target thread for delete and block it
+                 thread_delete( target_xp , process->pid , false );   // not forced
+             }
          }
      }
…
  #endif

- } // end process_unblock_threads()
-
- //////////////////////////////////////////////////
- void process_delete_threads( process_t * process )
- {
-     thread_t * target;    // pointer on target thread
+ } // end process_delete_threads()
+
+ ///////////////////////////////////////////////////
+ void process_unblock_threads( process_t * process )
+ {
+     thread_t * target;    // pointer on target thead
+     thread_t * this;      // pointer on calling thread
      uint32_t   ltid;      // index in process th_tbl
-     uint32_t   count;     // threads counter
+     uint32_t   count;     // requests counter
+
+     // get calling thread pointer
+     this = CURRENT_THREAD;

  #if DEBUG_PROCESS_SIGACTION
…
  printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
+ __FUNCTION__ , this , process->pid , local_cxy , cycle );
  #endif
…
      // get lock protecting process th_tbl[]
      spinlock_lock( &process->th_lock );

-     // loop to set the REQ_DELETE flag on all threads but the main
+     // loop on process threads to unblock all threads
      // we use both "ltid" and "count" because it can exist "holes" in th_tbl
      for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
      {
          target = process->th_tbl[ltid];

          if( target != NULL )    // thread found
          {
              count++;
-
-             thread_kill( XPTR( local_cxy , target ),
-                          false,    // is_exit
-                          true );   // is_forced
+
+             // reset the global blocked bit in target thread descriptor.
+             thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
          }
      }
…
  printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
+ __FUNCTION__ , this , process->pid , local_cxy , cycle );
  #endif

- } // end process_delete_threads()
+ } // end process_unblock_threads()
…
      // allocate memory for a new local process descriptor
-     // and initialise it from reference cluster if required
+     // and initialise it from reference cluster if not found
      if( !found )
      {
…
          if( error ) return NULL;
      }

+ #if DEBUG_PROCESS_GET_LOCAL_COPY
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
+ printk("\n[DBG] %s : enter in cluster %x / pid %x / process %x / cycle %d\n",
+ __FUNCTION__ , local_cxy , pid , process_ptr , cycle );
+ #endif
+
      return process_ptr;
…
      // check parent process is the reference process
      ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
-
-     printk("\n@@@ %s : parent_cxy = %x / parent_ptr = %x / ref_cxy = %x / ref_ptr = %x\n",
-     __FUNCTION__, parent_process_cxy, parent_process_ptr, GET_CXY( ref_xp ), GET_PTR( ref_xp ) );

      assert( (parent_process_xp == ref_xp ) , __FUNCTION__ ,
trunk/kernel/kern/process.h
r436 r440  101 101 * 4) The <sem_root>, <mutex_root>, <barrier_root>, <condvar_root>, and the associated 102 102 * <sync_lock>, that are dynamically allocated, are only defined in the reference cluster. 103  * 5) The <children_root>, <children_nr>, < brothers_list>, and <txt_list> fields are only 103 * 5) The <children_root>, <children_nr>, <children_list>, and <txt_list> fields are only 104 104 * defined in the reference cluster, and are undefined in other clusters. 105 105 * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, <th_lock> fields 106 106 * are defined in all process descriptors copies. 107 107 * 7) The termination <flags> and <exit_status> are only defined in the reference cluster.  108 * The term state format is defined in the shared_syscalls.h file. 108 109 ********************************************************************************************/ 109 110 … …  282 283 * all threads of a process identified by the <pid> argument, depending on the 283 284 * <action_type> argument. 284  * WARNING : the DELETE a ction isNOT executed on the target process main thread285  * (thread 0 in process owner cluster) . 285 * WARNING : the DELETE and BLOCK actions are NOT executed on the target process main thread  286 * (thread 0 in process owner cluster), and not executed on the calling thread itself. 286 287 * It uses the multicast, non blocking rpc_process_sigaction_client() function to send 287  * parallel requests to all remote clusters containing a process copy. 288 * parallel requests to all remote clusters containing process copies. 288 289 * Then it blocks and deschedule to wait completion of these parallel requests. 289 290 * … …  305 306 306 307 /********************************************************************************************* 307  * This function blocks all threads - but the main thread - for a given <process> 308  * in a given cluster. It sets the THREAD_BLOCKED_GLOBAL bit in the thread descriptor, 309  * and request the relevant schedulers to acknowledge the blocking, using IPI if required.  308 * This function blocks all threads for a given <process> in the local cluster.  309 * It scan the list of local thread, and sets the THREAD_BLOCKED_GLOBAL bit for all  310 * threads, BUT the main thread (thread 0 in owner cluster), and the client thread  311 * identified by the <client_xp> argument. It request the relevant schedulers to acknowledge  312 * the blocking, using IPI if required, and returns only when all blockable threads  313 * in cluster are actually blocked. 310 314 * The threads are not detached from the scheduler, and not detached from the local process. 311  * This function returns only when all blockable threads in cluster are actually blocked.312 315 ********************************************************************************************* 313 316 * @ process : pointer on the target process descriptor. 314  ********************************************************************************************/ 315  void process_block_threads( process_t * process );  317 * @ client_xp : extended pointer on the client thread that should not be blocked.  318 ********************************************************************************************/  319 void process_block_threads( process_t * process,  320 xptr_t client_xp );  321  322 /*********************************************************************************************  323 * This function marks for deletion all threads for a given <process> in the local cluster.  
324 * It scans the list of local threads, and sets the THREAD_FLAG_REQ_DELETE bit for all  325 * threads, BUT the main thread (thread 0 in owner cluster), and the client thread  326 * identified by the <client_xp> argument.  327 * The actual deletion will be done by the scheduler at the next scheduling point.  328 *********************************************************************************************  329 * @ process : pointer on the process descriptor.  330 * @ client_xp : extended pointer on the client thread that should not be marked.  331 ********************************************************************************************/  332 void process_delete_threads( process_t * process,  333 xptr_t client_xp ); 316 334 317 335 /********************************************************************************************* … …  321 339 ********************************************************************************************/ 322 340 void process_unblock_threads( process_t * process ); 323  324  /*********************************************************************************************325  * This function marks for deletion all threads - but the main thread - for a given <process>326  * in a given cluster. It sets the THREAD_FLAG_REQ_DELETE bit. For each marked thread,327  * the following actions will be done by the scheduler at the next scheduling point:328  * - the thread will be detached from the scheduler.329  * - the thread will be detached from the local process descriptor.330  * - the thread will be detached from parent if required.331  * - the memory allocated to the thread descriptor is released.332  * - the memory allocated to the process descriptor is released, if it is the last thread.333  *********************************************************************************************334  * @ process : pointer on the process descriptor.335  ********************************************************************************************/336  void process_delete_threads( process_t * process );337 341 338 342 /********************************************************************************************* … …  398 402 struct thread_s ** child_thread_ptr ); 399 403 400  401 404 /******************** File Management Operations ****************************************/ 402 405 -
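For reference, the three functions declared above are dispatched from rpc_process_sigaction_server(), visible in the rpc.c hunk below; the server forwards <client_xp> so that the requesting thread can never block or delete itself:

    // call relevant kernel function
    if     ( action == DELETE_ALL_THREADS  ) process_delete_threads ( process , client_xp );
    else if( action == BLOCK_ALL_THREADS   ) process_block_threads  ( process , client_xp );
    else if( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process );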
trunk/kernel/kern/rpc.c
r438 r440  114 114 client_core_lid = this->core->lid; 115 115 116  // select a server_core index: 117  // use client core index if possible / core 0 otherwise  116 // select a server_core : use client core index if possible / core 0 otherwise 118 117 if( client_core_lid < hal_remote_lw( XPTR( server_cxy , &cluster->cores_nr ) ) ) 119 118 { … …  133 132 134 133 // get local pointer on rpc_fifo in remote cluster, 135  remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo ; 134 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid]; 136 135 137 136 // post RPC in remote fifo / deschedule and retry if fifo full … …  231 230 core_t * core = this->core; 232 231 scheduler_t * sched = &core->scheduler; 233  remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo ; 232 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[core->lid]; 234 233 235 234 #if DEBUG_RPC_SERVER_GENERIC … …  243 242 hal_disable_irq( &sr_save ); 244 243 245  // activate (or create) RPC thread if RPC FIFO not empty  244 // activate (or create) RPC thread if RPC FIFO not empty and no active RPC thread 246 245 if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) ) 247 246 { … …  254 253 #endif 255 254 256  // search one IDLE RPC thread  255 // search one IDLE RPC thread associated to the selected core 257 256 list_entry_t * iter; 258 257 LIST_FOREACH( &sched->k_root , iter ) … …  270 269 } 271 270 272  // create new RPC thread if not found 271 // create new RPC thread for the selected core if not found 273 272 if( found == false ) 274 273 { … …  277 276 &rpc_thread_func, 278 277 NULL, 279  this->core->lid ); 280  if( error ) 281  { 282  assert( false , __FUNCTION__ , 283  "no memory to allocate a new RPC thread in cluster %x", local_cxy ); 284  }  278 core->lid );  279  280 assert( (error == 0), __FUNCTION__ ,  281 "no memory to allocate a new RPC thread in cluster %x", local_cxy ); 285 282 286 283 // unblock created RPC thread 287 284 thread->blocked = 0; 288 285 289  // update core descriptor counter290  hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 ); 286 // update RPC threads counter  287 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[core->lid] , 1 ); 291 288 292 289 #if DEBUG_RPC_SERVER_GENERIC … …  325 322 void rpc_thread_func() 326 323 { 327  uint32_t count; // handled RPC requests counter 328  error_t empty; // local RPC fifo state 329  xptr_t desc_xp; // extended pointer on RPC request 330  cxy_t desc_cxy; // RPC request cluster (client) 331  rpc_desc_t * desc_ptr; // RPC request local pointer 332  uint32_t index; // RPC request index 333  thread_t * thread_ptr; // local pointer on client thread 334  lid_t core_lid; // local index of client core 335  bool_t blocking; // blocking RPC when true  324 error_t empty; // local RPC fifo state  325 xptr_t desc_xp; // extended pointer on RPC request  326 cxy_t desc_cxy; // RPC request cluster (client)  327 rpc_desc_t * desc_ptr; // RPC request local pointer  328 uint32_t index; // RPC request index  329 thread_t * client_ptr; // local pointer on client thread  330 thread_t * server_ptr; // local pointer on server thread  331 xptr_t server_xp; // extended pointer on server thread  332 lid_t client_core_lid; // local index of client core  333 lid_t server_core_lid; // local index of server core  334 bool_t blocking; // blocking RPC when true  335 remote_fifo_t * rpc_fifo; // local pointer on RPC fifo 336 336 337 337 // makes RPC thread not preemptable 338 338 hal_disable_irq( NULL ); 339 339 340  thread_t * this = CURRENT_THREAD; 341  remote_fifo_t *
rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;  340 server_ptr = CURRENT_THREAD;  341 server_xp = XPTR( local_cxy , server_ptr );  342 server_core_lid = server_ptr->core->lid;  343 rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid]; 342 344 343 345 // two nested loops: 344 346 // - external loop : "infinite" RPC thread 345  - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests 347 // - internal loop : handle one RPC request per iteration 346 348 347 349 while(1) // infinite loop 348 350 { 349 351 // try to take RPC_FIFO ownership 350  if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) ) 352 if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) ) 351 353 { 352 354 … …  355 357 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 356 358 printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n", 357  __FUNCTION__, this, local_cxy, cycle ); 358  #endif 359  // initializes RPC requests counter 360  count = 0; 361  362  // exit internal loop in three cases: 363  // - RPC fifo is empty 364  // - ownership has been lost (because descheduling) 365  // - max number of RPCs is reached 366  while( 1 ) // internal loop  359 __FUNCTION__, server_ptr, local_cxy, cycle );  360 #endif  361 while( 1 ) // one RPC request per iteration 367 362 { 368 363 empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp ); 369 364 370  if ( empty == 0 ) // one RPC request found  365 // exit when FIFO empty or FIFO ownership lost (in case of descheduling)  366 if ( (empty == 0) && (rpc_fifo->owner == server_ptr->trdid) ) 371 367 { 372 368 // get client cluster and pointer on RPC descriptor … …  381 377 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 382 378 printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n", 383  __FUNCTION__, this, local_cxy, index, desc_cxy, desc_ptr ); 379 __FUNCTION__, server_ptr, local_cxy, index, desc_cxy, desc_ptr ); 384 380 #endif 385 381 // call the relevant server function … …  390 386 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 391 387 printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n", 392  __FUNCTION__, this, local_cxy, index, desc_ptr, cycle ); 393  #endif 394  // increment handled RPCs counter 395  count++; 396   388 __FUNCTION__, server_ptr, local_cxy, index, desc_ptr, cycle );  389 #endif 397 390 // decrement response counter in RPC descriptor if blocking 398 391 if( blocking ) … …  402 395 403 396 // get client thread pointer and client core lid from RPC descriptor 404  thread_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );405  core_lid = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) ); 397 client_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );  398 client_core_lid = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) ); 406 399 407 400 // unblock client thread 408  thread_unblock( XPTR( desc_cxy , thread_ptr ) , THREAD_BLOCKED_RPC ); 401 thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC ); 409 402 410 403 hal_fence(); … …  414 407 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 415 408 printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x)\n", 416  __FUNCTION__, this, local_cxy, thread_ptr, desc_cxy, cycle ); 409 __FUNCTION__, server_ptr, local_cxy, client_ptr, desc_cxy, cycle ); 417 410 #endif 418 411 // send IPI to client core 419  dev_pic_send_ipi( desc_cxy , core_lid ); 412 dev_pic_send_ipi( desc_cxy , client_core_lid ); 420 413 } 421 414 } 422  423  // check exit condition 424  if(
local_fifo_is_empty( rpc_fifo ) || 425  (rpc_fifo->owner != this->trdid) || 426  (count >= CONFIG_RPC_PENDING_MAX) ) break;  415 else  416 {  417 break;  418 } 427 419 } // end internal loop 428 420 429 421 // release rpc_fifo ownership if not lost 430  if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0; 422 if( rpc_fifo->owner == server_ptr->trdid ) rpc_fifo->owner = 0; 431 423 432 424 } // end if RPC fifo 433 425 434  // suicide if too many RPC threads in cluster 435  if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )  426 // RPC thread blocks on IDLE  427 thread_block( server_xp , THREAD_BLOCKED_IDLE );  428  429 // suicide if too many RPC threads / simply deschedule otherwise  430 if( LOCAL_CLUSTER->rpc_threads[server_core_lid] >= CONFIG_RPC_THREADS_MAX ) 436 431 { 437 432 … …  440 435 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 441 436 printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n", 442  __FUNCTION__, this, local_cxy, cycle ); 437 __FUNCTION__, server_ptr, local_cxy, cycle ); 443 438 #endif 444 439 // update RPC threads counter 445 440 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 ); 446 441 447  // suicide 448  thread_kill( XPTR( local_cxy , this ), 449  true, // is_exit 450  true ); // is forced  442 // RPC thread blocks on GLOBAL  443 thread_block( server_xp , THREAD_BLOCKED_GLOBAL );  444  445 // RPC thread sets the REQ_DELETE flag to suicide  446 hal_remote_atomic_or( server_xp , THREAD_FLAG_REQ_DELETE ); 451 447 }  448 else  449 { 452 450 453 451 #if DEBUG_RPC_SERVER_GENERIC 454 452 uint32_t cycle = (uint32_t)hal_get_cycles(); 455 453 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 456  printk("\n[DBG] %s : RPC thread %x in cluster %x deschedules / cycle %d\n", 457  __FUNCTION__, this, local_cxy, cycle ); 458  #endif 459  460  // Block and deschedule 461  thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IDLE ); 462  sched_yield("RPC fifo empty or too much work"); 463  464  #if DEBUG_RPC_SERVER_GENERIC 465  cycle = (uint32_t)hal_get_cycles(); 466  if( DEBUG_RPC_SERVER_GENERIC < cycle ) 467  printk("\n[DBG] %s : RPC thread %x in cluster %x resumes / cycle %d\n", 468  __FUNCTION__, this, local_cxy, cycle ); 469  #endif  454 printk("\n[DBG] %s : RPC thread %x in cluster %x block & deschedules / cycle %d\n",  455 __FUNCTION__, server_ptr, local_cxy, cycle );  456 #endif  457  458 // RPC thread deschedules  459 assert( thread_can_yield( server_ptr ) , __FUNCTION__, "illegal sched_yield\n" );  460 sched_yield("RPC fifo empty");  461 } 470 462 471 463 } // end infinite loop … …  646 638 647 639 // set input arguments in RPC descriptor 648  rpc.args[0] = (uint64_t)(intptr_t)ref_process_xp;649  rpc.args[1] = (uint64_t)(intptr_t)parent_thread_xp; 640 rpc.args[0] = (uint64_t)ref_process_xp;  641 rpc.args[1] = (uint64_t)parent_thread_xp; 650 642 651 643 // register RPC request in remote RPC fifo … …  903 895 void rpc_process_sigaction_server( xptr_t xp ) 904 896 { 905  pid_t pid; // target process identifier 906  process_t * process; // pointer on local target process descriptor 907  uint32_t action; // sigaction index 908  thread_t * client_thread; // pointer on client thread in client cluster 909  cxy_t client_cxy; // client cluster identifier 910  rpc_desc_t * rpc; // pointer on rpc descriptor in client cluster 911  xptr_t count_xp; // extended pointer on response counter 912  lid_t client_lid; // client core local index  897 pid_t pid; // target process identifier  898 process_t * process; // pointer on local target process descriptor  899 uint32_t
action; // sigaction index  900 thread_t * client_ptr; // pointer on client thread in client cluster  901 xptr_t client_xp; // extended pointer on client thread  902 cxy_t client_cxy; // client cluster identifier  903 rpc_desc_t * rpc; // pointer on rpc descriptor in client cluster  904 xptr_t count_xp; // extended pointer on responses counter  905 uint32_t count_value; // responses counter value  906 lid_t client_lid; // client core local index 913 907 914 908 // get client cluster identifier and pointer on RPC descriptor … …  927 921 #endif 928 922  923 // get client thread pointers  924 client_ptr = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );  925 client_xp = XPTR( client_cxy , client_ptr );  926 929 927 // get local process descriptor 930 928 process = cluster_get_local_process_from_pid( pid ); 931 929 932 930 // call relevant kernel function 933  if ( action == DELETE_ALL_THREADS ) process_delete_threads ( process );934  else if ( action == BLOCK_ALL_THREADS ) process_block_threads ( process ); 931 if ( action == DELETE_ALL_THREADS ) process_delete_threads ( process , client_xp );  932 else if ( action == BLOCK_ALL_THREADS ) process_block_threads ( process , client_xp ); 935 933 else if ( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process ); 936 934 … …  939 937 940 938 // decrement the responses counter in RPC descriptor  939 count_value = hal_remote_atomic_add( count_xp , -1 );  940 941 941 // unblock the client thread only if it is the last response. 942  if( hal_remote_atomic_add( count_xp , -1 ) == 1 ) 942 if( count_value == 1 ) 943 943 { 944  // get client thread pointer and client core lid 945  client_thread = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );  944 // get client core lid 946 945 client_lid = (lid_t) hal_remote_lw ( XPTR( client_cxy , &rpc->lid ) ); 947 946 948  thread_unblock( XPTR( client_cxy , client_thread ) , THREAD_BLOCKED_RPC );  947 // unblock client thread  948 thread_unblock( client_xp , THREAD_BLOCKED_RPC );  949  950 // send an IPI to client core 949 951 dev_pic_send_ipi( client_cxy , client_lid ); 950 952 } … …  1192 1194 vfs_dentry_t * dentry ) 1193 1195 {  1196 #if DEBUG_RPC_VFS_DENTRY_DESTROY  1197 uint32_t cycle = (uint32_t)hal_get_cycles();  1198 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )  1199 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",  1200 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );  1201 #endif  1202 1194 1203 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1195 1204 … …  1206 1215 rpc_send( cxy , &rpc ); 1207 1216  1217 #if DEBUG_RPC_VFS_DENTRY_DESTROY  1218 cycle = (uint32_t)hal_get_cycles();  1219 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )  1220 printk("\n[DBG] %s : thread %x exit / cycle %d\n",  1221 __FUNCTION__ , CURRENT_THREAD , cycle );  1222 #endif 1208 1223 } 1209 1224 … …  1211 1226 void rpc_vfs_dentry_destroy_server( xptr_t xp ) 1212 1227 {  1228 #if DEBUG_RPC_VFS_DENTRY_DESTROY  1229 uint32_t cycle = (uint32_t)hal_get_cycles();  1230 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )  1231 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",  1232 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );  1233 #endif  1234 1213 1235 vfs_dentry_t * dentry; 1214 1236 … …  1223 1245 vfs_dentry_destroy( dentry ); 1224 1246  1247 #if DEBUG_RPC_VFS_DENTRY_DESTROY  1248 cycle = (uint32_t)hal_get_cycles();  1249 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )  1250
printk("\n[DBG] %s : thread %x exit / cycle %d\n",  1251 __FUNCTION__ , CURRENT_THREAD , cycle );  1252 #endif 1225 1253 } 1226 1254 … …  1319 1347 vfs_file_t * file ) 1320 1348 {  1349 #if DEBUG_RPC_VFS_FILE_DESTROY  1350 uint32_t cycle = (uint32_t)hal_get_cycles();  1351 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )  1352 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",  1353 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );  1354 #endif  1355 1321 1356 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1322 1357 … …  1333 1368 rpc_send( cxy , &rpc ); 1334 1369  1370 #if DEBUG_RPC_VFS_FILE_DESTROY  1371 cycle = (uint32_t)hal_get_cycles();  1372 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )  1373 printk("\n[DBG] %s : thread %x exit / cycle %d\n",  1374 __FUNCTION__ , CURRENT_THREAD , cycle );  1375 #endif 1335 1376 } 1336 1377 … …  1338 1379 void rpc_vfs_file_destroy_server( xptr_t xp ) 1339 1380 {  1381 #if DEBUG_RPC_VFS_FILE_DESTROY  1382 uint32_t cycle = (uint32_t)hal_get_cycles();  1383 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )  1384 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",  1385 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );  1386 #endif  1387 1340 1388 vfs_file_t * file; 1341 1389 … …  1350 1398 vfs_file_destroy( file ); 1351 1399  1400 #if DEBUG_RPC_VFS_FILE_DESTROY  1401 cycle = (uint32_t)hal_get_cycles();  1402 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )  1403 printk("\n[DBG] %s : thread %x exit / cycle %d\n",  1404 __FUNCTION__ , CURRENT_THREAD , cycle );  1405 #endif 1352 1406 } 1353 1407 … …  1536 1590 error_t * error ) // out 1537 1591 {  1592 #if DEBUG_RPC_VMM_GET_VSEG  1593 uint32_t cycle = (uint32_t)hal_get_cycles();  1594 if( cycle > DEBUG_RPC_VMM_GET_VSEG )  1595 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",  1596 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );  1597 #endif  1598 1538 1599 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1539 1600 … …  1555 1616 *error = (error_t)rpc.args[3]; 1556 1617  1618 #if DEBUG_RPC_VMM_GET_VSEG  1619 cycle = (uint32_t)hal_get_cycles();  1620 if( cycle > DEBUG_RPC_VMM_GET_VSEG )  1621 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",  1622 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );  1623 #endif 1557 1624 } 1558 1625 … …  1560 1627 void rpc_vmm_get_vseg_server( xptr_t xp ) 1561 1628 {  1629 #if DEBUG_RPC_VMM_GET_VSEG  1630 uint32_t cycle = (uint32_t)hal_get_cycles();  1631 if( cycle > DEBUG_RPC_VMM_GET_VSEG )  1632 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",  1633 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );  1634 #endif  1635 1562 1636 process_t * process; 1563 1637 intptr_t vaddr; … …  1582 1656 hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1583 1657 1584  } 1585  1586  1587  ///////////////////////////////////////////////////////////////////////////////////////// 1588  // [21] Marshaling functions attached to RPC_VMM_GET_PTE (blocking)  1658 #if DEBUG_RPC_VMM_GET_VSEG  1659 cycle = (uint32_t)hal_get_cycles();  1660 if( cycle > DEBUG_RPC_VMM_GET_VSEG )  1661 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",  1662 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );  1663 #endif  1664 }  1665  1666  1667 
/////////////////////////////////////////////////////////////////////////////////////////  1668 // [21] Marshaling functions attached to RPC_VMM_GET_VSEG (blocking) 1589 1669 ///////////////////////////////////////////////////////////////////////////////////////// 1590 1670 … …  1598 1678 error_t * error ) // out 1599 1679 { 1600  assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1601  1602  // initialise RPC descriptor header 1603  rpc_desc_t rpc; 1604  rpc.index = RPC_VMM_GET_PTE;  1680 #if DEBUG_RPC_VMM_GET_PTE  1681 uint32_t cycle = (uint32_t)hal_get_cycles();  1682 if( cycle > DEBUG_RPC_VMM_GET_PTE )  1683 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",  1684 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );  1685 #endif  1686  1687 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");  1688  1689 // initialise RPC descriptor header  1690 rpc_desc_t rpc;  1691 rpc.index = RPC_VMM_GET_VSEG; 1605 1692 rpc.blocking = true; 1606 1693 rpc.responses = 1; … …  1619 1706 *error = (error_t)rpc.args[5]; 1620 1707  1708 #if DEBUG_RPC_VMM_GET_PTE  1709 cycle = (uint32_t)hal_get_cycles();  1710 if( cycle > DEBUG_RPC_VMM_GET_PTE )  1711 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",  1712 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );  1713 #endif 1621 1714 } 1622 1715 … …  1624 1717 void rpc_vmm_get_pte_server( xptr_t xp ) 1625 1718 {  1719 #if DEBUG_RPC_VMM_GET_PTE  1720 uint32_t cycle = (uint32_t)hal_get_cycles();  1721 if( cycle > DEBUG_RPC_VMM_GET_PTE )  1722 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",  1723 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );  1724 #endif  1725 1626 1726 process_t * process; 1627 1727 vpn_t vpn; … …  1648 1748 hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error ); 1649 1749  1750 #if DEBUG_RPC_VMM_GET_PTE  1751 cycle = (uint32_t)hal_get_cycles();  1752 if( cycle > DEBUG_RPC_VMM_GET_PTE )  1753 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",  1754 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );  1755 #endif 1650 1756 } 1651 1757 -
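The per-core FIFO introduced in this file is drained by at most one server thread at a time, using the <owner> field as a lock: hal_atomic_test_set() takes ownership, and ownership is released only if it was not stolen while the server was descheduled. A compilable stand-alone model of that protocol in C11 atomics (type and helper names are illustrative, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    // one consumer at a time drains the FIFO / owner == 0 means "free"
    typedef struct
    {
        atomic_uint owner;       // trdid of the current server thread, or 0
        // ... FIFO storage ...
    } owned_fifo_t;

    // model of hal_atomic_test_set( &rpc_fifo->owner , trdid ) :
    // returns true if the calling thread becomes the unique owner
    static bool fifo_take( owned_fifo_t * fifo , unsigned trdid )
    {
        unsigned expected = 0;
        return atomic_compare_exchange_strong( &fifo->owner , &expected , trdid );
    }

    // model of "if( rpc_fifo->owner == server_ptr->trdid ) rpc_fifo->owner = 0;" :
    // release only if ownership was not lost while descheduled
    static void fifo_release( owned_fifo_t * fifo , unsigned trdid )
    {
        unsigned expected = trdid;
        atomic_compare_exchange_strong( &fifo->owner , &expected , 0 );
    }

The compare-and-swap in fifo_release() keeps the release safe: a server that lost ownership while descheduled leaves the new owner undisturbed.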
trunk/kernel/kern/scheduler.c
r438 r440  125 125 thread = LIST_ELEMENT( current , thread_t , sched_list ); 126 126 127  // execute RPC thread if non blocked 128  if( (thread->blocked == 0) && 129  (thread->type == THREAD_RPC) ) 130  { 131  spinlock_unlock( &sched->lock ); 132  return thread; 133  } 134  135  // execute DEV thread if non blocked and waiting queue non empty 136  if( (thread->blocked == 0) && 137  (thread->type == THREAD_DEV) && 138  (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) )  127 // select kernel thread if non blocked and non IDLE  128 if( (thread->blocked == 0) && (thread->type != THREAD_IDLE) ) 139 129 { 140 130 spinlock_unlock( &sched->lock ); … …  186 176 187 177 list_entry_t * iter;  178 list_entry_t * root; 188 179 thread_t * thread; 189 180 process_t * process; 190 181  182 // get pointer on scheduler 191 183 scheduler_t * sched = &core->scheduler;  184  185 // get pointer on user threads root  186 root = &sched->u_root; 192 187 193 188 // take lock protecting threads lists 194 189 spinlock_lock( &sched->lock ); 195 190  191 // We use a while to scan the user threads, to control the iterator increment,  192 // because some threads will be destroyed, and we cannot use LIST_FOREACH()  193  194 // initialise list iterator  195 iter = root->next;  196 196 197 // scan all user threads 197  LIST_FOREACH( &sched->u_root , iter ) 198  {  198 while( iter != root )  199 {  200 // get pointer on thread 199 201 thread = LIST_ELEMENT( iter , thread_t , sched_list );  202  203 // increment iterator  204 iter = iter->next; 200 205 201 206 // handle REQ_ACK … …  219 224 process = thread->process; 220 225  226 // release FPU if required  227 if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL;  228  229 // remove thread from scheduler (scheduler lock already taken)  230 uint32_t threads_nr = sched->u_threads_nr;  231  232 assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" );  233  234 sched->u_threads_nr = threads_nr - 1;  235 list_unlink( &thread->sched_list );  236 if( threads_nr == 1 ) sched->u_last = NULL;  237  238 // delete thread  239 thread_destroy( thread );  240 221 241 #if DEBUG_SCHED_HANDLE_SIGNALS 222 242 uint32_t cycle = (uint32_t)hal_get_cycles(); 223 243 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 224  printk("\n[DBG] %s : thread %x in process %x must be deleted / cycle %d\n", 225  __FUNCTION__ , thread , process->pid , cycle ); 226  #endif 227  // release FPU if required 228  if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL; 229  230  // detach thread from parent if attached 231  if( (thread->flags & THREAD_FLAG_DETACHED) == 0 ) 232  thread_child_parent_unlink( thread->parent , XPTR( local_cxy , thread ) ); 233  234  // remove thread from scheduler (scheduler lock already taken) 235  uint32_t threads_nr = sched->u_threads_nr; 236  assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" ); 237  sched->u_threads_nr = threads_nr - 1; 238  list_unlink( &thread->sched_list ); 239  if( threads_nr == 1 ) sched->u_last = NULL; 240  241  // delete thread 242  thread_destroy( thread ); 243  244  #if DEBUG_SCHED_HANDLE_SIGNALS 245  cycle = (uint32_t)hal_get_cycles(); 246  if( DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 247  printk("\n[DBG] %s : thread %x in process %x has been deleted / cycle %d\n", 248  __FUNCTION__ , thread , process->pid , cycle );  244 printk("\n[DBG] %s : thread %x in process %x (%x) deleted / cycle %d\n",  245 __FUNCTION__ , thread , process->pid , process , cycle ); 249 246 #endif 250 247
// destroy process descriptor if no more threads … …  314 311 { 315 312  313 if( (local_cxy == 0X1) && (core->lid == 1) && ((uint32_t)current == 0xcc000) )  314 printk("\n@@@@@ cc000 exit at cycle %d\n", (uint32_t)hal_get_cycles() );  315  316 if( (local_cxy == 0X1) && (core->lid == 1) && ((uint32_t)next == 0xcc000) )  317 printk("\n@@@@@ cc000 enter at cycle %d\n", (uint32_t)hal_get_cycles() );  318 316 319 #if DEBUG_SCHED_YIELD 317 320 uint32_t cycle = (uint32_t)hal_get_cycles(); -
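The rewritten scan in the scheduler hunk above replaces LIST_FOREACH() with an explicit while loop because thread_destroy() may free the current element: the iterator must be advanced before the element disappears. A self-contained model of this deletion-safe idiom (the kernel uses list_entry_t and LIST_ELEMENT() with the same offsetof technique; names here are illustrative):

    #include <stddef.h>
    #include <stdlib.h>

    typedef struct node { struct node * next; struct node * prev; } node_t;
    typedef struct item { int marked; node_t entry; } item_t;

    #define ELEMENT( ptr , type , member ) \
        ((type *)((char *)(ptr) - offsetof( type , member )))

    static void unlink_node( node_t * n )
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    // deletion-safe scan : advance the iterator BEFORE the element may be freed
    static void reap_marked( node_t * root )
    {
        node_t * iter = root->next;
        while( iter != root )
        {
            item_t * item = ELEMENT( iter , item_t , entry );
            iter = iter->next;                   // increment first
            if( item->marked )
            {
                unlink_node( &item->entry );
                free( item );                    // safe : iter no longer points here
            }
        }
    }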
trunk/kernel/kern/thread.c
r438 r440  184 184 thread->blocked = THREAD_BLOCKED_GLOBAL; 185 185 186  // reset children list 187  xlist_root_init( XPTR( local_cxy , &thread->children_root ) ); 188  thread->children_nr = 0; 189  190  // reset sched list and brothers list  186 // reset sched list 191 187 list_entry_init( &thread->sched_list ); 192  xlist_entry_init( XPTR( local_cxy , &thread->brothers_list ) );193 188 194 189 // reset thread info … …  238 233 // get process descriptor local copy 239 234 process = process_get_local_copy( pid );  235 240 236 if( process == NULL ) 241 237 { … …  604 600 /////////////////////////////////////////////////////////////////////////////////////// 605 601 // TODO: check that all memory dynamically allocated during thread execution 606  // has been released, using a cache of mmap and mallocrequests. [AG] 602 // has been released, using a cache of mmap requests. [AG] 607 603 /////////////////////////////////////////////////////////////////////////////////////// 608 604 void thread_destroy( thread_t * thread ) … …  619 615 __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle ); 620 616 #endif 621  622  assert( (thread->children_nr == 0) , __FUNCTION__ , "still attached children" );623 617 624 618 assert( (thread->local_locks == 0) , __FUNCTION__ , "all local locks not released" ); … …  663 657 } // end thread_destroy() 664 658 665  /////////////////////////////////////////////////666  void thread_child_parent_link( xptr_t xp_parent,667  xptr_t xp_child )668  {669  // get extended pointers on children list root670  cxy_t parent_cxy = GET_CXY( xp_parent );671  thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );672  xptr_t root = XPTR( parent_cxy , &parent_ptr->children_root );673  674  // get extended pointer on children list entry675  cxy_t child_cxy = GET_CXY( xp_child );676  thread_t * child_ptr = (thread_t *)GET_PTR( xp_child );677  xptr_t entry = XPTR( child_cxy , &child_ptr->brothers_list );678  679  // set the link680  xlist_add_first( root , entry );681  hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , 1 );682  683  } // end thread_child_parent_link()684  685  ///////////////////////////////////////////////////686  void thread_child_parent_unlink( xptr_t xp_parent,687  xptr_t xp_child )688  {689  // get extended pointer on children list lock690  cxy_t parent_cxy = GET_CXY( xp_parent );691  thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );692  xptr_t lock = XPTR( parent_cxy , &parent_ptr->children_lock );693  694  // get extended pointer on children list entry695  cxy_t child_cxy = GET_CXY( xp_child );696  thread_t * child_ptr = (thread_t *)GET_PTR( xp_child );697  xptr_t entry = XPTR( child_cxy , &child_ptr->brothers_list );698  699  // get the lock700  remote_spinlock_lock( lock );701  702  // remove the link703  xlist_unlink( entry );704  hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , -1 );705  706  // release the lock707  remote_spinlock_unlock( lock );708  709  } // thread_child_parent_unlink()710  711 659 ////////////////////////////////////////////////// 712 660 inline void thread_set_req_ack( thread_t * target, … …  846 794 847 795 } // end thread_unblock()  796  797 /* 848 798 849 799 //////////////////////////////////// … …  875 825 process_t * target_process; // pointer on target thread process 876 826 877  // get target thread cluster and pointer 827 // get target thread pointer and cluster 878 828 target_cxy = GET_CXY( target_xp ); 879 829 target_ptr = GET_PTR( target_xp ); … …  883 833 killer_xp = 
XPTR( local_cxy , killer_ptr ); 884 834 885  #if DEBUG_THREAD_KILL 835 #if DEBUG_THREAD_DELETE 886 836 uint32_t cycle = (uint32_t)hal_get_cycles(); 887  if( DEBUG_THREAD_KILL < cycle ) 837 if( DEBUG_THREAD_DELETE < cycle ) 888 838 printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n", 889 839 __FUNCTION__, killer_ptr, target_ptr, cycle ); … …  982 932 else hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL ); 983 933 984  #if DEBUG_THREAD_KILL 934 #if DEBUG_THREAD_DELETE 985 935 cycle = (uint32_t)hal_get_cycles(); 986  if( DEBUG_THREAD_KILL < cycle ) 936 if( DEBUG_THREAD_DELETE < cycle ) 987 937 printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n", 988 938 __FUNCTION__, killer_ptr, target_ptr, cycle ); … …  995 945 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE ); 996 946 997  #if DEBUG_THREAD_KILL 947 #if DEBUG_THREAD_DELETE 998 948 cycle = (uint32_t)hal_get_cycles(); 999  if( DEBUG_THREAD_KILL < cycle ) 949 if( DEBUG_THREAD_DELETE < cycle ) 1000 950 printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n", 1001 951 __FUNCTION__, killer_ptr, target_ptr, cycle ); … …  1005 955 1006 956 } // end thread_kill()  957  958 */  959  960 //////////////////////////////////////  961 void thread_delete( xptr_t target_xp,  962 pid_t pid,  963 bool_t is_forced )  964 {  965 reg_t save_sr; // for critical section  966 bool_t target_join_done; // joining thread arrived first  967 bool_t target_attached; // target thread attached  968 xptr_t killer_xp; // extended pointer on killer thread (this)  969 thread_t * killer_ptr; // pointer on killer thread (this)  970 cxy_t target_cxy; // target thread cluster  971 thread_t * target_ptr; // pointer on target thread  972 xptr_t target_flags_xp; // extended pointer on target thread <flags>  973 uint32_t target_flags; // target thread <flags> value  974 xptr_t target_join_lock_xp; // extended pointer on target thread <join_lock>  975 xptr_t target_join_xp_xp; // extended pointer on target thread <join_xp>  976 trdid_t target_trdid; // target thread identifier  977 ltid_t target_ltid; // target thread local index  978 xptr_t joining_xp; // extended pointer on joining thread  979 thread_t * joining_ptr; // pointer on joining thread  980 cxy_t joining_cxy; // joining thread cluster  981 cxy_t owner_cxy; // process owner cluster  982  983  984 // get target thread pointers, identifiers, and flags  985 target_cxy = GET_CXY( target_xp );  986 target_ptr = GET_PTR( target_xp );  987 target_trdid = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );  988 target_ltid = LTID_FROM_TRDID( target_trdid );  989 target_flags_xp = XPTR( target_cxy , &target_ptr->flags );  990 target_flags = hal_remote_lw( target_flags_xp );  991  992 // get killer thread pointers  993 killer_ptr = CURRENT_THREAD;  994 killer_xp = XPTR( local_cxy , killer_ptr );  995  996 #if DEBUG_THREAD_DELETE  997 uint32_t cycle = (uint32_t)hal_get_cycles();  998 if( DEBUG_THREAD_DELETE < cycle )  999 printk("\n[DBG] %s : killer thread %x enter for target thread %x / cycle %d\n",  1000 __FUNCTION__, killer_ptr, target_ptr, cycle );  1001 #endif  1002  1003 // target thread cannot be the main thread, because the main thread  1004 // must be deleted by the parent process sys_wait() function  1005 owner_cxy = CXY_FROM_PID( pid );  1006 assert( ((owner_cxy != target_cxy) || (target_ltid != 0)), __FUNCTION__,  1007 "target thread cannot be the main thread\n" );  1008  1009 // block the target thread  1010 thread_block(
target_xp , THREAD_BLOCKED_GLOBAL );  1012 // get the attached flag from the target thread descriptor  1013 target_attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) != 0);  1014  1015 // synchronize with the joining thread if the target thread is attached  1016 if( target_attached && (is_forced == false) )  1017 {  1018 // build extended pointers on target thread join fields  1019 target_join_lock_xp = XPTR( target_cxy , &target_ptr->join_lock );  1020 target_join_xp_xp = XPTR( target_cxy , &target_ptr->join_xp );  1021  1022 // enter critical section  1023 hal_disable_irq( &save_sr );  1024  1025 // take the join_lock in target thread descriptor  1026 remote_spinlock_lock( target_join_lock_xp );  1027  1028 // get join_done from target thread descriptor  1029 target_join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);  1030  1031 if( target_join_done ) // joining thread arrived first => unblock the joining thread  1032 {  1033 // get extended pointer on joining thread  1034 joining_xp = (xptr_t)hal_remote_lwd( target_join_xp_xp );  1035 joining_ptr = GET_PTR( joining_xp );  1036 joining_cxy = GET_CXY( joining_xp );  1037  1038 // reset the join_done flag in target thread  1039 hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );  1040  1041 // unblock the joining thread  1042 thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );  1043  1044 // release the join_lock in target thread descriptor  1045 remote_spinlock_unlock( target_join_lock_xp );  1046  1047 // restore IRQs  1048 hal_restore_irq( save_sr );  1049 }  1050 else // this thread arrived first => register flags and deschedule  1051 {  1052 // set the kill_done flag in target thread  1053 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );  1054  1055 // block this thread on BLOCKED_JOIN  1056 thread_block( killer_xp , THREAD_BLOCKED_JOIN );  1057  1058 // set extended pointer on killer thread in target thread  1059 hal_remote_swd( target_join_xp_xp , killer_xp );  1060  1061 // release the join_lock in target thread descriptor  1062 remote_spinlock_unlock( target_join_lock_xp );  1063  1064 // deschedule  1065 sched_yield( "killer thread wait joining thread" );  1066  1067 // restore IRQs  1068 hal_restore_irq( save_sr );  1069 }  1070 } // end if attached  1071  1072 // set the REQ_DELETE flag in target thread descriptor  1073 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );  1074  1075 #if DEBUG_THREAD_DELETE  1076 cycle = (uint32_t)hal_get_cycles();  1077 if( DEBUG_THREAD_DELETE < cycle )  1078 printk("\n[DBG] %s : killer thread %x exit for target thread %x / cycle %d\n",  1079 __FUNCTION__, killer_ptr, target_ptr, cycle );  1080 #endif  1081  1082 } // end thread_delete()  1083  1084 1007 1085 1008 1086 /////////////////////// -
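The attached-mode branch of thread_delete() above is a first-arrival rendezvous: whichever party (joining thread or killer) takes the join_lock first records a flag (JOIN_DONE or KILL_DONE) and blocks; the second party sees the flag and releases both. The kernel blocks and deschedules, recording the waiting party through <join_xp>; this user-space model with a mutex and a condition variable shows the same protocol (all names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct
    {
        pthread_mutex_t lock;        // plays the role of join_lock
        pthread_cond_t  cond;        // stands in for block/unblock + deschedule
        bool join_done;              // joiner arrived first
        bool kill_done;              // killer arrived first
    } join_point_t;

    static void killer_arrives( join_point_t * jp )
    {
        pthread_mutex_lock( &jp->lock );
        if( jp->join_done )                        // joiner got here first
        {
            jp->join_done = false;
            pthread_cond_signal( &jp->cond );      // release the joiner
        }
        else                                       // killer is first : record and wait
        {
            jp->kill_done = true;
            while( jp->kill_done ) pthread_cond_wait( &jp->cond , &jp->lock );
        }
        pthread_mutex_unlock( &jp->lock );
    }

    static void joiner_arrives( join_point_t * jp )
    {
        pthread_mutex_lock( &jp->lock );
        if( jp->kill_done )                        // killer got here first
        {
            jp->kill_done = false;
            pthread_cond_signal( &jp->cond );      // release the killer
        }
        else                                       // joiner is first : record and wait
        {
            jp->join_done = true;
            while( jp->join_done ) pthread_cond_wait( &jp->cond , &jp->lock );
        }
        pthread_mutex_unlock( &jp->lock );
    }

Whatever the arrival order, the killer then sets THREAD_FLAG_REQ_DELETE, and the actual destruction is done asynchronously by the scheduler.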
trunk/kernel/kern/thread.h
r438 r440  171 171 cxy_t fork_cxy; /*! target cluster for next fork() */ 172 172 173  xlist_entry_t children_root; /*! root of list of attached children */174  uint32_t children_nr; /*! number of attached children threads */175  remote_spinlock_t * children_lock; /*! lock protecting the children list */176  177  xlist_entry_t brothers_list; /*! list of attached threads to same parent */178  179 173 list_entry_t sched_list; /*! member of threads attached to same core */ 180 174 … …  222 216 * in an existing process. It allocates memory for an user thread descriptor in the 223 217 * local cluster, and initializes it from information contained in the arguments. 224  * The CPU context is initialized from scratch. If required by the <attr> argument, 225  * the new thread is attached to the core specified in <attr>.  218 * The CPU context is initialized from scratch. 226 219 * It is registered in the local process descriptor specified by the <pid> argument. 227  * The thread descriptor pointer is returned to allow the parent thread to register it228  * in its children list.229 220 * The THREAD_BLOCKED_GLOBAL bit is set => the thread must be activated to start. 230 221 *************************************************************************************** … …  325 316 326 317 /*************************************************************************************** 327  * This function registers a child thread in the global list of attached328  * children threads of a parent thread.329  * It does NOT take a lock, as this function is always called by the parent thread.330  ***************************************************************************************331  * @ parent_xp : extended pointer on the parent thread descriptor.332  * @ child_xp : extended pointer on the child thread descriptor.333  **************************************************************************************/334  void thread_child_parent_link( xptr_t parent_xp,335  xptr_t child_xp );336  337  /***************************************************************************************338  * This function removes an user thread from the parent thread global list339  * of attached children threads.340  ***************************************************************************************341  * @ parent_xp : extended pointer on the parent thread descriptor.342  * @ child_xp : extended pointer on the child thread descriptor.343  **************************************************************************************/344  void thread_child_parent_unlink( xptr_t parent_xp,345  xptr_t child_xp );346  347  /***************************************************************************************348 318 * This function is used by a "blocker" thread running in the same cluster as a "target" 349 319 * thread to request the scheduler of the target thread to acknowledge that the target … …  386 356 387 357 /*************************************************************************************** 388  * This function is called to handle the four pthread_cancel(), pthread_exit(), 389  * kill() and exit() system calls. It kills a "target" thread identified by the 390  * <thread_xp> argument. The "killer" thread can be the "target" thread, when the 391  * <is_exit> argument is true. The "killer" thread can run in any cluster, 392  * as it uses remote accesses. 
393  * If the "target" thread is running in "attached" mode, and the <is_forced> argument  358 * This function is used by the four sys_thread_cancel(), sys_thread_exit(),  359 * sys_kill() and sys_exit() system calls to delete a given thread.  360 * It sets the THREAD_BLOCKED_GLOBAL bit and the THREAD_FLAG_REQ_DELETE bit  361 * in the thread descriptor identified by the <thread_xp> argument, to ask the scheduler  362 * to asynchronously delete the target thread at the next scheduling point.  363 * The calling thread can run in any cluster, as it uses remote accesses, but  364 * the target thread cannot be the main thread of the process identified by the <pid>  365 * argument, because the main thread must be deleted by the parent process sys_wait() function.  366 * If the target thread is running in "attached" mode, and the <is_forced> argument 394 367 * is false, this function implements the required synchronisation with the joining 395  * thread, blocking the "killer" thread until the pthread_join() syscall is executed. 396  * To delete the target thread, this function sets the THREAD_FLAG_REQ_DELETE bit 397  * and the THREAD BLOCKED_GLOBAL bit in the target thread, and the actual destruction 398  * is asynchronously done by the scheduler at the next scheduling point.  368 * thread, blocking the calling thread until the pthread_join() syscall is executed. 399 369 *************************************************************************************** 400 370 * @ thread_xp : extended pointer on the target thread. 401  * @ is_exit : the killer thread is the target thread itself.402  * @ is_forced : the killing does not depend on the attached mode.403  **************************************************************************************/ 404  void thread_kill( xptr_t thread_xp,405  bool_t is_exit,406  bool_t is_forced ); 371 * @ pid : process identifier (to get the owner cluster identifier).  372 * @ is_forced : the deletion does not depend on the attached mode.  373 **************************************************************************************/  374 void thread_delete( xptr_t thread_xp,  375 pid_t pid,  376 bool_t is_forced ); 407 377 408 378 /*************************************************************************************** -
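A minimal call sketch for the new interface, matching how the sys_thread_cancel()-style callers described above are expected to use it (variable names are illustrative):

    // ask the scheduler to delete an attached target thread, honouring pthread_join()
    thread_delete( XPTR( target_cxy , target_ptr ),    // extended pointer on target
                   process->pid,                       // identifies the owner cluster
                   false );                            // is_forced : respect attached mode

Passing true as <is_forced> skips the join rendezvous altogether.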
trunk/kernel/kernel_config.h
r439 r440  38 38 39 39 40  #define CONFIG_DEBUG_CHDEV_CMD_RX 0 41  #define CONFIG_DEBUG_CHDEV_CMD_TX 0 42  #define CONFIG_DEBUG_CHDEV_SERVER_RX 0 43  #define CONFIG_DEBUG_CHDEV_SERVER_TX 0 44  45  #define CONFIG_DEBUG_CLUSTER_INIT 0 46  #define CONFIG_DEBUG_CLUSTER_PROCESS_COPIES 0 47  48  #define CONFIG_DEBUG_DEV_TXT_RX 0 49  #define CONFIG_DEBUG_DEV_TXT_TX 0 50  #define CONFIG_DEBUG_DEV_IOC_RX 0 51  #define CONFIG_DEBUG_DEV_IOC_TX 0 52  #define CONFIG_DEBUG_DEV_NIC_RX 0 53  #define CONFIG_DEBUG_DEV_NIC_RX 0 54  #define CONFIG_DEBUG_DEV_FBF_RX 0 55  #define CONFIG_DEBUG_DEV_FBF_TX 0 56  #define CONFIG_DEBUG_DEV_DMA 0 57  #define CONFIG_DEBUG_DEV_MMC 0 58  #define CONFIG_DEBUG_DEV_PIC 0 59  60  #define CONFIG_DEBUG_DEVFS_INIT 0 61  #define CONFIG_DEBUG_DEVFS_MOVE 0 62  63  #define CONFIG_DEBUG_FATFS_INIT 0 64  #define CONFIG_DEBUG_FATFS_MOVE 0 65  #define CONFIG_DEBUG_FATFS_LOAD 0 66  67  #define CONFIG_DEBUG_GPT_ACCESS 0 68  69  #define CONFIG_DEBUG_HAL_KENTRY 0 70  #define CONFIG_DEBUG_HAL_EXCEPTIONS 0 71  #define CONFIG_DEBUG_HAL_IRQS 0 72  #define CONFIG_DEBUG_HAL_TXT_RX 0 73  #define CONFIG_DEBUG_HAL_TXT_TX 0 74  #define CONFIG_DEBUG_HAL_IOC_RX 0 75  #define CONFIG_DEBUG_HAL_IOC_TX 0 76  77  #define CONFIG_DEBUG_KCM 0 78  #define CONFIG_DEBUG_KMEM 0 79  80  #define CONFIG_DEBUG_KERNEL_INIT 0 81  #define CONFIG_DEBUG_KMEM_ALLOC 0 82  83  #define CONFIG_DEBUG_MAPPER_GET_PAGE 0 84  #define CONFIG_DEBUG_MAPPER_MOVE_USER 0 85  #define CONFIG_DEBUG_MAPPER_MOVE_KERNEL 0 86  87  #define CONFIG_DEBUG_PPM_ALLOC_PAGES 0 88  #define CONFIG_DEBUG_PPM_FREE_PAGES 0 89  90  #define CONFIG_DEBUG_PROCESS_COPY_INIT 0 91  #define CONFIG_DEBUG_PROCESS_DESTROY 0 92  #define CONFIG_DEBUG_PROCESS_INIT_CREATE 0 93  #define CONFIG_DEBUG_PROCESS_MAKE_EXEC 1 94  #define CONFIG_DEBUG_PROCESS_MAKE_FORK 1 95  #define CONFIG_DEBUG_PROCESS_REFERENCE_INIT 0 96  #define CONFIG_DEBUG_PROCESS_SIGACTION 0 97  #define CONFIG_DEBUG_PROCESS_TXT_ATTACH 0 98  #define CONFIG_DEBUG_PROCESS_ZERO_CREATE 0 99  100  #define CONFIG_DEBUG_RPC_MARSHALING 0 101  #define CONFIG_DEBUG_RPC_SEND 0 102  #define CONFIG_DEBUG_RPC_SERVER 0 103  104  #define CONFIG_DEBUG_SCHED_HANDLE_SIGNALS 0 105  #define CONFIG_DEBUG_SCHED_YIELD 0 106  107  #define CONFIG_DEBUG_SYSCALLS_ERROR 2 108  109  #define CONFIG_DEBUG_SYS_DISPLAY 0 110  #define CONFIG_DEBUG_SYS_EXEC 1 111  #define CONFIG_DEBUG_SYS_EXIT 0 112  #define CONFIG_DEBUG_SYS_FG 0 113  #define CONFIG_DEBUG_SYS_FORK 1 114  #define CONFIG_DEBUG_SYS_GET_CONFIG 0 115  #define CONFIG_DEBUG_SYS_ISATTY 0 116  #define CONFIG_DEBUG_SYS_KILL 1 117  #define CONFIG_DEBUG_SYS_MMAP 0 118  #define CONFIG_DEBUG_SYS_READ 0 119  #define CONFIG_DEBUG_SYS_THREAD_CANCEL 0 120  #define CONFIG_DEBUG_SYS_THREAD_EXIT 0 121  #define CONFIG_DEBUG_SYS_THREAD_JOIN 0 122  #define CONFIG_DEBUG_SYS_THREAD_SLEEP 0 123  #define CONFIG_DEBUG_SYS_THREAD_WAKEUP 0 124  #define CONFIG_DEBUG_SYS_WAIT 0 125  #define CONFIG_DEBUG_SYS_WRITE 0 126  127  #define CONFIG_DEBUG_SPINLOCKS 0 128  #define CONFIG_DEBUG_REMOTE_SPINLOCKS 0 129  #define CONFIG_DEBUG_RWLOCKS 0 130  #define CONFIG_DEBUG_REMOTE_RWLOCKS 0 131  132  #define CONFIG_DEBUG_THREAD_DESTROY 0 133  #define CONFIG_DEBUG_THREAD_IDLE 0 134  #define CONFIG_DEBUG_THREAD_KERNEL_CREATE 0 135  #define CONFIG_DEBUG_THREAD_KILL 0 136  #define CONFIG_DEBUG_THREAD_USER_CREATE 0 137  #define CONFIG_DEBUG_THREAD_USER_FORK 0 138  #define CONFIG_DEBUG_THREAD_BLOCK 0 139  140  #define CONFIG_DEBUG_VFS_INODE_CREATE 0 141  #define CONFIG_DEBUG_VFS_INODE_LOAD 0 142  #define 
CONFIG_DEBUG_VFS_DENTRY_CREATE 0 143  #define CONFIG_DEBUG_VFS_OPEN 0 144  #define CONFIG_DEBUG_VFS_LOOKUP 0 145  #define CONFIG_DEBUG_VFS_ADD_CHILD 0 146  #define CONFIG_DEBUG_VFS_MAPPER_MOVE 0 147  #define CONFIG_DEBUG_VFS_MAPPER_LOAD 0 148  149  #define CONFIG_DEBUG_VMM_CREATE_VSEG 0 150  #define CONFIG_DEBUG_VMM_DESTROY 0 151  #define CONFIG_DEBUG_VMM_FORK_COPY 0 152  #define CONFIG_DEBUG_VMM_GET_ONE_PPN 0 153  #define CONFIG_DEBUG_VMM_GET_PTE 0 154  #define CONFIG_DEBUG_VMM_INIT 0 155  #define CONFIG_DEBUG_VMM_PAGE_ALLOCATE 0 156  #define CONFIG_DEBUG_VMM_SET_COW 0 157  #define CONFIG_DEBUG_VMM_UNMAP_VSEG 0 158  #define CONFIG_DEBUG_VMM_UPDATE_PTE 0  40 #define DEBUG_CHDEV_CMD_RX 0  41 #define DEBUG_CHDEV_CMD_TX 0  42 #define DEBUG_CHDEV_SERVER_RX 0  43 #define DEBUG_CHDEV_SERVER_TX 0  44  45 #define DEBUG_CLUSTER_INIT 0  46 #define DEBUG_CLUSTER_PID_ALLOC 0  47 #define DEBUG_CLUSTER_PID_RELEASE 0  48 #define DEBUG_CLUSTER_PROCESS_COPIES 0  49  50 #define DEBUG_DEV_TXT_RX 0  51 #define DEBUG_DEV_TXT_TX 0  52 #define DEBUG_DEV_IOC_RX 0  53 #define DEBUG_DEV_IOC_TX 0  54 #define DEBUG_DEV_NIC_RX 0  55 #define DEBUG_DEV_NIC_RX 0  56 #define DEBUG_DEV_FBF_RX 0  57 #define DEBUG_DEV_FBF_TX 0  58 #define DEBUG_DEV_DMA 0  59 #define DEBUG_DEV_MMC 0  60 #define DEBUG_DEV_PIC 0  61  62 #define DEBUG_DEVFS_INIT 0  63 #define DEBUG_DEVFS_MOVE 0  64  65 #define DEBUG_FATFS_INIT 0  66 #define DEBUG_FATFS_MOVE 0  67 #define DEBUG_FATFS_LOAD 0  68  69 #define DEBUG_GPT_ACCESS 0  70  71 #define DEBUG_HAL_KENTRY 0  72 #define DEBUG_HAL_EXCEPTIONS 0  73 #define DEBUG_HAL_IRQS 0  74 #define DEBUG_HAL_TXT_RX 0  75 #define DEBUG_HAL_TXT_TX 0  76 #define DEBUG_HAL_IOC_RX 0  77 #define DEBUG_HAL_IOC_TX 0  78  79 #define DEBUG_KCM 0  80 #define DEBUG_KMEM 0  81  82 #define DEBUG_KERNEL_INIT 0  83 #define DEBUG_KMEM_ALLOC 0  84  85 #define DEBUG_MAPPER_GET_PAGE 0  86 #define DEBUG_MAPPER_MOVE_USER 0  87 #define DEBUG_MAPPER_MOVE_KERNEL 0  88  89 #define DEBUG_PPM_ALLOC_PAGES 0  90 #define DEBUG_PPM_FREE_PAGES 0  91  92 #define DEBUG_PROCESS_COPY_INIT 0  93 #define DEBUG_PROCESS_DESTROY 0  94 #define DEBUG_PROCESS_GET_LOCAL_COPY 0  95 #define DEBUG_PROCESS_INIT_CREATE 0  96 #define DEBUG_PROCESS_MAKE_EXEC 0  97 #define DEBUG_PROCESS_MAKE_FORK 0  98 #define DEBUG_PROCESS_REFERENCE_INIT 0  99 #define DEBUG_PROCESS_SIGACTION 0  100 #define DEBUG_PROCESS_TXT_ATTACH 0  101 #define DEBUG_PROCESS_ZERO_CREATE 0  102  103 #define DEBUG_RPC_CLIENT_GENERIC 0  104 #define DEBUG_RPC_SERVER_GENERIC 0  105  106 #define DEBUG_RPC_PMEM_GET_PAGES 0  107 #define DEBUG_RPC_PMEM_RELEASE_PAGES 0  108 #define DEBUG_RPC_PROCESS_MAKE_FORK 0  109 #define DEBUG_RPC_PROCESS_SIGACTION 0  110 #define DEBUG_RPC_VFS_DENTRY_CREATE 0  111 #define DEBUG_RPC_VFS_DENTRY_DESTROY 0  112 #define DEBUG_RPC_VFS_FILE_CREATE 0  113 #define DEBUG_RPC_VFS_FILE_DESTROY 0  114 #define DEBUG_RPC_VMM_GET_PTE 0  115 #define DEBUG_RPC_VMM_GET_VSEG 0  116  117 #define DEBUG_SCHED_HANDLE_SIGNALS 0  118 #define DEBUG_SCHED_YIELD 0  119  120 #define DEBUG_SYSCALLS_ERROR 2  121  122 #define DEBUG_SYS_DISPLAY 0  123 #define DEBUG_SYS_EXEC 2  124 #define DEBUG_SYS_EXIT 2  125 #define DEBUG_SYS_FG 0  126 #define DEBUG_SYS_FORK 2  127 #define DEBUG_SYS_GET_CONFIG 0  128 #define DEBUG_SYS_ISATTY 0  129 #define DEBUG_SYS_KILL 2  130 #define DEBUG_SYS_MMAP 0  131 #define DEBUG_SYS_READ 0  132 #define DEBUG_SYS_THREAD_CANCEL 0  133 #define DEBUG_SYS_THREAD_CREATE 0  134 #define DEBUG_SYS_THREAD_EXIT 0  135 #define DEBUG_SYS_THREAD_JOIN 0  136 #define DEBUG_SYS_THREAD_SLEEP 
0  137 #define DEBUG_SYS_THREAD_WAKEUP 0  138 #define DEBUG_SYS_WAIT 0  139 #define DEBUG_SYS_WRITE 0  140  141 #define DEBUG_SPINLOCKS 0  142 #define DEBUG_REMOTE_SPINLOCKS 0  143 #define DEBUG_RWLOCKS 0  144 #define DEBUG_REMOTE_RWLOCKS 0  145  146 #define DEBUG_THREAD_DESTROY 0  147 #define DEBUG_THREAD_IDLE 0  148 #define DEBUG_THREAD_KERNEL_CREATE 0  149 #define DEBUG_THREAD_KILL 0  150 #define DEBUG_THREAD_USER_CREATE 0  151 #define DEBUG_THREAD_USER_FORK 0  152 #define DEBUG_THREAD_BLOCK 0  153  154 #define DEBUG_VFS_INODE_CREATE 0  155 #define DEBUG_VFS_INODE_LOAD 0  156 #define DEBUG_VFS_DENTRY_CREATE 0  157 #define DEBUG_VFS_OPEN 0  158 #define DEBUG_VFS_LOOKUP 0  159 #define DEBUG_VFS_ADD_CHILD 0  160 #define DEBUG_VFS_MAPPER_MOVE 0  161 #define DEBUG_VFS_MAPPER_LOAD 0  162  163 #define DEBUG_VMM_CREATE_VSEG 0  164 #define DEBUG_VMM_DESTROY 0  165 #define DEBUG_VMM_FORK_COPY 0  166 #define DEBUG_VMM_GET_ONE_PPN 0  167 #define DEBUG_VMM_GET_PTE 0  168 #define DEBUG_VMM_HANDLE_PAGE_FAULT 0  169 #define DEBUG_VMM_INIT 0  170 #define DEBUG_VMM_PAGE_ALLOCATE 0  171 #define DEBUG_VMM_SET_COW 0  172 #define DEBUG_VMM_UNMAP_VSEG 0  173 #define DEBUG_VMM_UPDATE_PTE 0 159 174 160 175 //////////////////////////////////////////////////////////////////////////////////////////// … …  258 273 #define CONFIG_REMOTE_FIFO_SLOTS 16 259 274 #define CONFIG_REMOTE_FIFO_MAX_ITERATIONS 1024 260  261  #define CONFIG_RPC_PENDING_MAX 8 // max requests handled by one server 262  #define CONFIG_RPC_THREADS_MAX 8 // max number of RPC threads per core  275 #define CONFIG_RPC_THREADS_MAX 4 // max number of RPC threads per core 263 276 264 277 //////////////////////////////////////////////////////////////////////////////////////////// -
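All the DEBUG_* variables above follow one idiom, used throughout this changeset: the value 0 compiles the trace out, and a non-zero value acts as a start date, since a trace only fires once the cycle counter exceeds the value (DEBUG_SYS_FORK set to 2 therefore traces practically from boot). The canonical guard, as it appears in the hunks above:

    #if DEBUG_PROCESS_MAKE_FORK
    uint32_t cycle = (uint32_t)hal_get_cycles();
    if( DEBUG_PROCESS_MAKE_FORK < cycle )
    printk("\n[DBG] %s : thread %x enter / cycle %d\n",
    __FUNCTION__ , CURRENT_THREAD , cycle );
    #endif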
trunk/kernel/libk/list.h
r437 r440  79 79 * This macro returns the first element of a rooted doubly linked list. 80 80 *************************************************************************** 81  * @ root_ptr : pointer on the list root 81 * @ root : pointer on the list root 82 82 * @ type : type of the linked elements 83 83 * @ member : name of the list_entry_t field … …  90 90 * This macro returns the last element of a rooted doubly linked list. 91 91 *************************************************************************** 92  * @ root_ptr : pointer on the list root 92 * @ root : pointer on the list root 93 93 * @ type : type of the linked elements 94 94 * @ member : name of the list_entry_t field -
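A usage sketch for these accessors, using the scheduler types seen in the scheduler.c hunk above (illustrative, not committed code):

    // first and last user threads attached to this scheduler
    thread_t * first = LIST_FIRST( &sched->u_root , thread_t , sched_list );
    thread_t * last  = LIST_LAST ( &sched->u_root , thread_t , sched_list );

Both macros presumably expand to the offsetof-based LIST_ELEMENT() computation on the root's next and prev pointers, so they must not be applied to an empty list.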
trunk/kernel/mm/mapper.c
r438 r440 Â 3 3 * 4 4 * Authors Mohamed Lamine Karaoui (2015) 5 Â * Alain Greiner (2016 )Â 5 * Alain Greiner (2016,2017,2018) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/mm/mapper.h
r407 r440 Â 3 3 * 4 4 * Authors Mohamed Lamine Karaoui (2015) 5 Â * Alain Greiner (2016 )Â 5 * Alain Greiner (2016,2017,2018) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/mm/vmm.c
r438 r440  198 198 bool_t mapping ) 199 199 { 200  assert( (process->ref_xp == XPTR( local_cxy , process )) , __FUNCTION__, 201  "this function must be executed in reference cluster" ); 202  203 200 vmm_t * vmm = &process->vmm; 204 201 gpt_t * gpt = &vmm->gpt; 205 202 206  printk("\n***** VSL and GPT for process %x \n\n", 207  process->pid ); 203 printk("\n***** VSL and GPT for process %x in cluster %x\n\n",  204 process->pid , local_cxy ); 208 205 209 206 // get lock protecting the vseg list … …  1036 1033 } // end vmm_remove_vseg() 1037 1034 1038  //////////////////////////////////////////////1039  error_t vmm_map_kernel_vseg( vseg_t * vseg,1040  uint32_t attr )1041  {1042  vpn_t vpn; // VPN of PTE to be set1043  vpn_t vpn_min; // VPN of first PTE to be set1044  vpn_t vpn_max; // VPN of last PTE to be set (excluded)1045  ppn_t ppn; // PPN of allocated physical page1046  uint32_t order; // ln( number of small pages for one single PTE )1047  page_t * page;1048  error_t error;1049  1050  // check vseg type : must be a kernel vseg1051  uint32_t type = vseg->type;1052  assert( ((type==VSEG_TYPE_KCODE) || (type==VSEG_TYPE_KDATA) || (type==VSEG_TYPE_KDEV)),1053  __FUNCTION__ , "not a kernel vseg\n" );1054  1055  // get pointer on page table1056  gpt_t * gpt = &process_zero.vmm.gpt;1057  1058  // define number of small pages per PTE1059  if( attr & GPT_SMALL ) order = 0; // 1 small page1060  else order = 9; // 512 small pages1061  1062  // loop on pages in vseg1063  vpn_min = vseg->vpn_base;1064  vpn_max = vpn_min + vseg->vpn_size;1065  for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )1066  {1067  // allocate a physical page from local PPM1068  kmem_req_t req;1069  req.type = KMEM_PAGE;1070  req.size = order;1071  req.flags = AF_KERNEL | AF_ZERO;1072  page = (page_t *)kmem_alloc( &req );1073  if( page == NULL )1074  {1075  printk("\n[ERROR] in %s : cannot allocate physical memory\n", __FUNCTION__ );1076  return ENOMEM;1077  }1078  1079  // set page table entry1080  ppn = ppm_page2ppn( XPTR( local_cxy , page ) );1081  error = hal_gpt_set_pte( gpt,1082  vpn,1083  attr,1084  ppn );1085  if( error )1086  {1087  printk("\n[ERROR] in %s : cannot register PPE\n", __FUNCTION__ );1088  return ENOMEM;1089  }1090  }1091  1092  return 0;1093  1094  } // end vmm_map_kernel_vseg()1095  1096 1035 ///////////////////////////////////////// 1097 1036 void vmm_unmap_vseg( process_t * process, … …  1193 1132 1194 1133 ////////////////////////////////////////////////////////////////////////////////////////// 1195  // This low-level static function is called by the vmm_get_vseg() and vmm_resize_vseg()1196  // functions. It scans the list of registered vsegs to find the unique vseg containing1197  // a given virtual address. 1134 // This low-level static function is called by the vmm_get_vseg(), vmm_get_pte(),  1135 // and vmm_resize_vseg() functions. It scans the local VSL to find the unique vseg  1136 // containing a given virtual address. 1198 1137 ////////////////////////////////////////////////////////////////////////////////////////// 1199 1138 // @ vmm : pointer on the process VMM.
… …  1331 1270 vseg_t ** found_vseg ) 1332 1271 { 1333  vmm_t * vmm = &process->vmm; 1334  1335  // get vseg from vaddr 1336  vseg_t * vseg = vseg_from_vaddr( vmm , vaddr );  1272 xptr_t vseg_xp;  1273 error_t error;  1274 vseg_t * vseg;  1275 vmm_t * vmm;  1276  1277 // get pointer on local VMM  1278 vmm = &process->vmm;  1279  1280 // try to get vseg from local VMM  1281 vseg = vseg_from_vaddr( vmm , vaddr ); 1337 1282 1338 1283 if( vseg == NULL ) // vseg not found in local cluster => try to get it from ref … …  1348 1293 1349 1294 // get extended pointer on reference vseg 1350  xptr_t vseg_xp;1351  error_t error;1352  1353 1295 rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error ); 1354 1296 1355  if( error ) return -1; // vseg not found => illegal user vaddr 1297 if( error ) return -1; // vseg not found => illegal user vaddr 1356 1298 1357 1299 // allocate a vseg in local cluster 1358 1300 vseg = vseg_alloc(); 1359 1301 1360  if( vseg == NULL ) return -1;  1302 if( vseg == NULL ) return -1; // cannot allocate a local vseg 1361 1303 1362 1304 // initialise local vseg from reference … …  1496 1438 1497 1439 // initialise missing page from .elf file mapper for DATA and CODE types 1498  // (the vseg->mapper_xp field is an extended pointer on the .elf file mapper) 1440 // the vseg->mapper_xp field is an extended pointer on the .elf file mapper 1499 1441 if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) ) 1500 1442 { … …  1521 1463 #endif 1522 1464  1465 1523 1466 // compute extended pointer on page base 1524 1467 xptr_t base_xp = ppm_page2base( page_xp ); … …  1535 1478 __FUNCTION__, CURRENT_THREAD, vpn ); 1536 1479 #endif  1480 1537 1481 1538 1482 if( GET_CXY( page_xp ) == local_cxy ) … …  1553 1497 __FUNCTION__, CURRENT_THREAD, vpn ); 1554 1498 #endif 1555  1556 1499 if( mapper_cxy == local_cxy ) 1557 1500 { … …  1644 1587 ppn_t * ppn ) 1645 1588 { 1646  vseg_t * vseg; // vseg containing VPN 1647  ppn_t old_ppn; // current PTE_PPN 1648  uint32_t old_attr; // current PTE_ATTR 1649  ppn_t new_ppn; // new PTE_PPN 1650  uint32_t new_attr; // new PTE_ATTR 1651  error_t error; 1652  1653  // this function must be called by a thread running in the reference cluster 1654  assert( (GET_CXY( process->ref_xp ) == local_cxy ) , __FUNCTION__ , 1655  "not called in the reference cluster\n" );  1589 ppn_t old_ppn; // current PTE_PPN  1590 uint32_t old_attr; // current PTE_ATTR  1591 ppn_t new_ppn; // new PTE_PPN  1592 uint32_t new_attr; // new PTE_ATTR  1593 vmm_t * vmm;  1594 vseg_t * vseg;  1595 error_t error; 1656 1596 1657 1597 #if DEBUG_VMM_GET_PTE … …  1663 1603 1664 1604 // get VMM pointer 1665  vmm_t * vmm = &process->vmm; 1666  1667  // get vseg pointer from reference VSL 1668  error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg ); 1669  1670  if( error ) 1671  { 1672  printk("\n[ERROR] in %s : out of segment / process = %x / vpn = %x\n", 1673  __FUNCTION__ , process->pid , vpn ); 1674  return error; 1675  } 1676  1677  #if( DEBUG_VMM_GET_PTE & 1 ) 1678  cycle = (uint32_t)hal_get_cycles(); 1679  if( DEBUG_VMM_GET_PTE < cycle ) 1680  printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n", 1681  __FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size ); 1682  #endif  1605 vmm = &process->vmm;  1606  1607 // get local vseg descriptor  1608 error = vmm_get_vseg( process,  1609 ((intptr_t)vpn << CONFIG_PPM_PAGE_SHIFT),  1610 &vseg );  1611  1612 // vseg has been checked by the vmm_handle_page_fault() 
function  1613 assert( (vseg != NULL) , __FUNCTION__,  1614 "vseg undefined / vpn %x / thread %x / process %x / core[%x,%d] / cycle %d\n",  1615 vpn, CURRENT_THREAD, process->pid, local_cxy, CURRENT_THREAD->core->lid,  1616 (uint32_t)hal_get_cycles() ); 1683 1617 1684 1618 if( cow ) //////////////// copy_on_write request ////////////////////// 1685  // get PTE from referenceGPT 1619 // get PTE from local GPT 1686 1620 // allocate a new physical page if there is pending forks, 1687 1621 // initialize it from old physical page content, 1688 1622 // update PTE in all GPT copies, 1689 1623 { 1690  // access GPT to get current PTE attributes and PPN 1624 // access local GPT to get current PTE attributes and PPN 1691 1625 hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn ); 1692 1626 1693  assert( (old_attr & GPT_MAPPED) , __FUNCTION__ , 1694  "PTE must be mapped for a copy-on-write exception\n" );  1627 assert( (old_attr & GPT_MAPPED), __FUNCTION__,  1628 "PTE unmapped for a COW exception / vpn %x / thread %x / process %x / cycle %d\n",  1629 vpn, CURRENT_THREAD, process->pid, (uint32_t)hal_get_cycles() ); 1695 1630 1696 1631 #if( DEBUG_VMM_GET_PTE & 1 ) 1697  cycle = (uint32_t)hal_get_cycles();1698 1632 if( DEBUG_VMM_GET_PTE < cycle ) 1699 1633 printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n", … …  1745 1679 } 1746 1680 else //////////// page_fault request /////////////////////////// 1747  // get PTE from referenceGPT 1681 // get PTE from local GPT 1748 1682 // allocate a physical page if it is a true page fault,  1683 // initialize it if type is FILE, CODE, or DATA, 1749 1684 // register in reference GPT, but don't update GPT copies 1750 1685 { 1751  // access GPT to get current PTE 1686 // access local GPT to get current PTE 1752 1687 hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn ); 1753 1688 … …  1756 1691 1757 1692 #if( DEBUG_VMM_GET_PTE & 1 ) 1758  cycle = (uint32_t)hal_get_cycles();1759 1693 if( DEBUG_VMM_GET_PTE < cycle ) 1760 1694 printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n", 1761 1695 __FUNCTION__, CURRENT_THREAD, vpn, process->pid ); 1762 1696 #endif 1763  1764  // allocate new_ppn, depending on vseg type  1697 // allocate new_ppn, and initialize the new page 1765 1698 error = vmm_get_one_ppn( vseg , vpn , &new_ppn ); 1766 1699 if( error ) … …  1801 1734 cycle = (uint32_t)hal_get_cycles(); 1802 1735 if( DEBUG_VMM_GET_PTE < cycle ) 1803  printk("\n[DBG] %s : thread ,%x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n", 1736 printk("\n[DBG] %s : thread %x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n", 1804 1737 __FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle ); 1805 1738 #endif … …  1814 1747 /////////////////////////////////////////////////// 1815 1748 error_t vmm_handle_page_fault( process_t * process, 1816  vpn_t vpn )  1749 vpn_t vpn,  1750 bool_t is_cow ) 1817 1751 { 1818 1752 uint32_t attr; // missing page attributes 1819 1753 ppn_t ppn; // missing page PPN  1754 vseg_t * vseg; // vseg containing vpn  1755 uint32_t type; // vseg type  1756 cxy_t ref_cxy; // reference cluster for missing vpn  1757 process_t * ref_ptr; // reference process for missing vpn 1820 1758 error_t error; 1821 1759 1822  #if DEBUG_VMM_GET_PTE  1760 thread_t * this = CURRENT_THREAD;  1761  1762 #if DEBUG_VMM_HANDLE_PAGE_FAULT 1823 1763 uint32_t cycle = (uint32_t)hal_get_cycles(); 1824  if( DEBUG_VMM_GET_PTE < cycle ) 1825  printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / 
cycle %d\n", 1826  __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle ); 1827  #endif  1764 if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )  1765 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / core[%x,%d] / cycle %d\n",  1766 __FUNCTION__, this, vpn, process->pid, local_cxy, this->core->lid, cycle );  1767 #endif  1768  1769 // get local vseg (access reference VSL if required)  1770 error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg );  1771  1772 if( error )  1773 {  1774 printk("\n[ERROR] in %s : vpn %x / process %x / thread %x / core[%x,%d] / cycle %d\n",  1775 __FUNCTION__, vpn, process->pid, this->trdid, local_cxy, this->core->lid,  1776 (uint32_t)hal_get_cycles() );  1777 return error;  1778 }  1779  1780 // get segment type  1781 type = vseg->type; 1828 1782 1829 1783 // get reference process cluster and local pointer 1830  cxy_t ref_cxy = GET_CXY( process->ref_xp ); 1831  process_t * ref_ptr = GET_PTR( process->ref_xp ); 1832  1833  // get missing PTE attributes and PPN from reference cluster  1784 // for private vsegs (CODE and DATA type),  1785 // the reference is the local process descriptor.  1786 if( (type == VSEG_TYPE_STACK) || (type == VSEG_TYPE_CODE) )  1787 {  1788 ref_cxy = local_cxy;  1789 ref_ptr = process;  1790 }  1791 else  1792 {  1793 ref_cxy = GET_CXY( process->ref_xp );  1794 ref_ptr = GET_PTR( process->ref_xp );  1795 }  1796  1797 // get missing PTE attributes and PPN 1834 1798 if( local_cxy != ref_cxy ) 1835 1799 { … …  1837 1801 ref_ptr, 1838 1802 vpn, 1839  false, // page_fault 1803 is_cow, 1840 1804 &attr, 1841 1805 &ppn, … …  1855 1819 error = vmm_get_pte( process, 1856 1820 vpn, 1857  false, // page-fault 1821 is_cow, 1858 1822 &attr, 1859 1823 &ppn ); 1860 1824 } 1861 1825 1862  #if DEBUG_VMM_ GET_PTE 1826 #if DEBUG_VMM_HANDLE_PAGE_FAULT 1863 1827 cycle = (uint32_t)hal_get_cycles(); 1864  if( DEBUG_VMM_ GET_PTE< cycle ) 1828 if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) 1865 1829 printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n", 1866  __FUNCTION__ , CURRENT_THREAD , vpn , process->pid, cycle ); 1830 __FUNCTION__, this->trdid, vpn, process->pid, cycle ); 1867 1831 #endif 1868 1832 … …  1871 1835 } // end vmm_handle_page_fault() 1872 1836 1873  //////////////////////////////////////////// 1874  error_t vmm_handle_cow( process_t * process, 1875  vpn_t vpn ) 1876  { 1877  uint32_t attr; // page attributes 1878  ppn_t ppn; // page PPN 1879  error_t error; 1880  1881  #if DEBUG_VMM_GET_PTE 1882  uint32_t cycle = (uint32_t)hal_get_cycles(); 1883  if( DEBUG_VMM_GET_PTE < cycle ) 1884  printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n", 1885  __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle ); 1886  #endif 1887  1888  // get reference process cluster and local pointer 1889  cxy_t ref_cxy = GET_CXY( process->ref_xp ); 1890  process_t * ref_ptr = GET_PTR( process->ref_xp ); 1891  1892  // get new PTE attributes and PPN from reference cluster 1893  if( local_cxy != ref_cxy ) 1894  { 1895  rpc_vmm_get_pte_client( ref_cxy, 1896  ref_ptr, 1897  vpn, 1898  true, // copy-on-write 1899  &attr, 1900  &ppn, 1901  &error ); 1902  1903  // get local VMM pointer 1904  vmm_t * vmm = &process->vmm; 1905  1906  // update local GPT 1907  error |= hal_gpt_set_pte( &vmm->gpt, 1908  vpn, 1909  attr, 1910  ppn ); 1911  } 1912  else // local cluster is the reference cluster 1913  { 1914  error = vmm_get_pte( process, 1915  vpn, 1916  true, // copy-on-write 1917  &attr, 1918  &ppn ); 1919  } 1920  
1921  #if DEBUG_VMM_GET_PTE 1922  cycle = (uint32_t)hal_get_cycles(); 1923  if( DEBUG_VMM_GET_PTE < cycle ) 1924  printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n", 1925  __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle ); 1926  #endif 1927  1928  return error; 1929  1930  } // end vmm_handle_cow() 1931  1932  /////////////////////////////////////////// 1933  error_t vmm_v2p_translate( bool_t ident, 1934  void * ptr, 1935  paddr_t * paddr ) 1936  { 1937  process_t * process = CURRENT_THREAD->process; 1938  1939  if( ident ) // identity mapping 1940  { 1941  *paddr = (paddr_t)PADDR( local_cxy , (lpa_t)ptr ); 1942  return 0; 1943  } 1944   1837 /* deprecated April 2018 [AG]  1838  1839 error_t vmm_v2p_translate( process_t * process,  1840 void * ptr,  1841 paddr_t * paddr )  1842 { 1945 1843 // access page table 1946 1844 error_t error; … …  1953 1851 offset = (uint32_t)( ((intptr_t)ptr) & CONFIG_PPM_PAGE_MASK ); 1954 1852 1955  if( local_cxy == GET_CXY( process->ref_xp) ) // callingprocess is reference process 1853 if( local_cxy == GET_CXY( process->ref_xp) ) // local process is reference process 1956 1854 { 1957 1855 error = vmm_get_pte( process, vpn , false , &attr , &ppn ); … …  1971 1869 } // end vmm_v2p_translate() 1972 1870 1973   1871 */ -
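The hunks above move page-fault and copy-on-write handling from a "reference cluster only" policy to a type-dependent dispatch. The standalone model below illustrates the new rule; it is a sketch, not changeset code: the cxy_t typedef and the first four enum values are assumptions (only VSEG_TYPE_FILE = 4 and VSEG_TYPE_REMOTE = 5 are visible in vseg.h below). Note that the in-code comment says "CODE and DATA" while the test actually checks STACK and CODE; the model follows the test.

#include <stdio.h>

typedef unsigned int cxy_t;   /* cluster identifier (model only) */

typedef enum { VSEG_TYPE_CODE, VSEG_TYPE_DATA, VSEG_TYPE_STACK, VSEG_TYPE_ANON,
               VSEG_TYPE_FILE = 4, VSEG_TYPE_REMOTE = 5 } vseg_type_t;

/* Mirrors the rule added to vmm_handle_page_fault(): STACK and CODE segments
 * are private, so the local cluster owns the reference GPT entry; all other
 * types defer to the process reference cluster. */
static cxy_t fault_reference_cluster( vseg_type_t type, cxy_t local_cxy, cxy_t ref_cxy )
{
    if( (type == VSEG_TYPE_STACK) || (type == VSEG_TYPE_CODE) ) return local_cxy;
    else                                                        return ref_cxy;
}

int main( void )
{
    /* a STACK fault in cluster 3 is solved locally ... */
    printf( "STACK -> cluster %u\n", fault_reference_cluster( VSEG_TYPE_STACK, 3, 0 ) );
    /* ... while a DATA fault goes through the reference cluster 0 */
    printf( "DATA  -> cluster %u\n", fault_reference_cluster( VSEG_TYPE_DATA , 3, 0 ) );
    return 0;
}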
trunk/kernel/mm/vmm.h
r437 r440  293 293 
294 294 /*********************************************************************************************
295  * This function allocates physical memory from the local cluster to map all PTEs
296  * of a "kernel" vseg (type KCODE , KDATA, or KDEV) in the page table of process_zero.
297  * WARNING : It should not be used for "user" vsegs, that must be mapped using the
298  * "on-demand-paging" policy.
299  *********************************************************************************************
300  * @ vseg : pointer on the vseg to be mapped.
301  * @ attr : GPT attributes to be set for all vseg pages.
302  * @ returns 0 if success / returns ENOMEM if no memory
303  ********************************************************************************************/
304  error_t vmm_map_kernel_vseg( vseg_t * vseg,
305  uint32_t attr );
306  
307  /*********************************************************************************************
308 295 * This function removes a given region (defined by a base address and a size) from
309 296 * the VMM of a given process descriptor. This can modify the number of vsegs:
… …  335 322 * @ process : [in] pointer on process descriptor
336 323 * @ vaddr : [in] virtual address
337  * @ vseg : [out] pointer on found vseg
338  * @ returns 0 if success / returns -1 if user error .
 324 * @ vseg : [out] local pointer on local vseg
 325 * @ returns 0 if success / returns -1 if user error (out of segment).
339 326 *********************************************************************************************/
340 327 error_t vmm_get_vseg( struct process_s * process,
… …  343 330 
344 331 /*********************************************************************************************
345  * This function is called by the generic exception handler when a page-fault event
346  * has been detected for a given process in a given cluster.
347  * - If the local cluster is the reference, it call directly the vmm_get_pte() function.
348  * - If the local cluster is not the reference cluster, it send a RPC_VMM_GET_PTE
349  * to the reference cluster to get the missing PTE attributes and PPN,
350  * and update the local page table.
351  *********************************************************************************************
352  * @ process : pointer on process descriptor.
353  * @ vpn : VPN of the missing PTE.
354  * @ returns 0 if success / returns ENOMEM if no memory.
 332 * This function is called by the generic exception handler in case of a page-fault
 333 * or copy-on-write event locally detected for a given <vpn> in a given <process>,
 334 * as defined by the <is_cow> argument.
 335 * 1) For a Page-Fault:
 336 * - If the local cluster is the reference, or for the STACK and CODE segment types,
 337 * it calls directly the vmm_get_pte() function to access the local VMM.
 338 * - Otherwise, it sends an RPC_VMM_GET_PTE to the reference cluster to get the missing
 339 * PTE attributes and PPN.
 340 * This function checks that the missing VPN belongs to a registered vseg, allocates
 341 * a new physical page if required, and updates the local page table.
 342 * 2) For a Copy-On-Write:
 343 * - If no pending fork, it resets the COW flag and sets the WRITE flag in the reference
 344 * GPT entry, and in all the GPT copies.
 345 * - If there is a pending fork, it allocates a new physical page from the cluster defined
 346 * by the vseg type, copies the old physical page content to the new physical page,
 347 * and decrements the pending_fork counter in the old physical page descriptor. 
348 *********************************************************************************************
 349 * @ process : pointer on local process descriptor copy.
 350 * @ vpn : VPN of the missing or faulting PTE.
 351 * @ is_cow : Copy-On-Write event if true / Page-fault if false.
 352 * @ returns 0 if success / returns ENOMEM if no memory or illegal VPN.
355 353 ********************************************************************************************/
356 354 error_t vmm_handle_page_fault( struct process_s * process,
357  vpn_t vpn );
358  
359  /*********************************************************************************************
360  * This function is called by the generic exception handler when a copy-on-write event
361  * has been detected for a given process in a given cluster.
362  * It takes the lock protecting the physical page, and test the pending forks counter.
363  * If no pending fork:
364  * - it reset the COW flag and set the WRITE flag in the reference GPT entry, and in all
365  * the GPT copies
366  
367  * If there is a pending fork on the
368  * - It get the involved vseg pointer.
369  * - It allocates a new physical page from the cluster defined by the vseg type.
370  * - It copies the old physical page content to the new physical page.
371  * - It decrements the pending_fork counter in old physical page descriptor.
372  
373  *********************************************************************************************
374  * @ process : pointer on process descriptor.
375  * @ vpn : VPN of the missing PTE.
376  * @ returns 0 if success / returns ENOMEM if no memory.
377  ********************************************************************************************/
378  error_t vmm_handle_cow( struct process_s * process,
379  vpn_t vpn );
380  
381  /*********************************************************************************************
382  * This function handle both the "page-fault" and "copy-on_write" events for a given <vpn>
383  * in a given <process>. The <cow> argument defines the type of event to be handled.
384  * This function must be called by a thread running in reference cluster, and the vseg
385  * containing the searched VPN must be registered in the reference VMM.
 355 vpn_t vpn,
 356 bool_t is_cow );
 357 
 358 /*********************************************************************************************
 359 * This function is called by vmm_handle_page_fault() to handle both the "page-fault"
 360 * and the "copy-on-write" events for a given <vpn> in a given <process>, as defined
 361 * by the <is_cow> argument.
 362 * The vseg containing the searched VPN must be registered in the reference VMM.
386 363 * - for a page-fault, it allocates the missing physical page from the target cluster
387 364 * defined by the vseg type, initializes it, and updates the reference GPT, but not
… …  390 367 * initialise it from the old physical page, and updates the reference GPT and all
391 368 * the GPT copies, for coherence.
392  * In both cases, it calls the RPC_PMEM_GET_PAGES to get the new physical page when
393  * the target cluster is not the reference cluster.
 369 * It calls the RPC_PMEM_GET_PAGES to get the new physical page when the target cluster
 370 * is not the local cluster.
 371 * It returns in the <attr> and <ppn> arguments the accessed or modified PTE.
395 372 *********************************************************************************************
396 373 * @ process : [in] pointer on process descriptor. 
397 374 * @ vpn : [in] VPN defining the missing PTE.
398  * @ cow : [in] "copy_on_write" if true / "page_fault" if false.
 375 * @ is_cow : [in] "copy_on_write" if true / "page_fault" if false.
399 376 * @ attr : [out] PTE attributes.
400 377 * @ ppn : [out] PTE ppn.
… …  403 380 error_t vmm_get_pte( struct process_s * process,
404 381 vpn_t vpn,
405  bool_t cow,
 382 bool_t is_cow,
406 383 uint32_t * attr,
407 384 ppn_t * ppn );
… …  428 405 ppn_t * ppn );
429 406 
430  /*********************************************************************************************
431  * This function makes the virtual to physical address translation, using the calling
432  * process page table. It uses identity mapping if required by the <ident> argument.
433  * This address translation is required to configure the peripherals having a DMA
434  * capability, or to implement the software L2/L3 cache coherence, using the MMC device
435  * synchronisation primitives.
436  * WARNING : the <ident> value must be defined by the CONFIG_KERNEL_IDENTITY_MAP parameter.
437  *********************************************************************************************
438  * @ ident : [in] uses identity mapping if true.
439  * @ ptr : [in] virtual address.
440  * @ paddr : [out] pointer on buffer for physical address.
441  * @ returns 0 if success / returns ENOMEM if error.
442  ********************************************************************************************/
443  error_t vmm_v2p_translate( bool_t ident,
444  void * ptr,
445  paddr_t * paddr );
446  
447  
448 407 
449 408 #endif /* _VMM_H_ */ -
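As a usage note, the new <is_cow> argument lets a single entry point serve both events. Below is a minimal caller sketch that compiles against these kernel headers rather than standalone; the handler name and the mmu_excp_is_cow() helper are hypothetical, only vmm_handle_page_fault() comes from the prototype above.

/* Hypothetical HAL-level handler: 'is_cow' must be derived from the MMU
 * error report (a write on a mapped read-only page is a COW event;
 * an access to an unmapped page is a true page fault). */
error_t hal_vmm_exception( process_t * process, vpn_t vpn )
{
    bool_t is_cow = mmu_excp_is_cow( vpn );   /* hypothetical helper */
    return vmm_handle_page_fault( process, vpn, is_cow );
}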
trunk/kernel/mm/vseg.c
r429 r440  143 143 VSEG_CACHE ;
144 144 }
145  else if( type == VSEG_TYPE_KCODE )
146  {
147  vseg->flags = VSEG_EXEC |
148  VSEG_CACHE |
149  VSEG_PRIVATE ;
150  }
151  else if( type == VSEG_TYPE_KDATA )
152  {
153  vseg->flags = VSEG_WRITE |
154  VSEG_CACHE |
155  VSEG_PRIVATE ;
156  }
157  else if( type == VSEG_TYPE_KDEV )
158  {
159  vseg->flags = VSEG_WRITE ;
160  }
161 145 else
162 146 { -
trunk/kernel/mm/vseg.h
r409 r440  47 47 VSEG_TYPE_FILE = 4, /*! file mmap / public / localized */
48 48 VSEG_TYPE_REMOTE = 5, /*! remote mmap / public / localized */
49  
50  VSEG_TYPE_KDATA = 10,
51  VSEG_TYPE_KCODE = 11,
52  VSEG_TYPE_KDEV = 12,
53 49 }
54 50 vseg_type_t; -
trunk/kernel/syscalls/sys_barrier.c
r23 r440  2 2 * sys_barrier.c - Access a POSIX barrier. 3 3 * 4  * authors Alain Greiner (2016,2017 ) 4 * authors Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … …  37 37 { 38 38 error_t error; 39  paddr_t paddr; 39 vseg_t * vseg; 40 40 41  thread_t * this = CURRENT_THREAD;  41 thread_t * this = CURRENT_THREAD;  42 process_t * process = this->process; 42 43 43 44 // check vaddr in user vspace 44  error = vmm_v2p_translate( false , vaddr , &paddr );  45 error = vmm_get_vseg( process , (intptr_t)vaddr , &vseg );  46 45 47 if( error ) 46 48 { 47  printk("\n[ERROR] in %s : illegal barrier virtual address = %x\n", 48  __FUNCTION__ , (intptr_t)vaddr );  49  50 #if DEBUG_SYSCALLS_ERROR  51 printk("\n[ERROR] in %s : unmapped barrier %x / thread %x / process %x\n",  52 __FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );  53 vmm_display( process , false );  54 #endif 49 55 this->errno = error; 50 56 return -1; … …  61 67 if( error ) 62 68 { 63  printk("\n[ERROR] in %s : cannot create barrier = %x\n", 64  __FUNCTION__ , (intptr_t)vaddr );  69  70 #if DEBUG_SYSCALLS_ERROR  71 printk("\n[ERROR] in %s : cannot create barrier %x / thread %x / process %x\n",  72 __FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );  73 #endif 65 74 this->errno = error; 66 75 return -1; … …  75 84 if( barrier_xp == XPTR_NULL ) // user error 76 85 { 77  printk("\n[ERROR] in %s : barrier %x not registered\n", 78  __FUNCTION__ , (intptr_t)vaddr );  86  87 #if DEBUG_SYSCALLS_ERROR  88 printk("\n[ERROR] in %s : barrier %x not registered / thread %x / process %x\n",  89 __FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );  90 #endif 79 91 this->errno = EINVAL; 80 92 return -1; … …  93 105 if( barrier_xp == XPTR_NULL ) // user error 94 106 { 95  printk("\n[ERROR] in %s : barrier %x not registered\n", 96  __FUNCTION__ , (intptr_t)vaddr );  107  108 #if DEBUG_SYSCALLS_ERROR  109 printk("\n[ERROR] in %s : barrier %x not registered / thread %x / process %x\n",  110 __FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );  111 #endif 97 112 this->errno = EINVAL; 98 113 return -1; -
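This is the first instance of the idiom the changeset applies across the whole syscall layer: user pointers are now validated by looking the address up in the VSL with vmm_get_vseg(), instead of forcing a virtual-to-physical translation with the removed vmm_v2p_translate(). A sketch of the idiom as a reusable helper (not part of the commit; it compiles against the kernel headers):

/* A user virtual address is acceptable iff it falls inside a vseg
 * registered in the process VSL. No physical translation is performed:
 * the page may still be unmapped, and will be faulted in on first access. */
static inline bool_t user_vaddr_is_valid( process_t * process, void * vaddr )
{
    vseg_t * vseg;
    return (vmm_get_vseg( process, (intptr_t)vaddr, &vseg ) == 0);
}

The same pattern recurs below in sys_condvar.c, sys_display.c, sys_get_config.c, sys_get_core.c, sys_get_cycle.c, sys_getcwd.c, sys_mmap.c, sys_mutex.c, sys_read.c, sys_sem.c, sys_stat.c and sys_thread_create.c.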
trunk/kernel/syscalls/sys_condvar.c
r23 r440  2 2 * sys_condvar.c - Access a POSIX condvar. 3 3 * 4  * Author Alain Greiner (2016,2017 ) 4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … …  30 30 #include <syscalls.h> 31 31 #include <remote_condvar.h>  32 #include <remote_mutex.h> 32 33 33 34 //////////////////////////////////////// … …  36 37 void * mutex ) 37 38 { 38  error_t error;39  paddr_t paddr; 39 error_t error;  40 vseg_t * vseg; 40 41 41  thread_t * this = CURRENT_THREAD;  42 thread_t * this = CURRENT_THREAD;  43 process_t * process = this->process; 42 44 43 45 // check condvar in user vspace 44  error = vmm_v2p_translate( false , condvar , &paddr );  46 error = vmm_get_vseg( process , (intptr_t)condvar , &vseg );  47 45 48 if( error ) 46 49 { 47  printk("\n[ERROR] in %s : illegal condvar virtual address = %x\n", 48  __FUNCTION__ , (intptr_t)condvar );  50  51 #if DEBUG_SYSCALLS_ERROR  52 printk("\n[ERROR] in %s : unmapped condvar %x / thread %x / process %x\n",  53 __FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );  54 vmm_display( process , false );  55 #endif 49 56 this->errno = error; 50 57 return -1; … …  61 68 if( error ) 62 69 { 63  printk("\n[ERROR] in %s : cannot create condvar = %x\n", 64  __FUNCTION__ , (intptr_t)condvar );  70  71 #if DEBUG_SYSCALLS_ERROR  72 printk("\n[ERROR] in %s : cannot create condvar %x / thread %x / process %x\n",  73 __FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );  74 #endif 65 75 this->errno = error; 66 76 return -1; … …  72 82 { 73 83 // check mutex in user vspace 74  error = vmm_ v2p_translate( false , mutex , &paddr); 84 error = vmm_get_vseg( process , (intptr_t)mutex , &vseg ); 75 85 76 86 if( error ) 77 87 { 78  printk("\n[ERROR] in %s : illegal condvar virtual address = %x\n", 79  __FUNCTION__ , (intptr_t)condvar );  88  89 #if DEBUG_SYSCALLS_ERROR  90 printk("\n[ERROR] in %s : unmapped mutex %x / thread %x / process %x\n",  91 __FUNCTION__ , (intptr_t)mutex , this->trdid , process->pid );  92 #endif 80 93 this->errno = error; 81 94 return -1; … …  86 99 if( condvar_xp == XPTR_NULL ) // user error 87 100 { 88  printk("\n[ERROR] in %s : condvar %x not registered\n", 89  __FUNCTION__ , (intptr_t)condvar );  101  102 #if DEBUG_SYSCALLS_ERROR  103 printk("\n[ERROR] in %s : condvar %x not registered / thread %x / process %x\n",  104 __FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );  105 #endif 90 106 this->errno = EINVAL; 91 107 return -1; 92 108 } 93 109 94  xptr_t mutex_xp = remote_condvar_from_ident( (intptr_t)condvar );  110 xptr_t mutex_xp = remote_mutex_from_ident( (intptr_t)mutex );  111 95 112 if( mutex_xp == XPTR_NULL ) // user error 96 113 { 97  printk("\n[ERROR] in %s : mutex %x not registered\n", 98  __FUNCTION__ , (intptr_t)condvar );  114  115 #if DEBUG_SYSCALLS_ERROR  116 printk("\n[ERROR] in %s : mutex %x not registered / thread %x / process %x\n",  117 __FUNCTION__ , (intptr_t)mutex , this->trdid , process->pid );  118 #endif 99 119 this->errno = EINVAL; 100 120 return -1; … …  112 132 if( condvar_xp == XPTR_NULL ) // user error 113 133 { 114  printk("\n[ERROR] in %s : condvar %x not registered\n", 115  __FUNCTION__ , (intptr_t)condvar );  134  135 #if DEBUG_SYSCALLS_ERROR  136 printk("\n[ERROR] in %s : condvar %x not registered / thread %x / process %x\n",  137 __FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );  138 #endif 116 139 this->errno = EINVAL; 117 140 return -1; … …  129 152 if( condvar_xp == XPTR_NULL ) // user error 130 153 { 131  
printk("\n[ERROR] in %s : condvar %x not registered\n", 132  __FUNCTION__ , (intptr_t)condvar );  154  155 #if DEBUG_SYSCALLS_ERROR  156 printk("\n[ERROR] in %s : condvar %x not registered / thread %x / process %x\n",  157 __FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );  158 #endif 133 159 this->errno = EINVAL; 134 160 return -1; … …  146 172 if( condvar_xp == XPTR_NULL ) // user error 147 173 { 148  printk("\n[ERROR] in %s : condvar %x not registered\n", 149  __FUNCTION__ , (intptr_t)condvar );  174  175 #if DEBUG_SYSCALLS_ERROR  176 printk("\n[ERROR] in %s : condvar %x not registered / thread %x / process %x\n",  177 __FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );  178 #endif 150 179 this->errno = EINVAL; 151 180 return -1; -
trunk/kernel/syscalls/sys_display.c
r438 r440  2 2 * sys_display.c - display the current state of a kernel structure on TXT0 3 3 * 4  * Author Alain Greiner (2016,2017 ) 4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … …  38 38 { 39 39  40 error_t error;  41 vseg_t * vseg;  42  43 thread_t * this = CURRENT_THREAD;  44 process_t * process = this->process;  45 40 46 #if DEBUG_SYS_DISPLAY 41 47 uint64_t tm_start; 42 48 uint64_t tm_end; 43  thread_t * this;44  this = CURRENT_THREAD;45 49 tm_start = hal_get_cycles(); 46 50 if( DEBUG_SYS_DISPLAY < tm_start ) … …  49 53 #endif 50 54  55 //////////////////////////// 51 56 if( type == DISPLAY_STRING ) 52 57 { 53  paddr_t paddr;54 58 char kbuf[256]; 55 59 uint32_t length; 56 60 57 61 char * string = (char *)arg0; 58   62 59 63 // check string in user space 60  if( vmm_v2p_translate( false , string , &paddr ) ) 61  { 62  printk("\n[ERROR] in %s : string buffer %x unmapped\n", 63  __FUNCTION__ , string );  64 error = vmm_get_vseg( process , (intptr_t)arg0 , &vseg );  65  66 if( error )  67 {  68  69 #if DEBUG_SYSCALLS_ERROR  70 printk("\n[ERROR] in %s : string buffer %x unmapped / thread %x / process %x\n",  71 __FUNCTION__ , (intptr_t)arg0 , this->trdid , process->pid );  72 #endif  73 this->errno = EINVAL; 64 74 return -1; 65 75 } … …  67 77 // ckeck string length 68 78 length = hal_strlen_from_uspace( string );  79 69 80 if( length >= 256 ) 70 81 { 71  printk("\n[ERROR] in %s : string length %d too large\n", 72  __FUNCTION__ , length ); 73  return -1; 74  } 75  76  // copy string in kernel space  82  83 #if DEBUG_SYSCALLS_ERROR  84 printk("\n[ERROR] in %s : string length %d too large / thread %x / process %x\n",  85 __FUNCTION__ , length , this->trdid , process->pid );  86 #endif  87 this->errno = EINVAL;  88 return -1;  89 }  90  91 // copy string to kernel space 77 92 hal_strcpy_from_uspace( kbuf , string , 256 ); 78 93 79 94 // print message on TXT0 kernel terminal 80  printk("\n[USER] %s / cycle %d\n", kbuf, (uint32_t)hal_get_cycles() ); 81  }  95 printk("\n%s / cycle %d\n", kbuf, (uint32_t)hal_get_cycles() );  96 }  97 ////////////////////////////// 82 98 else if( type == DISPLAY_VMM ) 83 99 { … …  89 105 if( process_xp == XPTR_NULL ) 90 106 { 91  printk("\n[ERROR] in %s : undefined PID %x\n", 92  __FUNCTION__ , pid );  107  108 #if DEBUG_SYSCALLS_ERROR  109 printk("\n[ERROR] in %s : undefined pid argument %d / thread %x / process %x\n",  110 __FUNCTION__ , pid , this->trdid , process->pid );  111 #endif  112 this->errno = EINVAL; 93 113 return -1; 94 114 } … …  108 128 } 109 129 }  130 //////////////////////////////// 110 131 else if( type == DISPLAY_SCHED ) 111 132 { … …  113 134 lid_t lid = (lid_t)arg1; 114 135 115  // check c lusterargument 136 // check cxy argument 116 137 if( cluster_is_undefined( cxy ) ) 117 138 { 118  printk("\n[ERROR] in %s : undefined cluster identifier %x\n", 119  __FUNCTION__ , cxy ); 120  return -1; 121  } 122  123  // check core argument  139  140 #if DEBUG_SYSCALLS_ERROR  141 printk("\n[ERROR] in %s : illegal cxy argument %x / thread %x / process %x\n",  142 __FUNCTION__ , cxy , this->trdid , process->pid );  143 #endif  144 this->errno = EINVAL;  145 return -1;  146 }  147  148 // check lid argument 124 149 if( lid >= LOCAL_CLUSTER->cores_nr ) 125 150 { 126  printk("\n[ERROR] in %s : undefined local index %d\n", 127  __FUNCTION__ , lid );  151  152 #if DEBUG_SYSCALLS_ERROR  153 printk("\n[ERROR] in %s : illegal lid argument %x / thread %x / process %x\n",  154 __FUNCTION__ , lid , this->trdid , 
process->pid );  155 #endif  156 this->errno = EINVAL; 128 157 return -1; 129 158 } … …  138 167 } 139 168 }  169 //////////////////////////////////////////// 140 170 else if( type == DISPLAY_CLUSTER_PROCESSES ) 141 171 { 142 172 cxy_t cxy = (cxy_t)arg0; 143 173 144  // check c lusterargument 174 // check cxy argument 145 175 if( cluster_is_undefined( cxy ) ) 146 176 { 147  printk("\n[ERROR] in %s : undefined cluster identifier %x\n", 148  __FUNCTION__ , cxy );  177  178 #if DEBUG_SYSCALLS_ERROR  179 printk("\n[ERROR] in %s : illegal cxy argument %x / thread %x / process %x\n",  180 __FUNCTION__ , cxy , this->trdid , process->pid );  181 #endif  182 this->errno = EINVAL; 149 183 return -1; 150 184 } … …  152 186 cluster_processes_display( cxy ); 153 187 }  188 //////////////////////////////////////// 154 189 else if( type == DISPLAY_TXT_PROCESSES ) 155 190 { … …  159 194 if( txt_id >= LOCAL_CLUSTER->nb_txt_channels ) 160 195 { 161  printk("\n[ERROR] in %s : undefined TXT channel %x\n", 162  __FUNCTION__ , txt_id );  196  197 #if DEBUG_SYSCALLS_ERROR  198 printk("\n[ERROR] in %s : illegal txt_id argument %d / thread %x / process %x\n",  199 __FUNCTION__ , txt_id , this->trdid , process->pid );  200 #endif  201 this->errno = EINVAL; 163 202 return -1; 164 203 } … …  166 205 process_txt_display( txt_id ); 167 206 }  207 ////////////////////////////// 168 208 else if( type == DISPLAY_VFS ) 169 209 { 170  // call kernel function171  process_t * process = CURRENT_THREAD->process;172 210 vfs_display( process->vfs_root_xp ); 173 211 }  212 //////////////////////////////// 174 213 else if( type == DISPLAY_CHDEV ) 175 214 { 176 215 chdev_dir_display(); 177 216 }  217 //// 178 218 else 179 219 { 180  printk("\n[ERROR] in %s : undefined display type %x\n", 181  __FUNCTION__ , type );  220  221 #if DEBUG_SYSCALLS_ERROR  222 printk("\n[ERROR] in %s : undefined display type %x / thread %x / process %x\n",  223 __FUNCTION__ , type , this->trdid , process->pid );  224 #endif  225 this->errno = EINVAL; 182 226 return -1; 183 227 } -
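For quick reference, the checks above validate the following per display type, summarized from the code (the numeric DISPLAY_* values are not shown in this diff):

/* DISPLAY_STRING            : a user string (must be mapped, < 256 bytes)
 * DISPLAY_VMM               : a pid (must name an existing process)
 * DISPLAY_SCHED             : a (cxy,lid) pair naming one core
 * DISPLAY_CLUSTER_PROCESSES : a cxy naming one cluster
 * DISPLAY_TXT_PROCESSES     : a txt_id naming one TXT channel
 * DISPLAY_VFS / DISPLAY_CHDEV : no checked argument */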
trunk/kernel/syscalls/sys_exit.c
r438 r440  2 2 * sys_exit.c - Kernel function implementing the "exit" system call. 3 3 * 4  * Author Alain Greiner (2016,2017 ) 4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … …  41 41 process_t * process = this->process; 42 42 pid_t pid = process->pid; 43  trdid_t trdid = this->trdid;44 43 45 44 #if DEBUG_SYS_EXIT … …  52 51 #endif 53 52 54  // get owner cluster 55  cxy_t owner_cxy = CXY_FROM_PID( pid );  53 // get owner process descriptor pointers an cluster  54 xptr_t owner_xp = cluster_get_owner_process_from_pid( pid );  55 cxy_t owner_cxy = GET_CXY( owner_xp );  56 process_t * owner_ptr = GET_PTR( owner_xp ); 56 57 57  // exit must be called by the main thread 58  if( (owner_cxy != local_cxy) || (LTID_FROM_TRDID( trdid ) != 0) ) 59  { 60  61  #if DEBUG_SYSCALLS_ERROR 62  printk("\n[ERROR] in %s : calling thread %x is not thread 0 in owner cluster %x\n", 63  __FUNCTION__, trdid, owner_cxy ); 64  #endif 65  this->errno = EINVAL; 66  return -1; 67  }  58 // get pointers on the process main thread  59 thread_t * main = hal_remote_lpt( XPTR( owner_cxy , &owner_ptr->th_tbl[0] ) ); 68 60 69 61 // enable IRQs 70 62 hal_enable_irq( &save_sr ); 71 63 72  // register exit_status in owner process descriptor 73  process->term_state = status;  64 // mark for delete all process threads in all clusters  65 // (but the main thread and this calling thread)  66 process_sigaction( pid , DELETE_ALL_THREADS );  67  68 // disable IRQs  69 hal_restore_irq( save_sr ); 74 70 75 71 #if( DEBUG_SYS_EXIT & 1) 76  printk("\n[DBG] %s : set exit status in process term_state\n", __FUNCTION__);  72 if( tm_start > DEBUG_SYS_EXIT )  73 printk("\n[DBG] %s : thread %x deleted threads / process %x\n",  74 __FUNCTION__ , this, pid ); 77 75 #endif 78 76 79  // remove process from TXT list 80  process_txt_detach( XPTR( local_cxy , process ) );  77 // mark for delete this calling thread when it is not the main  78 if( (owner_cxy != local_cxy) || (main != this) )  79 { 81 80 82 81 #if( DEBUG_SYS_EXIT & 1) 83  printk("\n[DBG] %s : removed from TXT list\n", __FUNCTION__);  82 if( tm_start > DEBUG_SYS_EXIT )  83 printk("\n[DBG] %s : calling thread %x deleted itself / process %x\n",  84 __FUNCTION__ , this, pid );  85 #endif  86 thread_delete( XPTR( local_cxy , this ) , pid , true );  87 }  88  89 // remove process from TXT list  90 process_txt_detach( owner_xp );  91  92 #if( DEBUG_SYS_EXIT & 1)  93 if( tm_start > DEBUG_SYS_EXIT )  94 printk("\n[DBG] %s : thread %x removed process %x from TXT list\n",  95 __FUNCTION__ , this, pid ); 84 96 #endif 85 97 86  // mark for delete all process threads in all clusters (but the main)87  process_sigaction( pid , DELETE_ALL_THREADS); 98 // block the main thread  99 thread_block( XPTR( owner_cxy , main ) , THREAD_BLOCKED_GLOBAL ); 88 100 89 101 #if( DEBUG_SYS_EXIT & 1) 90  printk("\n[DBG] %s : deleted all other threads than main\n", __FUNCTION__); 91  #endif 92  93  // restore IRQs 94  hal_restore_irq( save_sr ); 95  96  // block the main thread itself 97  thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_GLOBAL ); 98  99  #if( DEBUG_SYS_EXIT & 1) 100  printk("\n[DBG] %s : blocked the main thread\n", __FUNCTION__);  102 if( tm_start > DEBUG_SYS_EXIT )  103 printk("\n[DBG] %s : thread %x blocked main thread for process %x\n",  104 __FUNCTION__, this , pid ); 101 105 #endif 102 106 103 107 // atomically update owner process descriptor term_state to ask 104  // the parent process sys_wait() function to delete th ismain thread105  
hal_remote_atomic_or( XPTR( local_cxy , &process->term_state ) ,106  PROCESS_TERM_EXIT ); 108 // the parent process sys_wait() function to delete the main thread  109 hal_remote_atomic_or( XPTR( owner_cxy , &process->term_state ) ,  110 PROCESS_TERM_EXIT | (status & 0xFF) ); 107 111 108 112 #if( DEBUG_SYS_EXIT & 1) 109  printk("\n[DBG] %s : set EXIT flag in process term_state\n", __FUNCTION__);  113 if( tm_start > DEBUG_SYS_EXIT )  114 printk("\n[DBG] %s : thread %x set exit status in process %x term_state\n",  115 __FUNCTION__ , this, pid ); 110 116 #endif 111 117 … …  119 125 #endif 120 126 121  // mainthread deschedule 127 // this thread deschedule 122 128 sched_yield( "process exit" ); 123 129 -
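The ordering of the new sys_exit() matters: threads are reaped before the process leaves the TXT list, and the main thread is only blocked, never deleted here. A condensed restatement of the committed sequence, with IRQ masking and debug instrumentation elided:

/* 1. mark for delete all threads, in all clusters, except main and the caller */
process_sigaction( pid , DELETE_ALL_THREADS );
/* 2. the caller marks itself for delete when it is not the main thread */
if( (owner_cxy != local_cxy) || (main != this) )
    thread_delete( XPTR( local_cxy , this ) , pid , true );
/* 3. detach the process from its TXT terminal */
process_txt_detach( owner_xp );
/* 4. block the main thread: it stays allocated ... */
thread_block( XPTR( owner_cxy , main ) , THREAD_BLOCKED_GLOBAL );
/* 5. ... until the parent's sys_wait() sees PROCESS_TERM_EXIT and reaps it */
hal_remote_atomic_or( XPTR( owner_cxy , &process->term_state ) ,
                      PROCESS_TERM_EXIT | (status & 0xFF) );
/* 6. the calling thread deschedules and never resumes */
sched_yield( "process exit" );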
trunk/kernel/syscalls/sys_fork.c
r438 r440  77 77 ref_process_xp = parent_process_ptr->ref_xp; 78 78 ref_process_cxy = GET_CXY( ref_process_xp ); 79  ref_process_ptr = (process_t *)GET_PTR( ref_process_xp ); 79 ref_process_ptr = GET_PTR( ref_process_xp ); 80 80 81 81 // check parent process children number from reference … …  104 104 } 105 105 106  #if( DEBUG_SYS_FORK & 1) 107  108  // dqdt_display(); 109  110  if( local_cxy == 0 ) 111  { 112  sched_display( 0 ); 113  rpc_sched_display_client( 1 , 0 ); 114  } 115  else 116  { 117  sched_display( 0 ); 118  rpc_sched_display_client( 0 , 0 ); 119  } 120   106 #if (DEBUG_SYS_FORK & 1 ) 121 107 if( DEBUG_SYS_FORK < tm_start ) 122 108 printk("\n[DBG] %s : parent_thread %x selected cluster %x\n", -
trunk/kernel/syscalls/sys_get_config.c
r438 r440  2 2 * sys_get_config.c - get hardware platform parameters. 3 3 * 4  * Author Alain Greiner (2016,2017 ) 4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … …  37 37 uint32_t * ncores ) 38 38 { 39  paddr_t paddr; 40  uint32_t k_x_size; 41  uint32_t k_y_size; 42  uint32_t k_ncores; 43  44  error_t error = 0;  39 error_t error;  40 vseg_t * vseg;  41 uint32_t k_x_size;  42 uint32_t k_y_size;  43 uint32_t k_ncores; 45 44 46 45 thread_t * this = CURRENT_THREAD; … …  56 55 #endif 57 56 58  // check buffer in user space 59  error |= vmm_v2p_translate( false , x_size , &paddr ); 60  error |= vmm_v2p_translate( false , y_size , &paddr ); 61  error |= vmm_v2p_translate( false , ncores , &paddr );  57 // check x_size buffer in user space  58 error = vmm_get_vseg( process , (intptr_t)x_size , &vseg ); 62 59 63 60 if( error ) … …  65 62 66 63 #if DEBUG_SYSCALLS_ERROR 67  printk("\n[ERROR] in %s : user buffer unmapped for thread %x in process %x\n", 68  __FUNCTION__ , this->trdid , process->pid );  64 printk("\n[ERROR] in %s : x_size buffer unmapped / thread %x / process %x\n",  65 __FUNCTION__ , (intptr_t)x_size , this->trdid , process->pid );  66 vmm_display( process , false ); 69 67 #endif 70  this->errno = EFAULT;  68 this->errno = EINVAL;  69 return -1;  70 }  71  72 // check y_size buffer in user space  73 error = vmm_get_vseg( process , (intptr_t)y_size , &vseg );  74  75 if( error )  76 {  77  78 #if DEBUG_SYSCALLS_ERROR  79 printk("\n[ERROR] in %s : y_size buffer unmapped / thread %x / process %x\n",  80 __FUNCTION__ , (intptr_t)y_size , this->trdid , process->pid );  81 vmm_display( process , false );  82 #endif  83 this->errno = EINVAL;  84 return -1;  85 }  86  87 // check ncores buffer in user space  88 error = vmm_get_vseg( process , (intptr_t)ncores , &vseg );  89  90 if( error )  91 {  92  93 #if DEBUG_SYSCALLS_ERROR  94 printk("\n[ERROR] in %s : ncores buffer unmapped / thread %x / process %x\n",  95 __FUNCTION__ , (intptr_t)ncores , this->trdid , process->pid );  96 vmm_display( process , false );  97 #endif  98 this->errno = EINVAL; 71 99 return -1; 72 100 } -
trunk/kernel/syscalls/sys_get_core.c
r410 r440  37 37 uint32_t * lid ) 38 38 { 39  paddr_t paddr;  39 error_t error;  40 vseg_t * vseg; 40 41 uint32_t k_cxy; 41 42 uint32_t k_lid; 42  43  error_t error = 0;44 43 45 44 thread_t * this = CURRENT_THREAD; 46 45 process_t * process = this->process; 47 46 48  // check buffers in user space 49  error |= vmm_v2p_translate( false , cxy , &paddr ); 50  error |= vmm_v2p_translate( false , lid , &paddr );  47 // check cxy buffer in user space  48 error = vmm_get_vseg( process , (intptr_t)cxy , &vseg ); 51 49 52 50 if( error ) 53 51 { 54  printk("\n[ERROR] in %s : user buffer unmapped for thread %x in process %x\n", 55  __FUNCTION__ , this->trdid , process->pid );  52  53 #if DEBUG_SYSCALLS_ERROR  54 printk("\n[ERROR] in %s : cxy buffer unmapped %x / thread %x / process %x\n",  55 __FUNCTION__ , (intptr_t)cxy , this->trdid , process->pid );  56 vmm_display( process , false );  57 #endif  58 this->errno = EFAULT;  59 return -1;  60 }  61  62 // check lid buffer in user space  63 error = vmm_get_vseg( process , (intptr_t)lid , &vseg );  64  65 if( error )  66 {  67  68 #if DEBUG_SYSCALLS_ERROR  69 printk("\n[ERROR] in %s : lid buffer unmapped %x / thread %x / process %x\n",  70 __FUNCTION__ , (intptr_t)lid , this->trdid , process->pid );  71 vmm_display( process , false );  72 #endif 56 73 this->errno = EFAULT; 57 74 return -1; -
trunk/kernel/syscalls/sys_get_cycle.c
r408 r440  2 2 * sys_get_cycle.c - get calling core cycles count. 3 3 * 4  * Author Alain Greiner (2016,2017 ) 4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … …  35 35 int sys_get_cycle ( uint64_t * cycle ) 36 36 { 37  error_t error;38  paddr_t paddr;39  uint64_t k_cycle; 37 error_t error;  38 vseg_t * vseg;  39 uint64_t k_cycle; 40 40 41 41 thread_t * this = CURRENT_THREAD; … …  43 43 44 44 // check buffer in user space 45  error = vmm_ v2p_translate( false , cycle , &paddr); 45 error = vmm_get_vseg( process , (intptr_t)cycle , &vseg ); 46 46 47 47 if( error ) 48 48 { 49  printk("\n[ERROR] in %s : user buffer unmapped for thread %x in process %x\n", 50  __FUNCTION__ , this->trdid , process->pid );  49  50 #if DEBUG_SYSCALLS_ERROR  51 printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n",  52 __FUNCTION__ , (intptr_t)cycle , this->trdid , process->pid );  53 vmm_display( process , false );  54 #endif 51 55 this->errno = EFAULT; 52 56 return -1; -
trunk/kernel/syscalls/sys_getcwd.c
r124 r440  2 2 * sys_getcwd.c - get process current work directory 3 3 * 4  * Author Alain Greiner (2016,2017 ) 4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … …  40 40 { 41 41 error_t error; 42  paddr_t paddr; 42 vseg_t * vseg; 43 43 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 44 44 … …  49 49 if( nbytes < CONFIG_VFS_MAX_PATH_LENGTH ) 50 50 { 51  printk("\n[ERROR] in %s : buffer too small\n", __FUNCTION__ ); 52  this->errno = ERANGE;  51  52 #if DEBUG_SYSCALLS_ERROR  53 printk("\n[ERROR] in %s : buffer too small / thread %x / process %x\n",  54 __FUNCTION__ , this->trdid , process->pid );  55 #endif  56 this->errno = EINVAL; 53 57 return -1; 54 58 } 55 59 56 60 // check buffer in user space 57  error = vmm_ v2p_translate( false , buf , &paddr); 61 error = vmm_get_vseg( process, (intptr_t)buf , &vseg ); 58 62 59 63 if( error ) 60 64 { 61  printk("\n[ERROR] in %s : user buffer unmapped\n", __FUNCTION__ ); 62  this->errno = EFAULT;  65  66 #if DEBUG_SYSCALLS_ERROR  67 printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n",  68 __FUNCTION__ , (intptr_t)buf , this->trdid , process->pid );  69 #endif  70 this->errno = EINVAL; 63 71 return -1; 64 72 } -
trunk/kernel/syscalls/sys_kill.c
r438 r440  2 2 * sys_kill.c - Kernel function implementing the "kill" system call. 3 3 * 4  * Author Alain Greiner (2016,2017 ) 4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … …  38 38 { 39 39 xptr_t owner_xp; // extended pointer on target process in owner cluster 40  cxy_t owner_cxy; // target process owner cluster 40 cxy_t owner_cxy; // target process in owner cluster 41 41 process_t * owner_ptr; // local pointer on target process in owner cluster 42  xptr_t parent_xp; // extended pointer on parent process43  cxy_t parent_cxy; // parent process cluster44  process_t * parent_ptr; // local pointer on parent process45  pid_t ppid; // parent process PID46 42 uint32_t retval; // return value for the switch 47 43 48 44 thread_t * this = CURRENT_THREAD; 49 45 process_t * process = this->process; 50  trdid_t trdid = this->trdid;51 46 52 47 #if DEBUG_SYS_KILL … …  75 70 } 76 71 77  // process can kill itself only when calling thread is the main thread78  if( (pid == process->pid) && ((owner_cxy != local_cxy) || (LTID_FROM_TRDID( trdid )))) 72 // process cannot kill itself  73 if( (pid == process->pid) ) 79 74 { 80 75 81 76 #if DEBUG_SYSCALLS_ERROR 82  printk("\n[ERROR] in %s : only main thread can kill itself\n", __FUNCTION__); 77 printk("\n[ERROR] in %s : process %x cannot kill itself\n", __FUNCTION__, pid ); 83 78 #endif 84 79 this->errno = EINVAL; … …  86 81 } 87 82 88  // get parent process PID 89  parent_xp = hal_remote_lwd( XPTR( owner_cxy , &owner_ptr->parent_xp ) ); 90  parent_cxy = GET_CXY( parent_xp ); 91  parent_ptr = GET_PTR( parent_xp ); 92  ppid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) ); 93  94  // check processe INIT  83 // processe INIT cannot be killed 95 84 if( pid == 1 ) 96 85 { -
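A user-visible consequence of the new self-kill check, sketched as a small test program; this assumes the ALMOS-MKH libc exposes the usual POSIX kill()/getpid() wrappers:

#include <signal.h>
#include <unistd.h>
#include <stdio.h>
#include <errno.h>

int main( void )
{
    /* under this changeset the kernel rejects pid == self with EINVAL,
     * so the call returns instead of terminating the process */
    if( kill( getpid() , SIGKILL ) < 0 )
        printf( "self-kill rejected / errno %d\n", errno );
    return 0;   /* a process must now terminate through exit() */
}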
trunk/kernel/syscalls/sys_mmap.c
r438 r440  3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5  * Alain Greiner (2016,2017 ) 5 * Alain Greiner (2016,2017,2018) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … …  44 44 xptr_t mapper_xp; 45 45 error_t error; 46  paddr_t paddr; // unused, but required for user space checking47 46 reg_t save_sr; // required to enable IRQs 48 47 … …  60 59 61 60 // check arguments in user space 62  error = vmm_ v2p_translate( false , attr , &paddr); 61 error = vmm_get_vseg( process , (intptr_t)attr , &vseg ); 63 62 64 63 if ( error ) … …  66 65 67 66 #if DEBUG_SYSCALLS_ERROR 68  printk("\n[ERROR] in %s : arguments not in used space = %x\n", __FUNCTION__ , (intptr_t)attr );  67 printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n",  68 __FUNCTION__ , (intptr_t)attr , this->trdid , process->pid );  69 vmm_display( process , false ); 69 70 #endif 70 71 this->errno = EINVAL; … …  92 93 93 94 #if DEBUG_SYSCALLS_ERROR 94  printk("\n[ERROR] in %s : MAP_FIXED not supported\n", __FUNCTION__ );  95 printk("\n[ERROR] in %s : MAP_FIXED not supported / thread %x / process %x\n",  96 __FUNCTION__ , this->trdid , process->pid ); 95 97 #endif 96 98 this->errno = EINVAL; … …  102 104 103 105 #if DEBUG_SYSCALLS_ERROR 104  printk("\n[ERROR] in %s : MAP_SHARED xor MAP_PRIVATE\n", __FUNCTION__ );  106 printk("\n[ERROR] in %s : MAP_SHARED == MAP_PRIVATE / thread %x / process %x\n",  107 __FUNCTION__ , this->trdid , process->pid ); 105 108 #endif 106 109 this->errno = EINVAL; … …  124 127 125 128 #if DEBUG_SYSCALLS_ERROR 126  printk("\n[ERROR] in %s: bad file descriptor = %d\n", __FUNCTION__ , fdid );  129 printk("\n[ERROR] in %s: bad file descriptor %d / thread %x / process %x\n",  130 __FUNCTION__ , fdid , this->trdid , process->pid ); 127 131 #endif 128 132 this->errno = EBADFD; … …  137 141 138 142 #if DEBUG_SYSCALLS_ERROR 139  printk("\n[ERROR] in %s: file %d not found\n", __FUNCTION__ , fdid );  143 printk("\n[ERROR] in %s: file %d not found / thread %x / process %x\n",  144 __FUNCTION__ , fdid , this->trdid , process->pid ); 140 145 #endif 141 146 this->errno = EBADFD; … …  160 165 161 166 #if DEBUG_SYSCALLS_ERROR 162  printk("\n[ERROR] in %s: offset (%d) + len (%d) >= file's size (%d)\n",163  __FUNCTION__, k_attr.offset, k_attr.length, size ); 167 printk("\n[ERROR] in %s: offset(%d) + len(%d) >= file's size(%d) / thread %x / process %x\n",  168 __FUNCTION__, k_attr.offset, k_attr.length, size, this->trdid, process->pid ); 164 169 #endif 165 170 this->errno = ERANGE; … …  173 178 174 179 #if DEBUG_SYSCALLS_ERROR 175  printk("\n[ERROR] in %s: prot = %x / file_attr = %x )\n",176  __FUNCTION__ , k_attr.prot , file_attr ); 180 printk("\n[ERROR] in %s: prot = %x / file_attr = %x / thread %x , process %x\n",  181 __FUNCTION__ , k_attr.prot , file_attr , this->trdid , process->pid ); 177 182 #endif 178 183 this->errno = EACCES; … …  206 211 207 212 #if DEBUG_SYSCALLS_ERROR 208  printk("\n[ERROR] in %s : illegal cxy for MAP_REMOTE\n", __FUNCTION__ );  213 printk("\n[ERROR] in %s : illegal cxy for MAP_REMOTE / thread %x / process %x\n",  214 __FUNCTION__, this->trdid , process->pid ); 209 215 #endif 210 216 this->errno = EINVAL; … …  255 261 256 262 #if DEBUG_SYSCALLS_ERROR 257  printk("\n[ERROR] in %s : cannot create vseg\n", __FUNCTION__ );  263 printk("\n[ERROR] in %s : cannot create vseg / thread %x / process %x\n",  264 __FUNCTION__, this->trdid , process->pid ); 258 265 #endif 259 266 this->errno = ENOMEM; -
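The flag checks above encode two rules: MAP_FIXED is rejected outright, and exactly one of MAP_SHARED / MAP_PRIVATE must be set. A standalone model of those rules (the flag values are illustrative; only the rules come from the code):

#include <stdbool.h>
#include <stdio.h>

#define MAP_PRIVATE 0x1   /* illustrative values */
#define MAP_SHARED  0x2
#define MAP_FIXED   0x4

static bool mmap_flags_ok( unsigned flags )
{
    if( flags & MAP_FIXED ) return false;               /* not supported */
    bool shared = (flags & MAP_SHARED ) != 0;
    bool priv   = (flags & MAP_PRIVATE) != 0;
    return shared != priv;                              /* exactly one required */
}

int main( void )
{
    printf( "%d %d %d\n",
            mmap_flags_ok( MAP_SHARED ),                 /* 1 : legal     */
            mmap_flags_ok( MAP_SHARED | MAP_PRIVATE ),   /* 0 : both set  */
            mmap_flags_ok( MAP_FIXED  | MAP_PRIVATE ) ); /* 0 : MAP_FIXED */
    return 0;
}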
trunk/kernel/syscalls/sys_mutex.c
r23 r440  2 2 * sys_mutex.c - Access a POSIX mutex. 3 3 * 4  * Author Alain Greiner (2016,2017 ) 4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … …  37 37 uint32_t attr ) 38 38 { 39  error_t error;40  paddr_t paddr; 39 error_t error;  40 vseg_t * vseg; 41 41 42  thread_t * this = CURRENT_THREAD;  42 thread_t * this = CURRENT_THREAD;  43 process_t * process = this->process; 43 44 44 45 // check vaddr in user vspace 45  error = vmm_v2p_translate( false , vaddr , &paddr );  46 error = vmm_get_vseg( process , (intptr_t)vaddr , &vseg );  47 46 48 if( error ) 47 49 { 48  printk("\n[ERROR] in %s : illegal virtual address = %x\n", 49  __FUNCTION__ , (intptr_t)vaddr );  50  51 #if DEBUG_SYSCALLS_ERROR  52 printk("\n[ERROR] in %s : mutex unmapped %x / thread %x / process %x\n",  53 __FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );  54 vmm_display( process , false );  55 #endif 50 56 this->errno = error; 51 57 return -1; … …  60 66 if( attr != 0 ) 61 67 { 62  printk("\n[ERROR] in %s : mutex attributes non supported yet\n", 63  __FUNCTION__ );  68  69 #if DEBUG_SYSCALLS_ERROR  70 printk("\n[ERROR] in %s : mutex attribute non supported / thread %x / process %x\n",  71 __FUNCTION__ , this->trdid , process->pid );  72 #endif 64 73 this->errno = error; 65 74 return -1; … …  70 79 if( error ) 71 80 { 72  printk("\n[ERROR] in %s : cannot create mutex\n", 73  __FUNCTION__ );  81  82 #if DEBUG_SYSCALLS_ERROR  83 printk("\n[ERROR] in %s : cannot create mutex / thread %x / process %x\n",  84 __FUNCTION__ , this->trdid , process->pid );  85 #endif 74 86 this->errno = error; 75 87 return -1; … …  84 96 if( mutex_xp == XPTR_NULL ) // user error 85 97 { 86  printk("\n[ERROR] in %s : mutex %x not registered\n", 87  __FUNCTION__ , (intptr_t)vaddr );  98  99 #if DEBUG_SYSCALLS_ERROR  100 printk("\n[ERROR] in %s : mutex %x not registered / thread %x / process %x\n",  101 __FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );  102 #endif 88 103 this->errno = EINVAL; 89 104 return -1; … …  102 117 if( mutex_xp == XPTR_NULL ) // user error 103 118 { 104  printk("\n[ERROR] in %s : mutex %x not registered\n", 105  __FUNCTION__ , (intptr_t)vaddr );  119  120 #if DEBUG_SYSCALLS_ERROR  121 printk("\n[ERROR] in %s : mutex %x not registered / thread %x / process %x\n",  122 __FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );  123 #endif 106 124 this->errno = EINVAL; 107 125 return -1; … …  120 138 if( mutex_xp == XPTR_NULL ) // user error 121 139 { 122  printk("\n[ERROR] in %s : mutex %x not registered\n", 123  __FUNCTION__ , (intptr_t)vaddr );  140  141 #if DEBUG_SYSCALLS_ERROR  142 printk("\n[ERROR] in %s : mutex %x not registered / thread %x / process %x\n",  143 __FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );  144 #endif 124 145 this->errno = EINVAL; 125 146 return -1; -
trunk/kernel/syscalls/sys_read.c
r438 r440  2 2 * sys_read.c - read bytes from a file 3 3 * 4  * Author Alain Greiner (2016,2017 ) 4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … …  58 58 { 59 59 error_t error; 60  paddr_t paddr;// required for user space checking 60 vseg_t * vseg; // required for user space checking 61 61 xptr_t file_xp; // remote file extended pointer 62 62 uint32_t nbytes; // number of bytes actually read … …  91 91 92 92 // check user buffer in user space 93  error = vmm_ v2p_translate( false , vaddr , &paddr); 93 error = vmm_get_vseg( process , (intptr_t)vaddr , &vseg ); 94 94 95 95 if ( error ) … …  97 97 98 98 #if DEBUG_SYSCALLS_ERROR 99  printk("\n[ERROR] in %s : user buffer unmapped = %x\n", 100  __FUNCTION__ , (intptr_t)vaddr );  99 printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n",  100 __FUNCTION__ , (intptr_t)vaddr, this->trdid, process->pid );  101 vmm_display( process , false ); 101 102 #endif 102 103 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_sem.c
r23 r440  2 2 * sys_sem.c - Acces a POSIX unamed semaphore. 3 3 * 4  * Authors Alain Greiner (2016,2017 ) 4 * Authors Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … …  36 36 uint32_t * value ) // pointer on in/out argument 37 37 { 38  uint32_t 39  paddr_t paddr;40  error_t  38 uint32_t data;  39 vseg_t * vseg;  40 error_t error; 41 41 42  thread_t * this = CURRENT_THREAD;  42 thread_t * this = CURRENT_THREAD;  43 process_t * process = this->process; 43 44 44 45 // check vaddr in user vspace 45  error = vmm_ v2p_translate( false , vaddr , &paddr); 46 error = vmm_get_vseg( process , (intptr_t)vaddr , &vseg ); 46 47 if( error ) 47 48 { 48  printk("\n[ERROR] in %s : illegal semaphore virtual address = %x\n", 49  __FUNCTION__ , (intptr_t)vaddr ); 50  this->errno = error;  49  50 #if DEBUG_SYSCALLS_ERROR  51 printk("\n[ERROR] in %s : unmapped semaphore %x / thread %x / process %x\n",  52 __FUNCTION__ , (intptr_t)vaddr, this->trdid, process->pid );  53 vmm_display( process , false );  54 #endif  55 this->errno = EINVAL; 51 56 return -1; 52 57 } 53 58 54 59 // check value in user vspace 55  error = vmm_ v2p_translate( false , value , &paddr); 60 error = vmm_get_vseg( process , (intptr_t)value , &vseg ); 56 61 if( error ) 57 62 { 58  printk("\n[ERROR] in %s : illegal argument virtual address = %x\n", 59  __FUNCTION__ , (intptr_t)value ); 60  this->errno = error; 61  return -1;  63  64 #if DEBUG_SYSCALLS_ERROR  65 printk("\n[ERROR] in %s : unmapped value %x / thread %x / process %x\n",  66 __FUNCTION__ , (intptr_t)vaddr, this->trdid, process->pid );  67 vmm_display( process , false );  68 #endif  69 this->errno = EINVAL;  70 return -1; 62 71 } 63   72 64 73 // execute requested operation 65 74 switch( operation ) … …  91 100 if( sem_xp == XPTR_NULL ) // user error 92 101 { 93  printk("\n[ERROR] in %s : semaphore %x not registered\n", 94  __FUNCTION__ , (intptr_t)value );  102  103 #if DEBUG_SYSCALLS_ERROR  104 printk("\n[ERROR] in %s : semaphore %x not registered / thread %x / process %x\n",  105 __FUNCTION__ , (intptr_t)value, this->trdid, process->pid );  106 #endif 95 107 this->errno = EINVAL; 96 108 return -1; … …  114 126 if( sem_xp == XPTR_NULL ) // user error 115 127 { 116  printk("\n[ERROR] in %s : semaphore %x not registered\n", 117  __FUNCTION__ , (intptr_t)value );  128  129 #if DEBUG_SYSCALLS_ERROR  130 printk("\n[ERROR] in %s : semaphore %x not registered / thread %x / process %x\n",  131 __FUNCTION__ , (intptr_t)value, this->trdid, process->pid );  132 #endif 118 133 this->errno = EINVAL; 119 134 return -1; … …  134 149 if( sem_xp == XPTR_NULL ) // user error 135 150 { 136  printk("\n[ERROR] in %s : semaphore %x not registered\n", 137  __FUNCTION__ , (intptr_t)value );  151  152 #if DEBUG_SYSCALLS_ERROR  153 printk("\n[ERROR] in %s : semaphore %x not registered / thread %x / process %x\n",  154 __FUNCTION__ , (intptr_t)value, this->trdid, process->pid );  155 #endif 138 156 this->errno = EINVAL; 139 157 return -1; … …  154 172 if( sem_xp == XPTR_NULL ) // user error 155 173 { 156  printk("\n[ERROR] in %s : semaphore %x not registered\n", 157  __FUNCTION__ , (intptr_t)value );  174  175 #if DEBUG_SYSCALLS_ERROR  176 printk("\n[ERROR] in %s : semaphore %x not registered / thread %x / process %x\n",  177 __FUNCTION__ , (intptr_t)value, this->trdid, process->pid );  178 #endif 158 179 this->errno = EINVAL; 159 180 return -1; -
trunk/kernel/syscalls/sys_stat.c
r407 r440  2 2 * sys_stat.c - Return statistics on a file or directory. 3 3 * 4  * Author Alain Greiner (2016,2017 ) 4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … …  37 37 { 38 38 error_t error; 39  paddr_t paddr; 39 vseg_t * vseg; // for user space checking 40 40 struct stat k_stat; // kernel space 41 41 xptr_t file_xp; … …  46 46 47 47 // check stat structure in user space 48  error = vmm_ v2p_translate( false , u_stat , &paddr); 48 error = vmm_get_vseg( process , (intptr_t)u_stat , &vseg ); 49 49 50 50 if( error ) 51 51 { 52  printk("\n[ERROR] in %s : stat structure unmapped for thread %x in process %x\n", 53  __FUNCTION__ , this->trdid , process->pid );  52  53 #if DEBUG_SYSCALLS_ERROR  54 printk("\n[ERROR] in %s : stat structure unmapped %x / thread %x / process %x\n",  55 __FUNCTION__ , (intptr_t)u_stat , this->trdid , process->pid );  56 vmm_display( process , false );  57 #endif 54 58 this->errno = EINVAL; 55 59 return -1; -
trunk/kernel/syscalls/sys_thread_cancel.c
r438 r440  23 23 24 24 #include <hal_types.h>  25 #include <hal_irqmask.h> 25 26 #include <hal_remote.h> 26 27 #include <hal_special.h> … …  32 33 int sys_thread_cancel( trdid_t trdid ) 33 34 {  35 reg_t save_sr; // required to enable IRQs 34 36 xptr_t target_xp; // target thread extended pointer  37 cxy_t target_cxy; // target thread cluster identifier  38 ltid_t target_ltid; // target thread local index  39 cxy_t owner_cxy; // process owner cluster identifier  40 xptr_t owner_xp; // extended pointer on owner process 35 41 36 42 // get killer thread pointers 37 43 thread_t * this = CURRENT_THREAD; 38 44 process_t * process = this->process;  45 pid_t pid = process->pid; 39 46 40 47 // get extended pointer on target thread 41  target_xp = thread_get_xptr( p rocess->pid , trdid ); 48 target_xp = thread_get_xptr( pid , trdid ); 42 49 43 50 // check target_xp … …  61 68 #endif 62 69 63  // cal the relevant kernel function 64  thread_kill( target_xp, 65  0, // is_exit 66  0 ); // is forced  70 // get process owner cluster identifier  71 owner_cxy = CXY_FROM_PID( pid );  72  73 // get target thread ltid and cluster  74 target_cxy = CXY_FROM_TRDID( trdid );  75 target_ltid = LTID_FROM_TRDID( trdid );  76  77 // If target thread is the main thread, the process must be deleted,  78 // This require synchronisation with parent process  79 if( (target_cxy == owner_cxy) && (target_ltid == 0) )  80 {  81 // get extended pointer on owner cluster  82 owner_xp = cluster_get_owner_process_from_pid( pid );  83  84 // mark for delete all threads but the main  85 hal_enable_irq( &save_sr );  86 process_sigaction( pid , DELETE_ALL_THREADS );  87 hal_restore_irq( save_sr );  88  89 // remove process from TXT list  90 process_txt_detach( owner_xp );  91  92 // block the main thread  93 thread_block( XPTR( local_cxy ,this ) , THREAD_BLOCKED_GLOBAL );  94  95 // atomically update owner process descriptor term_state to ask  96 // the parent process sys_wait() function to delete the main thread  97 hal_remote_atomic_or( XPTR( local_cxy , &process->term_state ) ,  98 PROCESS_TERM_EXIT );  99 }  100 else  101 {  102 // block target thread and mark it for delete  103 thread_delete( target_xp , pid , false );  104 } 67 105 68 106 #if DEBUG_SYS_THREAD_CANCEL -
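The main-thread test above relies on the (cluster, local index) encoding of trdid: cancelling the main thread must follow the same sequence as sys_exit() above, while any other thread is simply marked for delete. A standalone model of that test; the 16/16 bit split is an assumption for illustration, the kernel's CXY_FROM_TRDID / LTID_FROM_TRDID macros are authoritative:

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

typedef uint32_t trdid_t;
#define CXY_FROM_TRDID(t)   ((t) >> 16)       /* assumed split */
#define LTID_FROM_TRDID(t)  ((t) & 0xFFFF)

/* the target is the process main thread iff it is thread 0
 * in the process owner cluster */
static bool is_main_thread( trdid_t trdid , uint32_t owner_cxy )
{
    return (CXY_FROM_TRDID( trdid ) == owner_cxy) &&
           (LTID_FROM_TRDID( trdid ) == 0);
}

int main( void )
{
    assert(  is_main_thread( 0x00030000 , 3 ) );  /* ltid 0, owner cluster 3 */
    assert( !is_main_thread( 0x00030001 , 3 ) );  /* ltid 1: plain cancel    */
    return 0;
}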
trunk/kernel/syscalls/sys_thread_create.c
r438 r440  53 53 trdid_t trdid; // created thread identifier 54 54 process_t * process; // pointer on local process descriptor 55  paddr_t paddr; // unused, required by vmm_v2p_translate() 55 vseg_t * vseg; // required for user space checking 56 56 cxy_t target_cxy; // target cluster identifier 57 57 error_t error; … …  67 67 tm_start = hal_get_cycles(); 68 68 if( DEBUG_SYS_THREAD_CREATE < tm_start ) 69  printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n" 70  __FUNCTION__ , parent , process->pid, (uint32_t)tm_start ); 71  #endif 72  73  // check user_attr in user space & copy to kernel space  69 printk("\n[DBG] %s : thread %x (cxy %x) enter / process %x / cycle %d\n",  70 __FUNCTION__, parent, local_cxy, process->pid, (uint32_t)tm_start );  71 #endif  72  73 // check trdid buffer in user space  74 error = vmm_get_vseg( process , (intptr_t)trdid_ptr , &vseg );  75  76 if ( error )  77 {  78  79 #if DEBUG_SYSCALLS_ERROR  80 printk("\n[ERROR] in %s : trdid buffer unmapped %x / thread %x / process %x\n",  81 __FUNCTION__ , (intptr_t)trdid_ptr, parent->trdid, process->pid );  82 vmm_display( process , false );  83 #endif  84 parent->errno = EINVAL;  85 return -1;  86 }  87  88 // check user_attr buffer in user space & copy to kernel space 74 89 if( user_attr != NULL ) 75 90 { 76  error = vmm_ v2p_translate( false , user_attr , &paddr); 91 error = vmm_get_vseg( process , (intptr_t)user_attr , &vseg ); 77 92 78 93 if( error ) … …  80 95 81 96 #if DEBUG_SYSCALLS_ERROR 82  printk("\n[ERROR] in %s : user_attr unmapped\n", __FUNCTION__ );  97 printk("\n[ERROR] in %s : user_attr buffer unmapped %x / thread %x / process %x\n",  98 __FUNCTION__ , (intptr_t)user_attr , parent->trdid , process->pid );  99 vmm_display( process , false ); 83 100 #endif 84 101 parent->errno = EINVAL; … …  90 107 91 108 // check start_func in user space 92  error = vmm_v2p_translate( false , start_func , &paddr ); 93  94  if( error ) 95  { 96  97  #if DEBUG_SYSCALLS_ERROR 98  printk("\n[ERROR] in %s : start_func unmapped\n", __FUNCTION__ ); 99  #endif 100  parent->errno = EINVAL; 101  return -1; 102  } 103  104  // check start_arg in user space 105  if( start_arg != NULL ) error = vmm_v2p_translate( false , start_arg , &paddr ); 106  107  if( error ) 108  { 109  110  #if DEBUG_SYSCALLS_ERROR 111  printk("\n[ERROR] in %s : start_arg unmapped\n", __FUNCTION__ ); 112  #endif 113  parent->errno = EINVAL; 114  return -1; 115  } 116  117  // check / define attributes an target_cxy  109 error = vmm_get_vseg( process , (intptr_t)start_func , &vseg );  110  111 if( error )  112 {  113 Â