Changeset 623
Timestamp: Mar 6, 2019, 4:37:15 PM (6 years ago)
Location: trunk
Files: 60 edited
trunk/Makefile
r610 r623 4 4 5 5 -include params-soft.mk 6 6 7 ifeq ($(ARCH_NAME),) 7 $(error Please define in ARCH_NAME parameter in params-soft.mk!)8 $(error Please define ARCH_NAME parameter in params-soft.mk!) 8 9 endif 9 10 … … 56 57 MTOOLS_SKIP_CHECK := 1 57 58 58 # Rule to generate boot.elf, kernel.elf, all user.elf files, and update virtual disk. 59 ########################################################################################## 60 # Rule to generate boot.elf, kernel.elf, all user.elf files, and update the virtual disk 61 # when the corresponding source files have been modified or destroyed. 62 # The /home directory on the virtual disk is not modified 59 63 compile: dirs \ 60 build_disk \61 64 hard_config.h \ 62 65 build_libs \ … … 68 71 user/idbg/build/idbg.elf \ 69 72 user/sort/build/sort.elf \ 70 user/fft/build/fft.elf \73 user/fft/build/fft.elf \ 71 74 list 72 75 … … 85 88 mcopy -o -i $(DISK_IMAGE) ::/home . 86 89 90 ############################################################## 87 91 # Rules to delete all binary files from Unix File System 88 92 # without modifying the virtual disk. … … 119 123 mmd -o -i $(DISK_IMAGE) ::/bin/user || true 120 124 mmd -o -i $(DISK_IMAGE) ::/home || true 121 mcopy -o -i $(DISK_IMAGE) Makefile ::/home122 125 mdir -/ -b -i $(DISK_IMAGE) ::/ 123 126 … … 125 128 # Rules to generate hardware description files (hard_config.h, 126 129 # arch_info.bin and arch_info.xml), and update the virtual disk. 127 hard_config.h: build_disk$(ARCH)/arch_info.py130 hard_config.h: $(ARCH)/arch_info.py 128 131 tools/arch_info/genarch.py --arch=$(ARCH) \ 129 132 --x_size=$(X_SIZE) \ -
trunk/boot/tsar_mips32/boot.c
r578 r623 3 3 * 4 4 * Authors : Vu Son (2016) 5 * Alain Greiner (2016, 2017,2018)5 * Alain Greiner (2016,2017,2018,2019) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 26 26 * This file contains the ALMOS-MKH. boot-loader for the TSAR architecture. * 27 27 * * 28 * It supports clusterised shared memory multi-processor architectures,*28 * It supports a clusterised, shared memory, multi-processor architecture, * 29 29 * where each processor core is identified by a composite index [cxy,lid] * 30 30 * with one physical memory bank per cluster. * 31 31 * * 32 32 * The 'boot.elf' file (containing the boot-loader binary code) is stored * 33 * on disk and is loaded into memory by core[0,0] (cxy = 0 / lid = 0),*34 * and is copied in each other cluter by the local CP0 (lid = 0].*33 * on disk (not in the FAT file system), and must be loaded into memory by * 34 * the preloader running on the core[0][0] (cxy = 0 / lid = 0). * 35 35 * * 36 * 1) The boot-loader first phase is executed by core[0,0], while * 37 * all other cores are waiting in the preloader. * 38 * It does the following tasks: * 39 * - load into the memory bank of cluster 0 the 'arch_info.bin' * 40 * file (containing the hardware architecture description) and the * 41 * 'kernel.elf' file, at temporary locations, * 42 * - initializes the 'boot_info_t' structure in cluster(0,0) * 43 * (there is 1 'boot_info_t' per cluster), which contains both * 44 * global and cluster specific information that will be used for * 45 * kernel initialisation. * 46 * - activate CP0s in all other clusters, using IPIs. * 47 * - wait completion reports from CP0s on a global barrier. * 36 * The main task of the boot-loader is to load in the first physical page * 37 * of each cluster a copy of the kernel code (segments "kcode" and "kdata") * 38 * and to build - in each cluster - a cluster specific description of the * 39 * hardware architecture, stored in the "kdata" segment as the boot_info_t * 40 * structure. The "kernel.elf" and "arch_info.bin" files are supposed to be * 41 * stored on disk in a FAT32 file system. * 48 42 * * 49 * 2) The boot-loader second phase is then executed in parallel by all * 50 * CP0s (other than core[0,0]). Each CP0 performs the following tasks: * 51 * - copies into the memory bank of the local cluster the 'boot.elf', * 52 * the 'arch_info.bin' (at the same addresses as the 'boot.elf' and * 53 * the 'arch_info.bin' in the memory bank of the cluster(0,0), and * 54 * the kernel image (at address 0x0), * 55 * - initializes the 'boot_info_t' structure of the local cluster, * 56 * - activate all other cores in the same cluster (CPi). * 57 * - wait local CPi completion reports on a local barrier. * 58 * - report completion on the global barrier. * 43 * All cores contribute to the boot procedure, but all cores are not * 44 * simultaneously active: * 45 * - in a first phase, only core[0][0] is running (core 0 in cluster 0). * 46 * - in a second phase, only core[cxy][0] is running in each cluster. * 47 * - in the last phase, all core[cxy][lid] are running. * 59 48 * * 60 * 3) The boot-loader third phase is executed in parallel by all cores. * 61 * In each cluster (i) the CP0 * 62 * - activates the other cores of cluster(i), * 63 * - blocks on the local barrier waiting for all local CPi to report * 64 * completion on the local barrier, * 65 * - moves the local kernel image from the temporary location to the * 66 * address 0x0, (erasing the preloader code).
* 49 * Finally, all cores jump to the kernel_init() function that makes the * 50 * actual kernel initialisation. * 67 51 * * 68 * 4) All cores jump to kern_init() (maybe not at the same time). * 52 * Implementation note: * * * 53 * To allow each core to use the local copy of both the boot code and the * 54 * kernel code, the boot-loader builds a minimal and temporary BPT (Boot * 55 * Page Table) containing only two big pages: page[0] maps the kernel code, * 56 * and page[1] maps the boot code. * 69 57 ****************************************************************************/ 70 58 … … 96 84 ****************************************************************************/ 97 85 86 // the Boot Page Table contains two PTE1, and should be aligned on 8 Kbytes 87 88 uint32_t boot_pt[2] __attribute__((aligned(8192))); 89 98 90 // synchronization variables. 99 91 100 volatile boot_remote_spinlock_t tty0_lock; // protect TTY0 access101 volatile boot_remote_barrier_t global_barrier; // synchronize CP0 cores102 volatile boot_remote_barrier_t local_barrier; // synchronize cores in one cluster103 uint32_t active_cp0s_nr; // number of expected CP0s92 volatile boot_remote_spinlock_t tty0_lock; // protect TTY0 access 93 volatile boot_remote_barrier_t global_barrier; // synchronize CP0 cores 94 volatile boot_remote_barrier_t local_barrier; // synchronize cores in one cluster 95 uint32_t active_cores_nr; // number of expected CP0s 104 96 105 97 // kernel segments layout variables … … 114 106 uint32_t kernel_entry; // kernel entry point 115 107 116 // Functions called by boot_entry.S108 // Functions 117 109 118 110 extern void boot_entry( void ); // boot_loader entry point … … 738 730 739 731 /********************************************************************************* 740 * This function is called by all CP0 to activate the other CPi cores.732 * This function is called by all CP0s to activate the other CPi cores. 741 733 * @ boot_info : pointer to local 'boot_info_t' structure. 742 734 *********************************************************************************/ … … 761 753 } // boot_wake_local_cores() 762 754 755 /********************************************************************************* 756 * This function is called by all core[cxy][0] to initialize the Boot Page Table: 757 * map two local big pages for the boot code and kernel code. 758 * @ cxy : local cluster identifier. 759 *********************************************************************************/ 760 void boot_page_table_init( cxy_t cxy ) 761 { 762 // set PTE1 in slot[0] for kernel code 763 uint32_t kernel_attr = 0x8A800000; // flags : V,C,X,G 764 uint32_t kernel_ppn1 = (cxy << 20) >> 9; // big physical page index == 0 765 boot_pt[0] = kernel_attr | kernel_ppn1; 766 767 // set PTE1 in slot[1] for boot code (no global flag) 768 uint32_t boot_attr = 0x8A000000; // flags : V,C,X 769 uint32_t boot_ppn1 = ((cxy << 20) + 512) >> 9; // big physical page index == 1 770 boot_pt[1] = boot_attr | boot_ppn1; 771 } 772 773 /********************************************************************************* 774 * This function is called by all cores to activate the instruction MMU, 775 * and use the local copy of boot code.
776 *********************************************************************************/ 777 void boot_activate_ins_mmu( cxy_t cxy ) 778 { 779 // set mmu_ptpr register 780 uint32_t ptpr = ((uint32_t)boot_pt >> 13) | (cxy << 19); 781 asm volatile ( "mtc2 %0, $0 \n" : : "r" (ptpr) ); 782 783 // set ITLB bit in mmu_mode 784 asm volatile ( "mfc2 $26, $1 \n" 785 "ori $26, $26, 0x8 \n" 786 "mtc2 $26, $1 \n" ); 787 } 763 788 764 789 /********************************************************************************* … … 776 801 if (lid == 0) 777 802 { 778 /************************************ **************** 779 * PHASE A : only CP0 in boot cluster executes it 780 *************************************************** / 781 if (cxy == BOOT_CORE_CXY) 803 /*********************************************************** 804 * PHASE Sequential : only core[0][0] executes it 805 **********************************************************/ 806 if (cxy == 0) 782 807 { 783 808 boot_printf("\n[BOOT] core[%x,%d] enters at cycle %d\n", … … 833 858 boot_check_core(boot_info, lid); 834 859 835 // Activate other CP0s / get number of active CP0s 836 active_cp0s_nr = boot_wake_all_cp0s() + 1; 860 // TO BE DONE 861 // core[0][0] identity maps two big pages for the boot and kernel code, 862 // boot_page_table_init( 0 ); 863 864 // TO BE DONE 865 // core[0][0] activates the instruction MMU to use the local copy of boot code 866 // boot_activate_ins_mmu( 0 ); 867 868 // Activate other core[cxy][0] / get number of activated cores 869 active_cores_nr = boot_wake_all_cp0s() + 1; 837 870 838 871 // Wait until all clusters (i.e all CP0s) ready to enter kernel. 839 872 boot_remote_barrier( XPTR( BOOT_CORE_CXY , &global_barrier ) , 840 active_cp0s_nr );873 active_cores_nr ); 841 874 842 875 // activate other local cores 843 876 boot_wake_local_cores( boot_info ); 844 845 // display address extensions846 // uint32_t cp2_data_ext;847 // uint32_t cp2_ins_ext;848 // asm volatile( "mfc2 %0, $24" : "=&r" (cp2_data_ext) );849 // asm volatile( "mfc2 %0, $25" : "=&r" (cp2_ins_ext) );850 // boot_printf("\n[BOOT] core[%x,%d] CP2_DATA_EXT = %x / CP2_INS_EXT = %x\n",851 // cxy , lid , cp2_data_ext , cp2_ins_ext );852 877 853 878 // Wait until all local cores in cluster ready … … 855 880 boot_info->cores_nr ); 856 881 } 857 /****************************************************************** 858 * PHASE B : all CP0s other than CP0 in boot cluster execute it 859 ***************************************************************** / 882 /************************************************************************** 883 * PHASE partially parallel : all core[cxy][0] with (cxy != 0) execute it 884 **************************************************************************/ 860 885 else 861 886 { 862 // at this point, all INSTRUCTION address extension registers 863 // point on cluster(0,0), but the DATA extension registers point 864 // already on the local cluster to use the local stack. 865 // To access the bootloader global variables we must first copy 866 // the boot code (data and instructions) in the local cluster. 887 // at this point, the DATA extension registers point 888 // already on the local cluster cxy to use the local stack, 889 // but all cores must access the code stored in cluster 0 890 891 // Each CP0 copies the boot code (data and instructions) 892 // from the cluster 0 to the local cluster.
867 893 boot_remote_memcpy( XPTR( cxy , BOOT_BASE ), 868 894 XPTR( BOOT_CORE_CXY , BOOT_BASE ), 869 895 BOOT_MAX_SIZE ); 870 896 871 // from now, it is safe to refer to the boot code global variables897 // from now, it is safe to refer to the boot global variables 872 898 boot_printf("\n[BOOT] core[%x,%d] replicated boot code at cycle %d\n", 873 899 cxy , lid , boot_get_proctime() ); 874 900 875 // switch to the INSTRUCTION local memory space, to avoid contention. 876 // asm volatile("mtc2 %0, $25" :: "r"(cxy)); 877 878 // Copy the arch_info.bin file into the local memory. 901 // TO BE DONE 902 // Each core identity maps two big pages for the boot and kernel code, 903 // boot_page_table_init( cxy ); 904 905 // Each core activates the instruction MMU to use the local copy of boot code 906 // boot_activate_ins_mmu( cxy ); 907 908 // Each CP0 copies the arch_info.bin into the local memory. 879 909 boot_remote_memcpy(XPTR(cxy, ARCHINFO_BASE), 880 910 XPTR(BOOT_CORE_CXY, ARCHINFO_BASE), … … 884 914 cxy , lid , boot_get_proctime() ); 885 915 886 // Copy the kcode segment into local memory916 // Each CP0 copies the kcode segment into local memory 887 917 boot_remote_memcpy( XPTR( cxy , seg_kcode_base ), 888 918 XPTR( BOOT_CORE_CXY , seg_kcode_base ), 889 919 seg_kcode_size ); 890 920 891 // Copy the kdata segment into local memory921 // Each CP0 copies the kdata segment into local memory 892 922 boot_remote_memcpy( XPTR( cxy , seg_kdata_base ), 893 923 XPTR( BOOT_CORE_CXY , seg_kdata_base ), 894 924 seg_kdata_size ); 895 925 896 // Copy the kentry segment into local memory 926 // TO BE REMOVED 927 // Each CP0 copies the kentry segment into local memory 897 928 boot_remote_memcpy( XPTR( cxy , seg_kentry_base ), 898 929 XPTR( BOOT_CORE_CXY , seg_kentry_base ), … … 902 933 cxy , lid , boot_get_proctime() ); 903 934 904 // Get local boot_info_t structure base address.935 // Each CP0 gets the local boot_info_t structure base address. 905 936 boot_info = (boot_info_t*)seg_kdata_base; 906 937 907 // Initialize local boot_info_t structure.938 // Each CP0 initializes local boot_info_t structure. 908 939 boot_info_init( boot_info , cxy ); 909 940 … … 911 942 cxy , lid , boot_get_proctime() ); 912 943 913 // Check core information.944 // Each CP0 checks core information.
914 945 boot_check_core( boot_info , lid ); 915 946 916 // get number of active clusters from BOOT_CORE cluster917 uint32_t count = boot_remote_lw( XPTR( BOOT_CORE_CXY , &active_cp0s_nr ) );918 919 // Wait until all clusters (i.e all CP0s) ready to enter kernel947 // Each CP0 gets the number of active clusters from BOOT_CORE cluster 948 uint32_t count = boot_remote_lw( XPTR( 0 , &active_cores_nr ) ); 949 950 // Wait until all clusters (i.e all CP0s) ready 920 951 boot_remote_barrier( XPTR( BOOT_CORE_CXY , &global_barrier ) , count ); 921 952 922 953 // activate other local cores 923 954 boot_wake_local_cores( boot_info ); 924 925 // display address extensions926 // uint32_t cp2_data_ext;927 // uint32_t cp2_ins_ext;928 // asm volatile( "mfc2 %0, $24" : "=&r" (cp2_data_ext) );929 // asm volatile( "mfc2 %0, $25" : "=&r" (cp2_ins_ext) );930 // boot_printf("\n[BOOT] core[%x,%d] CP2_DATA_EXT = %x / CP2_INS_EXT = %x\n",931 // cxy , lid , cp2_data_ext , cp2_ins_ext );932 955 933 956 // Wait until all local cores in cluster ready … … 938 961 else 939 962 { 940 /*************************************************************** 941 * PHASE C: all non CP0 cores in all clusters execute it 942 ************************************************************** / 943 944 // Switch to the INSTRUCTIONS local memory space 945 // to avoid contention at the boot cluster. 946 asm volatile("mtc2 %0, $25" :: "r"(cxy)); 963 /*********************************************************************** 964 * PHASE fully parallel : all cores[cxy][lid] with (lid != 0) execute it 965 **********************************************************************/ 966 967 // TO BE DONE 968 // each core activates the instruction MMU to use the local copy of the boot code 969 // boot_activate_ins_mmu( cxy ); 947 970 948 971 // Get local boot_info_t structure base address. … … 952 975 boot_check_core(boot_info, lid); 953 976 954 // display address extensions955 // uint32_t cp2_data_ext;956 // uint32_t cp2_ins_ext;957 // asm volatile( "mfc2 %0, $24" : "=&r" (cp2_data_ext) );958 // asm volatile( "mfc2 %0, $25" : "=&r" (cp2_ins_ext) );959 // boot_printf("\n[BOOT] core[%x,%d] CP2_DATA_EXT = %x / CP2_INS_EXT = %x\n",960 // cxy , lid , cp2_data_ext , cp2_ins_ext );961 962 977 // Wait until all local cores in cluster ready 963 978 boot_remote_barrier( XPTR( cxy , &local_barrier ) , boot_info->cores_nr ); 964 979 } 965 980 981 // the "kernel_entry" global variable, set by boot_kernel_load(), defines 982 // the address of the kernel_init() function. 966 983 // Each core initialises the following registers before jumping to kernel: 967 // - sp_29 : stack pointer on idle thread, 968 // - c0_sr : reset BEV bit 969 // - a0_04 : pointer on boot_info structure 970 // - c0_ebase : kentry_base (and jump to kernel_entry. 971 984 // - gr_29 : stack pointer / kernel stack allocated in idle thread descriptor, 985 // - c0_sr : status register / reset BEV bit 986 // - gr_04 : kernel_init() argument / pointer on boot_info structure 987 // - c0_ebase : kentry_base 988 989 // compute "sp" from base address of idle thread descriptors array and lid. 972 990 // The array of idle-thread descriptors is allocated in the kdata segment, 973 // just after the boot_info structure 974 uint32_t sp; 991 // just after the boot_info structure.
975 992 uint32_t base; 976 993 uint32_t offset = sizeof( boot_info_t ); 977 994 uint32_t pmask = CONFIG_PPM_PAGE_MASK; 978 995 uint32_t psize = CONFIG_PPM_PAGE_SIZE; 979 980 // compute base address of idle thread descriptors array981 996 if( offset & pmask ) base = seg_kdata_base + (offset & ~pmask) + psize; 982 997 else base = seg_kdata_base + offset; 983 984 // compute stack pointer 985 sp = base + ((lid + 1) * CONFIG_THREAD_DESC_SIZE) - 16; 998 uint32_t sp = base + ((lid + 1) * CONFIG_THREAD_DESC_SIZE) - 16; 999 1000 // get "ebase" from boot_info 1001 uint32_t ebase = boot_info->kentry_base; 1002 1003 // TO BE DONE 1004 // The cp0_ebase will not be set by the assembly code below 1005 // once the kentry segment is removed => done in kernel init 986 1006 987 1007 asm volatile( "mfc0 $27, $12 \n" … … 997 1017 : "r"(boot_info) , 998 1018 "r"(sp) , 999 "r"( boot_info->kentry_base) ,1019 "r"(ebase) , 1000 1020 "r"(kernel_entry) 1001 1021 : "$26" , "$27" , "$29" , "$4" ); -
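The two PTE1 values built by boot_page_table_init() above follow the TSAR big-page format: attribute flags in the high bits, big physical page index in the low bits. The standalone sketch below recomputes them for an arbitrary cluster; the cxy value and the main() wrapper are illustrative only, while the constants come from the code above.

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint32_t cxy = 0x15;     /* hypothetical cluster identifier */

        /* big page 0 of the cluster : kernel code (V,C,X,G flags) */
        uint32_t kernel_pte1 = 0x8A800000 | ((cxy << 20) >> 9);

        /* big page 1 of the cluster : boot code (V,C,X flags, no G) */
        uint32_t boot_pte1   = 0x8A000000 | (((cxy << 20) + 512) >> 9);

        printf("kernel PTE1 = %08x / boot PTE1 = %08x\n", kernel_pte1, boot_pte1);
        return 0;
    }

With 512 small pages of 4 Kbytes per big page, (cxy << 20) is the index of the first small page of cluster cxy, and the >> 9 converts a small-page index to a big-page index.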
trunk/boot/tsar_mips32/boot_entry.S
r439 r623 23 23 24 24 /********************************************************************************************** 25 * This file contains the entry point of the ALMOS-MK boot-loader for TSAR architecture .*26 * It supports a generic multi-clusters / multi-processors architecture*25 * This file contains the entry point of the ALMOS-MK boot-loader for TSAR architecture, * 26 * that is a generic multi-clusters / multi-processors architecture. * 27 27 * * 28 28 * - The number of clusters is defined by the (X_SIZE, Y_SIZE) parameters in the * … … 31 31 * hard_config.h file (up to 4 processors per cluster). * 32 32 * * 33 * This assembly code is executed by all cores. It has 2 versions (in order to see if the * 34 * contention created by the ARCHINFO core descriptor table scanning loops is acceptable): * 35 * with or without the assumption that the core hardware identifier gid has a fixed format: * 33 * This assembly code is executed by all cores, but not at the same time, because all cores * 34 * are not simultaneously activated. It makes the assumption that the CP0 register containing * 35 * the core gid (global hardware identifier) has a fixed format: * 36 * gid == (((x << Y_WIDTH) + y) << P_WIDTH) + lid * 36 37 * * 37 * - Version with fixed format: gid == (((x << Y_WIDTH) + y) << PADDR_WIDTH) + lid * 38 * It does 3 things: * 39 * + It initializes the stack pointer depending on the lid extracted from the gid, * 40 * using the BOOT_STACK_BASE and BOOT_STACK_SIZE parameters defined in the * 41 * 'boot_config.h' file, * 42 * + It changes the value of the address extension registers using the cxy extracted * 43 * from the gid, * 44 * + It jumps to the boot_loader() function defined in the 'boot.c' file and passes 2 * 45 * arguments which are the cxy and lid of each core to this function. * 46 * * 47 * - Version without fixed format * 48 * It has to perform an additional step in order to extract the (cxy,lid) values from the * 49 * arch_info.bin structure that has been loaded in the cluster (0,0) memory by the bscpu. * 50 * + Each core other than the bscpu scans the core descriptor table in the arch_info.bin * 51 * structure to make an associative search on the (gid), and get the (cxy,lid). * 52 * + It initializes the stack pointer depending on the lid, using the BOOT_STACK_BASE * 53 * and BOOT_STACK_SIZE parameters defined in the 'boot_config.h' file, * 54 * + It changes the value of the address extension registers using cxy obtained * 55 * previously, * 56 * + It jumps to the boot_loader() function defined in the 'boot.c' file and passes 2 * 57 * arguments which are the cxy and lid of each core to this function. * 38 * It does 3 things: * 39 * - It initializes the stack pointer depending on the lid extracted from the gid, * 40 * using the BOOT_STACK_BASE and BOOT_STACK_SIZE parameters defined in the * 41 * 'boot_config.h' file, * 42 * - It changes the value of the DATA address extension register using the cxy extracted * 43 * from the gid, * 44 * - It jumps to the boot_loader() function defined in the 'boot.c' file, passing the two * 45 * arguments (cxy and lid). * 58 46 *********************************************************************************************/ 59 47 … … 72 60 boot_entry: 73 61 74 #if USE_FIXED_FORMAT 75 76 /************* 77 * VERSION 1 * 78 *************/ 79 80 /* 81 * Get (cxy, lid) values from gid contained in coprocessor0 register.
82 */ 62 /* Get (cxy, lid) values from gid contained in CP0 register */ 83 63 84 64 mfc0 k0, CP0_PROCID … … 87 67 srl t2, k0, P_WIDTH /* t2 <= cxy */ 88 68 89 /* Initialize stack pointer from previously retrievedlid value */69 /* Initialize stack pointer from lid value */ 90 70 91 71 la t0, BOOT_STACK_BASE /* t0 <= BOOT_STACK_BASE */ … … 93 73 multu k1, t1 94 74 mflo k0 /* k0 <= BOOT_STACK_SIZE * lid */ 95 subu sp, t0, k0 /* P[cxy,lid] s tack top initialized*/75 subu sp, t0, k0 /* P[cxy,lid] sp initialized */ 96 76 97 /* Switch to local DSPACE by changing the value of the address extension register s*/77 /* Switch to local DSPACE by changing the value of the address extension register */ 98 78 99 79 mtc2 t2, CP2_DATA_PADDR_EXT 100 80 101 /* Jump to boot_loader() function after passing 2 arguments in the registers*/81 /* Jump to boot_loader() function after passing (cxy,lid) arguments in the registers */ 102 82 103 83 or a0, zero, t1 /* a0 <= lid */ … … 107 87 nop 108 88 109 #else110 111 /*************112 * VERSION 2 *113 *************/114 115 /* Test if this is bscpu */116 117 mfc0 k0, CP0_PROCID118 andi k0, k0, 0xFFF /* k0 <= gid */119 120 li t1, BOOT_CORE_GID /* t1 <= bscpu gid */121 or t3, zero, zero /* t3 <= bscpu lid = 0 */122 beq k0, t1, bscpu_exit /* if bscpu, skip scanning core tbl */123 li t4, BOOT_CORE_CXY /* t4 <= bscpu cxy */124 125 /* Get base address of the core descriptor table in 'arch_info.bin' file */126 127 la t0, ARCHINFO_BASE /* t0 <= ARCHINFO_BASE */128 li t1, 0x80 /* t1 <= ARCHINFO_HEADER_SIZE */129 addu t2, t0, t1 /* t2 <= ARCHINFO_CORE_BASE */130 131 /* scan the core descriptor table if this is not bscpu. TODO If not found? */132 133 li t3, 0x8 /* t3 <= ARCHINFO_CORE_SIZE */134 135 scanning_core_table:136 lw t1, 0(t2) /* t1 <= archinfo_core.gid */137 bne t1, k0, scanning_core_table /* if (t1 != k0) => loop */138 addu t2, t2, t3 /* t2 <= @ next archinfo_core */139 140 /* Get (cxy, lid) values from the found core descriptor */141 142 lw t3, -8(t2) /* t3 <= lid */143 lw t4, -4(t2) /* t4 <= cxy */144 145 /* Initialize stack pointer from previously retrieved lid value */146 147 bscpu_exit:148 la t0, BOOT_STACK_BASE /* t0 <= BOOT_STACK_BASE */149 li k1, BOOT_STACK_SIZE /* k1 <= BOOT_STACK_SIZE */150 multu k1, t3151 mflo k0 /* k0 <= BOOT_STACK_SIZE * lid */152 subu sp, t0, k0 /* P[cxy,lid] stack top initialized */153 154 /* Switch to local DSPACE by changing the value of the address extension registers */155 156 mtc2 t4, CP2_DATA_PADDR_EXT157 158 /* Jumping to boot_loader() function after passing 2 arguments in registers */159 160 or a0, zero, t3 /* a0 <= lid */161 or a1, zero, t4 /* a1 <= cxy */162 la ra, boot_loader163 jr ra164 nop165 166 #endif167 168 89 .end boot_entry 169 90 -
trunk/hal/generic/hal_gpt.h
r587 r623 77 77 /**************************************************************************************** 78 78 * This function allocates physical memory for first level page table (PT1), 79 * and initializes the page table descriptor.79 * and initializes the GPT descriptor, creating an empty GPT. 80 80 **************************************************************************************** 81 81 * @ gpt : pointer on generic page table descriptor. … … 126 126 127 127 /**************************************************************************************** 128 * This function map a - local or remote - GPT entry identified by its VPN, from values129 * defined by the <ppn> and <attr> arguments. It allocates physical memory in remote130 * cluster for the GPT PT2, using a RPC_PMEM_GET_PAGES,if required.131 **************************************************************************************** 132 * @ gpt 128 * This function maps in a - local or remote - GPT identified by the <gpt_xp> argument 129 * an entry identified by the <vpn> argument, as defined by <ppn> and <attr> arguments. 130 * It allocates physical memory for the GPT PT2, using a RPC_PMEM_GET_PAGES if required. 131 **************************************************************************************** 132 * @ gpt_xp : [in] pointer on the page table 133 133 * @ vpn : [in] virtual page number 134 134 * @ attr : [in] generic attributes … … 154 154 /**************************************************************************************** 155 155 * This function returns in the <attr> and <ppn> arguments the current values stored 156 * in a - local or remote - GPT entry, identified by the <gpt> and <vpn> arguments.156 * in a - local or remote - GPT entry, identified by the <gpt> and <vpn> arguments. 157 157 **************************************************************************************** 158 158 * @ gpt_xp : [in] extended pointer on the page table -
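As a usage illustration of the hal_gpt_set_pte() API described above, mapping one small page in a possibly remote GPT could look like the following sketch; the vpn, ppn and flag combination are arbitrary examples, and the GPT_* generic flags are the ones appearing elsewhere in this changeset.

    // map virtual page 0x400 on physical page ppn in the GPT of a (remote) process
    error_t error = hal_gpt_set_pte( XPTR( cxy , &process->vmm.gpt ),       // gpt_xp
                                     0x400,                                 // vpn
                                     GPT_MAPPED | GPT_SMALL | GPT_CACHABLE, // attr
                                     ppn );
    if( error ) printk("\n[ERROR] in %s : cannot map page\n", __FUNCTION__ );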
trunk/hal/generic/hal_special.h
r619 r623 31 31 32 32 struct thread_s; 33 struct gpt_s; 33 34 34 35 /////////////////////////////////////////////////////////////////////////////////////////// … … 37 38 // ALMOS-MKH uses the following API to access the core protected registers. 38 39 /////////////////////////////////////////////////////////////////////////////////////////// 40 41 /***************************************************************************************** 42 * This function initialises - for architectures requiring it - the protected register(s) 43 * containing the kernel_entry address(es) for interrupts / exceptions / syscalls. 44 ****************************************************************************************/ 45 void hal_set_kentry( void ); 46 47 /***************************************************************************************** 48 * This function initializes - for architectures requiring it - the MMU registers 49 * as required by the target architecture to execute the kernel threads attached 50 * to kernel process zero. It is called by all cores in the kernel_init() function. 51 ***************************************************************************************** 52 * @ gpt : local pointer on the kernel page table descriptor. 53 ****************************************************************************************/ 54 void hal_mmu_init( struct gpt_s * gpt ); 39 55 40 56 /***************************************************************************************** … … 103 119 /***************************************************************************************** 104 120 * This function makes an uncachable read to a 32 bits variable in local memory. 121 ***************************************************************************************** 105 122 * @ ptr : pointer on the variable 106 123 * @ returns the value … … 137 154 /***************************************************************************************** 138 155 * This function returns information on MMU exceptions : 156 ***************************************************************************************** 139 157 * @ mmu_ins_excp_code : [out] instruction fetch exception code 140 158 * @ mmu_ins_bad_vaddr : [out] instruction fetch faulty virtual address -
trunk/hal/generic/hal_vmm.h
r457 r623 1 1 /* 2 * hal_vmm.h - GenericVirtual Memory Manager initialisation 2 * hal_vmm.h - Kernel Virtual Memory Manager initialisation 3 3 * 4 * Authors Alain Greiner (2016,2017 )4 * Authors Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 22 22 */ 23 23 24 #ifndef _HAL_PPM_H_25 #define _HAL_PPM_H_24 #ifndef _HAL_VMM_H_ 25 #define _HAL_VMM_H_ 26 26 27 27 #include <hal_kernel_types.h> … … 29 29 30 30 ///////////////////////////////////////////////////////////////////////////////////////// 31 // GenericVirtual Memory Manager initialisation (implementation in hal_vmm.c)31 // Kernel Virtual Memory Manager initialisation (implementation in hal_vmm.c) 32 32 // 33 33 // Any arch-specific implementation must implement this API. … … 36 36 /**** Forward declarations ****/ 37 37 38 struct vmm_s; 38 struct process_s; 39 struct boot_info_s; 39 40 40 41 /**************************************************************************************** 41 * This function makes all architecture specific initialisations 42 * in the VSL (Virtual segments List) and in the GPT (Generic Page Table). 42 * Depending on the hardware architecture, this function creates (i.e. allocates memory 43 * and initializes) the VSL (Virtual segments List) and the GPT (Generic Page Table), 44 * for all vsegs required by the kernel process. 43 45 **************************************************************************************** 44 * @ vmm : pointer on virtual memory manager.46 * @ info : local pointer on boot_info (for kernel segments base & size). 45 47 * @ return 0 if success / return ENOMEM if failure. 46 48 ***************************************************************************************/ 47 error_t hal_vmm_init( struct vmm_s * vmm);49 error_t hal_vmm_kernel_init( struct boot_info_s * info ); 48 50 49 #endif /* HAL_PPM_H_ */ 51 /**************************************************************************************** 52 * Depending on the hardware architecture, this function updates the VMM of a user 53 * process identified by the <process> argument. It registers in VSL and GPT all 54 * kernel vsegs required by this architecture. 55 **************************************************************************************** 56 * @ process : local pointer on user process descriptor. 57 * @ return 0 if success / return ENOMEM if failure. 58 ***************************************************************************************/ 59 error_t hal_vmm_kernel_update( struct process_s * process ); 60 61 #endif /* HAL_VMM_H_ */ -
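The intended division of labor between the two functions of this new API, according to the descriptions in the TSAR implementation further down this changeset, is sketched below (the comments on the call sites paraphrase those descriptions):

    // once per cluster, during kernel_init() (via process_zero_init()),
    // to build the kernel VSL and GPT:
    error = hal_vmm_kernel_init( info );

    // later, from vmm_init(), for each user process created in this cluster,
    // to replicate the kernel vsegs into the user VSL and GPT:
    error = hal_vmm_kernel_update( process );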
trunk/hal/tsar_mips32/core/hal_gpt.c
r611 r623 141 141 #endif 142 142 143 // check page size 144 assert( (CONFIG_PPM_PAGE_SIZE == 4096) , 145 "for TSAR, the page size must be 4 Kbytes\n" ); 143 // check page size 144 assert( (CONFIG_PPM_PAGE_SIZE == 4096) , "for TSAR, the page size must be 4 Kbytes\n" ); 146 145 147 146 // allocates 2 physical pages for PT1 … … 287 286 vpn_t vpn; 288 287 289 assert( (process != NULL) , "NULL process pointer\n"); 288 // check argument 289 assert( (process != NULL) , "NULL process pointer\n"); 290 290 291 291 // get pointer on gpt … … 295 295 pt1 = (uint32_t *)gpt->ptr; 296 296 297 printk("\n***** Generic Page Table for process %x : &gpt = %x / &pt1 = %x\n\n",297 printk("\n***** Tsar Page Table for process %x : &gpt = %x / &pt1 = %x\n\n", 298 298 process->pid , gpt , pt1 ); 299 299 … … 334 334 335 335 336 ///////////////////////////////////////////////////////////////////////////////////// 337 // For the TSAR architecture, this function allocates a first level PT1 (8 Kbytes), 338 // and maps a single big page for the kernel code segment in slot[0]. 339 ///////////////////////////////////////////////////////////////////////////////////// 340 void hal_gpt_build_kpt( cxy_t cxy, 341 gpt_t * gpt ) 342 { 343 error_t error; 344 345 // allocate memory for one gpt 346 error = hal_gpt_create( gpt ); 347 348 if( error ) 349 { 350 printk("\n[PANIC] in %s : cannot allocate kernel GPT in cluster %x\n", 351 __FUNCTION__ , cxy ); 352 hal_core_sleep(); 353 } 354 355 // compute attr and ppn for one PTE1 356 uint32_t attr = 0xCA800000; // bits : V,T,C,X,G 357 uint32_t ppn = (cxy << 20) >> 9; 358 359 // set PTE1 360 error = hal_gpt_set_pte( XPTR( cxy , gpt ) , 0 , attr , ppn ); 361 362 if( error ) 363 { 364 printk("\n[PANIC] in %s : cannot initialize kernel GPT in cluster %x\n", 365 __FUNCTION__ , cxy ); 366 hal_core_sleep(); 367 } 368 } 369 336 370 ////////////////////////////////////////// 337 371 error_t hal_gpt_set_pte( xptr_t gpt_xp, … … 390 424 if( small == 0 ) // map a big page in PT1 391 425 { 392 assert( (pte1 == 0) , 393 "try to set a big page in a mapped PT1 entry / PT1[%d] = %x\n", ix1 , pte1 ); 394 426 427 // check PT1 entry not mapped 428 assert( (pte1 == 0) , "try to set a big page in a mapped PT1 entry\n" ); 429 430 // check VPN aligned 431 assert( (ix2 == 0) , "illegal vpn for a big page\n" ); 432 433 // check PPN aligned 434 assert( ((ppn & 0x1FF) == 0) , "illegal ppn for a big page\n" ); 435 395 436 // set the PTE1 value in PT1 396 437 pte1 = (tsar_attr & TSAR_MMU_PTE1_ATTR_MASK) | ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK); -
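The two asserts added to the big-page branch of hal_gpt_set_pte() encode the 2 Mbytes granularity of a TSAR big page (512 small pages of 4 Kbytes). A quick standalone check, with illustrative values:

    #include <stdint.h>
    #include <assert.h>

    int main( void )
    {
        uint32_t vpn = 0x00000400;   /* example vpn : multiple of 512, so ix2 == 0  */
        uint32_t ppn = 0x00100000;   /* example ppn : first small page of cluster 1 */

        assert( (vpn & 0x1FF) == 0 );   /* legal vpn for a big page */
        assert( (ppn & 0x1FF) == 0 );   /* legal ppn for a big page */
        return 0;
    }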
trunk/hal/tsar_mips32/core/hal_special.c
r619 r623 33 33 struct thread_s; 34 34 35 36 ////////////////////////////////////////////////////////////////////////////////// 37 // Extern global variables 38 ////////////////////////////////////////////////////////////////////////////////// 39 40 extern cxy_t local_cxy; 41 extern void hal_kentry_enter( void ); 42 43 ///////////////////////////////////////////////////////////////////////////////// 44 // For the TSAR architecture, this function registers the physical address of 45 // the first level page table (PT1) in the PTPR register. 46 // It activates the instruction MMU, and de-activates the data MMU. 47 ///////////////////////////////////////////////////////////////////////////////// 48 void hal_mmu_init( gpt_t * gpt ) 49 { 50 51 // set PT1 base address in mmu_ptpr register 52 uint32_t ptpr = (((uint32_t)gpt->ptr) >> 13) | (local_cxy << 19); 53 asm volatile ( "mtc2 %0, $0 \n" : : "r" (ptpr) ); 54 55 // set ITLB | ICACHE | DCACHE bits in mmu_mode register 56 asm volatile ( "ori $26, $0, 0xB \n" 57 "mtc2 $26, $1 \n" ); 58 } 59 60 //////////////////////////////////////////////////////////////////////////////// 61 // For the TSAR architecture, this function registers the address of the 62 // hal_kentry_enter() function in the MIPS32 cp0_ebase register. 63 //////////////////////////////////////////////////////////////////////////////// 64 void hal_set_kentry( void ) 65 { 66 uint32_t kentry = (uint32_t)(&hal_kentry_enter); 67 68 asm volatile("mtc0 %0, $15, 1" : : "r" (kentry) ); 69 } 70 35 71 //////////////////////////////// 36 72 inline gid_t hal_get_gid( void ) -
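The PTPR encoding used by hal_mmu_init() above packs the PT1 base address and the cluster identifier into one register; a worked example with hypothetical values:

    #include <stdint.h>
    #include <assert.h>

    int main( void )
    {
        uint32_t pt1_base = 0x00004000;   /* hypothetical PT1 base, 8 Kbytes aligned */
        uint32_t cxy      = 0x15;         /* hypothetical cluster identifier         */

        /* ptpr = (pt1_base >> 13) | (cxy << 19), as in hal_mmu_init() */
        uint32_t ptpr = (pt1_base >> 13) | (cxy << 19);

        assert( ptpr == 0x00A80002 );
        return 0;
    }

The >> 13 is also why the PT1 must be aligned on an 8 Kbytes boundary.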
trunk/hal/tsar_mips32/core/hal_vmm.c
r587 r623 2 2 * hal_vmm.c - Virtual Memory Manager Initialisation for TSAR 3 3 * 4 * Authors Alain Greiner (2016,2017 )4 * Authors Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 26 26 #include <hal_vmm.h> 27 27 #include <hal_gpt.h> 28 #include <process.h> 28 29 #include <vseg.h> 29 30 #include <xlist.h> … … 32 33 33 34 ////////////////////////////////////////////////////////////////////////////////////////// 34 // This file contains the TSAR specific code to initialize the Virtual Memory Manager. 35 // The "kentry" vseg contains the kernel code executed when a core enter/exit the kernel, 36 // in case of Interrupt, Exception, or Syscall. 37 // For the TSAR architecture, the kernel uses physical addresses, and this code must be 38 // identity mapped. The following function is called by the generic vmm_init() function 39 // and identity map all pages of the "kentry" vseg. 40 // We dont take the locks protecting the VSL and the GPT, because there is no concurrent 41 // accesses to VMM during VMM initialization. 35 // This file contains the TSAR specific code used to initialize the kernel process VMM, 36 // or to update a user process VMM with information related to the kernel vsegs. 37 // As the TSAR architecture does not use the DATA MMU, but uses only the DATA extension 38 // address register to access local and remote kernel data, the kernel VSL contains only 39 // one "kcode" segment, and the kernel GPT contains only one big page in PT1[0] slot. 40 ////////////////////////////////////////////////////////////////////////////////////////// 41 42 // extern global variables 43 extern process_t process_zero; 44 45 ////////////////////////////////////////////////////////////////////////////////////////// 46 // This function is called by the process_zero_init() function during kernel_init. 47 // It initializes the VMM of the kernel process_zero (containing all kernel threads) 48 // in the local cluster.
49 ////////////////////////////////////////////////////////////////////////////////////////// 50 error_t hal_vmm_kernel_init( boot_info_t * info ) 46 51 { 47 error_t error;52 error_t error; 48 53 49 // map all pages of "kentry" vseg 50 uint32_t vpn; 51 uint32_t attr; 52 attr = GPT_MAPPED | GPT_SMALL | GPT_EXECUTABLE | GPT_CACHABLE | GPT_GLOBAL; 53 for( vpn = CONFIG_VMM_KENTRY_BASE; 54 vpn < (CONFIG_VMM_KENTRY_BASE + CONFIG_VMM_KENTRY_SIZE); vpn++ ) 54 // get pointer on kernel GPT 55 gpt_t * gpt = &process_zero.vmm.gpt; 56 57 // get cluster identifier 58 cxy_t cxy = local_cxy; 59 60 // allocate memory for kernel GPT 61 error = hal_gpt_create( gpt ); 62 63 if( error ) 55 64 { 56 error = hal_gpt_set_pte( XPTR( local_cxy , &vmm->gpt ), 57 vpn, 58 attr, 59 (local_cxy<<20) | (vpn & 0xFFFFF) ); 60 61 if( error ) return error; 65 printk("\n[PANIC] in %s : cannot allocate kernel GPT in cluster %x\n", 66 __FUNCTION__ , cxy ); 67 hal_core_sleep(); 62 68 } 63 69 64 // scan the VSL to found the "kentry" vseg65 xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );66 xptr_t iter_xp;67 xptr_t vseg_xp; 68 vseg_t * vseg;69 bool_t found = false;70 71 XLIST_FOREACH( root_xp , iter_xp)70 // compute attr and ppn for one PTE1 71 uint32_t attr = 0x8A800000; // bits : V,C,X,G 72 uint32_t ppn = (cxy << 20) >> 9; // physical page index is 0 73 74 // set PTE1 in slot[0] 75 error = hal_gpt_set_pte( XPTR( cxy , gpt ) , 0 , attr , ppn ); 76 77 if( error ) 72 78 { 73 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 74 vseg = (vseg_t *)GET_PTR( vseg_xp ); 75 76 // set the IDENT flag in "kentry" vseg descriptor 77 if( vseg->vpn_base == CONFIG_VMM_KENTRY_BASE ) 78 { 79 vseg->flags |= VSEG_IDENT; 80 found = true; 81 break; 82 } 79 printk("\n[PANIC] in %s : cannot initialize kernel GPT in cluster %x\n", 80 __FUNCTION__ , cxy ); 81 hal_core_sleep(); 83 82 } 84 83 85 if( found == false ) return 0XFFFFFFFF; 86 87 return 0; 84 // create kcode vseg and register it in kernel VSL 85 vseg_t * vseg = vmm_create_vseg( &process_zero, 86 VSEG_TYPE_CODE, 87 info->kcode_base, 88 info->kcode_size, 89 0, 0, // file offset and file size (unused) 90 XPTR_NULL, // no mapper 91 local_cxy ); 92 if( vseg == NULL ) 93 { 94 printk("\n[PANIC] in %s : cannot register vseg to VSL in cluster %x\n", 95 __FUNCTION__ , cxy ); 96 hal_core_sleep(); 97 } 88 98 89 99 } // end hal_vmm_kernel_init() 90 100 101 ////////////////////////////////////////////////////////////////////////////////////////// 102 // This function is called by the vmm_init() function to update the VMM of a user 103 // process identified by the <process> argument. 104 // It registers in the user VSL the "kcode" vseg, registered in the local kernel VSL, 105 // and registers in the user GPT the big page[0] mapped in the local kernel GPT.
106 ////////////////////////////////////////////////////////////////////////////////////////// 107 error_t hal_vmm_kernel_update( process_t * process ) 108 { 109 error_t error; 110 uint32_t attr; 111 uint32_t ppn; 91 112 113 // TODO check ppn value in kernel GPT (must be 0) 114 115 // get cluster identifier 116 cxy_t cxy = local_cxy; 117 118 // get extended pointer on user GPT 119 xptr_t gpt_xp = XPTR( cxy , &process->vmm.gpt ); 120 121 // get ppn and attributes from slot[0] in kernel GPT 122 hal_gpt_get_pte( gpt_xp , 0 , &attr , &ppn ); 123 124 // check ppn and attributes 125 assert( (attr == 0x8A800000) && (ppn == ((cxy << 20) >> 9)), __FUNCTION__, 126 "bad ppn = %x or attr = %x in slot[0] of kernel GPT\n", ppn , attr ); 127 128 // update user GPT : set PTE1 in slot[0] 129 error = hal_gpt_set_pte( gpt_xp , 0 , attr , ppn ); 130 131 if( error ) 132 { 133 printk("\n[ERROR] in %s : cannot update GPT in cluster %x\n", 134 __FUNCTION__ , cxy ); 135 return -1; 136 } 137 138 // get pointer on the unique vseg registered in kernel VSL 139 xptr_t root_xp = XPTR( cxy , &process_zero.vmm.vsegs_root ); 140 vseg_t * vseg = XLIST_FIRST( root_xp , vseg_t , xlist ); 141 142 // check vsegs_nr 143 assert( (process_zero.vmm.vsegs_nr == 1 ) , __FUNCTION__, 144 "bad vsegs number in kernel VSL\n" ); 145 146 // update user VSL : register one new vseg for kcode 147 vseg_t * new = vmm_create_vseg( process, 148 vseg->type, 149 vseg->min, 150 vseg->max - vseg->min, 151 0, 0, // file offset and file size (unused) 152 XPTR_NULL, // no mapper 153 local_cxy ); 154 if( new == NULL ) 155 { 156 printk("\n[ERROR] in %s : cannot update VSL in cluster %x\n", 157 __FUNCTION__ , cxy ); 158 return -1; 159 } 160 } 161 162 -
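For a given cluster, the slot[0] values checked by the assert in hal_vmm_kernel_update() are fully determined; a small numeric check (the cluster identifier is an arbitrary example):

    #include <stdint.h>
    #include <assert.h>

    int main( void )
    {
        uint32_t cxy = 0x15;              /* example cluster identifier */
        uint32_t ppn = (cxy << 20) >> 9;  /* expected slot[0] ppn       */

        /* the attr checked above is the constant 0x8A800000 (V,C,X,G) */
        assert( ppn == (cxy << 11) );     /* 0xA800 for cxy == 0x15     */
        return 0;
    }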
trunk/hal/tsar_mips32/kernel.ld
r570 r623 4 4 * loadable segments, that MUST be identity mapped for the TSAR architecture. 5 5 * 6 * WARNING the seg_kentry_base and seg_kcode_base defined below must be keptcoherent6 * WARNING : the seg_kentry_base and seg_kcode_base defined below must be coherent 7 7 * with the values defined in the boot_config.h file used by the TSAR bootloader. 8 8 **************************************************************************************/ -
trunk/kernel/fs/devfs.c
r614 r623 3 3 * 4 4 * Author Mohamed Lamine Karaoui (2014,2015) 5 * Alain Greiner (2016,2017 )5 * Alain Greiner (2016,2017,2018,2019) 6 6 * 7 7 * Copyright (c) Sorbonne Universites … … 91 91 xptr_t * devfs_external_inode_xp ) 92 92 { 93 error_t error; 94 xptr_t unused_xp; // required by vfs_add_child_in_parent() 93 error_t error; 94 xptr_t unused_xp; // required by vfs_add_child_in_parent() 95 vfs_inode_t * inode; 95 96 96 97 // create DEVFS "dev" inode in cluster 0 97 98 error = vfs_add_child_in_parent( 0, // cxy 98 INODE_TYPE_DIR,99 99 FS_TYPE_DEVFS, 100 100 root_inode_xp, … … 103 103 devfs_dev_inode_xp ); 104 104 105 // update inode "type" field 106 inode = GET_PTR( *devfs_dev_inode_xp ); 107 inode->type = INODE_TYPE_DIR; 108 105 109 // create dentries <.> and <..> in <dev> 106 110 error |= vfs_add_special_dentries( *devfs_dev_inode_xp, 107 111 root_inode_xp ); 108 112 109 // check success 110 assert( (error == 0) , "cannot create <dev>\n" ); 113 if( error ) 114 { 115 printk("\n[PANIC] in %s : cannot create <dev> directory\n", __FUNCTION__ ); 116 hal_core_sleep(); 117 } 111 118 112 119 #if DEBUG_DEVFS_GLOBAL_INIT … … 120 127 // create DEVFS "external" inode in cluster 0 121 128 error = vfs_add_child_in_parent( 0, // cxy 122 INODE_TYPE_DIR,123 129 FS_TYPE_DEVFS, 124 130 *devfs_dev_inode_xp, … … 127 133 devfs_external_inode_xp ); 128 134 135 // update inode "type" field 136 inode = GET_PTR( *devfs_external_inode_xp ); 137 inode->type = INODE_TYPE_DIR; 138 129 139 // create dentries <.> and <..> in <external> 130 140 error |= vfs_add_special_dentries( *devfs_external_inode_xp, 131 141 *devfs_dev_inode_xp ); 132 142 133 // check success 134 assert( (error == 0) , "cannot create <external>\n" ); 143 if( error ) 144 { 145 printk("\n[PANIC] in %s : cannot create <external> directory\n", __FUNCTION__ ); 146 hal_core_sleep(); 147 } 135 148 136 149 #if DEBUG_DEVFS_GLOBAL_INIT … … 153 166 chdev_t * chdev_ptr; 154 167 xptr_t inode_xp; 155 cxy_t inode_cxy;156 168 vfs_inode_t * inode_ptr; 157 169 uint32_t channel; … … 171 183 172 184 error = vfs_add_child_in_parent( local_cxy, 173 INODE_TYPE_DIR,174 185 FS_TYPE_DEVFS, 175 186 devfs_dev_inode_xp, … … 178 189 devfs_internal_inode_xp ); 179 190 191 // set inode "type" field 192 inode_ptr = GET_PTR( *devfs_internal_inode_xp ); 193 inode_ptr->type = INODE_TYPE_DEV; 194 180 195 // create dentries <.> and <..> in <internal> 181 196 error |= vfs_add_special_dentries( *devfs_internal_inode_xp, 182 197 devfs_dev_inode_xp ); 183 198 184 // check success 185 assert( (error == 0) , "cannot create <external>\n" ); 199 if( error ) 200 { 201 printk("\n[PANIC] in %s : cannot create <internal> directory\n", __FUNCTION__ ); 202 hal_core_sleep(); 203 } 186 204 187 205 #if DEBUG_DEVFS_LOCAL_INIT … … 199 217 chdev_cxy = GET_CXY( chdev_xp ); 200 218 201 assert( (chdev_cxy == local_cxy ), "illegal MMC chdev in cluster %x\n", local_cxy ); 219 if( chdev_cxy != local_cxy ) 220 { 221 printk("\n[PANIC] in %s : illegal MMC chdev in cluster %x\n", 222 __FUNCTION__, local_cxy ); 223 hal_core_sleep(); 224 } 202 225 203 226 error = vfs_add_child_in_parent( local_cxy, 204 INODE_TYPE_DEV,205 227 FS_TYPE_DEVFS, 206 228 *devfs_internal_inode_xp, … … 209 231 &inode_xp ); 210 232 211 assert( (error == 0) , "cannot create MMC inode\n" ); 212 213 // update child inode "extend" field 214 inode_cxy = GET_CXY( inode_xp ); 233 if( error ) 234 { 235 printk("\n[PANIC] in %s : cannot create MMC inode in cluster %x\n", 236 __FUNCTION__, local_cxy ); 237 hal_core_sleep(); 238 } 239 240 // update 
child inode "extend" and "type" fields 215 241 inode_ptr = GET_PTR( inode_xp ); 216 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 242 inode_ptr->extend = chdev_ptr; 243 inode_ptr->type = INODE_TYPE_DEV; 217 244 218 245 #if DEBUG_DEVFS_LOCAL_INIT … … 234 261 chdev_cxy = GET_CXY( chdev_xp ); 235 262 236 assert( (chdev_cxy == local_cxy ), "illegal DMA chdev in cluster %x\n", local_cxy ); 263 if( chdev_cxy != local_cxy ) 264 { 265 printk("\n[PANIC] in %s : illegal DMA chdev in cluster %x\n", 266 __FUNCTION__, local_cxy ); 267 hal_core_sleep(); 268 } 237 269 238 270 error = vfs_add_child_in_parent( local_cxy, 239 INODE_TYPE_DEV,240 271 FS_TYPE_DEVFS, 241 272 *devfs_internal_inode_xp, … … 243 274 &unused_xp, 244 275 &inode_xp ); 245 246 assert( (error == 0) , "cannot create DMA inode\n" ); 247 248 // update child inode "extend" field 249 inode_cxy = GET_CXY( inode_xp ); 276 if( error ) 277 { 278 printk("\n[PANIC] in %s : cannot create DMA inode in cluster %x\n", 279 __FUNCTION__, local_cxy ); 280 hal_core_sleep(); 281 } 282 283 // update child inode "extend" and "type" fields 250 284 inode_ptr = GET_PTR( inode_xp ); 251 285 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 285 inode_ptr->extend = chdev_ptr; 286 inode_ptr->type = INODE_TYPE_DEV; 252 287 253 288 #if DEBUG_DEVFS_LOCAL_INIT … … 270 305 { 271 306 error = vfs_add_child_in_parent( local_cxy, 272 INODE_TYPE_DEV,273 307 FS_TYPE_DEVFS, 274 308 devfs_external_inode_xp, … … 276 310 &unused_xp, 277 311 &inode_xp ); 278 279 assert( (error == 0) , "cannot create IOB inode\n" ); 280 281 // update child inode "extend" field 282 inode_cxy = GET_CXY( inode_xp ); 312 if( error ) 313 { 314 printk("\n[PANIC] in %s : cannot create IOB inode in cluster %x\n", 315 __FUNCTION__, local_cxy ); 316 hal_core_sleep(); 317 } 318 319 // update child inode "extend" and "type" fields 283 320 inode_ptr = GET_PTR( inode_xp ); 284 321 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 321 inode_ptr->extend = chdev_ptr; 322 inode_ptr->type = INODE_TYPE_DEV; 285 323 286 324 #if DEBUG_DEVFS_LOCAL_INIT … … 303 341 { 304 342 error = vfs_add_child_in_parent( local_cxy, 305 INODE_TYPE_DEV,306 343 FS_TYPE_DEVFS, 307 344 devfs_external_inode_xp, … … 310 347 &inode_xp ); 311 348 312 assert( (error == 0) , "cannot create PIC inode\n" ); 349 if( error ) 350 { 351 printk("\n[PANIC] in %s : cannot create PIC inode in cluster %x\n", 352 __FUNCTION__, local_cxy ); 353 hal_core_sleep(); 354 } 313 355 314 356 // update child inode "extend" field 315 inode_cxy = GET_CXY( inode_xp );316 357 inode_ptr = GET_PTR( inode_xp ); 317 358 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 358 inode_ptr->extend = chdev_ptr; 359 inode_ptr->type = INODE_TYPE_DEV; 318 360 319 361 #if DEBUG_DEVFS_LOCAL_INIT … … 338 380 { 339 382 error = vfs_add_child_in_parent( local_cxy, 340 INODE_TYPE_DEV,341 383 FS_TYPE_DEVFS, 342 384 devfs_external_inode_xp, … … 345 386 &inode_xp ); 346 387 347 assert( (error == 0) , "cannot create TXT_RX inode\n" ); 348 349 // update child inode "extend" field 350 inode_cxy = GET_CXY( inode_xp ); 388 if( error ) 389 { 390 printk("\n[PANIC] in %s : cannot create TXT_RX inode in cluster %x\n", 391 __FUNCTION__, local_cxy ); 392 hal_core_sleep(); 393 } 394 395 // update child inode "extend" and "type" fields 351 396 inode_ptr = GET_PTR( inode_xp ); 352 397 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 397 inode_ptr->extend = chdev_ptr; 398 inode_ptr->type = INODE_TYPE_DEV; 353 399 354 400
#if DEBUG_DEVFS_LOCAL_INIT … … 374 420 { 375 421 error = vfs_add_child_in_parent( local_cxy, 376 INODE_TYPE_DEV,377 422 FS_TYPE_DEVFS, 378 423 devfs_external_inode_xp, … … 380 425 &unused_xp, 381 426 &inode_xp ); 382 383 assert( (error == 0) , "cannot create TXT_TX inode\n" ); 384 385 // update child inode "extend" field 386 inode_cxy = GET_CXY( inode_xp ); 427 if( error ) 428 { 429 printk("\n[PANIC] in %s : cannot create TXT_TX inode in cluster %x\n", 430 __FUNCTION__, local_cxy ); 431 hal_core_sleep(); 432 } 433 434 // update child inode "extend" and "type" fields 387 435 inode_ptr = GET_PTR( inode_xp ); 388 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 436 inode_ptr->extend = chdev_ptr; 437 inode_ptr->type = INODE_TYPE_DEV; 389 438 390 439 #if DEBUG_DEVFS_LOCAL_INIT … … 410 459 { 411 460 error = vfs_add_child_in_parent( local_cxy, 412 INODE_TYPE_DEV,413 461 FS_TYPE_DEVFS, 414 462 devfs_external_inode_xp, … … 416 464 &unused_xp, 417 465 &inode_xp ); 418 419 assert( (error == 0) , "cannot create IOC inode\n" ); 420 421 // update child inode "extend" field 422 inode_cxy = GET_CXY( inode_xp ); 466 if( error ) 467 { 468 printk("\n[PANIC] in %s : cannot create IOC inode in cluster %x\n", 469 __FUNCTION__, local_cxy ); 470 hal_core_sleep(); 471 } 472 473 // update child inode "extend" and "type" fields 423 474 inode_ptr = GET_PTR( inode_xp ); 424 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 475 inode_ptr->extend = chdev_ptr; 476 inode_ptr->type = INODE_TYPE_DEV; 425 477 426 478 #if DEBUG_DEVFS_LOCAL_INIT … … 446 498 { 447 499 error = vfs_add_child_in_parent( local_cxy, 448 INODE_TYPE_DEV,449 500 FS_TYPE_DEVFS, 450 501 devfs_external_inode_xp, … … 452 503 &unused_xp, 453 504 &inode_xp ); 454 455 assert( (error == 0) , "cannot create FBF inode\n" ); 456 457 // update child inode "extend" field 458 inode_cxy = GET_CXY( inode_xp ); 505 if( error ) 506 { 507 printk("\n[PANIC] in %s : cannot create FBF inode in cluster %x\n", 508 __FUNCTION__, local_cxy ); 509 hal_core_sleep(); 510 } 511 512 // update child inode "extend" and "type" fields 459 513 inode_ptr = GET_PTR( inode_xp ); 460 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 514 inode_ptr->extend = chdev_ptr; 515 inode_ptr->type = INODE_TYPE_DEV; 461 516 462 517 #if DEBUG_DEVFS_LOCAL_INIT … … 482 537 { 483 538 error = vfs_add_child_in_parent( local_cxy, 484 INODE_TYPE_DEV,485 539 FS_TYPE_DEVFS, 486 540 devfs_external_inode_xp, … … 488 542 &unused_xp, 489 543 &inode_xp ); 490 491 assert( (error == 0) , "cannot create NIC_RX inode\n" ); 492 493 // update child inode "extend" field 494 inode_cxy = GET_CXY( inode_xp ); 544 if( error ) 545 { 546 printk("\n[PANIC] in %s : cannot create NIC_RX inode in cluster %x\n", 547 __FUNCTION__, local_cxy ); 548 hal_core_sleep(); 549 } 550 551 // update child inode "extend" and "type" fields 495 552 inode_ptr = GET_PTR( inode_xp ); 496 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 553 inode_ptr->extend = chdev_ptr; 554 inode_ptr->type = INODE_TYPE_DEV; 497 555 498 556 #if DEBUG_DEVFS_LOCAL_INIT … … 518 576 { 519 577 error = vfs_add_child_in_parent( local_cxy, 520 INODE_TYPE_DEV,521 578 FS_TYPE_DEVFS, 522 579 devfs_external_inode_xp, … … 524 581 &unused_xp, 525 582 &inode_xp ); 526 527 assert( (error == 0) , "cannot create NIC_TX inode\n" ); 528 529 // update child inode "extend" field 530 inode_cxy = GET_CXY( inode_xp ); 583 if( error ) 584 { 585 printk("\n[PANIC] in %s : cannot create NIC_TX inode in cluster %x\n", 
586 __FUNCTION__, local_cxy ); 587 hal_core_sleep(); 588 } 589 590 // update child inode "extend" and "type" fields 531 591 inode_ptr = GET_PTR( inode_xp ); 532 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 592 inode_ptr->extend = chdev_ptr; 593 inode_ptr->type = INODE_TYPE_DEV; 533 594 534 595 #if DEBUG_DEVFS_LOCAL_INIT -
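The create / check / update sequence above is repeated verbatim for every chdev; a hypothetical helper factoring it out (devfs_register_chdev() does not exist in the source, it is purely illustrative) could look like:

    static xptr_t devfs_register_chdev( xptr_t    parent_inode_xp,
                                        chdev_t * chdev_ptr,
                                        char    * name )
    {
        xptr_t  unused_xp;
        xptr_t  inode_xp;

        error_t error = vfs_add_child_in_parent( local_cxy,
                                                 FS_TYPE_DEVFS,
                                                 parent_inode_xp,
                                                 name,
                                                 &unused_xp,
                                                 &inode_xp );
        if( error )
        {
            printk("\n[PANIC] in %s : cannot create <%s> inode in cluster %x\n",
                   __FUNCTION__, name, local_cxy );
            hal_core_sleep();
        }

        // update child inode "extend" and "type" fields
        vfs_inode_t * inode_ptr = GET_PTR( inode_xp );
        inode_ptr->extend = chdev_ptr;
        inode_ptr->type   = INODE_TYPE_DEV;

        return inode_xp;
    }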
trunk/kernel/fs/fatfs.c
r614 r623 793 793 #if (DEBUG_FATFS_CTX_INIT & 0x1) 794 794 if( DEBUG_FATFS_CTX_INIT < cycle ) 795 { 796 uint32_t line; 797 uint32_t byte = 0; 798 printk("\n***** %s : FAT boot record\n", __FUNCTION__ ); 799 for ( line = 0 ; line < 32 ; line++ ) 800 { 801 printk(" %X | %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x |\n", 802 byte, 803 buffer[byte+ 0],buffer[byte+ 1],buffer[byte+ 2],buffer[byte+ 3], 804 buffer[byte+ 4],buffer[byte+ 5],buffer[byte+ 6],buffer[byte+ 7], 805 buffer[byte+ 8],buffer[byte+ 9],buffer[byte+10],buffer[byte+11], 806 buffer[byte+12],buffer[byte+13],buffer[byte+14],buffer[byte+15] ); 807 808 byte += 16; 809 } 810 } 795 putb( "boot record", buffer , 256 ); 811 796 #endif 812 797 … … 960 945 assert( (inode != NULL) , "inode pointer is NULL\n" ); 961 946 assert( (dentry != NULL) , "dentry pointer is NULL\n" ); 962 assert( (inode->type == INODE_TYPE_DIR) , "inode is not a directory\n" );963 947 assert( (inode->mapper != NULL ) , "mapper pointer is NULL\n" ); 964 948 … … 1359 1343 } // end fatfs_remove_dentry 1360 1344 1361 ///////////////////////////////////////////////////// 1362 error_t fatfs_get_dentry( vfs_inode_t * parent_inode, 1363 char * name, 1364 xptr_t child_inode_xp ) 1345 1346 ////////////////////////////////////////////////////////////////////////////////////////////// 1347 // This static function scans the pages of a mapper containing a FAT32 directory, identified 1348 // by the <mapper> argument, to find the directory entry identified by the <name> argument, 1349 // and returns a pointer on the directory entry, described as an array of 32 bytes, and the 1350 // index of this entry in the FAT32 mapper, seen as an array of 32-byte entries. 1351 // It is called by the fatfs_new_dentry() and fatfs_update_dentry() functions. 1352 // It must be called by a thread running in the cluster containing the mapper. 1353 ////////////////////////////////////////////////////////////////////////////////////////////// 1354 // @ mapper : [in] local pointer on directory mapper. 1355 // @ name : [in] searched directory entry name. 1356 // @ entry : [out] buffer for the pointer on the 32-byte directory entry (when found). 1357 // @ index : [out] buffer for the directory entry index in mapper. 1358 // @ return 0 if found / return 1 if not found / return -1 if mapper access error.
1359 ////////////////////////////////////////////////////////////////////////////////////////////// 1360 error_t fatfs_scan_directory( mapper_t * mapper, 1361 char * name, 1362 uint8_t ** entry, 1363 uint32_t * index ) 1365 1364 { 1366 1365 // Two embedded loops to scan the directory mapper: … … 1368 1367 // - scan the directory entries in each 4 Kbytes page 1369 1368 1370 #if DEBUG_FATFS_GET_DENTRY 1369 // check parent_inode and child_inode 1370 assert( (mapper != NULL) , "mapper pointer is NULL\n" ); 1371 assert( (name != NULL ), "child name is undefined\n" ); 1372 assert( (entry != NULL ), "entry buffer undefined\n" ); 1373 1374 #if DEBUG_FATFS_SCAN_DIRECTORY 1371 1375 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1372 1376 uint32_t cycle = (uint32_t)hal_get_cycles(); 1373 1377 thread_t * this = CURRENT_THREAD; 1374 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name );1375 if( DEBUG_FATFS_ GET_DENTRY < cycle )1376 printk("\n[%s] thread[%x,%x] enter forchild <%s> in parent <%s> / cycle %d\n",1378 vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , parent_name ); 1379 if( DEBUG_FATFS_SCAN_DIRECTORY < cycle ) 1380 printk("\n[%s] thread[%x,%x] enter to search child <%s> in parent <%s> / cycle %d\n", 1377 1381 __FUNCTION__, this->process->pid, this->trdid, name , parent_name , cycle ); 1378 1382 #endif 1379 1383 1380 // check parent_inode and child_inode 1381 assert( (parent_inode != NULL) , "parent_inode is NULL\n" ); 1382 assert( (child_inode_xp != XPTR_NULL ) , "child_inode is XPTR_NULL\n" ); 1383 1384 mapper_t * mapper = parent_inode->mapper; 1385 xptr_t mapper_xp = XPTR( local_cxy , mapper ); 1386 1387 // check parent mapper 1388 assert( (mapper != NULL) , "parent mapper is NULL\n"); 1389 1390 char cname[CONFIG_VFS_MAX_NAME_LENGTH]; // name extracter from each directory entry 1384 char cname[CONFIG_VFS_MAX_NAME_LENGTH]; // name extracted from each directory entry 1391 1385 1392 1386 char lfn1[16]; // buffer for one partial cname 1393 1387 char lfn2[16]; // buffer for one partial cname 1394 1388 char lfn3[16]; // buffer for one partial cname 1389 xptr_t mapper_xp; // extended pointer on mapper descriptor 1395 1390 xptr_t page_xp; // extended pointer on page descriptor 1396 1391 xptr_t base_xp; // extended pointer on page base … … 1400 1395 uint32_t seq; // sequence index 1401 1396 uint32_t lfn = 0; // LFN entries number 1402 uint32_t size = 0; // searched file/dir size (bytes) 1403 uint32_t cluster = 0; // searched file/dir cluster index 1404 uint32_t is_dir = 0; // searched file/dir type 1405 int32_t found = 0; // not found (0) / name found (1) / end of dir (-1) 1397 int32_t found = 0; // not yet = 0 / success = 1 / not found = 2 / error = -1 1406 1398 uint32_t page_id = 0; // page index in mapper 1407 uint32_t dentry_id = 0; // directory entry index1408 1399 uint32_t offset = 0; // byte offset in page 1409 1400 1410 // scan the parent directory mapper 1401 mapper_xp = XPTR( local_cxy , mapper ); 1402 1403 // scan the mapper pages 1411 1404 while ( found == 0 ) 1412 1405 { … … 1414 1407 page_xp = mapper_remote_get_page( mapper_xp , page_id ); 1415 1408 1416 if( page_xp == XPTR_NULL) return EIO; 1409 if( page_xp == XPTR_NULL) 1410 { 1411 found = -1; 1412 } 1417 1413 1418 1414 // get page base … … 1420 1416 base = (uint8_t *)GET_PTR( base_xp ); 1421 1417 1422 #if (DEBUG_FATFS_ GET_DENTRY & 0x1)1423 if( DEBUG_FATFS_ GET_DENTRY < cycle )1418 #if (DEBUG_FATFS_SCAN_DIRECTORY & 0x1) 1419 if( DEBUG_FATFS_SCAN_DIRECTORY < cycle ) 1424 1420 mapper_display_page( mapper_xp , 
page_id , 256 ); 1425 1421 #endif … … 1432 1428 if (ord == NO_MORE_ENTRY) // no more entry => break 1433 1429 { 1434 found = -1; 1430 found = 2; 1435 1431 } 1436 1432 else if ( ord == FREE_ENTRY ) // free entry => skip … … 1477 1473 if ( strcmp( name , cname ) == 0 ) 1478 1474 { 1479 cluster = (fatfs_get_record( DIR_FST_CLUS_HI , base + offset , 1 ) << 16) | 1480 (fatfs_get_record( DIR_FST_CLUS_LO , base + offset , 1 ) ) ; 1481 dentry_id = ((page_id<<12) + offset)>>5; 1482 is_dir = ((attr & ATTR_DIRECTORY) == ATTR_DIRECTORY); 1483 size = fatfs_get_record( DIR_FILE_SIZE , base + offset , 1 ); 1475 *entry = base + offset; 1476 *index = ((page_id<<12) + offset)>>5; 1484 1477 found = 1; 1485 1478 } … … 1494 1487 } // end loop on pages 1495 1488 1496 // analyse the result of scan 1497 1498 if ( found == -1 ) // found end of directory => failure 1499 { 1489 if( found == 1 ) 1490 { 1491 1492 #if DEBUG_FATFS_SCAN_DIRECTORY 1493 cycle = (uint32_t)hal_get_cycles(); 1494 if( DEBUG_FATFS_SCAN_DIRECTORY < cycle ) 1495 printk("\n[%s] thread[%x,%x] exit / found child <%s> in <%s>\n", 1496 __FUNCTION__, this->process->pid, this->trdid, name, parent_name ); 1497 #endif 1498 return 0; 1499 } 1500 else if( found == 2 ) 1501 { 1502 1503 #if DEBUG_FATFS_SCAN_DIRECTORY 1504 cycle = (uint32_t)hal_get_cycles(); 1505 if( DEBUG_FATFS_SCAN_DIRECTORY < cycle ) 1506 printk("\n[%s] thread[%x,%x] exit / child <%s> in <%s> not found\n", 1507 __FUNCTION__, this->process->pid, this->trdid, name, parent_name ); 1508 #endif 1509 return 1; 1510 } 1511 else 1512 { 1513 printk("\n[ERROR] in %s : cannot get page %d from mapper\n", 1514 __FUNCTION__, page_id ); 1515 1516 return -1; 1517 } 1518 } // end fatfs_scan_directory() 1519 1520 1521 1522 ///////////////////////////////////////////////////// 1523 error_t fatfs_new_dentry( vfs_inode_t * parent_inode, 1524 char * name, 1525 xptr_t child_inode_xp ) 1526 { 1527 uint8_t * entry; // pointer on FAT32 directory entry (array of 32 bytes) 1528 uint32_t index; // index of FAT32 directory entry in mapper 1529 mapper_t * mapper; // pointer on directory mapper 1530 uint32_t cluster; // directory entry cluster 1531 uint32_t size; // directory entry size 1532 bool_t is_dir; // directory entry type (file/dir) 1533 error_t error; 1534 1535 // check arguments 1536 assert( (parent_inode != NULL) , "parent_inode is NULL\n" ); 1537 assert( (name != NULL) , "name is NULL\n" ); 1538 assert( (child_inode_xp != XPTR_NULL ) , "child_inode is XPTR_NULL\n" ); 1539 1540 #if DEBUG_FATFS_GET_DENTRY 1541 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1542 uint32_t cycle = (uint32_t)hal_get_cycles(); 1543 thread_t * this = CURRENT_THREAD; 1544 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name ); 1545 if( DEBUG_FATFS_GET_DENTRY < cycle ) 1546 printk("\n[%s] thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n", 1547 __FUNCTION__, this->process->pid, this->trdid, name , parent_name , cycle ); 1548 #endif 1549 1550 // get pointer and index of searched directory entry in mapper 1551 mapper = parent_inode->mapper; 1552 error = fatfs_scan_directory( mapper, name , &entry , &index ); 1553 1554 // update child inode and dentry descriptors if success 1555 if( error == 0 ) 1556 { 1500 1557 1501 1558 #if DEBUG_FATFS_GET_DENTRY 1502 1559 cycle = (uint32_t)hal_get_cycles(); 1503 1560 if( DEBUG_FATFS_GET_DENTRY < cycle ) 1504 1561 printk("\n[%s] thread[%x,%x] exit / child <%s> not found / cycle %d\n", 1505 __FUNCTION__, this->process->pid, this->trdid, name, cycle ); 1506 #endif 1507 1508 return -1; 
1509 } 1510 1511 // get child inode cluster and local pointer 1512 cxy_t inode_cxy = GET_CXY( child_inode_xp ); 1513 vfs_inode_t * inode_ptr = GET_PTR( child_inode_xp ); 1514 1515 // build extended pointer on parent dentries root 1516 xptr_t parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents ); 1561 printk("\n[%s] thread[%x,%x] exit / initialised child <%s> in <%s> / cycle %d\n", 1562 __FUNCTION__, this->process->pid, this->trdid, name, parent_name, cycle ); 1563 #endif 1564 // get relevant infos from FAT32 directory entry 1565 cluster = (fatfs_get_record( DIR_FST_CLUS_HI , entry , 1 ) << 16) | 1566 (fatfs_get_record( DIR_FST_CLUS_LO , entry , 1 ) ) ; 1567 is_dir = (fatfs_get_record( DIR_ATTR , entry , 1 ) & ATTR_DIRECTORY); 1568 size = fatfs_get_record( DIR_FILE_SIZE , entry , 1 ); 1569 1570 // get child inode cluster and local pointer 1571 cxy_t inode_cxy = GET_CXY( child_inode_xp ); 1572 vfs_inode_t * inode_ptr = GET_PTR( child_inode_xp ); 1573 1574 // build extended pointer on root of list of parent dentries 1575 xptr_t parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents ); 1517 1576 1518 1577 // check child inode has at least one parent 1519 1578 assert( (xlist_is_empty( parents_root_xp ) == false ), "child inode must have one parent\n"); 1520 1579 1521 // get dentry pointers and cluster 1522 xptr_t dentry_xp = XLIST_FIRST( parents_root_xp , vfs_dentry_t , parents ); 1523 vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp ); 1524 cxy_t dentry_cxy = GET_CXY( dentry_xp ); 1580 // get dentry pointers and cluster 1581 xptr_t dentry_xp = XLIST_FIRST( parents_root_xp , vfs_dentry_t , parents ); 1582 vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp ); 1583 cxy_t dentry_cxy = GET_CXY( dentry_xp ); 1525 1584 1526 1585 // check dentry descriptor in same cluster as parent inode 1527 1586 assert( (dentry_cxy == local_cxy) , "illegal dentry cluster\n" ); 1528 1587 1529 // update the child inode "type", "size", and "extend" fields 1530 vfs_inode_type_t type = (is_dir) ? INODE_TYPE_DIR : INODE_TYPE_FILE; 1531 1532 hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type ) , type ); 1533 hal_remote_s32( XPTR( inode_cxy , &inode_ptr->size ) , size ); 1534 hal_remote_s32( XPTR( inode_cxy , &inode_ptr->extend ) , cluster ); 1535 1536 // update the dentry "extend" field 1537 dentry_ptr->extend = (void *)(intptr_t)dentry_id; 1538 1539 #if DEBUG_FATFS_GET_DENTRY 1588 // update the child inode "type", "size", and "extend" fields 1589 vfs_inode_type_t type = (is_dir) ? 
INODE_TYPE_DIR : INODE_TYPE_FILE; 1590 1591 hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type ) , type ); 1592 hal_remote_s32( XPTR( inode_cxy , &inode_ptr->size ) , size ); 1593 hal_remote_s32( XPTR( inode_cxy , &inode_ptr->extend ) , cluster ); 1594 1595 // update the dentry "extend" field 1596 dentry_ptr->extend = (void *)(intptr_t)index; 1597 1598 return 0; 1599 } 1600 else 1601 { 1602 return -1; 1603 } 1604 1605 } // end fatfs_new_dentry() 1606 1607 ////////////////////////////////////////////////// 1608 error_t fatfs_update_dentry( vfs_inode_t * inode, 1609 vfs_dentry_t * dentry, 1610 uint32_t size ) 1611 { 1612 uint8_t * entry; // pointer on FAT32 directory entry (array of 32 bytes) 1613 uint32_t index; // index of FAT32 directory entry in mapper 1614 mapper_t * mapper; // pointer on directory mapper 1615 error_t error; 1616 1617 // check arguments 1618 assert( (inode != NULL) , "inode is NULL\n" ); 1619 assert( (dentry != NULL) , "dentry is NULL\n" ); 1620 assert( (size != 0 ) , "size is 0\n" ); 1621 1622 #if DEBUG_FATFS_UPDATE_DENTRY 1623 char dir_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1624 uint32_t cycle = (uint32_t)hal_get_cycles(); 1625 thread_t * this = CURRENT_THREAD; 1626 vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name ); 1627 if( DEBUG_FATFS_UPDATE_DENTRY < cycle ) 1628 printk("\n[%s] thread[%x,%x] enter for entry <%s> in dir <%s> / cycle %d\n", 1629 __FUNCTION__, this->process->pid, this->trdid, dentry->name , dir_name , cycle ); 1630 #endif 1631 1632 // get pointer and index of searched directory entry in mapper 1633 mapper = inode->mapper; 1634 error = fatfs_scan_directory( mapper, dentry->name , &entry , &index ); 1635 1636 // update size in mapper if found 1637 if( error == 0 ) 1638 { 1639 1640 #if DEBUG_FATFS_UPDATE_DENTRY 1540 1641 cycle = (uint32_t)hal_get_cycles(); 1541 if( DEBUG_FATFS_GET_DENTRY < cycle ) 1542 printk("\n[%s] thread[%x,%x] exit / child <%s> loaded in <%s> / cycle %d\n", 1543 __FUNCTION__, this->process->pid, this->trdid, name, parent_name, cycle ); 1544 #endif 1545 1546 return 0; 1547 1548 } // end fatfs_get_dentry() 1642 if( DEBUG_FATFS_UPDATE_DENTRY < cycle ) 1643 printk("\n[%s] thread[%x,%x] exit / found entry <%s> in <%s> / cycle %d\n", 1644 __FUNCTION__, this->process->pid, this->trdid, dentry->name, dir_name, cycle ); 1645 #endif 1646 // set size in FAT32 directory entry 1647 fatfs_set_record( DIR_FILE_SIZE , entry , 1 , size ); 1648 1649 // get local pointer on modified page base 1650 void * base = (void *)((intptr_t)entry & (~CONFIG_PPM_PAGE_MASK)); 1651 1652 // get extended pointer on modified page descriptor 1653 xptr_t page_xp = ppm_base2page( XPTR( local_cxy , base ) ); 1654 1655 // mark page as dirty 1656 ppm_page_do_dirty( page_xp ); 1657 1658 return 0; 1659 } 1660 else 1661 { 1662 return -1; 1663 } 1664 1665 } // end fatfs_update_dentry() 1549 1666 1550 1667 /////////////////////////////////////////////////////// … … 2056 2173 assert( (inode_xp != XPTR_NULL) , "inode pointer is NULL\n" ); 2057 2174 2058 // get first_cluster from inode extension2175 // get inode cluster and local pointer 2059 2176 inode_ptr = GET_PTR( inode_xp ); 2060 2177 inode_cxy = GET_CXY( inode_xp ); 2178 2179 // get first_cluster from inode extension 2061 2180 first_xp = XPTR( inode_cxy , &inode_ptr->extend ); 2062 2181 first_cluster = (uint32_t)(intptr_t)hal_remote_lpt( first_xp ); … … 2073 2192 printk("\n[%s] thread[%x,%x] enter for <%s> / first_cluster %x / cycle %d\n", 2074 2193 __FUNCTION__ , this->process->pid, this->trdid, name, first_cluster, cycle 
); 2194 #endif 2195 2196 #if (DEBUG_FATFS_RELEASE_INODE & 1) 2197 fatfs_display_fat( 0 , 512 ); 2075 2198 #endif 2076 2199 -
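The entry <index> computed by fatfs_scan_directory() and stored in the dentry "extend" field deserves a short worked example. The sketch below is an editor illustration only (it is not part of changeset 623); it assumes the 4 Kbytes mapper pages and 32-byte FAT32 entries used by the code above, so each page holds 4096/32 = 128 entries:

    // entry index in mapper, as computed in fatfs_scan_directory()
    uint32_t index = ((page_id << 12) + offset) >> 5;

    // inverse mapping, to locate the entry again from a stored index
    uint32_t pgid  = index >> 7;           // page index in mapper (128 entries/page)
    uint32_t off   = (index & 0x7F) << 5;  // byte offset of the entry in the page

This is why fatfs_update_dentry() can re-scan the mapper by name and patch the entry in place: the (page, offset) pair and the flat index carry the same information.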
trunk/kernel/fs/fatfs.h
r614 r623 309 309 310 310 /***************************************************************************************** 311 * This function implements the generic vfs_fs_get_dentry() function for the FATFS. 312 ***************************************************************************************** 313 * It initialises a new child (new inode/dentry couple in Inode Tree), identified 314 * by the <child_inode_xp> argument, from the parent directory mapper, identified by the 315 * <parent_inode> argument. 311 * This function implements the generic vfs_fs_new_dentry() function for the FATFS. 312 ***************************************************************************************** 313 * It initializes a new inode/dentry couple in Inode Tree, attached to the directory 314 * identified by the <parent_inode> argument. The new directory entry is identified 315 * by the <name> argument. The child inode descriptor identified by the <child_inode_xp> 316 * argument, and the dentry descriptor must have been previously allocated. 316 317 * It scans the parent mapper to find the <name> argument. 317 318 * It sets the "type", "size", and "extend" fields in inode descriptor. … … 324 325 * @ return 0 if success / return ENOENT if child not found. 325 326 ****************************************************************************************/ 326 error_t fatfs_get_dentry( struct vfs_inode_s * parent_inode, 327 error_t fatfs_new_dentry( struct vfs_inode_s * parent_inode, 327 328 char * name, 328 329 xptr_t child_inode_xp ); 329 330 330 331 /***************************************************************************************** 332 * This function implements the generic vfs_fs_update_dentry() function for the FATFS. 333 ***************************************************************************************** 334 * It updates the size of a directory entry identified by the <dentry> argument in 335 * the mapper of a directory identified by the <inode> argument, as defined by the <size> 336 * argument. 337 * It scans the mapper to find the entry identified by the dentry "name" field. 338 * It sets the "size" field in the directory mapper AND marks the page as DIRTY. 339 * It must be called by a thread running in the cluster containing the directory inode. 340 ***************************************************************************************** 341 * @ inode : local pointer on inode (directory). 342 * @ dentry : local pointer on dentry (for name). 343 * @ size : new size value. 344 * @ return 0 if success / return ENOENT if child not found. 345 ****************************************************************************************/ 346 error_t fatfs_update_dentry( struct vfs_inode_s * inode, 347 struct vfs_dentry_s * dentry, 348 uint32_t size ); 349 350 /***************************************************************************************** 331 351 * This function implements the generic vfs_fs_get_user_dir() function for the FATFS. 332 352 ***************************************************************************************** 333 353 * It is called by the remote_dir_create() function to scan the mapper of a directory 334 * identified by the <inode> argument and copy up to <max_dirent> valid dentries to a 354 * identified by the <inode> argument, and copy up to <max_dirent> valid dentries to a 335 355 * local dirent array, defined by the <array> argument. 
The <min_dentry> argument defines 336 * the index of the first dentry to copied to the target dirent array. 356 * the index of the first dentry to be copied to the target dirent array. 337 357 * This function returns in the <entries> buffer the number of dentries actually written, 338 358 * and signals in the <done> buffer when the last valid entry has been found. 339 359 * If the <detailed> argument is true, a dentry/inode couple that does not exist in 340 * the Inode Tree is dynamically created, and all dirent fiels are documented in the 360 * the Inode Tree is dynamically created, and all dirent fields are documented in the 341 361 * dirent array. Otherwise, only the dentry name is documented. 342 362 * It must be called by a thread running in the cluster containing the directory inode. … … 443 463 * The page - and the mapper - can be located in another cluster than the calling thread. 444 464 * The pointer on the mapper and the page index in file are found in the page descriptor. 445 * It is used for both fora regular file/directory mapper, and the FAT mapper. 465 * It is used for both a regular file/directory mapper, and the FAT mapper. 446 466 * For the FAT mapper, it accesses the FATFS to get the location on IOC device. 447 467 * For a regular file, it accesses the FAT mapper to get the cluster index on IOC device. -
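As a usage sketch of the two new FATFS functions declared above (editor illustration only, not part of the changeset): the relevant records of the 32-byte entry returned by fatfs_scan_directory() are decoded with fatfs_get_record(), exactly as fatfs_new_dentry() does in fatfs.c; <entry> is assumed to have been set by a successful scan:

    uint8_t * entry;   // pointer on the 32-byte entry, set by fatfs_scan_directory()
    uint32_t  cluster = (fatfs_get_record( DIR_FST_CLUS_HI , entry , 1 ) << 16) |
                        (fatfs_get_record( DIR_FST_CLUS_LO , entry , 1 ) );
    bool_t    is_dir  = (fatfs_get_record( DIR_ATTR      , entry , 1 ) & ATTR_DIRECTORY);
    uint32_t  size    =  fatfs_get_record( DIR_FILE_SIZE , entry , 1 );

The first cluster index is split across two 16-bit records in the FAT32 on-disk format, which is why the HI/LO pair must be recombined before being stored in the inode "extend" field.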
trunk/kernel/fs/ramfs.c
r602 r623 35 35 char * ramfs_root_name ) 36 36 { 37 xptr_t unused_xp; // required by vfs_add_child_in_parent() 37 xptr_t dentry_xp; // unused but required by vfs_add_child_in_parent() 38 xptr_t inode_xp; 39 vfs_inode_t * inode_ptr; 38 40 39 41 cxy_t cxy = cluster_random_select(); … … 41 43 // create VFS dentry and VFS inode for RAMFS root directory 42 44 return vfs_add_child_in_parent( cxy, 43 INODE_TYPE_DIR,44 45 FS_TYPE_RAMFS, 45 46 parent_inode_xp, 46 47 ramfs_root_name, 47 &unused_xp, 48 &unused_xp ); 48 &dentry_xp, 49 &inode_xp ); 50 // update inode type field 51 inode_ptr = GET_PTR( inode_xp ); 52 inode_ptr->type = INODE_TYPE_DIR; 49 53 } 50 54 -
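Since the inode type argument has been removed from vfs_inode_create() and vfs_add_child_in_parent(), a new inode is now created with the default INODE_TYPE_FILE type, and every caller that builds a directory must overwrite this field after the call. A minimal sketch of the pattern (editor illustration only): when the new inode can live in a remote cluster, the remote-safe form used by vfs_mkdir() in vfs.c is:

    cxy_t         inode_cxy = GET_CXY( inode_xp );  // cluster of the new inode
    vfs_inode_t * inode_ptr = GET_PTR( inode_xp );  // pointer valid in that cluster
    hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type ) , INODE_TYPE_DIR );

The direct assignment used in ramfs_init() above is only safe when the inode is known to be local.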
trunk/kernel/fs/vfs.c
r614 r623 3 3 * 4 4 * Author Mohamed Lamine Karaoui (2015) 5 * Alain Greiner (2016,2017,2018) 5 * Alain Greiner (2016,2017,2018,2019) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 142 142 //////////////////////////////////////////////////// 143 143 error_t vfs_inode_create( vfs_fs_type_t fs_type, 144 vfs_inode_type_t inode_type, 145 144 uint32_t attr, … … 214 213 215 214 // initialize inode descriptor 216 inode->type = inode_type; 215 inode->type = INODE_TYPE_FILE; // default value 217 216 inode->inum = inum; 218 217 inode->attr = attr; … … 228 227 mapper->inode = inode; 229 228 230 // initialise threads waiting queue 231 // xlist_root_init( XPTR( local_cxy , &inode->wait_root ) ); 232 233 229 // initialize children dentries xhtab 234 230 xhtab_init( &inode->children , XHTAB_DENTRY_TYPE ); … … 278 274 vfs_inode_t * ptr = GET_PTR( inode_xp ); 279 275 276 // build extended pointers on lock & size 277 xptr_t lock_xp = XPTR( cxy , &ptr->size_lock ); 278 xptr_t size_xp = XPTR( cxy , &ptr->size ); 279 280 // take lock in read mode 281 remote_rwlock_rd_acquire( lock_xp ); 282 280 283 // get size 281 remote_rwlock_rd_acquire( XPTR( cxy , &ptr->size_lock ) ); 282 uint32_t size = hal_remote_l32( XPTR( cxy , &ptr->size ) ); 283 remote_rwlock_rd_release( XPTR( cxy , &ptr->size_lock ) ); 284 uint32_t size = hal_remote_l32( size_xp ); 285 286 // release lock from read mode 287 remote_rwlock_rd_release( lock_xp ); 288 284 289 return size; 285 290 } 286 291 287 //////////////////////////////////////////// 288 void vfs_inode_set_size( xptr_t inode_xp, 289 uint32_t size ) 292 /////////////////////////////////////////////// 293 void vfs_inode_update_size( xptr_t inode_xp, 294 uint32_t size ) 290 295 { 291 296 // get inode cluster and local pointer … … 293 298 vfs_inode_t * ptr = GET_PTR( inode_xp ); 294 299 295 // set size 296 remote_rwlock_wr_release( XPTR( cxy , &ptr->size_lock ) ); 297 hal_remote_s32( XPTR( cxy , &ptr->size ) , size ); 298 remote_rwlock_wr_release( XPTR( cxy , &ptr->size_lock ) ); 300 // build extended pointers on lock & size 301 xptr_t lock_xp = XPTR( cxy , &ptr->size_lock ); 302 xptr_t size_xp = XPTR( cxy , &ptr->size ); 303 304 // take lock in write mode 305 remote_rwlock_wr_acquire( lock_xp ); 306 307 // get current size 308 uint32_t current_size = hal_remote_l32( size_xp ); 309 310 // set size if required 311 if( current_size < size ) hal_remote_s32( size_xp , size ); 312 313 // release lock from write mode 314 remote_rwlock_wr_release( lock_xp ); 299 315 } 300 316 … … 546 562 547 563 // check refcount 548 assert( (file->refcount == 0) , "refcount non zero\n" ); 564 // assert( (file->refcount == 0) , "refcount non zero\n" ); 549 565 550 566 kmem_req_t req; … … 554 570 555 571 #if DEBUG_VFS_CLOSE 572 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 573 vfs_file_get_name( XPTR( local_cxy , file ) , name ); 556 574 thread_t * this = CURRENT_THREAD; 557 575 uint32_t cycle = (uint32_t)hal_get_cycles(); 558 576 if( DEBUG_VFS_CLOSE < cycle ) 559 printk("\n[%s] thread[%x,%x] deleted file %x in cluster %x / cycle %d\n", 560 __FUNCTION__, this->process->pid, this->trdid, file, local_cxy, cycle ); 577 printk("\n[%s] thread[%x,%x] deleted file <%s> in cluster %x / cycle %d\n", 578 __FUNCTION__, this->process->pid, this->trdid, name, local_cxy, cycle ); 561 579 #endif 562 580 … … 585 603 hal_remote_atomic_add( XPTR( file_cxy , &file_ptr->refcount ) , -1 ); 586 604 } 605 606 /////////////////////////////////////// 607 void vfs_file_get_name( xptr_t file_xp, 608 char * name ) 609 { 
610 // get cluster and local pointer on remote file 611 vfs_file_t * file_ptr = GET_PTR( file_xp ); 612 cxy_t file_cxy = GET_CXY( file_xp ); 613 614 // get pointers on remote inode 615 vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) ); 616 xptr_t inode_xp = XPTR( file_cxy , inode_ptr ); 617 618 // call the relevant function 619 vfs_inode_get_name( inode_xp , name ); 620 } 621 587 622 588 623 ////////////////////////////////////////////////////////////////////////////////////////// … … 889 924 } // vfs_lseek() 890 925 891 /////////////////////////////////// 926 //////////////////////////////////// 892 927 error_t vfs_close( xptr_t file_xp, 893 928 uint32_t file_id ) 894 929 { 895 cluster_t * cluster; // local pointer on local cluster 896 cxy_t file_cxy; // cluster containing the file descriptor. 897 vfs_file_t * file_ptr; // local ponter on file descriptor 898 cxy_t owner_cxy; // process owner cluster 899 lpid_t lpid; // process local index 900 xptr_t root_xp; // root of list of process copies 901 xptr_t lock_xp; // lock protecting the list of copies 902 xptr_t iter_xp; // iterator on list of process copies 903 xptr_t process_xp; // extended pointer on one process copy 904 cxy_t process_cxy; // process copy cluster 905 process_t * process_ptr; // process copy local pointer 906 907 // check arguments 908 assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL\n" ); 909 assert( (file_id < CONFIG_PROCESS_FILE_MAX_NR) , "illegal file_id\n" ); 930 cxy_t file_cxy; // cluster containing the file descriptor. 931 vfs_file_t * file_ptr; // local pointer on file descriptor 932 cxy_t owner_cxy; // process owner cluster 933 pid_t pid; // process identifier 934 lpid_t lpid; // process local index 935 xptr_t root_xp; // root of xlist (processes , or dentries) 936 xptr_t lock_xp; // lock protecting the xlist 937 xptr_t iter_xp; // iterator on xlist 938 mapper_t * mapper_ptr; // local pointer on associated mapper 939 xptr_t mapper_xp; // extended pointer on mapper 940 vfs_inode_t * inode_ptr; // local pointer on associated inode 941 xptr_t inode_xp; // extended pointer on inode 942 uint32_t size; // current file size (from inode descriptor) 943 error_t error; 944 945 char name[CONFIG_VFS_MAX_NAME_LENGTH]; // file name 946 947 // check argument 948 assert( (file_xp != XPTR_NULL) , "file_xp is XPTR_NULL\n" ); 910 949 911 950 thread_t * this = CURRENT_THREAD; 912 951 process_t * process = this->process; 913 952 cluster_t * cluster = LOCAL_CLUSTER; 953 954 // get file name 955 vfs_file_get_name( file_xp , name ); 956 914 957 #if DEBUG_VFS_CLOSE 915 958 uint32_t cycle = (uint32_t)hal_get_cycles(); 916 959 if( DEBUG_VFS_CLOSE < cycle ) 917 printk("\n[%s] thread[%x,%x] enter / fdid %d / cycle %d\n", 918 __FUNCTION__, process->pid, this->trdid, file_id, cycle ); 960 printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n", 961 __FUNCTION__, process->pid, this->trdid, name, cycle ); 962 #endif 963 964 // get cluster and local pointer on remote file descriptor 965 file_cxy = GET_CXY( file_xp ); 966 file_ptr = GET_PTR( file_xp ); 967 968 //////// 1) update all dirty pages from mapper to device 969 970 // get pointers on mapper associated to file 971 mapper_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) ); 972 mapper_xp = XPTR( file_cxy , mapper_ptr ); 973 974 // copy all dirty pages from mapper to device 975 if( file_cxy == local_cxy ) 976 { 977 error = mapper_sync( mapper_ptr ); 978 } 
979 else 980 { 981 rpc_mapper_sync_client( file_cxy, 982 mapper_ptr, 983 &error ); 984 } 985 986 if( error ) 987 { 988 printk("\n[ERROR] in %s : cannot synchronise dirty pages for <%s>\n", 989 __FUNCTION__, name ); 990 return -1; 991 } 992 993 #if DEBUG_VFS_CLOSE 994 if( DEBUG_VFS_CLOSE < cycle ) 995 printk("\n[%s] thread[%x,%x] synchronised mapper of <%s> to device\n", 996 __FUNCTION__, process->pid, this->trdid, name ); 997 #endif 998 999 //////// 2) update file size in all parent directory mapper(s) and on device 1000 1001 // get pointers on remote inode 1002 inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) ); 1003 inode_xp = XPTR( file_cxy , inode_ptr ); 1004 1005 // get file size from remote inode 1006 size = hal_remote_l32( XPTR( file_cxy , &inode_ptr->size ) ); 1007 1008 // get root of list of parent dentries 1009 root_xp = XPTR( file_cxy , &inode_ptr->parents ); 1010 1011 // loop on all parents 1012 XLIST_FOREACH( root_xp , iter_xp ) 1013 { 1014 // get pointers on parent directory dentry 1015 xptr_t parent_dentry_xp = XLIST_ELEMENT( iter_xp , vfs_dentry_t , parents ); 1016 cxy_t parent_cxy = GET_CXY( parent_dentry_xp ); 1017 vfs_dentry_t * parent_dentry_ptr = GET_PTR( parent_dentry_xp ); 1018 1019 // get local pointer on parent directory inode 1020 vfs_inode_t * parent_inode_ptr = hal_remote_lpt( XPTR( parent_cxy, 1021 &parent_dentry_ptr->parent ) ); 1022 1023 // get local pointer on parent directory mapper 1024 mapper_t * parent_mapper_ptr = hal_remote_lpt( XPTR( parent_cxy, 1025 &parent_inode_ptr->mapper ) ); 1026 1027 // update dentry size in parent directory mapper 1028 if( parent_cxy == local_cxy ) 1029 { 1030 error = vfs_fs_update_dentry( parent_inode_ptr, 1031 parent_dentry_ptr, 1032 size ); 1033 } 1034 else 1035 { 1036 rpc_vfs_fs_update_dentry_client( parent_cxy, 1037 parent_inode_ptr, 1038 parent_dentry_ptr, 1039 size, 1040 &error ); 1041 } 1042 1043 if( error ) 1044 { 1045 printk("\n[ERROR] in %s : cannot update size in parent\n", 1046 __FUNCTION__ ); 1047 return -1; 1048 } 1049 1050 #if DEBUG_VFS_CLOSE 1051 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1052 vfs_inode_get_name( XPTR( parent_cxy , parent_inode_ptr ) , parent_name ); 1053 if( DEBUG_VFS_CLOSE < cycle ) 1054 printk("\n[%s] thread[%x,%x] updated size of <%s> in parent <%s>\n", 1055 __FUNCTION__, process->pid, this->trdid, name, parent_name ); 1056 #endif 1057 1058 // copy all dirty pages from parent mapper to device 1059 if( parent_cxy == local_cxy ) 1060 { 1061 error = mapper_sync( parent_mapper_ptr ); 1062 } 1063 else 1064 { 1065 rpc_mapper_sync_client( parent_cxy, 1066 parent_mapper_ptr, 1067 &error ); 1068 } 1069 1070 if( error ) 1071 { 1072 printk("\n[ERROR] in %s : cannot synchronise parent mapper to device\n", 1073 __FUNCTION__ ); 1074 return -1; 1075 } 1076 1077 #if DEBUG_VFS_CLOSE 1078 if( DEBUG_VFS_CLOSE < cycle ) 1079 printk("\n[%s] thread[%x,%x] synchronised mapper of parent <%s> to device\n", 1080 __FUNCTION__, process->pid, this->trdid, parent_name ); 1081 #endif 1082 1083 } 1084 1085 //////// 3) loop on the process copies to reset all fd_array[file_id] entries 923 1086 924 1087 // get owner process cluster and lpid 925 owner_cxy = CXY_FROM_PID( process->pid ); 926 lpid = LPID_FROM_PID( process->pid ); 1088 pid = process->pid; 1089 owner_cxy = CXY_FROM_PID( pid ); 1090 lpid = LPID_FROM_PID( pid ); 927 1091 928 1092 // get extended pointers on copies root and lock … … 930 1094 lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] ); 931 1095 932 // 1) loop on the process 
descriptor copies to reset all fd_array[file_id] entries 933 934 1096 // take the lock protecting the list of copies 935 1097 remote_queuelock_acquire( lock_xp ); … … 937 1099 XLIST_FOREACH( root_xp , iter_xp ) 938 1100 { 939 process_xp = XLIST_ELEMENT( iter_xp , process_t , copies_list ); 940 process_cxy = GET_CXY( process_xp ); 941 process_ptr = GET_PTR( process_xp ); 942 943 #if (DEBUG_VFS_CLOSE & 1 ) 944 if( DEBUG_VFS_CLOSE < cycle ) 945 printk("\n[%s] reset fd_array[%d] for process %x in cluster %x\n", 946 __FUNCTION__, file_id, process_ptr, process_cxy ); 947 #endif 948 949 // fd_array lock is required for atomic write of a 64 bits word 950 // xptr_t fd_array_lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock ); 951 952 xptr_t entry_xp = XPTR( process_cxy , &process_ptr->fd_array.array[file_id] ); 953 954 // remote_rwlock_wr_acquire( fd_array_lock_xp ); 955 1101 xptr_t process_xp = XLIST_ELEMENT( iter_xp , process_t , copies_list ); 1102 cxy_t process_cxy = GET_CXY( process_xp ); 1103 process_t * process_ptr = GET_PTR( process_xp ); 1104 1105 xptr_t entry_xp = XPTR( process_cxy , &process_ptr->fd_array.array[file_id] ); 956 1106 hal_remote_s64( entry_xp , XPTR_NULL ); 957 958 // remote_rwlock_wr_release( fd_array_lock_xp ); 959 960 1107 vfs_file_count_down( file_xp ); 961 962 1108 hal_fence(); 963 1109 } … … 966 1112 remote_queuelock_release( lock_xp ); 967 1113 968 #if (DEBUG_VFS_CLOSE & 1) 1114 #if DEBUG_VFS_CLOSE 969 1115 if( DEBUG_VFS_CLOSE < cycle ) 970 printk("\n[%s] thread[%x,%x] reset all fd-array copies\n", 971 __FUNCTION__, process->pid, this->trdid ); 1116 printk("\n[%s] thread[%x,%x] reset all fd-array copies for <%s>\n", 1117 __FUNCTION__, process->pid, this->trdid, name ); 972 1118 #endif 973 1119 974 // 2) release memory allocated to file descriptor in remote cluster 975 976 // get cluster and local pointer on remote file descriptor 977 file_cxy = GET_CXY( file_xp ); 978 file_ptr = GET_PTR( file_xp ); 1120 //////// 4) release memory allocated to file descriptor in remote cluster 979 1121 980 1122 if( file_cxy == local_cxy ) // file cluster is local … … 990 1132 cycle = (uint32_t)hal_get_cycles(); 991 1133 if( DEBUG_VFS_CLOSE < cycle ) 992 printk("\n[%s] thread[%x,%x] exit / fdid %d closed / cycle %d\n", 993 __FUNCTION__, process->pid, this->trdid, file_id, cycle ); 1134 printk("\n[%s] thread[%x,%x] exit / <%s> closed / cycle %d\n", 1135 __FUNCTION__, process->pid, this->trdid, name, cycle ); 994 1136 #endif … … 1120 1262 { 1121 1263 error = vfs_inode_create( parent_fs_type, 1122 INODE_TYPE_DIR, 1123 1264 attr, 1124 1265 rights, … … 1131 1272 rpc_vfs_inode_create_client( inode_cxy, 1132 1273 parent_fs_type, 1133 INODE_TYPE_DIR, 1134 1274 attr, 1135 1275 rights, … … 1152 1292 // get new inode local pointer 1153 1293 inode_ptr = GET_PTR( inode_xp ); 1294 1295 // update inode "type" field 1296 hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type ) , INODE_TYPE_DIR ); 1154 1297 1155 1298 #if(DEBUG_VFS_MKDIR & 1) … … 1455 1598 xptr_t dentry_xp; // extended pointer on dentry to unlink 1456 1599 vfs_dentry_t * dentry_ptr; // local pointer on dentry to unlink 1600 vfs_ctx_t * ctx_ptr; // local pointer on FS context 1601 vfs_fs_type_t fs_type; // File system type 1457 1602 1458 1603 char name[CONFIG_VFS_MAX_NAME_LENGTH]; // name of link to remove … … 1466 1611 vfs_inode_get_name( root_xp , root_name ); 1467 1612 if( DEBUG_VFS_UNLINK < cycle ) 1468 printk("\n[%s] thread[%x,%x] enter /root <%s> / path <%s> / cycle %d\n", 1613 printk("\n[%s] thread[%x,%x] : enter for 
root <%s> / path <%s> / cycle %d\n", 1469 1614 __FUNCTION__, process->pid, this->trdid, root_name, path, cycle ); 1470 1615 #endif … … 1501 1646 vfs_inode_get_name( parent_xp , parent_name ); 1502 1647 if( DEBUG_VFS_UNLINK < cycle ) 1503 printk("\n[%s] thread[%x,%x] parent inode <%s> is (%x,%x)\n", 1648 printk("\n[%s] thread[%x,%x] : parent inode <%s> is (%x,%x)\n", 1504 1649 __FUNCTION__, process->pid, this->trdid, parent_name, parent_cxy, parent_ptr ); 1505 1650 #endif … … 1508 1653 xptr_t children_xp = XPTR( parent_cxy , &parent_ptr->children ); 1509 1654 1510 // get extended pointer on dentry to unlink 1655 // try to get extended pointer on dentry from Inode Tree 1511 1656 dentry_xp = xhtab_lookup( children_xp , name ); 1512 1657 1513 if( dentry_xp == XPTR_NULL ) 1514 { 1515 remote_rwlock_wr_release( lock_xp ); 1516 printk("\n[ERROR] in %s : cannot get target dentry <%s> in <%s>\n", 1517 __FUNCTION__, name, path ); 1518 return -1; 1519 } 1520 1521 // get local pointer on dentry to unlink 1522 dentry_ptr = GET_PTR( dentry_xp ); 1658 // when the dentry is not found in the Inode Tree, try to get it from the parent mapper 1659 1660 if( dentry_xp == XPTR_NULL ) // miss target dentry in Inode Tree 1661 { 1523 1662 1524 1663 #if( DEBUG_VFS_UNLINK & 1 ) 1525 1664 if( DEBUG_VFS_UNLINK < cycle ) 1526 printk("\n[%s] thread[%x,%x] dentry <%s> to unlink is (%x,%x)\n", 1527 __FUNCTION__, process->pid, this->trdid, name, parent_cxy, dentry_ptr ); 1528 #endif 1529 1530 // get pointer on target inode 1531 inode_xp = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) ); 1532 inode_cxy = GET_CXY( inode_xp ); 1533 inode_ptr = GET_PTR( inode_xp ); 1534 1665 printk("\n[%s] thread[%x,%x] : inode <%s> not found => scan parent mapper\n", 1666 __FUNCTION__, process->pid, this->trdid, name ); 1667 #endif 1668 // get parent inode FS type 1669 ctx_ptr = hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->ctx ) ); 1670 fs_type = hal_remote_l32( XPTR( parent_cxy , &ctx_ptr->type ) ); 1671 1672 // select a cluster for new inode 1673 inode_cxy = cluster_random_select(); 1674 1675 // speculatively insert a new child dentry/inode couple in inode tree 1676 error = vfs_add_child_in_parent( inode_cxy, 1677 fs_type, 1678 parent_xp, 1679 name, 1680 &dentry_xp, 1681 &inode_xp ); 1682 if( error ) 1683 { 1684 printk("\n[ERROR] in %s : cannot create inode <%s> in path <%s>\n", 1685 __FUNCTION__ , name, path ); 1686 1687 vfs_remove_child_from_parent( dentry_xp ); 1688 return -1; 1689 } 1690 1691 // get local pointers on new dentry and new inode descriptors 1692 inode_ptr = GET_PTR( inode_xp ); 1693 dentry_ptr = GET_PTR( dentry_xp ); 1694 1695 // scan parent mapper to find the missing dentry, and complete 1696 // initialisation of new dentry and new inode descriptors in Inode Tree 1697 if( parent_cxy == local_cxy ) 1698 { 1699 error = vfs_fs_new_dentry( parent_ptr, 1700 name, 1701 inode_xp ); 1702 } 1703 else 1704 { 1705 rpc_vfs_fs_new_dentry_client( parent_cxy, 1706 parent_ptr, 1707 name, 1708 inode_xp, 1709 &error ); 1710 } 1711 1712 if ( error ) // dentry not found in parent mapper 1713 { 1714 printk("\n[ERROR] in %s : cannot get dentry <%s> in path <%s>\n", 1715 __FUNCTION__ , name, path ); 1716 return -1; 1717 } 1718 1719 #if (DEBUG_VFS_UNLINK & 1) 1720 if( DEBUG_VFS_UNLINK < cycle ) 1721 printk("\n[%s] thread[%x,%x] : created missing inode & dentry <%s> in cluster %x\n", 1722 __FUNCTION__, process->pid, this->trdid, name, inode_cxy ); 1723 #endif 1724 1725 } 1726 else // found target dentry in Inode Tree 1727 { 1728 dentry_ptr = GET_PTR( 
dentry_xp ); 1730 1731 // get pointer on target inode from dentry 1732 inode_xp = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) ); 1733 inode_cxy = GET_CXY( inode_xp ); 1734 inode_ptr = GET_PTR( inode_xp ); 1735 } 1736 1737 // At this point the Inode Tree contains the target dentry and child inode 1738 // we can safely remove this dentry from both the parent mapper, and the Inode Tree. 1739 1535 1740 #if( DEBUG_VFS_UNLINK & 1 ) 1536 char inode_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1537 vfs_inode_get_name( inode_xp , inode_name ); 1538 if( DEBUG_VFS_UNLINK < cycle ) 1539 printk("\n[%s] thread[%x,%x] target inode <%s> is (%x,%x) / cycle %d\n", 1540 __FUNCTION__, process->pid, this->trdid, inode_name, inode_cxy, inode_ptr, cycle ); 1741 printk("\n[%s] thread[%x,%x] : dentry (%x,%x) / inode (%x,%x)\n", 1742 __FUNCTION__, process->pid, this->trdid, parent_cxy, dentry_ptr, inode_cxy, inode_ptr ); 1541 1744 #endif … … 1545 1747 inode_links = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->links ) ); 1546 1748 1547 // check target inode links counter 1548 assert( (inode_links >= 1), "illegal inode links count %d for <%s>\n", inode_links, path ); 1549 1550 1749 /////////////////////////////////////////////////////////////////////// 1551 1750 if( (inode_type == INODE_TYPE_FILE) || (inode_type == INODE_TYPE_DIR) ) 1552 1751 { 1752 1753 #if( DEBUG_VFS_UNLINK & 1 ) 1754 if( DEBUG_VFS_UNLINK < cycle ) 1755 printk("\n[%s] thread[%x,%x] : unlink inode <%s> / type %s / %d links\n", 1756 __FUNCTION__, process->pid, this->trdid, name, vfs_inode_type_str(inode_type), inode_links ); 1757 #endif 1758 1553 1759 // 1. Release clusters allocated to target inode 1554 1760 // and synchronize the FAT on IOC device if last link. … … 1557 1763 // build extended pointer on target inode "children" number 1558 1764 xptr_t inode_children_xp = XPTR( inode_cxy , &inode_ptr->children.items ); 1765 1766 printk("\n@@@ in %s : children_xp = (%x,%x)\n", 1767 __FUNCTION__, inode_cxy, &inode_ptr->children.items ); 1768 1559 1769 // get target inode number of children … … 1713 1922 1714 1923 } // end vfs_stat() 1715 1716 ///////////////////////////////////////////// 1717 error_t vfs_readdir( xptr_t file_xp, 1718 struct dirent * k_dirent ) 1719 { 1720 assert( false , "not implemented file_xp: %x, k_dirent ptr %x\n", 1721 file_xp, k_dirent ); 1722 return 0; 1723 } 1724 1725 //////////////////////////////////// 1726 error_t vfs_rmdir( xptr_t file_xp, 1727 char * path ) 1728 { 1729 assert( false , "not implemented file_xp: %x, path <%s>\n", 1730 file_xp, path ); 1731 return 0; 1732 } 1733 1925 1734 1926 //////////////////////////////////// … … 2195 2386 cxy_t child_cxy; // cluster for child inode 2196 2387 vfs_inode_t * child_ptr; // local pointer on child inode 2197 vfs_inode_type_t child_type; // child inode type 2198 2388 vfs_fs_type_t fs_type; // File system type 2199 2389 vfs_ctx_t * ctx_ptr; // local pointer on FS context … … 2319 2509 child_cxy = cluster_random_select(); 2320 2510 2321 // define child inode type 2322 if( dir ) child_type = INODE_TYPE_DIR; 2323 else child_type = INODE_TYPE_FILE; 2324 2325 2511 // insert a new child dentry/inode couple in inode tree 2326 2512 error = vfs_add_child_in_parent( child_cxy, 2327 child_type, 2328 2513 fs_type, 2329 2514 parent_xp, … … 2350 2535 if( parent_cxy == local_cxy ) 2351 2536 { 2352 error = vfs_fs_get_dentry( parent_ptr, 2537 error = vfs_fs_new_dentry( parent_ptr, 2353 2538 name, 2354 2539 child_xp ); … … 2356 2541 else 2357 2542 { 2358 rpc_vfs_fs_get_dentry_client( 
parent_cxy, 2543 rpc_vfs_fs_new_dentry_client( parent_cxy, 2359 2544 parent_ptr, 2360 2545 name, … … 2961 3146 //////////////////////////////////////////////////////////////////// 2962 3147 error_t vfs_add_child_in_parent( cxy_t child_cxy, 2963 vfs_inode_type_t child_type, 2964 3148 vfs_fs_type_t fs_type, 2965 3149 xptr_t parent_inode_xp, … … 3038 3222 { 3039 3223 error = vfs_inode_create( fs_type, 3040 child_type, 3041 3224 attr, 3042 3225 mode, … … 3049 3232 rpc_vfs_inode_create_client( child_cxy, 3050 3233 fs_type, 3051 child_type, 3052 3234 attr, 3053 3235 mode, … … 3309 3491 3310 3492 //////////////////////////////////////////////// 3311 error_t vfs_fs_get_dentry( vfs_inode_t * parent, 3493 error_t vfs_fs_new_dentry( vfs_inode_t * parent, 3312 3494 char * name, 3313 3495 xptr_t child_xp ) … … 3325 3507 if( fs_type == FS_TYPE_FATFS ) 3326 3508 { 3327 error = fatfs_get_dentry( parent , name , child_xp ); 3509 error = fatfs_new_dentry( parent , name , child_xp ); 3328 3510 } 3329 3511 else if( fs_type == FS_TYPE_RAMFS ) … … 3342 3524 return error; 3343 3525 3344 } // end vfs_fs_get_dentry() 3526 } // end vfs_fs_new_dentry() 3527 3528 /////////////////////////////////////////////////// 3529 error_t vfs_fs_update_dentry( vfs_inode_t * inode, 3530 vfs_dentry_t * dentry, 3531 uint32_t size ) 3532 { 3533 error_t error = 0; 3534 3535 // check arguments 3536 assert( (inode != NULL) , "inode pointer is NULL\n"); 3537 assert( (dentry != NULL) , "dentry pointer is NULL\n"); 3538 3539 // get parent inode FS type 3540 vfs_fs_type_t fs_type = inode->ctx->type; 3541 3542 // call relevant FS function 3543 if( fs_type == FS_TYPE_FATFS ) 3544 { 3545 error = fatfs_update_dentry( inode , dentry , size ); 3546 } 3547 3548 else if( fs_type == FS_TYPE_RAMFS ) 3549 { 3550 assert( false , "should not be called for RAMFS\n" ); 3551 } 3552 else if( fs_type == FS_TYPE_DEVFS ) 3553 { 3554 assert( false , "should not be called for DEVFS\n" ); 3555 } 3556 else 3557 { 3558 assert( false , "undefined file system type\n" ); 3559 } 3560 3561 return error; 3562 3563 } // end vfs_fs_update_dentry() 3345 3563 3346 3564 /////////////////////////////////////////////////// -
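The new vfs_inode_update_size() makes size updates idempotent and monotonic, which simplifies its callers. A minimal usage sketch (editor illustration only; <inode_xp>, <offset> and <count> are assumed caller values, not names from the changeset):

    // the size can be updated unconditionally after each write: the function
    // only grows the size, under the size_lock taken in WRITE_MODE
    uint32_t new_size = offset + count;            // last byte position reached
    vfs_inode_update_size( inode_xp , new_size );  // no effect if current size is bigger

Concurrent writers may thus race on this call without corrupting the size: the largest value always wins.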
trunk/kernel/fs/vfs.h
r614 r623 108 108 /****************************************************************************************** 109 109 * This structure defines a VFS inode. 110 * An inode has several children dentries (if it is a directory), an can have several 110 * An inode can have several children dentries (if it is a directory), and can have several 111 111 * parents dentries (if it has several alias links): 112 112 * - The "parents" field is the root of the xlist of parents dentries, and the "links" … … 166 166 remote_rwlock_t size_lock; /*! protect read/write to size */ 167 167 remote_rwlock_t main_lock; /*! protect inode tree traversal and modifs */ 168 // list_entry_t list; /*! member of set of inodes in same cluster */ 169 // list_entry_t wait_root; /*! root of threads waiting on this inode */ 170 168 struct mapper_s * mapper; /*! associated file cache */ 171 169 void * extend; /*! fs_type_specific inode extension */ … … 195 193 196 194 /****************************************************************************************** 197 195 * This structure defines a directory entry. 198 * A dentry contains the name of a remote file/dir, an extended pointer on the 199 * inode representing this file/dir, a local pointer on the inode representing … … 321 319 *****************************************************************************************/ 322 320 error_t vfs_inode_create( vfs_fs_type_t fs_type, 323 vfs_inode_type_t inode_type, 324 321 uint32_t attr, … … 349 346 350 347 /****************************************************************************************** 351 * This function set the <size> of a file/dir to a remote inode, 352 * taking the remote_rwlock protecting <size> in WRITE_MODE. 348 * This function updates the "size" field of a remote inode identified by <inode_xp>. 349 * It takes the rwlock protecting the file size in WRITE_MODE, and sets the "size" field 350 * when the current size is smaller than the requested <size> argument. 353 351 ***************************************************************************************** 354 352 * @ inode_xp : extended pointer on the remote inode. 355 * @ size : value to be written. 356 *****************************************************************************************/ 357 void vfs_inode_set_size( xptr_t inode_xp, 358 uint32_t size ); 353 * @ size : requested size value. 354 *****************************************************************************************/ 355 void vfs_inode_update_size( xptr_t inode_xp, 356 uint32_t size ); 359 357 360 358 /****************************************************************************************** … … 451 449 * This function releases memory allocated to a local file descriptor. 452 450 * It must be executed by a thread running in the cluster containing the inode, 453 * and the file refcount must be zero. 454 * If the client thread is not running in the owner cluster, it must use the 455 * rpc_vfs_file_destroy_client() function. 451 * and the file refcount must be zero. Use the RPC_VFS_FILE_DESTROY if required. 456 452 ****************************************************************************************** 457 453 * @ file : local pointer on file descriptor. 
… … 465 461 void vfs_file_count_up ( xptr_t file_xp ); 466 462 void vfs_file_count_down( xptr_t file_xp ); 463 464 /****************************************************************************************** 465 * This debug function copies the name of the file identified by the <file_xp> 466 * argument to a local buffer identified by the <name> argument. 467 * The local buffer size must be at least CONFIG_VFS_MAX_NAME_LENGTH. 468 ***************************************************************************************** 469 * @ file_xp : extended pointer on the remote file descriptor. 470 * @ name : local buffer pointer. 471 *****************************************************************************************/ 472 void vfs_file_get_name( xptr_t file_xp, 473 char * name ); 467 474 468 475 … … 537 544 * Only the distributed Inode Tree is modified: it does NOT modify the parent mapper, 538 545 * and does NOT update the FS on IOC device. 546 * It sets the inode type to the default INODE_TYPE_FILE value 539 547 * It can be executed by any thread running in any cluster (can be different from both 540 548 * the child cluster and the parent cluster). … … 552 560 ****************************************************************************************** 553 561 * @ child_inode_cxy : [in] target cluster for child inode. 554 * @ child_inode_type : [in] child inode type 555 562 * @ fs_type : [in] child inode FS type. 556 563 * @ parent_inode_xp : [in] extended pointer on parent inode. … … 561 568 *****************************************************************************************/ 562 569 error_t vfs_add_child_in_parent( cxy_t child_inode_cxy, 563 vfs_inode_type_t child_inode_type, 564 570 vfs_fs_type_t fs_type, 565 571 xptr_t parent_inode_xp, … … 729 735 /****************************************************************************************** 730 736 * This function closes the - non-replicated - file descriptor identified by the <file_xp> 731 * and <file_id> arguments. 737 * and <file_id> arguments. The <file_id> is required to reset the fd_array[] slot. 738 * It can be called by a thread running in any cluster, and executes the following actions: 739 * 1) It accesses the block device to update all dirty pages from the mapper associated 740 * to the file, and removes these pages from the dirty list, using an RPC if required. 741 * 2) It updates the file size in all parent directory mapper(s), and updates the modified 742 * pages on the block device, using RPCs if required. 732 * 1) All entries in the fd_array copies are directly reset by the calling thread, 743 * 3) All entries in the fd_array copies are directly reset by the calling thread, 733 744 * using remote accesses. 734 * 2) The memory allocated to file descriptor in cluster containing the inode is released. 735 * It requires a RPC if cluster containing the file descriptor is remote. 745 * 4) The memory allocated to file descriptor in cluster containing the inode is released, 746 * using an RPC if cluster containing the file descriptor is remote. 736 747 ****************************************************************************************** 737 * @ file_xp : extended pointer on the file descriptor in owner cluster. 738 * @ file_id : file descriptor index in fd_array. 748 * @ file_xp : extended pointer on the file descriptor. 749 * @ file_id : file descriptor index in fd_array[]. 739 750 * @ returns 0 if success / -1 if error. 
740 751 *****************************************************************************************/ … … 877 888 /****************************************************************************************** 878 889 * This function makes the I/O operation to move one page identified by the <page_xp> 879 * argument to/from the IOC device from/to the mapper, as defined by <cmd_type>. 890 * argument to/from the IOC device from/to the mapper, as defined by the <cmd_type>. 880 891 * Depending on the file system type, it calls the proper, FS specific function. 881 892 * It is used in case of MISS on the mapper, or when a dirty page in the mapper must … … 918 929 * Finally, it synchronously updates the parent directory on IOC device. 919 930 * 931 * Depending on the file system type, it calls the relevant, FS specific function. 920 932 * It must be executed by a thread running in the cluster containing the parent directory. 921 * It can be the RPC_VFS_VS_REMOVE_DENTRY. This function does NOT take any lock. 933 * It can be the RPC_VFS_FS_REMOVE_DENTRY. This function does NOT take any lock. 922 934 ****************************************************************************************** 923 935 * @ parent : local pointer on parent (directory) inode. … … 933 945 * and updates both the child inode descriptor, identified by the <child_xp> argument, 934 946 * and the associated dentry descriptor : 935 * - It sets the "size", and "extend" fields in inode descriptor. 947 * - It sets the "size", "type", and "extend" fields in inode descriptor. 936 948 * - It sets the "extend" field in dentry descriptor. 937 949 * It is called by the vfs_lookup() function in case of miss. … … 939 951 * Depending on the file system type, it calls the relevant, FS specific function. 940 952 * It must be called by a thread running in the cluster containing the parent inode. 941 * This function does NOT take any lock. 953 * It can be the RPC_VFS_FS_NEW_DENTRY. This function does NOT take any lock. 942 954 ****************************************************************************************** 943 955 * @ parent : local pointer on parent inode (directory). 944 956 * @ name : child name. 945 957 * @ child_xp : extended pointer on remote child inode (file or directory) 946 * @ return 0 if success / return ENOENT if not found. 958 * @ return 0 if success / return -1 if dentry not found. 947 959 *****************************************************************************************/ 948 960 error_t vfs_fs_get_dentry( vfs_inode_t * parent, 961 error_t vfs_fs_new_dentry( vfs_inode_t * parent, 949 962 char * name, 950 963 xptr_t child_xp ); 964 965 /****************************************************************************************** 966 * This function scans the mapper of an existing directory inode, identified by 967 * the <inode> argument, to find a directory entry identified by the <dentry> argument, 968 * and updates the size for this directory entry in mapper, as defined by <size>. 969 * The searched "name" is defined in the <dentry> argument, that must be in the same 970 * cluster as the parent inode. It is called by the vfs_close() function. 971 * 972 * Depending on the file system type, it calls the relevant, FS specific function. 973 * It must be called by a thread running in the cluster containing the parent inode. 974 * It can be the RPC_VFS_FS_UPDATE_DENTRY. This function does NOT take any lock. 
974 ****************************************************************************************** 975 * @ inode : local pointer on inode (directory). 976 * @ dentry : local pointer on dentry. 977 * @ size : new size value (bytes). 978 * @ return 0 if success / return ENOENT if not found. 979 ****************************************************************************************/ 980 error_t vfs_fs_update_dentry( vfs_inode_t * inode, 981 vfs_dentry_t * dentry, 982 uint32_t size ); 951 983 952 984 /****************************************************************************************** -
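A calling sketch for this function (editor illustration only), following the local/RPC pattern that vfs_close() uses in vfs.c above; parent_cxy, parent_inode_ptr, parent_dentry_ptr, size and error are the caller's variables:

    if( parent_cxy == local_cxy )      // directory inode is local
    {
        error = vfs_fs_update_dentry( parent_inode_ptr , parent_dentry_ptr , size );
    }
    else                               // go through the RPC server thread
    {
        rpc_vfs_fs_update_dentry_client( parent_cxy , parent_inode_ptr ,
                                         parent_dentry_ptr , size , &error );
    }

This "run locally or delegate by RPC" shape is the standard convention for all vfs_fs_* functions that must execute in the cluster owning the directory mapper.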
trunk/kernel/kern/kernel_init.c
r619 r623 3 3 * 4 4 * Authors : Mohamed Lamine Karaoui (2015) 5 * Alain Greiner (2016,2017,2018) 5 * Alain Greiner (2016,2017,2018,2019) 6 6 * 7 7 * Copyright (c) Sorbonne Universites … … 113 113 cxy_t local_cxy CONFIG_CACHE_LINE_ALIGNED; 114 114 115 // This variable is used for CP0 cores synchronisation in kernel_init() 115 // This variable is used for core[0] synchronisation in kernel_init() 116 116 __attribute__((section(".kdata"))) 117 117 xbarrier_t global_barrier CONFIG_CACHE_LINE_ALIGNED; … … 126 126 127 127 // kernel_init is the entry point defined in hal/tsar_mips32/kernel.ld 128 // It is used by the bootloader. 128 // It is used by the bootloader to transfer control to the kernel. 129 129 extern void kernel_init( boot_info_t * info ); 130 130 … … 466 466 // These chdev descriptors are distributed on all clusters, using a modulo on a global 467 467 // index, identically computed in all clusters. 468 // This function is executed in all clusters by the CP0 core, that computes a global index 469 // for all external chdevs. Each CP0 core creates only the chdevs that must be placed in 468 // This function is executed in all clusters by core[0], that computes a global index 469 // for all external chdevs. Each core[0] creates only the chdevs that must be placed in 470 470 // the local cluster, because the global index matches the local index. 471 471 // The relevant entries in all copies of the devices directory are initialised. … … 626 626 627 627 /////////////////////////////////////////////////////////////////////////////////////////// 628 // This function is called by CP0 in cluster 0 to allocate memory and initialize the PIC 628 // This function is called by core[0] in cluster 0 to allocate memory and initialize the PIC 629 629 // device, namely the informations attached to the external IOPIC controller, that 630 630 // must be replicated in all clusters (struct iopic_input). … … 791 791 792 792 /////////////////////////////////////////////////////////////////////////////////////////// 793 // This function is called by all CP0s in all cluster to complete the PIC device 793 // This function is called by all core[0]s in all clusters to complete the PIC device 794 794 // initialisation, namely the informations attached to the LAPIC controller. 795 795 // This initialisation must be done after the IOPIC initialisation, but before other … … 899 899 /////////////////////////////////////////////////////////////////////////////////////////// 900 900 // This function is the entry point for the kernel initialisation. 901 // It is executed by all cores in all clusters, but only core[0] , called CP0, 902 // initializes the shared resources such as the cluster manager, or the local peripherals. 901 // It is executed by all cores in all clusters, but only core[0] initializes 902 // the shared resources such as the cluster manager, or the local peripherals. 903 903 // To comply with the multi-kernels paradigm, it accesses only local cluster memory, using 904 904 // only information contained in the local boot_info_t structure, set by the bootloader. 905 // Only CP0 in cluster 0 print the log messages. 905 // Only core[0] in cluster 0 prints the log messages. 906 906 /////////////////////////////////////////////////////////////////////////////////////////// 907 907 // @ info : pointer on the local boot-info structure. 
… … 925 925 926 926 ///////////////////////////////////////////////////////////////////////////////// 927 // STEP 0 : Each core get its core identifier from boot_info, and makes 927 // STEP 1 : Each core gets its core identifier from boot_info, and makes 928 928 // a partial initialisation of its private idle thread descriptor. 929 // CP0 initializes the "local_cxy" global variable. 930 // CP0 in cluster IO initializes the TXT0 chdev to print log messages. 929 // core[0] initializes the "local_cxy" global variable. 930 // core[0] in cluster[0] initializes the TXT0 chdev for log messages. 931 931 ///////////////////////////////////////////////////////////////////////////////// 932 932 … … 936 936 &core_gid ); 937 937 938 // all CP0s initialize cluster identifier 938 // core[0] initializes cluster identifier 939 939 if( core_lid == 0 ) local_cxy = info->cxy; 940 940 … … 956 956 #endif 957 957 958 // all CP0s initialize cluster info 958 // core[0] initializes cluster info 959 959 if( core_lid == 0 ) cluster_info_init( info ); 960 960 961 // CP0 in cluster 0 initialises TXT0 chdev descriptor 961 // core[0] in cluster[0] initialises TXT0 chdev descriptor 962 962 if( (core_lid == 0) && (core_cxy == 0) ) txt0_device_init( info ); 963 964 // all cores check identifiers 965 if( error ) 966 { 967 printk("\n[PANIC] in %s : illegal core : gid %x / cxy %x / lid %d", 968 __FUNCTION__, core_lid, core_cxy, core_lid ); 969 hal_core_sleep(); 970 } 963 971 964 972 ///////////////////////////////////////////////////////////////////////////////// … … 970 978 #if DEBUG_KERNEL_INIT 971 979 if( (core_lid == 0) & (local_cxy == 0) ) 972 printk("\n[%s] : exit barrier 0 : TXT0 initialized / cycle %d\n", 980 printk("\n[%s] : exit barrier 1 : TXT0 initialized / cycle %d\n", 973 981 __FUNCTION__, (uint32_t)hal_get_cycles() ); 974 982 #endif 975 983 ///////////////////////////////////////////////////////////////////////////// 977 // STEP 1 : all cores check core identifier. 978 // CP0 initializes the local cluster manager. 979 // This includes the memory allocators. 980 ///////////////////////////////////////////////////////////////////////////// 981 982 // all cores check identifiers 983 if( error ) 984 { 985 printk("\n[PANIC] in %s : illegal core : gid %x / cxy %x / lid %d", 986 __FUNCTION__, core_lid, core_cxy, core_lid ); 987 hal_core_sleep(); 988 } 989 990 // all CP0s initialise DQDT (only CPO in cluster 0 build the quad-tree) 984 ///////////////////////////////////////////////////////////////////////////////// 985 // STEP 2 : core[0] initializes the cluster manager, 986 // including the physical memory allocator. 
987 ///////////////////////////////////////////////////////////////////////////////// 988 989 // core[0] initialises DQDT (only core[0] in cluster 0 builds the quad-tree) 991 990 if( core_lid == 0 ) dqdt_init(); 992 991 993 // all CP0s initialize other cluster manager complex structures 992 // core[0] initializes other cluster manager complex structures 994 993 if( core_lid == 0 ) 995 994 { … … 1012 1011 #if DEBUG_KERNEL_INIT 1013 1012 if( (core_lid == 0) & (local_cxy == 0) ) 1014 printk("\n[%s] : exit barrier 1 : clusters initialised / cycle %d\n", 1013 printk("\n[%s] : exit barrier 2 : cluster manager initialized / cycle %d\n", 1015 1014 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1016 1015 #endif 1017 1016 1018 1017 ///////////////////////////////////////////////////////////////////////////////// 1019 // STEP 2 : CP0 initializes the process_zero descriptor. 1020 // CP0 in cluster 0 initializes the IOPIC device. 1018 // STEP 3 : core[0] initializes the process_zero descriptor, 1019 // including the kernel VMM (both GPT and VSL) 1021 1020 ///////////////////////////////////////////////////////////////////////////////// 1022 1021 … … 1025 1024 core = &cluster->core_tbl[core_lid]; 1026 1025 1027 // all CP0s initialize the process_zero descriptor 1028 if( core_lid == 0 ) process_zero_create( &process_zero ); 1029 1030 // CP0 in cluster 0 initializes the PIC chdev, 1026 // core[0] initializes the process_zero descriptor, 1027 if( core_lid == 0 ) process_zero_create( &process_zero , info ); 1028 1029 ///////////////////////////////////////////////////////////////////////////////// 1030 if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ), 1031 (info->x_size * info->y_size) ); 1032 barrier_wait( &local_barrier , info->cores_nr ); 1033 ///////////////////////////////////////////////////////////////////////////////// 1034 1035 #if DEBUG_KERNEL_INIT 1036 if( (core_lid == 0) & (local_cxy == 0) ) 1037 printk("\n[%s] : exit barrier 3 : kernel process initialized / cycle %d\n", 1038 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1039 #endif 1040 1041 ///////////////////////////////////////////////////////////////////////////////// 1042 // STEP 4 : all cores initialize their private MMU 1043 // core[0] in cluster 0 initializes the IOPIC device. 1044 ///////////////////////////////////////////////////////////////////////////////// 1045 1046 // all cores initialise their MMU 1047 hal_mmu_init( &process_zero.vmm.gpt ); 1048 1049 // core[0] in cluster[0] initializes the PIC chdev, 1031 1050 if( (core_lid == 0) && (local_cxy == 0) ) iopic_init( info ); 1032 1051 … … 1039 1058 #if DEBUG_KERNEL_INIT 1040 1059 if( (core_lid == 0) & (local_cxy == 0) ) 1041 printk("\n[%s] : exit barrier 2 : PIC initialised / cycle %d\n", 1060 printk("\n[%s] : exit barrier 4 : MMU and IOPIC initialized / cycle %d\n", 1042 1061 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1043 1062 #endif 1044 1063 1045 1064 //////////////////////////////////////////////////////////////////////////////// 1046 // STEP 3 : CP0 initializes the distibuted LAPIC descriptor. 1047 // CP0 initializes the internal chdev descriptors 1048 // CP0 initialize the local external chdev descriptors 1065 // STEP 5 : core[0] initializes the distributed LAPIC descriptor. 
1066 // core[0] initializes the internal chdev descriptors 1067 // core[0] initializes the local external chdev descriptors 1049 1068 //////////////////////////////////////////////////////////////////////////////// 1050 1069 1051 // all CP0s initialize their local LAPIC extension,1070 // all core[0]s initialize their local LAPIC extension, 1052 1071 if( core_lid == 0 ) lapic_init( info ); 1053 1072 1054 // CP0scan the internal (private) peripherals,1073 // core[0] scans the internal (private) peripherals, 1055 1074 // and allocates memory for the corresponding chdev descriptors. 1056 1075 if( core_lid == 0 ) internal_devices_init( info ); 1057 1076 1058 1077 1059 // All CP0s contribute to initialise external peripheral chdev descriptors.1060 // Each CP0[cxy] scan the set of external (shared) peripherals (but the TXT0),1078 // All core[0]s contribute to initialise external peripheral chdev descriptors. 1079 // Each core[0][cxy] scans the set of external (shared) peripherals (but the TXT0), 1061 1080 // and allocates memory for the chdev descriptors that must be placed 1062 1081 // on the (cxy) cluster according to the global index value. … … 1072 1091 #if DEBUG_KERNEL_INIT 1073 1092 if( (core_lid == 0) & (local_cxy == 0) ) 1074 printk("\n[%s] : exit barrier 3: all chdevs initialised / cycle %d\n",1093 printk("\n[%s] : exit barrier 5 : all chdevs initialised / cycle %d\n", 1075 1094 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1076 1095 #endif … … 1082 1101 1083 1102 ///////////////////////////////////////////////////////////////////////////////// 1084 // STEP 4: All cores enable IPI (Inter Processor Interrupt),1103 // STEP 6 : All cores enable IPI (Inter Processor Interrupt), 1085 1104 // All cores initialize IDLE thread. 1086 // Only CP0 in cluster 0creates the VFS root inode.1105 // Only core[0] in cluster[0] creates the VFS root inode. 1087 1106 // It accesses the boot device to initialize the file system context. 1088 1107 ///////////////////////////////////////////////////////////////////////////////// … … 1107 1126 #endif 1108 1127 1109 // CPO in cluster 0creates the VFS root1128 // core[0] in cluster[0] creates the VFS root 1110 1129 if( (core_lid == 0) && (local_cxy == 0 ) ) 1111 1130 { … … 1137 1156 // 4. create VFS root inode in cluster 0 1138 1157 error = vfs_inode_create( FS_TYPE_FATFS, // fs_type 1139 INODE_TYPE_DIR, // inode_type1140 1158 0, // attr 1141 1159 0, // rights … … 1150 1168 } 1151 1169 1152 // 5. update FATFS root inode extension1170 // 5. 
update FATFS root inode "type" and "extend" fields 1153 1171 cxy_t vfs_root_cxy = GET_CXY( vfs_root_inode_xp ); 1154 1172 vfs_inode_t * vfs_root_ptr = GET_PTR( vfs_root_inode_xp ); 1173 hal_remote_s32( XPTR( vfs_root_cxy , &vfs_root_ptr->extend ), INODE_TYPE_DIR ); 1155 1174 hal_remote_spt( XPTR( vfs_root_cxy , &vfs_root_ptr->extend ), 1156 1175 (void*)(intptr_t)root_dir_cluster ); … … 1189 1208 #if DEBUG_KERNEL_INIT 1190 1209 if( (core_lid == 0) & (local_cxy == 0) ) 1191 printk("\n[%s] : exit barrier 4: VFS root (%x,%x) in cluster 0 / cycle %d\n",1210 printk("\n[%s] : exit barrier 6 : VFS root (%x,%x) in cluster 0 / cycle %d\n", 1192 1211 __FUNCTION__, GET_CXY(process_zero.vfs_root_xp), 1193 1212 GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() ); … … 1195 1214 1196 1215 ///////////////////////////////////////////////////////////////////////////////// 1197 // STEP 5 : Other CP0s allocate memory for the selected FS context,1198 // and initialise both the local FS context and the local VFS context1199 // from values stored in cluster 0.1216 // STEP 7 : In all other clusters than cluster[0], the core[0] allocates memory 1217 // for the selected FS context, and initialises the local FS context and 1218 // the local VFS context from values stored in cluster 0. 1200 1219 // They get the VFS root inode extended pointer from cluster 0. 1201 1220 ///////////////////////////////////////////////////////////////////////////////// … … 1259 1278 #if DEBUG_KERNEL_INIT 1260 1279 if( (core_lid == 0) & (local_cxy == 1) ) 1261 printk("\n[%s] : exit barrier 5: VFS root (%x,%x) in cluster 1 / cycle %d\n",1280 printk("\n[%s] : exit barrier 7 : VFS root (%x,%x) in cluster 1 / cycle %d\n", 1262 1281 __FUNCTION__, GET_CXY(process_zero.vfs_root_xp), 1263 1282 GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() ); … … 1265 1284 1266 1285 ///////////////////////////////////////////////////////////////////////////////// 1267 // STEP 6 : CP0 in cluster 0 makes the global DEVFS treeinitialisation:1286 // STEP 8 : core[0] in cluster 0 makes the global DEVFS initialisation: 1268 1287 // It initializes the DEVFS context, and creates the DEVFS 1269 1288 // "dev" and "external" inodes in cluster 0. … … 1309 1328 #if DEBUG_KERNEL_INIT 1310 1329 if( (core_lid == 0) & (local_cxy == 0) ) 1311 printk("\n[%s] : exit barrier 6: DEVFS root initialized in cluster 0 / cycle %d\n",1330 printk("\n[%s] : exit barrier 8 : DEVFS root initialized in cluster 0 / cycle %d\n", 1312 1331 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1313 1332 #endif 1314 1333 1315 1334 ///////////////////////////////////////////////////////////////////////////////// 1316 // STEP 7 : All CP0s complete in parallel the DEVFS treeinitialization.1335 // STEP 9 : All core[0]s complete in parallel the DEVFS initialization. 1317 // Each CP0get the "dev" and "external" extended pointers from1336 // Each core[0] gets the "dev" and "external" extended pointers from 1318 1337 // values stored in cluster 0. 1319 // Then each CP0in cluster(i) creates the DEVFS "internal" directory,1338 // Then each core[0] in cluster(i) creates the DEVFS "internal" directory, 1320 1339 // and creates the pseudo-files for all chdevs in cluster (i). 
1321 1340 ///////////////////////////////////////////////////////////////////////////////// … … 1346 1365 #if DEBUG_KERNEL_INIT 1347 1366 if( (core_lid == 0) & (local_cxy == 0) ) 1348 printk("\n[%s] : exit barrier 7 : DEVinitialized in cluster 0 / cycle %d\n",1367 printk("\n[%s] : exit barrier 9 : DEVFS initialized in cluster 0 / cycle %d\n", 1349 1368 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1350 1369 #endif 1351 1370 1352 ///////////////////////////////////////////////////////////////////////////////// 1353 // STEP 8 : CP0 in cluster 0 creates the first user process (process_init) 1371 #if( DEBUG_KERNEL_INIT & 1 ) 1372 if( (core_lid == 0) & (local_cxy == 0) ) 1373 vfs_display( vfs_root_inode_xp ); 1374 #endif 1375 1376 ///////////////////////////////////////////////////////////////////////////////// 1377 // STEP 10 : core[0] in cluster 0 creates the first user process (process_init). 1378 // This includes the first user process VMM (GPT and VSL) creation. 1379 // Finally, it prints the ALMOS-MKH banner. 1354 1380 ///////////////////////////////////////////////////////////////////////////////// 1355 1381 1356 1382 if( (core_lid == 0) && (local_cxy == 0) ) 1357 1383 { 1358 1359 #if( DEBUG_KERNEL_INIT & 1 )1360 vfs_display( vfs_root_inode_xp );1361 #endif1362 1363 1384 process_init_create(); 1364 1385 } 1365 1366 /////////////////////////////////////////////////////////////////////////////////1367 if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),1368 (info->x_size * info->y_size) );1369 barrier_wait( &local_barrier , info->cores_nr );1370 /////////////////////////////////////////////////////////////////////////////////1371 1372 #if DEBUG_KERNEL_INIT1373 if( (core_lid == 0) & (local_cxy == 0) )1374 printk("\n[%s] : exit barrier 8 : process init created / cycle %d\n",1375 __FUNCTION__, (uint32_t)hal_get_cycles() );1376 #endif1377 1386 1378 1387 #if (DEBUG_KERNEL_INIT & 1) … … 1381 1390 #endif 1382 1391 1383 /////////////////////////////////////////////////////////////////////////////////1384 // STEP 9 : CP0 in cluster 0 print banner1385 /////////////////////////////////////////////////////////////////////////////////1386 1387 1392 if( (core_lid == 0) && (local_cxy == 0) ) 1388 1393 { 1389 1394 print_banner( (info->x_size * info->y_size) , info->cores_nr ); 1395 } 1390 1396 1391 1397 #if( DEBUG_KERNEL_INIT & 1 ) 1398 if( (core_lid == 0) & (local_cxy == 0) ) 1392 1399 printk("\n\n***** memory footprint for main kernel objects\n\n" 1393 1400 " - thread descriptor : %d bytes\n" … … 1437 1444 #endif 1438 1445 1439 } 1446 // each core updates the register(s) defining the kernel 1447 // entry points for interrupts, exceptions and syscalls... 1448 hal_set_kentry(); 1440 1449 1441 1450 // each core activates its private TICK IRQ … … 1448 1457 ///////////////////////////////////////////////////////////////////////////////// 1449 1458 1450 #if DEBUG_KERNEL_INIT1459 #if( DEBUG_KERNEL_INIT & 1 ) 1451 1460 thread_t * this = CURRENT_THREAD; 1452 1461 printk("\n[%s] : thread[%x,%x] on core[%x,%d] jumps to thread_idle_func() / cycle %d\n", -
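The rendezvous idiom separating the STEP sections above deserves a closer look, since it is repeated after every step. A minimal sketch of the pattern, reusing the global_barrier / local_barrier variables and the boot_info_t fields visible in this hunk (the surrounding declarations are assumed from kernel_init.c):

    // two-level rendezvous between two initialisation steps:
    // core[0] of each cluster enters the global barrier (one slot per cluster),
    // then all cores of the cluster meet on their local barrier
    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
                                       (info->x_size * info->y_size) );
    barrier_wait( &local_barrier , info->cores_nr );

Only core[0] of each cluster pays for the cross-cluster synchronisation; the other cores wait on a barrier located in their own memory bank.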
trunk/kernel/kern/printk.c
r583 r623 48 48 49 49 va_list args; // printf arguments 50 uint32_t ps; // writepointer to the string buffer50 uint32_t ps; // pointer to the string buffer 51 51 52 52 ps = 0; … … 57 57 while ( *format != 0 ) 58 58 { 59 60 59 if (*format == '%') // copy argument to string 61 60 { … … 98 97 break; 99 98 } 100 case ('d'): // decimal signed integer 99 case ('b'): // exactly 2 digits hexadecimal integer 100 { 101 int val = va_arg( args, int ); 102 int val_lsb = val & 0xF; 103 int val_msb = (val >> 4) & 0xF; 104 buf[0] = HexaTab[val_msb]; 105 buf[1] = HexaTab[val_lsb]; 106 len = 2; 107 pbuf = buf; 108 break; 109 } 110 case ('d'): // up to 10 digits decimal signed integer 101 111 { 102 112 int val = va_arg( args, int ); … … 108 118 for(i = 0; i < 10; i++) 109 119 { 110 111 120 buf[9 - i] = HexaTab[val % 10]; 112 121 if (!(val /= 10)) break; … … 116 125 break; 117 126 } 118 case ('u'): // decimal unsigned integer127 case ('u'): // up to 10 digits decimal unsigned integer 119 128 { 120 129 uint32_t val = va_arg( args, uint32_t ); … … 128 137 break; 129 138 } 130 case ('x'): // 32 bits hexadecimal131 case ('l'): // 64 bits hexadecimal139 case ('x'): // up to 8 digits hexadecimal 140 case ('l'): // up to 16 digits hexadecimal 132 141 { 133 142 uint32_t imax; … … 157 166 break; 158 167 } 159 case ('X'): // 32 bits hexadecimal on 8 characters168 case ('X'): // exactly 8 digits hexadecimal 160 169 { 161 170 uint32_t val = va_arg( args , uint32_t ); … … 238 247 case ('c'): /* char conversion */ 239 248 { 240 int val = va_arg( *args , int );249 int val = va_arg( *args , int ); 241 250 len = 1; 242 buf[0] = val;251 buf[0] = (char)val; 243 252 pbuf = &buf[0]; 244 253 break; 245 254 } 246 case ('d'): /* 32 bits decimal signed */ 255 case ('b'): // exactly 2 digits hexadecimal 256 { 257 int val = va_arg( *args, int ); 258 int val_lsb = val & 0xF; 259 int val_msb = (val >> 4) & 0xF; 260 buf[0] = HexaTab[val_msb]; 261 buf[1] = HexaTab[val_lsb]; 262 len = 2; 263 pbuf = buf; 264 break; 265 } 266 case ('d'): /* up to 10 digits signed decimal */ 247 267 { 248 268 int val = va_arg( *args , int ); … … 261 281 break; 262 282 } 263 case ('u'): /* 32 bits decimal unsigned*/283 case ('u'): /* up to 10 digits unsigned decimal */ 264 284 { 265 285 uint32_t val = va_arg( *args , uint32_t ); … … 273 293 break; 274 294 } 275 case ('x'): /* 32 bits hexadecimal unsigned*/295 case ('x'): /* up to 8 digits hexadecimal */ 276 296 { 277 297 uint32_t val = va_arg( *args , uint32_t ); … … 286 306 break; 287 307 } 288 case ('X'): /* 32 bits hexadecimal unsigned on 10 char*/308 case ('X'): /* exactly 8 digits hexadecimal */ 289 309 { 290 310 uint32_t val = va_arg( *args , uint32_t ); … … 299 319 break; 300 320 } 301 case ('l'): /* 64 bits hexadecimal unsigned*/302 { 303 u nsigned long long val = va_arg( *args , unsigned long long);321 case ('l'): /* up to 16 digits hexadecimal */ 322 { 323 uint64_t val = va_arg( *args , uint64_t ); 304 324 dev_txt_sync_write( "0x" , 2 ); 305 325 for(i = 0; i < 16; i++) … … 312 332 break; 313 333 } 314 case ('L'): /* 64 bits hexadecimal unsigned on 18 char*/315 { 316 u nsigned long long val = va_arg( *args , unsigned long long);334 case ('L'): /* exactly 16 digits hexadecimal */ 335 { 336 uint64_t val = va_arg( *args , uint64_t ); 317 337 dev_txt_sync_write( "0x" , 2 ); 318 338 for(i = 0; i < 16; i++) … … 525 545 } 526 546 547 ///////////////////////////// 548 void putb( char * string, 549 uint8_t * buffer, 550 uint32_t size ) 551 { 552 uint32_t line; 553 uint32_t byte = 0; 554 555 // get pointers on 
TXT0 chdev 556 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 557 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 558 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 559 560 // get extended pointer on remote TXT0 chdev lock 561 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 562 563 // get TXT0 lock 564 remote_busylock_acquire( lock_xp ); 565 566 // display string on TTY0 567 nolock_printk("\n***** %s *****\n", string ); 568 569 for ( line = 0 ; line < (size>>4) ; line++ ) 570 { 571 nolock_printk(" %X | %b %b %b %b | %b %b %b %b | %b %b %b %b | %b %b %b %b \n", 572 byte, 573 buffer[byte+ 0],buffer[byte+ 1],buffer[byte+ 2],buffer[byte+ 3], 574 buffer[byte+ 4],buffer[byte+ 5],buffer[byte+ 6],buffer[byte+ 7], 575 buffer[byte+ 8],buffer[byte+ 9],buffer[byte+10],buffer[byte+11], 576 buffer[byte+12],buffer[byte+13],buffer[byte+14],buffer[byte+15] ); 577 578 byte += 16; 579 } 580 581 // release TXT0 lock 582 remote_busylock_release( lock_xp ); 583 } 584 585 527 586 528 587 // Local Variables: -
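The new putb() helper and the %b conversion added above can be exercised as sketched below; fat_buf and its content are illustrative names, not taken from this changeset:

    uint8_t fat_buf[32];    // filled by a previous device read (illustrative)

    // dump the buffer on TXT0, 16 bytes per line; each byte is printed
    // with the new exactly-2-digits hexadecimal %b conversion
    putb( "FAT sector (first 32 bytes)" , fat_buf , 32 );

    // %b is also directly usable in a printk() format string
    printk("\n first byte = %b\n", fat_buf[0] );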
trunk/kernel/kern/printk.h
r583 r623 24 24 /////////////////////////////////////////////////////////////////////////////////// 25 25 // The printk.c and printk.h files define the functions used by the kernel 26 // to display messages on a text terminal. 27 // Two access modes are supported: 28 // - The printk() function displays kernel messages on the kernel terminal TXT0, 29 // using a busy waiting policy: It calls directly the relevant TXT driver, 30 // after taking the TXT0 busylock for exclusive access to the TXT0 terminal. 31 // - The user_printk() function displays messages on the calling thread private 32 // terminal, using a descheduling policy: it register the request in the selected 33 // TXT chdev waiting queue and deschedule. The calling thread is reactivated by 34 // the IRQ signalling completion. 35 // Both functions use the generic TXT device to call the proper implementation 36 // dependant TXT driver. 37 // Finally these files define a set of conditional trace <***_dmsg> for debug. 26 // to display messages on the kernel terminal TXT0, using a busy waiting policy. 27 // It calls synchronously the TXT0 driver, without descheduling. /////////////////////////////////////////////////////////////////////////////////// 38 28 … … 44 34 #include <stdarg.h> 45 35 46 #include <hal_special.h> // hal_get_cycles()36 #include <hal_special.h> 47 37 48 38 /********************************************************************************** 49 39 * This function builds a formatted string. 50 40 * The supported formats are defined below : 51 * %c : single character 52 * %d : signed decimal 32 bits integer 53 * %u : unsigned decimal 32 bits integer 54 * %x : hexadecimal 32 bits integer 55 * %l : hexadecimal 64 bits integer 41 * %b : exactly 2 digits hexadecimal integer (8 bits) 42 * %c : single ascii character (8 bits) 43 * %d : up to 10 digits decimal integer (32 bits) 44 * %u : up to 10 digits unsigned decimal (32 bits) 45 * %x : up to 8 digits hexadecimal integer (32 bits) 46 * %X : exactly 8 digits hexadecimal integer (32 bits) 47 * %l : up to 16 digits hexadecimal integer (64 bits) 48 * %L : exactly 16 digits hexadecimal integer (64 bits) 56 49 * %s : NUL terminated character string 57 50 ********************************************************************************** … … 153 146 void putl( uint64_t val ); 154 147 148 /********************************************************************************** 149 * This debug function displays on the kernel TXT0 terminal the content of an 150 * array of bytes defined by <buffer> and <size> arguments (16 bytes per line). 151 * The <string> argument is displayed before the buffer content. 152 * The line format is an address followed by 16 (hexa) bytes. 153 ********************************************************************************** 154 * @ string : buffer name or identifier. 155 * @ buffer : local pointer on bytes array. 156 * @ size : number of bytes to display. 157 *********************************************************************************/ 158 void putb( char * string, 159 uint8_t * buffer, 160 uint32_t size ); 161 162 155 163 156 164 #endif // _PRINTK_H -
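The difference between the variable-width and fixed-width conversions listed above matters mostly for aligned debug output; a short hedged example (values illustrative):

    uint32_t cxy  = 0x2;
    uint64_t xptr = 0xB0000123ULL;

    printk(" cxy = %x\n", cxy );    // significant digits only
    printk(" cxy = %X\n", cxy );    // always 8 hexadecimal digits
    printk(" ptr = %l\n", xptr );   // 64 bits argument, up to 16 digits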
trunk/kernel/kern/process.c
r619 r623 29 29 #include <hal_uspace.h> 30 30 #include <hal_irqmask.h> 31 #include <hal_vmm.h> 31 32 #include <errno.h> 32 33 #include <printk.h> … … 486 487 } 487 488 488 // FIXME decrement the refcount on file pointer by vfs_bin_xp [AG] 489 // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG] 490 489 491 // FIXME close all open files [AG] 492 490 493 // FIXME synchronize dirty files [AG] 491 494 … … 1487 1490 printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path ); 1488 1491 vfs_close( file_xp , file_id ); 1489 // FIXME restore old process VMM 1492 // FIXME restore old process VMM [AG] 1490 1493 return -1; 1491 1494 } … … 1505 1508 printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path ); 1506 1509 vfs_close( file_xp , file_id ); 1507 // FIXME restore old process VMM 1510 // FIXME restore old process VMM [AG] 1508 1511 return -1; 1509 1512 } … … 1535 1538 1536 1539 1537 /////////////////////////////////////////////// 1538 void process_zero_create( process_t * process ) 1540 //////////////////////////////////////////////// 1541 void process_zero_create( process_t * process, 1542 boot_info_t * info ) 1539 1543 { 1540 1544 error_t error; … … 1566 1570 process->parent_xp = XPTR( local_cxy , process ); 1567 1571 process->term_state = 0; 1572 1573 // initialise kernel GPT and VSL, depending on architecture 1574 hal_vmm_kernel_init( info ); 1568 1575 1569 1576 // reset th_tbl[] array and associated fields -
trunk/kernel/kern/process.h
r618 r623 73 73 * is always stored in the same cluster as the inode associated to the file. 74 74 * A free entry in this array contains the XPTR_NULL value. 75 * The array size is defined by athe CONFIG_PROCESS_FILE_MAX_NR parameter.75 * The array size is defined by the CONFIG_PROCESS_FILE_MAX_NR parameter. 76 76 * 77 77 * NOTE: - Only the fd_array[] in the reference process contains a complete list of open … … 79 79 * - the fd_array[] in a process copy is simply a cache containing a subset of the 80 80 * open files to speed the fdid to xptr translation, but the "lock" and "current 81 * fields should not beused.81 * fields are not used. 82 82 * - all modifications made by the process_fd_remove() are done in reference cluster 83 83 * and reported in all process_copies. … … 200 200 201 201 /********************************************************************************************* 202 * This function initializes, in each cluster, the kernel "process_zero", that is the owner203 * ofall kernel threads in a given cluster. It is called by the kernel_init() function.202 * This function initializes, in each cluster, the kernel "process_zero", that contains 203 * all kernel threads in a given cluster. It is called by the kernel_init() function. 204 204 * The process_zero descriptor is allocated as a global variable in file kernel_init.c 205 205 * Both the PID and PPID fields are set to zero, the ref_xp is the local process_zero, 206 206 * and the parent process is set to XPTR_NULL. The th_tbl[] is initialized as empty. 207 ********************************************************************************************* 208 * @ process : [in] pointer on local process descriptor to initialize. 209 ********************************************************************************************/ 210 void process_zero_create( process_t * process ); 207 * The process GPT is initialised as required by the target architecture. 208 * The "kcode" and "kdata" segments are registered in the process VSL. 209 ********************************************************************************************* 210 * @ process : [in] pointer on process descriptor to initialize. 211 * @ info : pointer on local boot_info_t (for kernel segments base and size). 212 ********************************************************************************************/ 213 void process_zero_create( process_t * process, 214 boot_info_t * info ); 215 216 /********************************************************************************************* … … 428 432 * identified by the <process_xp> argument, registers the <file_xp> argument in the 429 433 * allocated slot, and returns the slot index in the <fdid> buffer. 430 * It can be called by any thread in any cluster, because it uses portableremote access434 * It can be called by any thread in any cluster, because it uses remote access 431 435 * primitives to access the reference process descriptor. 432 436 * It takes the lock protecting the reference fd_array against concurrent accesses. -
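With the new signature, the call site in kernel_init() must pass the local boot_info_t, since the kernel GPT/VSL initialisation now happens inside process_zero_create(); the updated call, as it appears in the kernel_init.c hunk earlier in this changeset:

    // executed once per cluster, by core[0], during STEP 3
    if( core_lid == 0 ) process_zero_create( &process_zero , info );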
trunk/kernel/kern/rpc.c
r619 r623 2 2 * rpc.c - RPC operations implementation. 3 3 * 4 * Author Alain Greiner (2016,2017,2018 )4 * Author Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 58 58 &rpc_thread_user_create_server, // 6 59 59 &rpc_thread_kernel_create_server, // 7 60 &rpc_ undefined, // 8 unused slot60 &rpc_vfs_fs_update_dentry_server, // 8 61 61 &rpc_process_sigaction_server, // 9 62 62 … … 67 67 &rpc_vfs_file_create_server, // 14 68 68 &rpc_vfs_file_destroy_server, // 15 69 &rpc_vfs_fs_ get_dentry_server, // 1669 &rpc_vfs_fs_new_dentry_server, // 16 70 70 &rpc_vfs_fs_add_dentry_server, // 17 71 71 &rpc_vfs_fs_remove_dentry_server, // 18 … … 76 76 &rpc_kcm_alloc_server, // 22 77 77 &rpc_kcm_free_server, // 23 78 &rpc_ undefined, // 24 unused slot78 &rpc_mapper_sync_server, // 24 79 79 &rpc_mapper_handle_miss_server, // 25 80 80 &rpc_vmm_delete_vseg_server, // 26 … … 94 94 "THREAD_USER_CREATE", // 6 95 95 "THREAD_KERNEL_CREATE", // 7 96 " undefined",// 896 "VFS_FS_UPDATE_DENTRY", // 8 97 97 "PROCESS_SIGACTION", // 9 98 98 … … 112 112 "KCM_ALLOC", // 22 113 113 "KCM_FREE", // 23 114 " undefined",// 24114 "MAPPER_SYNC", // 24 115 115 "MAPPER_HANDLE_MISS", // 25 116 116 "VMM_DELETE_VSEG", // 26 … … 921 921 922 922 ///////////////////////////////////////////////////////////////////////////////////////// 923 // [7] Marshaling functions attached to RPC_THREAD_KERNEL_CREATE (blocking)923 // [7] Marshaling functions attached to RPC_THREAD_KERNEL_CREATE 924 924 ///////////////////////////////////////////////////////////////////////////////////////// 925 925 … … 1013 1013 1014 1014 ///////////////////////////////////////////////////////////////////////////////////////// 1015 // [8] undefined slot 1016 ///////////////////////////////////////////////////////////////////////////////////////// 1017 1015 // [8] Marshaling functions attached to RPC_VFS_FS_UPDATE_DENTRY 1016 ///////////////////////////////////////////////////////////////////////////////////////// 1017 1018 ///////////////////////////////////////////////////////// 1019 void rpc_vfs_fs_update_dentry_client( cxy_t cxy, 1020 vfs_inode_t * inode, 1021 vfs_dentry_t * dentry, 1022 uint32_t size, 1023 error_t * error ) 1024 { 1025 #if DEBUG_RPC_VFS_FS_UPDATE_DENTRY 1026 thread_t * this = CURRENT_THREAD; 1027 uint32_t cycle = (uint32_t)hal_get_cycles(); 1028 if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY ) 1029 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1030 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1031 #endif 1032 1033 uint32_t responses = 1; 1034 1035 // initialise RPC descriptor header 1036 rpc_desc_t rpc; 1037 rpc.index = RPC_VFS_FS_UPDATE_DENTRY; 1038 rpc.blocking = true; 1039 rpc.rsp = &responses; 1040 1041 // set input arguments in RPC descriptor 1042 rpc.args[0] = (uint64_t)(intptr_t)inode; 1043 rpc.args[1] = (uint64_t)(intptr_t)dentry; 1044 rpc.args[2] = (uint64_t)size; 1045 1046 // register RPC request in remote RPC fifo 1047 rpc_send( cxy , &rpc ); 1048 1049 // get output values from RPC descriptor 1050 *error = (error_t)rpc.args[3]; 1051 1052 #if DEBUG_RPC_VFS_FS_UPDATE_DENTRY 1053 cycle = (uint32_t)hal_get_cycles(); 1054 if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY ) 1055 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1056 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1057 #endif 1058 } 1059 1060 ///////////////////////////////////////////////// 1061 void rpc_vfs_fs_update_dentry_server( xptr_t xp ) 1062 { 1063 #if 
DEBUG_RPC_VFS_FS_UPDATE_DENTRY 1064 thread_t * this = CURRENT_THREAD; 1065 uint32_t cycle = (uint32_t)hal_get_cycles(); 1066 if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY ) 1067 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1068 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1069 #endif 1070 1071 error_t error; 1072 vfs_inode_t * inode; 1073 vfs_dentry_t * dentry; 1074 uint32_t size; 1075 1076 // get client cluster identifier and pointer on RPC descriptor 1077 cxy_t client_cxy = GET_CXY( xp ); 1078 rpc_desc_t * desc = GET_PTR( xp ); 1079 1080 // get input arguments 1081 inode = (vfs_inode_t*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 1082 dentry = (vfs_dentry_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 1083 size = (uint32_t) hal_remote_l64(XPTR(client_cxy , &desc->args[2])); 1084 1085 // call the kernel function 1086 error = vfs_fs_update_dentry( inode , dentry , size ); 1087 1088 // set output argument 1089 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1090 1091 #if DEBUG_RPC_VFS_FS_UPDATE_DENTRY 1092 cycle = (uint32_t)hal_get_cycles(); 1093 if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY ) 1094 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1095 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1096 #endif 1097 } 1018 1098 1019 1099 ///////////////////////////////////////////////////////////////////////////////////////// … … 1110 1190 void rpc_vfs_inode_create_client( cxy_t cxy, 1111 1191 uint32_t fs_type, // in 1112 uint32_t inode_type, // in1113 1192 uint32_t attr, // in 1114 1193 uint32_t rights, // in … … 1136 1215 // set input arguments in RPC descriptor 1137 1216 rpc.args[0] = (uint64_t)fs_type; 1138 rpc.args[1] = (uint64_t)inode_type; 1139 rpc.args[2] = (uint64_t)attr; 1140 rpc.args[3] = (uint64_t)rights; 1141 rpc.args[4] = (uint64_t)uid; 1142 rpc.args[5] = (uint64_t)gid; 1217 rpc.args[1] = (uint64_t)attr; 1218 rpc.args[2] = (uint64_t)rights; 1219 rpc.args[3] = (uint64_t)uid; 1220 rpc.args[4] = (uint64_t)gid; 1143 1221 1144 1222 // register RPC request in remote RPC fifo … … 1146 1224 1147 1225 // get output values from RPC descriptor 1148 *inode_xp = (xptr_t)rpc.args[ 6];1149 *error = (error_t)rpc.args[ 7];1226 *inode_xp = (xptr_t)rpc.args[5]; 1227 *error = (error_t)rpc.args[6]; 1150 1228 1151 1229 #if DEBUG_RPC_VFS_INODE_CREATE … … 1169 1247 1170 1248 uint32_t fs_type; 1171 uint32_t inode_type;1172 1249 uint32_t attr; 1173 1250 uint32_t rights; … … 1183 1260 // get input arguments from client rpc descriptor 1184 1261 fs_type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1185 inode_type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1186 attr = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 1187 rights = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) ); 1188 uid = (uid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) ); 1189 gid = (gid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[5] ) ); 1262 attr = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1263 rights = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 1264 uid = (uid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) ); 1265 gid = (gid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) ); 1190 1266 1191 1267 // call local kernel function 1192 1268 error = vfs_inode_create( fs_type, 1193 inode_type,1194 1269 attr, 1195 1270 rights, … … 1199 1274 1200 1275 // 
set output arguments 1201 hal_remote_s64( XPTR( client_cxy , &desc->args[ 6] ) , (uint64_t)inode_xp );1202 hal_remote_s64( XPTR( client_cxy , &desc->args[ 7] ) , (uint64_t)error );1276 hal_remote_s64( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)inode_xp ); 1277 hal_remote_s64( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error ); 1203 1278 1204 1279 #if DEBUG_RPC_VFS_INODE_CREATE … … 1601 1676 1602 1677 ///////////////////////////////////////////////////////// 1603 void rpc_vfs_fs_ get_dentry_client( cxy_t cxy,1678 void rpc_vfs_fs_new_dentry_client( cxy_t cxy, 1604 1679 vfs_inode_t * parent_inode, // in 1605 1680 char * name, // in … … 1643 1718 1644 1719 ////////////////////////////////////////////// 1645 void rpc_vfs_fs_ get_dentry_server( xptr_t xp )1720 void rpc_vfs_fs_new_dentry_server( xptr_t xp ) 1646 1721 { 1647 1722 #if DEBUG_RPC_VFS_FS_GET_DENTRY … … 1674 1749 1675 1750 // call the kernel function 1676 error = vfs_fs_ get_dentry( parent , name_copy , child_xp );1751 error = vfs_fs_new_dentry( parent , name_copy , child_xp ); 1677 1752 1678 1753 // set output argument … … 2245 2320 2246 2321 ///////////////////////////////////////////////////////////////////////////////////////// 2247 2322 // [24] undefined slot 2248 2323 // [24] Marshaling functions attached to RPC_MAPPER_SYNC ///////////////////////////////////////////////////////////////////////////////////////// 2324 2325 /////////////////////////////////////////////////// 2326 void rpc_mapper_sync_client( cxy_t cxy, 2327 struct mapper_s * mapper, 2328 error_t * error ) 2329 { 2330 #if DEBUG_RPC_MAPPER_SYNC 2331 thread_t * this = CURRENT_THREAD; 2332 uint32_t cycle = (uint32_t)hal_get_cycles(); 2333 if( cycle > DEBUG_RPC_MAPPER_SYNC ) 2334 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2335 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2336 #endif 2337 2338 uint32_t responses = 1; 2339 2340 // initialise RPC descriptor header 2341 rpc_desc_t rpc; 2342 rpc.index = RPC_MAPPER_SYNC; 2343 rpc.blocking = true; 2344 rpc.rsp = &responses; 2345 2346 // set input arguments in RPC descriptor 2347 rpc.args[0] = (uint64_t)(intptr_t)mapper; 2348 2349 // register RPC request in remote RPC fifo 2350 rpc_send( cxy , &rpc ); 2351 2352 // get output values from RPC descriptor 2353 *error = (error_t)rpc.args[1]; 2354 2355 #if DEBUG_RPC_MAPPER_SYNC 2356 cycle = (uint32_t)hal_get_cycles(); 2357 if( cycle > DEBUG_RPC_MAPPER_SYNC ) 2358 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2359 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2360 #endif 2361 } 2362 2363 //////////////////////////////////////// 2364 void rpc_mapper_sync_server( xptr_t xp ) 2365 { 2366 #if DEBUG_RPC_MAPPER_SYNC 2367 thread_t * this = CURRENT_THREAD; 2368 uint32_t cycle = (uint32_t)hal_get_cycles(); 2369 if( cycle > DEBUG_RPC_MAPPER_SYNC ) 2370 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2371 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2372 #endif 2373 2374 mapper_t * mapper; 2375 error_t error; 2376 2377 // get client cluster identifier and pointer on RPC descriptor 2378 cxy_t client_cxy = GET_CXY( xp ); 2379 rpc_desc_t * desc = GET_PTR( xp ); 2380 2381 // get arguments from client RPC descriptor 2382 mapper = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2383 2384 // call local kernel function 2385 error = mapper_sync( 
mapper ); 2386 2387 // set output argument to client RPC descriptor 2388 hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 2389 2390 #if DEBUG_RPC_MAPPER_SYNC 2391 cycle = (uint32_t)hal_get_cycles(); 2392 if( cycle > DEBUG_RPC_MAPPER_SYNC ) 2393 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2394 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2395 #endif 2396 } 2249 2397 2250 2398 ///////////////////////////////////////////////////////////////////////////////////////// -
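Following the local/remote dispatch convention used by the other RPCs in this file, a caller holding an extended pointer on a mapper would use the new pair as sketched below (mapper_xp and error are illustrative names):

    error_t    error;
    cxy_t      mapper_cxy = GET_CXY( mapper_xp );   // cluster containing the mapper
    mapper_t * mapper_ptr = GET_PTR( mapper_xp );   // local pointer in that cluster

    if( mapper_cxy == local_cxy )        // mapper is local : direct call
    {
        error = mapper_sync( mapper_ptr );
    }
    else                                 // mapper is remote : blocking RPC
    {
        rpc_mapper_sync_client( mapper_cxy , mapper_ptr , &error );
    }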
trunk/kernel/kern/rpc.h
r619 r623 68 68 RPC_THREAD_USER_CREATE = 6, 69 69 RPC_THREAD_KERNEL_CREATE = 7, 70 RPC_ UNDEFINED_8= 8,70 RPC_VFS_FS_UPDATE_DENTRY = 8, 71 71 RPC_PROCESS_SIGACTION = 9, 72 72 … … 86 86 RPC_KCM_ALLOC = 22, 87 87 RPC_KCM_FREE = 23, 88 RPC_ UNDEFINED_24= 24,88 RPC_MAPPER_SYNC = 24, 89 89 RPC_MAPPER_HANDLE_MISS = 25, 90 90 RPC_VMM_DELETE_VSEG = 26, … … 305 305 306 306 /*********************************************************************************** 307 * [8] undefined slot 308 **********************************************************************************/ 309 310 /*********************************************************************************** 311 * [9] The RPC_PROCESS_SIGACTION allows any client thread to request to any cluster 312 * execute a given sigaction, defined by the <action_type> for a given process, 307 * [8] The RPC_VFS_FS_UPDATE_DENTRY allows a client thread to request a remote 308 * cluster to update the <size> field of a directory entry in the mapper of a 309 * remote directory inode, identified by the <inode> local pointer. 310 * The target entry name is identified by the <dentry> local pointer. 311 *********************************************************************************** 312 * @ cxy : server cluster identifier. 313 * @ inode : [in] local pointer on remote directory inode. 314 * @ dentry : [in] local pointer on remote dentry. 315 * @ size : [in] new size value. 316 * @ error : [out] error status (0 if success). 317 **********************************************************************************/ 318 void rpc_vfs_fs_update_dentry_client( cxy_t cxy, 319 struct vfs_inode_s * inode, 320 struct vfs_dentry_s * dentry, 321 uint32_t size, 322 error_t * error ); 323 324 void rpc_vfs_fs_update_dentry_server( xptr_t xp ); 325 326 /*********************************************************************************** 327 * [9] The RPC_PROCESS_SIGACTION allows a client thread to request a remote cluster 328 * to execute a given sigaction, defined by the <action_type> for a given process, 313 329 * identified by the <pid> argument. 314 330 *********************************************************************************** … … 340 356 void rpc_vfs_inode_create_client( cxy_t cxy, 341 357 uint32_t fs_type, 342 uint32_t inode_type,343 358 uint32_t attr, 344 359 uint32_t rights, … … 423 438 424 439 /*********************************************************************************** 425 * [16] The RPC_VFS_FS_GET_DENTRY calls the vfs_fs_ get_dentry()440 * [16] The RPC_VFS_FS_GET_DENTRY calls the vfs_fs_new_dentry() 426 441 * function in a remote cluster containing a parent inode directory to scan the 427 442 * associated mapper, find a directory entry identified by its name, and update … … 434 449 * @ error : [out] error status (0 if success). 
435 450 **********************************************************************************/ 436 void rpc_vfs_fs_ get_dentry_client( cxy_t cxy,451 void rpc_vfs_fs_new_dentry_client( cxy_t cxy, 437 452 struct vfs_inode_s * parent_inode, 438 453 char * name, … … 440 455 error_t * error ); 441 456 442 void rpc_vfs_fs_ get_dentry_server( xptr_t xp );457 void rpc_vfs_fs_new_dentry_server( xptr_t xp ); 443 458 444 459 /*********************************************************************************** … … 564 579 565 580 /*********************************************************************************** 566 * [24] undefined slot 567 **********************************************************************************/ 581 * [24] The RPC_MAPPER_SYNC allows a client thread to synchronize on disk 582 * all dirty pages of a remote mapper. 583 *********************************************************************************** 584 * @ cxy : server cluster identifier. 585 * @ mapper : [in] local pointer on mapper in server cluster. 586 * @ error : [out] error status (0 if success). 587 **********************************************************************************/ 588 void rpc_mapper_sync_client( cxy_t cxy, 589 struct mapper_s * mapper, 590 error_t * error ); 591 592 void rpc_mapper_sync_server( xptr_t xp ); 568 593 569 594 /*********************************************************************************** -
trunk/kernel/kern/thread.c
r620 r623 1382 1382 const char * string ) 1383 1383 { 1384 1384 1385 cxy_t thread_cxy = GET_CXY( thread_xp ); 1385 1386 thread_t * thread_ptr = GET_PTR( thread_xp ); 1386 1387 1387 #if ( DEBUG_BUSYLOCK )1388 1389 xptr_t iter_xp;1390 1391 // get relevant info from target t rhead descriptor1388 #if DEBUG_BUSYLOCK 1389 1390 xptr_t iter_xp; 1391 1392 // get relevant info from target thread descriptor 1392 1393 uint32_t locks = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->busylocks ) ); 1393 1394 trdid_t trdid = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) ); … … 1429 1430 remote_busylock_release( txt0_lock_xp ); 1430 1431 1432 #else 1433 1434 printk("\n[ERROR] in %s : set DEBUG_BUSYLOCK in kernel_config.h for %s / thread(%x,%x)\n", 1435 __FUNCTION__, string, thread_cxy, thread_ptr ); 1436 1437 #endif 1438 1431 1439 return; 1432 1440 1433 #endif1434 1435 // display a warning1436 printk("\n[WARNING] set DEBUG_BUSYLOCK in kernel_config.h to display busylocks" );1437 1438 1441 } // end thread_display_busylock() 1439 1442 -
trunk/kernel/kernel_config.h
r620 r623 81 81 #define DEBUG_FATFS_FREE_CLUSTERS 0 82 82 #define DEBUG_FATFS_GET_CLUSTER 0 83 #define DEBUG_FATFS_GET_DENTRY 084 83 #define DEBUG_FATFS_GET_USER_DIR 0 85 84 #define DEBUG_FATFS_MOVE_PAGE 0 86 #define DEBUG_FATFS_RELEASE_INODE 0 85 #define DEBUG_FATFS_NEW_DENTRY 0 86 #define DEBUG_FATFS_RELEASE_INODE 1 87 87 #define DEBUG_FATFS_REMOVE_DENTRY 0 88 88 #define DEBUG_FATFS_SYNC_FAT 0 89 89 #define DEBUG_FATFS_SYNC_FSINFO 0 90 90 #define DEBUG_FATFS_SYNC_INODE 0 91 #define DEBUG_FATFS_UPDATE_DENTRY 0 91 92 92 93 #define DEBUG_HAL_GPT_SET_PTE 0 … … 112 113 #define DEBUG_MAPPER_MOVE_USER 0 113 114 #define DEBUG_MAPPER_MOVE_KERNEL 0 115 #define DEBUG_MAPPER_SYNC 0 114 116 115 117 #define DEBUG_MUTEX 0 … … 130 132 #define DEBUG_PROCESS_ZERO_CREATE 0 131 133 132 #define DEBUG_QUEUELOCK_TYPE 0 // lock type (0 is undefined)134 #define DEBUG_QUEUELOCK_TYPE 0 // lock type (0 : undefined / 1000 : all types) 133 135 134 136 #define DEBUG_RPC_CLIENT_GENERIC 0 … … 157 159 #define DEBUG_RPC_VMM_DELETE_VSEG 0 158 160 159 #define DEBUG_RWLOCK_TYPE 0 // lock type (0 is undefined)161 #define DEBUG_RWLOCK_TYPE 0 // lock type (0 : undefined / 1000 : all types) 160 162 161 163 #define DEBUG_SCHED_HANDLE_SIGNALS 2 … … 234 236 #define DEBUG_VFS_OPENDIR 0 235 237 #define DEBUG_VFS_STAT 0 236 #define DEBUG_VFS_UNLINK 0238 #define DEBUG_VFS_UNLINK 1 237 239 238 240 #define DEBUG_VMM_CREATE_VSEG 0 … … 247 249 #define DEBUG_VMM_MMAP_ALLOC 0 248 250 #define DEBUG_VMM_PAGE_ALLOCATE 0 251 #define DEBUG_VMM_RESIZE_VSEG 0 249 252 #define DEBUG_VMM_SET_COW 0 250 253 #define DEBUG_VMM_UPDATE_PTE 0 -
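With the 1000 wildcard introduced above, tracing all queuelock or rwlock types no longer requires one rebuild per lock type; a configuration sketch:

    #define DEBUG_QUEUELOCK_TYPE  1000   // trace all queuelock types
    #define DEBUG_RWLOCK_TYPE     1000   // trace all rwlock types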
trunk/kernel/libk/busylock.h
r563 r623 34 34 * a shared object located in a given cluster, made by thread(s) running in the same cluster. 35 35 * It uses a busy waiting policy when the lock is taken by another thread, and should 36 * be used to execute very short actions, such as basic allocators, or to protect37 * higher level synchronisation objects, such as queuelock or rwlock.38 * WARNING: a thread cannot yield when it is owning a busylock (local or remote).36 * be used to execute very short actions, such as accessing basic allocators, or higher 37 * level synchronisation objects (barriers, queuelocks, or rwlocks). 38 * WARNING: a thread cannot yield while it owns a busylock. 39 39 * 40 40 * - To acquire the lock, we use a ticket policy to avoid starvation: the calling thread -
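A minimal sketch of the protocol described above, using the busylock_acquire() / busylock_release() primitives as they appear in queuelock.c elsewhere in this changeset (the lock and counter variables are illustrative, and the lock is assumed to have been initialised at object creation):

    // 'lock' is embedded in the protected object, in the same cluster
    busylock_acquire( &lock );    // take a ticket and spin until it is served
    counter++;                    // very short critical section : no yield,
                                  // no blocking call while the lock is held
    busylock_release( &lock );    // serve the next waiting ticket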
trunk/kernel/libk/grdxt.h
r610 r623 132 132 * @ start_key : key starting value for the scan. 133 133 * @ found_key : [out] buffer for found key value. 134 * return pointer on first valid item if found / return NULL if not found.134 * @ return pointer on first valid item if found / return NULL if not found. 135 135 ******************************************************************************************/ 136 136 void * grdxt_get_first( grdxt_t * rt, -
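A hedged usage sketch of the scan primitive documented above (the tree variable, the key value, and the stored item type are assumptions, not shown in this hunk):

    uint32_t   found_key;
    void     * item;

    // return the first registered item whose key is >= 32
    item = grdxt_get_first( &rt , 32 , &found_key );

    if( item == NULL ) printk("no item found\n");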
trunk/kernel/libk/queuelock.c
r610 r623 66 66 busylock_acquire( &lock->lock ); 67 67 68 #if DEBUG_QUEUELOCK_TYPE 69 uint32_t lock_type = lock->lock.type; 70 #endif 71 68 72 // block and deschedule if lock already taken 69 73 while( lock->taken ) … … 71 75 72 76 #if DEBUG_QUEUELOCK_TYPE 73 uint32_t lock_type = lock->lock.type; 74 if( DEBUG_QUEUELOCK_TYPE == lock_type ) 77 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) ) 75 78 printk("\n[%s ] thread[%x,%x] BLOCK on q_lock %s [%x,%x]\n", 76 79 __FUNCTION__, this->process->pid, this->trdid, … … 97 100 98 101 #if DEBUG_QUEUELOCK_TYPE 99 if( DEBUG_QUEUELOCK_TYPE == lock_type)102 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) ) 100 103 printk("\n[%s] thread[%x,%x] ACQUIRE q_lock %s [%x,%x]\n", 101 104 __FUNCTION__, this->process->pid, this->trdid, … … 123 126 uint32_t lock_type = lock->lock.type; 124 127 thread_t * this = CURRENT_THREAD; 125 if( DEBUG_QUEUELOCK_TYPE == lock_type)128 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) ) 126 129 printk("\n[%s] thread[%x,%x] RELEASE q_lock %s [%x,%x]\n", 127 130 __FUNCTION__, this->process->pid, this->trdid, … … 139 142 140 143 #if DEBUG_QUEUELOCK_TYPE 141 if( DEBUG_QUEUELOCK_TYPE == lock_type)144 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) ) 142 145 printk("\n[%s] thread[%x,%x] UNBLOCK thread [%x,%x] / q_lock %s [%x,%x]\n", 143 146 __FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid, -
trunk/kernel/libk/remote_barrier.c
r619 r623 245 245 } // end generic_barrier_wait() 246 246 247 247 ///////////////////////////////////////////////////// 248 void generic_barrier_display( xptr_t gen_barrier_xp ) 249 { 250 // get cluster and local pointer 251 generic_barrier_t * gen_barrier_ptr = GET_PTR( gen_barrier_xp ); 252 cxy_t gen_barrier_cxy = GET_CXY( gen_barrier_xp ); 253 254 // get barrier type and extend pointer 255 bool_t is_dqt = hal_remote_l32( XPTR( gen_barrier_cxy , &gen_barrier_ptr->is_dqt ) ); 256 void * extend = hal_remote_lpt( XPTR( gen_barrier_cxy , &gen_barrier_ptr->extend ) ); 257 258 // build extended pointer on the implementation specific barrier descriptor 259 xptr_t barrier_xp = XPTR( gen_barrier_cxy , extend ); 260 261 // display barrier state 262 if( is_dqt ) dqt_barrier_display( barrier_xp ); 263 else simple_barrier_display( barrier_xp ); 264 } 248 265 249 266 … … 454 471 455 472 } // end simple_barrier_wait() 473 474 ///////////////////////////////////////////////// 475 void simple_barrier_display( xptr_t barrier_xp ) 476 { 477 // get cluster and local pointer on simple barrier 478 simple_barrier_t * barrier_ptr = GET_PTR( barrier_xp ); 479 cxy_t barrier_cxy = GET_CXY( barrier_xp ); 480 481 // get barrier global parameters 482 uint32_t current = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->current ) ); 483 uint32_t arity = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->arity ) ); 484 485 printk("\n***** simple barrier : %d arrived threads on %d *****\n", 486 current, arity ); 487 488 } // end simple_barrier_display() 489 490 456 491 457 492 … … 493 528 494 529 // check x_size and y_size arguments 495 assert( (z <= 16) , "DQT dqthlarger than (16*16)\n");530 assert( (z <= 16) , "DQT mesh size larger than (16*16)\n"); 496 531 497 532 // check RPC descriptor size … … 973 1008 } // end dqt_barrier_wait() 974 1009 975 976 //////////////////////////////////////////////////////////////////////////////////////////// 977 // DQT static functions 978 //////////////////////////////////////////////////////////////////////////////////////////// 979 980 981 ////////////////////////////////////////////////////////////////////////////////////////// 982 // This recursive function decrements the distributed "count" variables, 983 // traversing the DQT from bottom to root. 984 // The last arrived thread reset the local node before returning. 
985 ////////////////////////////////////////////////////////////////////////////////////////// 986 static void dqt_barrier_increment( xptr_t node_xp ) 987 { 988 uint32_t expected; 989 uint32_t sense; 990 uint32_t arity; 991 992 thread_t * this = CURRENT_THREAD; 993 994 // get node cluster and local pointer 995 dqt_node_t * node_ptr = GET_PTR( node_xp ); 996 cxy_t node_cxy = GET_CXY( node_xp ); 997 998 // build relevant extended pointers 999 xptr_t arity_xp = XPTR( node_cxy , &node_ptr->arity ); 1000 xptr_t sense_xp = XPTR( node_cxy , &node_ptr->sense ); 1001 xptr_t current_xp = XPTR( node_cxy , &node_ptr->current ); 1002 xptr_t lock_xp = XPTR( node_cxy , &node_ptr->lock ); 1003 xptr_t root_xp = XPTR( node_cxy , &node_ptr->root ); 1004 1005 #if DEBUG_BARRIER_WAIT 1006 uint32_t cycle = (uint32_t)hal_get_cycles(); 1007 uint32_t level = hal_remote_l32( XPTR( node_cxy, &node_ptr->level ) ); 1008 if( cycle > DEBUG_BARRIER_WAIT ) 1009 printk("\n[%s] thread[%x,%x] increments DQT node(%d,%d,%d) / cycle %d\n", 1010 __FUNCTION__ , this->process->pid, this->trdid, 1011 HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level ); 1012 #endif 1013 1014 // get extended pointer on parent node 1015 xptr_t parent_xp = hal_remote_l64( XPTR( node_cxy , &node_ptr->parent_xp ) ); 1016 1017 // take busylock 1018 remote_busylock_acquire( lock_xp ); 1019 1020 // get sense and arity values from barrier descriptor 1021 sense = hal_remote_l32( sense_xp ); 1022 arity = hal_remote_l32( arity_xp ); 1023 1024 // compute expected value 1025 expected = (sense == 0) ? 1 : 0; 1026 1027 // increment current number of arrived threads / get value before increment 1028 uint32_t current = hal_remote_atomic_add( current_xp , 1 ); 1029 1030 // last arrived thread reset the local node, makes the recursive call 1031 // on parent node, and reactivates all waiting thread when returning. 1032 // other threads block, register in queue, and deschedule. 
1033 1034 if ( current == (arity - 1) ) // last thread 1035 { 1036 1037 #if DEBUG_BARRIER_WAIT 1038 if( cycle > DEBUG_BARRIER_WAIT ) 1039 printk("\n[%s] thread[%x,%x] reset DQT node(%d,%d,%d)\n", 1040 __FUNCTION__ , this->process->pid, this->trdid, 1041 HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level ); 1042 #endif 1043 // reset the current node 1044 hal_remote_s32( sense_xp , expected ); 1045 hal_remote_s32( current_xp , 0 ); 1046 1047 // release busylock protecting the current node 1048 remote_busylock_release( lock_xp ); 1049 1050 // recursive call on parent node when current node is not the root 1051 if( parent_xp != XPTR_NULL) dqt_barrier_increment( parent_xp ); 1052 1053 // unblock all waiting threads on this node 1054 while( xlist_is_empty( root_xp ) == false ) 1055 { 1056 // get pointers on first waiting thread 1057 xptr_t thread_xp = XLIST_FIRST( root_xp , thread_t , wait_list ); 1058 cxy_t thread_cxy = GET_CXY( thread_xp ); 1059 thread_t * thread_ptr = GET_PTR( thread_xp ); 1060 1061 #if (DEBUG_BARRIER_WAIT & 1) 1062 trdid_t trdid = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) ); 1063 process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) ); 1064 pid_t pid = hal_remote_l32( XPTR( thread_cxy , &process->pid ) ); 1065 if( cycle > DEBUG_BARRIER_WAIT ) 1066 printk("\n[%s] thread[%x,%x] unblock thread[%x,%x]\n", 1067 __FUNCTION__, this->process->pid, this->trdid, pid, trdid ); 1068 #endif 1069 // remove waiting thread from queue 1070 xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_list ) ); 1071 1072 // unblock waiting thread 1073 thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC ); 1074 } 1075 } 1076 else // not the last thread 1077 { 1078 // get extended pointer on xlist entry from thread 1079 xptr_t entry_xp = XPTR( local_cxy , &this->wait_list ); 1080 1081 // register calling thread in barrier waiting queue 1082 xlist_add_last( root_xp , entry_xp ); 1083 1084 // block calling thread 1085 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_USERSYNC ); 1086 1087 // release busylock protecting the remote_barrier 1088 remote_busylock_release( lock_xp ); 1089 1090 #if DEBUG_BARRIER_WAIT 1091 if( cycle > DEBUG_BARRIER_WAIT ) 1092 printk("\n[%s] thread[%x,%x] blocks on node(%d,%d,%d)\n", 1093 __FUNCTION__ , this->process->pid, this->trdid, 1094 HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level ); 1095 #endif 1096 // deschedule 1097 sched_yield("blocked on barrier"); 1098 } 1099 1100 return; 1101 1102 } // end dqt_barrier_decrement() 1103 1104 #if DEBUG_BARRIER_CREATE 1105 1106 //////////////////////////////////////////////////////////////////////////////////////////// 1107 // This debug function displays all DQT nodes in all clusters. 1108 //////////////////////////////////////////////////////////////////////////////////////////// 1109 // @ barrier_xp : extended pointer on DQT barrier descriptor. 
1110 //////////////////////////////////////////////////////////////////////////////////////////// 1111 static void dqt_barrier_display( xptr_t barrier_xp ) 1010 ////////////////////////////////////////////// 1011 void dqt_barrier_display( xptr_t barrier_xp ) 1112 1012 { 1113 1013 // get cluster and local pointer on DQT barrier … … 1147 1047 uint32_t level = hal_remote_l32( XPTR( node_cxy , &node_ptr->level )); 1148 1048 uint32_t arity = hal_remote_l32( XPTR( node_cxy , &node_ptr->arity )); 1049 uint32_t count = hal_remote_l32( XPTR( node_cxy , &node_ptr->current )); 1149 1050 xptr_t pa_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->parent_xp )); … … 1153 1054 xptr_t c3_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->child_xp[3] )); 1154 1055 1155 printk(" . level %d : (%x,%x) / arity%d / P(%x,%x) / C0(%x,%x)"1056 printk(" . level %d : (%x,%x) / %d on %d / P(%x,%x) / C0(%x,%x)" 1156 1057 " C1(%x,%x) / C2(%x,%x) / C3(%x,%x)\n", 1157 level, node_cxy, node_ptr, arity,1058 level, node_cxy, node_ptr, count, arity, 1158 1059 GET_CXY(pa_xp), GET_PTR(pa_xp), … … 1167 1068 } // end dqt_barrier_display() 1168 1069 1169 #endif 1070 1071 ////////////////////////////////////////////////////////////////////////////////////////// 1072 // This static (recursive) function is called by the dqt_barrier_wait() function. 1073 // It traverses the DQT from bottom to root, and increments the "current" variables. 1074 // For each traversed node, it blocks and deschedules if it is not the last expected 1075 // thread. The last arrived thread resets the local node before returning. 1076 ////////////////////////////////////////////////////////////////////////////////////////// 1077 static void dqt_barrier_increment( xptr_t node_xp ) 1078 { 1079 uint32_t expected; 1080 uint32_t sense; 1081 uint32_t arity; 1082 1083 thread_t * this = CURRENT_THREAD; 1084 1085 // get node cluster and local pointer 1086 dqt_node_t * node_ptr = GET_PTR( node_xp ); 1087 cxy_t node_cxy = GET_CXY( node_xp ); 1088 1089 // build relevant extended pointers 1090 xptr_t arity_xp = XPTR( node_cxy , &node_ptr->arity ); 1091 xptr_t sense_xp = XPTR( node_cxy , &node_ptr->sense ); 1092 xptr_t current_xp = XPTR( node_cxy , &node_ptr->current ); 1093 xptr_t lock_xp = XPTR( node_cxy , &node_ptr->lock ); 1094 xptr_t root_xp = XPTR( node_cxy , &node_ptr->root ); 1095 1096 #if DEBUG_BARRIER_WAIT 1097 uint32_t cycle = (uint32_t)hal_get_cycles(); 1098 uint32_t level = hal_remote_l32( XPTR( node_cxy, &node_ptr->level ) ); 1099 if( cycle > DEBUG_BARRIER_WAIT ) 1100 printk("\n[%s] thread[%x,%x] increments DQT node(%d,%d,%d) / cycle %d\n", 1101 __FUNCTION__ , this->process->pid, this->trdid, 1102 HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level ); 1103 #endif 1104 1105 // get extended pointer on parent node 1106 xptr_t parent_xp = hal_remote_l64( XPTR( node_cxy , &node_ptr->parent_xp ) ); 1107 1108 // take busylock 1109 remote_busylock_acquire( lock_xp ); 1110 1111 // get sense and arity values from barrier descriptor 1112 sense = hal_remote_l32( sense_xp ); 1113 arity = hal_remote_l32( arity_xp ); 1114 1115 // compute expected value 1116 expected = (sense == 0) ? 
1 : 0; 1117 1118 // increment current number of arrived threads / get value before increment 1119 uint32_t current = hal_remote_atomic_add( current_xp , 1 ); 1120 1121 // last arrived thread resets the local node, makes the recursive call 1122 // on parent node, and reactivates all waiting threads when returning. 1123 // other threads block, register in queue, and deschedule. 1124 1125 if ( current == (arity - 1) ) // last thread 1126 { 1127 1128 #if DEBUG_BARRIER_WAIT 1129 if( cycle > DEBUG_BARRIER_WAIT ) 1130 printk("\n[%s] thread[%x,%x] reset DQT node(%d,%d,%d)\n", 1131 __FUNCTION__ , this->process->pid, this->trdid, 1132 HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level ); 1133 #endif 1134 // reset the current node 1135 hal_remote_s32( sense_xp , expected ); 1136 hal_remote_s32( current_xp , 0 ); 1137 1138 // release busylock protecting the current node 1139 remote_busylock_release( lock_xp ); 1140 1141 // recursive call on parent node when current node is not the root 1142 if( parent_xp != XPTR_NULL) dqt_barrier_increment( parent_xp ); 1143 1144 // unblock all waiting threads on this node 1145 while( xlist_is_empty( root_xp ) == false ) 1146 { 1147 // get pointers on first waiting thread 1148 xptr_t thread_xp = XLIST_FIRST( root_xp , thread_t , wait_list ); 1149 cxy_t thread_cxy = GET_CXY( thread_xp ); 1150 thread_t * thread_ptr = GET_PTR( thread_xp ); 1151 1152 #if (DEBUG_BARRIER_WAIT & 1) 1153 trdid_t trdid = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) ); 1154 process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) ); 1155 pid_t pid = hal_remote_l32( XPTR( thread_cxy , &process->pid ) ); 1156 if( cycle > DEBUG_BARRIER_WAIT ) 1157 printk("\n[%s] thread[%x,%x] unblock thread[%x,%x]\n", 1158 __FUNCTION__, this->process->pid, this->trdid, pid, trdid ); 1159 #endif 1160 // remove waiting thread from queue 1161 xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_list ) ); 1162 1163 // unblock waiting thread 1164 thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC ); 1165 } 1166 } 1167 else // not the last thread 1168 { 1169 // get extended pointer on xlist entry from thread 1170 xptr_t entry_xp = XPTR( local_cxy , &this->wait_list ); 1171 1172 // register calling thread in barrier waiting queue 1173 xlist_add_last( root_xp , entry_xp ); 1174 1175 // block calling thread 1176 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_USERSYNC ); 1177 1178 // release busylock protecting the remote_barrier 1179 remote_busylock_release( lock_xp ); 1180 1181 #if DEBUG_BARRIER_WAIT 1182 if( cycle > DEBUG_BARRIER_WAIT ) 1183 printk("\n[%s] thread[%x,%x] blocks on node(%d,%d,%d)\n", 1184 __FUNCTION__ , this->process->pid, this->trdid, 1185 HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level ); 1186 #endif 1187 // deschedule 1188 sched_yield("blocked on barrier"); 1189 } 1190 1191 return; 1192 1193 } // end dqt_barrier_increment() 1194 1195 -
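The three display functions added above share a single entry point: generic_barrier_display() reads the is_dqt flag and dispatches to the simple or DQT variant. A minimal debug sketch, assuming barrier_xp is an extended pointer on a generic_barrier_t:

    // dumps either the simple arrival counter, or one line per DQT node,
    // depending on the is_dqt flag stored in the generic descriptor
    generic_barrier_display( barrier_xp );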
trunk/kernel/libk/remote_barrier.h
r619 r623 42 42 * used by the kernel. ALMOS-MKH uses only the barrier virtual address as an identifier. 43 43 * For each user barrier, ALMOS-MKH creates a kernel structure, dynamically allocated 44 * by the "generic_barrier_create()" function, destroyed by the "remote_barrier_destroy()"45 * function, and used by the "generic_barrier_wait()"function.44 * by the generic_barrier_create() function, destroyed by the generic_barrier_destroy() 45 * function, and used by the generic_barrier_wait() function. 46 46 * 47 47 * Implementation note: … … 58 58 * (x_size * y_size) mesh, including cluster (0,0), with nthreads per cluster, and called 59 59 * DQT : Distributed Quad Tree. This DQT implementation supposes a regular architecture, 
uint32_t arity = hal_remote_l32( XPTR( node_cxy , &node_ptr->arity )); 60 61 * and a strong constraint on the threads placement: exactly "nthreads" threads per 61 62 * cluster in the (x_size * y_size) mesh. … … 141 142 142 143 143 144 /***************************************************************************************** 145 * This debug function uses remote accesses to display the current state of a generic 146 * barrier identified by the <gen_barrier_xp> argument. 147 * It calls the relevant function (simple or DQT) to display relevant information. 148 * It can be called by a thread running in any cluster. 149 ***************************************************************************************** 150 * @ barrier_xp : extended pointer on generic barrier descriptor. 151 ****************************************************************************************/ 152 153 void generic_barrier_display( xptr_t gen_barrier_xp ); 144 154 145 155 … … 192 202 void simple_barrier_wait( xptr_t barrier_xp ); 193 203 204 /***************************************************************************************** 205 * This debug function uses remote accesses to display the current state of a simple 206 * barrier identified by the <barrier_xp> argument. 207 * It can be called by a thread running in any cluster. 208 ***************************************************************************************** 209 * @ barrier_xp : extended pointer on simple barrier descriptor. 210 ****************************************************************************************/ 211 void simple_barrier_display( xptr_t barrier_xp ); 194 212 195 213 … … 281 299 void dqt_barrier_wait( xptr_t barrier_xp ); 282 300 283 301 /***************************************************************************************** 302 * This debug function uses remote accesses to display the current state of all 303 * distributed nodes in a DQT barrier identified by the <barrier_xp> argument. 304 * It can be called by a thread running in any cluster. 305 ***************************************************************************************** 306 * @ barrier_xp : extended pointer on DQT barrier descriptor. 307 ****************************************************************************************/ 308 void dqt_barrier_display( xptr_t barrier_xp ); 284 309 285 310 #endif /* _REMOTE_BARRIER_H_ */ -
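The placement constraint stated above fixes the expected arrival count at every level of the quad tree; a small worked instance (the values and node layout are a sketch of the scheme, exact arities depend on the implementation):

    // 4x4 mesh with 4 threads per cluster => 64 participating threads
    uint32_t x_size   = 4;
    uint32_t y_size   = 4;
    uint32_t nthreads = 4;
    uint32_t total    = x_size * y_size * nthreads;   // 64 expected arrivals
    // level 0 : one node per cluster, gathering the nthreads local threads
    // level 1+: each node gathers the last arrived thread of up to four
    //           child nodes, until the root node releases all waiters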
trunk/kernel/libk/remote_queuelock.c
r610 r623 91 91 92 92 #if DEBUG_QUEUELOCK_TYPE 93 if( DEBUG_QUEUELOCK_TYPE == lock_type)93 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) ) 94 94 printk("\n[%s] thread[%x,%x] BLOCK on q_lock %s [%x,%x]\n", 95 95 __FUNCTION__, this->process->pid, this->trdid, … … 117 117 118 118 #if DEBUG_QUEUELOCK_TYPE 119 if( DEBUG_QUEUELOCK_TYPE == lock_type)119 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) ) 120 120 printk("\n[%s] thread[%x,%x] ACQUIRE q_lock %s [%x,%x]\n", 121 121 __FUNCTION__, this->process->pid, this->trdid, … … 152 152 thread_t * this = CURRENT_THREAD; 153 153 uint32_t lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) ); 154 if( DEBUG_QUEUELOCK_TYPE == lock_type)154 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) ) 155 155 printk("\n[%s] thread[%x,%x] RELEASE q_lock %s (%x,%x)\n", 156 156 __FUNCTION__, this->process->pid, this->trdid, … … 171 171 172 172 #if DEBUG_QUEUELOCK_TYPE 173 if( DEBUG_QUEUELOCK_TYPE == lock_type)173 if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) ) 174 174 { 175 175 trdid_t trdid = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) ); -
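The repeated change above extends each trace condition so that the single value 1000 acts as a "trace all lock types" sentinel. A minimal sketch of the idiom (the helper name is hypothetical; the changeset inlines the test at each trace point):

    // DEBUG_QUEUELOCK_TYPE == 0    : tracing disabled
    // DEBUG_QUEUELOCK_TYPE == 1000 : trace all queuelocks
    //                                (1000 is assumed not to collide with a real type)
    // otherwise                    : trace only the matching lock type
    static inline bool_t queuelock_is_traced( uint32_t lock_type )
    {
        return (DEBUG_QUEUELOCK_TYPE == lock_type) ||
               (DEBUG_QUEUELOCK_TYPE == 1000);
    }
-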
trunk/kernel/libk/remote_rwlock.c
r610 r623 55 55 #if DEBUG_RWLOCK_TYPE 56 56 thread_t * this = CURRENT_THREAD; 57 if( type == DEBUG_RWLOCK_TYPE)57 if( DEBUG_RWLOCK_TYPE == type ) 58 58 printk("\n[%s] thread[%x,%x] initialise lock %s [%x,%x]\n", 59 59 __FUNCTION__, this->process->pid, this->trdid, … … 93 93 94 94 #if DEBUG_RWLOCK_TYPE 95 if( lock_type == DEBUG_RWLOCK_TYPE)95 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 96 96 printk("\n[%s] thread[%x,%x] READ BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n", 97 97 __FUNCTION__, this->process->pid, this->trdid, … … 124 124 125 125 #if DEBUG_RWLOCK_TYPE 126 if( lock_type == DEBUG_RWLOCK_TYPE)126 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 127 127 printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken = %d / count = %d\n", 128 128 __FUNCTION__, this->process->pid, this->trdid, … … 166 166 167 167 #if DEBUG_RWLOCK_TYPE 168 if( lock_type == DEBUG_RWLOCK_TYPE)168 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 169 169 printk("\n[%s] thread[%x,%x] WRITE BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n", 170 170 __FUNCTION__, this->process->pid, this->trdid, … … 196 196 197 197 #if DEBUG_RWLOCK_TYPE 198 if( lock_type == DEBUG_RWLOCK_TYPE)198 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 199 199 printk("\n[%s] thread[%x,%x] WRITE ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n", 200 200 __FUNCTION__, this->process->pid, this->trdid, … … 235 235 uint32_t lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) ); 236 236 xptr_t taken_xp = XPTR( lock_cxy , &lock_ptr->taken ); 237 if( lock_type == DEBUG_RWLOCK_TYPE)237 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 238 238 printk("\n[%s] thread[%x,%x] READ RELEASE rwlock %s [%x,%x] / taken %d / count %d\n", 239 239 __FUNCTION__, this->process->pid, this->trdid, … … 258 258 259 259 #if DEBUG_RWLOCK_TYPE 260 if( lock_type == DEBUG_RWLOCK_TYPE)260 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 261 261 { 262 262 trdid_t trdid = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) ); … … 289 289 290 290 #if DEBUG_RWLOCK_TYPE 291 if( lock_type == DEBUG_RWLOCK_TYPE)291 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 292 292 { 293 293 trdid_t trdid = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) ); … … 334 334 uint32_t lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) ); 335 335 xptr_t count_xp = XPTR( lock_cxy , &lock_ptr->count ); 336 if( lock_type == DEBUG_RWLOCK_TYPE)336 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 337 337 printk("\n[%s] thread[%x,%x] WRITE RELEASE rwlock %s [%x,%x] / taken %d / count %d\n", 338 338 __FUNCTION__, this->process->pid, this->trdid, … … 356 356 357 357 #if DEBUG_RWLOCK_TYPE 358 if( lock_type == DEBUG_RWLOCK_TYPE)358 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 359 359 { 360 360 trdid_t trdid = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) ); … … 386 386 387 387 #if DEBUG_RWLOCK_TYPE 388 if( lock_type == DEBUG_RWLOCK_TYPE)388 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 389 389 { 390 390 trdid_t trdid = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) ); -
trunk/kernel/libk/rwlock.c
r610 r623 71 71 busylock_acquire( &lock->lock ); 72 72 73 #if DEBUG_RWLOCK_TYPE 74 uint32_t lock_type = lock->lock.type; 75 #endif 76 73 77 // block and deschedule if lock already taken 74 78 while( lock->taken ) … … 76 80 77 81 #if DEBUG_RWLOCK_TYPE 78 uint32_t lock_type = lock->lock.type; 79 if( DEBUG_RWLOCK_TYPE == lock_type ) 82 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 80 83 printk("\n[%s] thread[%x,%x] READ BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n", 81 84 __FUNCTION__, this->process->pid, this->trdid, … … 102 105 103 106 #if DEBUG_RWLOCK_TYPE 104 if( DEBUG_RWLOCK_TYPE == lock_type)107 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 105 108 printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n", 106 109 __FUNCTION__, this->process->pid, this->trdid, … … 124 127 busylock_acquire( &lock->lock ); 125 128 129 #if DEBUG_RWLOCK_TYPE 130 uint32_t lock_type = lock->lock.type; 131 #endif 132 126 133 // block and deschedule if lock already taken or existing read access 127 134 while( lock->taken || lock->count ) … … 129 136 130 137 #if DEBUG_RWLOCK_TYPE 131 uint32_t lock_type = lock->lock.type; 132 if( DEBUG_RWLOCK_TYPE == lock_type ) 138 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 133 139 printk("\n[%s] thread[%x,%x] WRITE BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n", 134 140 __FUNCTION__, this->process->pid, this->trdid, … … 155 161 156 162 #if DEBUG_RWLOCK_TYPE 157 if( DEBUG_RWLOCK_TYPE == lock_type)163 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 158 164 printk("\n[%s] thread[%x,%x] WRITE ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n", 159 165 __FUNCTION__, this->process->pid, this->trdid, … … 181 187 thread_t * this = CURRENT_THREAD; 182 188 uint32_t lock_type = lock->lock.type; 183 if( DEBUG_RWLOCK_TYPE == lock_type)189 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 184 190 printk("\n[%s] thread[%x,%x] READ RELEASE rwlock %s [%x,%x] / taken %d / count %d\n", 185 191 __FUNCTION__, this->process->pid, this->trdid, … … 195 201 196 202 #if DEBUG_RWLOCK_TYPE 197 if( DEBUG_RWLOCK_TYPE == lock_type)203 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 198 204 printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n", 199 205 __FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid, … … 217 223 218 224 #if DEBUG_RWLOCK_TYPE 219 if( DEBUG_RWLOCK_TYPE == lock_type)225 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 220 226 printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n", 221 227 __FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid, … … 251 257 thread_t * this = CURRENT_THREAD; 252 258 uint32_t lock_type = lock->lock.type; 253 if( DEBUG_RWLOCK_TYPE == lock_type)259 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 254 260 printk("\n[%s] thread[%x,%x] WRITE RELEASE rwlock %s [%x,%x] / taken %d / count %d\n", 255 261 __FUNCTION__, this->process->pid, this->trdid, … … 264 270 265 271 #if DEBUG_RWLOCK_TYPE 266 if( DEBUG_RWLOCK_TYPE == lock_type)272 if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 267 273 printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n", 268 274 __FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid, … … 285 291 286 292 #if DEBUG_RWLOCK_TYPE 287 if( DEBUG_RWLOCK_TYPE == lock_type)293 
if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) ) 288 294 printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n", 289 295 __FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid, -
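The same sentinel idiom is applied to the rwlocks above, and the declaration of lock_type is hoisted out of the while loop because the ACQUIRE trace emitted after the loop needs the value even when the loop body never executes. A reduced sketch of the resulting pattern:

    busylock_acquire( &lock->lock );

    #if DEBUG_RWLOCK_TYPE
    uint32_t lock_type = lock->lock.type;    // read once, before the loop
    #endif

    while( lock->taken )
    {
        /* trace BLOCK / register in queue / deschedule */
    }

    /* ... take the lock ... */

    #if DEBUG_RWLOCK_TYPE
    if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    {
        /* ACQUIRE trace reuses the same lock_type */
    }
    #endif
-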
trunk/kernel/libk/user_dir.h
r614 r623
86 86 * - the allocation of one or several physical pages in reference cluster to store
87 87 * all directory entries in an array of 64-byte dirent structures,
88 * - the initialisation of this array from information found in the Inode Tree.
88 * - the initialisation of this array from information found in the directory mapper.
89 89 * - the creation of an ANON vseg containing this dirent array in reference process VMM,
90 90 * and the mapping of the relevant physical pages in this vseg.
-
trunk/kernel/mm/mapper.c
r614 r623
3 3 *
4 4 * Authors Mohamed Lamine Karaoui (2015)
5 * Alain Greiner (2016,2017,2018)
5 * Alain Greiner (2016,2017,2018,2019)
6 6 *
7 7 * Copyright (c) UPMC Sorbonne Universites
…
261 261 vfs_inode_t * inode = mapper->inode;
262 262 vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
263 // if( DEBUG_MAPPER_HANDLE_MISS < cycle )
264 // if( (page_id == 1) && (cycle > 10000000) )
263 if( DEBUG_MAPPER_HANDLE_MISS < cycle )
265 264 printk("\n[%s] enter for page %d in <%s> / cycle %d",
266 265 __FUNCTION__, page_id, name, cycle );
…
322 321 #if DEBUG_MAPPER_HANDLE_MISS
323 322 cycle = (uint32_t)hal_get_cycles();
324 // if( DEBUG_MAPPER_HANDLE_MISS < cycle )
325 // if( (page_id == 1) && (cycle > 10000000) )
323 if( DEBUG_MAPPER_HANDLE_MISS < cycle )
326 324 printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
327 325 __FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
…
442 440 ppm_page_do_dirty( page_xp );
443 441 hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
442
443 putb(" in mapper_move_user()" , map_ptr , page_count );
444
444 445 }
445 446
…
645 646
646 647 } // end mapper_remote_set_32()
648
649 /////////////////////////////////////////
650 error_t mapper_sync( mapper_t * mapper )
651 {
652 page_t * page; // local pointer on current page descriptor
653 xptr_t page_xp; // extended pointer on current page descriptor
654 grdxt_t * rt; // pointer on radix_tree descriptor
655 uint32_t start_key; // start page index in mapper
656 uint32_t found_key; // current page index in mapper
657 error_t error;
658
659 #if DEBUG_MAPPER_SYNC
660 thread_t * this = CURRENT_THREAD;
661 uint32_t cycle = (uint32_t)hal_get_cycles();
662 char name[CONFIG_VFS_MAX_NAME_LENGTH];
663 vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
664 #endif
665
666 // get pointer on radix tree
667 rt = &mapper->rt;
668
669 // initialise loop variable
670 start_key = 0;
671
672 // scan radix-tree until last page found
673 while( 1 )
674 {
675 // get page descriptor from radix tree
676 page = (page_t *)grdxt_get_first( rt , start_key , &found_key );
677
678 if( page == NULL ) break;
679
680 assert( (page->index == found_key ), __FUNCTION__, "wrong page descriptor index" );
681 assert( (page->order == 0), __FUNCTION__, "mapper page order must be 0" );
682
683 // build extended pointer on page descriptor
684 page_xp = XPTR( local_cxy , page );
685
686 // synchronize page if dirty
687 if( (page->flags & PG_DIRTY) != 0 )
688 {
689
690 #if DEBUG_MAPPER_SYNC
691 if( cycle > DEBUG_MAPPER_SYNC )
692 printk("\n[%s] thread[%x,%x] synchronise page %d of <%s> to device\n",
693 __FUNCTION__, this->process->pid, this->trdid, page->index, name );
694 #endif
695 // copy page to file system
696 error = vfs_fs_move_page( page_xp , IOC_WRITE );
697
698 if( error )
699 {
700 printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n",
701 __FUNCTION__, page->index );
702 return -1;
703 }
704
705 // remove page from PPM dirty list
706 ppm_page_undo_dirty( page_xp );
707 }
708 else
709 {
710
711 #if DEBUG_MAPPER_SYNC
712 if( cycle > DEBUG_MAPPER_SYNC )
713 printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
714 __FUNCTION__, this->process->pid, this->trdid, page->index, name );
715 #endif
716 }
717
718 // update loop variable
719 start_key = page->index + 1;
720 } // end while
721
722 return 0;
723
724 } // end mapper_sync()
647 725
648 726 //////////////////////////////////////////////////
-
trunk/kernel/mm/mapper.h
r614 r623
3 3 *
4 4 * Authors Mohamed Lamine Karaoui (2015)
5 * Alain Greiner (2016,2017,2018)
5 * Alain Greiner (2016,2017,2018,2019)
6 6 *
7 7 * Copyright (c) UPMC Sorbonne Universites
…
48 48 * "readers", and only one "writer".
49 49 * - A "reader" thread, calling the mapper_remote_get_page() function to get a page
50 * descriptor pointer from the page index in file, can be remote (running in any cluster).
50 * descriptor pointer from the page index in file, can be running in any cluster.
51 51 * - A "writer" thread, calling the mapper_handle_miss() function to handle a page miss
52 52 * must be local (running in the mapper cluster).
53 * - The vfs_mapper_move_page() function accesses the file system to handle a mapper miss,
53 * - The vfs_fs_move_page() function accesses the file system to handle a mapper miss,
54 54 * or update a dirty page on device.
55 55 * - The vfs_mapper_load_all() function is used to load all pages of a directory
…
63 63 *
64 64 * TODO : the mapper being only used to implement the VFS cache(s), the mapper.c
65 * and mapper.h file should be transferred to the vfs directory.
65 * and mapper.h file should be transferred to the fs directory.
66 66 ******************************************************************************************/
67 67
…
230 230
231 231 /*******************************************************************************************
232 * This function scans all pages present in the mapper identified by the <mapper> argument,
233 * and synchronizes all pages marked as "dirty" on disk.
234 * These pages are unmarked and removed from the local PPM dirty_list.
235 * This function must be called by a local thread running in the same cluster as the mapper.
236 * A remote thread must call the RPC_MAPPER_SYNC function.
237 *******************************************************************************************
238 * @ mapper : [in] local pointer on local mapper.
239 * @ returns 0 if success / return -1 if error.
240 ******************************************************************************************/
241 error_t mapper_sync( mapper_t * mapper );
242
243 /*******************************************************************************************
232 244 * This debug function displays the content of a given page of a given mapper.
233 245 * - the mapper is identified by the <mapper_xp> argument.
-
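A hedged sketch of how a caller can honour the locality constraint stated above: run mapper_sync() directly when the mapper is local, and delegate through the RPC mentioned in the comment otherwise (the rpc_mapper_sync_client() name and signature are assumptions, not part of this changeset):

    error_t mapper_sync_any_cluster( xptr_t mapper_xp )
    {
        error_t    error;
        cxy_t      mapper_cxy = GET_CXY( mapper_xp );
        mapper_t * mapper_ptr = GET_PTR( mapper_xp );

        if( mapper_cxy == local_cxy )           // mapper is local
        {
            error = mapper_sync( mapper_ptr );
        }
        else                                    // delegate to the mapper cluster
        {
            rpc_mapper_sync_client( mapper_cxy , mapper_ptr , &error );
        }
        return error;
    }
-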
trunk/kernel/mm/page.h
r612 r623 41 41 #define PG_INIT 0x0001 // page descriptor has been initialised 42 42 #define PG_RESERVED 0x0002 // cannot be allocated by PPM 43 #define PG_FREE 0x0004 // page can beallocated by PPM43 #define PG_FREE 0x0004 // page not yet allocated by PPM 44 44 #define PG_DIRTY 0x0040 // page has been written 45 45 #define PG_COW 0x0080 // page is copy-on-write -
trunk/kernel/mm/ppm.h
r611 r623
3 3 *
4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012)
5 * Alain Greiner (2016,2017,2018)
5 * Alain Greiner (2016,2017,2018,2019)
6 6 *
7 7 * Copyright (c) UPMC Sorbonne Universites
…
37 37 * This structure defines the Physical Pages Manager in a cluster.
38 38 * In each cluster, the physical memory bank starts at local physical address 0 and
39 * contains an integer number of pages, defined by the <pages_nr> field in the
39 * contains an integer number of small pages, defined by the <pages_nr> field in the
40 40 * boot_info structure. It is split in three parts:
41 41 *
42 42 * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
43 * It starts at PPN = 0 and the size is defined by the <pages_offset> field in the
44 * boot_info structure.
45 * - the "pages_tbl" section contains the physical page descriptors array. It starts
46 * at PPN = pages_offset, and it contains one entry per small physical page in cluster.
43 * It starts at local PPN = 0 and the size is defined by the <pages_offset> field
44 * in the boot_info structure.
45 * - the local "pages_tbl" section contains the physical page descriptors array.
46 * It starts at local PPN = pages_offset, and it contains one entry per small page.
47 47 * It is created and initialized by the hal_ppm_create() function.
48 48 * - The "kernel_heap" section contains all physical pages that are not in the
49 * kernel_code and pages_tbl sections, and that have not been reserved by the
50 * architecture specific bootloader. The reserved pages are defined in the boot_info
51 * structure.
49 * "kernel_code" and "pages_tbl" sections, and that have not been reserved.
50 * The reserved pages are defined in the boot_info structure.
52 51 *
53 52 * The main service provided by the PPM is the dynamic allocation of physical pages
…
60 59 *
61 60 * Another service is to register the dirty pages in a specific dirty_list, that is
62 * also rooted in the PPM, in order to be able to save all dirty pages on disk.
61 * also rooted in the PPM, in order to be able to synchronize all dirty pages on disk.
63 62 * This dirty list is protected by a specific remote_queuelock, because it can be
64 63 * modified by a remote thread, but it contains only local pages.
…
198 197 * . if page already dirty => do nothing
199 198 * . if page not dirty => set the PG_DIRTY flag and register page in PPM dirty list.
200 * - it releases the busylock protcting the page flags.
199 * - it releases the busylock protecting the page flags.
201 200 * - it releases the queuelock protecting the PPM dirty_list.
*****************************************************************************************
…
214 213 * . if page not dirty => do nothing
215 214 * . if page dirty => reset the PG_DIRTY flag and remove page from PPM dirty list.
216 * - it releases the busylock protcting the page flags.
215 * - it releases the busylock protecting the page flags.
217 216 * - it releases the queuelock protecting the PPM dirty_list.
*****************************************************************************************
-
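A local-only sketch of the two-lock pattern described above for ppm_page_do_dirty(); the field names (dirty_lock, dirty_root, page->lock, page->list) are assumptions used for illustration, not the actual PPM layout:

    bool_t ppm_page_do_dirty_sketch( page_t * page )
    {
        bool_t  done = false;
        ppm_t * ppm  = &LOCAL_CLUSTER->ppm;

        // take the queuelock protecting the dirty_list, then the page busylock
        remote_queuelock_acquire( XPTR( local_cxy , &ppm->dirty_lock ) );
        remote_busylock_acquire ( XPTR( local_cxy , &page->lock ) );

        if( (page->flags & PG_DIRTY) == 0 )     // page not dirty yet
        {
            page->flags |= PG_DIRTY;            // set the PG_DIRTY flag
            xlist_add_first( XPTR( local_cxy , &ppm->dirty_root ),
                             XPTR( local_cxy , &page->list ) );
            done = true;
        }

        // release both locks in reverse order
        remote_busylock_release ( XPTR( local_cxy , &page->lock ) );
        remote_queuelock_release( XPTR( local_cxy , &ppm->dirty_lock ) );

        return done;
    }
-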
trunk/kernel/mm/vmm.c
r621 r623 59 59 { 60 60 error_t error; 61 vseg_t * vseg_kentry;62 61 vseg_t * vseg_args; 63 62 vseg_t * vseg_envs; … … 91 90 (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) , 92 91 "STACK zone too small\n"); 93 94 // register kentry vseg in VSL95 base = CONFIG_VMM_KENTRY_BASE << CONFIG_PPM_PAGE_SHIFT;96 size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT;97 98 vseg_kentry = vmm_create_vseg( process,99 VSEG_TYPE_CODE,100 base,101 size,102 0, // file_offset unused103 0, // file_size unused104 XPTR_NULL, // mapper_xp unused105 local_cxy );106 107 if( vseg_kentry == NULL )108 {109 printk("\n[ERROR] in %s : cannot register kentry vseg\n", __FUNCTION__ );110 return -1;111 }112 113 vmm->kent_vpn_base = base;114 92 115 93 // register args vseg in VSL … … 162 140 163 141 if( error ) 164 printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ ); 142 { 143 printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ ); 144 return -1; 145 } 165 146 166 147 // initialize GPT lock 167 148 remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT ); 168 149 169 // architecture specic GPT initialisation 170 // (For TSAR, identity map the kentry_vseg) 171 error = hal_vmm_init( vmm ); 172 173 if( error ) 174 printk("\n[ERROR] in %s : cannot initialize GPT\n", __FUNCTION__ ); 150 // update process VMM with kernel vsegs 151 error = hal_vmm_kernel_update( process ); 152 153 if( error ) 154 { 155 printk("\n[ERROR] in %s : cannot update GPT for kernel vsegs\n", __FUNCTION__ ); 156 return -1; 157 } 175 158 176 159 // initialize STACK allocator … … 326 309 } 327 310 328 // release physical memory allocated for vseg descriptor if no MMAP type 329 if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) ) 311 // release physical memory allocated for vseg if no MMAP and no kernel type 312 if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) && 313 (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) ) 330 314 { 331 315 vseg_free( vseg ); … … 606 590 child_vmm->vsegs_nr = 0; 607 591 608 // create child GPT592 // create the child GPT 609 593 error = hal_gpt_create( &child_vmm->gpt ); 610 594 … … 639 623 #endif 640 624 641 // all parent vsegs - but STACK - must be copied in child VSL 642 if( type != VSEG_TYPE_STACK ) 625 // all parent vsegs - but STACK and kernel vsegs - must be copied in child VSL 626 if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) && 627 (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) ) 643 628 { 644 629 // allocate memory for a new child vseg … … 726 711 remote_rwlock_rd_release( parent_lock_xp ); 727 712 728 // initialize child GPT (architecture specic) 729 // => For TSAR, identity map the kentry_vseg 730 error = hal_vmm_init( child_vmm ); 713 // update child VMM with kernel vsegs 714 error = hal_vmm_kernel_update( child_process ); 731 715 732 716 if( error ) 733 717 { 734 printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );718 printk("\n[ERROR] in %s : cannot update child VMM\n", __FUNCTION__ ); 735 719 return -1; 736 720 } … … 1098 1082 base = vpn_base << CONFIG_PPM_PAGE_SHIFT; 1099 1083 } 1100 else // VSEG_TYPE_DATA or VSEG_TYPE_CODE1084 else // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg 1101 1085 { 1102 1086 uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT; … … 1178 1162 xptr_t lock_xp; // extended pointer on lock protecting forks counter 1179 1163 uint32_t forks; // actual number of pendinf forks 1164 uint32_t type; // vseg type 1180 1165 1181 1166 
#if DEBUG_VMM_DELETE_VSEG … … 1190 1175 process = cluster_get_local_process_from_pid( pid ); 1191 1176 1192 if( process == NULL ) return; 1177 if( process == NULL ) 1178 { 1179 printk("\n[ERRORR] in %s : cannot get local process descriptor\n", 1180 __FUNCTION__ ); 1181 return; 1182 } 1193 1183 1194 1184 // get pointers on local process VMM an GPT … … 1199 1189 vseg = vmm_vseg_from_vaddr( vmm , vaddr ); 1200 1190 1201 if( vseg == NULL ) return; 1202 1203 // loop to invalidate all vseg PTEs in GPT 1191 if( vseg == NULL ) 1192 { 1193 printk("\n[ERRORR] in %s : cannot get vseg descriptor\n", 1194 __FUNCTION__ ); 1195 return; 1196 } 1197 1198 // get relevant vseg infos 1199 type = vseg->type; 1204 1200 vpn_min = vseg->vpn_base; 1205 1201 vpn_max = vpn_min + vseg->vpn_size; 1202 1203 // loop to invalidate all vseg PTEs in GPT 1206 1204 for( vpn = vpn_min ; vpn < vpn_max ; vpn++ ) 1207 1205 { … … 1216 1214 printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) ); 1217 1215 #endif 1218 1219 // check small page1220 assert( (attr & GPT_SMALL) , "an user vseg must use small pages" );1221 1222 1216 // unmap GPT entry in local GPT 1223 1217 hal_gpt_reset_pte( gpt , vpn ); 1224 1218 1225 // handle pending forks counter if 1226 // 1) not identity mapped 1227 // 2) reference cluster 1228 if( ((vseg->flags & VSEG_IDENT) == 0) && 1229 (GET_CXY( process->ref_xp ) == local_cxy) ) 1219 // the allocated page is not released to KMEM for kernel vseg 1220 if( (type != VSEG_TYPE_KCODE) && 1221 (type != VSEG_TYPE_KDATA) && 1222 (type != VSEG_TYPE_KDEV ) ) 1230 1223 { 1224 1225 // FIXME This code must be completely re-written, as the actual release must depend on 1226 // - the vseg type 1227 // - the reference cluster 1228 // - the page refcount and/or the forks counter 1229 1231 1230 // get extended pointer on physical page descriptor 1232 1231 page_xp = ppm_ppn2page( ppn ); … … 1238 1237 lock_xp = XPTR( page_cxy , &page_ptr->lock ); 1239 1238 1239 // get the lock protecting the page 1240 1240 remote_busylock_acquire( lock_xp ); 1241 1241 1242 // get pending forks counter 1242 1243 forks = hal_remote_l32( forks_xp ); 1244 1243 1245 if( forks ) // decrement pending forks counter 1244 1246 { … … 1263 1265 #endif 1264 1266 } 1267 1268 // release the lock protecting the page 1265 1269 remote_busylock_release( lock_xp ); 1266 1270 } … … 1311 1315 // return failure 1312 1316 remote_rwlock_rd_release( lock_xp ); 1317 1313 1318 return NULL; 1314 1319 … … 1325 1330 vpn_t vpn_max; 1326 1331 1332 #if DEBUG_VMM_RESIZE_VSEG 1333 uint32_t cycle = (uint32_t)hal_get_cycles(); 1334 thread_t * this = CURRENT_THREAD; 1335 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1336 printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n", 1337 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle ); 1338 #endif 1339 1327 1340 // get pointer on process VMM 1328 1341 vmm_t * vmm = &process->vmm; … … 1334 1347 vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base ); 1335 1348 1336 if( vseg == NULL) return EINVAL;1337 1338 // get extended pointer on VSL lock1339 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock);1340 1341 // get lock protecting VSL1342 remote_rwlock_wr_acquire( lock_xp ); 1343 1349 if( vseg == NULL) 1350 { 1351 printk("\n[ERROR] in %s : vseg(%x,%d) not found\n", 1352 __FUNCTION__, base , size ); 1353 return -1; 1354 } 1355 1356 // resize depends on unmapped region base and size 1344 1357 if( (vseg->min > addr_min) || (vseg->max < addr_max) ) // not included in vseg 
1345 1358 { 1359 printk("\n[ERROR] in %s : unmapped region[%x->%x[ not included in vseg[%x->%x[\n", 1360 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1361 1346 1362 error = -1; 1347 1363 } 1348 1364 else if( (vseg->min == addr_min) && (vseg->max == addr_max) ) // vseg must be deleted 1349 1365 { 1366 1367 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1368 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1369 printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n", 1370 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1371 #endif 1350 1372 vmm_delete_vseg( process->pid , vseg->min ); 1373 1374 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1375 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1376 printk("\n[%s] thread[%x,%x] deleted vseg\n", 1377 __FUNCTION__, this->process->pid, this->trdid ); 1378 #endif 1351 1379 error = 0; 1352 1380 } 1353 1381 else if( vseg->min == addr_min ) // vseg must be resized 1354 1382 { 1355 // update vseg base address 1383 1384 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1385 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1386 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", 1387 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1388 #endif 1389 // update vseg min address 1356 1390 vseg->min = addr_max; 1357 1391 … … 1361 1395 vseg->vpn_base = vpn_min; 1362 1396 vseg->vpn_size = vpn_max - vpn_min + 1; 1397 1398 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1399 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1400 printk("\n[%s] thread[%x,%x] changed vseg_min\n", 1401 __FUNCTION__, this->process->pid, this->trdid ); 1402 #endif 1363 1403 error = 0; 1364 1404 } 1365 1405 else if( vseg->max == addr_max ) // vseg must be resized 1366 1406 { 1407 1408 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1409 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1410 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", 1411 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1412 #endif 1367 1413 // update vseg max address 1368 1414 vseg->max = addr_min; … … 1373 1419 vseg->vpn_base = vpn_min; 1374 1420 vseg->vpn_size = vpn_max - vpn_min + 1; 1421 1422 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1423 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1424 printk("\n[%s] thread[%x,%x] changed vseg_max\n", 1425 __FUNCTION__, this->process->pid, this->trdid ); 1426 #endif 1375 1427 error = 0; 1428 1376 1429 } 1377 1430 else // vseg cut in three regions 1378 1431 { 1432 1433 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1434 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1435 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", 1436 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1437 #endif 1379 1438 // resize existing vseg 1380 1439 vseg->max = addr_min; … … 1396 1455 vseg->cxy ); 1397 1456 1398 if( new == NULL ) error = EINVAL; 1457 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1458 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1459 printk("\n[%s] thread[%x,%x] replaced vseg by two smal vsegs\n", 1460 __FUNCTION__, this->process->pid, this->trdid ); 1461 #endif 1462 1463 if( new == NULL ) error = -1; 1399 1464 else error = 0; 1400 1465 } 1401 1466 1402 // release VMM lock 1403 remote_rwlock_wr_release( lock_xp ); 1467 #if DEBUG_VMM_RESIZE_VSEG 1468 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1469 printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n", 1470 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle ); 1471 #endif 1404 1472 1405 1473 return error; -
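The four cases handled by vmm_resize_vseg() above can be summarised with a worked example, using the process/base/size arguments shown in the debug messages (the addresses are arbitrary illustrations, not taken from the changeset), for an existing vseg covering [0x10000 -> 0x20000[:

    // unmapped region == vseg          -> vseg deleted
    vmm_resize_vseg( process , 0x10000 , 0x10000 );

    // unmapped region at vseg start    -> vseg becomes [0x14000 -> 0x20000[
    vmm_resize_vseg( process , 0x10000 , 0x4000 );

    // unmapped region at vseg end      -> vseg becomes [0x10000 -> 0x1C000[
    vmm_resize_vseg( process , 0x1C000 , 0x4000 );

    // unmapped region in the middle    -> vseg becomes [0x10000 -> 0x14000[
    //                                     plus a new vseg [0x18000 -> 0x20000[
    vmm_resize_vseg( process , 0x14000 , 0x4000 );
-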
trunk/kernel/mm/vmm.h
r614 r623
4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012)
5 5 * Mohamed Lamine Karaoui (2015)
6 * Alain Greiner (2016,2017,2018)
6 * Alain Greiner (2016,2017,2018,2019)
7 7 *
8 8 * Copyright (c) UPMC Sorbonne Universites
-
trunk/kernel/mm/vseg.c
r595 r623 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011, 2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,201 8,2019)6 * Alain Greiner (2016,2017,2018,2019) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 55 55 else if( vseg_type == VSEG_TYPE_FILE ) return "FILE"; 56 56 else if( vseg_type == VSEG_TYPE_REMOTE ) return "REMO"; 57 else if( vseg_type == VSEG_TYPE_KCODE ) return "KCOD"; 58 else if( vseg_type == VSEG_TYPE_KDATA ) return "KDAT"; 59 else if( vseg_type == VSEG_TYPE_KDEV ) return "KDEV"; 57 60 else return "undefined"; 58 61 } … … 142 145 VSEG_CACHE ; 143 146 } 147 else if( type == VSEG_TYPE_KCODE ) 148 { 149 vseg->flags = VSEG_EXEC | 150 VSEG_CACHE | 151 VSEG_PRIVATE ; 152 } 153 else if( type == VSEG_TYPE_KDATA ) 154 { 155 vseg->flags = VSEG_CACHE | 156 VSEG_WRITE ; 157 } 158 else if( type == VSEG_TYPE_KDEV ) 159 { 160 vseg->flags = VSEG_WRITE ; 161 } 144 162 else 145 163 { … … 158 176 159 177 // initialize vseg with remote_read access 160 vseg->type = hal_remote_l32 178 vseg->type = hal_remote_l32( XPTR( cxy , &ptr->type ) ); 161 179 vseg->min = (intptr_t)hal_remote_lpt( XPTR( cxy , &ptr->min ) ); 162 180 vseg->max = (intptr_t)hal_remote_lpt( XPTR( cxy , &ptr->max ) ); 163 vseg->vpn_base = hal_remote_l32 164 vseg->vpn_size = hal_remote_l32 165 vseg->flags = hal_remote_l32 166 vseg->file_offset = hal_remote_l32 167 vseg->file_size = hal_remote_l32 181 vseg->vpn_base = hal_remote_l32( XPTR( cxy , &ptr->vpn_base ) ); 182 vseg->vpn_size = hal_remote_l32( XPTR( cxy , &ptr->vpn_size ) ); 183 vseg->flags = hal_remote_l32( XPTR( cxy , &ptr->flags ) ); 184 vseg->file_offset = hal_remote_l32( XPTR( cxy , &ptr->file_offset ) ); 185 vseg->file_size = hal_remote_l32( XPTR( cxy , &ptr->file_size ) ); 168 186 vseg->mapper_xp = (xptr_t) hal_remote_l64( XPTR( cxy , &ptr->mapper_xp ) ); 169 187 170 188 switch (vseg->type) 171 189 { 172 case VSEG_TYPE_DATA: 190 case VSEG_TYPE_DATA: // unused 173 191 { 174 192 vseg->cxy = 0xffff; 175 193 break; 176 194 } 177 case VSEG_TYPE_CODE: 195 case VSEG_TYPE_CODE: // always local 178 196 case VSEG_TYPE_STACK: 197 case VSEG_TYPE_KCODE: 179 198 { 180 199 vseg->cxy = local_cxy; 181 200 break; 182 201 } 183 case VSEG_TYPE_ANON: 202 case VSEG_TYPE_ANON: // intrinsic 184 203 case VSEG_TYPE_FILE: 185 204 case VSEG_TYPE_REMOTE: 205 case VSEG_TYPE_KDEV: 206 case VSEG_TYPE_KDATA: 186 207 { 187 208 vseg->cxy = (cxy_t) hal_remote_l32( XPTR(cxy, &ptr->cxy) ); -
trunk/kernel/mm/vseg.h
r611 r623 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011, 2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016 )6 * Alain Greiner (2016,2017,2018,2019) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 35 35 36 36 /******************************************************************************************* 37 * This enum defines the vseg types for an user process. 37 * This enum defines the vseg types. 38 * Note : the KDATA and KDEV types are not used by the TSAR HAL, because the accesses 39 * to kernel data or kernel devices are done through the DATA extension address 40 * register, but these types are probably required by the I86 HAL [AG]. 38 41 ******************************************************************************************/ 39 42 40 43 typedef enum 41 44 { 42 VSEG_TYPE_CODE = 0, /*! executable user code / private / localized */ 43 VSEG_TYPE_DATA = 1, /*! initialized user data / public / distributed */ 44 VSEG_TYPE_STACK = 2, /*! execution user stack / private / localized */ 45 VSEG_TYPE_ANON = 3, /*! anonymous mmap / public / localized */ 46 VSEG_TYPE_FILE = 4, /*! file mmap / public / localized */ 47 VSEG_TYPE_REMOTE = 5, /*! remote mmap / public / localized */ 45 VSEG_TYPE_CODE = 0, /*! executable user code / private / localized */ 46 VSEG_TYPE_DATA = 1, /*! initialized user data / public / distributed */ 47 VSEG_TYPE_STACK = 2, /*! execution user stack / private / localized */ 48 VSEG_TYPE_ANON = 3, /*! anonymous mmap / public / localized */ 49 VSEG_TYPE_FILE = 4, /*! file mmap / public / localized */ 50 VSEG_TYPE_REMOTE = 5, /*! remote mmap / public / localized */ 51 52 VSEG_TYPE_KCODE = 6, /*! executable kernel code / private / localized */ 53 VSEG_TYPE_KDATA = 7, /*! initialized kernel data / private / localized */ 54 VSEG_TYPE_KDEV = 8, /*! kernel peripheral device / public / localized */ 48 55 } 49 56 vseg_type_t; … … 60 67 #define VSEG_PRIVATE 0x0010 /*! should not be accessed from another cluster */ 61 68 #define VSEG_DISTRIB 0x0020 /*! physically distributed on all clusters */ 62 #define VSEG_IDENT 0x0040 /*! identity mapping */63 69 64 70 /******************************************************************************************* -
trunk/kernel/syscalls/shared_include/shared_almos.h
r611 r623 53 53 DISPLAY_BUSYLOCKS = 8, 54 54 DISPLAY_MAPPER = 9, 55 DISPLAY_BARRIER = 10, 55 56 } 56 57 display_type_t; -
trunk/kernel/syscalls/shared_include/shared_mman.h
r594 r623
2 2 * shared_mman.h - Shared structures & mnemonics used by the <mman.h> user library.
3 3 *
4 * Author Alain Greiner (2016,2017,2018)
4 * Author Alain Greiner (2016,2017,2018,2019)
5 5 *
6 6 * Copyright (c) UPMC Sorbonne Universites
…
26 26
27 27 /*******************************************************************************************
28 * These structure are used by the mmap() syscall().
28 29 * This structure is used by the mmap() syscall().
29 30 ******************************************************************************************/
30 31
-
trunk/kernel/syscalls/sys_creat.c
r457 r623
2 2 * sys_creat.c - create a file
3 3 *
4 * Author Alain Greiner (2016,2017)
4 * Author Alain Greiner (2016,2017,2018,2019)
5 5 *
6 6 * Copyright (c) UPMC Sorbonne Universites
-
trunk/kernel/syscalls/sys_display.c
r619 r623 31 31 #include <string.h> 32 32 #include <shared_syscalls.h> 33 #include <remote_barrier.h> 33 34 #include <vfs.h> 34 35 #include <mapper.h> … … 53 54 else if( type == DISPLAY_BUSYLOCKS ) return "BUSYLOCKS"; 54 55 else if( type == DISPLAY_MAPPER ) return "MAPPER"; 56 else if( type == DISPLAY_BARRIER ) return "BARRIER"; 55 57 else return "undefined"; 56 58 } … … 81 83 #endif 82 84 83 //////////////////////////// 84 if( type == DISPLAY_STRING ) 85 switch( type ) 85 86 { 86 char kbuf[512]; 87 uint32_t length; 88 89 char * string = (char *)arg0; 90 91 // check string in user space 92 error = vmm_get_vseg( process , (intptr_t)arg0 , &vseg ); 93 94 if( error ) 95 { 87 //////////////////// 88 case DISPLAY_STRING: 89 { 90 char kbuf[512]; 91 uint32_t length; 92 93 char * string = (char *)arg0; 94 95 // check string in user space 96 error = vmm_get_vseg( process , (intptr_t)arg0 , &vseg ); 97 98 if( error ) 99 { 96 100 97 101 #if DEBUG_SYSCALLS_ERROR … … 99 103 __FUNCTION__ , (intptr_t)arg0 ); 100 104 #endif 105 this->errno = EINVAL; 106 return -1; 107 } 108 109 // ckeck string length 110 length = hal_strlen_from_uspace( string ); 111 112 if( length >= 512 ) 113 { 114 115 #if DEBUG_SYSCALLS_ERROR 116 printk("\n[ERROR] in %s for STRING : string length %d too large\n", 117 __FUNCTION__ , length ); 118 #endif 119 this->errno = EINVAL; 120 return -1; 121 } 122 123 // copy string to kernel space 124 hal_strcpy_from_uspace( kbuf , string , 512 ); 125 126 // print message on TXT0 kernel terminal 127 printk("\n%s / cycle %d\n", kbuf, (uint32_t)hal_get_cycles() ); 128 129 break; 130 } 131 ///////////////// 132 case DISPLAY_VMM: 133 { 134 cxy_t cxy = (cxy_t)arg0; 135 pid_t pid = (pid_t)arg1; 136 137 // check cxy argument 138 if( cluster_is_undefined( cxy ) ) 139 { 140 141 #if DEBUG_SYSCALLS_ERROR 142 printk("\n[ERROR] in %s for VMM : process %x in cluster %x not found\n", 143 __FUNCTION__ , pid , cxy ); 144 #endif 145 this->errno = EINVAL; 146 return -1; 147 } 148 149 // get extended pointer on process PID in cluster CXY 150 xptr_t process_xp = cluster_get_process_from_pid_in_cxy( cxy , pid ); 151 152 if( process_xp == XPTR_NULL ) 153 { 154 155 #if DEBUG_SYSCALLS_ERROR 156 printk("\n[ERROR] in %s for VMM : process %x in cluster %x not found\n", 157 __FUNCTION__ , pid , cxy ); 158 #endif 159 this->errno = EINVAL; 160 return -1; 161 } 162 163 // get local pointer on process 164 process_t * process = (process_t *)GET_PTR( process_xp ); 165 166 // call kernel function 167 if( cxy == local_cxy ) 168 { 169 vmm_display( process , true ); 170 } 171 else 172 { 173 rpc_vmm_display_client( cxy , process , true ); 174 } 175 176 break; 177 } 178 /////////////////// 179 case DISPLAY_SCHED: 180 { 181 cxy_t cxy = (cxy_t)arg0; 182 lid_t lid = (lid_t)arg1; 183 184 // check cxy argument 185 if( cluster_is_undefined( cxy ) ) 186 { 187 188 #if DEBUG_SYSCALLS_ERROR 189 printk("\n[ERROR] in %s for SCHED : illegal cxy argument %x\n", 190 __FUNCTION__ , cxy ); 191 #endif 192 this->errno = EINVAL; 193 return -1; 194 } 195 196 // check lid argument 197 if( lid >= LOCAL_CLUSTER->cores_nr ) 198 { 199 200 #if DEBUG_SYSCALLS_ERROR 201 printk("\n[ERROR] in %s for SCHED : illegal lid argument %x\n", 202 __FUNCTION__ , lid ); 203 #endif 204 this->errno = EINVAL; 205 return -1; 206 } 207 208 if( cxy == local_cxy ) 209 { 210 sched_display( lid ); 211 } 212 else 213 { 214 sched_remote_display( cxy , lid ); 215 } 216 217 break; 218 } 219 /////////////////////////////// 220 case DISPLAY_CLUSTER_PROCESSES: 221 { 222 cxy_t cxy = 
(cxy_t)arg0; 223 bool_t owned = (bool_t)arg1; 224 225 // check cxy argument 226 if( cluster_is_undefined( cxy ) ) 227 { 228 229 #if DEBUG_SYSCALLS_ERROR 230 printk("\n[ERROR] in %s for CLUSTER_PROCESSES : illegal cxy argument %x\n", 231 __FUNCTION__ , cxy ); 232 #endif 233 this->errno = EINVAL; 234 return -1; 235 } 236 237 cluster_processes_display( cxy , owned ); 238 239 break; 240 } 241 ///////////////// 242 case DISPLAY_VFS: 243 { 244 vfs_display( process->vfs_root_xp ); 245 246 break; 247 } 248 /////////////////// 249 case DISPLAY_CHDEV: 250 { 251 chdev_dir_display(); 252 253 break; 254 } 255 /////////////////////////// 256 case DISPLAY_TXT_PROCESSES: 257 { 258 uint32_t txt_id = (uint32_t)arg0; 259 260 // check argument 261 if( txt_id >= LOCAL_CLUSTER->nb_txt_channels ) 262 { 263 264 #if DEBUG_SYSCALLS_ERROR 265 printk("\n[ERROR] in %s for TXT_PROCESSES : illegal txt_id argument %d\n", 266 __FUNCTION__ , txt_id ); 267 #endif 268 this->errno = EINVAL; 269 return -1; 270 } 271 272 process_txt_display( txt_id ); 273 274 break; 275 } 276 ////////////////// 277 case DISPLAY_DQDT: 278 { 279 dqdt_display(); 280 281 break; 282 } 283 /////////////////////// 284 case DISPLAY_BUSYLOCKS: 285 { 286 pid_t pid = (pid_t)arg0; 287 trdid_t trdid = (trdid_t)arg1; 288 289 // get extended pointer on target thread 290 xptr_t thread_xp = thread_get_xptr( pid , trdid ); 291 292 if( thread_xp == XPTR_NULL ) 293 { 294 295 #if DEBUG_SYSCALLS_ERROR 296 printk("\n[ERROR] in %s for BUSYLOCKS : thread[%x,%x] not found\n", 297 __FUNCTION__ , pid, trdid ); 298 #endif 299 this->errno = EINVAL; 300 return -1; 301 } 302 303 thread_display_busylocks( thread_xp , __FUNCTION__ ); 304 305 break; 306 } 307 //////////////////// 308 case DISPLAY_MAPPER: 309 { 310 xptr_t root_inode_xp; 311 xptr_t inode_xp; 312 cxy_t inode_cxy; 313 vfs_inode_t * inode_ptr; 314 xptr_t mapper_xp; 315 mapper_t * mapper_ptr; 316 317 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 318 319 char * path = (char *)arg0; 320 uint32_t page_id = (uint32_t)arg1; 321 uint32_t nbytes = (uint32_t)arg2; 322 323 // check pathname length 324 if( hal_strlen_from_uspace( path ) >= CONFIG_VFS_MAX_PATH_LENGTH ) 325 { 326 327 #if DEBUG_SYSCALLS_ERROR 328 printk("\n[ERROR] in %s for MAPPER : pathname too long\n", 329 __FUNCTION__ ); 330 #endif 331 this->errno = ENFILE; 332 return -1; 333 } 334 335 // copy pathname in kernel space 336 hal_strcpy_from_uspace( kbuf , path , CONFIG_VFS_MAX_PATH_LENGTH ); 337 338 // compute root inode for pathname 339 if( kbuf[0] == '/' ) // absolute path 340 { 341 // use extended pointer on VFS root inode 342 root_inode_xp = process->vfs_root_xp; 343 } 344 else // relative path 345 { 346 // get cluster and local pointer on reference process 347 xptr_t ref_xp = process->ref_xp; 348 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 349 cxy_t ref_cxy = GET_CXY( ref_xp ); 350 351 // get extended pointer on CWD inode 352 root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) ); 353 } 354 355 // get extended pointer on target inode 356 error = vfs_lookup( root_inode_xp, 357 kbuf, 358 0, 359 &inode_xp, 360 NULL ); 361 if( error ) 362 { 363 364 #if DEBUG_SYSCALLS_ERROR 365 printk("\n[ERROR] in %s for MAPPER : cannot found inode <%s>\n", 366 __FUNCTION__ , kbuf ); 367 #endif 368 this->errno = ENFILE; 369 return -1; 370 } 371 372 // get target inode cluster and local pointer 373 inode_cxy = GET_CXY( inode_xp ); 374 inode_ptr = GET_PTR( inode_xp ); 375 376 // get extended pointer on target mapper 377 mapper_ptr = hal_remote_lpt( XPTR( inode_cxy 
, &inode_ptr->mapper ) ); 378 mapper_xp = XPTR( inode_cxy , mapper_ptr ); 379 380 // display mapper 381 error = mapper_display_page( mapper_xp , page_id , nbytes ); 382 383 if( error ) 384 { 385 386 #if DEBUG_SYSCALLS_ERROR 387 printk("\n[ERROR] in %s for MAPPER : cannot display page %d\n", 388 __FUNCTION__ , page_id ); 389 #endif 390 this->errno = ENFILE; 391 return -1; 392 } 393 394 break; 395 } 396 ///////////////////// 397 case DISPLAY_BARRIER: 398 { 399 // get target process PID 400 pid_t pid = (pid_t)arg0; 401 402 // get pointers on owner process 403 xptr_t process_xp = cluster_get_reference_process_from_pid( pid ); 404 process_t * process_ptr = GET_PTR( process_xp ); 405 cxy_t process_cxy = GET_CXY( process_xp ); 406 407 if( process_xp == XPTR_NULL ) 408 { 409 410 #if DEBUG_SYSCALLS_ERROR 411 printk("\n[ERROR] in %s for BARRIER : process %x not found\n", 412 __FUNCTION__ , pid ); 413 #endif 414 this->errno = EINVAL; 415 return -1; 416 } 417 418 // get extended pointer on root of list of barriers 419 xptr_t root_xp = XPTR( process_cxy , &process_ptr->barrier_root ); 420 421 if( xlist_is_empty( root_xp ) ) 422 { 423 424 #if DEBUG_SYSCALLS_ERROR 425 printk("\n[ERROR] in %s for BARRIER : no registered barrier in process %x\n", 426 __FUNCTION__ , pid ); 427 #endif 428 this->errno = EINVAL; 429 return -1; 430 } 431 432 // get extended pointer on first registered generic barrier descriptor 433 xptr_t gen_barrier_xp = XLIST_FIRST( root_xp , generic_barrier_t , list ); 434 435 // display barrier state 436 generic_barrier_display( gen_barrier_xp ); 437 438 break; 439 } 440 //////// 441 default: 442 { 443 444 #if DEBUG_SYSCALLS_ERROR 445 printk("\n[ERROR] in %s : undefined display type %d\n", 446 __FUNCTION__ , type ); 447 #endif 101 448 this->errno = EINVAL; 102 449 return -1; 103 450 } 104 105 // ckeck string length 106 length = hal_strlen_from_uspace( string ); 107 108 if( length >= 512 ) 109 { 110 111 #if DEBUG_SYSCALLS_ERROR 112 printk("\n[ERROR] in %s for STRING : string length %d too large\n", 113 __FUNCTION__ , length ); 114 #endif 115 this->errno = EINVAL; 116 return -1; 117 } 118 119 // copy string to kernel space 120 hal_strcpy_from_uspace( kbuf , string , 512 ); 121 122 // print message on TXT0 kernel terminal 123 printk("\n%s / cycle %d\n", kbuf, (uint32_t)hal_get_cycles() ); 124 } 125 ////////////////////////////// 126 else if( type == DISPLAY_VMM ) 127 { 128 cxy_t cxy = (cxy_t)arg0; 129 pid_t pid = (pid_t)arg1; 130 131 // check cxy argument 132 if( cluster_is_undefined( cxy ) ) 133 { 134 135 #if DEBUG_SYSCALLS_ERROR 136 printk("\n[ERROR] in %s for VMM : process %x in cluster %x not found\n", 137 __FUNCTION__ , pid , cxy ); 138 #endif 139 this->errno = EINVAL; 140 return -1; 141 } 142 143 // get extended pointer on process PID in cluster CXY 144 xptr_t process_xp = cluster_get_process_from_pid_in_cxy( cxy , pid ); 145 146 if( process_xp == XPTR_NULL ) 147 { 148 149 #if DEBUG_SYSCALLS_ERROR 150 printk("\n[ERROR] in %s for VMM : process %x in cluster %x not found\n", 151 __FUNCTION__ , pid , cxy ); 152 #endif 153 this->errno = EINVAL; 154 return -1; 155 } 156 157 // get local pointer on process 158 process_t * process = (process_t *)GET_PTR( process_xp ); 159 160 // call kernel function 161 if( cxy == local_cxy ) 162 { 163 vmm_display( process , true ); 164 } 165 else 166 { 167 rpc_vmm_display_client( cxy , process , true ); 168 } 169 } 170 //////////////////////////////// 171 else if( type == DISPLAY_SCHED ) 172 { 173 cxy_t cxy = (cxy_t)arg0; 174 lid_t lid = (lid_t)arg1; 175 176 
// check cxy argument 177 if( cluster_is_undefined( cxy ) ) 178 { 179 180 #if DEBUG_SYSCALLS_ERROR 181 printk("\n[ERROR] in %s for SCHED : illegal cxy argument %x\n", 182 __FUNCTION__ , cxy ); 183 #endif 184 this->errno = EINVAL; 185 return -1; 186 } 187 188 // check lid argument 189 if( lid >= LOCAL_CLUSTER->cores_nr ) 190 { 191 192 #if DEBUG_SYSCALLS_ERROR 193 printk("\n[ERROR] in %s for SCHED : illegal lid argument %x\n", 194 __FUNCTION__ , lid ); 195 #endif 196 this->errno = EINVAL; 197 return -1; 198 } 199 200 if( cxy == local_cxy ) 201 { 202 sched_display( lid ); 203 } 204 else 205 { 206 sched_remote_display( cxy , lid ); 207 } 208 } 209 //////////////////////////////////////////// 210 else if( type == DISPLAY_CLUSTER_PROCESSES ) 211 { 212 cxy_t cxy = (cxy_t)arg0; 213 bool_t owned = (bool_t)arg1; 214 215 // check cxy argument 216 if( cluster_is_undefined( cxy ) ) 217 { 218 219 #if DEBUG_SYSCALLS_ERROR 220 printk("\n[ERROR] in %s for CLUSTER_PROCESSES : illegal cxy argument %x\n", 221 __FUNCTION__ , cxy ); 222 #endif 223 this->errno = EINVAL; 224 return -1; 225 } 226 227 cluster_processes_display( cxy , owned ); 228 } 229 ////////////////////////////// 230 else if( type == DISPLAY_VFS ) 231 { 232 vfs_display( process->vfs_root_xp ); 233 } 234 //////////////////////////////// 235 else if( type == DISPLAY_CHDEV ) 236 { 237 chdev_dir_display(); 238 } 239 //////////////////////////////////////// 240 else if( type == DISPLAY_TXT_PROCESSES ) 241 { 242 uint32_t txt_id = (uint32_t)arg0; 243 244 // check argument 245 if( txt_id >= LOCAL_CLUSTER->nb_txt_channels ) 246 { 247 248 #if DEBUG_SYSCALLS_ERROR 249 printk("\n[ERROR] in %s for TXT_PROCESSES : illegal txt_id argument %d\n", 250 __FUNCTION__ , txt_id ); 251 #endif 252 this->errno = EINVAL; 253 return -1; 254 } 255 256 process_txt_display( txt_id ); 257 } 258 /////////////////////////////// 259 else if( type == DISPLAY_DQDT ) 260 { 261 dqdt_display(); 262 } 263 //////////////////////////////////// 264 else if( type == DISPLAY_BUSYLOCKS ) 265 { 266 pid_t pid = (pid_t)arg0; 267 trdid_t trdid = (trdid_t)arg1; 268 269 // get extended pointer on target thread 270 xptr_t thread_xp = thread_get_xptr( pid , trdid ); 271 272 if( thread_xp == XPTR_NULL ) 273 { 274 275 #if DEBUG_SYSCALLS_ERROR 276 printk("\n[ERROR] in %s for BUSYLOCKS : thread[%x,%x] not found\n", 277 __FUNCTION__ , pid, trdid ); 278 #endif 279 this->errno = EINVAL; 280 return -1; 281 } 282 283 thread_display_busylocks( thread_xp , __FUNCTION__ ); 284 } 285 ///////////////////////////////// 286 else if( type == DISPLAY_MAPPER ) 287 { 288 xptr_t root_inode_xp; 289 xptr_t inode_xp; 290 cxy_t inode_cxy; 291 vfs_inode_t * inode_ptr; 292 xptr_t mapper_xp; 293 mapper_t * mapper_ptr; 294 295 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 296 297 char * path = (char *)arg0; 298 uint32_t page_id = (uint32_t)arg1; 299 uint32_t nbytes = (uint32_t)arg2; 300 301 // check pathname length 302 if( hal_strlen_from_uspace( path ) >= CONFIG_VFS_MAX_PATH_LENGTH ) 303 { 304 305 #if DEBUG_SYSCALLS_ERROR 306 printk("\n[ERROR] in %s for MAPPER : pathname too long\n", 307 __FUNCTION__ ); 308 #endif 309 this->errno = ENFILE; 310 return -1; 311 } 312 313 // copy pathname in kernel space 314 hal_strcpy_from_uspace( kbuf , path , CONFIG_VFS_MAX_PATH_LENGTH ); 315 316 // compute root inode for pathname 317 if( kbuf[0] == '/' ) // absolute path 318 { 319 // use extended pointer on VFS root inode 320 root_inode_xp = process->vfs_root_xp; 321 } 322 else // relative path 323 { 324 // get cluster and local pointer on reference 
process 325 xptr_t ref_xp = process->ref_xp; 326 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 327 cxy_t ref_cxy = GET_CXY( ref_xp ); 328 329 // use extended pointer on CWD inode 330 root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) ); 331 } 332 333 // get extended pointer on target inode 334 error = vfs_lookup( root_inode_xp, 335 kbuf, 336 0, 337 &inode_xp, 338 NULL ); 339 if( error ) 340 { 341 342 #if DEBUG_SYSCALLS_ERROR 343 printk("\n[ERROR] in %s for MAPPER : cannot found inode <%s>\n", 344 __FUNCTION__ , kbuf ); 345 #endif 346 this->errno = ENFILE; 347 return -1; 348 } 349 350 // get target inode cluster and local pointer 351 inode_cxy = GET_CXY( inode_xp ); 352 inode_ptr = GET_PTR( inode_xp ); 353 354 // get extended pointer on target mapper 355 mapper_ptr = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 356 mapper_xp = XPTR( inode_cxy , mapper_ptr ); 357 358 // display mapper 359 error = mapper_display_page( mapper_xp , page_id , nbytes ); 360 361 if( error ) 362 { 363 364 #if DEBUG_SYSCALLS_ERROR 365 printk("\n[ERROR] in %s for MAPPER : cannot display page %d\n", 366 __FUNCTION__ , page_id ); 367 #endif 368 this->errno = ENFILE; 369 return -1; 370 } 371 } 372 //// 373 else 374 { 375 376 #if DEBUG_SYSCALLS_ERROR 377 printk("\n[ERROR] in %s : undefined display type %d\n", 378 __FUNCTION__ , type ); 379 #endif 380 this->errno = EINVAL; 381 return -1; 382 } 451 } // end switch on type 383 452 384 453 #if (DEBUG_SYS_DISPLAY || CONFIG_INSTRUMENTATION_SYSCALLS) -
trunk/kernel/syscalls/sys_mmap.c
r611 r623 56 56 57 57 #if DEBUG_SYS_MMAP 58 tm_start = hal_get_cycles(); 59 if ( DEBUG_SYS_MMAP < tm_start ) 58 if( DEBUG_SYS_MMAP < tm_start ) 60 59 printk("\n[%s] thread[%x,%x] enter / cycle %d\n", 61 60 __FUNCTION__, process->pid, this->trdid, (uint32_t)tm_start ); … … 314 313 #endif 315 314 315 #if CONFIG_INSTRUMENTATION_SYSCALLS 316 hal_atomic_add( &syscalls_cumul_cost[SYS_MMAP] , tm_end - tm_start ); 317 hal_atomic_add( &syscalls_occurences[SYS_MMAP] , 1 ); 318 #endif 319 316 320 #if DEBUG_SYS_MMAP 317 if ( DEBUG_SYS_MMAP < tm_ start)321 if ( DEBUG_SYS_MMAP < tm_end ) 318 322 printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / base %x / size %d / cycle %d\n", 319 323 __FUNCTION__, process->pid, this->trdid, -
trunk/kernel/syscalls/sys_munmap.c
r506 r623 25 25 #include <hal_kernel_types.h> 26 26 #include <hal_uspace.h> 27 #include <hal_irqmask.h> 27 28 #include <shared_syscalls.h> 28 29 #include <errno.h> … … 41 42 { 42 43 error_t error; 44 vseg_t * vseg; 45 reg_t save_sr; // required to enable IRQs 43 46 44 47 thread_t * this = CURRENT_THREAD; 45 48 process_t * process = this->process; 46 49 50 #if (DEBUG_SYS_MUNMAP || CONFIG_INSTRUMENTATION_SYSCALLS) 51 uint64_t tm_start = hal_get_cycles(); 52 #endif 53 47 54 #if DEBUG_SYS_MUNMAP 48 uint64_t tm_start;49 uint64_t tm_end;50 tm_start = hal_get_cycles();51 55 if( DEBUG_SYS_MUNMAP < tm_start ) 52 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n" 56 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n", 53 57 __FUNCTION__ , this, process->pid, (uint32_t)tm_start ); 54 58 #endif 59 60 // check user buffer is mapped 61 error = vmm_get_vseg( process , (intptr_t)vaddr, &vseg ); 62 63 if( error ) 64 { 65 66 #if DEBUG_SYSCALLS_ERROR 67 printk("\n[ERROR] in %s : thread[%x,%x] / user buffer unmapped %x\n", 68 __FUNCTION__ , process->pid, this->trdid, (intptr_t)vaddr ); 69 vmm_display( process , false ); 70 #endif 71 this->errno = EINVAL; 72 return -1; 73 } 74 75 // enable IRQs 76 hal_enable_irq( &save_sr ); 55 77 56 78 // call relevant kernel function … … 67 89 } 68 90 91 // restore IRQs 92 hal_restore_irq( save_sr ); 93 94 #if (DEBUG_SYS_MUNMAP || CONFIG_INSTRUMENTATION_SYSCALLS) 95 uint64_t tm_end = hal_get_cycles(); 96 #endif 97 98 #if CONFIG_INSTRUMENTATION_SYSCALLS 99 hal_atomic_add( &syscalls_cumul_cost[SYS_MUNMAP] , tm_end - tm_start ); 100 hal_atomic_add( &syscalls_occurences[SYS_MUNMAP] , 1 ); 101 #endif 102 69 103 #if DEBUG_SYS_MUNMAP 70 tm_end = hal_get_cycles();71 104 if( DEBUG_SYS_MUNMAP < tm_start ) 72 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n" 105 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n", 73 106 __FUNCTION__ , this, process->pid, (uint32_t)tm_end ); 74 107 #endif -
trunk/kernel/syscalls/sys_place_fork.c
r584 r623
2 2 * sys_place_fork.c - specify the target cluster for the next fork().
3 3 *
4 * Author Alain Greiner (2016,2017)
4 * Author Alain Greiner (2016,2017,2018,2019)
5 5 *
6 6 * Copyright (c) UPMC Sorbonne Universites
-
trunk/kernel/syscalls/sys_write.c
r610 r623 173 173 } 174 174 175 // update size field in inode if required176 xptr_t size_xp = XPTR( file_cxy , &inode_ptr->size );177 uint32_t inode_size = hal_remote_l32( size_xp );178 if ( (file_offset + count) > inode_size )179 {180 hal_remote_s32( size_xp , file_offset + count );181 } 175 // update file size in inode descriptor 176 // only if (file_offset + count) > current_size 177 // note: the parent directory entry in mapper will 178 // be updated by the close syscall 179 xptr_t inode_xp = XPTR( file_cxy , inode_ptr ); 180 vfs_inode_update_size( inode_xp , file_offset + count ); 181 182 182 } 183 183 else if( file_type == INODE_TYPE_DEV ) // write to TXT device -
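A hedged sketch of the vfs_inode_update_size() helper used above; the actual implementation lives in the VFS, and this sketch only illustrates the "only grow" semantics implied by the comment, using the same remote-access primitives as the code it replaces:

    void vfs_inode_update_size( xptr_t inode_xp , uint32_t size )
    {
        cxy_t         inode_cxy = GET_CXY( inode_xp );
        vfs_inode_t * inode_ptr = GET_PTR( inode_xp );

        xptr_t   size_xp      = XPTR( inode_cxy , &inode_ptr->size );
        uint32_t current_size = hal_remote_l32( size_xp );

        // update only if the new size is larger than the current size
        if( size > current_size ) hal_remote_s32( size_xp , size );
    }
-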
trunk/kernel/syscalls/syscalls.h
r619 r623
236 236 /******************************************************************************************
237 237 * [15] This function writes bytes to an open file identified by its file descriptor.
238 * The file can be a regular file or character oriented device.
238 * The file can be a regular file or character oriented device. For a regular file,
239 * the target inode "size" field is updated if (offset + count) is larger than the
240 * current "size" value. The size values registered in the mapper(s) of the parent
241 * directory are not modified and will be asynchronously updated when the file is closed.
239 242 * IRQs are enabled during this system call.
240 243 ******************************************************************************************
…
329 332
330 333 /******************************************************************************************
331 * [23] This function open a directory, that must exist in the file system, returning
332 * a DIR pointer on the dirent array in user space.
334 * [23] This function creates a user level directory descriptor (including the associated
335 * array of user level dirents), and initialises it from the kernel directory mapper, that
336 * contains all entries in this directory. The directory is identified by the <pathname>
337 * argument. If the corresponding inode is missing in the Inode Tree, the inode is created,
338 * but the directory must exist in the file system.
339 * It returns a DIR pointer <dirp> on the dirent array in user space.
333 340 ******************************************************************************************
334 341 * @ pathname : [in] pathname (can be relative or absolute).
-
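A user-side sketch of the opendir() semantics documented above (standard POSIX usage, shown here only to illustrate the lifecycle of the user-level dirent array):

    DIR           * dirp;
    struct dirent * entry;

    dirp = opendir( "/home" );          // builds the user-level dirent array
    if( dirp == NULL ) return -1;

    while( (entry = readdir( dirp )) )  // walks the dirent array
        printf("%s\n", entry->d_name );

    closedir( dirp );                   // releases the user-level descriptor
-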
trunk/libs/libalmosmkh/almosmkh.c
r611 r623 288 288 (reg_t)page_id, 289 289 (reg_t)nbytes ); 290 } 291 292 /////////////////////////////////////// 293 int display_barrier( unsigned int pid ) 294 { 295 return hal_user_syscall( SYS_DISPLAY, 296 DISPLAY_BARRIER, 297 (reg_t)pid, 0, 0 ); 290 298 } 291 299 -
trunk/libs/libalmosmkh/almosmkh.h
r611 r623
227 227 unsigned int nbytes);
228 228
229 /***************************************************************************************
230 * This debug syscall displays on the kernel terminal TXT0
231 * the state of the barrier used by the process identified by the <pid> argument.
232 * It can be called by any thread running in any cluster.
233 ***************************************************************************************
234 * @ pid : [in] process identifier.
235 * @ return 0 if success / return -1 if illegal arguments.
236 **************************************************************************************/
237 int display_barrier( unsigned int pid );
238
229 239 /*****************************************************************************************
230 240 * This debug syscall is used to activate / deactivate the context switches trace
-
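A hedged usage sketch of the new display_barrier() syscall declared above (getpid() is assumed to be available in the mini-libc):

    #include <almosmkh.h>
    #include <unistd.h>

    void dump_my_barrier( void )
    {
        // display the first barrier registered by the calling process
        if( display_barrier( getpid() ) )
            printf("no barrier registered for this process\n");
    }
-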
trunk/libs/mini-libc/mman.h
r597 r623

   * virtual space, as defined by the arguments.
   *****************************************************************************************
-  * @ addr     : requested address in virtual space / un used : should be NULL.
+  * @ addr     : requested address in virtual space / unsupported : should be NULL.
   * @ length   : requested number of bytes.
   * @ prot     : access mode bit vector (PROT_EXEC / PROT_READ / PROT_WRITE)
-  * @ flags    : bit_vector (MAP_FILE / MAP_ANON / MAP REMOTE / MAP_PRIVATE / MAP_SHARED)
+  * @ flags    : bit_vector (MAP_FILE / MAP_ANON / MAP_REMOTE / MAP_PRIVATE / MAP_SHARED)
   * @ fdid     : file descriptor index (if MAP_FILE).
   * @ offset   : offset in file (if MAP_FILE).
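The corrected comment matches the way the new cat command (further below) uses the call: MAP_FILE is implied when neither MAP_ANON nor MAP_REMOTE is set in flags. A minimal file mapping sketch along those lines, assuming the file size is already known (mmap() returns NULL on failure in this mini-libc):

    #include <mman.h>
    #include <fcntl.h>
    #include <unistd.h>

    // map a whole file, display it on the TXT terminal, then release everything
    int dump_file( const char * path , unsigned int size )
    {
        int fd = open( path , O_RDONLY , 0 );
        if( fd < 0 ) return -1;

        // addr must be NULL / MAP_FILE is the default mapping type
        void * buf = mmap( NULL , size , PROT_READ|PROT_WRITE , MAP_PRIVATE , fd , 0 );
        if( buf == NULL )
        {
            close( fd );
            return -1;
        }

        write( 1 , buf , size );     // display file content

        munmap( buf , size );        // unmap before closing
        return close( fd );
    }
-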
trunk/libs/mini-libc/stdio.c
r610 r623

  ////////////////////////////////////////////////////////////////////////////////////////

+ // This user space array registers all FILE descriptors open by a given process
  FILE open_file_array[MAX_OPEN_FILE_PER_PROCESS];   // array of open files structures

… …
      if( mode != NULL )
      {
-         printf("\n[ERROR] in %s: the mode argument must be NULL\n", __FUNCTION__ );
+         printf("\n[%s] error : the mode argument must be NULL\n", __FUNCTION__ );
          return NULL;
      }
… …
      if( fd < 0 )
      {
-         printf("\n[ERROR] in %s : file %s not found\n", __FUNCTION__ , pathname );
+         printf("\n[%s] error : file <%s> not found\n", __FUNCTION__ , pathname );
          return NULL;
      }
      if( fd > MAX_OPEN_FILE_PER_PROCESS )
      {
-         printf("\n[ERROR] in %s : not enough space for file %s\n", __FUNCTION__ , pathname );
+         printf("\n[%s] error : not enough space for file <%s>\n", __FUNCTION__ , pathname );
          return NULL;
      }
… …

      return &open_file_array[fd];
+
  }  // end fopen()

… …
      int fd = stream->fd;

-     // remove stream from open_file_array[]
+     // remove stream from user open_file_array[]
      open_file_array[fd].key = 0;

-     return close( fd );
+     // close the kernel file descriptor
+     if( close( fd ) )
+     {
+         printf("\n[%s] error : cannot close file %d\n", __FUNCTION__ , fd );
+         return -1;
+     }
+
+     return 0;
+
  }  // end fclose()

… …
      // get file descriptor from file pointer
      fd = stream->fd;

+     // set terminating NUL
      string[count] = 0;

+     printf("\n[%s] fd = %d for string :\n", __FUNCTION__, fd, string );
+
      return write( fd , &string , count );
  }
-
trunk/libs/mini-libc/stdio.h
r610 r623

   ********************************************************************************************/

- typedef struct file_s
+ typedef struct stream_s
  {
-     int fd;
-     int key;
+     int fd;    // index in both kernel fd_array[], and user open_file_array[]
+     int key;   // entry valid in open_file_array[] when (key == VALID_OPEN_FILE)
  }
  FILE;
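Taken together with the stdio.c changes above, the user side life cycle of a stream is short. A sketch under the mini-libc fopen() contract shown in this changeset (mode must be NULL; the pathname is illustrative):

    #include <stdio.h>

    int touch_and_close( void )
    {
        // mode must be NULL in this mini-libc
        FILE * stream = fopen( "home/data.txt" , NULL );
        if( stream == NULL ) return -1;

        // stream->fd indexes both the kernel fd_array[]
        // and the user open_file_array[]
        printf("file opened with fd = %d\n", stream->fd );

        // fclose() clears the open_file_array[] entry,
        // then closes the kernel file descriptor
        return fclose( stream );
    }
-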
trunk/params-hard.mk
r620 r623

  ARCH      = /users/alain/soc/tsar-trunk-svn-2013/platforms/tsar_generic_iob
- X_SIZE    = 2
- Y_SIZE    = 2
+ X_SIZE    = 1
+ Y_SIZE    = 1
  NB_PROCS  = 1
  NB_TTYS   = 3
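These three parameters now describe a single cluster with a single core. User applications recover them at run time; a sketch of the relation, using the get_config() call that sort.c (below) relies on:

    #include <almosmkh.h>

    // one thread per core : X_SIZE = Y_SIZE = NB_PROCS = 1 gives a single thread
    unsigned int total_cores( void )
    {
        unsigned int x_size;    // number of clusters in a row     (X_SIZE)
        unsigned int y_size;    // number of clusters in a column  (Y_SIZE)
        unsigned int ncores;    // number of cores per cluster     (NB_PROCS)

        get_config( &x_size , &y_size , &ncores );

        return x_size * y_size * ncores;
    }
-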
trunk/user/init/init.c
r588 r623

      // and avoid the hard_config include [AG]

-     // TODO introduce a communication channel between INIT and KSH
-     // to allow KSH to signal INIT the exec completion.
-
      ////////////////
      int main( void )
… …

  #if DEBUG_PROCESS_INIT
-     display_string("[INIT] process enters");
+     display_string("[init] process enters");
  #endif

… …
      {
          // INIT display error message
-         snprintf( string , 64 , "[INIT ERROR] cannot fork child[%d] => suicide" , i );
+         snprintf( string , 64 , "[init ERROR] cannot fork child[%d] => suicide" , i );
          display_string( string );
… …
          // CHILD[i] display error message
          snprintf( string , 64 ,
-         "[INIT ERROR] CHILD[%d] cannot exec KSH / ret_exec = %d" , i , ret_exec );
+         "[init ERROR] CHILD[%d] cannot exec KSH / ret_exec = %d" , i , ret_exec );
          display_string( string );
      }
… …
      {
          // INIT display CHILD[i] process PID
-         snprintf( string , 64 , "[INIT] created KSH[%d] / pid = %x", i , ret_fork );
+         snprintf( string , 64 , "[init] created KSH[%d] / pid = %x", i , ret_fork );
          display_string( string );
… …
      {
          // display string to report unexpected KSH process block
-         snprintf( string , 64 , "[INIT] KSH process %x stopped => unblock it" , rcv_pid );
+         snprintf( string , 64 , "[init] KSH process %x stopped => unblock it" , rcv_pid );
          display_string( string );
… …
      {
          // display string to report KSH process termination
-         snprintf( string , 64 , "[INIT] KSH process %x terminated => recreate", rcv_pid );
+         snprintf( string , 64 , "[init] KSH process %x terminated => recreate", rcv_pid );
          display_string( string );
… …
      {
          // INIT display error message
-         snprintf( string , 64 , "[INIT ERROR] cannot fork child => suicide");
+         snprintf( string , 64 , "[init ERROR] cannot fork child => suicide");
          display_string( string );
… …
      {
          // CHILD display error message on TXT0 terminal
-         snprintf( string , 64 , "[INIT ERROR] CHILD cannot exec KSH" );
+         snprintf( string , 64 , "[init ERROR] CHILD cannot exec KSH" );
          display_string( string );
      }
… …
      {
          // INIT display new KSH process PID
-         snprintf( string , 64 , "[INIT] re-created KSH / pid = %x", ret_fork );
+         snprintf( string , 64 , "[init] re-created KSH / pid = %x", ret_fork );
          display_string( string );
      }
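The renamed messages belong to init's supervision loop: wait for any KSH child, unblock a child that merely stopped, re-create one that terminated. A condensed sketch of that pattern; the wait macros follow POSIX, and unblock_child() and exec_ksh() are hypothetical stand-ins for whatever calls init.c actually uses:

    // condensed supervision loop, inferred from the messages above
    while( 1 )
    {
        int status;
        int rcv_pid = wait( &status );      // block until a KSH child changes state

        if( WIFSTOPPED( status ) )          // child stopped => unblock it
        {
            unblock_child( rcv_pid );       // hypothetical helper
        }
        else                                // child terminated => recreate it
        {
            int ret_fork = fork();
            if( ret_fork < 0 )  exit( 0 );  // cannot fork => suicide
            if( ret_fork == 0 ) exec_ksh(); // hypothetical: exec the KSH image
        }
    }
-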
trunk/user/ksh/ksh.c
r619 r623

  #define DEBUG_INTER          0
  #define DEBUG_PARSE          0
- #define DEBUG_CMD_CAT        0
+ #define DEBUG_CMD_CAT        1
  #define DEBUG_CMD_CP         0
  #define DEBUG_CMD_LOAD       0
… …
      if (argc != 2)
      {
-         fd   = -1;
-         buf  = NULL;
-         size = 0;
          printf(" usage: cat pathname\n");
-         goto cmd_cat_exit;
+
+         sem_post( &semaphore );
+         return;
      }

… …
      if (fd < 0)
      {
-         buf  = NULL;
-         size = 0;
          printf(" error: cannot open file <%s>\n", path);
-         goto cmd_cat_exit;
+
+         sem_post( &semaphore );
+         return;
      }

  #if DEBUG_CMD_CAT
- snprintf( string , 64 , "[KSH] %s : file %s open", __FUNCTION__, path );
+ snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, path );
  display_string( string );
  #endif
… …
      if ( stat( path , &st ) == -1)
      {
-         buf  = NULL;
-         size = 0;
          printf(" error: cannot stat <%s>\n", path);
-         goto cmd_cat_exit;
+
+         close(fd);
+         sem_post( &semaphore );
+         return;
      }

      if ( S_ISDIR(st.st_mode) )
      {
-         buf  = NULL;
-         size = 0;
          printf(" error: <%s> is a directory\n", path);
-         goto cmd_cat_exit;
+
+         close(fd);
+         sem_post( &semaphore );
+         return;
      }

… …

  #if DEBUG_CMD_CAT
- snprintf( string , 64 , "[KSH] %s : get size = %d", __FUNCTION__, size );
- display_string( string );
- #endif
-
-     // MAP_FILE is default type when MAP_ANON and MAP_REMOTE are not specified
+ snprintf( string , 64 , "[ksh] %s : size = %d", __FUNCTION__, size );
+ display_string( string );
+ #endif
+
+     if( size == 0 )
+     {
+         printf(" error: size = 0 for <%s>\n", path);
+
+         close(fd);
+         sem_post( &semaphore );
+         return;
+     }
+
+     // mapping type is MAP_FILE when MAP_ANON and MAP_REMOTE are not specified
      buf = mmap( NULL , size , PROT_READ|PROT_WRITE , MAP_PRIVATE , fd , 0 );

      if( buf == NULL )
      {
          printf(" error: cannot map file <%s>\n", path );
-         goto cmd_cat_exit;
+
+         close(fd);
+         sem_post( &semaphore );
+         return;
      }

  #if DEBUG_CMD_CAT
- snprintf( string , 64 , "[KSH] %s : map file %d to buffer %x", __FUNCTION__, fd , buf );
- display_string( string );
- display_vmm( 0 , getpid() );
+ snprintf( string , 64 , "[ksh] %s : mapped file %d to buffer %x", __FUNCTION__, fd , buf );
+ display_string( string );
+ // unsigned int pid = getpid();
+ // unsigned int cxy = pid >> 16;
+ // display_vmm( cxy , pid );
  #endif

      write( 1 , buf , size );

-     // release semaphore to get next command
-     sem_post( &semaphore );
-
-     return;
-
- cmd_cat_exit:
-
-     if (buf != NULL) munmap(buf, size);
-     if (fd >= 0) close(fd);
+     // unmap the file
+     if( munmap( buf , size ) )
+     {
+         printf(" error: cannot unmap file <%s>\n", path );
+     }
+
+ #if DEBUG_CMD_CAT
+ snprintf( string , 64 , "[ksh] %s : unmapped file %d from buffer %x", __FUNCTION__, fd , buf );
+ display_string( string );
+ // display_vmm( cxy , pid );
+ #endif
+
+     // close the file
+     if( close( fd ) )
+     {
+         printf(" error: cannot close file <%s>\n", path );
+     }

      // release semaphore to get next command
… …
  #if DEBUG_CMD_CP
- snprintf( string , 64 , "[KSH] %s : file %s open", __FUNCTION__, srcpath );
+ snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, srcpath );
  display_string( string );
  #endif
… …
  #if DEBUG_CMD_CP
- snprintf( string , 64 , "[KSH] %s : got stats for %s", __FUNCTION__, srcpath );
+ snprintf( string , 64 , "[ksh] %s : got stats for %s", __FUNCTION__, srcpath );
  display_string( string );
  #endif
… …
  #if DEBUG_CMD_CP
- snprintf( string , 64 , "[KSH] %s : file %s open", __FUNCTION__, dstpath );
+ snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, dstpath );
  display_string( string );
  #endif
… …
  #if DEBUG_CMD_CP
- snprintf( string , 64 , "[KSH] %s : got stats for %s", __FUNCTION__, dstpath );
+ snprintf( string , 64 , "[ksh] %s : got stats for %s", __FUNCTION__, dstpath );
  display_string( string );
  #endif
… …
  #if DEBUG_CMD_CP
- snprintf( string , 64 , "[KSH] %s : read %d bytes from %s", __FUNCTION__, len, srcpath );
+ snprintf( string , 64 , "[ksh] %s : read %d bytes from %s", __FUNCTION__, len, srcpath );
  display_string( string );
  #endif
… …
  #if DEBUG_CMD_CP
- snprintf( string , 64 , "[KSH] %s : write %d bytes to %s", __FUNCTION__, len, dstpath );
+ snprintf( string , 64 , "[ksh] %s : write %d bytes to %s", __FUNCTION__, len, dstpath );
  display_string( string );
  #endif
… …
             "    display dqdt\n"
             "    display locks pid trdid\n"
+            "    display barrier pid\n"
             "    display mapper path page_id nbytes\n");
  }
… …
          {
              printf(" error: illegal arguments pid = %x / trdid = %x\n", pid, trdid );
+             }
+         }
+     }
+     /////////////////////////////////////////////////
+     else if( strcmp( argv[1] , "barrier" ) == 0 )
+     {
+         if( argc != 3 )
+         {
+             printf(" usage: display barrier pid\n");
+         }
+         else
+         {
+             unsigned int pid = atoi(argv[2]);
+
+             if( display_barrier( pid ) )
+             {
+                 printf(" error: illegal arguments pid = %x\n", pid );
              }
          }
      }
… …
  #if DEBUG_CMD_LOAD
- snprintf( string , 64 , "[KSH] %s : ksh_pid %x / path %s / bg %d / place %d (%x)\n",
+ snprintf( string , 64 , "[ksh] %s : ksh_pid %x / path %s / bg %d / place %d (%x)\n",
            __FUNCTION__, ksh_pid, argv[1], background, placement, cxy );
  display_string( string );
… …
  #if DEBUG_CMD_LOAD
- snprintf( string , 64 , "[KSH] %s : child_pid %x after fork, before exec\n",
+ snprintf( string , 64 , "[ksh] %s : child_pid %x after fork, before exec\n",
            __FUNCTION__ , getpid() );
  display_string( string );
… …
  #if DEBUG_CMD_LOAD
- snprintf( string , 64 , "[KSH] %s : child_pid %x after exec / ret_exec %x\n",
+ snprintf( string , 64 , "[ksh] %s : child_pid %x after exec / ret_exec %x\n",
            __FUNCTION__ , getpid(), ret_exec );
  display_string( string );
… …
  #if DEBUG_CMD_LOAD
- snprintf( string , 64 , "[KSH] %s : ksh_pid %x after fork / ret_fork %x\n",
+ snprintf( string , 64 , "[ksh] %s : ksh_pid %x after fork / ret_fork %x\n",
            __FUNCTION__, getpid(), ret_fork );
  display_string( string );
… …
  #if DEBUG_CMD_LS
- snprintf( string , 64 , "[KSH] %s : directory <%s> open / DIR %x\n",
+ snprintf( string , 64 , "[ksh] %s : directory <%s> open / DIR %x\n",
            __FUNCTION__, pathname , dir );
  display_string( string );
… …
      {
          printf(" error : directory <%s> not found\n", pathname );
-         goto cmd_ls_exit;
+
+         sem_post( &semaphore );
+         return;
      }
… …
  #if DEBUG_CMD_LS
- snprintf( string , 64 , "[KSH] %s : directory <%s> closed\n",
+ snprintf( string , 64 , "[ksh] %s : directory <%s> closed\n",
            __FUNCTION__, pathname );
  display_string( string );
… …
      }
-
- cmd_ls_exit:

      // release semaphore to get next command
… …
  #if DEBUG_CMD_PS
- snprintf( string , 64 , "\n[KSH] %s : call display_cluster_process()", __FUNCTION__ );
+ snprintf( string , 64 , "\n[ksh] %s : call display_cluster_process()", __FUNCTION__ );
  display_string( string );
  #endif
… …
  #if DEBUG_PARSE
  char string[64];
- snprintf( string , 64 , "\n[KSH] %s : <%s>", __FUNCTION__ , buf );
+ snprintf( string , 64 , "\n[ksh] %s : <%s>", __FUNCTION__ , buf );
  display_string( string );
  #endif
… …
  #if DEBUG_PARSE
- snprintf( string , 64 , "\n[KSH] %s : argc = %d for <%s>", __FUNCTION__ , argc , argv[0] );
+ snprintf( string , 64 , "\n[ksh] %s : argc = %d for <%s>", __FUNCTION__ , argc , argv[0] );
  display_string( string );
  #endif
… …
  #if DEBUG_INTER
  unsigned int pid = getpid();
- snprintf( string , 64 , "\n[KSH] %s : request a new command", __FUNCTION__ );
+ snprintf( string , 64 , "\n[ksh] %s : request a new command", __FUNCTION__ );
  display_string( string );
  #endif
… …
  #if DEBUG_INTER
- snprintf( string , 64 , "[KSH] %s : parse and execute <%s>", __FUNCTION__, cmd );
+ snprintf( string , 64 , "[ksh] %s : parse and execute <%s>", __FUNCTION__, cmd );
  display_string( string );
  #endif
… …
  #if DEBUG_INTER
- snprintf( string , 64 , "\n[KSH] %s : complete <%s> command", __FUNCTION__, cmd );
+ snprintf( string , 64 , "\n[ksh] %s : complete <%s> command", __FUNCTION__, cmd );
  display_string( string );
  #endif
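The rewritten cat command replaces the single goto cmd_cat_exit cleanup with explicit per path cleanup: each early return releases exactly the resources acquired so far, and always releases the command semaphore. Reduced to a skeleton (names as in ksh.c, messages and debug code elided; this is a sketch, not the actual function):

    static void cmd_cat( int argc , char **argv )
    {
        struct stat st;

        int fd = open( argv[1] , O_RDONLY , 0 );
        if( fd < 0 )                        // nothing acquired yet
        { sem_post( &semaphore ); return; }

        if( stat( argv[1] , &st ) == -1 )   // undo open()
        { close( fd ); sem_post( &semaphore ); return; }

        void * buf = mmap( NULL , st.st_size ,
                           PROT_READ|PROT_WRITE , MAP_PRIVATE , fd , 0 );
        if( buf == NULL )                   // undo open()
        { close( fd ); sem_post( &semaphore ); return; }

        write( 1 , buf , st.st_size );      // display file content

        munmap( buf , st.st_size );         // undo mmap()
        close( fd );                        // undo open()
        sem_post( &semaphore );             // always release the semaphore last
    }
-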
trunk/user/sort/sort.c
r619 r623

  #include <hal_macros.h>

- #define ARRAY_LENGTH        256     // number of values
+ #define ARRAY_LENGTH        1024    // number of items
  #define MAX_THREADS         1024    // 16 * 16 * 4
- #define USE_DQT_BARRIER     1
- #define DISPLAY_ARRAY       0
- #define INTERACTIVE_MODE    0
+
+ #define USE_DQT_BARRIER     1       // use DQT barrier if non zero
+ #define DISPLAY_ARRAY       0       // display items values before and after
+ #define VERBOSE             0       // for debug
+ #define INTERACTIVE_MODE    0       // for debug
+ #define CHECK_RESULT        0       // for debug
+ #define INSTRUMENTATION     1       // register computation times on file

  /////////////////////////////////////////////////////////////
… …

- static void merge( const int * src,
-                    int * dst,
-                    int length,
-                    int init_pos_src_a,
-                    int init_pos_src_b,
-                    int init_pos_dst )
+ static void merge( const int * src,     // source array
+                    int * dst,           // destination array
+                    int length,          // number of items in a subset
+                    int init_pos_src_a,  // index first item in src subset A
+                    int init_pos_src_b,  // index first item in src subset B
+                    int init_pos_dst )   // index first item in destination
  {
      int i;
… …
      unsigned int lid;

-     int * src_array = NULL;
-     int * dst_array = NULL;
+     int * src_array  = NULL;
+     int * dst_array  = NULL;

      // get core coordinates an date
… …
      unsigned int main_uid = ptr->main_uid;

+ #if DISPLAY_ARRAY
+     unsigned int n;
+     if( thread_uid == main_uid )
+     {
+         printf("\n*** array before sort\n");
+         for( n=0; n<ARRAY_LENGTH; n++) printf("array[%d] = %d\n", n , array0[n] );
+     }
+ #endif
+
+     /////////////////////////////////
+     pthread_barrier_wait( &barrier );
+
+ #if VERBOSE
+     printf("\n[sort] thread[%d] exit barrier 0\n", thread_uid );
+ #endif
+
      unsigned int items  = ARRAY_LENGTH / threads;
      unsigned int stages = __builtin_ctz( threads ) + 1;

-     printf("\n[SORT] thread[%d] : start\n", thread_uid );
+ #if VERBOSE
+     printf("\n[sort] thread[%d] : start\n", thread_uid );
+ #endif

      bubbleSort( array0, items, items * thread_uid );

-     printf("\n[SORT] thread[%d] : stage 0 completed\n", thread_uid );
+ #if VERBOSE
+     printf("\n[sort] thread[%d] : stage 0 completed\n", thread_uid );
+ #endif

      /////////////////////////////////
      pthread_barrier_wait( &barrier );
-     printf("\n[SORT] thread[%d] exit barrier 0\n", thread_uid );

-     // the number of threads contributing to sort
-     // is divided by 2 at each next stage
+ #if VERBOSE
+     printf("\n[sort] thread[%d] exit barrier 0\n", thread_uid );
+ #endif
+
+ #if DISPLAY_ARRAY
+     if( thread_uid == main_uid )
+     {
+         printf("\n*** array after bubble sort\n");
+         for( n=0; n<ARRAY_LENGTH; n++) printf("array[%d] = %d\n", n , array0[n] );
+     }
+ #endif
+
+     // the number of threads contributing to sort is divided by 2
+     // and the number of items is multiplied by 2 at each next stage
      for ( i = 1 ; i < stages ; i++ )
      {
-         pthread_barrier_wait( &barrier );
+         if((i % 2) == 1)           // odd stage
+         {
+             src_array = array0;
+             dst_array = array1;
+         }
+         else                       // even stage
+         {
+             src_array = array1;
+             dst_array = array0;
+         }

          if( (thread_uid & ((1<<i)-1)) == 0 )
          {
-             printf("\n[SORT] thread[%d] : stage %d start\n", thread_uid , i );
-
-             if((i % 2) == 1)       // odd stage
-             {
-                 src_array = array0;
-                 dst_array = array1;
-             }
-             else                   // even stage
-             {
-                 src_array = array1;
-                 dst_array = array0;
-             }
-
+ #if VERBOSE
+             printf("\n[sort] thread[%d] : stage %d start\n", thread_uid , i );
+ #endif
              merge( src_array,
                     dst_array,
-                    items << i,
+                    items << (i-1),
                     items * thread_uid,
                     items * (thread_uid + (1 << (i-1))),
                     items * thread_uid );

-             printf("\n[SORT] thread[%d] : stage %d completed\n", thread_uid , i );
+ #if VERBOSE
+             printf("\n[sort] thread[%d] : stage %d completed\n", thread_uid , i );
+ #endif
          }

          /////////////////////////////////
          pthread_barrier_wait( &barrier );
-         printf("\n[SORT] thread[%d] exit barrier %d\n", thread_uid , i );
-
-     }
+
+ #if VERBOSE
+         printf("\n[sort] thread[%d] exit barrier %d\n", thread_uid , i );
+ #endif
+
+ #if DISPLAY_ARRAY
+         if( thread_uid == main_uid )
+         {
+             printf("\n*** array after merge %d\n", i );
+             for( n=0; n<ARRAY_LENGTH; n++) printf("array[%d] = %d\n", n , dst_array[n] );
+         }
+ #endif
+
+     } // end for stages

      // all threads but the main thread exit
… …
      unsigned int lid;                   // core local index for a thread
      unsigned int n;                     // index in array to sort
-     unsigned long long cycle;           // current date for log
      pthread_t trdid;                    // kernel allocated thread index (unused)
      pthread_barrierattr_t barrier_attr; // barrier attributes

+     unsigned long long start_cycle;
+     unsigned long long seq_end_cycle;
+     unsigned long long para_end_cycle;
+
+     /////////////////////////
+     get_cycle( &start_cycle );
+
      // compute number of threads (one thread per core)
      get_config( &x_size , &y_size , &ncores );
… …
          (total_threads != 512) && (total_threads != 1024) )
      {
-         printf("\n[SORT ERROR] number of cores must be power of 2\n");
+         printf("\n[sort error] number of cores must be power of 2\n");
          exit( 0 );
      }
… …
      if ( ARRAY_LENGTH % total_threads)
      {
-         printf("\n[SORT ERROR] array size must be multiple of number of threads\n");
+         printf("\n[sort error] array size must be multiple of number of threads\n");
          exit( 0 );
      }

-     printf("\n\n[SORT] main starts on core[%x,%d] / %d threads / %d values / PID %x\n",
-            main_cxy, main_lid, total_threads, ARRAY_LENGTH, getpid());
+     printf("\n\n[sort] main starts / %d threads / %d items / pid %x / cycle %d\n",
+            total_threads, ARRAY_LENGTH, getpid(), (unsigned int)start_cycle );

      // initialize barrier
… …
      if( error )
      {
-         printf("\n[SORT ERROR] cannot initialise barrier\n" );
+         printf("\n[sort error] cannot initialise barrier\n" );
          exit( 0 );
      }

-     printf("\n[SORT] main completes barrier init\n");
+ #if VERBOSE
+     printf("\n[sort] main completes barrier init\n");
+ #endif

      // Array to sort initialization
… …
      }

- #if DISPLAY_ARRAY
-     printf("\n*** array before sort\n");
-     for( n=0; n<ARRAY_LENGTH; n++) printf("array[%d] = %d\n", n , array0[n] );
- #endif
-
-     printf("\n[SORT] main completes array init\n");
+ #if VERBOSE
+     printf("\n[sort] main completes array init\n");
+ #endif

      // launch other threads to execute sort() function
… …
                          &arg[thread_uid] ) )    // sort arguments
          {
-             printf("\n[SORT ERROR] main cannot create thread %x \n", thread_uid );
+             printf("\n[sort error] main cannot create thread %x \n", thread_uid );
              exit( 0 );
          }
          else
          {
-             printf("\n[SORT] main created thread %x \n", thread_uid );
+ #if VERBOSE
+             printf("\n[sort] main created thread %x \n", thread_uid );
+ #endif
          }
      }
… …
      }

-     get_cycle( &cycle );
-     printf("\n[SORT] main completes threads create at cycle %d\n", (unsigned int)cycle );
+     ///////////////////////////
+     get_cycle( &seq_end_cycle );
+
+ #if VERBOSE
+     printf("\n[sort] main completes sequencial init at cycle %d\n",
+            (unsigned int)seq_end_cycle );
+ #endif

  #if INTERACTIVE_MODE
… …
      sort( &arg[main_uid] );

+     ////////////////////////////
+     get_cycle( &para_end_cycle );
+
+     printf("\n[sort] main completes parallel sort at cycle %d\n",
+            (unsigned int)para_end_cycle );
+
+     // destroy barrier
+     pthread_barrier_destroy( &barrier );
+
  #if INTERACTIVE_MODE
      idbg();
  #endif

-     // destroy barrier
-     pthread_barrier_destroy( &barrier );
-
- #if INTERACTIVE_MODE
-     idbg();
- #endif
-
-     // Check result
-     int success = 1;
-     int* res_array = ( (total_threads == 2)   ||
-                        (total_threads == 8)   ||
-                        (total_threads == 32)  ||
-                        (total_threads == 128) ||
-                        (total_threads == 512) ) ? array1 : array0;
-
-     for( n=0 ; n<(ARRAY_LENGTH-2) ; n++ )
-     {
-         if ( res_array[n] > res_array[n+1] )
-         {
-             printf("\n[SORT] array[%d] = %d > array[%d] = %d\n",
-                    n , res_array[n] , n+1 , res_array[n+1] );
-             success = 0;
-             break;
-         }
-     }
-
- #if DISPLAY_ARRAY
-     printf("\n*** array after sort\n");
-     for( n=0; n<ARRAY_LENGTH; n++) printf("array[%d] = %d\n", n , res_array[n] );
- #endif
-
-     get_cycle( &cycle );
-
-     if ( success )
-     {
-         printf("\n[SORT] success at cycle %d\n", (unsigned int)cycle );
-     }
-     else
-     {
-         printf("\n[SORT] failure at cycle %d\n", (unsigned int)cycle );
-     }
-
- #if INTERACTIVE_MODE
-     idbg();
- #endif
+ #if CHECK_RESULT
+     int success = 1;
+     int* res_array = ( (total_threads == 2)   ||
+                        (total_threads == 8)   ||
+                        (total_threads == 32)  ||
+                        (total_threads == 128) ||
+                        (total_threads == 512) ) ? array1 : array0;
+
+     for( n=0 ; n<(ARRAY_LENGTH-2) ; n++ )
+     {
+         if ( res_array[n] > res_array[n+1] )
+         {
+             printf("\n[sort] array[%d] = %d > array[%d] = %d\n",
+                    n , res_array[n] , n+1 , res_array[n+1] );
+             success = 0;
+             break;
+         }
+     }
+
+     if ( success ) printf("\n[sort] success\n");
+     else           printf("\n[sort] failure\n");
+ #endif
+
+ #if INSTRUMENTATION
+     char name[64];
+     char path[128];
+
+     // build a file name from n_items / n_clusters / n_cores
+     if( USE_DQT_BARRIER ) snprintf( name , 64 , "sort_dqt_%d_%d_%d",
+                                     ARRAY_LENGTH, x_size * y_size, ncores );
+     else                  snprintf( name , 64 , "sort_smp_%d_%d_%d",
+                                     ARRAY_LENGTH, x_size * y_size, ncores );
+
+     // build file pathname
+     snprintf( path , 128 , "home/%s" , name );
+
+     // compute results
+     unsigned int sequencial = (unsigned int)(seq_end_cycle - start_cycle);
+     unsigned int parallel   = (unsigned int)(para_end_cycle - seq_end_cycle);
+
+     // display results on process terminal
+     printf("\n----- %s -----\n"
+            " - sequencial : %d cycles\n"
+            " - parallel   : %d cycles\n",
+            name, sequencial, parallel );
+
+     // open file
+     FILE * stream = fopen( path , NULL );
+     if( stream == NULL )
+     {
+         printf("\n[sort error] cannot open instrumentation file <%s>\n", name );
+         exit(0);
+     }
+
+     // register results to file
+     fprintf( stream , "\n----- %s -----\n"
+                       " - sequencial : %d cycles\n"
+                       " - parallel   : %d cycles\n",
+                       name, sequencial, parallel );
+
+     // close instrumentation file
+     if( fclose( stream ) )
+     {
+         printf("\n[sort error] cannot close instrumentation file <%s>\n", name );
+         exit(0);
+     }
  #endif

… …

  }  // end main()
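The one functional change in this file is the merge length: items << (i-1) is the number of items in each source subset, where the previous items << i was the size of the merged result, so merge() read twice as many items as each subset actually holds. A small standalone check of the corrected indexing, assuming 4 threads and an 8 item array for illustration:

    #include <stdio.h>

    // Worked example of the corrected stage arithmetic:
    // threads = 4, ARRAY_LENGTH = 8, hence items = 2 and stages = 3.
    int main( void )
    {
        int items = 2;
        for( int i = 1 ; i < 3 ; i++ )                       // stages 1 and 2
        {
            for( int uid = 0 ; uid < 4 ; uid++ )             // thread_uid
            {
                if( (uid & ((1 << i) - 1)) == 0 )            // surviving threads only
                {
                    int length = items << (i - 1);           // size of EACH source subset
                    int a = items * uid;                     // first item of subset A
                    int b = items * (uid + (1 << (i - 1)));  // first item of subset B
                    printf("stage %d : thread %d merges [%d..%d] and [%d..%d]\n",
                           i, uid, a, a + length - 1, b, b + length - 1 );
                }
            }
        }
        return 0;
    }

Running this prints: stage 1 has threads 0 and 2 merging [0..1] with [2..3] and [4..5] with [6..7]; stage 2 has thread 0 merging [0..3] with [4..7], which is exactly the halving of active threads and doubling of subset size described in the new comment.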