/*
 * mapper.c - Map memory, file or device in process virtual address space.
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <grdxt.h>
#include <rwlock.h>
#include <printk.h>
#include <memcpy.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <ppm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>


//////////////////////////////////////////////
mapper_t * mapper_create( vfs_fs_type_t type )
{
    mapper_t * mapper;
    kmem_req_t req;
    error_t    error;

    // allocate memory for associated mapper
    req.type  = KMEM_MAPPER;
    req.size  = sizeof(mapper_t);
    req.flags = AF_KERNEL | AF_ZERO;
    mapper    = (mapper_t *)kmem_alloc( &req );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return NULL;
    }

    // initialize refcount & inode
    mapper->refcount = 0;
    mapper->inode    = NULL;

    // initialize radix tree
    error = grdxt_init( &mapper->radix,
                        CONFIG_VMM_GRDXT_W1,
                        CONFIG_VMM_GRDXT_W2,
                        CONFIG_VMM_GRDXT_W3 );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type = KMEM_MAPPER;
        req.ptr  = mapper;
        kmem_free( &req );
        return NULL;
    }

    // initialize mapper type
    mapper->type = type;

    // initialize mapper lock
    rwlock_init( &mapper->lock , LOCK_MAPPER_STATE );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );

    return mapper;

}  // end mapper_create()

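/*
 * Usage sketch (editor illustration, not part of the kernel sources): a mapper
 * is expected to be created when an inode is created, attached to that inode,
 * and destroyed together with it. The FS_TYPE_FATFS constant and the <inode>
 * variable below are assumptions used only for illustration.
 *
 *     mapper_t * mapper = mapper_create( FS_TYPE_FATFS );
 *     if( mapper == NULL ) return ENOMEM;
 *
 *     mapper->inode = inode;          // hypothetical inode owning this mapper
 *     ...
 *     mapper_destroy( mapper );       // flushes and releases all cached pages
 */
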
///////////////////////////////////////////
error_t mapper_destroy( mapper_t * mapper )
{
    page_t   * page;
    uint32_t   found_index = 0;
    uint32_t   start_index = 0;
    kmem_req_t req;
    error_t    error;

    // scan radix tree and release all registered pages to PPM
    do
    {
        // get page from radix tree
        page = (page_t *)grdxt_get_first( &mapper->radix , start_index , &found_index );

        if( page != NULL )
        {
            // remove page from mapper and release it to PPM
            error = mapper_release_page( mapper , page );

            if ( error ) return error;

            // update start_index value for next page
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to the radix tree itself
    grdxt_destroy( &mapper->radix );

    // release memory for mapper descriptor
    req.type = KMEM_MAPPER;
    req.ptr  = mapper;
    kmem_free( &req );

    return 0;

}  // end mapper_destroy()

////////////////////////////////////////////
page_t * mapper_get_page( mapper_t * mapper,
                          uint32_t   index )
{
    kmem_req_t req;
    page_t   * page;
    error_t    error;

#if DEBUG_MAPPER_GET_PAGE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , index , mapper , cycle );
#endif

    thread_t * this = CURRENT_THREAD;

    // check that the calling thread can yield
    thread_assert_can_yield( this , __FUNCTION__ );

    // take mapper lock in READ_MODE
    rwlock_rd_acquire( &mapper->lock );

    // search page in radix tree
    page = (page_t *)grdxt_lookup( &mapper->radix , index );

    // test if page is available in mapper
    if( ( page == NULL ) || page_is_flag( page , PG_INLOAD ) )   // page not available
    {

        // release the lock in READ_MODE and take it in WRITE_MODE
        rwlock_rd_release( &mapper->lock );
        rwlock_wr_acquire( &mapper->lock );

        // second test on missing page, because the page status may have been modified
        // by another thread while the lock was released between READ_MODE and WRITE_MODE.
        // From this point there is no concurrent access to the mapper.

        page = grdxt_lookup( &mapper->radix , index );

        if ( page == NULL )   // missing page => create it and load it from file system
        {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ );
#endif
            // allocate one page from PPM
            req.type  = KMEM_PAGE;
            req.size  = 0;
            req.flags = AF_NONE;
            page      = kmem_alloc( &req );

            if( page == NULL )
            {
                printk("\n[ERROR] in %s : thread %x cannot allocate a page in cluster %x\n",
                       __FUNCTION__ , this->trdid , local_cxy );
                rwlock_wr_release( &mapper->lock );
                return NULL;
            }

            // initialize the page descriptor
            page_init( page );
            page_set_flag( page , PG_INIT | PG_INLOAD );
            page_refcount_up( page );
            page->mapper = mapper;
            page->index  = index;

            // insert page in mapper radix tree
            error = grdxt_insert( &mapper->radix , index , page );

            // release mapper lock from WRITE_MODE
            rwlock_wr_release( &mapper->lock );

            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot insert page in mapper\n",
                       __FUNCTION__ , this->trdid );
                mapper_release_page( mapper , page );
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // launch I/O operation to load page from file system
            error = vfs_mapper_move_page( page,
                                          true );   // to mapper
            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot load page from device\n",
                       __FUNCTION__ , this->trdid );
                mapper_release_page( mapper , page );
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // reset the page INLOAD flag to make the page available to all readers
            page_clear_flag( page , PG_INLOAD );
        }
        else if( page_is_flag( page , PG_INLOAD ) )   // page is being loaded by another thread
        {
            // release mapper lock from WRITE_MODE
            rwlock_wr_release( &mapper->lock );

            // wait load completion
            while( page_is_flag( page , PG_INLOAD ) )
            {
                // deschedule without blocking
                sched_yield( "waiting page loading" );
            }
        }
    }
    else   // page available in mapper
    {
        rwlock_rd_release( &mapper->lock );
    }

#if DEBUG_MAPPER_GET_PAGE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle );
#endif

    return page;

}  // end mapper_get_page()

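/*
 * Usage sketch (editor illustration): a caller that needs direct access to the
 * cached page content can combine mapper_get_page() with ppm_page2base(), as
 * mapper_move_user() and mapper_move_kernel() do further below.
 *
 *     page_t  * page = mapper_get_page( mapper , index );
 *     if( page == NULL ) return EIO;
 *
 *     xptr_t    base_xp = ppm_page2base( XPTR( local_cxy , page ) );
 *     uint8_t * data    = (uint8_t *)GET_PTR( base_xp );    // local pointer on page
 */
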
///////////////////////////////////////////////
error_t mapper_release_page( mapper_t * mapper,
                             page_t   * page )
{
    error_t error;

    // launch I/O operation to update the page in the file system
    error = vfs_mapper_move_page( page , false );   // from mapper

    if( error )
    {
        printk("\n[ERROR] in %s : cannot update file system\n", __FUNCTION__ );
        return EIO;
    }

    // take mapper lock in WRITE_MODE
    rwlock_wr_acquire( &mapper->lock );

    // remove physical page from radix tree
    grdxt_remove( &mapper->radix , page->index );

    // release mapper lock from WRITE_MODE
    rwlock_wr_release( &mapper->lock );

    // release page to PPM
    kmem_req_t req;
    req.type = KMEM_PAGE;
    req.ptr  = page;
    kmem_free( &req );

    return 0;

}  // end mapper_release_page()

///////////////////////////////////////////////////
error_t mapper_move_user( mapper_t * mapper,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    uint32_t   page_offset;   // first byte to move to/from a mapper page
    uint32_t   page_count;    // number of bytes to move to/from a mapper page
    uint32_t   index;         // current mapper page index
    uint32_t   done;          // number of moved bytes
    page_t   * page;          // current mapper page descriptor
    uint8_t  * map_ptr;       // current mapper address
    uint8_t  * buf_ptr;       // current buffer address

#if DEBUG_MAPPER_MOVE_USER
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

    done = 0;

    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute number of bytes in page
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
__FUNCTION__ , index , page_offset , page_count );
#endif

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // compute pointer in mapper
        xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
        map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;

        // compute pointer in buffer
        buf_ptr = (uint8_t *)buffer + done;

        // move fragment
        if( to_buffer )
        {
            hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
        }
        else
        {
            ppm_page_do_dirty( page );
            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
        }

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_USER
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
#endif

    return 0;

}  // end mapper_move_user()

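/*
 * Worked example for the page-splitting arithmetic above (editor illustration,
 * assuming CONFIG_PPM_PAGE_SIZE = 4096 and CONFIG_PPM_PAGE_MASK = 0xFFF):
 * with file_offset = 4000 and size = 600, min_byte = 4000 and max_byte = 4599,
 * so first = 0 and last = 1. The loop then moves two fragments:
 *
 *     index 0 (first) : page_offset = 4000 , page_count = 4096 - 4000 = 96
 *     index 1 (last)  : page_offset = 0    , page_count = (4599 & 0xFFF) + 1 = 504
 *
 * i.e. 96 + 504 = 600 bytes in total, as expected. The same arithmetic is used
 * by mapper_move_kernel() below.
 */
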
////////////////////////////////////////////////
error_t mapper_move_kernel( mapper_t * mapper,
                            bool_t     to_buffer,
                            uint32_t   file_offset,
                            xptr_t     buffer_xp,
                            uint32_t   size )
{
    uint32_t   page_offset;   // first byte to move to/from a mapper page
    uint32_t   page_count;    // number of bytes to move to/from a mapper page
    uint32_t   index;         // current mapper page index
    uint32_t   done;          // number of moved bytes
    page_t   * page;          // current mapper page descriptor

    uint8_t  * src_ptr;       // source buffer local pointer
    cxy_t      src_cxy;       // source cluster
    uint8_t  * dst_ptr;       // destination buffer local pointer
    cxy_t      dst_cxy;       // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last );
#endif

    // compute source and destination clusters
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = local_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = local_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n",
__FUNCTION__ , index , page_offset , page_count );
#endif

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( XPTR( local_cxy , page ) );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            ppm_page_do_dirty( page );
        }

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
#endif

    return 0;

}  // end mapper_move_kernel()

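/*
 * Call sketch for mapper_move_kernel() (editor illustration, not taken from
 * the kernel sources): copying 512 bytes starting at file offset 1024 from the
 * mapper into a kernel buffer located in the local cluster. The <buffer> array
 * is an assumption used only for illustration.
 *
 *     uint8_t buffer[512];
 *     error_t error = mapper_move_kernel( mapper,
 *                                         true,                        // to buffer
 *                                         1024,                        // file offset
 *                                         XPTR( local_cxy , buffer ),  // extended pointer
 *                                         512 );                       // size in bytes
 */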