/*
 * page.c - physical page related operations implementation
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Alain Greiner    (2016,2017)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <hal_types.h>
#include <hal_special.h>
#include <hal_atomic.h>
#include <list.h>
#include <xlist.h>
#include <memcpy.h>
#include <thread.h>
#include <scheduler.h>
#include <cluster.h>
#include <ppm.h>
#include <mapper.h>
#include <printk.h>
#include <vfs.h>
#include <process.h>
#include <page.h>

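// This function initialises all dynamic fields of the page descriptor:
// flags, order, mapper, index, refcount and forks are reset, and the
// associated lock, list entry and waiting-threads root are initialised.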
////////////////////////////////////////
inline void page_init( page_t * page )
{
    page->flags    = 0;
    page->order    = 0;
    page->mapper   = NULL;
    page->index    = 0;
    page->refcount = 0;
    page->forks    = 0;

    spinlock_init( &page->lock );
    list_entry_init( &page->list );
    xlist_root_init( XPTR( local_cxy , &page->wait_root ) );
}

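// The two functions below atomically set / clear the bits defined by the
// <value> mask in the page flags field, and page_is_flag() returns true
// when at least one bit of the <value> mask is currently set.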
////////////////////////////////////////////
inline void page_set_flag( page_t * page,
                           uint32_t value )
{
    hal_atomic_or( (uint32_t *)&page->flags , (uint32_t)value );
}

//////////////////////////////////////////////
inline void page_clear_flag( page_t * page,
                             uint32_t value )
{
    hal_atomic_and( (uint32_t *)&page->flags , ~((uint32_t)value) );
}

//////////////////////////////////////////////
inline bool_t page_is_flag( page_t * page,
                            uint32_t value )
{
    return ( (page->flags & value) ? 1 : 0 );
}

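// This function sets the PG_DIRTY flag in the page descriptor and registers
// the page in the dirty list of the local PPM, under the dirty_lock.
// It returns true if the page was not already dirty.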
//////////////////////////////////////
bool_t page_do_dirty( page_t * page )
{
    bool_t done = false;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // lock the PPM dirty_list
    spinlock_lock( &ppm->dirty_lock );

    if( !page_is_flag( page , PG_DIRTY ) )
    {
        // set dirty flag in page descriptor
        page_set_flag( page , PG_DIRTY );

        // register page in PPM dirty list
        list_add_first( &ppm->dirty_root , &page->list );
        done = true;
    }

    // unlock the PPM dirty_list
    spinlock_unlock( &ppm->dirty_lock );

    return done;
}

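// This function clears the PG_DIRTY flag in the page descriptor and removes
// the page from the dirty list of the local PPM, under the dirty_lock.
// It returns true if the page was actually dirty.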
////////////////////////////////////////
bool_t page_undo_dirty( page_t * page )
{
    bool_t done = false;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // lock the dirty_list
    spinlock_lock( &ppm->dirty_lock );

    if( page_is_flag( page , PG_DIRTY ) )
    {
        // clear dirty flag in page descriptor
        page_clear_flag( page , PG_DIRTY );

        // remove page from PPM dirty list
        list_unlink( &page->list );
        done = true;
    }

    // unlock the dirty_list
    spinlock_unlock( &ppm->dirty_lock );

    return done;
}

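// This function scans the dirty list of the local PPM and flushes each
// registered page by calling vfs_mapper_move_page(). The dirty_lock is
// released and the page is locked during each transfer.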
/////////////////////////////
void sync_all_pages( void )
{
    page_t * page;
    ppm_t  * ppm = &LOCAL_CLUSTER->ppm;

    // lock the dirty_list
    spinlock_lock( &ppm->dirty_lock );

    while( !list_is_empty( &ppm->dirty_root ) )
    {
        page = LIST_FIRST( &ppm->dirty_root , page_t , list );

        // unlock the dirty_list
        spinlock_unlock( &ppm->dirty_lock );

        // lock the page
        page_lock( page );

        // sync the page
        vfs_mapper_move_page( page , false );   // from mapper

        // unlock the page
        page_unlock( page );

        // lock the dirty_list
        spinlock_lock( &ppm->dirty_lock );
    }

    // unlock the dirty_list
    spinlock_unlock( &ppm->dirty_lock );
}

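// This function acquires the sleeping lock implemented by the PG_LOCKED flag.
// If the page is already locked, the calling thread registers itself in the
// page waiting queue, blocks on THREAD_BLOCKED_PAGE and deschedules;
// otherwise it simply sets the PG_LOCKED flag.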
///////////////////////////////
void page_lock( page_t * page )
{
    // take the spinlock protecting the PG_LOCKED flag
    spinlock_lock( &page->lock );

    if( page_is_flag( page , PG_LOCKED ) )  // page is already locked
    {
        // get pointer on calling thread
        thread_t * thread = CURRENT_THREAD;

        // register thread in the page waiting queue
        xlist_add_last( XPTR( local_cxy , &page->wait_root ),
                        XPTR( local_cxy , &thread->wait_list ) );

        // release the spinlock
        spinlock_unlock( &page->lock );

        // deschedule the calling thread
        thread_block( XPTR( local_cxy , thread ) , THREAD_BLOCKED_PAGE );
        sched_yield( "cannot lock a page" );
    }
    else                                    // page is not locked
    {
        // set the PG_LOCKED flag
        page_set_flag( page , PG_LOCKED );

        // release the spinlock
        spinlock_unlock( &page->lock );
    }
}

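// This function releases the lock taken by page_lock(). If the page waiting
// queue is not empty, the first waiting thread is unblocked and inherits the
// lock (PG_LOCKED stays set); otherwise the PG_LOCKED flag is cleared.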
/////////////////////////////////
void page_unlock( page_t * page )
{
    // take the spinlock protecting the PG_LOCKED flag
    spinlock_lock( &page->lock );

    // check the page waiting list
    bool_t is_empty = xlist_is_empty( XPTR( local_cxy , &page->wait_root ) );

    if( is_empty == false )  // at least one waiting thread => resume it
    {
        // get an extended pointer on the first waiting thread
        xptr_t root_xp   = XPTR( local_cxy , &page->wait_root );
        xptr_t thread_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );

        // reactivate the first waiting thread
        thread_unblock( thread_xp , THREAD_BLOCKED_PAGE );
    }
    else                     // no waiting thread => clear the PG_LOCKED flag
    {
        page_clear_flag( page , PG_LOCKED );
    }

    // release the spinlock
    spinlock_unlock( &page->lock );
}

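// The two functions below atomically increment / decrement the refcount
// field of the page descriptor.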
/////////////////////////////////////////////
inline void page_refcount_up( page_t * page )
{
    hal_atomic_add( &page->refcount , +1 );
}

///////////////////////////////////////////////
inline void page_refcount_down( page_t * page )
{
    hal_atomic_add( &page->refcount , -1 );
}

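// This function resets to zero the (1 << order) contiguous physical pages
// associated with the page descriptor, using the local base address
// returned by ppm_page2base().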
///////////////////////////////
void page_zero( page_t * page )
{
    uint32_t size = (1 << page->order) * CONFIG_PPM_PAGE_SIZE;

    xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );

    memset( GET_PTR( base_xp ) , 0 , size );
}

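// This debug function displays the index, base address, flags, order and
// reference count of the page descriptor.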
////////////////////////////////
void page_print( page_t * page )
{
    printk("*** Page %d : base = %x / flags = %x / order = %d / count = %d\n",
           page->index,
           GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) ),
           page->flags,
           page->order,
           page->refcount );
}
