/*
 * Project: HARTIK (HA-rd R-eal TI-me K-ernel)
 *
 * Coordinators: Giorgio Buttazzo    <giorgio@sssup.it>
 *               Gerardo Lamastra    <gerardo@sssup.it>
 *
 * Authors     : Massimiliano Giorgi <massy@hartik.sssup.it>
 * (see authors.txt for full list of hartik's authors)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://hartik.sssup.it
 */

/*
 * Copyright (C) 1999 Massimiliano Giorgi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * CVS :        $Id: dcache.c,v 1.1.1.1 2002-03-29 14:12:50 pj Exp $
 *
 * File:        $File$
 * Revision:    $Revision: 1.1.1.1 $
 * Last update: $Date: 2002-03-29 14:12:50 $
 */

#include <fs/util.h>
#include <fs/assert.h>
#include <fs/bdev.h>
#include <fs/magic.h>
#include <fs/major.h>
#include <fs/sysmacro.h>
#include "dcache.h"
#include "mutex.h"
#include "semaph.h"
#include "rwlock.h"

#include "debug.h"

/*
 * To do:
 * - separate the "buffer" field of a cache entry from the entry itself,
 *   allowing better memory management and a variable cache block size
 *   without wasting space.
 * - improve the purging algorithm (the algorithm that removes entries from
 *   the cache).
 * - implement a server to maintain x% of the cache buffers free and to write
 *   dirty cache entries to disk during idle periods.
 */

/*
 * How the cache is implemented:
 * copy-back algorithm or write-through
 * (define only one of these)
 */

#define COPYBACK
//#define WRITETHROUGHT

/*
 * If a write-through policy is chosen for the cache and this symbol
 * is defined, the 'skipwt' bit in the dcache_t structure is honoured
 * (i.e. if 'skipwt' is set the cache entry uses a copy-back policy
 * and not a write-through policy).
 */

//#define HONOURSKIPWTFLAG

/*
 * Data structures:
 *
 * We have two tables:
 *   "dcache" contains the cache entries
 *   "dhash"  contains the index of the first entry of each hash list.
 * Every cache entry has two fields used to link the entries into lists.
 * There is a list of free entries: "freeindex" is the index into "dcache"
 * of the first free entry, and every entry has a "next" field that points
 * to the next free entry.
 * Every entry of "dhash" is the index of the first cache entry in a hash
 * bucket; every hash bucket is organized as a doubly linked list using
 * the "next" and "prev" fields of the cache entry.
 *
 * DANGER:
 * - to prevent deadlock, "freemutex" MUST always be locked
 *   after "hashmutex" has been locked.
 * - the functions whose names begin with "__" do not lock/unlock
 *   the mutexes.
 */

/*+ number of hash buckets +*/
#define MAXHASH 64
/*+ and-mask to obtain a number between 0 and MAXHASH-1 +*/
#define MAXHASHMASK 0x3f

/*+ hash buckets +*/
static int dhash[MAXHASH];
/*+ mutex to operate on "dhash" +*/
static __fs_mutex_t hashmutex;

/*+ number of cache entries +*/
#define MAXCACHESIZE (8192*4)
/*+ cache entries table +*/
static dcache_t dcache[MAXCACHESIZE];

/*+ mutex to operate with the "free list" +*/
static __fs_mutex_t freemutex;
/*+ index of first free cache entry +*/
static int freeindex=0;

static __dev_t blocked_device=makedev(MAJOR_B_NULL,0);

/*++++++++++++++++++++++++++++++++++++++

  Hash function.

  int hash_fun
    return a number between 0 and MAXHASH-1.

  __dev_t dev
    block device.

  __blkcnt_t lsector
    block number.
  ++++++++++++++++++++++++++++++++++++++*/

static int hash_fun(__dev_t dev, __blkcnt_t lsector)
{
  static __uint32_t table[8]={3,5,7,11,13,17,19,23};
  return (table[dev&0x07]*(lsector+1))&MAXHASHMASK;
}

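/*
 * Illustrative sketch (not part of the original module): how the tables
 * described above are meant to be walked.  A lookup hashes the
 * (device,lsector) pair with hash_fun() and then follows the "next"
 * links of the selected bucket.  The helper below is hypothetical and
 * kept out of the build with "#if 0"; only the table and field names it
 * uses ("dhash", "dcache", "device", "lsector", "next") come from this
 * file.
 */
#if 0
static int __example_lookup(__dev_t device, __blkcnt_t lsector)
{
  int i;

  /* the caller is assumed to hold "hashmutex" (see the DANGER note above) */
  for (i=dhash[hash_fun(device,lsector)];i!=NIL;i=dcache[i].next)
    if (dcache[i].device==device&&dcache[i].lsector==lsector)
      return i;   /* index into "dcache" */
  return NIL;     /* block not cached */
}
#endif
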
/*
 *
 * INITIALIZATION/TERMINATION FUNCTIONS
 *
 */

/*++++++++++++++++++++++++++++++++++++++

  Dump the cache status into the system logs.

  ++++++++++++++++++++++++++++++++++++++*/

#include <drivers/keyb.h>

void dcache_stats(void)
{
  int i,c,d,l,j,r,lc;
  long x,f;

  /* acquire all mutexes */
  __fs_mutex_lock(&hashmutex);
  __fs_mutex_lock(&freemutex);

  /* count free entries */
  c=0;
  i=freeindex;
  while (i!=NIL) {
    c++;
    i=dcache[i].next;
  }

  /* dump information */
  printk(KERN_INFO "max dcache entries: %4i",MAXCACHESIZE);
  printk(KERN_INFO "      free entries: %4i",c);

  /* count used entries */
  for (i=0,c=0,d=0,l=0,r=0;i<MAXHASH;i++) {
    j=dhash[i];
    while (j!=NIL) {
      c++;
      if (dcache[j].used!=0) l++;
      if (dcache[j].dirty) d++;
      if (!dcache[j].ready) r++;
      j=dcache[j].next;
    }
  }

  /* dump information */
  printk(KERN_INFO "    cached entries: %4i",c);
  printk(KERN_INFO "      used entries: %4i",l);
  printk(KERN_INFO "     dirty entries: %4i",d);
  printk(KERN_INFO " not ready entries: %4i",r);

  /* compute hash function quality factor */
  for (i=0,x=0;i<MAXHASH;i++) {
    j=dhash[i];
    lc=0;
    while (j!=NIL) {
      lc++;
      j=dcache[j].next;
    }
    f=((long)lc*MAXHASH-c);
    x+=f*f/(MAXHASH*2);
  }

  /* dump information */
  printk(KERN_INFO "hash quality: %6li",x);

  /* release all mutexes */
  __fs_mutex_unlock(&freemutex);
  __fs_mutex_unlock(&hashmutex);
}

/*++++++++++++++++++++++++++++++++++++++

  Module initialization; must be called before any other function of
  this module.

  int dcache_init
    return 0 on success, other values on failure.
  ++++++++++++++++++++++++++++++++++++++*/

int dcache_init(void)
{
  int i;

  /* mutex initialization */
  __fs_mutex_init(&freemutex);
  __fs_mutex_init(&hashmutex);

  /* free list and cache table initialization */
  freeindex=0;
  for (i=0;i<MAXCACHESIZE;i++) {
    magic_set(dcache[i].magic,DCACHE_MAGIC);
    dcache[i].next=i+1;
    dcache[i].prev=NIL;
    dcache[i].device=NULLDEV;
    dcache[i].lsector=0;
    dcache[i].used=-1;
    dcache[i].dirty=0;
    dcache[i].ready=0;
    dcache[i].numblocked=0;
    dcache[i].esyncreq=0;
    dcache[i].skipwt=0;
    __fs_sem_init(&dcache[i].sync,0);
    __fs_sem_init(&dcache[i].esync,0);
    __rwlock_init(&dcache[i].rwlock);
  }
  dcache[MAXCACHESIZE-1].next=NIL;

  /* hash buckets initialization */
  for (i=0;i<MAXHASH;i++)
    dhash[i]=NIL;

  return 0;
}

/*++++++++++++++++++++++++++++++++++++++

  Terminate the cache, flushing all dirty blocks, and prevent further
  requests from being made to the cache module; the only function
  that can be called after this is __dcache_flush().

  int dcache_end
    return 0 on success, other values on failure.

  int flag
    if set, acquire exclusive use of all cache entries
    before ending.
  ++++++++++++++++++++++++++++++++++++++*/

int dcache_end(int flag)
{
  int __dcache_flush(int flag);

  /* if a lock on all cache entries is required... */
  if (flag) {
    int i,k;
    __fs_mutex_lock(&hashmutex);

    /* block every request for other cache entries */
    __fs_mutex_lock(&freemutex);

    /* scan all hash buckets locking all cache entries */
    for (k=0;k<MAXHASH;k++) {
      i=dhash[k];
      while (i!=NIL) {
        /* so the cache entry can not be removed! */
        dcache[i].used++;
        __fs_mutex_unlock(&hashmutex);

        /* exclusive lock */
        __rwlock_wrlock(&dcache[i].rwlock);

        __fs_mutex_lock(&hashmutex);
        dcache[i].used=1;
        i=dcache[i].next;
      }
    }
    __fs_mutex_unlock(&hashmutex);
  }

  /* this mutex is not released, to prevent other modules from using this module */
  __fs_mutex_lock(&hashmutex);
  /* flush the cache */
  return __dcache_flush(flag);
}

/*
 *
 * GET/FREE cache entries FUNCTIONS
 *
 */


/*++++++++++++++++++++++++++++++++++++++

  Get a cache entry from the free list.

  int get_dcache
    return NIL on failure, an index into "dcache" on success.
  ++++++++++++++++++++++++++++++++++++++*/

static int get_dcache(void)
{
  int ret;

  /* remove the head of a list (if present) */

  __fs_mutex_lock(&freemutex);
  ret=freeindex;
  if (ret!=NIL) {
    freeindex=dcache[ret].next;
    _assert(dcache[ret].used==-1);
    dcache[ret].used=0;
    dcache[ret].skipwt=0;
    magic_assert(dcache[ret].magic,DCACHE_MAGIC,
                 "dcache: get_dcache(): cache_entry[%i] overwritten",
                 ret);
  }
  __fs_mutex_unlock(&freemutex);

  return ret;
}


/*++++++++++++++++++++++++++++++++++++++

  Insert a free cache entry into the free list.

  int index
    Index of the "dcache" entry to free.
  ++++++++++++++++++++++++++++++++++++++*/

static void free_dcache(int index)
{
  /* insert an element into a list */

  _assert(index>=0&&index<MAXCACHESIZE);
  _assert(dcache[index].used==0);

  dcache[index].used=-1;

  magic_assert(dcache[index].magic,DCACHE_MAGIC,
               "dcache: free_dcache(): cache_entry[%i] overwritten",
               index);

  __fs_mutex_lock(&freemutex);
  dcache[index].next=freeindex;
  freeindex=index;
  __fs_mutex_unlock(&freemutex);
}

/*
 *
 * HASH BUCKETS MANAGEMENT FUNCTIONS
 *
 */

/*++++++++++++++++++++++++++++++++++++++

  Insert a cache entry into a hash bucket.

  int index
    Entry to insert.
  ++++++++++++++++++++++++++++++++++++++*/

static void __insert_dcache(int index)
{
  int h;

  /* insert an element into a doubly linked list */

  _assert(index>=0&&index<MAXCACHESIZE);
  _assert(dcache[index].used==1);

  magic_assert(dcache[index].magic,DCACHE_MAGIC,
               "dcache: insert_dcache(): cache_entry[%i] overwritten",
               index);

  h=hash_fun(dcache[index].device,dcache[index].lsector);

  dcache[index].prev=NIL;
  dcache[index].hash=h;

  dcache[index].next=dhash[h];

  if (dhash[h]!=NIL) dcache[dhash[h]].prev=index;

  dhash[h]=index;
}


/*++++++++++++++++++++++++++++++++++++++

  Remove a cache entry from a hash bucket.

  int index
    Entry to remove.
  ++++++++++++++++++++++++++++++++++++++*/

static void __remove_dcache(int index)
{
  /* remove an element from a doubly linked list */

  _assert(index>=0&&index<MAXCACHESIZE);
  _assert(dcache[index].used==0);

  magic_assert(dcache[index].magic,DCACHE_MAGIC,
               "dcache: remove_dcache(): cache_entry[%i] overwritten",
               index);

  if (dcache[index].prev!=NIL)
    dcache[dcache[index].prev].next=dcache[index].next;
  else {
    dhash[dcache[index].hash]=dcache[index].next;
  }
  if (dcache[index].next!=NIL)
    dcache[dcache[index].next].prev=dcache[index].prev;
}

/*++++++++++++++++++++++++++++++++++++++

  Find an entry in a hash bucket to free.

  int __purge_dcache
    NIL on failure, the index of a freed cache entry on success.
  ++++++++++++++++++++++++++++++++++++++*/

static int __purge_dcache(void)
{
  int i,k;
  int flag;
  int ind;
  int res;
  __time_t t;

  /* TO IMPROVE!!! */

  /* brute-force and inefficient algorithm to remove an entry
   * from the cache:
   * -- all the entries are always (sigh) scanned
   * -- the oldest entry that is not in use and not dirty is chosen
   * -- otherwise, the oldest entry that is not in use
   * -- otherwise NIL (nothing is removed)
   */

  /*
   * search for an entry to purge
   */

  flag=1;
  ind=NIL;
  t=0xffffffff;

  for (k=0;k<MAXHASH;k++) {
    i=dhash[k];
    while (i!=NIL) {
      if (dcache[i].used==0) {
        if (dcache[i].dirty==0) {
          flag=0;
          if (dcache[i].time<t||(ind!=NIL&&dcache[ind].dirty==1)) {
            ind=i;
            t=dcache[i].time;
          }
        } else if (flag) {
          if (dcache[i].time<t) {
            ind=i;
            t=dcache[i].time;
          }
        }
      }
      i=dcache[i].next;
    }
  }

  /*
   * purging the entry
   */

  /* if an entry is found... */
  if (ind!=NIL) {
    /* if it is dirty... */
    if (dcache[ind].dirty) {

      /* the entry is dirty... try to write it to disk! */

      /* update reference count */
      dcache[ind].used++;
      /* clear the ready flag */
      dcache[ind].ready=0;
      /* safety stuff */
      _assert(dcache[ind].used==1);
      /* release mutex */
      __fs_mutex_unlock(&hashmutex);

      /* try to write... */
      /* there is no need to acquire the entry for writing... it is not used */
      res=bdev_write(dcache[ind].device,dcache[ind].lsector,
                     dcache[ind].buffer);

      /* acquire the mutex */
      __fs_mutex_lock(&hashmutex);

      /* if error while writing... */
      if (res!=0) {
        /* restore the old field values */
        dcache[ind].used--;
        dcache[ind].ready=1;
      }

      /* set flag if some thread has been blocked waiting for the "ready" field */
      flag=(dcache[ind].numblocked>0);

      /* wake up all threads waiting for synchronization */
      while (dcache[ind].numblocked>0) {
        __fs_sem_signal(&dcache[ind].sync);
        dcache[ind].numblocked--;
      }

      /* if error while writing... return NIL */
      if (res!=0) return NIL;

      /* to ensure that no other threads can be blocked on this entry */
      dcache[ind].device=NULLDEV;

      /* if some threads have been woken up... */
      if (flag) {
        /* we must wait until all woken up threads have made the error test... */
        /* request error synchronization */
        dcache[ind].esyncreq=1;
        /* release mutex */
        __fs_mutex_unlock(&hashmutex);

        /* wait for synchronization */
        __fs_sem_wait(&dcache[ind].esync);

        /* reacquire mutex */
        __fs_mutex_lock(&hashmutex);
        /* safety stuff */
        /*_assert(dcache[ind].used==1);*/
      }

      /* restore the old value */
      dcache[ind].used--;
      /* safety stuff */
      _assert(dcache[ind].used==0);
    }

    /* remove the entry from the hash bucket */
    __remove_dcache(ind);
  }

  return ind;
}

/*++++++++++++++++++++++++++++++++++++++

  Called when a cache entry has been modified (AFTER it has been
  modified, not before).

  WARNING: this function CAN be called only by the task that has
  acquired (for exclusive use) the cache entry.

  void dcache_dirty
    nothing to return.

  dcache_t *d
    entry that must be marked dirty.
  ++++++++++++++++++++++++++++++++++++++*/

#ifdef COPYBACK

void dcache_dirty(dcache_t *d)
{
  /* safety stuff */
  _assert(d->used==1);
  /* we use copy-back, so only set the flag */
  d->dirty=1;
}

/* used internally when we want to flush a cache entry during the
 * generic unlock functions
 */

static __inline__ void __flush_cache(dcache_t *d)
{
  /* nothing to do */
  return;
}


#endif

#ifdef WRITETHROUGHT

void dcache_dirty(dcache_t *d)
{
  /* safety stuff */
  _assert(d->used==1);
  /* we only set the flag */
  d->dirty=1;
}

/* used internally when we want to flush a cache entry during the
 * generic unlock functions
 */
static __inline__ void __flush_cache(dcache_t *d)
{
  int res;

#ifdef HONOURSKIPWTFLAG
  if (d->skipwt) return;
#endif

  if (d->dirty) {
    /* safety stuff */
    _assert(d->used==1);
    _assert(d-dcache>=0&&d-dcache<MAXCACHESIZE);

    /* try to write... */
    /* there is no need to modify/lock the entry (see the warning!) */
    res=bdev_write(d->device,d->lsector,d->buffer);

    /* if the write is OK... */
    if (res==0) d->dirty=0;
  }
}

#endif

/*++++++++++++++++++++++++++++++++++++++

  Find a free cache entry to use.

  int __catch_dcache
    NIL on failure, an index of a free cache entry on success.
  ++++++++++++++++++++++++++++++++++++++*/

static __inline__ int __catch_dcache(void)
{
  int ind;

  ind=get_dcache();
  if (ind==NIL) ind=__purge_dcache();
  return ind;
}

/*
 *
 * FLUSHING FUNCTIONS
 *
 */

/*++++++++++++++++++++++++++++++++++++++

  Flush all cache entries to disk; this function does not acquire the
  "hashmutex" mutex, so it can be called only during system shutdown (with
  no running threads) or after a call to dcache_end().
  The function dcache_end() calls this function before ending.

  int __dcache_flush
    return 0 on success, other values on failure.

  int flag
    if set, it means that we have exclusive access to the cache memory; if
    we have exclusive access the "used" field must be 1, else it must be 0.
  ++++++++++++++++++++++++++++++++++++++*/

int __dcache_flush(int flag)
{
  int i,k,ret,res;

  ret=0;
  /* for every hash bucket... */
  for (k=0;k<MAXHASH;k++) {
    i=dhash[k];
    /* scan all entries... */
    while (i!=NIL) {
      _assert(dcache[i].used==(flag?1:0));
      /* for every dirty entry... */
      if (dcache[i].dirty) {
        res=bdev_write(dcache[i].device,dcache[i].lsector,dcache[i].buffer);
        if (res==0)
          dcache[i].dirty=0;
        else
          ret=-1;
      }
      /* go to the next entry */
      i=dcache[i].next;
    }
  }

  return ret;
}


/*++++++++++++++++++++++++++++++++++++++

  Purge a device from the cache; remove every reference to the device
  from the cache.

  int dcache_purgedevice
    return 0 on success, -1 on failure.

  __dev_t dev
    device to purge.

  int cont
    if set, continue on disk write errors.
  ++++++++++++++++++++++++++++++++++++++*/

int dcache_purgedevice(__dev_t dev, int cont)
{
  int retvalue=0;
  int i,k,inext;
  int res;

  printka("dcache_purgedevice() START for device(%08lx)",(long)dev);

  __fs_mutex_lock(&hashmutex);

  /* prevent other threads from caching this device */
  blocked_device=dev;

  for (k=0;k<MAXHASH;k++) {
    i=dhash[k];
    /* scan all entries... */
    while (i!=NIL) {

      /* remember the next entry */
      inext=dcache[i].next;

      if (dcache[i].device==dev) {
        /* an entry to purge is found... */
        //printka("  found %li",dcache[i].lsector);

        _assert(dcache[i].used!=-1);
        while (dcache[i].used!=0) {
          /* someone is using this entry... we must wait! */

          dcache[i].used++;
          __fs_mutex_unlock(&hashmutex);

          __rwlock_wrlock(&dcache[i].rwlock);
          __rwlock_wrunlock(&dcache[i].rwlock);

          __fs_mutex_lock(&hashmutex);
          dcache[i].used--;

          /* we must loop because, in general, we do not know how */
          /* the __rwlock queues are managed; the loop must quit because */
          /* we prevent other threads from being inserted into these queues */

          /* we must update 'inext' */
          inext=dcache[i].next;
        }

        if (dcache[i].dirty!=0) {
          /* the cache entry is dirty... we must write it to disk */
          printka("  dirty %li",dcache[i].lsector);

          dcache[i].used++;
          __fs_mutex_unlock(&hashmutex);

          res=bdev_write(dcache[i].device,dcache[i].lsector,dcache[i].buffer);

          __fs_mutex_lock(&hashmutex);
          dcache[i].used--;

          /* we must update 'inext' */
          inext=dcache[i].next;

          if (res!=0) {
            /* an error! */
            retvalue=-1;
            if (!cont) {
              /* ...we can not continue! */
              blocked_device=makedev(MAJOR_B_NULL,0);
              __fs_mutex_unlock(&hashmutex);
              return -1;
            }
          }

        }

        /* now we can purge the entry */
        __remove_dcache(i);

      }

      /* go to the next entry */
      i=inext;
    }
  }

  blocked_device=makedev(MAJOR_B_NULL,0);
  __fs_mutex_unlock(&hashmutex);

  printka("dcache_purgedevice() END");

  return retvalue;
}

/*++++++++++++++++++++++++++++++++++++++

  Flush the whole cache to disk; it can be called at any time, but it does
  not guarantee that there are no more dirty cache entries when it returns.

  int dcache_flush
    return 0 on success, other values on failure.
  ++++++++++++++++++++++++++++++++++++++*/

int dcache_flush(void)
{
  int i,k,ret,res;

  __fs_mutex_lock(&hashmutex);
  ret=0;
  for (k=0;k<MAXHASH;k++) {
    i=dhash[k];
    /* for every dirty cache entry... */
    while (i!=NIL) {

      magic_assert(dcache[i].magic,DCACHE_MAGIC,
                   "dcache: dcache_flush(): cache_entry[%i] overwritten",
                   i);

      if (dcache[i].dirty) {
        /* to prevent purging of the cache entry */
        dcache[i].used++;
        __fs_mutex_unlock(&hashmutex);

        /* acquire a "read lock" (we do not modify the cache) to write to disk */
        __rwlock_rdlock(&dcache[i].rwlock);
        /* write to disk */
        res=bdev_write(dcache[i].device,dcache[i].lsector,dcache[i].buffer);
        /* release the "read lock" */
        __rwlock_rdunlock(&dcache[i].rwlock);

        __fs_mutex_lock(&hashmutex);
        dcache[i].used--;
        if (res==0)
          dcache[i].dirty=0;
        else
          ret=-1;
      }
      i=dcache[i].next;
    }
  }
  __fs_mutex_unlock(&hashmutex);

  return ret;
}

/*
 *
 * LOCKING/UNLOCKING FUNCTIONS
 *
 *
 * the external modules are responsible for nested locks; they can
 * deadlock some threads if they lock blocks in a nested way without
 * a "locking protocol"
 */


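/*
 * Illustrative sketch (not part of the original module): the intended
 * usage pattern of the locking API from an external module, for a
 * hypothetical device "dev" and block "lsector".  How the block data in
 * the "buffer" field is read or modified is left abstract; the helper is
 * kept out of the build with "#if 0".
 */
#if 0
static int __example_read_then_modify(__dev_t dev, __blkcnt_t lsector)
{
  dcache_t *d;

  /* shared access: lock the block for reading */
  d=dcache_lock(dev,lsector);
  if (d==NULL) return -1;
  /* ...read from d->buffer... */
  dcache_unlock(d);

  /* exclusive access: acquire the block for writing */
  d=dcache_acquire(dev,lsector);
  if (d==NULL) return -1;
  /* ...modify d->buffer... */
  dcache_dirty(d);    /* MUST be called AFTER the entry has been modified */
  dcache_release(d);

  return 0;
}
#endif
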
/*++++++++++++++++++++++++++++++++++++++

  Lock (for read or write) a cache entry.

  static __inline__ dcache_t *dcache_genlock
    return a pointer to the locked cache entry or NULL in case of error.

  __dev_t device
    device where the block to lock resides.

  __blkcnt_t lsector
    block to lock.

  void (*lockfunc)(__rwlock_t*)
    function used to lock; must be __rwlock_rdlock (for read) or
    __rwlock_wrlock (for write).
  ++++++++++++++++++++++++++++++++++++++*/

static __inline__ dcache_t *dcache_genlock(__dev_t device,
                                           __blkcnt_t lsector,
                                           void (*lockfunc)(__rwlock_t*))
{
  int h,i;
  int res;

  /* compute the hash value */
  h=hash_fun(device,lsector);

  /* acquire the mutex */
  __fs_mutex_lock(&hashmutex);

  /*
   * STAGE 1: search the hash list
   */

  /* get the first entry */
  i=dhash[h];
  /* while there are other cache entries... */
  while (i!=NIL) {
    if (dcache[i].device==device&&dcache[i].lsector==lsector) {
      /* found... */
      /* safety stuff */
      _assert(dcache[i].used>=0);
      /* update the "used" count */
      dcache[i].used++;
      /* but is it ready? */
      if (!dcache[i].ready) {
        /* not ready! */
        /* someone has already made this request but the cache entry is not ready */
        dcache[i].numblocked++;
        /* release the mutex */
        __fs_mutex_unlock(&hashmutex);
        /* wait for synchronization */
        __fs_sem_wait(&dcache[i].sync);
        /* the sync has arrived, but is it ready? */
        if (!dcache[i].ready) {
          /* not ready! an error has occurred */
          /* reacquire the mutex */
          __fs_mutex_lock(&hashmutex);
          /* safety stuff */
          _assert(dcache[i].used>0);
          /* update the "used" count */
          dcache[i].used--;
          /* if error synchronization has been requested... */
          if (dcache[i].esyncreq) {
            /* sync requested */
            /* if this is the last thread (other than the waiting thread)... */
            if (dcache[i].used==1)
              /* signal error synchronization */
              __fs_sem_signal(&dcache[i].esync);
          } else {
            /* if there are no other threads... */
            if (dcache[i].used==0) {
              /* free this entry */
              __remove_dcache(i);
              free_dcache(i);
            }
          }
          /* release the mutex */
          __fs_mutex_unlock(&hashmutex);
          /* nothing to return */
          return NULL;
        }
        /* acquire the "rwlock" */
        (*lockfunc)(&dcache[i].rwlock);
        /* return the cache entry */
        return dcache+i;
      }
      /* ready! */
      /* release the mutex */
      __fs_mutex_unlock(&hashmutex);
      /* acquire the "rwlock" */
      (*lockfunc)(&dcache[i].rwlock);
      /* safety stuff */
      magic_assert(dcache[i].magic,DCACHE_MAGIC,
                   "dcache: dcache_genlock() 1: cache_entry[%i] overwritten",
                   i);
      /* return the cache entry */
      return dcache+i;
    }
    /* get the next entry */
    i=dcache[i].next;
  }

  /*
   * STAGE 2: create a new dcache entry
   */

  /* PS: if __catch_dcache() calls __purge_dcache() there is a great overhead
   * => to improve efficiency, a server that maintains x% of the cache free
   * is required
   */
  i=__catch_dcache();
  if (i==NIL) {
    /* there is no space for a new dcache entry... */
    __fs_mutex_unlock(&hashmutex);
    return NULL;
  }

  /* a free dcache entry has been found... fill the entry */
  dcache[i].used=1;
  dcache[i].dirty=0;
  dcache[i].device=device;
  dcache[i].lsector=lsector;
  dcache[i].ready=0;
  dcache[i].numblocked=0;
  dcache[i].esyncreq=0;

  /* insert the entry into the cache data structures */
  __insert_dcache(i);

  /* release the mutex */
  __fs_mutex_unlock(&hashmutex);

  /* the read is done after the hashmutex has been unlocked to allow */
  /* thread concurrency */
  res=bdev_read(device,lsector,dcache[i].buffer);

  /* acquire the mutex */
  __fs_mutex_lock(&hashmutex);

  /* safety stuff */
  _assert(dcache[i].used>0);
  _assert(dcache[i].ready==0);

  /* if the read is OK... */
  if (res==0) {
    /* cache entry ready */
    dcache[i].ready=1;
    /* acquire the rwlock */
    (*lockfunc)(&dcache[i].rwlock);
  }

  /* wake up all threads waiting for synchronization */
  while (dcache[i].numblocked>0) {
    __fs_sem_signal(&dcache[i].sync);
    dcache[i].numblocked--;
  }

  /* if the read is not OK... */
  if (res!=0) {
    /* to prevent other threads from taking this cache entry */
    dcache[i].device=NULLDEV;
    /* update the cache "thread" count */
    dcache[i].used--;
    /* if there are no other threads... */
    if (dcache[i].used==0) {
      /* free this entry */
      __remove_dcache(i);
      free_dcache(i);
    }
    /* release the mutex */
    __fs_mutex_unlock(&hashmutex);
    /* nothing to return */
    return NULL;
  }

  magic_assert(dcache[i].magic,DCACHE_MAGIC,
               "dcache: dcache_genlock() 2: cache_entry[%i] overwritten",
               i);

  __fs_mutex_unlock(&hashmutex);
  return dcache+i;
}

/*++++++++++++++++++++++++++++++++++++++

  Unlock a previously locked cache entry.

  dcache_t *ptr
    cache entry to unlock.

  void (*unlockfunc)(__rwlock_t*)
    unlocking function; must be __rwlock_rdunlock (for read) or
    __rwlock_wrunlock (for write).
  ++++++++++++++++++++++++++++++++++++++*/

static __inline__ void dcache_genunlock(dcache_t *ptr,
                                        void (*unlockfunc)(__rwlock_t*))
{
  /* safety stuff */
  _assert(ptr>=dcache&&ptr<dcache+MAXCACHESIZE);
  /* try to flush the cache entry if it is dirty */
  /* (this function depends on write-through/copy-back) */
  __flush_cache(ptr);
  /* acquire the mutex */
  __fs_mutex_lock(&hashmutex);
  /* safety stuff */
  _assert(ptr->used>0);
  /* update some fields */
  ptr->used--;
  ptr->time=gettimek();
  /* DANGER!!! I hope this works */
  /* unlock the cache entry */
  (*unlockfunc)(&ptr->rwlock);
  /* release the mutex */
  __fs_mutex_unlock(&hashmutex);
}

/*++++++++++++++++++++++++++++++++++++++

  Lock a cache entry used for READING.

  dcache_t *dcache_lock
    return a locked cache entry or NULL in case of error.

  __dev_t device
    device where the block to lock resides.

  __blkcnt_t lsector
    block to lock.
  ++++++++++++++++++++++++++++++++++++++*/

dcache_t *dcache_lock(__dev_t device, __blkcnt_t lsector)
{
  printk8("LOCK read request for (%04x,%li)",device,(long)lsector);
  if (device==blocked_device) return NULL;
  return dcache_genlock(device,lsector,__rwlock_rdlock);
}

/*++++++++++++++++++++++++++++++++++++++

  Unlock a cache entry used for READING.

  dcache_t *ptr
    pointer to the cache entry to unlock.
  ++++++++++++++++++++++++++++++++++++++*/

void dcache_unlock(dcache_t *ptr)
{
  printk8("LOCK read release for (%04x,%li)",ptr->device,(long)ptr->lsector);
  return dcache_genunlock(ptr,__rwlock_rdunlock);
}

/*++++++++++++++++++++++++++++++++++++++

  Lock a cache entry used for WRITING (i.e. acquire it for exclusive use).
  If we write into the cache we MUST call dcache_dirty().

  dcache_t *dcache_acquire
    return a locked cache entry or NULL in case of error.

  __dev_t device
    device where the block to lock resides.

  __blkcnt_t lsector
    block to lock.
  ++++++++++++++++++++++++++++++++++++++*/

dcache_t *dcache_acquire(__dev_t device, __blkcnt_t lsector)
{
  printk8("LOCK write request for (%04x,%li)",device,(long)lsector);
  if (device==blocked_device) return NULL;
  return dcache_genlock(device,lsector,__rwlock_wrlock);
}

/*++++++++++++++++++++++++++++++++++++++

  Unlock a cache entry used for WRITING (i.e. acquired for exclusive use).

  dcache_t *ptr
    pointer to the cache entry to unlock.
  ++++++++++++++++++++++++++++++++++++++*/

void dcache_release(dcache_t *ptr)
{
  printk8("LOCK write released for (%04x,%li)",ptr->device,(long)ptr->lsector);
  return dcache_genunlock(ptr,__rwlock_wrunlock);
}