1 Star 0 Fork 1

wjh731/disksim_original

forked from 颜明博/disksim_original 
加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
文件
克隆/下载
disksim_cachemem.c 80.11 KB
一键复制 编辑 原始数据 按行查看 历史
颜明博 提交于 2018-06-25 19:31 . first commit
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554
/*
* DiskSim Storage Subsystem Simulation Environment (Version 3.0)
* Revision Authors: John Bucy, Greg Ganger
* Contributors: John Griffin, Jiri Schindler, Steve Schlosser
*
* Copyright (c) of Carnegie Mellon University, 2001, 2002, 2003.
*
* This software is being provided by the copyright holders under the
* following license. By obtaining, using and/or copying this software,
* you agree that you have read, understood, and will comply with the
* following terms and conditions:
*
* Permission to reproduce, use, and prepare derivative works of this
* software is granted provided the copyright and "No Warranty" statements
* are included with all reproductions and derivative works and associated
* documentation. This software may also be redistributed without charge
* provided that the copyright and "No Warranty" statements are included
* in all redistributions.
*
* NO WARRANTY. THIS SOFTWARE IS FURNISHED ON AN "AS IS" BASIS.
* CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER
* EXPRESSED OR IMPLIED AS TO THE MATTER INCLUDING, BUT NOT LIMITED
* TO: WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY
* OF RESULTS OR RESULTS OBTAINED FROM USE OF THIS SOFTWARE. CARNEGIE
* MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT
* TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
* COPYRIGHT HOLDERS WILL BEAR NO LIABILITY FOR ANY USE OF THIS SOFTWARE
* OR DOCUMENTATION.
*
*/
/*
* DiskSim Storage Subsystem Simulation Environment (Version 2.0)
* Revision Authors: Greg Ganger
* Contributors: Ross Cohen, John Griffin, Steve Schlosser
*
* Copyright (c) of Carnegie Mellon University, 1999.
*
* Permission to reproduce, use, and prepare derivative works of
* this software for internal use is granted provided the copyright
* and "No Warranty" statements are included with all reproductions
* and derivative works. This software may also be redistributed
* without charge provided that the copyright and "No Warranty"
* statements are included in all redistributions.
*
* NO WARRANTY. THIS SOFTWARE IS FURNISHED ON AN "AS IS" BASIS.
* CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER
* EXPRESSED OR IMPLIED AS TO THE MATTER INCLUDING, BUT NOT LIMITED
* TO: WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY
* OF RESULTS OR RESULTS OBTAINED FROM USE OF THIS SOFTWARE. CARNEGIE
* MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT
* TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
*/
/*
* DiskSim Storage Subsystem Simulation Environment
* Authors: Greg Ganger, Bruce Worthington, Yale Patt
*
* Copyright (C) 1993, 1995, 1997 The Regents of the University of Michigan
*
* This software is being provided by the copyright holders under the
* following license. By obtaining, using and/or copying this software,
* you agree that you have read, understood, and will comply with the
* following terms and conditions:
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose and without fee or royalty is
* hereby granted, provided that the full text of this NOTICE appears on
* ALL copies of the software and documentation or portions thereof,
* including modifications, that you make.
*
* THIS SOFTWARE IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS MAKE NO
* REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE,
* BUT NOT LIMITATION, COPYRIGHT HOLDERS MAKE NO REPRESENTATIONS OR
* WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR
* THAT THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY
* THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS. COPYRIGHT
* HOLDERS WILL BEAR NO LIABILITY FOR ANY USE OF THIS SOFTWARE OR
* DOCUMENTATION.
*
* This software is provided AS IS, WITHOUT REPRESENTATION FROM THE
* UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY PURPOSE, AND
* WITHOUT WARRANTY BY THE UNIVERSITY OF MICHIGAN OF ANY KIND, EITHER
* EXPRESSED OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE REGENTS
* OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE FOR ANY DAMAGES,
* INCLUDING SPECIAL , INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
* WITH RESPECT TO ANY CLAIM ARISING OUT OF OR IN CONNECTION WITH THE
* USE OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN IF IT HAS
* BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* The names and trademarks of copyright holders or authors may NOT be
* used in advertising or publicity pertaining to the software without
* specific, written prior permission. Title to copyright in this software
* and any associated documentation will at all times remain with copyright
* holders.
*/
/* LAME! why can't disksim_cache.h be included here? */
#define CACHE_MEMORY 1
#include "disksim_global.h"
#include "disksim_iosim.h"
#include "disksim_ioqueue.h"
#include "disksim_cachemem.h"
#include "config.h"
#define CACHE_MAXSEGMENTS 10 /* For S-LRU */
#define CACHE_HASHSIZE (ALLOCSIZE/sizeof(int))
#define CACHE_HASHMASK (0x00000000 | (CACHE_HASHSIZE - 1))
/* cache replacement policies */
#define CACHE_REPLACE_MIN 1
#define CACHE_REPLACE_FIFO 1
#define CACHE_REPLACE_SLRU 2
#define CACHE_REPLACE_RANDOM 3
#define CACHE_REPLACE_LIFO 4
#define CACHE_REPLACE_MAX 4
/* state components of atom */
#define CACHE_VALID 0x80000000
#define CACHE_DIRTY 0x40000000
#define CACHE_LOCKDOWN 0x20000000
#define CACHE_LOCKED 0x10000000
#define CACHE_ATOMFLUSH 0x08000000
#define CACHE_REALLOCATE_WRITE 0x04000000
#define CACHE_SEGNUM 0x000000FF /* for S-LRU */
/* cache event flags */
#define CACHE_FLAG_WASBLOCKED 1
#define CACHE_FLAG_LINELOCKED_ALLOCATE 2
/* cache event types */
#define CACHE_EVENT_IOREQ 0
#define CACHE_EVENT_ALLOCATE 1
#define CACHE_EVENT_READ 2
#define CACHE_EVENT_WRITE 3
#define CACHE_EVENT_SYNC 4
#define CACHE_EVENT_SYNCPART 5
#define CACHE_EVENT_READEXTRA 6
#define CACHE_EVENT_WRITEFILLEXTRA 7
#define CACHE_EVENT_IDLESYNC 8
/* cache write schemes */
#define CACHE_WRITE_MIN 1
#define CACHE_WRITE_SYNCONLY 1
#define CACHE_WRITE_THRU 2
#define CACHE_WRITE_BACK 3
#define CACHE_WRITE_MAX 3
/* cache allocate policy flags */
#define CACHE_ALLOCATE_MIN 0
#define CACHE_ALLOCATE_NONDIRTY 1
#define CACHE_ALLOCATE_MAX 1
/* cache prefetch types */
#define CACHE_PREFETCH_MIN 0
#define CACHE_PREFETCH_NONE 0
#define CACHE_PREFETCH_FRONTOFLINE 1
#define CACHE_PREFETCH_RESTOFLINE 2
#define CACHE_PREFETCH_ALLOFLINE 3
#define CACHE_PREFETCH_MAX 3
/* cache background flush types */
#define CACHE_FLUSH_MIN 0
#define CACHE_FLUSH_DEMANDONLY 0
#define CACHE_FLUSH_PERIODIC 1
#define CACHE_FLUSH_MAX 1
#define CACHE_LOCKSPERSTRUCT 15
/* One chunk of a read-lock holder list: up to CACHE_LOCKSPERSTRUCT
 * request pointers, with additional chunks chained via next when more
 * concurrent holders exist. */
typedef struct cachelockh {
   struct ioreq_ev *entry[CACHE_LOCKSPERSTRUCT];
   struct cachelockh *next;
} cache_lockholders;
/* One chunk of a lock-waiter list: cache events blocked waiting for a
 * lock, chained in chunks of CACHE_LOCKSPERSTRUCT entries. */
typedef struct cachelockw {
   struct cacheevent *entry[CACHE_LOCKSPERSTRUCT];
   struct cachelockw *next;
} cache_lockwaiters;
/* The atom is the unit of cache residence and bookkeeping.  Each atom
 * sits on three chains: a hash-bucket chain (lookup by lbn), a line
 * chain (atoms of the same cache line), and a circular LRU/free chain
 * used by replacement.  Lock state is stored only on the anchor atom of
 * each lock-granularity group (see cache_get_write_lock). */
typedef struct cacheatom {
   struct cacheatom *hash_next;     /* hash-bucket chain */
   struct cacheatom *hash_prev;
   struct cacheatom *line_next;     /* atoms of the same cache line */
   struct cacheatom *line_prev;
   int devno;                       /* device the cached block belongs to */
   int lbn;                         /* logical block number of this atom */
   int state;                       /* CACHE_VALID/DIRTY/... flags; low CACHE_SEGNUM bits hold the S-LRU segment */
   struct cacheatom *lru_next;      /* circular LRU or free-list linkage */
   struct cacheatom *lru_prev;
   cache_lockholders *readlocks;    /* current read-lock holders (anchor atom only) */
   ioreq_event *writelock;          /* current write-lock holder, or NULL */
   cache_lockwaiters *lockwaiters;  /* events blocked on this lock group */
   int busno;                       /* bus/slot recorded for issuing disk accesses */
   int slotno;
} cache_atom;
/* Descriptor for an in-progress cache operation.  The leading
 * time/type/next/prev fields mirror the simulator's generic event
 * layout -- presumably so these descriptors can sit on event queues;
 * verify against disksim_global.h. */
typedef struct cacheevent {
   double time;
   int type;                        /* CACHE_EVENT_* code */
   struct cacheevent *next;
   struct cacheevent *prev;
   void (**donefunc)(void *,ioreq_event *); /* Function to call when complete */
   void *doneparam;                 /* parameter for donefunc */
   int flags;                       /* CACHE_FLAG_* bits */
   ioreq_event *req;                /* the I/O request being serviced */
   int accblkno;                    /* start blkno of waited for ioacc */
   cache_atom *cleaned;
   cache_atom *lineprev;
   int locktype;                    /* 1 = waiting for a write lock, 0 = read lock */
   int lockstop;
   int allocstop;
   struct cacheevent *waitees;
   int validpoint;
} cache_event;
/* Per-cache event counters, grouped as: read hits/misses (by where in
 * the request the hit occurred), fill reads, write hits/misses,
 * write-induced fills, destage writes, and get/free block activity. */
typedef struct {
   int reads;
   int readatoms;
   int readhitsfull;
   int readhitsfront;
   int readhitsback;
   int readhitsmiddle;
   int readmisses;
   int fillreads;
   int fillreadatoms;
   int writes;
   int writeatoms;
   int writehitsclean;
   int writehitsdirty;
   int writemisses;
   int writeinducedfills;
   int writeinducedfillatoms;
   int destagewrites;
   int destagewriteatoms;
   int getblockreadstarts;
   int getblockreaddones;
   int getblockwritestarts;
   int getblockwritedones;
   int freeblockcleans;
   int freeblockdirtys;
} cache_stats;
typedef struct {   /* per-set structure for set-associative */
   cache_atom *freelist;                 /* circular list of unallocated atoms */
   int space;
   cache_atom *lru[CACHE_MAXSEGMENTS];   /* one circular LRU list per S-LRU segment */
   int numactive[CACHE_MAXSEGMENTS];     /* current population of each segment */
   int maxactive[CACHE_MAXSEGMENTS];     /* capacity limit of each segment */
} cache_mapentry;
/* Top-level descriptor for one cache-memory instance: the atom hash
 * table, callbacks into the owning controller/driver, configuration
 * parameters, per-set replacement state, and statistics. */
typedef struct cache_def {
   int cachetype;          /* all caches must start with an integer type */
   cache_atom *hash[CACHE_HASHSIZE];   /* atom lookup, bucketed by (lbn & CACHE_HASHMASK) */
   void (**issuefunc)(void *,ioreq_event *); /* to issue a disk access */
   void *issueparam;       /* first param for issuefunc */
   struct ioq * (**queuefind)(void *,int); /* to get ioqueue ptr for dev*/
   void *queuefindparam;   /* first param for queuefind */
   void (**wakeupfunc)(void *,struct cacheevent *); /* to re-activate slept proc */
   void *wakeupparam;      /* first param for wakeupfunc */
   int size;               /* in 512B blks */
   int atomsize;
   int numsegs;            /* for S-LRU */
   int linesize;           /* blocks per cache line (see cache_concatok) */
   int atomsperbit;
   int lockgran;           /* blocks covered by one lock; assumed a factor of linesize */
   int sharedreadlocks;    /* nonzero: readers share locks; 0: every lock is exclusive */
   int maxreqsize;
   int replacepolicy;      /* CACHE_REPLACE_* */
   int mapmask;            /* set selector: set = lbn % mapmask; 0 means single set -- TODO confirm */
   int writescheme;        /* CACHE_WRITE_* */
   int read_prefetch_type;       /* CACHE_PREFETCH_* */
   int writefill_prefetch_type;  /* CACHE_PREFETCH_* */
   int prefetch_waitfor_locks;
   int startallflushes;
   int allocatepolicy;     /* CACHE_ALLOCATE_* flags */
   int read_line_by_line;
   int write_line_by_line;
   int maxscatgath;        /* max extra lines spanned by one concatenated access */
   int no_write_allocate;
   int flush_policy;       /* CACHE_FLUSH_* */
   double flush_period;
   double flush_idledelay;
   int flush_maxlinecluster;
   cache_mapentry *map;    /* per-set free list and LRU segments */
   int linebylinetmp;
   cache_event *IOwaiters;    /* ops waiting for outstanding disk I/O to finish */
   cache_event *partwrites;
   cache_event *linewaiters;  /* tail of circular list of allocations awaiting a line */
   cache_stats stat;
   char *name;
} cache_def;
/* internal prototypes */
static int cache_read_continue (cache_def *cache, cache_event *readdesc);
static int cache_write_continue (cache_def *cache, cache_event *writedesc);
/* Return the integer type tag that every cache structure stores as its
 * first field.  The cache pointer must be non-NULL. */
int cachemem_get_cachetype (cache_def *cache)
{
   ASSERT (cache != NULL);
   return cache->cachetype;
}
/* Return the configured maximum request size, or 0 when no cache is
 * attached. */
int cachemem_get_maxreqsize (cache_def *cache)
{
   return (cache != NULL) ? cache->maxreqsize : 0;
}
/* Completion callback used when no caller-specific completion work is
 * needed: simply recycles the finished request onto the extra queue. */
static void cache_empty_donefunc (void *doneparam, ioreq_event *req)
{
   addtoextraq((event *) req);
}
/* Decide whether two accesses may be concatenated into one request.
 * Concatenation is refused only when the combined request would span
 * more cache lines than the configured scatter/gather limit allows.
 * Returns 1 to permit the merge, 0 to forbid it. */
static int cache_concatok (void *concatokparam, int blkno1, int bcount1, int blkno2, int bcount2)
{
   cache_def *cache = concatokparam;
   int linesize;
   int firstline;
   int lastline;

   if ((cache->size == 0) || (cache->maxscatgath == 0)) {
      return 1;                 /* no cache or no scatter/gather limit */
   }
   linesize = max(cache->linesize, 1);
   firstline = blkno1 / linesize;
   lastline = (blkno2 + bcount2 - 1) / linesize;
   if ((lastline - firstline) > cache->maxscatgath) {
      return 0;                 /* would exceed scatter/gather capacity */
   }
   return 1;
}
/* Register "cachereq" as waiting for the disk access "ioacc": push it
 * onto the front of the cache's doubly-linked IOwaiters list and record
 * the access's starting block so the completion can be matched later. */
static void cache_waitfor_IO (cache_def *cache, int waitcnt, cache_event *cachereq, ioreq_event *ioacc)
{
   cache_event *oldhead = cache->IOwaiters;

   cachereq->prev = NULL;
   cachereq->next = oldhead;
   if (oldhead != NULL) {
      oldhead->prev = cachereq;
   }
   cache->IOwaiters = cachereq;
   cachereq->accblkno = ioacc->blkno;
}
/* Link a newly allocated atom onto the front of its hash bucket. */
static void cache_insert_new_into_hash (cache_def *cache, cache_atom *new)
{
   cache_atom **bucket = &cache->hash[(new->lbn & CACHE_HASHMASK)];

   new->hash_prev = NULL;
   new->hash_next = *bucket;
   if (*bucket != NULL) {
      (*bucket)->hash_prev = new;
   }
   *bucket = new;
}
/* Unlink "old" from its hash bucket; the atom must currently be in the
 * table.  Its hash links are cleared on the way out. */
static void cache_remove_entry_from_hash (cache_def *cache, cache_atom *old)
{
   cache_atom **bucket = &cache->hash[(old->lbn & CACHE_HASHMASK)];

   /* Line must be in hash if to be removed! */
   ASSERT((old->hash_prev != NULL) || (old->hash_next != NULL) || (*bucket == old));
   if (old->hash_prev == NULL) {
      *bucket = old->hash_next;       /* was the bucket head */
   } else {
      old->hash_prev->hash_next = old->hash_next;
   }
   if (old->hash_next != NULL) {
      old->hash_next->hash_prev = old->hash_prev;
   }
   old->hash_next = NULL;
   old->hash_prev = NULL;
}
/* Count the dirty atoms currently resident in the cache by walking
 * every hash bucket.  Returns the total number of atoms with
 * CACHE_DIRTY set. */
static int cache_count_dirty_atoms (cache_def *cache)
{
   int i;
   int dirty = 0;
   /* BUG FIX: the loop bound was CACHE_HASHMASK (== CACHE_HASHSIZE-1),
    * which skipped the final hash bucket and undercounted dirty atoms.
    * All buckets 0..CACHE_HASHSIZE-1 must be scanned. */
   for (i=0; i<CACHE_HASHSIZE; i++) {
      cache_atom *tmp = cache->hash[i];
      while (tmp) {
         dirty += (tmp->state & CACHE_DIRTY) ? 1 : 0;
         tmp = tmp->hash_next;
      }
   }
   return(dirty);
}
/* Hash-table lookup: return the resident atom for (devno, lbn), or
 * NULL when that block is not cached. */
static cache_atom * cache_find_atom (cache_def *cache, int devno, int lbn)
{
   cache_atom *curr;

   for (curr = cache->hash[(lbn & CACHE_HASHMASK)]; curr != NULL; curr = curr->hash_next) {
      if ((curr->lbn == lbn) && (curr->devno == devno)) {
         break;
      }
   }
   return curr;
}
#if 0
/* Disabled in the original source: unlinks the atom for (devno, lbn)
 * from the hash table if it is resident; a no-op otherwise. */
static void cache_remove_lbn_from_hash (cache_def *cache, int devno, int lbn)
{
   cache_atom *tmp;
   if ((tmp = cache_find_atom(cache, devno, lbn))) {
      cache_remove_entry_from_hash(cache, tmp);
   }
}
#endif
#if 0
/* Disabled in the original source: for each of "size" consecutive
 * blocks starting at lbn, sets the corresponding bit in the "miss"
 * bitmap when the block is absent or not valid.  NOTE(review): relies
 * on INV_BITS_PER_INT_MASK / BITS_PER_INT_MASK from the global headers;
 * confirm bit ordering before re-enabling. */
static void cache_check_for_residence (cache_def *cache, int devno, int lbn, int size, int *miss)
{
   cache_atom *line = NULL;
   int i;
   for (i=0; i<size; i++) {
      if (line == NULL) {
         /* re-acquire a starting atom whenever the line chain runs out */
         line = cache_find_atom(cache, devno, (lbn + i));
      }
      if ((line == NULL) || ((line->state & CACHE_VALID) == 0)) {
         miss[(i & INV_BITS_PER_INT_MASK)] |= 1 << (i & BITS_PER_INT_MASK);
      }
      if (line) {
         line = line->line_next;
      }
   }
}
#endif
#if 0
/* Use for setting VALID, LOCKDOWN, DIRTY and other atom state bits */
/* Disabled in the original source: ORs "mask" into the state of each of
 * the "size" atoms starting at lbn; every atom must be resident. */
static void cache_set_state (cache_def *cache, int devno, int lbn, int size, int mask)
{
   cache_atom *line = NULL;
   int i;
   for (i=0; i<size; i++) {
      if (line == NULL) {
         line = cache_find_atom(cache, devno, (lbn + i));
      }
      /* Can't change state of unallocated cache atoms */
      ASSERT(line != NULL);
      line->state |= mask;
      line = line->line_next;
   }
}
#endif
#if 0
/* Use for clearing VALID, LOCKDOWN, DIRTY and other atom state bits */
/* Disabled in the original source: clears the "mask" bits from the
 * state of each of the "size" atoms starting at lbn; every atom must
 * be resident. */
static void cache_reset_state (cache_def *cache, int devno, int lbn, int size, int mask)
{
   cache_atom *line = NULL;
   int i;
   for (i=0; i<size; i++) {
      if (line == NULL) {
         line = cache_find_atom(cache, devno, (lbn + i));
      }
      /* Can't change state of unallocated cache atoms */
      ASSERT(line != NULL);
      line->state &= ~mask;
      line = line->line_next;
   }
}
#endif
/* Insert "line" into the circular doubly-linked list for LRU segment
 * "segnum" of this map entry, or into the free list when segnum is the
 * sentinel CACHE_SEGNUM.  The entry is spliced in immediately before
 * *head in the circular order (the head pointer itself is unchanged),
 * and the segment number is recorded in the low bits of line->state. */
static void cache_add_to_lrulist (cache_mapentry *map,
                                  cache_atom *line,
                                  int segnum)
{
   cache_atom **head;
   if (segnum == CACHE_SEGNUM) {
      head = &map->freelist;
   } else {
      head = &map->lru[segnum];
      map->numactive[segnum]++;     /* the free list keeps no active count */
   }
   line->state |= segnum;           /* remember which list now holds this line */
   if (*head) {
      /* non-empty list: splice in just before the head */
      line->lru_next = *head;
      line->lru_prev = (*head)->lru_prev;
      (*head)->lru_prev = line;
      line->lru_prev->lru_next = line;
   } else {
      /* empty list: line becomes a singleton circular list */
      line->lru_next = line;
      line->lru_prev = line;
      *head = line;
   }
}
/* Remove "line" from the circular LRU list of segment "segnum" (or the
 * free list when segnum == CACHE_SEGNUM), advancing the head pointer
 * when necessary and clearing the segment bits in line->state. */
static void cache_remove_from_lrulist (cache_mapentry *map, cache_atom *line, int segnum)
{
   cache_atom **head;
   if (segnum == CACHE_SEGNUM) {
      head = &map->freelist;
   } else {
      head = &map->lru[segnum];
      map->numactive[segnum]--;
   }
   if (line->lru_next != line) {
      /* more than one element: unlink, fixing the head if it was line */
      line->lru_prev->lru_next = line->lru_next;
      line->lru_next->lru_prev = line->lru_prev;
      if (*head == line) {
         *head = line->lru_next;
      }
   } else {
      /* line was the only element, so the list becomes empty */
      *head = NULL;
   }
   line->state &= ~CACHE_SEGNUM;
   line->lru_next = NULL;
   line->lru_prev = NULL;
}
/* Reset state of LRU list given access to line */
/* For the segmented-LRU policy only: promote the accessed line one
 * segment (capped at the top segment) and re-insert it.  If that fills
 * a segment to its configured maximum, the full segment's LRU line is
 * demoted one segment, cascading downward as needed. */
static void cache_access (cache_def *cache, cache_atom *line)
{
   int set;
   int segnum = 0;
   if (cache->replacepolicy != CACHE_REPLACE_SLRU) {
      return;                     /* only S-LRU maintains per-access state */
   }
   /* LRU state is kept on the first atom of the cache line */
   while (line->line_prev) {
      line = line->line_prev;
   }
   set = (cache->mapmask) ? (line->lbn % cache->mapmask) : 0;
   if (line->lru_next) {
      /* currently on a list: remove it and target the next-higher segment */
      segnum = line->state & CACHE_SEGNUM;
      cache_remove_from_lrulist(&cache->map[set], line, segnum);
      if (segnum != (cache->numsegs-1)) {
         segnum = (segnum + 1) & CACHE_SEGNUM;
      }
   }
   cache_add_to_lrulist(&cache->map[set], line, segnum);
   /* cascade: while a segment sits at capacity, push its LRU line down */
   while ((segnum) &&
          (cache->map[set].numactive[segnum] ==
           cache->map[set].maxactive[segnum]))
   {
      line = cache->map[set].lru[segnum];
      cache_remove_from_lrulist(&cache->map[set], line, segnum);
      segnum--;
      cache_add_to_lrulist(&cache->map[set], line, segnum);
   }
}
/* Queue "allocdesc" to wait until a cache line becomes fully unlocked.
 * Waiters form a circular singly-linked list where cache->linewaiters
 * points at the tail (so tail->next is the head).  NOTE(review): the
 * tail pointer is advanced only for descriptors not already flagged
 * CACHE_FLAG_LINELOCKED_ALLOCATE -- presumably so a re-queued waiter
 * keeps its position near the head; confirm against the dequeue in
 * cache_lock_free. */
static void cache_replace_waitforline (cache_def *cache, cache_event *allocdesc)
{
   // fprintf (outputfile, "entered cache_replace_waitforline: linelocked %d\n", (allocdesc->flags & CACHE_FLAG_LINELOCKED_ALLOCATE));
   if (cache->linewaiters) {
      allocdesc->next = cache->linewaiters->next;
      cache->linewaiters->next = allocdesc;
      if (!(allocdesc->flags & CACHE_FLAG_LINELOCKED_ALLOCATE)) {
         cache->linewaiters = allocdesc;
      }
   } else {
      /* first waiter: singleton circular list */
      allocdesc->next = allocdesc;
      cache->linewaiters = allocdesc;
   }
   allocdesc->flags |= CACHE_FLAG_LINELOCKED_ALLOCATE;
}
/* Choose the starting victim candidate in set "set" according to the
 * replacement policy: the list head for FIFO and S-LRU, the entry just
 * behind the head for LIFO, or a uniformly random entry for RANDOM.
 * Returns NULL when the segment-0 list is empty. */
static cache_atom *cache_get_replace_startpoint (cache_def *cache, int set)
{
   cache_atom *victim = cache->map[set].lru[0];

   if (victim == NULL) {
      return NULL;
   }
   switch (cache->replacepolicy) {
   case CACHE_REPLACE_RANDOM:
      {
         int steps = cache->map[set].numactive[0] * DISKSIM_drand48();
         int i;
         for (i = 0; i < steps; i++) {
            victim = victim->lru_prev;
         }
      }
      break;
   case CACHE_REPLACE_LIFO:
      victim = victim->lru_prev;
      break;
   case CACHE_REPLACE_FIFO:
   case CACHE_REPLACE_SLRU:
      /* start from the list head */
      break;
   default:
      fprintf(stderr, "Unknown replacement policy at cache_get_replace_startpoint: %d\n", cache->replacepolicy);
      exit(1);
   }
   return victim;
}
/* Add identifier to lockstruct only if not already present */
/* The list is a chain of fixed-size chunks; a duplicate anywhere in the
 * chain suppresses the insert.  A new chunk is allocated (zeroed) when
 * the chain is empty or the last chunk cannot accept the entry. */
static void cache_add_to_lockstruct (struct cachelockw **head, void *identifier)
{
   struct cachelockw *tmp = *head;
   int start = FALSE;
   int i;
   if (tmp == NULL) {
      /* no chunks yet: allocate the first and take slot 0 */
      tmp = (struct cachelockw *) getfromextraq();
      memset((char *)tmp, 0, sizeof(struct cachelockw));
      /* bzero ((char *)tmp, sizeof(struct cachelockw)); */
      tmp->entry[0] = identifier;
      *head = tmp;
   } else {
      /* scan every full (non-last) chunk for a duplicate */
      while (tmp->next) {
         for (i=0; i<CACHE_LOCKSPERSTRUCT; i++) {
            if (tmp->entry[i] == identifier) {
               return;
            }
         }
         tmp = tmp->next;
      }
      /* last chunk: bail on duplicate, otherwise claim the first empty
       * slot that follows an occupied one.  NOTE(review): if all slots
       * here are empty, this loop inserts nothing and a fresh chunk is
       * chained below -- looks intentional, but verify. */
      for (i=0; i<CACHE_LOCKSPERSTRUCT; i++) {
         if (tmp->entry[i] == identifier) {
            return;
         } else if (tmp->entry[i]) {
            start = TRUE;
         } else if (start) {
            tmp->entry[i] = identifier;
            return;
         }
      }
      /* last chunk was full: extend the chain with a zeroed chunk */
      tmp->next = (struct cachelockw *) getfromextraq();
      tmp = tmp->next;
      memset ((char *)tmp, 0, sizeof(struct cachelockw));
      /* bzero ((char *)tmp, sizeof(struct cachelockw)); */
      tmp->entry[0] = identifier;
   }
}
/* Report whether any request holds the write lock covering this atom.
 * Lock state lives on the anchor atom of each lock-granularity group,
 * so walk back to that anchor first. */
static int cache_atom_iswritelocked (cache_def *cache, cache_atom *target)
{
   for (; target->lbn % cache->lockgran; target = target->line_prev) {
      /* step back to the group's anchor atom */
   }
   return (target->writelock != NULL);
}
/* Report whether any lock (read or write) is held on this atom's
 * lock-granularity group. */
static int cache_atom_islocked (cache_def *cache, cache_atom *target)
{
   for (; target->lbn % cache->lockgran; target = target->line_prev) {
      /* step back to the group's anchor atom */
   }
   return ((target->readlocks != NULL) || (target->writelock != NULL));
}
#if 0
/* should change this code so that newly enabled "I-streams" do not */
/* necessarily preempt the "I-stream" that freed the lock... */
/* Disabled in the original source: resumes a waiter by re-entering the
 * read or write continuation according to its event type. */
static void cache_give_lock_to_waiter (cache_def *cache, cache_atom *target, cache_event *rwdesc)
{
   switch (rwdesc->type) {
   case CACHE_EVENT_READ:
      cache_read_continue(cache, rwdesc);
      break;
   case CACHE_EVENT_WRITE:
      cache_write_continue(cache, rwdesc);
      break;
   default:
      fprintf(stderr, "Unknown type at cache_give_lock_to_waiter: %d\n", rwdesc->type);
      exit(1);
   }
}
#endif
/* Called after a lock on "target"'s group has been released.  First
 * tries to hand the lock to queued lock waiters (readers can be woken
 * in sequence; a write-type waiter is only woken once no read locks
 * remain, and stops the scan).  With no lock waiters, if an allocation
 * is queued for a fully unlocked line and every atom of target's line
 * is now lock-free, the head line-waiter is dequeued and woken. */
static void cache_lock_free (cache_def *cache, cache_atom *target)
{
   cache_lockwaiters *tmp;
   int writelocked = FALSE;
   /* Can't give away a line that is writelock'd */
   ASSERT(!target->writelock);
   if ((tmp = target->lockwaiters)) {
      int i = 0;
      while ((tmp) && (!writelocked) && (!target->writelock)) {
         cache_event *waiter = NULL;
         if (tmp->entry[i]) {
            /* exclusive-mode waiter (or all locks exclusive): ends the scan */
            writelocked = (cache->sharedreadlocks == 0) || tmp->entry[i]->locktype;
            if ((!writelocked) || (target->readlocks == NULL)) {
               waiter = tmp->entry[i];
               tmp->entry[i] = NULL;
            }
         }
         i++;
         if (i == CACHE_LOCKSPERSTRUCT) {
            /* exhausted this chunk: recycle it and move to the next */
            target->lockwaiters = tmp->next;
            addtoextraq((event *) tmp);
            tmp = target->lockwaiters;
            i = 0;
         }
         if (waiter) {
            (*cache->wakeupfunc)(cache->wakeupparam, waiter);
         }
      }
   } else if (cache->linewaiters) {
      int linesize = max(cache->linesize, 1);
      cache_event *allocdesc;
      /* back up to the first atom of the line... */
      while (target->lbn % linesize) {
         target = target->line_prev;
      }
      /* ...and give up unless the entire line is now lock-free */
      while (target) {
         if ((target->writelock) || (target->readlocks)) {
            return;
         }
         target = target->line_next;
      }
      /* dequeue the head of the circular line-waiter list (linewaiters
       * points at the tail, so tail->next is the head) */
      allocdesc = cache->linewaiters->next;
      if (allocdesc->next == allocdesc) {
         cache->linewaiters = NULL;
      } else {
         cache->linewaiters->next = allocdesc->next;
      }
      allocdesc->next = NULL;
      // fprintf (outputfile, "allocation continuing: line finally freed\n");
      (*cache->wakeupfunc)(cache->wakeupparam, allocdesc);
   }
}
/* gransize is assumed to be a factor of linesize */
/* Try to take the write lock covering "target" for rwdesc's request.
 * Returns the lock granularity on success (including re-acquisition by
 * the current holder), or 0 after queueing rwdesc as a write waiter. */
static int cache_get_write_lock (cache_def *cache, cache_atom *target, cache_event *rwdesc)
{
   /* lock state lives on the anchor atom of the granularity group */
   while (target->lbn % cache->lockgran) {
      target = target->line_prev;
   }
   if (target->writelock == rwdesc->req) {
      return cache->lockgran;          /* already held by this request */
   }
   if ((target->writelock != NULL) || (target->readlocks != NULL)) {
      /* contended: register as a write-mode waiter and fail */
      rwdesc->locktype = 1;
      cache_add_to_lockstruct(&target->lockwaiters, rwdesc);
      return 0;
   }
   target->writelock = rwdesc->req;
   return cache->lockgran;
}
/* Release the write lock covering "target" if "owner" holds it, handing
 * the lock to any waiters.  Returns the lock granularity on success,
 * 0 when owner was not the holder. */
static int cache_free_write_lock (cache_def *cache, cache_atom *target, ioreq_event *owner)
{
   while (target->lbn % cache->lockgran) {
      target = target->line_prev;
   }
   if (target->writelock != owner) {
      return 0;
   }
   target->writelock = NULL;
   cache_lock_free(cache, target);
   return cache->lockgran;
}
/* Try to take a read lock covering "target" for rwdesc's request.
 * When read locks are not shared this degenerates to a write lock.
 * Returns the lock granularity on success, or 0 after queueing rwdesc
 * as a read-mode waiter behind another request's write lock. */
static int cache_get_read_lock (cache_def *cache, cache_atom *target, cache_event *rwdesc)
{
   if (!cache->sharedreadlocks) {
      return cache_get_write_lock(cache, target, rwdesc);
   }
   /* lock state lives on the anchor atom of the granularity group */
   while (target->lbn % cache->lockgran) {
      target = target->line_prev;
   }
   if ((target->writelock != NULL) && (target->writelock != rwdesc->req)) {
      /* another request holds the write lock: wait in read mode */
      rwdesc->locktype = 0;
      cache_add_to_lockstruct(&target->lockwaiters, rwdesc);
      return 0;
   }
   cache_add_to_lockstruct((struct cachelockw **)&target->readlocks, rwdesc->req);
   if (target->writelock) {
      /* downgrade: our own write lock becomes a read lock */
      target->writelock = NULL;
      cache_lock_free(cache, target);
   }
   return cache->lockgran;
}
/* Release "owner"'s read lock on the group containing "target".
 * Returns the lock granularity on success, 0 when owner held no read
 * lock here.  The leading holder chunk is recycled once it holds no
 * active entries, and the lock is handed on when the last reader
 * leaves. */
static int cache_free_read_lock (cache_def *cache, cache_atom *target, ioreq_event *owner)
{
   cache_lockholders *tmp;
   int found = FALSE;
   int i;
   if (!cache->sharedreadlocks) {
      /* read locks are really exclusive locks in this configuration */
      return(cache_free_write_lock(cache, target, owner));
   }
   while (target->lbn % cache->lockgran) {
      target = target->line_prev;
   }
   tmp = target->readlocks;
   while ((tmp) && (!found)) {
      int active = FALSE;
      for (i=0; i<CACHE_LOCKSPERSTRUCT; i++) {
         if (tmp->entry[i] == owner) {
            tmp->entry[i] = 0;
            found = TRUE;
         } else if (tmp->entry[i]) {
            active = TRUE;
         }
      }
      /* NOTE(review): only the chunk at the front of the chain is freed
       * when empty; interior empty chunks stay until they reach the
       * front on later frees. */
      if ((tmp == target->readlocks) && (active == FALSE)) {
         target->readlocks = tmp->next;
         addtoextraq((event *) tmp);
         tmp = target->readlocks;
      } else {
         tmp = tmp->next;
      }
   }
   if (found) {
      if (!target->readlocks) {
         /* last reader gone: wake lock or line waiters */
         cache_lock_free(cache, target);
      }
      return(cache->lockgran);
   } else {
      return(0);
   }
}
/* Acquires read locks covering every atom in [start..end] on behalf of
 * "waiter".  "startatom" seeds the walk (used directly when its lbn equals
 * start, otherwise only for its devno).  Lock acquisition must succeed for
 * every lock unit in the range; failure is a fatal assertion. */
static void cache_get_read_lock_range (cache_def *cache, int start, int end, cache_atom *startatom, cache_event *waiter)
{
cache_atom *atom = NULL;
int gran = 1;         /* granularity 1 forces an acquire on the first atom */
int blkno;

if (startatom->lbn == start) {
atom = startatom;
}
for (blkno = start; blkno <= end; blkno++) {
if (!atom) {
atom = cache_find_atom(cache, startatom->devno, blkno);
/* every block in the range must be resident */
ASSERT(atom != NULL);
}
/* Lock once per lock unit (first atom of each unit). */
if ((atom->lbn % gran) == 0) {
gran = cache_get_read_lock(cache, atom, waiter);
/* Must not fail to acquire lock */
ASSERT(gran != 0);
}
atom = atom->line_next;
}
}
/* Builds and issues a write-back (destage) request for blocks
[start..end] on the device of "startatom".  A companion tracking event
is pushed onto the front of waiter->req so the completion can later be
matched up, and read locks are acquired over the whole range before the
request is handed to the issue function.  Always returns 1. */
static int cache_issue_flushreq (cache_def *cache, int start, int end, cache_atom *startatom, cache_event *waiter)
{
ioreq_event *flushreq;
ioreq_event *flushwait;
/* nonzero if "waiter" is already at the head of the IOwaiters list */
int waiting = (cache->IOwaiters == waiter) ? 1 : 0;
// fprintf (outputfile, "Entered issue_flushreq: start %d, end %d\n", start, end);
flushreq = (ioreq_event *) getfromextraq();
flushreq->devno = startatom->devno;
flushreq->blkno = start;
flushreq->bcount = end - start + 1;
flushreq->busno = startatom->busno;
flushreq->slotno = startatom->slotno;
flushreq->type = IO_ACCESS_ARRIVE;
flushreq->flags = 0;
/* Tracking event, linked at the front of the waiter's request list. */
flushwait = (ioreq_event *) getfromextraq();
flushwait->type = IO_REQUEST_ARRIVE;
flushwait->devno = flushreq->devno;
flushwait->blkno = flushreq->blkno;
flushwait->bcount = flushreq->bcount;
flushwait->next = waiter->req;
flushwait->prev = NULL;
if (waiter->req) {
waiter->req->prev = flushwait;
}
waiter->req = flushwait;
/* accblkno == -1 means "match completions by request blkno". */
waiter->accblkno = -1;
if (!waiting) {
cache_waitfor_IO(cache, 1, waiter, flushwait);
}
/* NOTE(review): accblkno is assigned -1 both before and after the
cache_waitfor_IO call above -- presumably because cache_waitfor_IO
may overwrite it; confirm before removing this repeat assignment. */
waiter->accblkno = -1;
cache->stat.destagewrites++;
cache->stat.destagewriteatoms += end - start + 1;
cache_get_read_lock_range(cache, start, end, startatom, waiter);
// fprintf (outputfile, "Issueing dirty block write-back: blkno %d, bcount %d, devno %d\n", flushreq->blkno, flushreq->bcount, flushreq->devno);
(*cache->issuefunc)(cache->issueparam, flushreq);
return(1);
}
/* Tries to grow a flush cluster from "blkno" in direction "dir" (+1 for
forward, -1 for backward), absorbing adjacent resident dirty atoms whose
dirty bits it clears as it goes.  The scan stops at a missing atom, a
write-locked atom, an invalid atom, or after a clean atom followed by a
line boundary; "linecnt" caps the cluster at flush_maxlinecluster lines.
Returns the lbn of the last dirty atom absorbed (or the original blkno
if none were). */
static int cache_flush_cluster (cache_def *cache, int devno, int blkno, int linecnt, int dir)
{
cache_atom *line = NULL;
/* set when the previously examined atom was clean-but-valid */
int lastclean = 0;
int writelocked;
ASSERT1((dir == 1) || (dir == -1), "dir", dir);
while (linecnt <= cache->flush_maxlinecluster) {
if (line == NULL) {
/* Crossed a line boundary: look up the next line's adjacent atom. */
line = cache_find_atom(cache, devno, (blkno+dir));
if ((line == NULL) || (lastclean)) {
break;
}
linecnt++;
continue;
}
writelocked = cache_atom_iswritelocked(cache, line);
if ((line->state & CACHE_DIRTY) && (!writelocked)) {
/* Absorb this dirty atom into the cluster and mark it clean. */
line->state &= ~CACHE_DIRTY;
lastclean = 0;
blkno = line->lbn;
} else if ((writelocked) || (!(line->state & CACHE_VALID))) {
break;
} else {
lastclean = 1;
}
line = (dir == 1) ? line->line_next : line->line_prev;
}
return(blkno);
}
/* Walks the cache line containing "dirtyline" and issues write-back
requests for each contiguous run of unlocked dirty atoms, clearing
their dirty bits.  Runs that begin at the start of the line may be
extended backwards (and runs ending at the end of the line forwards)
across neighboring lines via cache_flush_cluster.  Returns the number
of flush requests issued. */
static int cache_initiate_dirty_block_flush (cache_def *cache, cache_atom *dirtyline, cache_event *allocdesc)
{
cache_atom *dirtyatom = 0;
int dirtyend = 0;
int dirtystart = -1;   /* -1 means no dirty run currently open */
cache_atom *tmp = dirtyline;
int flushcnt = 0;
// fprintf (outputfile, "Entered cache_initiate_dirty_block_flush: %d\n", dirtyline->lbn);
while (tmp) {
int writelocked = cache_atom_iswritelocked(cache, tmp);
if ((tmp->state & CACHE_DIRTY) && (!writelocked)) {
/* Extend (or begin) the current dirty run; mark the atom clean now
since it is being queued for write-back. */
tmp->state &= ~CACHE_DIRTY;
if (dirtystart == -1) {
dirtyatom = tmp;
dirtystart = tmp->lbn;
}
dirtyend = tmp->lbn;
} else if ((dirtystart != -1) && ((!(tmp->state & CACHE_VALID)) || (writelocked))) {
/* Run terminated by an invalid or write-locked atom: flush it. */
if ((cache->flush_maxlinecluster > 1) && (dirtystart == dirtyline->lbn)) {
/* Run starts at line front -- try to cluster backwards. */
dirtystart = cache_flush_cluster(cache, dirtyatom->devno, dirtystart, 1, -1);
}
if (cache_issue_flushreq(cache, dirtystart, dirtyend, dirtyatom, allocdesc) == 0) {
return(flushcnt);
}
dirtystart = -1;
flushcnt++;
}
tmp = tmp->line_next;
}
if (dirtystart != -1) {
/* Flush the run still open at the end of the line. */
int linesize = max(cache->linesize, 1);
int linecnt;
if ((cache->flush_maxlinecluster > 1) && (dirtystart == dirtyline->lbn)) {
dirtystart = cache_flush_cluster(cache, dirtyatom->devno, dirtystart, 1, -1);
}
linecnt = 1 + ((dirtyline->lbn - dirtystart) / linesize);
if ((linecnt < cache->flush_maxlinecluster) && (dirtyend == (dirtyline->lbn + linesize -1))) {
/* Run reaches line end -- try to cluster forwards. */
dirtyend = cache_flush_cluster(cache, dirtyatom->devno, dirtyend, linecnt, 1);
}
flushcnt += cache_issue_flushreq(cache, dirtystart, dirtyend, dirtyatom, allocdesc);
}
// fprintf (outputfile, "flushcnt %d\n", flushcnt);
return(flushcnt);
}
/* Allocates and initializes a cache_event used to track a background
 * flush (sync).  The done function is the module's empty callback since
 * no caller waits on background flushes. */
static cache_event *cache_get_flushdesc()
{
cache_event *desc;

desc = (cache_event *) getfromextraq();
desc->req = NULL;
desc->donefunc = &disksim->donefunc_cachemem_empty;
desc->type = CACHE_EVENT_SYNC;
return(desc);
}
/* Finalizes a flush descriptor after flushes have (or have not) been
 * issued.  If no write-backs were attached (req == NULL) the descriptor
 * is recycled; if exactly one outstanding request remains, accblkno is
 * set so its completion can be matched by block number. */
static void cache_cleanup_flushdesc (cache_event *flushdesc)
{
if (flushdesc->req == NULL) {
addtoextraq((event *) flushdesc);
return;
}
if (flushdesc->req->next == NULL) {
flushdesc->accblkno = flushdesc->req->blkno;
}
}
/* Not currently dealing with case of two-handed flushing. Easiest way to */
/* do this will be to allocate the cache as one big chunk of memory. Then,*/
/* use the addresses of cache_atoms rather than the pointers to traverse. */
/* Timer callback that sweeps the entire cache (every set and, for SLRU,
every segment), starting a dirty-block flush for each line containing
dirty atoms.  Reschedules itself flush_period time units in the future. */
static void cache_periodic_flush (timer_event *timereq)
{
cache_def *cache = (cache_def *) timereq->ptr;
int segcnt = (cache->replacepolicy == CACHE_REPLACE_SLRU) ? cache->numsegs : 1;
int i, j;
cache_atom *line;
cache_atom *stop;
cache_atom *tmp;
cache_event *flushdesc = cache_get_flushdesc();
int flushcnt = 0;
int startit;
for (i=0; i<=cache->mapmask; i++) {
for (j=0; j<segcnt; j++) {
/* The LRU list is circular: walk from the head until we return to it.
"startit" lets the loop body run once before the stop test matters. */
line = cache->map[i].lru[j];
stop = line;
startit = 1;
while ((startit) || (line != stop)) {
startit = 0;
tmp = line;
while (tmp) {
if (tmp->state & CACHE_DIRTY) {
flushcnt += cache_initiate_dirty_block_flush(cache, tmp, flushdesc);
}
tmp = tmp->line_next;
}
line = line->lru_next;
};
}
}
cache_cleanup_flushdesc(flushdesc);
/* Re-arm the periodic timer. */
timereq->time += cache->flush_period;
addtointq((event *)timereq);
// fprintf (outputfile, "%f: cache_periodic_flush, %d flushes started\n", simtime, flushcnt);
}
/* Idle-time callback: when the device's queue is empty, walks the cache
LRU (segment by segment for SLRU) looking for dirty lines belonging to
the idle device and starts a flush for the first one found.  Stops after
the first flush is actually issued (flushdesc->req becomes non-NULL). */
static void cache_idletime_detected (void *idleworkparam, int idledevno)
{
cache_def *cache = idleworkparam;
cache_atom *line = cache_get_replace_startpoint(cache, 0);
cache_atom *stop = line;
cache_atom *tmp;
int segcnt = (cache->replacepolicy == CACHE_REPLACE_SLRU) ? cache->numsegs : 1;
int i;
cache_event *flushdesc;
int startit;
/* Device no longer idle -- nothing to do. */
if (ioqueue_get_number_in_queue((*cache->queuefind)(cache->queuefindparam, idledevno))) {
return;
}
flushdesc = cache_get_flushdesc();
flushdesc->type = CACHE_EVENT_IDLESYNC;
for (i=0; i<segcnt; i++) {
if (i) {
/* Subsequent SLRU segments start from their own list head. */
line = cache->map[0].lru[i];
stop = line;
}
/* Circular LRU walk; "startit" forces one pass before the stop test. */
startit = 1;
while ((startit) || (line != stop)) {
startit = 0;
if (line->devno == idledevno) {
tmp = line;
while (tmp) {
if (tmp->state & CACHE_DIRTY) {
(void)cache_initiate_dirty_block_flush(cache, tmp, flushdesc);
if (flushdesc->req) {
/* One flush started -- use the idle time for it only. */
goto cache_idletime_detected_idleused;
}
}
tmp = tmp->line_next;
}
}
line = line->lru_next;
}
}
cache_idletime_detected_idleused:
cache_cleanup_flushdesc(flushdesc);
}
/* Removes a cache line from its set's mapping: unlinks it from the LRU
list, removes each atom from the hash table, and returns the atoms to
the free list (CACHE_SEGNUM selects the free/segment list position).
With linesize == 0 (variable-length lines), atoms are split apart and
freed individually; otherwise the line is freed as a unit. */
static void cache_unmap_line (cache_def *cache, cache_atom *line, int set)
{
cache_atom *tmp;
if (line->lru_next) {
cache_remove_from_lrulist(&cache->map[set], line, (line->state & CACHE_SEGNUM));
}
if (cache->linesize == 0) {
/* Variable-size lines: detach each atom and free it separately. */
while ((tmp = line)) {
line = line->line_next;
tmp->line_next = NULL;
tmp->line_prev = NULL;
cache_remove_entry_from_hash(cache, tmp);
cache_add_to_lrulist(&cache->map[set], tmp, CACHE_SEGNUM);
}
} else {
/* Fixed-size lines: free the whole line, then unhash every atom. */
cache_add_to_lrulist(&cache->map[set], line, CACHE_SEGNUM);
while (line) {
cache_remove_entry_from_hash(cache, line);
line = line->line_next;
}
}
}
/* Selects a victim line in "set" and moves its atoms to the free list.
Locked lines are skipped; dirty lines are either flushed and then
skipped (ALLOCATE_NONDIRTY policy) or flushed with the caller waiting
for the write-backs.  Returns 0 when a line was freed (or the freelist
already had space), -1 when the caller must wait for a line to become
available, or the number of write-backs the caller must wait on. */
static int cache_replace (cache_def *cache, int set, cache_event *allocdesc)
{
int numwrites;
cache_atom *line;
cache_atom *tmp;
cache_atom *stop;
int dirty = FALSE;
int locked = FALSE;
/* Under NONDIRTY allocation a private flush descriptor is created lazily;
otherwise the caller's allocdesc tracks the flushes. */
cache_event *flushdesc = (cache->allocatepolicy & CACHE_ALLOCATE_NONDIRTY) ? NULL : allocdesc;
if (cache->map[set].freelist) {
return(0);
}
if ((line = cache_get_replace_startpoint(cache, set)) == NULL) {
/* All lines between ownership */
cache_replace_waitforline(cache, allocdesc);
return(-1);
}
stop = line;
cache_replace_loop_continue:
/* Advance only when the previous candidate was rejected (locked/dirty). */
if (locked | dirty) {
line = (cache->replacepolicy == CACHE_REPLACE_LIFO) ? line->lru_prev : line->lru_next;
}
if (line == stop) {
/* Wrapped all the way around; if everything was locked, wait. */
if (locked) {
if ((flushdesc) && (cache->allocatepolicy & CACHE_ALLOCATE_NONDIRTY)) {
cache_cleanup_flushdesc(flushdesc);
}
cache_replace_waitforline(cache, allocdesc);
return(-1);
}
}
locked = FALSE;
tmp = line;
/* Reject the line if any atom holds a read or write lock. */
while (tmp) {
if ((locked = (tmp->readlocks || tmp->writelock))) {
goto cache_replace_loop_continue;
}
tmp = tmp->line_next;
}
dirty = FALSE;
tmp = line;
/* Dirty atoms must be written back before (or instead of) replacement. */
while (tmp) {
if ((dirty = tmp->state & CACHE_DIRTY)) {
if (flushdesc == NULL) {
flushdesc = cache_get_flushdesc();
}
numwrites = cache_initiate_dirty_block_flush(cache, tmp, flushdesc);
if (cache->allocatepolicy & CACHE_ALLOCATE_NONDIRTY) {
/* Flush in background and keep looking for a clean victim. */
goto cache_replace_loop_continue;
} else {
/* Caller waits for these write-backs. */
return(numwrites);
}
}
tmp = tmp->line_next;
}
/* Clean and unlocked: evict it. */
cache_unmap_line(cache, line, set);
return(0);
}
/* Return number of writeouts (dirty block flushes) to be waited for. */
/* Also fill pointer to block allocated. Null indicates that blocks must */
/* be written out but no specific one has yet been allocated. */
/* Fetches a free cache atom for "lbn" into *ret (NULL if none can be
provided yet), triggering replacement when the set's freelist is empty.
Returns the replacement's result: the number of writeouts to wait for
(0 when the atom is immediately usable, -1 when waiting for a line). */
static int cache_get_free_atom (cache_def *cache, int lbn, cache_atom **ret, cache_event *allocdesc)
{
int writeouts = 0;
/* NOTE(review): set selection uses "% mapmask" while set loops elsewhere
iterate with i <= mapmask (i.e. mapmask+1 sets); "% mapmask" can never
select the last set -- confirm whether "& mapmask" was intended. */
int set = (cache->mapmask) ? (lbn % cache->mapmask) : 0;
// fprintf (outputfile, "Entered cache_get_free_atom: lbn %d, set %d, freelist %p\n", lbn, set, cache->map[set].freelist);
if (cache->map[set].freelist == NULL) {
writeouts = cache_replace(cache, set, allocdesc);
}
if ((*ret = cache->map[set].freelist)) {
cache_remove_from_lrulist(&cache->map[set], *ret, CACHE_SEGNUM);
}
return(writeouts);
}
/* Still need to add check for outstanding allocations by other people, to
avoid allocation replication */
/* Continues (or starts) an allocation described by "allocdesc": acquires
free atoms for every lbn in [lockstop..allocstop), assigns them their
new identity, and inserts them into the hash table.  If replacement
stalls on dirty write-backs, progress is saved in allocdesc and it is
returned so the allocation can resume later; on completion the
descriptor is freed and NULL is returned.  If a read/write descriptor
is already waiting on this allocation, that operation is resumed
instead. */
static cache_event *cache_allocate_space_continue (cache_def *cache, cache_event *allocdesc)
{
int numwrites = 0;
cache_atom *new;
/*
cache_atom *toclean = NULL;
cache_atom *tocleanlast;
int flushstart = -1;
*/
int devno = allocdesc->req->devno;
int lbn = allocdesc->lockstop;     /* resume point of the allocation */
int stop = allocdesc->allocstop;   /* one past the last lbn to allocate */
cache_atom *cleaned = allocdesc->cleaned;
cache_atom *lineprev = allocdesc->lineprev;
int linesize = (cache->linesize) ? cache->linesize : 1;
// fprintf (outputfile, "Entered allocate_space_continue: lbn %d, stop %d\n", lbn, stop);
if (allocdesc->waitees) {
/* A read or write was blocked on this allocation: resume it and let it
drive any remaining work. */
cache_event *rwdesc = allocdesc->waitees;
if (rwdesc->type == CACHE_EVENT_READ) {
cache_read_continue(cache, rwdesc);
} else {
cache_write_continue(cache, rwdesc);
}
addtoextraq((event *) allocdesc);
return(NULL);
}
while (lbn < stop) {
if ((new = cleaned) == NULL) {
numwrites += cache_get_free_atom(cache, lbn, &new, allocdesc);
}
if (numwrites == 0) {
ASSERT(new != NULL);
/* Assign identity to each atom of the (possibly multi-atom) line. */
do {
new->devno = devno;
new->lbn = lbn;
/* Re-allocated cache atom must not still be locked */
ASSERT((!new->writelock) && (!new->readlocks));
/*
new->writelock = allocdesc->prev->req;
*/
new->state = CACHE_LOCKDOWN;
cache_insert_new_into_hash(cache, new);
lbn++;
new = (lbn % linesize) ? new->line_next : new;
} while (lbn % linesize);
if (cache->linesize == 0) {
/* Variable-length lines: stitch single atoms together manually. */
new->line_next = NULL;
new->line_prev = lineprev;
if (lineprev) {
lineprev->line_next = new;
}
lineprev = ((cache->linesize == -1) || (lbn % linesize)) ? new : NULL;
}
/*
} else if (cache->startallflushes) {
if (flushstart == -1) {
flushstart = i;
}
if (new) {
if (toclean) {
tocleanlast->line_next = new;
tocleanlast = new;
} else {
toclean = new;
tocleanlast = new;
}
while (tocleanlast->line_next) {
tocleanlast = tocleanlast->line_next;
}
}
*/
} else {
/* Replacement is waiting on write-backs: checkpoint progress and
return the descriptor so allocation can continue later. */
allocdesc->lockstop = lbn;
allocdesc->cleaned = new;
allocdesc->lineprev = lineprev;
/* This needs fixing! */
/*
cache_waitfor_IO(cache, numwrites, allocdesc, NULL);
*/
return(allocdesc);
}
}
/*
if (numwrites) {
allocdesc->lockstop = flushstart;
allocdesc->cleaned = toclean;
allocdesc->lineprev = lineprev;
cache_wait(cache, numwrites, allocdesc);
return(allocdesc);
}
*/
addtoextraq((event *) allocdesc);
return(NULL);
}
/* Begins allocation of cache space covering "size" blocks starting at
"lbn", rounded out to whole cache lines, on behalf of the read/write
described by rwdesc.  Returns NULL if allocation completed immediately,
or the allocation descriptor (with rwdesc registered as waitee) if the
caller must block until replacement write-backs finish. */
static cache_event * cache_allocate_space (cache_def *cache, int lbn, int size, cache_event *rwdesc)
{
cache_event *allocdesc = (cache_event *) getfromextraq();
int linesize = max(1, cache->linesize);
// fprintf (outputfile, "Entered cache_allocate_space: lbn %d, size %d, linesize %d\n", lbn, size, cache->linesize);
allocdesc->type = CACHE_EVENT_ALLOCATE;
allocdesc->req = rwdesc->req;
allocdesc->flags = rwdesc->flags & CACHE_FLAG_LINELOCKED_ALLOCATE;
/* Round the range down/up to line boundaries. */
allocdesc->lockstop = lbn - (lbn % linesize);
allocdesc->allocstop = lbn + size + (linesize - 1 - ((lbn + size - 1) % linesize));
allocdesc->cleaned = NULL;
allocdesc->lineprev = NULL;
allocdesc->prev = rwdesc;
allocdesc->waitees = NULL;
if ((allocdesc = cache_allocate_space_continue(cache, allocdesc))) {
/* Allocation stalled: register rwdesc to be resumed when it finishes. */
allocdesc->waitees = rwdesc;
rwdesc->flags |= allocdesc->flags & CACHE_FLAG_LINELOCKED_ALLOCATE;
}
return(allocdesc);
}
/* Acquires locks over "stop" atoms starting at "line" for the request in
rwdesc.  locktype 1 takes read locks, 2 takes write locks, and 3
upgrades: it drops a held read lock and takes a write lock.  Returns 0
on success, 1 if the caller was queued waiting for a lock, or 2 if the
first atom changed identity while waiting (caller must restart). */
static int cache_get_rw_lock (cache_def *cache, int locktype, cache_event *rwdesc, cache_atom *line, int i, int stop)
{
int lockgran;
cache_atom *tmp = line;
int j = 0;
int lbn = rwdesc->req->blkno;
int devno = rwdesc->req->devno;
// fprintf (outputfile, "Entered cache_get_rw_lock: lbn %d, i %d, stop %d, locktype %d\n", line->lbn, i, stop, locktype);
while (j < stop) {
if (locktype == 1) {
lockgran = cache_get_read_lock(cache, tmp, rwdesc);
} else {
if (locktype == 3) {
/* Upgrade: release the read lock before taking the write lock. */
cache_free_read_lock(cache, tmp, rwdesc->req);
}
lockgran = cache_get_write_lock(cache, tmp, rwdesc);
}
// fprintf (outputfile, "got lock: lockgran %d, lbn %d\n", lockgran, tmp->lbn);
if (lockgran == 0) {
/* Queued as a waiter; caller must stop and resume later. */
return(1);
} else {
if ((line->lbn != (lbn + i)) || (line->devno != devno)) {
/* NOTE: this precaution only covers us when FIRST atom of line */
/* changes identity. Otherwise, must have other support. */
if (locktype == 1) {
cache_free_read_lock(cache, tmp, rwdesc->req);
} else {
cache_free_write_lock(cache, tmp, rwdesc->req);
}
return(2);
}
j++;
tmp = tmp->line_next;
/* Skip atoms already covered by the lock unit just acquired. */
while ((tmp) && (tmp->lbn % lockgran)) {
j++;
tmp = tmp->line_next;
}
}
}
return(0);
}
/* Issues a cache-fill (read) request for blocks [start..end], optionally
widening the range with prefetch: FRONTOFLINE extends backwards to the
start of the cache line, RESTOFLINE forwards to its end.  Extension
stops at already-valid atoms or (when prefetch_waitfor_locks is off)
locked atoms; extended atoms are write-locked and marked valid up
front.  Records the final range in req->tempint1/2, converts rwdesc to
its "...EXTRA" event type, and waits for the device I/O.  Returns the
number of atoms being filled, or 0 if a prefetch lock wait was queued. */
static int cache_issue_fillreq (cache_def *cache, int start, int end, cache_event *rwdesc, int prefetchtype)
{
ioreq_event *fillreq;
int linesize = max(cache->linesize, 1);
// fprintf (outputfile, "Entered cache_issue_fillreq: start %d, end %d, prefetchtype %d\n", start, end, prefetchtype);
if (prefetchtype & CACHE_PREFETCH_FRONTOFLINE) {
/* Extend the fill backwards toward the front of the cache line. */
cache_atom *line = cache_find_atom(cache, rwdesc->req->devno, start);
int validstart = -1;
int lockgran = cache->lockgran;
while (start % linesize) {
line = line->line_prev;
if (line->state & CACHE_VALID) {
/*
fprintf (outputfile, "already valid backwards: lbn %d\n", line->lbn);
*/
break;
/*
if (line->state & CACHE_DIRTY) {
break;
}
if (validstart == -1) {
validstart = line->lbn;
}
*/
} else {
validstart = -1;
}
/* Crossing into a new lock unit (walking backwards): lock it. */
if ((line->lbn % lockgran) == (lockgran-1)) {
if ((!cache->prefetch_waitfor_locks) && (cache_atom_islocked(cache, line))) {
break;
}
if ((lockgran = cache_get_write_lock(cache, line, rwdesc)) == 0) {
return(0);
}
}
start--;
line->state |= CACHE_VALID;
}
/* Need to free some locks if do this...
if (validstart != -1) {
start = validstart;
}
*/
}
if (prefetchtype & CACHE_PREFETCH_RESTOFLINE) {
/* Extend the fill forwards toward the end of the cache line. */
cache_atom *line = cache_find_atom(cache, rwdesc->req->devno, end);
int validend = -1;
int lockgran = cache->lockgran;
while ((end+1) % linesize) {
line = line->line_next;
if (line->state & CACHE_VALID) {
/*
fprintf (outputfile, "already valid forwards: lbn %d\n", line->lbn);
*/
break;
/*
if (line->state & CACHE_DIRTY) {
break;
}
if (validend == -1) {
validend = line->lbn;
}
*/
} else {
validend = -1;
}
/* Crossing into a new lock unit (walking forwards): lock it. */
if ((line->lbn % lockgran) == 0) {
if ((!cache->prefetch_waitfor_locks) && (cache_atom_islocked(cache, line))) {
break;
}
if ((lockgran = cache_get_write_lock(cache, line, rwdesc)) == 0) {
return(0);
}
}
end++;
line->state |= CACHE_VALID;
}
/* Need to free some locks if do this...
if (validend != -1) {
end = validend;
}
*/
}
fillreq = ioreq_copy(rwdesc->req);
fillreq->blkno = start;
fillreq->bcount = end - start + 1;
fillreq->type = IO_ACCESS_ARRIVE;
fillreq->flags |= READ;
/* Remember the actual fill range so prefetch locks can be released later. */
rwdesc->req->tempint1 = start;
rwdesc->req->tempint2 = end;
rwdesc->type = (rwdesc->type == CACHE_EVENT_READ) ? CACHE_EVENT_READEXTRA : CACHE_EVENT_WRITEFILLEXTRA;
cache_waitfor_IO(cache, 1, rwdesc, fillreq);
// fprintf (outputfile, "%f: Issueing line fill request: blkno %d, bcount %d\n", simtime, fillreq->blkno, fillreq->bcount);
(*cache->issuefunc)(cache->issueparam, fillreq);
return(end - start + 1);
}
/* Releases the write locks taken by cache_issue_fillreq on the prefetch
portions of a fill -- the atoms before req->blkno and after the end of
the request proper (fill range recorded in req->tempint1/2).  Locks on
the requested range itself are left for the normal completion path. */
static void cache_unlock_attached_prefetch (cache_def *cache, cache_event *rwdesc)
{
int fillstart = rwdesc->req->tempint1;
int fillend = rwdesc->req->tempint2 + 1; /* one beyond, actually */
int reqstart = rwdesc->req->blkno;
int reqend = reqstart + rwdesc->req->bcount; /* one beyond, actually */
// fprintf (outputfile, "Entered cache_unlock_attached_prefetch: fillstart %d, fillend %d, reqstart %d, reqend %d\n", fillstart, fillend, reqstart, reqend);
if (fillstart < reqstart) {
/* Free locks on the backwards-prefetched atoms, one lock unit at a
time, walking from just below the request toward fillstart. */
int lockgran = cache->lockgran;
while (reqstart % cache->lockgran) {
reqstart--;
}
reqstart--;
if (fillstart <= reqstart) {
cache_atom *line = cache_find_atom(cache, rwdesc->req->devno, reqstart);
do {
if ((line->lbn % lockgran) == (lockgran-1)) {
lockgran = cache_free_write_lock(cache, line, rwdesc->req);
/* Can't free lock if not held */
ASSERT(lockgran != 0);
}
line = line->line_prev;
reqstart--;
} while (fillstart <= reqstart);
}
}
if (fillend > reqend) {
/* Free locks on the forwards-prefetched atoms. */
int lockgran = cache->lockgran;
while (reqend % cache->lockgran) {
reqend++;
}
/*
if ((fillend / cache->lockgran) == (reqend / cache->lockgran)) {
} else {
reqend++;
}
*/
if (fillend > reqend) {
cache_atom *line = cache_find_atom(cache, rwdesc->req->devno, reqend);
do {
if ((line->lbn % lockgran) == 0) {
lockgran = cache_free_write_lock(cache, line, rwdesc->req);
/* Can't free lock if not held */
ASSERT(lockgran != 0);
}
line = line->line_next;
reqend++;
} while (fillend > reqend);
}
}
}
/* Core read state machine.  Walks the request's blocks line by line,
allocating space for unmapped blocks, acquiring read locks on valid
atoms and write locks on invalid ones (upgrading when a run of invalid
atoms follows valid ones), and issuing device fill requests for runs of
invalid atoms.  Progress is checkpointed in readdesc->lockstop /
validpoint so the function can be re-entered after waiting for locks,
allocation, or I/O.  Returns 0 when the whole request is satisfied from
the cache, 1 when the caller must wait for an event.  A size-0 cache
bypasses everything and forwards the request to the device. */
static int cache_read_continue (cache_def *cache, cache_event *readdesc)
{
cache_atom *line = NULL;
cache_atom *tmp;
int i, j;
cache_event *waitee;
int stop = 0;
int curlock;
int lockgran;
int ret;
int linesize = max(1, cache->linesize);
int devno = readdesc->req->devno;
int lbn = readdesc->req->blkno;
int size = readdesc->req->bcount;
/* lbn of the first invalid atom in the current run, -1 when none */
int validpoint = readdesc->validpoint;
if (cache->size == 0) {
/* No cache: forward the read straight to the device. */
cache_waitfor_IO(cache, 1, readdesc, readdesc->req);
cache->stat.readmisses++;
cache->stat.fillreads++;
cache->stat.fillreadatoms += readdesc->req->bcount;
readdesc->req->type = IO_ACCESS_ARRIVE;
(*cache->issuefunc)(cache->issueparam, ioreq_copy(readdesc->req));
return(1);
}
i = readdesc->lockstop;   /* resume point within the request */
// fprintf (outputfile, "Entered cache_read_continue: lbn %d, size %d, i %d\n", lbn, size, i);
read_cont_loop:
while (i < size) {
line = cache_find_atom(cache, devno, (lbn + i));
waitee = NULL;
if (line == NULL) {
/* Block not resident: allocate space (may block on write-backs). */
if ((waitee = cache_allocate_space(cache, (lbn + i), 1, readdesc))) {
readdesc->lockstop = i;
return(1);
} else {
continue;
}
}
/* Number of atoms to process within this cache line. */
stop = min(rounduptomult((size - i), cache->atomsperbit), (linesize - ((lbn + i) % linesize)));
// fprintf (outputfile, "stop %d, lbn %d, atomsperbit %d, i %d, size %d, linesize %d\n", stop, lbn, cache->atomsperbit, i, size, linesize);
// fprintf (outputfile, "validpoint %d, i %d\n", validpoint, i);
j = 0;
tmp = line;
curlock = 2;
lockgran = 0;
while (j < stop) {
/* Valid atoms need a read lock (1); invalid ones a write lock (2);
a valid->invalid transition requires an upgrade (3). */
int locktype = (tmp->state & CACHE_VALID) ? 1 : 2;
// fprintf (outputfile, "j %d, valid %d, validpoint %d, curlock %d, lockgran %d\n", j, (tmp->state & CACHE_VALID), validpoint, curlock, lockgran);
if (locktype > curlock) {
curlock = locktype;
lockgran = 0;
locktype = 3;
} else {
curlock = locktype;
}
if ((lockgran) && ((lbn+i+j) % lockgran)) {
/* Atom already covered by the lock unit acquired last time. */
} else if ((ret = cache_get_rw_lock(cache, locktype, readdesc, tmp, (i+j), 1))) {
// fprintf (outputfile, "Non-zero return from cache_get_rw_lock: %d\n", ret);
if (ret == 1) {
readdesc->lockstop = i + j;
return(1);
} else { /* ret == 2, indicating that identity changed */
goto read_cont_loop;
}
}
lockgran = cache->lockgran;
if ((tmp->state & CACHE_VALID) == 0) {
/* Invalid atom: mark it valid (it will be filled) and remember
where the invalid run started. */
tmp->state |= CACHE_VALID;
if (validpoint == -1) {
validpoint = tmp->lbn;
readdesc->validpoint = validpoint;
}
/* Possibly begin filling (one at a time) ?? */
} else {
if (validpoint != -1) {
/* start fill of partial line */
readdesc->allocstop |= 2;
cache->stat.fillreads++;
readdesc->lockstop = - (readdesc->req->blkno % cache->atomsperbit);
// fprintf (outputfile, "Going to issue_fillreq on partial line\n");
cache->stat.fillreadatoms += cache_issue_fillreq(cache, validpoint, (tmp->lbn - 1), readdesc, cache->read_prefetch_type);
readdesc->validpoint = -1;
return(1);
}
}
tmp = tmp->line_next;
j++;
}
if ((validpoint != -1) && ((cache->read_line_by_line) || (!cache_concatok(cache, validpoint, 1, (validpoint+1), (line->lbn + stop - validpoint))))) {
/* Start fill of the line */
readdesc->allocstop |= 1;
cache->stat.fillreads++;
// fprintf (outputfile, "Going to issue_fillreq on full line\n");
cache->stat.fillreadatoms += cache_issue_fillreq(cache, validpoint, (line->lbn + stop - 1), readdesc, cache->read_prefetch_type);
readdesc->validpoint = -1;
return(1);
}
/* Advance to the start of the next cache line. */
i += linesize - ((lbn + i) % linesize);
//fprintf (outputfile, "validpoint %d, i %d\n", validpoint, i);
}
if (validpoint != -1) {
/* Do the fill if necessary */
readdesc->allocstop |= 1;
cache->stat.fillreads++;
/* reset to what was in beginning */
readdesc->lockstop = - (readdesc->req->blkno % cache->atomsperbit);
readdesc->validpoint = -1;
// fprintf (outputfile, "Going to issue_fillreq on full request\n");
cache->stat.fillreadatoms += cache_issue_fillreq(cache, validpoint, (line->lbn + stop - 1), readdesc, cache->read_prefetch_type);
return(1);
}
/* Entire request satisfied from cache: update stats and finish. */
cache->stat.getblockreaddones++;
cache->stat.reads++;
cache->stat.readatoms += readdesc->req->bcount;
if (readdesc->allocstop) {
cache->stat.readmisses++;
} else {
cache->stat.readhitsfull++;
}
if (readdesc->flags & CACHE_FLAG_WASBLOCKED) {
/* callback to say done */
(*readdesc->donefunc)(readdesc->doneparam, readdesc->req);
addtoextraq((event *) readdesc);
}
return(0);
}
/* Core write state machine.  Walks the request's blocks line by line,
allocating space for unmapped blocks and acquiring write locks.  When a
write covers only part of the space represented by a valid/dirty bit
(atomsperbit granularity), the uncovered invalid atoms at the front or
back must first be filled from the device.  Progress is checkpointed in
writedesc->lockstop so the function can be re-entered after waiting.
Returns 0 when the blocks are locked and ready to be written, 1 when
the caller must wait for an event.  A size-0 cache returns 0 directly
(the write is forwarded elsewhere). */
static int cache_write_continue (cache_def *cache, cache_event *writedesc)
{
int stop;
cache_event *waitee;
cache_atom *line;
cache_atom *tmp;
int lockgran;
int i, j;
int startfillstart;
int startfillstop = 0;
int endfillstart;
int endfillstop = 0;
int ret;
int devno = writedesc->req->devno;
int lbn = writedesc->req->blkno;
int size = writedesc->req->bcount;
int linesize = (cache->linesize > 1) ? cache->linesize : 1;
if (cache->size == 0) {
return(0);
}
i = writedesc->lockstop;   /* resume point within the request */
// fprintf (outputfile, "Entered cache_write_continue: lbn %d, size %d, i %d\n", lbn, size, i);
write_cont_loop:
while (i < size) {
line = cache_find_atom(cache, devno, (lbn + i));
waitee = NULL;
if (line == NULL) {
if (cache->no_write_allocate) {
/* track non-resident part and continue */
fprintf(stderr, "Not yet handling write-no-allocate\n");
exit(1);
} else {
/* Block not resident: allocate space (may block on write-backs). */
if ((waitee = cache_allocate_space(cache, (lbn + i), 1, writedesc))) {
writedesc->lockstop = i;
return(1);
} else {
continue;
}
}
}
/* Number of atoms to process within this cache line. */
stop = min(rounduptomult((size - i), cache->atomsperbit), (linesize - ((lbn + i) % linesize)));
j = 0;
tmp = line;
lockgran = 0;
startfillstart = -1;
endfillstart = -1;
while (j < stop) {
if ((lockgran) && ((lbn+i+j) % lockgran)) {
/* Atom already covered by the lock unit acquired last time. */
} else if ((ret = cache_get_rw_lock(cache, 2, writedesc, tmp, (i+j), 1))) {
if (ret == 1) {
writedesc->lockstop = i;
return(1);
} else { /* ret == 2, indicates that line changed identity */
goto write_cont_loop;
}
}
lockgran = cache->lockgran;
if ((tmp->lbn < lbn) && ((tmp->state & CACHE_VALID) == 0)) {
/* Invalid atom before the request proper (front of the dirty-bit
unit): must be filled before the write. */
writedesc->allocstop |= 2;
tmp->state |= CACHE_VALID;
if (startfillstart == -1) {
startfillstart = tmp->lbn;
}
startfillstop = tmp->lbn;
} else if ((tmp->state & CACHE_VALID) == 0) {
/* Invalid atom after the request's end but within the same
dirty-bit unit: must also be filled. */
int tmpval = tmp->lbn - (lbn + size - 1);
writedesc->allocstop |= 2;
if ((tmpval > 0) && (tmpval < (cache->atomsperbit - ((lbn + size - 1) % cache->atomsperbit)))) {
tmp->state |= CACHE_VALID;
if (endfillstart == -1) {
endfillstart = tmp->lbn;
}
endfillstop = tmp->lbn;
}
} else if (tmp->state & CACHE_DIRTY) {
/* Remember the hit-on-dirty case for statistics below. */
writedesc->allocstop |= 4;
}
tmp = tmp->line_next;
j++;
}
/* if writing only part of space covered by valid/dirty bit, read */
/* (fill) first -- flag undo of allocation to bypass (no bypass for now */
if ((startfillstart != -1) || (endfillstart != -1)) {
int fillblkno = (startfillstart != -1) ? startfillstart : endfillstart;
int fillbcount = 1 - fillblkno;
fillbcount += ((startfillstart != -1) && (endfillstart == -1)) ? startfillstop : endfillstop;
cache->stat.writeinducedfills++;
// fprintf (outputfile, "Write induced fill: blkno %d, bcount %d\n", fillblkno, fillbcount);
cache->stat.writeinducedfillatoms += cache_issue_fillreq(cache, fillblkno, (fillblkno + fillbcount - 1), writedesc, cache->writefill_prefetch_type);
return(1);
}
/* Advance to the start of the next cache line. */
i += linesize - ((lbn + i) % linesize);
}
/* All blocks locked and resident: update stats and finish. */
cache->stat.writes++;
cache->stat.writeatoms += writedesc->req->bcount;
cache->stat.getblockwritedones++;
if (writedesc->allocstop & 4) {
cache->stat.writehitsdirty++;
} else if (writedesc->allocstop) {
cache->stat.writemisses++;
} else {
cache->stat.writehitsclean++;
}
if (writedesc->flags & CACHE_FLAG_WASBLOCKED) {
/* callback */
(*writedesc->donefunc)(writedesc->doneparam, writedesc->req);
addtoextraq((event *) writedesc);
}
return(0);
}
/* Gets the appropriate block, locked and ready to be accessed read or write */
/* Public entry point: gets the blocks of "req" into the cache, locked and
ready for access (read locks for reads, write locks for writes).
Returns 0 if everything was immediately available -- in which case
donefunc has already been called -- or nonzero if the caller must wait
for donefunc to be invoked later. */
int cachemem_get_block (cache_def *cache, ioreq_event *req, void (**donefunc)(void *, ioreq_event *), void *doneparam)
{
cache_event *rwdesc = (cache_event *) getfromextraq();
int ret;
// fprintf (outputfile, "totalreqs = %d\n", disksim->totalreqs);
// fprintf (outputfile, "%.5f: Entered cache_get_block: rw %d, devno %d, blkno %d, size %d\n", simtime, (req->flags & READ), req->devno, req->blkno, req->bcount);
rwdesc->type = (req->flags & READ) ? CACHE_EVENT_READ : CACHE_EVENT_WRITE;
rwdesc->donefunc = donefunc;
rwdesc->doneparam = doneparam;
rwdesc->req = req;
req->next = NULL;
req->prev = NULL;
rwdesc->validpoint = -1;
/* Start at the front of the first atomsperbit-aligned unit. */
rwdesc->lockstop = - (req->blkno % cache->atomsperbit);
rwdesc->allocstop = 0; /* overload -- use for determining hit type */
rwdesc->flags = 0;
if (req->flags & READ) {
cache->stat.getblockreadstarts++;
ret = cache_read_continue(cache, rwdesc);
} else {
cache->stat.getblockwritestarts++;
ret = cache_write_continue(cache, rwdesc);
}
// fprintf (outputfile, "rwdesc %p, ret %x, validpoint %d\n", rwdesc, ret, rwdesc->validpoint);
if (ret == 0) {
/* Completed synchronously: deliver the callback now. */
(*donefunc)(doneparam, req);
addtoextraq((event *) rwdesc);
} else {
/* Blocked: mark it so the continuation delivers the callback. */
rwdesc->flags |= CACHE_FLAG_WASBLOCKED;
}
return(ret);
}
/* frees the block after access complete, block is clean so remove locks */
/* and update lru */
/* Public entry point: releases a request's blocks after a clean access --
frees the read locks over the request's range and (when req->type is
nonzero) updates the LRU ordering for each line touched. */
void cachemem_free_block_clean (cache_def *cache, ioreq_event *req)
{
cache_atom *line = NULL;
int lockgran = 0;   /* accumulates the number of atoms unlocked */
int i;
// fprintf (outputfile, "%.5f: Entered cache_free_block_clean: blkno %d, bcount %d, devno %d\n", simtime, req->blkno, req->bcount, req->devno);
cache->stat.freeblockcleans++;
if (cache->size == 0) {
return;
}
for (i=0; i<req->bcount; i++) {
if (line == NULL) {
line = cache_find_atom(cache, req->devno, (req->blkno + i));
/* Can't free unallocated space */
ASSERT(line != NULL);
if (req->type) {
/* Count this as an access for replacement (LRU) purposes. */
cache_access(cache, line);
}
}
/* Free one lock per lock unit (or at the end of the request). */
if (((line->lbn % cache->lockgran) == (cache->lockgran-1)) || (i == (req->bcount-1))) {
lockgran += cache_free_read_lock(cache, line, req);
}
line = line->line_next;
}
/* Must have unlocked entire requests worth of data */
ASSERT2((lockgran >= req->bcount), "lockgran", lockgran, "reqbcount", req->bcount);
}
/* Manages a write-through request that must be destaged one cache line at
a time.  Per-request progress is tracked on cache->partwrites; note the
field overloading on that tracking event: locktype holds the original
blkno, lockstop the original bcount, and accblkno the block just beyond
the portion already written.  Called with reqdone==0 when a partial
flush is issued, and reqdone==1 when one completes (which either
finishes the request or kicks off the next portion). */
static void cache_write_line_by_line (cache_def *cache, ioreq_event *flushreq, cache_event *writedesc, int reqdone)
{
cache_event *tmp = cache->partwrites;
/* Find the tracking entry for this request, if one exists yet. */
while ((tmp) && (tmp->req != writedesc->req)) {
tmp = tmp->next;
}
if (tmp == NULL) {
/* partial write sync must have been initiated if it is done */
ASSERT(!reqdone);
/* First portion: create the tracking entry (see field overloads above). */
tmp = (cache_event *) getfromextraq();
tmp->req = writedesc->req;
tmp->locktype = writedesc->req->blkno;
tmp->lockstop = writedesc->req->bcount;
tmp->next = cache->partwrites;
tmp->prev = NULL;
if (tmp->next) {
tmp->next->prev = tmp;
}
cache->partwrites = tmp;
}
if (reqdone) {
/* Release locks on the portion just completed. */
tmp->req->bcount = tmp->accblkno - flushreq->blkno;
tmp->req->blkno = flushreq->blkno;
tmp->req->type = 0;
cachemem_free_block_clean(cache, tmp->req);
if (tmp->accblkno >= (tmp->locktype + tmp->lockstop)) {
/* Whole request destaged: unlink tracking entry, restore the
original blkno/bcount, and deliver the completion callback. */
if (tmp->prev) {
tmp->prev->next = tmp->next;
} else {
cache->partwrites = tmp->next;
}
if (tmp->next) {
tmp->next->prev = tmp->prev;
}
tmp->req->blkno = tmp->locktype;
tmp->req->bcount = tmp->lockstop;
(*writedesc->donefunc)(writedesc->doneparam, tmp->req);
addtoextraq((event *) tmp);
} else {
/* More to do: re-issue the remainder via free_block_dirty. */
tmp->req->bcount = tmp->locktype + tmp->lockstop - tmp->accblkno;
tmp->req->blkno = tmp->accblkno;
cache->linebylinetmp = 1;
cachemem_free_block_dirty(cache, tmp->req, writedesc->donefunc, writedesc->doneparam);
}
addtoextraq((event *) writedesc);
} else {
writedesc->type = CACHE_EVENT_SYNCPART;
/* Record how far this portion reaches. */
tmp->accblkno = flushreq->blkno + flushreq->bcount;
}
}
/* a delayed write - set dirty bits, remove locks and update lru. */
/* If cache doesn't allow delayed writes, forward this to async */
/* Public entry point: releases a request's blocks after they were written
(dirtied).  For a write-back cache this sets the dirty bits, frees the
write locks, and returns 0 immediately.  For write-through schemes (or
a size-0 cache) it issues the destage I/O first, possibly line by line;
returns 1 when the caller must wait for the destage to finish before
donefunc is invoked (synchronous write-through), 0 otherwise. */
int cachemem_free_block_dirty (cache_def *cache, ioreq_event *req, void (**donefunc)(void *, ioreq_event *), void *doneparam)
{
cache_atom *line = NULL;
ioreq_event *flushreq = 0;
cache_event *writedesc = 0;
int lockgran = 0;   /* accumulates the number of atoms unlocked/locked */
int flushblkno = req->blkno;
int flushbcount = req->bcount;
/* Set when resuming a line-by-line destage (flag passed via the cache). */
int linebyline = cache->linebylinetmp;
int i;
int writethru = (cache->size == 0) || (cache->writescheme != CACHE_WRITE_BACK);
// fprintf (outputfile, "%.5f, Entered cache_free_block_dirty: blkno %d, size %d, writethru %d\n", simtime, req->blkno, req->bcount, writethru);
cache->linebylinetmp = 0;
cache->stat.freeblockdirtys++;
if (writethru) {
/* Write-through: build the tracking event and the destage request. */
writedesc = (cache_event *) getfromextraq();
writedesc->type = CACHE_EVENT_SYNC;
writedesc->donefunc = donefunc;
writedesc->doneparam = doneparam;
writedesc->req = req;
req->type = IO_REQUEST_ARRIVE;
req->next = NULL;
req->prev = NULL;
flushreq = ioreq_copy(req);
flushreq->type = IO_ACCESS_ARRIVE;
flushreq->buf = cache;
}
if (cache->size == 0) {
/* No cache at all: forward the write straight to the device. */
cache->stat.destagewrites++;
cache->stat.destagewriteatoms += flushreq->bcount;
cache_waitfor_IO(cache, 1, writedesc, flushreq);
(*cache->issuefunc)(cache->issueparam, flushreq);
return(1);
}
// fprintf (outputfile, "flushblkno %d, reqblkno %d, atomsperbit %d\n", flushblkno, req->blkno, cache->atomsperbit);
/* Expand the flush range to whole atomsperbit-aligned units. */
flushblkno -= (req->blkno % cache->atomsperbit);
flushbcount += (req->blkno % cache->atomsperbit);
i = flushblkno + flushbcount;
flushbcount += rounduptomult(i, cache->atomsperbit) - i;
// fprintf (outputfile, "in free_block_dirty: flushblkno %d, flushsize %d\n", flushblkno, flushbcount);
for (i=0; i<flushbcount; i++) {
if (line == NULL) {
/* At a line boundary: truncate the destage here if the cache is
configured line-by-line or the lines are not physically adjacent. */
if ((lockgran) && (writethru) && ((cache->write_line_by_line) || (!cache_concatok(cache, flushblkno, 1, (flushblkno+1), i)))) {
flushbcount = i;
linebyline = 1;
break;
}
line = cache_find_atom(cache, req->devno, (flushblkno + i));
/* dirtied space must be allocated */
ASSERT(line != NULL);
cache_access(cache, line);
}
if (!writethru) {
line->busno = req->busno;
line->slotno = req->slotno;
}
/* Write-back leaves the atom dirty; write-through marks it valid only. */
line->state |= (writethru) ? CACHE_VALID : (CACHE_VALID|CACHE_DIRTY);
if (((line->lbn % cache->lockgran) != (cache->lockgran-1)) && (i != (flushbcount-1))) {
/* Interior of a lock unit: nothing to do until the unit's end. */
} else if (writethru) {
/* Demote write lock to read lock for the duration of the destage. */
lockgran += cache_get_read_lock(cache, line, writedesc);
} else {
lockgran += cache_free_write_lock(cache, line, req);
}
line = line->line_next;
}
/* locks must be held over entire space */
ASSERT2((lockgran >= flushbcount), "lockgran", lockgran, "flushbcount", flushbcount);
if (writethru) {
cache->stat.destagewrites++;
cache->stat.destagewriteatoms += flushbcount;
flushreq->blkno = flushblkno;
flushreq->bcount = flushbcount;
if (linebyline) {
/* Register this portion with the partial-write tracking list. */
cache_write_line_by_line(cache, flushreq, writedesc, 0);
}
cache_waitfor_IO(cache, 1, writedesc, flushreq);
// fprintf (outputfile, "Issueing dirty block flush: writedesc %p, req %p, blkno %d, bcount %d, devno %d\n", writedesc, writedesc->req, flushreq->blkno, flushreq->bcount, flushreq->devno);
(*cache->issuefunc)(cache->issueparam, flushreq);
if (cache->writescheme == CACHE_WRITE_SYNCONLY) {
return(1);
} else {
/* Async write-through: complete the caller now and let the destage
finish in the background with an empty done function. */
/* Assuming that it is safe to touch it after call to cache_waitfor_IO */
req->type = -1;
writedesc->donefunc = &disksim->donefunc_cachemem_empty;
req = ioreq_copy(req);
}
} else if (cache->flush_idledelay >= 0.0) {
/* Write-back: re-arm the idle detector so dirty data gets flushed. */
ioqueue_reset_idledetecter((*cache->queuefind)(cache->queuefindparam, req->devno), 0);
}
(*donefunc)(doneparam, req);
return(0);
}
/*
 * Synchronize the memory cache.  This implementation performs no work
 * and always reports success (returns 0).
 */
int cachemem_sync (cache_def *cache)
{
   (void) cache;   /* parameter unused in this cache model */
   return 0;
}
/*
 * Called when a disk access issued on behalf of the cache completes.
 * Finds the cache_event on cache->IOwaiters that was waiting for this
 * access (matched by devno and block number), unlinks it from the
 * waiter list, and performs type-specific completion handling.
 *
 * Returns the cache_event that the caller should continue processing
 * (via the wakeup path), or NULL if the completion was fully handled
 * here.  `curr` is recycled at the end unless it was re-queued on a
 * SYNCPART event (in which case it is set to NULL above).
 */
struct cacheevent *cachemem_disk_access_complete (cache_def *cache, ioreq_event *curr)
{
   ioreq_event *req;
   cache_event *tmp = cache->IOwaiters;

   /* Scan each waiting cache_event and each request chained off it.
      A waiter matches when devnos agree and either (a) the completed
      access's blkno equals the blkno recorded at issue time
      (tmp->accblkno), or (b) accblkno is -1 (multi-request or sync
      waiter) and the access's blkno equals this request's own blkno. */
   while (tmp) {
      req = tmp->req;
      while (req) {
         if ((curr->devno == req->devno) && ((curr->blkno == tmp->accblkno) || ((tmp->accblkno == -1) && ((req->next) || (tmp->type == CACHE_EVENT_SYNC) || (tmp->type == CACHE_EVENT_IDLESYNC)) && (curr->blkno == req->blkno)))) {
            goto completed_access;
         }
         req = req->next;
      }
      tmp = tmp->next;
   }

completed_access:
   /* Every completing disk access must have a registered waiter. */
   if (tmp == NULL) {
      fprintf(stderr, "Not yet supporting non-waited for disk accesses in cache\n");
      exit(1);
   }

   /* Unlink the matched waiter from the doubly-linked IOwaiters list. */
   if (tmp->prev) {
      tmp->prev->next = tmp->next;
   } else {
      cache->IOwaiters = tmp->next;
   }
   if (tmp->next) {
      tmp->next->prev = tmp->prev;
   }

   if ((cache->size == 0) || (tmp->type == CACHE_EVENT_SYNC) || (tmp->type == CACHE_EVENT_IDLESYNC)) {
      /* Null cache (size 0) or background flush: this request is done.
         Unchain it, clear its cache residency, and notify the owner. */
      int type = req->type;
      if (req->next) {
         req->next->prev = req->prev;
      }
      if (req->prev) {
         req->prev->next = req->next;
      } else {
         tmp->req = req->next;
      }
      req->type = 0;
      cachemem_free_block_clean(cache, req);
      req->type = type;
      /* type == -1 marks an internal request with no owner to notify. */
      if (type != -1) {
         (*tmp->donefunc)(tmp->doneparam, req);
      }
      if (tmp->req) {
         /* More requests remain on this waiter; re-arm it for the next
            I/O (accblkno -1 means "several outstanding blocks"). */
         cache_waitfor_IO(cache, 1, tmp, tmp->req);
         tmp->accblkno = (tmp->req->next) ? -1 : tmp->req->blkno;
         tmp = (type == -1) ? (cache_event *) event_copy((event *)tmp) : NULL;
      } else {
         if (tmp->type == CACHE_EVENT_IDLESYNC) {
            /* Idle-time flush finished; look for further idle work. */
            cache_idletime_detected(cache, curr->devno);
         }
         if (type != -1) {
            addtoextraq((event *) tmp);
            tmp = NULL;
         }
      }
      if (type == -1) {
         /* Hand the internal request back on the (kept/copied) event so
            the caller can continue processing it. */
         tmp->req = req;
      }
   } else if (tmp->type == CACHE_EVENT_READ) {
      /* Nothing to do here; continuation runs via the wakeup path. */
   } else if (tmp->type == CACHE_EVENT_WRITE) {
      /* Nothing to do here; continuation runs via the wakeup path. */
   } else if (tmp->type == CACHE_EVENT_SYNCPART) {
      /* Partial-line flush: queue the completed access onto the event
         and keep `curr` alive (skip the recycle below). */
      curr->next = tmp->req;
      tmp->req = curr;
      curr = NULL;
   } else if (tmp->type == CACHE_EVENT_ALLOCATE) {
      /* Must be a replacement-induced write-back */
      /* NOTE(review): req->next is dereferenced unconditionally here,
         i.e. the write-back request is assumed never to be last on the
         chain — confirm against the allocate path. */
      req->next->prev = req->prev;
      if (req->prev) {
         req->prev->next = req->next;
      } else {
         tmp->req = req->next;
      }
      req->type = 0;
      cachemem_free_block_clean(cache, req);
      addtoextraq((event *) req);
      if (tmp->req != tmp->waitees->req) {
         /* More write-backs outstanding; keep waiting. */
         cache_waitfor_IO(cache, 1, tmp, tmp->req);
         tmp->accblkno = -1;
         tmp = NULL;
      }
   } else if (tmp->type == CACHE_EVENT_READEXTRA) {
      /* A read with piggybacked prefetch completed: drop back to a
         plain read and release locks held for the prefetched blocks. */
      tmp->type = CACHE_EVENT_READ;
      cache_unlock_attached_prefetch(cache, tmp);
   } else if (tmp->type == CACHE_EVENT_WRITEFILLEXTRA) {
      /* Same for a write-fill with piggybacked prefetch. */
      tmp->type = CACHE_EVENT_WRITE;
      cache_unlock_attached_prefetch(cache, tmp);
   } else {
      fprintf(stderr, "Unknown type at cache_disk_access_complete: %d\n", tmp->type);
      exit(1);
   }
   addtoextraq((event *) curr);
   return(tmp);
}
/*
 * Resume a cache event whose wait (for locks, space, or I/O) has been
 * satisfied, dispatching on the event type.  Unknown types are fatal.
 */
void cachemem_wakeup_complete (cache_def *cache, cache_event *desc)
{
   switch (desc->type) {
   case CACHE_EVENT_READ:
      cache_read_continue(cache, desc);
      break;
   case CACHE_EVENT_WRITE:
      cache_write_continue(cache, desc);
      break;
   case CACHE_EVENT_ALLOCATE:
      cache_allocate_space_continue(cache, desc);
      break;
   case CACHE_EVENT_SYNC:
      /* Flush finished: notify the owner and recycle the event. */
      (*desc->donefunc)(desc->doneparam, desc->req);
      addtoextraq((event *) desc);
      break;
   case CACHE_EVENT_SYNCPART: {
      /* Line-by-line flush: pop the next request off the chain and
         continue flushing with it; recycle the one just handled. */
      ioreq_event *flushreq = desc->req;
      desc->req = flushreq->next;
      cache_write_line_by_line(cache, flushreq, desc, 1);
      addtoextraq((event *) flushreq);
      break;
   }
   default:
      fprintf(stderr, "Unknown event type in cache_wakeup_complete: %d\n", desc->type);
      assert(0);
      exit(1);
   }
}
/*
 * Reset every per-run statistics counter kept by the cache to zero.
 * Called at initialization and whenever statistics are restarted.
 */
void cachemem_resetstats (cache_def *cache)
{
   /* Read traffic. */
   cache->stat.reads = cache->stat.readatoms = 0;
   cache->stat.readhitsfull = cache->stat.readhitsfront = 0;
   cache->stat.readhitsback = cache->stat.readhitsmiddle = 0;
   cache->stat.readmisses = 0;
   cache->stat.fillreads = cache->stat.fillreadatoms = 0;

   /* Write traffic. */
   cache->stat.writes = cache->stat.writeatoms = 0;
   cache->stat.writehitsclean = cache->stat.writehitsdirty = 0;
   cache->stat.writemisses = 0;
   cache->stat.writeinducedfills = cache->stat.writeinducedfillatoms = 0;

   /* Destage (write-back) traffic. */
   cache->stat.destagewrites = cache->stat.destagewriteatoms = 0;

   /* get_block / free_block bookkeeping (debug counters). */
   cache->stat.getblockreadstarts = cache->stat.getblockreaddones = 0;
   cache->stat.getblockwritestarts = cache->stat.getblockwritedones = 0;
   cache->stat.freeblockcleans = cache->stat.freeblockdirtys = 0;
}
/*
 * Register the cachemem callback hooks in the global disksim state so
 * other modules can invoke them indirectly.
 */
void cachemem_setcallbacks ()
{
   /* Completion callback for internally generated (ownerless) requests. */
   disksim->donefunc_cachemem_empty = cache_empty_donefunc;

   /* Idle-time hook used to trigger background flushing. */
   disksim->idlework_cachemem = cache_idletime_detected;

   /* Queue concatenation-permission check. */
   disksim->concatok_cachemem = cache_concatok;

   /* Periodic-flush timer callback. */
   disksim->timerfunc_cachemem = cache_periodic_flush;
}
/*
 * Initialize a memory cache: record the callback hooks, empty all LRU
 * segments onto the free list (allocating the atom pool on first use),
 * arm the periodic-flush timer if configured, attach per-device idle
 * work functions, and zero the statistics.
 */
void cachemem_initialize (cache_def *cache, void (**issuefunc)(void *,ioreq_event *), void *issueparam, struct ioq * (**queuefind)(void *,int), void *queuefindparam, void (**wakeupfunc)(void *,struct cacheevent *), void *wakeupparam, int numdevs)
{
   int i, j;
   cache_atom *tmp;

   /* Cache structures are stored in recycled event buffers, so each
      must fit within DISKSIM_EVENT_SIZE. */
   StaticAssert (sizeof(cache_atom) <= DISKSIM_EVENT_SIZE);
   StaticAssert (sizeof(cache_event) <= DISKSIM_EVENT_SIZE);
   StaticAssert (sizeof(cache_lockholders) <= DISKSIM_EVENT_SIZE);
   StaticAssert (sizeof(cache_lockholders) == sizeof(cache_lockwaiters));

   /* Record callbacks and clear runtime state. */
   cache->issuefunc = issuefunc;
   cache->issueparam = issueparam;
   cache->queuefind = queuefind;
   cache->queuefindparam = queuefindparam;
   cache->wakeupfunc = wakeupfunc;
   cache->wakeupparam = wakeupparam;
   cache->IOwaiters = NULL;
   cache->partwrites = NULL;
   cache->linewaiters = NULL;
   cache->linebylinetmp = 0;

   /* Clear the hash table of resident atoms. */
   for (i=0; i<CACHE_HASHSIZE; i++) {
      cache->hash[i] = 0;
   }

   for (j=0; j<(cache->mapmask+1); j++) {
      cache_mapentry *mapentry = &cache->map[j];
      /* Drain every LRU segment back onto the free list (CACHE_SEGNUM). */
      for (i=0; i<CACHE_MAXSEGMENTS; i++) {
         while ((tmp = mapentry->lru[i])) {
            cache_remove_from_lrulist(mapentry, tmp, i);
            cache_add_to_lrulist(mapentry, tmp, CACHE_SEGNUM);
         }
         mapentry->numactive[i] = 0;
      }
      if (mapentry->freelist) {
         /* reset all valid bits */
      } else {
         /* First-time initialization: carve this map's share of the
            cache into line-linked groups of atoms and free-list them. */
         int atomcnt = cache->size / (cache->mapmask+1);
         int linesize = (cache->linesize) ? cache->linesize : 1;
         tmp = NULL;
         for (i=0; i<atomcnt; i++) {
            cache_atom *newatom = (cache_atom *) getfromextraq();
            memset((char *) newatom, 0, DISKSIM_EVENT_SIZE);
            if (i % linesize) {
               /* Continue the current line: prepend the new atom. */
               newatom->line_next = tmp;
               tmp->line_prev = newatom;
            } else {
               /* Line boundary: release the completed line, if any. */
               if (tmp) {
                  cache_add_to_lrulist(mapentry, tmp, CACHE_SEGNUM);
               }
            }
            tmp = newatom;
         }
         if (tmp) {
            /* Release the final line. */
            cache_add_to_lrulist(mapentry, tmp, CACHE_SEGNUM);
         }
      }
   }

   /* Arm the first tick of the periodic flush timer, if configured. */
   if (cache->flush_policy == CACHE_FLUSH_PERIODIC) {
      timer_event *timereq = (timer_event *) getfromextraq();
      timereq->type = TIMER_EXPIRED;
      timereq->func = &disksim->timerfunc_cachemem;
      timereq->time = cache->flush_period;
      timereq->ptr = cache;
      addtointq((event *)timereq);
   }

   for (i=0; i<numdevs; i++) {
      struct ioq *queue = (*queuefind)(queuefindparam, i);
      if (cache->flush_idledelay >= 0.0) {
         ioqueue_set_idlework_function(queue, &disksim->idlework_cachemem, cache, cache->flush_idledelay);
      }
      /* NOTE(review): this condition can never be true -- `cache` has
         already been dereferenced above -- so the concatok callback is
         never installed.  Looks like an inverted or mistyped test;
         confirm intended behavior before changing. */
      if (cache == NULL) {
         ioqueue_set_concatok_function(queue, &disksim->concatok_cachemem, cache);
      }
   }

   cachemem_resetstats(cache);
}
/*
 * Statistics-cleanup hook for the cache interface.  Intentionally a
 * no-op: the memory cache keeps only plain integer counters, which
 * require no teardown.
 */
void cachemem_cleanstats (cache_def *cache)
{
}
/*
 * Print the cache's accumulated statistics to `outputfile`, each line
 * prefixed with `prefix`.  Ratios are reported relative to total
 * requests/atoms and to the read or write subtotals.  Prints only the
 * request count and returns early when no requests were observed.
 *
 * NOTE(review): if reqs > 0 but atoms == 0 (or a subtotal is 0), the
 * ratio divisions are by zero; with IEEE doubles this prints inf/nan
 * rather than crashing -- confirm whether that combination can occur.
 */
void cachemem_printstats (cache_def *cache, char *prefix)
{
   int reqs = cache->stat.reads + cache->stat.writes;
   int atoms = cache->stat.readatoms + cache->stat.writeatoms;

   fprintf (outputfile, "%scache requests: %6d\n", prefix, reqs);
   if (reqs == 0) {
      return;
   }

   /* Read-side statistics. */
   fprintf (outputfile, "%scache read requests: %6d \t%6.4f\n", prefix, cache->stat.reads, ((double) cache->stat.reads / (double) reqs));
   if (cache->stat.reads) {
      fprintf(outputfile, "%scache atoms read: %6d \t%6.4f\n", prefix, cache->stat.readatoms, ((double) cache->stat.readatoms / (double) atoms));
      fprintf(outputfile, "%scache read misses: %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.readmisses, ((double) cache->stat.readmisses / (double) reqs), ((double) cache->stat.readmisses / (double) cache->stat.reads));
      fprintf(outputfile, "%scache read full hits: %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.readhitsfull, ((double) cache->stat.readhitsfull / (double) reqs), ((double) cache->stat.readhitsfull / (double) cache->stat.reads));
      fprintf(outputfile, "%scache fills (read): %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.fillreads, ((double) cache->stat.fillreads / (double) reqs), ((double) cache->stat.fillreads / (double) cache->stat.reads));
      fprintf(outputfile, "%scache atom fills (read): %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.fillreadatoms, ((double) cache->stat.fillreadatoms / (double) atoms), ((double) cache->stat.fillreadatoms / (double) cache->stat.readatoms));
   }

   /* Write-side statistics. */
   fprintf(outputfile, "%scache write requests: %6d \t%6.4f\n", prefix, cache->stat.writes, ((double) cache->stat.writes / (double) reqs));
   if (cache->stat.writes) {
      fprintf(outputfile, "%scache atoms written: %6d \t%6.4f\n", prefix, cache->stat.writeatoms, ((double) cache->stat.writeatoms / (double) atoms));
      fprintf(outputfile, "%scache write misses: %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.writemisses, ((double) cache->stat.writemisses / (double) reqs), ((double) cache->stat.writemisses / (double) cache->stat.writes));
      fprintf(outputfile, "%scache write hits (clean): %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.writehitsclean, ((double) cache->stat.writehitsclean / (double) reqs), ((double) cache->stat.writehitsclean / (double) cache->stat.writes));
      fprintf(outputfile, "%scache write hits (dirty): %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.writehitsdirty, ((double) cache->stat.writehitsdirty / (double) reqs), ((double) cache->stat.writehitsdirty / (double) cache->stat.writes));
      fprintf(outputfile, "%scache fills (write): %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.writeinducedfills, ((double)cache->stat.writeinducedfills / (double) reqs), ((double) cache->stat.writeinducedfills / (double) cache->stat.writes));
      fprintf(outputfile, "%scache atom fills (write): %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.writeinducedfillatoms, ((double)cache->stat.writeinducedfillatoms / (double) atoms), ((double) cache->stat.writeinducedfillatoms / (double) cache->stat.writeatoms));
      fprintf(outputfile, "%scache destages (write): %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.destagewrites, ((double) cache->stat.destagewrites / (double) reqs), ((double) cache->stat.destagewrites / (double) cache->stat.writes));
      fprintf(outputfile, "%scache atom destages (write): %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.destagewriteatoms, ((double) cache->stat.destagewriteatoms / (double) atoms), ((double) cache->stat.destagewriteatoms / (double) cache->stat.writeatoms));
      fprintf(outputfile, "%scache end dirty atoms: %6d \t%6.4f\n", prefix, cache_count_dirty_atoms(cache), ((double) cache_count_dirty_atoms(cache) / (double) cache->stat.writeatoms));
   }
#if 0 /* extra info that is helpful when debugging */
   fprintf(outputfile, "%scache get_block starts (read): %6d\n", prefix, cache->stat.getblockreadstarts);
   fprintf(outputfile, "%scache get_block dones (read): %6d\n", prefix, cache->stat.getblockreaddones);
   fprintf(outputfile, "%scache get_block starts (write): %6d\n", prefix, cache->stat.getblockwritestarts);
   fprintf(outputfile, "%scache get_block dones (write): %6d\n", prefix, cache->stat.getblockwritedones);
   fprintf(outputfile, "%scache free_block_cleans: %6d\n", prefix, cache->stat.freeblockcleans);
   fprintf(outputfile, "%scache free_block_dirtys: %6d\n", prefix, cache->stat.freeblockdirtys);
#endif
}
/*
 * Create a new cache_def that duplicates `cache`'s configuration
 * (sizes, policies, callbacks) but none of its runtime contents: the
 * new map entries start empty (no free list, no LRU contents).
 * Returns the newly allocated descriptor; the caller owns it.
 */
cache_def * cachemem_copy (cache_def *cache)
{
   int i, j;
   cache_mapentry *mapentry;
   cache_def *new;
   int nummaps = cache->mapmask + 1;   /* map[] holds mapmask+1 entries */

   new = (cache_def *) DISKSIM_malloc(sizeof(cache_def));
   /* BUG FIX: the original allocated a single cache_mapentry, but the
      loop below initializes nummaps of them -- a heap overrun whenever
      mapmask > 0.  Allocate the full array. */
   mapentry = (cache_mapentry *) DISKSIM_malloc(nummaps * sizeof(cache_mapentry));
   ASSERT((new != NULL) && (mapentry != NULL));
   bzero(new, sizeof(cache_def));
   bzero(mapentry, nummaps * sizeof(cache_mapentry));
   new->map = mapentry;

   /* Copy per-map segment limits; leave free lists and LRU lists empty. */
   for (i=0; i<nummaps; i++) {
      mapentry[i].freelist = NULL;
      for (j=0; j<CACHE_MAXSEGMENTS; j++) {
         mapentry[i].maxactive[j] = cache->map[i].maxactive[j];
         mapentry[i].lru[j] = NULL;
      }
   }

   /* Copy configuration and callback fields verbatim. */
   new->issuefunc = cache->issuefunc;
   new->cachetype = cache->cachetype;
   new->issueparam = cache->issueparam;
   new->size = cache->size;
   new->atomsize = cache->atomsize;
   new->numsegs = cache->numsegs;
   new->linesize = cache->linesize;
   new->atomsperbit = cache->atomsperbit;
   new->lockgran = cache->lockgran;
   new->sharedreadlocks = cache->sharedreadlocks;
   new->maxreqsize = cache->maxreqsize;
   new->replacepolicy = cache->replacepolicy;
   new->mapmask = cache->mapmask;
   new->writescheme = cache->writescheme;
   new->flush_policy = cache->flush_policy;
   new->flush_period = cache->flush_period;
   new->flush_idledelay = cache->flush_idledelay;
   new->flush_maxlinecluster = cache->flush_maxlinecluster;
   new->read_prefetch_type = cache->read_prefetch_type;
   new->writefill_prefetch_type = cache->writefill_prefetch_type;
   new->prefetch_waitfor_locks = cache->prefetch_waitfor_locks;
   new->startallflushes = cache->startallflushes;
   new->allocatepolicy = cache->allocatepolicy;
   new->read_line_by_line = cache->read_line_by_line;
   new->write_line_by_line = cache->write_line_by_line;
   new->maxscatgath = cache->maxscatgath;
   new->no_write_allocate = cache->no_write_allocate;
   return(new);
}
/*
 * Configure the cache's LRU segment sizes from the parameter list `l`.
 * Each (non-null) list entry but the last gives the fraction of the
 * total cache size for one segment; the final segment receives the
 * remainder.  Allocates and initializes result->map (mapmask is 0, so
 * the map array has exactly one entry, replicated by the final loop).
 */
void cachemem_setup_segs(cache_def *result, struct lp_list *l) {
   int rem, c, d;
   double maxsegfrac;

   rem = result->size;
   result->mapmask = 0;
   result->map =
      (cache_mapentry *) DISKSIM_malloc((result->mapmask+1) *
                                        sizeof(cache_mapentry));
   ASSERT(result->map != NULL);
   bzero(result->map, (result->mapmask+1)*sizeof(cache_mapentry));

   /* Count the configured segments. */
   for (c = 0; c < l->values_len; c++) {
      if(l->values[c]) result->numsegs++;
   }

   /* All entries except the last give explicit size fractions. */
   d = 0;
   for (c = 0; c < (l->values_len - 1); c++) {
      if(!l->values[c]) continue;
      assert(l->values[c]->t == D);
      maxsegfrac = l->values[c]->v.d;
      result->map[0].maxactive[d] = (int)((double) result->size * maxsegfrac);
      rem -= result->map[0].maxactive[d];
      d++;
   }

   /* Sum of maximum segment sizes must not exceed cache size */
   ASSERT((result->size == 0) || (rem >= 0));

   /* The last segment gets whatever space remains.
      BUG FIX: the remainder belongs in map[0].maxactive[numsegs-1];
      the original wrote map[numsegs-1].maxactive[0], which indexes past
      the end of the one-entry map array whenever numsegs > 1 and leaves
      the last segment's limit at zero. */
   if(result->numsegs > 1)
      result->map[0].maxactive[result->numsegs - 1] = rem;

   /* Replicate map[0]'s segment limits across all map entries. */
   for (c=0; c<(result->mapmask+1); c++) {
      result->map[c].freelist = NULL;
      for (d=0; d<CACHE_MAXSEGMENTS; d++) {
         result->map[c].maxactive[d] = result->map[0].maxactive[d];
         result->map[c].lru[d] = NULL;
      }
   }
}
/*
 * Parse a memory-cache specification block `b` into a freshly
 * allocated cache_def.  Returns the new descriptor, or 0 on allocation
 * failure or invalid parameters (errors are reported on stderr).
 */
struct cache_def *disksim_cachemem_loadparams(struct lp_block *b)
{
   struct cache_def *result;

   result = malloc(sizeof(struct cache_def));
   if(!result) return 0;
   bzero(result, sizeof(struct cache_def));

   result->cachetype = CACHE_MEMORY;
   result->name = b->name ? strdup(b->name) : 0;

   /* Generated parameter-parsing code fills in the configurable fields. */
#include "modules/disksim_cachemem_param.c"

   /* Valid/dirty bit granularity must divide evenly into line size.
      (A non-positive granularity would make the modulo below undefined,
      so reject it on the same path.) */
   if(result->atomsperbit <= 0 || (result->linesize % result->atomsperbit)) {
      fprintf(stderr, "*** error: memcache: Valid/dirty bit granularity must divide evenly into line size.\n");
      goto fail;   /* BUG FIX: the original leaked `result` (and its name) here */
   }
   /* Lock granularity must divide evenly into line size */
   if(result->lockgran <= 0 || (result->linesize % result->lockgran)) {
      fprintf(stderr, "*** error: memcache: Lock granularity must divide evenly into line size.\n");
      goto fail;
   }

   /* should these be configurable?? */
   result->prefetch_waitfor_locks = FALSE;
   result->atomsize = 1;
   result->startallflushes = TRUE;
   result->no_write_allocate = FALSE;
   return result;

fail:
   free(result->name);   /* free(NULL) is a no-op */
   free(result);
   return 0;
}
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
1
https://gitee.com/wjh731/disksim_original.git
git@gitee.com:wjh731/disksim_original.git
wjh731
disksim_original
disksim_original
master

搜索帮助