/*
* DiskSim Storage Subsystem Simulation Environment (Version 3.0)
* Revision Authors: John Bucy, Greg Ganger
* Contributors: John Griffin, Jiri Schindler, Steve Schlosser
*
* Copyright (c) of Carnegie Mellon University, 2001, 2002, 2003.
*
* This software is being provided by the copyright holders under the
* following license. By obtaining, using and/or copying this software,
* you agree that you have read, understood, and will comply with the
* following terms and conditions:
*
* Permission to reproduce, use, and prepare derivative works of this
* software is granted provided the copyright and "No Warranty" statements
* are included with all reproductions and derivative works and associated
* documentation. This software may also be redistributed without charge
* provided that the copyright and "No Warranty" statements are included
* in all redistributions.
*
* NO WARRANTY. THIS SOFTWARE IS FURNISHED ON AN "AS IS" BASIS.
* CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER
* EXPRESSED OR IMPLIED AS TO THE MATTER INCLUDING, BUT NOT LIMITED
* TO: WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY
* OF RESULTS OR RESULTS OBTAINED FROM USE OF THIS SOFTWARE. CARNEGIE
* MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT
* TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
* COPYRIGHT HOLDERS WILL BEAR NO LIABILITY FOR ANY USE OF THIS SOFTWARE
* OR DOCUMENTATION.
*
*/
/*
* DiskSim Storage Subsystem Simulation Environment (Version 2.0)
* Revision Authors: Greg Ganger
* Contributors: Ross Cohen, John Griffin, Steve Schlosser
*
* Copyright (c) of Carnegie Mellon University, 1999.
*
* Permission to reproduce, use, and prepare derivative works of
* this software for internal use is granted provided the copyright
* and "No Warranty" statements are included with all reproductions
* and derivative works. This software may also be redistributed
* without charge provided that the copyright and "No Warranty"
* statements are included in all redistributions.
*
* NO WARRANTY. THIS SOFTWARE IS FURNISHED ON AN "AS IS" BASIS.
* CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER
* EXPRESSED OR IMPLIED AS TO THE MATTER INCLUDING, BUT NOT LIMITED
* TO: WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY
* OF RESULTS OR RESULTS OBTAINED FROM USE OF THIS SOFTWARE. CARNEGIE
* MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT
* TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
*/
/*
* DiskSim Storage Subsystem Simulation Environment
* Authors: Greg Ganger, Bruce Worthington, Yale Patt
*
* Copyright (C) 1993, 1995, 1997 The Regents of the University of Michigan
*
* This software is being provided by the copyright holders under the
* following license. By obtaining, using and/or copying this software,
* you agree that you have read, understood, and will comply with the
* following terms and conditions:
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose and without fee or royalty is
* hereby granted, provided that the full text of this NOTICE appears on
* ALL copies of the software and documentation or portions thereof,
* including modifications, that you make.
*
* THIS SOFTWARE IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS MAKE NO
* REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE,
* BUT NOT LIMITATION, COPYRIGHT HOLDERS MAKE NO REPRESENTATIONS OR
* WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR
* THAT THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY
* THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS. COPYRIGHT
* HOLDERS WILL BEAR NO LIABILITY FOR ANY USE OF THIS SOFTWARE OR
* DOCUMENTATION.
*
* This software is provided AS IS, WITHOUT REPRESENTATION FROM THE
* UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY PURPOSE, AND
* WITHOUT WARRANTY BY THE UNIVERSITY OF MICHIGAN OF ANY KIND, EITHER
* EXPRESSED OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE REGENTS
* OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE FOR ANY DAMAGES,
* INCLUDING SPECIAL , INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
* WITH RESPECT TO ANY CLAIM ARISING OUT OF OR IN CONNECTION WITH THE
* USE OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN IF IT HAS
* BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* The names and trademarks of copyright holders or authors may NOT be
* used in advertising or publicity pertaining to the software without
* specific, written prior permission. Title to copyright in this software
* and any associated documentation will at all times remain with copyright
* holders.
*/
/***************************************************************************
This is a fairly simple module for managing a device that is being used
as a cache for another device. It lacks many of the features that a real
device-caching device would have:
1. No actual "cache"-iness. That is, it simply maps given locations
to the same locations on the cache device. This limits the maximum
amount of real-device space that can be cached, and misses
opportunities for creating cache-device locality where none
exists in the original workload.
(The //youkim modifications in this file replace this identity
mapping with a small disk-page-to-flash-page remapping table.)
2. No prefetching.
3. No optimization. For example, see #1. Also, no grouping of
writes to the cache device, and no falling back to the original
device when the cache device is busy.
4. No locking. Specifically, requests for the same space can be in
progress in parallel. This could cause inconsistencies, given
unfortunate sequencing.
5. No buffer space limitations. Specifically, there is no limit on
the amount of buffer space used at once for managing the cache
and real devices.
***************************************************************************/
/* shouldn't we be able to #include disksim_cache.h here?? */
#define CACHE_DEVICE 2
#include "disksim_global.h"
#include "disksim_iosim.h"
#include "disksim_ioqueue.h"
#include "disksim_cachedev.h"
#include "config.h"
/* cache event types */
#define CACHE_EVENT_IOREQ 0
#define CACHE_EVENT_READ 1
#define CACHE_EVENT_POPULATE_ONLY 2
#define CACHE_EVENT_POPULATE_ALSO 3
#define CACHE_EVENT_WRITE 4
#define CACHE_EVENT_FLUSH 5
#define CACHE_EVENT_IDLEFLUSH_READ 6
#define CACHE_EVENT_IDLEFLUSH_FLUSH 7
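/*
 * Orientation note (summary of the code in cachedev_disk_access_complete()
 * below; not part of the original interface documentation):
 *
 *   READ            a read issued by cachedev_get_block finished; if it was
 *                   serviced by the real device, a populate write to the
 *                   cache device is issued and the type becomes POPULATE_ALSO
 *   POPULATE_ALSO   populate write in flight while the host still holds the
 *                   buffer; becomes POPULATE_ONLY once the host releases the
 *                   buffer, or back to READ if the populate write completes
 *                   first
 *   POPULATE_ONLY   populate write completed after the host released the
 *                   buffer; mark the blocks valid and free the descriptor
 *   WRITE           the write issued by cachedev_free_block_dirty finished;
 *                   if it went to the cache device, mark the blocks
 *                   valid+dirty and, under write-through, issue a FLUSH to
 *                   the real device
 *   FLUSH           write-through destage to the real device finished
 *   IDLEFLUSH_READ  idle-time destage: dirty blocks were read back from the
 *                   cache device; the next step is IDLEFLUSH_FLUSH
 *   IDLEFLUSH_FLUSH idle-time destage write to the real device finished
 */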
/* cache write schemes */
#define CACHE_WRITE_MIN 1
#define CACHE_WRITE_SYNCONLY 1
#define CACHE_WRITE_THRU 2
#define CACHE_WRITE_BACK 3
#define CACHE_WRITE_MAX 3
/* cache background flush types */
#define CACHE_FLUSH_MIN 0
#define CACHE_FLUSH_DEMANDONLY 0
#define CACHE_FLUSH_PERIODIC 1
#define CACHE_FLUSH_MAX 1
typedef struct cacheevent {
double time;
int type;
struct cacheevent *next;
struct cacheevent *prev;
void (**donefunc)(void *,ioreq_event *); /* Function to call when complete */
void *doneparam; /* parameter for donefunc */
int flags;
ioreq_event *req;
struct cacheevent *waitees;
int validpoint;
} cache_event;
typedef struct {
int reads;
int readblocks;
int readhitsfull;
int readmisses;
int popwrites;
int popwriteblocks;
int writes;
int writeblocks;
int writehitsfull;
int writemisses;
int destagereads;
int destagereadblocks;
int destagewrites;
int destagewriteblocks;
int maxbufferspace;
} cache_stats;
typedef struct cache_def {
int cachetype; /* all caches must start with an integer type */
void (**issuefunc)(void *,ioreq_event *); /* to issue a disk access */
void *issueparam; /* first param for issuefunc */
struct ioq * (**queuefind)(void *,int); /* to get ioqueue ptr for dev*/
void *queuefindparam; /* first param for queuefind */
void (**wakeupfunc)(void *,struct cacheevent *); /* to re-activate slept proc */
void *wakeupparam; /* first param for wakeupfunc */
int size; /* in 512B blks */
int cache_devno; /* device used for cache */
int real_devno; /* device for which cache is used */
int maxreqsize;
int writescheme;
int flush_policy;
double flush_period;
double flush_idledelay;
int bufferspace;
cache_event *ongoing_requests;
bitstr_t *validmap;
bitstr_t *dirtymap;
cache_stats stat;
char *name;
} cache_def;
//youkim: global remapping state for the flash cache device
int cache_size_in_pages; /* number of flash pages on the cache device */
int *map_lpn_disk_to_flash; /* indexed by flash LPN; holds the disk LPN cached there, or -1 if free */
#define PAGE_IN_SECT 4 /* 512B sectors per flash page (2KB pages) */
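/*
 * Worked example (illustrative only) of the sector/page arithmetic used in
 * cachedev_isreadhit(), cachedev_iswritehit() and call1() below, assuming
 * PAGE_IN_SECT = 4:
 *
 *   req->blkno = 13, req->bcount = 3
 *   s_lsn = (13 / 4) * 4            = 12   (round down to a page boundary)
 *   e_lsn = ((13 + 3 + 4) / 4) * 4  = 20   (one page past the request)
 *   cnt   = e_lsn - s_lsn           = 8, clamped to PAGE_IN_SECT = 4
 *   s_lpn = s_lsn / 4               = 3    (disk logical page number)
 *
 * The disk LPN (3 here) is then translated to a flash LPN through
 * map_lpn_disk_to_flash, and the flash-side start LSN is flash_lpn * 4.
 */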
int cachedev_get_cachetype (cache_def *cache)
{
ASSERT (cache != NULL);
return(cache->cachetype);
}
int cachedev_get_maxreqsize (cache_def *cache)
{
if (cache) {
return(cache->maxreqsize);
}
return(0);
}
static void cachedev_empty_donefunc (void *doneparam, ioreq_event *req)
{
addtoextraq((event *) req);
}
static void cachedev_add_ongoing_request (cache_def *cache, cache_event *cachereq)
{
cachereq->next = cache->ongoing_requests;
cachereq->prev = NULL;
if (cachereq->next) {
cachereq->next->prev = cachereq;
}
cache->ongoing_requests = cachereq;
}
static void cachedev_remove_ongoing_request (cache_def *cache, cache_event *cachereq)
{
if (cachereq->next) {
cachereq->next->prev = cachereq->prev;
}
if (cachereq->prev) {
cachereq->prev->next = cachereq->next;
}
if (cache->ongoing_requests == cachereq) {
cache->ongoing_requests = cachereq->next;
}
}
static cache_event * cachedev_find_ongoing_request (cache_def *cache, ioreq_event *req)
{
cache_event *tmp = cache->ongoing_requests;
/* Is this enough to ensure equivalence?? */
while ((tmp != NULL) && ((req->opid != tmp->req->opid) || (req->blkno != tmp->req->blkno) || (req->bcount != tmp->req->bcount) || (req->buf != tmp->req->buf))) {
tmp = tmp->next;
}
return (tmp);
}
/* Return a count of how many dirty blocks are on the cachedev. */
static int cachedev_count_dirty_blocks (cache_def *cache)
{
int dirtyblocks = 0;
int i;
for (i=0; i<cache->size; i++) {
if (bit_test(cache->dirtymap,i)) {
dirtyblocks++;
}
}
return (dirtyblocks);
}
static void cachedev_setbits (bitstr_t *bitmap, ioreq_event *req)
{
bit_nset (bitmap, req->blkno, (req->blkno+req->bcount-1));
}
static void cachedev_clearbits (bitstr_t *bitmap, ioreq_event *req)
{
bit_nclear (bitmap, req->blkno, (req->blkno+req->bcount-1));
}
//youkim: linear scan of the remapping table; returns the flash LPN that
//currently holds disk page `lpn`, or -1 if that disk page is not cached
int search_lpn_of_flash_to_lpn_of_disk(int lpn)
{
int i;
for(i = 0; i<cache_size_in_pages; i++){
if(map_lpn_disk_to_flash[i] == lpn)
return i;
}
return -1;
}
//youkim: linear scan for a free flash page; returns its flash LPN, or -1 if
//the cache device is full (the lpn argument is currently unused)
int find_hole(int lpn)
{
int i;
for(i = 0; i<cache_size_in_pages; i++){
if(map_lpn_disk_to_flash[i] == -1)
return i;
}
return -1;
}
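/*
 * Illustrative usage of the remapping table (hypothetical values; the table
 * is initialized to all -1 in cachedev_initialize() below):
 *
 *   int flash_lpn = find_hole(disk_lpn);             // e.g. returns 0 when the table is empty
 *   if (flash_lpn != -1) {
 *       map_lpn_disk_to_flash[flash_lpn] = disk_lpn;  // disk page now cached in that flash page
 *   }
 *   ...
 *   search_lpn_of_flash_to_lpn_of_disk(disk_lpn);     // returns flash_lpn
 *   search_lpn_of_flash_to_lpn_of_disk(other_lpn);    // returns -1 (not cached)
 */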
//youkim: extended to report, through start_lsn and cnt, the remapped location
//on the flash cache device that should service the (hit) read
//static int cachedev_isreadhit (cache_def *cache, ioreq_event *req)
static int cachedev_isreadhit (cache_def *cache, ioreq_event *req, int *start_lsn, int *cnt)
{
int lastblk = req->blkno + req->bcount;
int i;
/*
if (lastblk >= cache->size) {
return (0);
}
*/
//youkim
int s_lsn, e_lsn;
int s_lpn, e_lpn;
int s_lpn_of_flash;
int s_lsn_of_flash;
s_lsn = (req->blkno / PAGE_IN_SECT) * PAGE_IN_SECT;
e_lsn = ((req->blkno + req->bcount + PAGE_IN_SECT) / PAGE_IN_SECT) * PAGE_IN_SECT;
*cnt = e_lsn - s_lsn;
s_lpn = s_lsn / PAGE_IN_SECT;
e_lpn = e_lsn / PAGE_IN_SECT;
//youkim
//printf("req->blkno: %d, req->bcount: %d, s_lsn: %d, e_lsn: %d, s_lpn: %d, e_lpn: %d\n",
// req->blkno, req->bcount, s_lsn, e_lsn, s_lpn, e_lpn);
for (i=req->blkno; i<lastblk; i++) {
if (bit_test(cache->validmap,i) == 0) {
return (0);
}
}
//youkim
if(*cnt > PAGE_IN_SECT) *cnt = PAGE_IN_SECT;
s_lpn_of_flash = search_lpn_of_flash_to_lpn_of_disk(s_lpn);
if(s_lpn_of_flash == -1){
printf("youkim: mismatch in remapping table\n");
exit(0);
}
s_lsn_of_flash = s_lpn_of_flash * PAGE_IN_SECT;
*start_lsn = s_lsn_of_flash;
return (1);
}
//youkim: like cachedev_isreadhit, but a write "hit" simply means the target
//disk page already has a flash page assigned in the remapping table
//static int cachedev_iswritehit (cache_def *cache, ioreq_event *req)
static int cachedev_iswritehit (cache_def *cache, ioreq_event *req, int *start_lsn, int *cnt)
{
//youkim
/*
if ((req->blkno+req->bcount) >= cache->size) {
return (0);
}
*/
//youkim
int s_lsn, e_lsn;
int s_lpn, e_lpn;
int s_lpn_of_flash;
int s_lsn_of_flash;
s_lsn = (req->blkno / PAGE_IN_SECT) * PAGE_IN_SECT;
e_lsn = ((req->blkno + req->bcount + PAGE_IN_SECT) / PAGE_IN_SECT) * PAGE_IN_SECT;
*cnt = e_lsn - s_lsn;
s_lpn = s_lsn / PAGE_IN_SECT;
e_lpn = e_lsn / PAGE_IN_SECT;
//youkim
if(*cnt > PAGE_IN_SECT) *cnt = PAGE_IN_SECT;
s_lpn_of_flash = search_lpn_of_flash_to_lpn_of_disk(s_lpn);
if(s_lpn_of_flash == -1) return 0;
s_lsn_of_flash = s_lpn_of_flash * PAGE_IN_SECT;
*start_lsn = s_lsn_of_flash;
return (1);
}
static int cachedev_find_dirty_cache_blocks (cache_def *cache, int *blknoPtr, int *bcountPtr)
{
int blkno;
bit_ffs (cache->dirtymap, cache->size, blknoPtr);
if (*blknoPtr == -1) {
return (0);
}
blkno = *blknoPtr;
while (bit_test (cache->dirtymap, blkno) != 0) {
blkno++;
}
*bcountPtr = blkno - *blknoPtr;
return (1);
}
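/*
 * Example (illustrative): if the first dirty run in cache->dirtymap covers
 * cache-device blocks 10..13, then after
 *
 *   int blkno, bcount;
 *   if (cachedev_find_dirty_cache_blocks(cache, &blkno, &bcount)) { ... }
 *
 * blkno == 10 and bcount == 4, describing one contiguous extent that the
 * idle-time flush below reads from the cache device and later writes to the
 * real device.
 */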
static void cachedev_periodic_callback (timer_event *timereq)
{
fprintf (stderr, "cachedev_periodic_callback not yet supported\n");
ASSERT (0);
exit (0);
}
static void cachedev_idlework_callback (void *idleworkparam, int idledevno)
{
cache_def *cache = idleworkparam;
cache_event *flushdesc;
ioreq_event *flushreq;
int blkno, bcount;
struct ioq *queue;
ASSERT (idledevno == cache->real_devno);
queue = (*cache->queuefind)(cache->queuefindparam, cache->real_devno);
if (ioqueue_get_number_in_queue (queue) != 0) {
return;
}
queue = (*cache->queuefind)(cache->queuefindparam, cache->cache_devno);
if (ioqueue_get_number_in_queue (queue) != 0) {
return;
}
if (cachedev_find_dirty_cache_blocks (cache, &blkno, &bcount) == 0) {
return;
}
/* Just assume that bufferspace is available */
cache->bufferspace += bcount;
if (cache->bufferspace > cache->stat.maxbufferspace) {
cache->stat.maxbufferspace = cache->bufferspace;
}
flushdesc = (cache_event *) getfromextraq();
flushdesc->type = CACHE_EVENT_IDLEFLUSH_READ;
flushreq = (ioreq_event *) getfromextraq();
flushreq->buf = flushdesc;
flushreq->devno = cache->cache_devno;
flushreq->blkno = blkno;
flushreq->bcount = bcount;
flushreq->type = IO_ACCESS_ARRIVE;
flushreq->flags = READ;
(*cache->issuefunc)(cache->issueparam, flushreq);
cache->stat.destagereads++;
cache->stat.destagereadblocks += bcount;
}
/* Gets the appropriate block, locked and ready to be accessed for read or write */
int cachedev_get_block (cache_def *cache, ioreq_event *req, void (**donefunc)(void *, ioreq_event *), void *doneparam)
{
cache_event *rwdesc = (cache_event *) getfromextraq();
//youkim
//cache_event *rwdesc1 = (cache_event *) getfromextraq();
ioreq_event *fillreq;
int devno;
// fprintf (outputfile, "totalreqs = %d\n", disksim->totalreqs);
// fprintf (outputfile, "%.5f: Entered cache_get_block: rw %d, devno %d, blkno %d, size %d\n", simtime, (req->flags & READ), req->devno, req->blkno, req->bcount);
if (req->devno != cache->real_devno) {
fprintf (stderr, "cachedev_get_block trying to cache blocks for wrong device (%d should be %d)\n", req->devno, cache->real_devno);
ASSERT(0);
exit(1);
}
/* Ignore request overlap and locking issues for now. */
/* Also ignore buffer space limitation issues for now. */
cache->bufferspace += req->bcount;
if (cache->bufferspace > cache->stat.maxbufferspace) {
cache->stat.maxbufferspace = cache->bufferspace;
}
rwdesc->type = (req->flags & READ) ? CACHE_EVENT_READ : CACHE_EVENT_WRITE;
rwdesc->donefunc = donefunc;
rwdesc->doneparam = doneparam;
rwdesc->req = req;
req->next = NULL;
req->prev = NULL;
rwdesc->flags = 0;
cachedev_add_ongoing_request (cache, rwdesc);
//youkim
//rwdesc1 = ioreq_copy (rwdesc);
//memmove ((char *)rwdesc1, (char *)rwdesc, sizeof(cache_event));
//cachedev_add_ongoing_request (cache, rwdesc1);
//youkim
/*
printf("cache max buffer space: %d, size: %d\n",
cache->stat.maxbufferspace, cache->size);
printf("cache buffer space: %d\n", cache->bufferspace);
*/
if (req->flags & READ) {
cache->stat.reads++;
cache->stat.readblocks += req->bcount;
/* Send read straight to whichever device has it (preferably cachedev). */
//youkim
//if (cachedev_isreadhit (cache, req)) {
int start_lsn = 0;
int cnt = 0;
//printf("start_lsn: %d, cnt: %d\n", start_lsn, cnt);
/* On a miss the read goes to the real device at the original LBNs; on a hit, */
/* cachedev_isreadhit remaps it to the corresponding flash-cache location. */
fillreq = ioreq_copy (req);
if (cachedev_isreadhit (cache, req, &start_lsn, &cnt)) {
devno = cache->cache_devno;
cache->stat.readhitsfull++;
//youkim
fillreq->blkno = start_lsn;
fillreq->bcount = cnt;
printf("read hit: fillreq->blkno: %d, fillreq->bcount: %d\n", fillreq->blkno, fillreq->bcount);
} else {
//youkim
//printf("read miss\n");
devno = cache->real_devno;
cache->stat.readmisses++;
}
///* For now, just assume both device's store bits at same LBNs */
//fillreq = ioreq_copy (req);
fillreq->buf = rwdesc;
fillreq->type = IO_ACCESS_ARRIVE;
fillreq->devno = devno;
// fprintf (outputfile, "fillreq: devno %d, blkno %d, buf %p\n", fillreq->devno, fillreq->blkno, fillreq->buf);
(*cache->issuefunc)(cache->issueparam, fillreq);
return (1);
} else {
/* Grab buffer space and let the controller fill in data to be written. */
/* (for now, just assume that there is buffer space available) */
(*donefunc)(doneparam, req);
return (0);
}
}
/* frees the block after access complete, block is clean so remove locks */
/* and update lru */
void cachedev_free_block_clean (cache_def *cache, ioreq_event *req)
{
cache_event *rwdesc;
// fprintf (outputfile, "%.5f: Entered cache_free_block_clean: blkno %d, bcount %d, devno %d\n", simtime, req->blkno, req->bcount, req->devno);
/* For now, just find relevant rwdesc and free it. */
/* Later, write it to the cache device (and update the cache map accordingly). */
rwdesc = cachedev_find_ongoing_request (cache, req);
ASSERT (rwdesc != NULL);
if (rwdesc->type == CACHE_EVENT_READ) {
cache->bufferspace -= req->bcount;
cachedev_remove_ongoing_request (cache, rwdesc);
addtoextraq ((event *) rwdesc);
} else {
ASSERT (rwdesc->type == CACHE_EVENT_POPULATE_ALSO);
rwdesc->type = CACHE_EVENT_POPULATE_ONLY;
}
}
/* a delayed write - set dirty bits, remove locks and update lru. */
/* If cache doesn't allow delayed writes, forward this to async */
int cachedev_free_block_dirty (cache_def *cache, ioreq_event *req, void (**donefunc)(void *, ioreq_event *), void *doneparam)
{
ioreq_event *flushreq;
cache_event *writedesc;
// fprintf (outputfile, "%.5f, Entered cache_free_block_dirty: blkno %d, size %d\n", simtime, req->blkno, req->bcount);
cache->stat.writes++;
cache->stat.writeblocks += req->bcount;
writedesc = cachedev_find_ongoing_request (cache, req);
ASSERT (writedesc != NULL);
ASSERT (writedesc->type == CACHE_EVENT_WRITE);
writedesc->donefunc = donefunc;
writedesc->doneparam = doneparam;
writedesc->req = req;
req->type = IO_REQUEST_ARRIVE;
req->next = NULL;
req->prev = NULL;
/* On a miss the write goes to the real device at the original LBNs; on a hit, */
/* cachedev_iswritehit remaps it to the already-assigned flash-cache location. */
flushreq = ioreq_copy(req);
flushreq->type = IO_ACCESS_ARRIVE;
flushreq->buf = writedesc;
//youkim
//if (cachedev_iswritehit (cache, req)) {
int start_lsn, cnt;
if (cachedev_iswritehit (cache, req, &start_lsn, &cnt) ) {
flushreq->devno = cache->cache_devno;
cache->stat.writehitsfull++;
//youkim
flushreq->blkno = start_lsn;
flushreq->bcount = cnt;
printf("write hit: flushreq->blkno: %d, flushreq->bcount: %d\n", flushreq->blkno, flushreq->bcount);
} else {
cache->stat.writemisses++;
//youkim
//printf("write miss\n");
}
// fprintf (outputfile, "flushreq: devno %d, blkno %d, buf %p\n", flushreq->devno, flushreq->blkno, flushreq->buf);
(*cache->issuefunc)(cache->issueparam, flushreq);
#if 0
if (cache->flush_idledelay >= 0.0) {
ioqueue_reset_idledetecter((*cache->queuefind)(cache->queuefindparam, req->devno), 0);
}
#endif
return(1);
}
int cachedev_sync (cache_def *cache)
{
return(0);
}
//youkim: allocate a free flash page for the disk page containing req->blkno,
//record the disk-LPN -> flash-LPN mapping, and return the remapped start LSN
//and sector count through the out parameters; returns 1 on success, -1 if the
//cache device has no free page
int call1(ioreq_event *req, int *start_lsn, int *cnt)
{
//youkim
int s_lsn, e_lsn;
int s_lpn, e_lpn;
int s_lpn_of_flash;
int s_lsn_of_flash;
s_lsn = (req->blkno / PAGE_IN_SECT) * PAGE_IN_SECT;
e_lsn = ((req->blkno + req->bcount + PAGE_IN_SECT) / PAGE_IN_SECT) * PAGE_IN_SECT;
*cnt = e_lsn - s_lsn;
s_lpn = s_lsn / PAGE_IN_SECT;
e_lpn = e_lsn / PAGE_IN_SECT;
//youkim
if(*cnt > PAGE_IN_SECT) *cnt = PAGE_IN_SECT;
s_lpn_of_flash = find_hole(s_lpn);
if(s_lpn_of_flash == -1){
/* no free flash page available for this disk page */
return -1;
}
map_lpn_disk_to_flash[s_lpn_of_flash] = s_lpn;
s_lsn_of_flash = s_lpn_of_flash * PAGE_IN_SECT;
*start_lsn = s_lsn_of_flash;
return 1;
}
struct cacheevent *cachedev_disk_access_complete (cache_def *cache, ioreq_event *curr)
{
cache_event *rwdesc = curr->buf;
cache_event *tmp = NULL;
// fprintf (outputfile, "Entered cache_disk_access_complete: blkno %d, bcount %d, devno %d, buf %p\n", curr->blkno, curr->bcount, curr->devno, curr->buf);
if (rwdesc->type == CACHE_EVENT_READ) {
/* Consider writing same buffer to cache_devno, in order to populate it.*/
/* Not clear whether it is more appropriate to do it from here or from */
/* "free_block_clean" -- do it here for now to get more overlap. */
if (curr->devno == cache->real_devno) {
//youkim
int start_blkno = 0;
int cnt = 0;
if(call1(rwdesc->req, &start_blkno, &cnt) == 1)
{
ioreq_event *flushreq = ioreq_copy(rwdesc->req);
flushreq->type = IO_ACCESS_ARRIVE;
flushreq->buf = rwdesc;
flushreq->flags = WRITE;
flushreq->devno = cache->cache_devno;
//youkim
//int start_blkno = 0;
//int cnt = 0;
//call1(flushreq, &start_blkno, &cnt);
flushreq->blkno = start_blkno;
flushreq->bcount = cnt;
//printf("fetch from disk to cache: %d %d\n", start_blkno, cnt);
//}
rwdesc->type = CACHE_EVENT_POPULATE_ALSO;
(*cache->issuefunc)(cache->issueparam, flushreq);
cache->stat.popwrites++;
cache->stat.popwriteblocks += rwdesc->req->bcount;
}
}
/* Ongoing read request can now proceed, so call donefunc from get_block*/
(*rwdesc->donefunc)(rwdesc->doneparam,rwdesc->req);
} else if (rwdesc->type == CACHE_EVENT_WRITE) {
/* finished writing to cache-device */
if (curr->devno == cache->cache_devno) {
cachedev_setbits (cache->validmap, curr);
cachedev_setbits (cache->dirtymap, curr);
if (cache->writescheme == CACHE_WRITE_THRU) {
ioreq_event *flushreq = ioreq_copy(rwdesc->req);
flushreq->type = IO_ACCESS_ARRIVE;
flushreq->buf = rwdesc;
flushreq->flags = WRITE;
flushreq->devno = cache->real_devno;
rwdesc->type = CACHE_EVENT_FLUSH;
(*cache->issuefunc)(cache->issueparam, flushreq);
cache->stat.destagewrites++;
cache->stat.destagewriteblocks += rwdesc->req->bcount;
}
}
(*rwdesc->donefunc)(rwdesc->doneparam,rwdesc->req);
if (rwdesc->type != CACHE_EVENT_FLUSH) {
cachedev_remove_ongoing_request (cache, rwdesc);
addtoextraq ((event *) rwdesc);
cache->bufferspace -= curr->bcount;
}
} else if (rwdesc->type == CACHE_EVENT_POPULATE_ONLY) {
cachedev_setbits (cache->validmap, curr);
cachedev_remove_ongoing_request (cache, rwdesc);
addtoextraq ((event *) rwdesc);
cache->bufferspace -= curr->bcount;
} else if (rwdesc->type == CACHE_EVENT_POPULATE_ALSO) {
cachedev_setbits (cache->validmap, curr);
rwdesc->type = CACHE_EVENT_READ;
} else if (rwdesc->type == CACHE_EVENT_FLUSH) {
cachedev_clearbits (cache->dirtymap, curr);
cachedev_remove_ongoing_request (cache, rwdesc);
addtoextraq ((event *) rwdesc);
cache->bufferspace -= curr->bcount;
} else if (rwdesc->type == CACHE_EVENT_IDLEFLUSH_READ) {
ioreq_event *flushreq = ioreq_copy (curr);
flushreq->type = IO_ACCESS_ARRIVE;
flushreq->flags = WRITE;
flushreq->devno = cache->real_devno;
rwdesc->type = CACHE_EVENT_IDLEFLUSH_FLUSH;
(*cache->issuefunc)(cache->issueparam, flushreq);
cache->stat.destagewrites++;
cache->stat.destagewriteblocks += curr->bcount;
} else if (rwdesc->type == CACHE_EVENT_IDLEFLUSH_FLUSH) {
cachedev_clearbits (cache->dirtymap, curr);
cachedev_remove_ongoing_request (cache, rwdesc);
addtoextraq ((event *) rwdesc);
cachedev_idlework_callback (cache, curr->devno);
cache->bufferspace -= curr->bcount;
} else {
fprintf(stderr, "Unknown type at cachedev_disk_access_complete: %d\n", rwdesc->type);
ASSERT(0);
exit(1);
}
addtoextraq((event *) curr);
/* returned cacheevent will get forwarded to cachedev_wakeup_continue... */
return(tmp);
}
void cachedev_wakeup_complete (cache_def *cache, cache_event *desc)
{
ASSERT (0);
#if 0
if (desc->type == CACHE_EVENT_READ) {
cache_read_continue(cache, desc);
} else if (desc->type == CACHE_EVENT_WRITE) {
cache_write_continue(cache, desc);
} else if (desc->type == CACHE_EVENT_FLUSH) {
(*desc->donefunc)(desc->doneparam, desc->req);
addtoextraq((event *) desc);
} else {
fprintf(stderr, "Unknown event type in cache_wakeup_complete: %d\n", desc->type);
assert(0);
exit(1);
}
#endif
}
void cachedev_resetstats (cache_def *cache)
{
cache->stat.reads = 0;
cache->stat.readblocks = 0;
cache->stat.readhitsfull = 0;
cache->stat.readmisses = 0;
cache->stat.popwrites = 0;
cache->stat.popwriteblocks = 0;
cache->stat.writes = 0;
cache->stat.writeblocks = 0;
cache->stat.writehitsfull = 0;
cache->stat.writemisses = 0;
cache->stat.destagereads = 0;
cache->stat.destagereadblocks = 0;
cache->stat.destagewrites = 0;
cache->stat.destagewriteblocks = 0;
cache->stat.maxbufferspace = 0;
}
void cachedev_setcallbacks ()
{
disksim->donefunc_cachedev_empty = cachedev_empty_donefunc;
disksim->idlework_cachedev = cachedev_idlework_callback;
disksim->timerfunc_cachedev = cachedev_periodic_callback;
}
void cachedev_initialize (cache_def *cache,
void (**issuefunc)(void *,ioreq_event *),
void *issueparam,
struct ioq * (**queuefind)(void *,int),
void *queuefindparam,
void (**wakeupfunc)(void *,struct cacheevent *),
void *wakeupparam,
int numdevs)
{
StaticAssert (sizeof(cache_event) <= DISKSIM_EVENT_SIZE);
cache->issuefunc = issuefunc;
cache->issueparam = issueparam;
cache->queuefind = queuefind;
cache->queuefindparam = queuefindparam;
cache->wakeupfunc = wakeupfunc;
cache->wakeupparam = wakeupparam;
cache->bufferspace = 0;
cache->ongoing_requests = NULL;
//youkim: the bitmaps are tested against request block numbers that can exceed
//cache->size (see cachedev_isreadhit), so they should really be sized to the
//real disk size in sectors; as a stopgap they are over-allocated to ten times
//the cache size here.
//bzero (cache->validmap, bitstr_size(cache->size));
//bzero (cache->dirtymap, bitstr_size(cache->size));
cache->validmap = malloc(sizeof(int)*(bitstr_size(cache->size*10)));
cache->dirtymap = malloc(sizeof(int)*(bitstr_size(cache->size*10)));
bzero (cache->validmap, bitstr_size((cache->size*10)));
bzero (cache->dirtymap, bitstr_size((cache->size*10)));
cachedev_resetstats(cache);
if (cache->flush_idledelay) {
struct ioq *queue = (*queuefind)(queuefindparam,cache->real_devno);
ASSERT (queue != NULL);
ioqueue_set_idlework_function (queue, &disksim->idlework_cachedev, cache, cache->flush_idledelay);
}
if (device_get_number_of_blocks(cache->cache_devno) < cache->size) {
fprintf (stderr, "Size of cachedev exceeds that of actual cache device (devno %d): %d > %d\n", cache->cache_devno, cache->size, device_get_number_of_blocks(cache->cache_devno));
exit (0);
}
//youkim
cache_size_in_pages = (cache->size + PAGE_IN_SECT) / PAGE_IN_SECT;
map_lpn_disk_to_flash = (int *)malloc(sizeof(int)*cache_size_in_pages);
int i;
for(i = 0; i<cache_size_in_pages; i++){
map_lpn_disk_to_flash[i] = -1;
}
}
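/*
 * Sizing sketch (illustrative): with PAGE_IN_SECT = 4 and a cache device of
 * cache->size = 1000 sectors, cachedev_initialize() above sets
 *
 *   cache_size_in_pages = (1000 + 4) / 4 = 251
 *
 * flash pages, and map_lpn_disk_to_flash becomes a 251-entry table with every
 * slot set to -1 (free).
 */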
void cachedev_cleanstats (cache_def *cache)
{
}
void cachedev_printstats (cache_def *cache, char *prefix)
{
int reqs = cache->stat.reads + cache->stat.writes;
int blocks = cache->stat.readblocks + cache->stat.writeblocks;
fprintf (outputfile, "%scache requests: %6d\n", prefix, reqs);
if (reqs == 0) {
return;
}
fprintf (outputfile, "%scache read requests: %6d \t%6.4f\n", prefix, cache->stat.reads, ((double) cache->stat.reads / (double) reqs));
if (cache->stat.reads) {
fprintf(outputfile, "%scache blocks read: %6d \t%6.4f\n", prefix, cache->stat.readblocks, ((double) cache->stat.readblocks / (double) blocks));
fprintf(outputfile, "%scache read misses: %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.readmisses, ((double) cache->stat.readmisses / (double) reqs), ((double) cache->stat.readmisses / (double) cache->stat.reads));
fprintf(outputfile, "%scache read full hits: %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.readhitsfull, ((double) cache->stat.readhitsfull / (double) reqs), ((double) cache->stat.readhitsfull / (double) cache->stat.reads));
fprintf(outputfile, "%scache population writes: %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.popwrites, ((double) cache->stat.popwrites / (double) reqs), ((double) cache->stat.popwrites / (double) cache->stat.reads));
fprintf(outputfile, "%scache block population writes: %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.popwriteblocks, ((double) cache->stat.popwriteblocks / (double) blocks), ((double) cache->stat.popwriteblocks / (double) cache->stat.readblocks));
}
fprintf(outputfile, "%scache write requests: %6d \t%6.4f\n", prefix, cache->stat.writes, ((double) cache->stat.writes / (double) reqs));
if (cache->stat.writes) {
fprintf(outputfile, "%scache blocks written: %6d \t%6.4f\n", prefix, cache->stat.writeblocks, ((double) cache->stat.writeblocks / (double) blocks));
fprintf(outputfile, "%scache write misses: %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.writemisses, ((double) cache->stat.writemisses / (double) reqs), ((double) cache->stat.writemisses / (double) cache->stat.writes));
fprintf(outputfile, "%scache full write hits: %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.writehitsfull, ((double) cache->stat.writehitsfull / (double) reqs), ((double) cache->stat.writehitsfull / (double) cache->stat.writes));
fprintf(outputfile, "%scache destage pre-reads: %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.destagereads, ((double) cache->stat.destagereads / (double) reqs), ((double) cache->stat.destagereads / (double) cache->stat.writes));
fprintf(outputfile, "%scache block destage pre-reads: %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.destagereadblocks, ((double) cache->stat.destagereadblocks / (double) blocks), ((double) cache->stat.destagereadblocks / (double) cache->stat.writeblocks));
fprintf(outputfile, "%scache destages (write): %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.destagewrites, ((double) cache->stat.destagewrites / (double) reqs), ((double) cache->stat.destagewrites / (double) cache->stat.writes));
fprintf(outputfile, "%scache block destages (write): %6d \t%6.4f \t%6.4f\n", prefix, cache->stat.destagewriteblocks, ((double) cache->stat.destagewriteblocks / (double) blocks), ((double) cache->stat.destagewriteblocks / (double) cache->stat.writeblocks));
fprintf(outputfile, "%scache end dirty blocks: %6d \t%6.4f\n", prefix, cachedev_count_dirty_blocks(cache), ((double) cachedev_count_dirty_blocks(cache) / (double) cache->stat.writeblocks));
}
fprintf (outputfile, "%scache bufferspace use end: %6d\n", prefix, cache->bufferspace);
fprintf (outputfile, "%scache bufferspace use max: %6d\n", prefix, cache->stat.maxbufferspace);
}
cache_def * cachedev_copy (cache_def *cache)
{
cache_def *new = (cache_def *) DISKSIM_malloc(sizeof(cache_def));
ASSERT(new != NULL);
bzero (new, sizeof(cache_def));
new->cachetype = cache->cachetype;
new->issuefunc = cache->issuefunc;
new->issueparam = cache->issueparam;
new->queuefind = cache->queuefind;
new->queuefindparam = cache->queuefindparam;
new->wakeupfunc = cache->wakeupfunc;
new->wakeupparam = cache->wakeupparam;
new->size = cache->size;
new->maxreqsize = cache->maxreqsize;
return(new);
}
struct cache_def *disksim_cachedev_loadparams(struct lp_block *b)
{
int c;
struct cache_def *result;
result = malloc(sizeof(struct cache_def));
bzero(result, sizeof(struct cache_def));
result->cachetype = CACHE_DEVICE;
result->name = b->name ? strdup(b->name) : 0;
#include "modules/disksim_cachedev_param.c"
return result;
}