/* bp.c */

/* recovery_end_io() is the completion handler (assigned in recovery()
 * below) for the reads that bring the stored metadata records back
 * from the metadata disk during the recovery phase.
 */
static int recovery_end_io(struct bio *bio, unsigned int bytes_done, int err)
{
	unsigned int va1 = 0;
	struct metadata *metadata1 = NULL;

	/* recover the kernel virtual address of the page just read in
	 * (a lowmem page, so the mem_map arithmetic is valid) */
	va1 = ((((bio->bi_io_vec[0].bv_page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET);
	metadata1 = kmalloc(sizeof(struct metadata), GFP_ATOMIC);
	if (!metadata1)
	{
		printk(KERN_ALERT "metadata1: memory allocation failed\n");
		return -ENOMEM;
	}
	memset(metadata1, 0, sizeof(struct metadata));
	memcpy(metadata1, (void *)va1, sizeof(struct metadata));
	/* records whose timestamp qualifies against the requested
	 * recovery time are added to the mapping table */
	if (chk_time(metadata1))
		construct_mapping_table(metadata1);
	kfree(metadata1);
	mempool_free(bio->bi_io_vec[0].bv_page, pool_page);
	tcount--;
	/* once every metadata sector has been processed, show the
	 * result and re-enable the deferred tasklet */
	if (tcount == 0)
	{
		display_mapping_table();
		tasklet_enable(&ts1);
	}
	return 0;
}
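
/* struct metadata and struct cdp_bio are defined in the driver's
 * header, which is not part of this listing. A minimal sketch of what
 * they presumably contain, inferred from the field accesses in this
 * file (types and field order are assumptions):
 */
#if 0	/* illustrative only; the real definitions live elsewhere */
struct metadata {
	int hrs, min, sec;	/* timestamp taken in set_time() */
	unsigned int bisize;	/* size of the original write, in bytes */
	sector_t cdp_sector;	/* where the block landed on the CDP repository */
	sector_t host_sector;	/* original sector on the host disk */
};

struct cdp_bio {
	struct bio *master_bio;	/* the bio received in make_request() */
	unsigned long state;	/* bit 0 set once a clone completes successfully */
	atomic_t remaining;	/* clones still in flight */
	struct bio *bios[3];	/* host disk, CDP repository, metadata disk */
};
#endif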
/* cdp_end_bio_io is called when we have finished servicing
 * a mirrored operation and are ready to return a
 * success/failure code to the buffer cache layer.
 */
static int cdp_end_bio_io(struct cdp_bio *cdp_bio1)
{
	struct bio *bio = cdp_bio1->master_bio;

	bio_endio(bio, bio->bi_size, test_bit(0, &cdp_bio1->state) ? 0 : -EIO);
	mempool_free(bio->bi_io_vec[0].bv_page, pool_page);
	/* a write also allocated a private metadata page in store_metadata() */
	if (bio->bi_rw == WRITE)
		mempool_free(gpage, pool_page);
	mempool_free(cdp_bio1, cdp_bio_pool);
	return 0;
}
static int cdp_end_read_request(struct bio *bio, unsigned int bytes_done, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct cdp_bio *cdp_bio1 = (struct cdp_bio *)(bio->bi_private);

	/* partial completion: more of this bio is still in flight */
	if (bio->bi_size)
		return 1;
	if (!uptodate)
		printk(KERN_ALERT "read failed: BIO_UPTODATE not set\n");
	else
	{
		set_bit(0, &cdp_bio1->state);
		cdp_end_bio_io(cdp_bio1);
	}
	return 0;
}
static int cdp_end_write_request(struct bio *bio, unsigned int bytes_done, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct cdp_bio *cdp_bio1 = (struct cdp_bio *)(bio->bi_private);

	if (bio->bi_size)
		return 1;
	if (!uptodate)
		printk(KERN_ALERT "write failed: BIO_UPTODATE not set\n");
	else
		set_bit(0, &cdp_bio1->state);
	/* the last outstanding clone completes the master bio */
	if (atomic_dec_and_test(&cdp_bio1->remaining))
		cdp_end_bio_io(cdp_bio1);
	return 0;
}
/* set_time() is called to obtain the current system time
 * in hrs : min : sec. This time is used to timestamp the
 * data blocks as soon as the I/O request arrives in the
 * make_request() function.
 */
int set_time(void)
{
	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hrs = CMOS_READ(RTC_HOURS);
	/* the CMOS RTC stores its values in BCD; convert to binary */
	BCD_TO_BIN(sec);
	BCD_TO_BIN(min);
	BCD_TO_BIN(hrs);
	return 0;
}
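
/* struct Time is the structure handed in from userspace through the
 * GET_TIME ioctl and recovery(). A minimal sketch, assuming it simply
 * mirrors the three timestamp fields above:
 */
#if 0	/* illustrative only */
struct Time {
	int hrs;
	int min;
	int sec;
};
#endif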
/* store_metadata is called to store the metadata on the
 * CDP Repository. The metadata is stored serially on the
 * CDP Repository.
 */
struct bio *store_metadata(struct bio *bio1, unsigned int cdp_rep_sector)
{
	struct metadata *metadata1 = NULL;
	unsigned int va1 = 0;

	metadata1 = kmalloc(sizeof(struct metadata), GFP_ATOMIC);
	if (!metadata1)
	{
		printk(KERN_ALERT "metadata1: memory allocation failed\n");
		goto out1;
	}
	memset(metadata1, 0, sizeof(struct metadata));
	/* record the timestamp taken in make_request() together with
	 * the host/repository sector pair for this write */
	metadata1->hrs = hrs;
	metadata1->min = min;
	metadata1->sec = sec;
	metadata1->bisize = bio1->bi_size;
	metadata1->cdp_sector = cdp_rep_sector;
	metadata1->host_sector = bio1->bi_sector;
	printk(KERN_ALERT "\nTimestamp = %d:%d:%d ", metadata1->hrs, metadata1->min, metadata1->sec);
	printk(KERN_ALERT "\nHost Disk Sector = %lu ", (unsigned long)metadata1->host_sector);
	printk(KERN_ALERT "\nCDP Repository Disk Sector = %lu\n", (unsigned long)metadata1->cdp_sector);
	gpage = mempool_alloc(pool_page, GFP_NOIO);
	if (!gpage)
	{
		printk(KERN_ALERT "couldn't allocate a page from pool_page\n");
		kfree(metadata1);
		goto out1;
	}
	/* point the clone at a private page holding the metadata record */
	bio1->bi_io_vec[0].bv_page = gpage;
	bio1->bi_io_vec[0].bv_len = 512;
	bio1->bi_io_vec[0].bv_offset = 0;
	va1 = (unsigned int)kmap(bio1->bi_io_vec[0].bv_page);
	memcpy((void *)va1, metadata1, sizeof(struct metadata));
	kunmap(bio1->bi_io_vec[0].bv_page);
	/* metadata records are written serially, one 512-byte sector each */
	bio1->bi_sector = taddr;
	bio1->bi_size = 512;
	taddr = taddr + 1;
	kfree(metadata1);
out1:
	return bio1;
}
/* make_request() is used to redirect the I/O request to
 * both the Host Disk and the CDP Repository Disk.
 */
static int make_request(request_queue_t *q, struct bio *bio)
{
	struct cdp_bio *cdp_bio1 = NULL;
	unsigned int cdp_rep_sector = 0;
	int i = 0;

	/* Here the I/O request is timestamped */
	set_time();
	cdp_bio1 = mempool_alloc(cdp_bio_pool, GFP_NOIO);
	if (!cdp_bio1)
	{
		printk(KERN_ALERT "couldn't allocate a cdp_bio from the pool\n");
		return -ENOMEM;
	}
	cdp_bio1->master_bio = bio;
	cdp_bio1->state = 0;

	/* Read operation */
	if (bio_data_dir(bio) == READ)
	{
		struct bio *read_bio = NULL;

		printk(KERN_ALERT "Read Request\n");
		/* reads are served from the host disk only */
		read_bio = bio_clone(bio, GFP_NOIO);
		cdp_bio1->bios[0] = read_bio;
		cdp_bio1->bios[1] = NULL;
		cdp_bio1->bios[2] = NULL;
		read_bio->bi_bdev = (struct block_device *)dev[0];
		read_bio->bi_end_io = cdp_end_read_request;
		read_bio->bi_rw = READ;
		read_bio->bi_private = cdp_bio1;
		generic_make_request(read_bio);
		return 0;
	}
	/* Write operation.
	 * The I/O request is sent to both the Host Disk and the CDP
	 * Repository. The timestamp information is also stored on the
	 * CDP Repository.
	 */
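	/* Fan-out of a single WRITE request:
	 *
	 *   master bio --clone--> bios[0] -> dev[0] host disk, original sector
	 *              --clone--> bios[1] -> dev[1] CDP repository, sector maddr
	 *              --clone--> bios[2] -> dev[2] metadata disk, sector taddr
	 */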
	atomic_set(&cdp_bio1->remaining, 1);
	for (i = 0; i < 3; i++)
	{
		/* i=0 : writing the block to the Host Disk
		 * i=1 : writing the block to the CDP Repository Disk
		 * i=2 : writing the metadata/timestamp structure to a separate disk
		 */
		struct bio *mbio = NULL;

		mbio = bio_clone(bio, GFP_NOIO);
		mbio->bi_bdev = (struct block_device *)dev[i];
		mbio->bi_end_io = cdp_end_write_request;
		mbio->bi_rw = WRITE;
		mbio->bi_private = cdp_bio1;
		if (i == 1)
		{
			/* the data copy goes to the next free repository sector */
			mbio->bi_sector = maddr;
			cdp_rep_sector = maddr;
			maddr = maddr + bio_sectors(mbio);
		}
		if (i == 2)
		{
			/* redirect this clone at the metadata record instead */
			mbio = store_metadata(mbio, cdp_rep_sector);
		}
		atomic_inc(&cdp_bio1->remaining);
		cdp_bio1->bios[i] = mbio;
		generic_make_request(mbio);
	}
	/* drop the initial reference taken above; if every clone has
	 * already completed, finish the master bio now */
	if (atomic_dec_and_test(&cdp_bio1->remaining))
		cdp_end_bio_io(cdp_bio1);
	return 0;
}
/* recovery() is called during the recovery phase to retrieve
 * the metadata structures stored on the CDP Repository.
 */
static ssize_t recovery(struct file *file, const char __user *buffer, size_t length, loff_t *offset)
{
	int i = 0;
	char *msg_ptr = NULL;
	struct page *page = NULL;
	struct bio *rbio;
	struct Time msg;

	R_hrs = R_min = R_sec = 0;
	msg_ptr = (char *)(&msg);
	/* copy the requested recovery time in from userspace, clamped
	 * so it cannot overrun the stack buffer */
	if (length > sizeof(msg))
		length = sizeof(msg);
	for (i = 0; i < length; i++)
		get_user(*(msg_ptr + i), buffer + i);
	R_hrs = msg.hrs;
	R_min = msg.min;
	R_sec = msg.sec;
	/* disable the tasklet; it is re-enabled from recovery_end_io()
	 * once every metadata record has been read back */
	tasklet_disable(&ts1);
	/* schedule the tasklet for execution */
	tasklet_schedule(&ts1);
	tcount = taddr;
	/* the list heads of the metadata list and the bio
	 * (most recent blocks) list are initialized */
	INIT_LIST_HEAD(&mt_home);
	INIT_LIST_HEAD(&mr_bio_home);
	for (i = 0; i < taddr; i++)
	{
		page = (struct page *)mempool_alloc(pool_page, GFP_NOIO);
		if (!page)
		{
			printk(KERN_ALERT "couldn't allocate a page from pool_page\n");
			return -ENOMEM;
		}
		/* issue one 512-byte read per stored metadata sector; the
		 * completion handler rebuilds the mapping table */
		rbio = bio_alloc(GFP_NOIO, 1);
		rbio->bi_sector = i;
		rbio->bi_bdev = (struct block_device *)dev[2];
		rbio->bi_io_vec[0].bv_page = page;
		rbio->bi_io_vec[0].bv_len = 512;
		rbio->bi_io_vec[0].bv_offset = 0;
		rbio->bi_rw = READ;
		rbio->bi_vcnt = 1;
		rbio->bi_idx = 0;
		rbio->bi_size = 512;
		rbio->bi_end_io = recovery_end_io;
		rbio->bi_private = NULL;
		bio_get(rbio);
		generic_make_request(rbio);
		bio_put(rbio);
	}
	return 0;
}
static int blk_open(struct inode *inode, struct file *filp)
{
	struct blk_device *dev = inode->i_bdev->bd_disk->private_data;

	filp->private_data = dev;
	spin_lock(&dev->lock);
	dev->users++;
	spin_unlock(&dev->lock);
	return 0;
}
static int blk_release(struct inode *inode, struct file *filp)
{
	struct blk_device *dev = inode->i_bdev->bd_disk->private_data;

	spin_lock(&dev->lock);
	dev->users--;
	spin_unlock(&dev->lock);
	return 0;
}
int blk_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct block_device *bdev = inode->i_bdev;

	if (cmd == GET_TIME)
	{
		if (ts_free_flag == 1)
		{
			if (!capable(CAP_SYS_ADMIN))
			{
				printk(KERN_ALERT "\ncapability check failed");
				return -EACCES;
			}
			/* fsync_bdev is used to flush the buffer cache
			 * contents to disk. This ensures consistency at
			 * the time of recovery. Only the pending I/O
			 * requests in the buffer cache that were directed
			 * at the virtual "block_dev" device are synced. */
			lock_kernel();
			fsync_bdev(bdev);
			invalidate_bdev(bdev, 0);
			unlock_kernel();
		}
		ts_free_flag = 1;
		recovery(filp, (char __user *)arg, sizeof(struct Time), NULL);
		return 0;
	}
	return -ENOTTY;
}
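
/* From userspace, recovery is kicked off by opening the virtual device
 * and issuing the GET_TIME ioctl with the desired recovery point. A
 * minimal sketch (the GET_TIME command number and the device node path
 * come from a shared header / system setup and are assumptions here):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *
 *	struct Time t = { .hrs = 10, .min = 30, .sec = 0 };
 *	int fd = open("/dev/block_dev0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, GET_TIME, &t) == 0)
 *		;	// driver rebuilds the mapping table up to 10:30:00
 */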
/* set of device operations */
static struct block_device_operations blk_ops = {
	.owner = THIS_MODULE,
	.open = blk_open,
	.release = blk_release,
	.ioctl = blk_ioctl
};
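
/* The element constructors/destructors handed to mempool_create() in
 * init_module() below are not shown in this listing. A minimal sketch
 * of what they presumably look like, following the standard mempool
 * callback pattern (the bodies here are assumptions):
 */
#if 0	/* illustrative only */
static void *cdp_page_alloc(gfp_t gfp_mask, void *pool_data)
{
	return alloc_page(gfp_mask);	/* one page per pool element */
}

static void cdp_page_free(void *element, void *pool_data)
{
	__free_page((struct page *)element);
}

static void *cdp_bio_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
	return kmalloc(sizeof(struct cdp_bio), gfp_mask);
}

static void cdp_bio_pool_free(void *element, void *pool_data)
{
	kfree(element);
}
#endif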
/* init_module is called at the time of module insertion */
int init_module(void)
{
	int min_nr = 0;
	int megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);

	maddr = START_METADATA;
	taddr = START_METADATA;
	atomic_set(&free_resources, 0);
	ts_free_flag = 0;
	/* scale the pool bound with available memory, capped at 256 */
	min_nr = megabytes * 2;
	tcount = 0;
	if (min_nr > 256)
		min_nr = 256;
	/* create a guaranteed-size, preallocated memory pool each
	 * for pages and for cdp_bio structures.
	 */
	pool_page = mempool_create(200, (mempool_alloc_t *)cdp_page_alloc, (mempool_free_t *)cdp_page_free, NULL);
	if (!pool_page)
	{
		printk(KERN_ALERT "pool_page: mempool creation failed\n");
		goto out4;
	}
	cdp_bio_pool = mempool_create(50, (mempool_alloc_t *)cdp_bio_pool_alloc, (mempool_free_t *)cdp_bio_pool_free, NULL);
	if (!cdp_bio_pool)
	{
		printk(KERN_ALERT "cdp_bio_pool: mempool creation failed\n");
		goto out2;
	}
	/* open_by_devnum is used to open a block device by its
	 * device number */
	/* the returned struct block_device pointers are stashed in dev[]
	 * and cast back at the bio submission sites above */
	dev[0] = (dev_t)open_by_devnum(MKDEV(HOST_DISK_MAJOR_NUM, HOST_DISK_MINOR_NUM), FMODE_READ|FMODE_WRITE);
	dev[1] = (dev_t)open_by_devnum(MKDEV(CDP_REPOSITORY_MAJOR_NUM, CDP_REPOSITORY_MINOR_NUM), FMODE_READ|FMODE_WRITE);
	dev[2] = (dev_t)open_by_devnum(MKDEV(CDP_METADATA_MAJOR_NUM, CDP_METADATA_MINOR_NUM), FMODE_READ|FMODE_WRITE);
	Device.size = nsectors * hardsect_size;
	spin_lock_init(&Device.lock);
	Queue = blk_alloc_queue(GFP_KERNEL);
	if (Queue == NULL)
		goto out;
printk(KERN_ALERT "Starting Initialization ....\n ");
blk_queue_make_request (Queue, make_request);
blk_queue_hardsect_size(Queue, hardsect_size);
	/* register_blkdev function is used to register the
	 * block device "block_dev" */
	major_num = register_blkdev(major_num, "block_dev");
	if (major_num <= 0)
	{
		printk(KERN_WARNING "block_dev: unable to get major number\n");
		goto out;
	}
	/* alloc_disk function is used to allocate, initialise
	 * and install the gendisk structure */
	Device.gd = alloc_disk(4);
	if (!Device.gd)
	{
		printk(KERN_ALERT "alloc_disk failure\n");
		goto out_unregister;
	}
	Device.gd->major = major_num;
	Device.gd->first_minor = 0;
	Device.gd->fops = &blk_ops;
	Device.gd->private_data = &Device;
	strcpy(Device.gd->disk_name, "block_dev0");
	set_capacity(Device.gd, nsectors * (hardsect_size / KERNEL_SECTOR_SIZE));
	Device.gd->queue = Queue;
	add_disk(Device.gd);
	printk(KERN_ALERT "Initialization complete\n");
	return 0;
out_unregister:
	/* unregister_blkdev function is used to unregister the
	 * block device "block_dev" */
	unregister_blkdev(major_num, "block_dev");
out:
	/* unwind in reverse order of allocation; the labels fall
	 * through so each failure point frees exactly what exists */
	if (Queue)
		blk_cleanup_queue(Queue);
	mempool_destroy(cdp_bio_pool);
out2:
	mempool_destroy(pool_page);
out4:
	return -ENOMEM;
}
/* cleanup_module function is called when the module is removed */
void cleanup_module(void)
{
	/* tasklet_kill() ensures that the tasklets are not
	 * scheduled to run again */
	tasklet_kill(&ts1);
	tasklet_kill(&ts2);
	/* del_gendisk() cleans up any partitioning information */
	del_gendisk(Device.gd);
	/* put_disk() releases the reference to the gendisk
	 * structure so that it can be freed */
	put_disk(Device.gd);
	/* mempool_destroy is used to destroy the memory pools
	 * allocated previously */
	mempool_destroy(pool_page);
	mempool_destroy(cdp_bio_pool);
	/* unregister_blkdev is used to unregister the
	 * virtual "block_dev" device */
	unregister_blkdev(major_num, "block_dev");
	/* blk_cleanup_queue is used to return the request queue
	 * to the system; after this the driver sees no more
	 * requests from the given queue */
	blk_cleanup_queue(Queue);
	printk(KERN_ALERT "Exit\n");
}