page_table = PAGE_DIR_OFFSET(page_dir,tmp);
page_table = (ulong *) (PAGE_MASK & *page_table);
page_table += (tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
*page_table = shm_sgn;
}
return 0;
}
/*
* Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
* raddr is needed to return addresses above 2Gig.
* Specific attaches are allowed over the executable....
*/
int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
struct shmid_ds *shp;
struct shm_desc *shmd;
int err;
unsigned int id;
unsigned long addr;
if (shmid < 0)
return -EINVAL;
if (raddr) {
err = verify_area(VERIFY_WRITE, raddr, sizeof(long));
if (err)
return err;
}
shp = shm_segs[id = shmid % SHMMNI];
if (shp == IPC_UNUSED || shp == IPC_NOID)
return -EINVAL;
if (!(addr = (ulong) shmaddr)) {
if (shmflg & SHM_REMAP)
return -EINVAL;
/* set addr below all current unspecified attaches */
addr = SHM_RANGE_END;
for (shmd = current->shm; shmd; shmd = shmd->task_next) {
if (shmd->start < SHM_RANGE_START)
continue;
if (addr >= shmd->start)
addr = shmd->start;
}
addr = (addr - shp->shm_segsz) & PAGE_MASK;
} else if (addr & (SHMLBA-1)) {
if (shmflg & SHM_RND)
addr &= ~(SHMLBA-1); /* round down */
else
return -EINVAL;
}
if ((addr > current->start_stack - 16384 - PAGE_SIZE*shp->shm_npages))
return -EINVAL;
if (shmflg & SHM_REMAP)
for (shmd = current->shm; shmd; shmd = shmd->task_next) {
if (addr >= shmd->start && addr < shmd->end)
return -EINVAL;
if (addr + shp->shm_segsz >= shmd->start &&
addr + shp->shm_segsz < shmd->end)
return -EINVAL;
}
if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
return -EACCES;
if (shp->shm_perm.seq != shmid / SHMMNI)
return -EIDRM;
shmd = (struct shm_desc *) kmalloc (sizeof(*shmd), GFP_KERNEL);
if (!shmd)
return -ENOMEM;
if ((shp != shm_segs[id]) || (shp->shm_perm.seq != shmid / SHMMNI)) {
kfree_s (shmd, sizeof (*shmd));
return -EIDRM;
}
shmd->shm_sgn = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT) |
(shmflg & SHM_RDONLY ? SHM_READ_ONLY : 0);
shmd->start = addr;
shmd->end = addr + shp->shm_npages * PAGE_SIZE;
shmd->task = current;
shp->shm_nattch++; /* prevent destruction */
if (addr < current->end_data) {
iput (current->executable);
current->executable = NULL;
/* current->end_data = current->end_code = 0; */
}
if ((err = shm_map (shmd, shmflg & SHM_REMAP))) {
if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
killseg(id);
kfree_s (shmd, sizeof (*shmd));
return err;
}
shmd->task_next = current->shm;
current->shm = shmd;
shmd->seg_next = shp->attaches;
shp->attaches = shmd;
shp->shm_lpid = current->pid;
shp->shm_atime = CURRENT_TIME;
if (!raddr)
return addr;
put_fs_long (addr, raddr);
return 0;
}
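/*
 * Illustration (not part of the original file): a minimal user-space sketch
 * of the path that ends in sys_shmat(), assuming the usual libc wrappers
 * from <sys/ipc.h> and <sys/shm.h>; error handling is kept to a minimum.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id;
	char *p;

	/* shmget() creates a segment and returns an index into shm_segs[]. */
	id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (id < 0)
		return 1;

	/* shmaddr == NULL: sys_shmat picks an address below SHM_RANGE_END,
	   below any existing unspecified attaches. */
	p = shmat(id, NULL, 0);
	if (p == (void *) -1)
		return 1;

	strcpy(p, "hello");		/* first touch is resolved by shm_no_page() */
	printf("attached at %p: %s\n", (void *) p, p);

	shmdt(p);			/* sys_shmdt, matched by the attach address */
	shmctl(id, IPC_RMID, NULL);	/* remove the segment once detached */
	return 0;
}
#endif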
/*
* remove the first attach descriptor from the list *shmdp.
* free memory for segment if it is marked destroyed.
* The descriptor is detached before the sleep in unmap_page_range.
*/
static void detach (struct shm_desc **shmdp)
{
struct shm_desc *shmd = *shmdp;
struct shmid_ds *shp;
int id;
id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK;
shp = shm_segs[id];
*shmdp = shmd->task_next;
for (shmdp = &shp->attaches; *shmdp; shmdp = &(*shmdp)->seg_next)
if (*shmdp == shmd) {
*shmdp = shmd->seg_next;
goto found;
}
printk("detach: shm segment (id=%d) attach list inconsistent\n",id);
found:
unmap_page_range (shmd->start, shp->shm_segsz); /* sleeps */
kfree_s (shmd, sizeof (*shmd));
shp->shm_lpid = current->pid;
shp->shm_dtime = CURRENT_TIME;
if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
killseg (id); /* sleeps */
return;
}
/*
* detach and kill segment if marked destroyed.
* The work is done in detach.
*/
int sys_shmdt (char *shmaddr)
{
struct shm_desc *shmd, **shmdp;
for (shmdp = ¤t->shm; (shmd = *shmdp); shmdp=&shmd->task_next) {
if (shmd->start == (ulong) shmaddr) {
detach (shmdp);
return 0;
}
}
return -EINVAL;
}
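/*
 * Illustration (not part of the original file): sys_shmdt matches only the
 * exact attach start address, so a pointer into the middle of the segment is
 * rejected with EINVAL.  Sketch assumes the libc shmat/shmdt wrappers.
 */
#if 0
#include <errno.h>
#include <sys/shm.h>

static int detach_example(int id)
{
	char *p = shmat(id, NULL, 0);

	if (p == (void *) -1)
		return -1;

	if (shmdt(p + 16) != -1 || errno != EINVAL)
		return -1;	/* unexpected: only shmd->start should match */

	return shmdt(p);	/* exact match, detach() runs */
}
#endif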
/*
* detach all attached segments.
*/
void shm_exit (void)
{
while (current->shm)
detach(¤t->shm);
return;
}
/*
* copy the parent shm descriptors and update nattch
* parent is stuck in fork so an attach on each segment is assured.
* copy_page_tables does the mapping.
*/
int shm_fork (struct task_struct *p1, struct task_struct *p2)
{
struct shm_desc *shmd, *new_desc = NULL, *tmp;
struct shmid_ds *shp;
int id;
if (!p1->shm)
return 0;
for (shmd = p1->shm; shmd; shmd = shmd->task_next) {
tmp = (struct shm_desc *) kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp) {
while (new_desc) {
tmp = new_desc->task_next;
kfree_s (new_desc, sizeof (*new_desc));
new_desc = tmp;
}
free_page_tables (p2);
return -ENOMEM;
}
*tmp = *shmd;
tmp->task = p2;
tmp->task_next = new_desc;
new_desc = tmp;
}
p2->shm = new_desc;
for (shmd = new_desc; shmd; shmd = shmd->task_next) {
id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK;
shp = shm_segs[id];
if (shp == IPC_UNUSED) {
printk("shm_fork: unused id=%d PANIC\n", id);
return -ENOMEM;
}
shmd->seg_next = shp->attaches;
shp->attaches = shmd;
shp->shm_nattch++;
shp->shm_atime = CURRENT_TIME;
shp->shm_lpid = current->pid;
}
return 0;
}
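/*
 * Illustration (not part of the original file): because shm_fork() duplicates
 * the parent's attach descriptors and copy_page_tables() copies the mappings,
 * a child forked after shmat() sees the same segment at the same address.
 * Minimal sketch using the libc wrappers; error handling omitted.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/wait.h>
#include <unistd.h>

static void fork_example(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	char *p = shmat(id, NULL, 0);

	if (fork() == 0) {
		strcpy(p, "written by the child");	/* same mapping in the child */
		_exit(0);
	}
	wait(NULL);
	printf("parent reads: %s\n", p);		/* sees the child's write */

	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
}
#endif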
/*
* page not present ... go through shm_pages .. called from swap_in()
*/
void shm_no_page (unsigned long *ptent)
{
unsigned long page;
unsigned long code = *ptent;
struct shmid_ds *shp;
unsigned int id, idx;
id = (code >> SHM_ID_SHIFT) & SHM_ID_MASK;
if (id > max_shmid) {
printk ("shm_no_page: id=%d too big. proc mem corruptedn", id);
return;
}
shp = shm_segs[id];
if (shp == IPC_UNUSED || shp == IPC_NOID) {
printk ("shm_no_page: id=%d invalid. Race.\n", id);
return;
}
idx = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
if (idx >= shp->shm_npages) {
printk ("shm_no_page : too large page index. id=%d\n", id);
return;
}
if (!(shp->shm_pages[idx] & PAGE_PRESENT)) {
if(!(page = get_free_page(GFP_KERNEL))) {
oom(current);
*ptent = BAD_PAGE | PAGE_ACCESSED | 7;
return;
}
if (shp->shm_pages[idx] & PAGE_PRESENT) {
free_page (page);
goto done;
}
if (shp->shm_pages[idx]) {
read_swap_page (shp->shm_pages[idx], (char *) page);
if (shp->shm_pages[idx] & PAGE_PRESENT) {
free_page (page);
goto done;
}
swap_free (shp->shm_pages[idx]);
shm_swp--;
}
shm_rss++;
shp->shm_pages[idx] = page | (PAGE_SHARED | PAGE_DIRTY);
} else
--current->maj_flt; /* was incremented in do_no_page */
done:
current->min_flt++;
page = shp->shm_pages[idx];
if (code & SHM_READ_ONLY) /* write-protect */
page &= ~2;
mem_map[MAP_NR(page)]++;
*ptent = page;
return;
}
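/*
 * Illustration (not part of the original file): a hypothetical helper that
 * mirrors the decoding at the top of shm_no_page().  The shm_sgn value built
 * in sys_shmat() packs the segment id, the page index and an optional
 * read-only flag into one non-present pte; the shift/mask macros come from
 * linux/shm.h.
 */
#if 0
static void decode_shm_sgn(unsigned long code,
			   unsigned int *id, unsigned int *idx, int *rdonly)
{
	*id     = (code >> SHM_ID_SHIFT)  & SHM_ID_MASK;   /* slot in shm_segs[] */
	*idx    = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;  /* page within the segment */
	*rdonly = (code & SHM_READ_ONLY) != 0;             /* write-protect on map-in */
}
#endif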
/*
* Goes through counter = (shm_rss >> prio) present shm pages.
*/
static unsigned long swap_id = 0; /* currently being swapped */
static unsigned long swap_idx = 0; /* next to swap */
int shm_swap (int prio)
{
unsigned long page;
struct shmid_ds *shp;
struct shm_desc *shmd;
unsigned int swap_nr;
unsigned long id, idx, invalid = 0;
int counter;
counter = shm_rss >> prio;
if (!counter || !(swap_nr = get_swap_page()))
return 0;
check_id:
shp = shm_segs[swap_id];
if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED ) {
swap_idx = 0;
if (++swap_id > max_shmid)
swap_id = 0;
goto check_id;
}
id = swap_id;
check_table:
idx = swap_idx++;
if (idx >= shp->shm_npages) {
swap_idx = 0;
if (++swap_id > max_shmid)
swap_id = 0;
goto check_id;
}
page = shp->shm_pages[idx];
if (!(page & PAGE_PRESENT))
goto check_table;
swap_attempts++;
if (--counter < 0) { /* failed */
if (invalid)
invalidate();
swap_free (swap_nr);
return 0;
}
for (shmd = shp->attaches; shmd; shmd = shmd->seg_next) {
unsigned long tmp, *pte;
if ((shmd->shm_sgn >> SHM_ID_SHIFT & SHM_ID_MASK) != id) {
printk ("shm_swap: id=%ld does not match shmd\n", id);
continue;
}
tmp = shmd->start + (idx << PAGE_SHIFT);
if (tmp >= shmd->end) {
printk ("shm_swap: too large idx=%ld id=%ld PANIC\n",idx, id);
continue;
}
pte = PAGE_DIR_OFFSET(shmd->task->tss.cr3,tmp);
if (!(*pte & 1)) {
printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
id, shmd->start, idx);
*pte = 0;
continue;
}
pte = (ulong *) (PAGE_MASK & *pte);
pte += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
tmp = *pte;
if (!(tmp & PAGE_PRESENT))
continue;
if (tmp & PAGE_ACCESSED) {
*pte &= ~PAGE_ACCESSED;
continue;
}
tmp = shmd->shm_sgn | idx << SHM_IDX_SHIFT;
*pte = tmp;
mem_map[MAP_NR(page)]--;
shmd->task->rss--;
invalid++;
}
if (mem_map[MAP_NR(page)] != 1)
goto check_table;
page &= PAGE_MASK;
shp->shm_pages[idx] = swap_nr;
if (invalid)
invalidate();
write_swap_page (swap_nr, (char *) page);
free_page (page);
swap_successes++;
shm_swp++;
shm_rss--;
return 1;
}
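/*
 * Illustration (not part of the original file): shm_swap() skips segments
 * whose mode has SHM_LOCKED set, so a suitably privileged process can pin a
 * segment in memory through shmctl().  Sketch assumes the SHM_LOCK/SHM_UNLOCK
 * commands from <sys/shm.h>.
 */
#if 0
#include <sys/shm.h>

static int pin_segment(int id)
{
	if (shmctl(id, SHM_LOCK, NULL) == -1)	/* sets SHM_LOCKED in shm_perm.mode */
		return -1;

	/* ... segment pages stay resident; shm_swap() will not page them out ... */

	return shmctl(id, SHM_UNLOCK, NULL);
}
#endif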