mm-armv.c
/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

#define CPOLICY_UNCACHED        0
#define CPOLICY_BUFFERED        1
#define CPOLICY_WRITETHROUGH    2
#define CPOLICY_WRITEBACK       3
#define CPOLICY_WRITEALLOC      4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;

pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
        const char      policy[16];
        unsigned int    cr_mask;
        unsigned int    pmd;
        unsigned int    pte;
};

static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
                .pte            = 0,
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
                .pte            = PTE_BUFFERABLE,
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
                .pte            = PTE_CACHEABLE,
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
                .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
                .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
        }
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
                int len = strlen(cache_policies[i].policy);

                if (memcmp(*p, cache_policies[i].policy, len) == 0) {
                        cachepolicy = i;
                        cr_alignment &= ~cache_policies[i].cr_mask;
                        cr_no_alignment &= ~cache_policies[i].cr_mask;
                        *p += len;
                        break;
                }
        }
        if (i == ARRAY_SIZE(cache_policies))
                printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
        flush_cache_all();
        set_cr(cr_alignment);
}

static void __init early_nocache(char **__unused)
{
        char *p = "buffered";
        printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
        char *p = "uncached";
        printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}

static void __init early_ecc(char **p)
{
        if (memcmp(*p, "on", 2) == 0) {
                ecc_mask = PMD_PROTECTION;
                *p += 2;
        } else if (memcmp(*p, "off", 3) == 0) {
                ecc_mask = 0;
                *p += 3;
        }
}

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
        cr_alignment &= ~CR_A;
        cr_no_alignment &= ~CR_A;
        set_cr(cr_alignment);
        return 1;
}

__setup("noalign", noalign_setup);

#define FIRST_KERNEL_PGD_NR     (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

        new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!new_pgd)
                goto no_pgd;

        memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

        init_pgd = pgd_offset_k(0);

        if (vectors_base() == 0) {
                /*
                 * This lock is here just to satisfy pmd_alloc and pte_lock
                 */
                spin_lock(&mm->page_table_lock);

                /*
                 * On ARM, first page must always be allocated since it
                 * contains the machine vectors.
                 */
                new_pmd = pmd_alloc(mm, new_pgd, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc_map(mm, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

                init_pmd = pmd_offset(init_pgd, 0);
                init_pte = pte_offset_map_nested(init_pmd, 0);
                set_pte(new_pte, *init_pte);
                pte_unmap_nested(init_pte);
                pte_unmap(new_pte);

                spin_unlock(&mm->page_table_lock);
        }

        /*
         * Copy over the kernel and IO PGD entries
         */
        memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
               (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

        return new_pgd;

no_pte:
        spin_unlock(&mm->page_table_lock);
        pmd_free(new_pmd);
        free_pages((unsigned long)new_pgd, 2);
        return NULL;

no_pmd:
        spin_unlock(&mm->page_table_lock);
        free_pages((unsigned long)new_pgd, 2);
        return NULL;

no_pgd:
        return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
        pmd_t *pmd;
        struct page *pte;

        if (!pgd)
                return;

        /* pgd is always present and good */
        pmd = (pmd_t *)pgd;
        if (pmd_none(*pmd))
                goto free;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                goto free;
        }

        pte = pmd_page(*pmd);
        pmd_clear(pmd);
        dec_page_state(nr_page_table_pages);
        pte_free(pte);
        pmd_free(pmd);
free:
        free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
        pmd_t *pmdp;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);
        if (virt & (1 << 20))
                pmdp++;

        set_pmd(pmdp, __pmd(phys | prot));
}

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
        pmd_t *pmdp;
        pte_t *ptep;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);

        if (pmd_none(*pmdp)) {
                unsigned long pmdval;
                ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
                                               sizeof(pte_t));

                pmdval = __pa(ptep) | prot_l1;
                pmdp[0] = __pmd(pmdval);
                pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
                flush_pmd_entry(pmdp);
        }
        ptep = pte_offset_kernel(pmdp, virt);

        set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}

/*
 * Clear any PGD mapping.  On a two-level page table system,
 * the clearance is done by the middle-level functions (pmd)
 * rather than the top-level (pgd) functions.
 */
static inline void clear_mapping(unsigned long virt)
{
        pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
}

struct mem_types {
        unsigned int    prot_pte;
        unsigned int    prot_l1;
        unsigned int    prot_sect;
        unsigned int    domain;
};

static struct mem_types mem_types[] __initdata = {
        [MT_DEVICE] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                             L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
                             PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                             L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        }
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)