path: root/kernel/lib/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

/* "nohugeiomap" on the kernel command line disables huge I/O mappings */
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

/* latch the architecture's huge-mapping capabilities once during early boot */
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

/* lowest level of the walk: populate the PTEs covering [addr, end) */
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
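		/* ioremap must only ever target currently-unmapped PTEs */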
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		/* map a whole huge PMD when size and alignment allow it */
		if (ioremap_pmd_enabled() &&
		    ((next - addr) == PMD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
			if (pmd_set_huge(pmd, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_pud_enabled() &&
		    ((next - addr) == PUD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
			if (pud_set_huge(pud, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/**
 * ioremap_page_range - map a physical range into the kernel page tables
 * @addr: virtual start address of the mapping
 * @end: virtual end address of the mapping
 * @phys_addr: physical address to map at @addr
 * @prot: page protection flags for the new entries
 *
 * Walks pgd -> pud -> pmd -> pte, installing leaf entries (or huge
 * pud/pmd entries where the architecture supports them) and flushing
 * the cache over the new mapping.
 */
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);

	start = addr;
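	/*
	 * Keep phys_addr as an offset so that (phys_addr + addr) below
	 * yields the physical address backing each virtual address.
	 */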
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);

	return err;
}
EXPORT_SYMBOL_GPL(ioremap_page_range);
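
/*
 * Illustrative sketch (not part of the original file): drivers normally do
 * not call ioremap_page_range() directly; an architecture's ioremap()
 * picks a vmalloc-space region and then uses this helper to populate the
 * page tables.  The physical address and register offset below are
 * hypothetical.
 */
#if 0	/* example only */
static void __iomem *example_map_device(void)
{
	/* hypothetical 4KiB MMIO window */
	void __iomem *regs = ioremap(0xfed00000UL, PAGE_SIZE);

	if (!regs)
		return NULL;
	writel(0x1, regs + 0x04);	/* hypothetical control register */
	return regs;	/* caller later releases it with iounmap(regs) */
}
#endif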
/* ---- lib/flex_array.c (fragment) ---- */

#include <linux/flex_array.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/reciprocal_div.h>

static int fa_element_to_part_nr(struct flex_array *fa,
					unsigned int element_nr)
{
	return reciprocal_divide(element_nr, fa->reciprocal_elems);
}

/**
 * flex_array_free_parts - just free the second-level pages
 * @fa:		the flex array from which to free parts
 *
 * This is to be used in cases where the base 'struct flex_array'
 * has been statically allocated and should not be freed.
 */
void flex_array_free_parts(struct flex_array *fa)
{
	int part_nr;

	if (elements_fit_in_base(fa))
		return;
	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
		kfree(fa->parts[part_nr]);
}
EXPORT_SYMBOL(flex_array_free_parts);

void flex_array_free(struct flex_array *fa)
{
	flex_array_free_parts(fa);
	kfree(fa);
}
EXPORT_SYMBOL(flex_array_free);

static unsigned int index_inside_part(struct flex_array *fa,
					unsigned int element_nr,
					unsigned int part_nr)
{
	unsigned int part_offset;

	part_offset = element_nr - part_nr * fa->elems_per_part;
	return part_offset * fa->element_size;
}

static struct flex_array_part *
__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
{
	struct flex_array_part *part = fa->parts[part_nr];

	if (!part) {
		part = kmalloc(sizeof(struct flex_array_part), flags);
		if (!part)
			return NULL;
		if (!(flags & __GFP_ZERO))
			memset(part, FLEX_ARRAY_FREE,
				sizeof(struct flex_array_part));
		fa->parts[part_nr] = part;
	}
	return part;
}

/**
 * flex_array_put - copy data into the array at @element_nr
 * @fa:		the flex array to copy data into
 * @element_nr:	index of the position in which to insert
 *		the new element.
 * @src:	address of data to copy into the array
 * @flags:	page allocation flags to use for array expansion
 *
 * Note that this *copies* the contents of @src into
 * the array.  If you are trying to store an array of
 * pointers, make sure to pass in &ptr instead of ptr.
 * You may instead wish to use the flex_array_put_ptr()
 * helper function.
 *
 * Locking must be provided by the caller.
 */
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
			gfp_t flags)
{
	int part_nr = 0;
	struct flex_array_part *part;
	void *dst;

	if (element_nr >= fa->total_nr_elements)
		return -ENOSPC;
	if (!fa->element_size)
		return 0;
	if (elements_fit_in_base(fa))
		part = (struct flex_array_part *)&fa->parts[0];
	else {
		part_nr = fa_element_to_part_nr(fa, element_nr);
		part = __fa_get_part(fa, part_nr, flags);
		if (!part)
			return -ENOMEM;
	}
	dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
	memcpy(dst, src, fa->element_size);
	return 0;
}
EXPORT_SYMBOL(flex_array_put);

/**
 * flex_array_clear - clear element in array at @element_nr
 * @fa:		the flex array of the element.
 * @element_nr:	index of the position to clear.
 *
 * Locking must be provided by the caller.
 */
int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
{
	int part_nr = 0;
	struct flex_array_part *part;
	void *dst;

	if (element_nr >= fa->total_nr_elements)
		return -ENOSPC;
	if (!fa->element_size)
		return 0;
	if (elements_fit_in_base(fa))
		part = (struct flex_array_part *)&fa->parts[0];
	else {
		part_nr = fa_element_to_part_nr(fa, element_nr);
		part = fa->parts[part_nr];
		if (!part)
			return -EINVAL;
	}
	dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
	memset(dst, FLEX_ARRAY_FREE, fa->element_size);
	return 0;
}
EXPORT_SYMBOL(flex_array_clear);

/**
 * flex_array_prealloc - guarantee that array space exists
 * @fa:		the flex array for which to preallocate parts
 * @start:	index of first array element for which space is allocated
 * @nr_elements: number of elements for which space is allocated
 * @flags:	page allocation flags
 *
 * This will guarantee that no future calls to flex_array_put()
 * will allocate memory.  It can be used if you are expecting to
 * be holding a lock or in some atomic context while writing
 * data into the array.
 *
 * Locking must be provided by the caller.
 */
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
			unsigned int nr_elements, gfp_t flags)
{
	int start_part;
	int end_part;
	int part_nr;
	unsigned int end;
	struct flex_array_part *part;

	if (!start && !nr_elements)
		return 0;
	if (start >= fa->total_nr_elements)
		return -ENOSPC;
	if (!nr_elements)
		return 0;

	end = start + nr_elements - 1;

	if (end >= fa->total_nr_elements)
		return -ENOSPC;
	if (!fa->element_size)
		return 0;
	if (elements_fit_in_base(fa))
		return 0;
	start_part = fa_element_to_part_nr(fa, start);
	end_part = fa_element_to_part_nr(fa, end);
	for (part_nr = start_part; part_nr <= end_part; part_nr++) {
		part = __fa_get_part(fa, part_nr, flags);
		if (!part)
			return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(flex_array_prealloc);

/**
 * flex_array_get - pull data back out of the array
 * @fa:		the flex array from which to extract data
 * @element_nr:	index of the element to fetch from the array
 *
 * Returns a pointer to the data at index @element_nr.  Note
 * that this is a copy of the data that was passed in.  If you
 * are using this to store pointers, you'll get back &ptr.  You
 * may instead wish to use the flex_array_get_ptr helper.
 *
 * Locking must be provided by the caller.
 */
void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
{
	int part_nr = 0;
	struct flex_array_part *part;

	if (!fa->element_size)
		return NULL;
	if (element_nr >= fa->total_nr_elements)
		return NULL;
	if (elements_fit_in_base(fa))
		part = (struct flex_array_part *)&fa->parts[0];
	else {
		part_nr = fa_element_to_part_nr(fa, element_nr);
		part = fa->parts[part_nr];
		if (!part)
			return NULL;
	}
	return &part->elements[index_inside_part(fa, element_nr, part_nr)];
}
EXPORT_SYMBOL(flex_array_get);

/**
 * flex_array_get_ptr - pull a ptr back out of the array
 * @fa:		the flex array from which to extract data
 * @element_nr:	index of the element to fetch from the array
 *
 * Returns the pointer placed in the flex array at element_nr using
 * flex_array_put_ptr().  This function should not be called if the
 * element in question was not set using the _put_ptr() helper.
 */
void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
{
	void **tmp;

	tmp = flex_array_get(fa, element_nr);
	if (!tmp)
		return NULL;

	return *tmp;
}
EXPORT_SYMBOL(flex_array_get_ptr);

static int part_is_free(struct flex_array_part *part)
{
	int i;

	for (i = 0; i < sizeof(struct flex_array_part); i++)
		if (part->elements[i] != FLEX_ARRAY_FREE)
			return 0;
	return 1;
}

/**
 * flex_array_shrink - free unused second-level pages
 * @fa:		the flex array to shrink
 *
 * Frees all second-level pages that consist solely of unused
 * elements.  Returns the number of pages freed.
 *
 * Locking must be provided by the caller.
 */
int flex_array_shrink(struct flex_array *fa)
{
	struct flex_array_part *part;
	int part_nr;
	int ret = 0;

	if (!fa->total_nr_elements || !fa->element_size)
		return 0;
	if (elements_fit_in_base(fa))
		return ret;
	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
		part = fa->parts[part_nr];
		if (!part)
			continue;
		if (part_is_free(part)) {
			fa->parts[part_nr] = NULL;
			kfree(part);
			ret++;
		}
	}
	return ret;
}
EXPORT_SYMBOL(flex_array_shrink);
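
/*
 * Illustrative sketch (not part of the original file): minimal use of the
 * flex_array API above.  flex_array_alloc() is declared in
 * <linux/flex_array.h>; it is not shown in this fragment, so its use here
 * is an assumption based on that header.
 */
#if 0	/* example only */
static int flex_array_example(void)
{
	struct flex_array *fa;
	int v = 42;
	int *p;
	int err;

	/* room for 128 ints; second-level parts are allocated lazily on put */
	fa = flex_array_alloc(sizeof(int), 128, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	err = flex_array_put(fa, 7, &v, GFP_KERNEL);	/* copies v in */
	if (err)
		goto out;

	p = flex_array_get(fa, 7);	/* pointer to the stored copy */
	if (p)
		pr_info("element 7 = %d\n", *p);
out:
	flex_array_free(fa);	/* frees the parts and the base structure */
	return err;
}
#endif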