@@ -837 +837 @@
         BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
         return (void *)addr;
 }
 
 /**
  * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
  *      block. Of course pages number can't exceed VMAP_BBMAP_BITS
  * @order: how many 2^order pages should be occupied in newly allocated block
  * @gfp_mask: flags for the page level allocator
  *
- * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
+ * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
  */
 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 {
         struct vmap_block_queue *vbq;
         struct vmap_block *vb;
         struct vmap_area *va;
         unsigned long vb_idx;
         int node, err;
         void *vaddr;
 
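The Return: line above documents the ERR_PTR(-errno) convention: on failure the function encodes an errno value in the returned pointer instead of returning NULL. A minimal caller sketch (hypothetical, not part of this patch; order is assumed to be supplied by the caller) checks it with the standard IS_ERR()/PTR_ERR() helpers:

        /* hypothetical caller sketch: never returns NULL, only ERR_PTR() on error */
        void *vaddr = new_vmap_block(order, GFP_KERNEL);

        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);  /* e.g. -ENOMEM */
        /* vaddr is a valid kernel virtual address from here on */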
@@ -1426 +1426 @@
 }
 
 /**
  * get_vm_area - reserve a contiguous kernel virtual area
  * @size: size of the area
  * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
  *
  * Search an area of @size in the kernel virtual mapping area,
  * and reserve it for our purposes. Returns the area descriptor
  * on success or %NULL on failure.
+ *
+ * Return: the area descriptor on success or %NULL on failure.
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
         return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
                                   NUMA_NO_NODE, GFP_KERNEL,
                                   __builtin_return_address(0));
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
                                 const void *caller)
@@ -1448 +1450 @@
                                   NUMA_NO_NODE, GFP_KERNEL, caller);
 }
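get_vm_area() only reserves a range of kernel virtual addresses; nothing is mapped into it yet. A minimal, hypothetical reserve/use/release sketch, with the mapping step elided and free_vm_area() used as the usual cleanup helper:

        struct vm_struct *area;

        area = get_vm_area(PAGE_SIZE, VM_IOREMAP);
        if (!area)
                return -ENOMEM;

        /* ... map device registers at area->addr and use them ... */

        free_vm_area(area);     /* release the reserved range again */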
 
 /**
  * find_vm_area - find a continuous kernel virtual area
  * @addr: base address
  *
  * Search for the kernel VM area starting at @addr, and return it.
  * It is up to the caller to do all required locking to keep the returned
  * pointer valid.
+ *
+ * Return: pointer to the found area or %NULL on failure
  */
 struct vm_struct *find_vm_area(const void *addr)
 {
         struct vmap_area *va;
 
         va = find_vmap_area((unsigned long)addr);
         if (va && va->flags & VM_VM_AREA)
                 return va->vm;
 
         return NULL;
 }
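A short, hypothetical lookup sketch: given a pointer obtained from vmalloc() or vmap(), find_vm_area() recovers the vm_struct descriptor, for example to inspect its size or flags (the caller is responsible for keeping the area alive, as noted above):

        struct vm_struct *area;

        area = find_vm_area(ptr);       /* ptr from vmalloc()/vmap() */
        if (area)
                pr_debug("area %p spans %lu bytes\n", area->addr, area->size);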
 
 /**
  * remove_vm_area - find and remove a continuous kernel virtual area
  * @addr: base address
  *
  * Search for the kernel VM area starting at @addr, and remove it.
  * This function returns the found VM area, but using it is NOT safe
  * on SMP machines, except for its size or flags.
+ *
+ * Return: pointer to the found area or %NULL on failure
  */
 struct vm_struct *remove_vm_area(const void *addr)
 {
         struct vmap_area *va;
 
         might_sleep();
 
         va = find_vmap_area((unsigned long)addr);
         if (va && va->flags & VM_VM_AREA) {
                 struct vm_struct *vm = va->vm;
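The usual caller pattern detaches the area and then frees the descriptor itself; a minimal sketch, assuming @addr came from get_vm_area() and, per the comment above, touching only the size/flags of the returned structure:

        struct vm_struct *vm;

        vm = remove_vm_area(addr);
        if (vm)
                kfree(vm);      /* descriptor is no longer tracked by vmalloc */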
@@ -1629 +1635 @@
 
 /**
  * vmap - map an array of pages into virtually contiguous space
  * @pages: array of page pointers
  * @count: number of pages to map
  * @flags: vm_area->flags
  * @prot: page protection for the mapping
  *
  * Maps @count pages from @pages into contiguous kernel virtual
  * space.
+ *
+ * Return: the address of the area or %NULL on failure
  */
 void *vmap(struct page **pages, unsigned int count,
            unsigned long flags, pgprot_t prot)
 {
         struct vm_struct *area;
         unsigned long size;             /* In bytes */
 
         might_sleep();
 
         if (count > totalram_pages())
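A minimal, hypothetical sketch of vmap(): two independently allocated pages become virtually contiguous, and vunmap() tears the mapping down without freeing the pages (error handling trimmed for brevity):

        struct page *pages[2];
        void *va;

        pages[0] = alloc_page(GFP_KERNEL);
        pages[1] = alloc_page(GFP_KERNEL);

        va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);       /* %NULL on failure */
        if (va) {
                /* pages[0] and pages[1] are now contiguous at va */
                vunmap(va);     /* unmaps only; the pages stay allocated */
        }

        __free_page(pages[1]);
        __free_page(pages[0]);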
@@ -1732 +1740 @@
  * @end: vm area range end
  * @gfp_mask: flags for the page level allocator
  * @prot: protection mask for the allocated pages
  * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
  * @node: node to use for allocation or NUMA_NO_NODE
  * @caller: caller's return address
  *
  * Allocate enough pages to cover @size from the page level
  * allocator with @gfp_mask flags. Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
+ *
+ * Return: the address of the area or %NULL on failure
  */
 void *__vmalloc_node_range(unsigned long size, unsigned long align,
                         unsigned long start, unsigned long end, gfp_t gfp_mask,
                         pgprot_t prot, unsigned long vm_flags, int node,
                         const void *caller)
 {
         struct vm_struct *area;
         void *addr;
         unsigned long real_size = size;
 
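__vmalloc_node_range() is the workhorse behind the exported wrappers. A hedged sketch of a direct call, mirroring the vmalloc_user() wrapper shown later in this diff (zeroed, SHMLBA-aligned, anywhere in the vmalloc range, tagged for user mapping; size is assumed to be defined by the caller):

        void *p;

        p = __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
                                 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
                                 VM_USERMAP, NUMA_NO_NODE,
                                 __builtin_return_address(0));
        if (!p)
                return -ENOMEM;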
@@ -1799 +1809 @@
  *
  * Allocate enough pages to cover @size from the page level
  * allocator with @gfp_mask flags. Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  *
  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
  * and __GFP_NOFAIL are not supported
  *
  * Any use of gfp flags outside of GFP_KERNEL should be consulted
  * with mm people.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 static void *__vmalloc_node(unsigned long size, unsigned long align,
                             gfp_t gfp_mask, pgprot_t prot,
                             int node, const void *caller)
 {
         return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
                                 gfp_mask, prot, 0, node, caller);
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
@@ -1838 +1850 @@
 
 /**
  * vmalloc - allocate virtually contiguous memory
  * @size: allocation size
  *
  * Allocate enough pages to cover @size from the page level
  * allocator and map them into contiguous kernel virtual space.
  *
  * For tight control over page level allocator and protection flags
  * use __vmalloc() instead.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vmalloc(unsigned long size)
 {
         return __vmalloc_node_flags(size, NUMA_NO_NODE,
                                     GFP_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc);
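A minimal usage sketch for vmalloc() and the zero-filling vzalloc() documented just below; both are released with vfree(), and the memory is virtually but not physically contiguous (nr_entries and struct foo are hypothetical):

        struct foo *table;

        table = vmalloc(nr_entries * sizeof(*table));   /* vzalloc() to get it zeroed */
        if (!table)
                return -ENOMEM;

        /* ... use table ... */

        vfree(table);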
 
 /**
  * vzalloc - allocate virtually contiguous memory with zero fill
  * @size: allocation size
  *
  * Allocate enough pages to cover @size from the page level
  * allocator and map them into contiguous kernel virtual space.
  * The memory allocated is set to zero.
  *
  * For tight control over page level allocator and protection flags
  * use __vmalloc() instead.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vzalloc(unsigned long size)
 {
         return __vmalloc_node_flags(size, NUMA_NO_NODE,
                                     GFP_KERNEL | __GFP_ZERO);
 }
 EXPORT_SYMBOL(vzalloc);
 
 /**
  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
  * @size: allocation size
  *
  * The resulting memory area is zeroed so it can be mapped to userspace
  * without leaking data.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vmalloc_user(unsigned long size)
 {
         return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
                                     GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
                                     VM_USERMAP, NUMA_NO_NODE,
                                     __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_user);
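Buffers from vmalloc_user() carry VM_USERMAP and are meant to be handed to userspace; a hedged sketch of the usual pairing with remap_vmalloc_range() in a driver's ->mmap() handler (foo_mmap and buf are hypothetical names, and buf is assumed to have been allocated elsewhere with vmalloc_user()):

        static int foo_mmap(struct file *file, struct vm_area_struct *vma)
        {
                /* buf: earlier vmalloc_user() allocation of sufficient size */
                return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
        }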
 
 /**
  * vmalloc_node - allocate memory on a specific node
  * @size: allocation size
  * @node: numa node
  *
  * Allocate enough pages to cover @size from the page level
  * allocator and map them into contiguous kernel virtual space.
  *
  * For tight control over page level allocator and protection flags
  * use __vmalloc() instead.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vmalloc_node(unsigned long size, int node)
 {
         return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
                         node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
 
 /**
  * vzalloc_node - allocate memory on a specific node with zero fill
  * @size: allocation size
  * @node: numa node
  *
  * Allocate enough pages to cover @size from the page level
  * allocator and map them into contiguous kernel virtual space.
  * The memory allocated is set to zero.
  *
  * For tight control over page level allocator and protection flags
  * use __vmalloc_node() instead.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vzalloc_node(unsigned long size, int node)
 {
         return __vmalloc_node_flags(size, node,
                         GFP_KERNEL | __GFP_ZERO);
 }
 EXPORT_SYMBOL(vzalloc_node);
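A short sketch of the node-aware variants: allocate a zeroed buffer preferably on the node the current CPU belongs to (numa_node_id() supplies it; buf_size is a hypothetical size variable), released with vfree() as usual:

        void *buf;

        buf = vzalloc_node(buf_size, numa_node_id());
        if (!buf)
                return -ENOMEM;

        /* ... */

        vfree(buf);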
 
 /**
  * vmalloc_exec - allocate virtually contiguous, executable memory
  * @size: allocation size
  *
  * Kernel-internal function to allocate enough pages to cover @size
  * from the page level allocator and map them into contiguous and
  * executable kernel virtual space.
  *
  * For tight control over page level allocator and protection flags
  * use __vmalloc() instead.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vmalloc_exec(unsigned long size)
 {
         return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC,
                         NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
@@ -1952 +1976 @@
  */
 #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
 #endif
 
 /**
  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
  * @size: allocation size
  *
  * Allocate enough 32bit PA addressable pages to cover @size from the
  * page level allocator and map them into contiguous kernel virtual space.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vmalloc_32(unsigned long size)
 {
         return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
                         NUMA_NO_NODE, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
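A brief, hypothetical sketch of vmalloc_32(): every backing page is 32-bit physically addressable, which matters for interfaces that truncate physical addresses when accessing the buffer page by page (fb_size is an assumed size variable):

        void *frame_buf;

        frame_buf = vmalloc_32(fb_size);
        if (!frame_buf)
                return -ENOMEM;

        /* ... */

        vfree(frame_buf);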
 
 /**
  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
  * @size: allocation size
  *
  * The resulting memory area is 32bit addressable and zeroed so it can be
  * mapped to userspace without leaking data.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vmalloc_32_user(unsigned long size)
 {
         return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
                                     GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
                                     VM_USERMAP, NUMA_NO_NODE,
                                     __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32_user);
 
@@ -2063 +2091 @@
         }
         return copied;
 }
 
 /**
  * vread() - read vmalloc area in a safe way.
  * @buf: buffer for reading data
  * @addr: vm address.
  * @count: number of bytes to be read.
  *
- * Returns # of bytes which addr and buf should be increased.
- * (same number to @count). Returns 0 if [addr...addr+count) doesn't
- * includes any intersect with alive vmalloc area.
- *
  * This function checks that addr is a valid vmalloc'ed area, and
  * copy data from that area to a given buffer. If the given memory range
  * of [addr...addr+count) includes some valid address, data is copied to
  * proper area of @buf. If there are memory holes, they'll be zero-filled.
  * IOREMAP area is treated as memory hole and no copy is done.
  *
  * If [addr...addr+count) doesn't include any intersection with a live
  * vm_struct area, returns 0. @buf should be kernel's buffer.
  *
  * Note: In usual ops, vread() is never necessary because the caller
  * should know vmalloc() area is valid and can use memcpy().
  * This is for routines which have to access vmalloc area without
  * any information, as /dev/kmem.
+ *
+ * Return: number of bytes for which addr and buf should be increased
+ * (same number as @count) or %0 if [addr...addr+count) doesn't
+ * include any intersection with valid vmalloc area
  */
 long vread(char *buf, char *addr, unsigned long count)
 {
         struct vmap_area *va;
         struct vm_struct *vm;
         char *vaddr, *buf_start = buf;
         unsigned long buflen = count;
         unsigned long n;
 
         /* Don't allow overflow */
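A hedged usage sketch for vread() (vwrite(), documented in the next hunk, is the mirror image for writing): copy from a possibly sparse vmalloc range into a kernel buffer, with holes zero-filled; vmalloc_addr is an assumed pointer into vmalloc space:

        char kbuf[128];
        long n;

        n = vread(kbuf, vmalloc_addr, sizeof(kbuf));
        if (n == 0)
                return -EINVAL; /* range overlaps no vmalloc area at all */

        /* kbuf now holds sizeof(kbuf) bytes; unmapped holes read as zeroes */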
@@ -2142 +2170 @@
 
         return buflen;
 }
 
 /**
  * vwrite() - write vmalloc area in a safe way.
  * @buf: buffer for source data
  * @addr: vm address.
  * @count: number of bytes to be written.
  *
- * Returns # of bytes which addr and buf should be incresed.
- * (same number to @count).
- * If [addr...addr+count) doesn't includes any intersect with valid
- * vmalloc area, returns 0.
- *
  * This function checks that addr is a valid vmalloc'ed area, and
  * copy data from a buffer to the given addr. If specified range of
  * [addr...addr+count) includes some valid address, data is copied from
  * proper area of @buf. If there are memory holes, no copy to hole.
  * IOREMAP area is treated as memory hole and no copy is done.
  *
  * If [addr...addr+count) doesn't include any intersection with a live
  * vm_struct area, returns 0. @buf should be kernel's buffer.
  *
  * Note: In usual ops, vwrite() is never necessary because the caller
  * should know vmalloc() area is valid and can use memcpy().
  * This is for routines which have to access vmalloc area without
  * any information, as /dev/kmem.
+ *
+ * Return: number of bytes for which addr and buf should be
+ * increased (same number as @count) or %0 if [addr...addr+count)
+ * doesn't include any intersection with valid vmalloc area
  */
 long vwrite(char *buf, char *addr, unsigned long count)
 {
         struct vmap_area *va;
         struct vm_struct *vm;
         char *vaddr;
         unsigned long n, buflen;
         int copied = 0;
 
         /* Don't allow overflow */