 #include <linux/io.h>
 #include <linux/kasan.h>
 #include <linux/memory_hotplug.h>
 #include <linux/mm.h>
 #include <linux/pfn_t.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/types.h>
 #include <linux/wait_bit.h>
 #include <linux/xarray.h>
+#include <linux/hmm.h>

 static DEFINE_XARRAY(pgmap_array);
 #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
 #define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

 #if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
 vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
                        unsigned long addr,
                        swp_entry_t entry,
                        unsigned int flags,
                        pmd_t *pmdp)
 {
        struct page *page = device_private_entry_to_page(entry);
+       struct hmm_devmem *devmem;
+
+       devmem = container_of(page->pgmap, typeof(*devmem), pagemap);

        /*
         * The page_fault() callback must migrate page back to system memory
         * so that CPU can access it. This might fail for various reasons
         * (device issue, device was unsafely unplugged, ...). When such
         * error conditions happen, the callback must return VM_FAULT_SIGBUS.
         *
         * Note that because memory cgroup charges are accounted to the device
         * memory, this should never fail because of memory restrictions (but
         * allocation of regular system page might still fail because we are
         * out of memory).
         *
         * There is a more in-depth description of what that callback can and
         * cannot do, in include/linux/memremap.h
         */
-       return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
+       return devmem->page_fault(vma, addr, page, flags, pmdp);
 }
 EXPORT_SYMBOL(device_private_entry_fault);
 #endif /* CONFIG_DEVICE_PRIVATE */
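The rewired return statement dispatches through struct hmm_devmem: container_of() recovers the hmm_devmem that embeds the dev_pagemap which page->pgmap points at, and the fault handler is taken from there instead of from the dev_pagemap itself. As a rough illustration of the contract spelled out in the comment block, a driver-side handler could look like the sketch below. The mydrv_* names are hypothetical and the parameter list is inferred from the call site above, not quoted from a header; treat it as a skeleton, not the real API.

/*
 * Hypothetical device-specific helper: returns 0 once the page's contents
 * have been migrated back to a regular system page.
 */
static int mydrv_migrate_to_ram(struct vm_area_struct *vma, unsigned long addr,
                                struct page *page, pmd_t *pmdp);

/* Skeleton of a fault handler a driver might end up installing. */
static vm_fault_t mydrv_devmem_fault(struct vm_area_struct *vma,
                                     unsigned long addr,
                                     struct page *page,
                                     unsigned int flags,
                                     pmd_t *pmdp)
{
        /*
         * Migrate the device-private page back to system memory so the CPU
         * can access it; how that is done (DMA, migrate_vma(), ...) is
         * device specific and stubbed out here.
         */
        if (mydrv_migrate_to_ram(vma, addr, page, pmdp))
                return VM_FAULT_SIGBUS; /* per the contract described above */

        return 0;
}

Whether the handler is stored directly on hmm_devmem or wired up through an ops table is an HMM implementation detail; the only thing the caller above relies on is this signature and the VM_FAULT_SIGBUS convention.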

 static void pgmap_array_delete(struct resource *res)
 {
        xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
                       NULL, GFP_KERNEL);
        synchronize_rcu();
 }
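pgmap_array_delete() wipes the whole PFN range with a single xa_store_range() call and then waits out an RCU grace period. The grace period is needed because lookups walk the same xarray under rcu_read_lock(); a minimal sketch of that lookup side, loosely modelled on the get_dev_pagemap() helper in this file, is shown below. The function name example_pfn_to_pgmap is made up, and error handling is trimmed down to the tryget that keeps a dying mapping from being handed out.

/*
 * Illustrative lookup side of pgmap_array (not a verbatim copy of
 * get_dev_pagemap()): translate a PFN to its dev_pagemap and take a
 * reference so the mapping cannot vanish while the caller uses it.
 */
static struct dev_pagemap *example_pfn_to_pgmap(unsigned long pfn)
{
        struct dev_pagemap *pgmap;

        rcu_read_lock();
        pgmap = xa_load(&pgmap_array, pfn);
        if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
                pgmap = NULL;   /* teardown already started, treat as absent */
        rcu_read_unlock();

        return pgmap;   /* caller drops the reference with put_dev_pagemap() */
}

Seen from this side, the synchronize_rcu() in pgmap_array_delete() guarantees that every reader which might still hold a pointer loaded from the xarray has left its read-side critical section before teardown continues.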