#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/dax.h>
#include <linux/mm.h>
#include "dax-private.h"
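
/* Resolve a percpu_ref back to its enclosing dev_dax instance. */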
static struct dev_dax *ref_to_dev_dax(struct percpu_ref *ref)
{
	return container_of(ref, struct dev_dax, ref);
}
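
/*
 * Release callback: the last reference on the mapping has been dropped;
 * signal dev_dax_percpu_exit() that teardown may proceed.
 */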
static void dev_dax_percpu_release(struct percpu_ref *ref)
{
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	complete(&dev_dax->cmp);
}
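
/*
 * Teardown action: wait for dev_dax_percpu_release() to signal that all
 * references are gone, then free the percpu_ref's resources.
 */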
static void dev_dax_percpu_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	wait_for_completion(&dev_dax->cmp);
	percpu_ref_exit(ref);
}
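
/*
 * Kill callback: drop the initial reference and switch the ref to atomic
 * mode; dev_dax_percpu_release() fires once outstanding references drain.
 */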
static void dev_dax_percpu_kill(struct percpu_ref *data)
{
	struct percpu_ref *ref = data;
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	percpu_ref_kill(ref);
}
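
/*
 * Sketch (assumption, not shown in this section) of how the helpers above
 * are typically wired up in the probe path before the region is mapped;
 * the field names (cmp, ref, pgmap) follow the dev_dax layout assumed in
 * dax-private.h and may differ between kernel versions:
 *
 *	init_completion(&dev_dax->cmp);
 *	rc = percpu_ref_init(&dev_dax->ref, dev_dax_percpu_release, 0,
 *			GFP_KERNEL);
 *	if (rc)
 *		return rc;
 *	rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref);
 *	if (rc)
 *		return rc;
 *	dev_dax->pgmap.ref = &dev_dax->ref;
 *	dev_dax->pgmap.kill = dev_dax_percpu_kill;
 *	addr = devm_memremap_pages(dev, &dev_dax->pgmap);
 */

/*
 * check_vma() - validate that a vma is suitable for a device-dax mapping:
 * it must be shared, aligned to the region's fault granularity, and backed
 * by a DAX-capable file mapping. Callers (the mmap and fault paths) are
 * expected to re-check while holding the dax_dev liveness lock, roughly
 * (sketch, assuming the <linux/dax.h> dax_read_lock() API):
 *
 *	id = dax_read_lock();
 *	rc = check_vma(dev_dax, vma, __func__);
 *	dax_read_unlock(id);
 *	if (rc)
 *		return rc;
 */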
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;
	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev, "%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}
	/* the vma must be aligned to the region's fault granularity */
	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info_ratelimited(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end, mask);
		return -EINVAL;
	}
	/* page-less (PFN_DEV only) ranges cannot be copied on fork */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info_ratelimited(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}
	if (!vma_is_dax(vma)) {
		dev_info_ratelimited(dev, "%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}
	return 0;
}