Source
438
438
/*
 * devm action callback: tear down the character device registered in
 * dev_dax_probe().  Takes void * to match the devm_add_action() signature;
 * the argument is the struct cdev * passed at registration time.
 */
static void dev_dax_cdev_del(void *cdev)
{
	cdev_del(cdev);
}
442
442
443
443
/*
 * devm action callback: shut down the device-dax instance on driver
 * unbind.  Thin void * adapter around kill_dev_dax() so it can be
 * queued with devm_add_action_or_reset() in dev_dax_probe().
 */
static void dev_dax_kill(void *dev_dax)
{
	kill_dev_dax(dev_dax);
}
447
447
448
-
/**
 * dev_dax_probe() - bind a device-dax instance and publish its chardev
 * @dev: the dev_dax device being probed
 *
 * Maps the region resource range 1:1 into the instance, registers the
 * character device backing /dev/daxX.Y, and arms devm cleanup actions so
 * that unbind unwinds everything automatically (see dev_dax_remove()).
 *
 * Non-static and exported (EXPORT_SYMBOL_GPL below) so that override
 * drivers — e.g. the CONFIG_DEV_DAX_PMEM_COMPAT shim — can call it
 * directly.  Returns 0 on success or a negative errno.
 */
int dev_dax_probe(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct resource *res = &dev_dax->region->res;
	struct inode *inode;
	struct cdev *cdev;
	void *addr;
	int rc;

	/* 1:1 map region resource range to device-dax instance range */
	/*
	 * NOTE(review): original lines 459-476 (presumably the resource
	 * request and pgmap/percpu_ref setup implied by the cleanup calls
	 * below) are not visible in this chunk — confirm against the full
	 * file before relying on this gap being empty.
	 */
	addr = devm_memremap_pages(dev, &dev_dax->pgmap);
	if (IS_ERR(addr)) {
		/*
		 * devm_memremap_pages() failed before taking ownership of
		 * the percpu ref, so drop the exit action and the ref here.
		 */
		devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref);
		percpu_ref_exit(&dev_dax->ref);
		return PTR_ERR(addr);
	}

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	if (dev->class) {
		/* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
		cdev->owner = dev->parent->driver->owner;
	} else
		cdev->owner = dev->driver->owner;
	/* keep @dev pinned while the cdev is live */
	cdev_set_parent(cdev, &dev->kobj);
	rc = cdev_add(cdev, dev->devt, 1);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
	if (rc)
		return rc;

	run_dax(dax_dev);
	/* arm teardown of the instance itself; ordered after cdev removal */
	return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
}
EXPORT_SYMBOL_GPL(dev_dax_probe);
500
505
501
506
/*
 * dev_dax_remove() - driver-model remove hook.
 *
 * Intentionally empty: every resource taken in dev_dax_probe() is
 * devm-managed (memremap pages, cdev deletion, instance kill), so the
 * driver core's devm release phase performs the entire unwind.
 */
static int dev_dax_remove(struct device *dev)
{
	/* all probe actions are unwound by devm */
	return 0;
}
506
511
507
512
static struct dax_device_driver device_dax_driver = {
508
513
.drv = {
509
514
.probe = dev_dax_probe,