--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -75,24 +75,22 @@
 };
 
 static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
 {
 	struct pci_p2pdma *p2p =
 		container_of(ref, struct pci_p2pdma, devmap_ref);
 
 	complete_all(&p2p->devmap_ref_done);
 }
 
-static void pci_p2pdma_percpu_kill(void *data)
+static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
 {
-	struct percpu_ref *ref = data;
-
 	/*
 	 * pci_p2pdma_add_resource() may be called multiple times
 	 * by a driver and may register the percpu_kill devm action multiple
 	 * times. We only want the first action to actually kill the
 	 * percpu_ref.
	 */
 	if (percpu_ref_is_dying(ref))
 		return;
 
 	percpu_ref_kill(ref);
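
The percpu_ref_is_dying() guard above is what makes this conversion safe:
pci_p2pdma_add_resource() may be called once per BAR on the same device,
so the kill callback can be reached more than once but must only take the
ref down on the first call. A minimal userspace sketch of that idempotent
guard (plain C; mock_ref and mock_ref_kill are illustrative stand-ins,
not the kernel's percpu_ref API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct percpu_ref. */
struct mock_ref {
	bool dying;
};

/* Mirrors pci_p2pdma_percpu_kill(): only the first call takes effect. */
static void mock_ref_kill(struct mock_ref *ref)
{
	if (ref->dying)		/* cf. percpu_ref_is_dying() */
		return;

	ref->dying = true;
	printf("ref killed\n");
}

int main(void)
{
	struct mock_ref ref = { .dying = false };

	mock_ref_kill(&ref);	/* first call kills the ref */
	mock_ref_kill(&ref);	/* second call is a no-op */
	return 0;
}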
@@ -191,38 +189,34 @@
 	if (!pgmap)
 		return -ENOMEM;
 
 	pgmap->res.start = pci_resource_start(pdev, bar) + offset;
 	pgmap->res.end = pgmap->res.start + size - 1;
 	pgmap->res.flags = pci_resource_flags(pdev, bar);
 	pgmap->ref = &pdev->p2pdma->devmap_ref;
 	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
 	pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
 		pci_resource_start(pdev, bar);
+	pgmap->kill = pci_p2pdma_percpu_kill;
 
 	addr = devm_memremap_pages(&pdev->dev, pgmap);
 	if (IS_ERR(addr)) {
 		error = PTR_ERR(addr);
 		goto pgmap_free;
 	}
 
 	error = gen_pool_add_virt(pdev->p2pdma->pool, (unsigned long)addr,
 			pci_bus_address(pdev, bar) + offset,
 			resource_size(&pgmap->res), dev_to_node(&pdev->dev));
 	if (error)
 		goto pgmap_free;
 
-	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_percpu_kill,
-					 &pdev->p2pdma->devmap_ref);
-	if (error)
-		goto pgmap_free;
-
 	pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
 		 &pgmap->res);
 
 	return 0;
 
 pgmap_free:
 	devm_kfree(&pdev->dev, pgmap);
 	return error;
 }
 EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
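
With pgmap->kill wired up, the separate devm_add_action_or_reset()
registration (and its extra error path) becomes unnecessary: the pagemap
teardown path is now responsible for killing the ref, rather than a
driver-registered devm action. A sketch of that inverted ownership, under
the same mock-type assumptions as above (mock_pagemap and
mock_pagemap_teardown are illustrative, not the kernel's struct
dev_pagemap API):

#include <stdio.h>

/* Illustrative stand-ins; not kernel types. */
struct mock_ref {
	int dying;
};

struct mock_pagemap {
	struct mock_ref *ref;
	void (*kill)(struct mock_ref *ref);
};

static void mock_kill(struct mock_ref *ref)
{
	if (ref->dying)		/* idempotent, as in the patch */
		return;
	ref->dying = 1;
	printf("ref killed by pagemap teardown\n");
}

/* Stand-in for the devm_memremap_pages() teardown path: the pagemap
 * core, not a driver-registered devm action, invokes the callback. */
static void mock_pagemap_teardown(struct mock_pagemap *pgmap)
{
	pgmap->kill(pgmap->ref);
}

int main(void)
{
	struct mock_ref ref = { .dying = 0 };
	struct mock_pagemap pgmap = { .ref = &ref, .kill = mock_kill };

	mock_pagemap_teardown(&pgmap);	/* kills the ref exactly once */
	return 0;
}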