	.verify_fn = t10_pi_type3_verify_crc,
};
EXPORT_SYMBOL(t10_pi_type3_crc);

const struct blk_integrity_profile t10_pi_type3_ip = {
	.name = "T10-DIF-TYPE3-IP",
	.generate_fn = t10_pi_type3_generate_ip,
	.verify_fn = t10_pi_type3_verify_ip,
};
EXPORT_SYMBOL(t10_pi_type3_ip);

/**
 * t10_pi_prepare - prepare PI prior to submitting a request to the device
 * @rq:              request with PI that should be prepared
 * @protection_type: PI type (Type 1/Type 2/Type 3)
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Remap protection information to match the
 * physical LBA.
 *
 * Type 3 does not have a reference tag so no remapping is required.
 */
void t10_pi_prepare(struct request *rq, u8 protection_type)
{
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	if (protection_type == T10_PI_TYPE3_PROTECTION)
		return;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			void *p, *pmap;
			unsigned int j;

			pmap = kmap_atomic(iv.bv_page);
			p = pmap + iv.bv_offset;
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}

			kunmap_atomic(pmap);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}
EXPORT_SYMBOL(t10_pi_prepare);
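
/*
 * Illustrative usage sketch, not part of this file: how a low-level driver
 * might call t10_pi_prepare() on its submission path. my_driver_queue_rq(),
 * my_driver_protection_type() and my_driver_dispatch() are hypothetical
 * stand-ins; only t10_pi_prepare(), blk_integrity_rq() and the request
 * types come from the kernel. For example, a bio remapped onto a partition
 * starting at LBA 2048 still carries an integrity seed based on the original
 * (virtual) sectors; t10_pi_prepare() rewrites each matching ref_tag so the
 * device sees tags matching the physical start (2048 onward, assuming
 * 512-byte protection intervals).
 */
static blk_status_t my_driver_queue_rq(struct request *rq)
{
	u8 prot_type = my_driver_protection_type(rq->q);	/* hypothetical */

	/* Remap ref_tags from virtual to physical LBAs before dispatch. */
	if (blk_integrity_rq(rq))
		t10_pi_prepare(rq, prot_type);

	return my_driver_dispatch(rq);				/* hypothetical */
}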

/**
 * t10_pi_complete - prepare PI prior to returning a request to the block layer
 * @rq:              request with PI that should be prepared
 * @protection_type: PI type (Type 1/Type 2/Type 3)
 * @intervals:       total elements to prepare
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Since the physical start sector was submitted
 * to the device, we should remap it back to the virtual values expected by
 * the block layer.
 *
 * Type 3 does not have a reference tag so no remapping is required.
 */
void t10_pi_complete(struct request *rq, u8 protection_type,
		     unsigned int intervals)
{
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	if (protection_type == T10_PI_TYPE3_PROTECTION)
		return;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			void *p, *pmap;
			unsigned int j;

			pmap = kmap_atomic(iv.bv_page);
			p = pmap + iv.bv_offset;
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}

			kunmap_atomic(pmap);
		}
	}
}
EXPORT_SYMBOL(t10_pi_complete);
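
/*
 * Illustrative usage sketch, not part of this file: how a driver's completion
 * path might call t10_pi_complete() to undo the remapping performed by
 * t10_pi_prepare(). my_driver_complete_rq(), my_driver_protection_type() and
 * good_bytes are hypothetical; the shift by 9 assumes one protection tuple
 * per 512-byte interval, so the number of completed intervals is derived
 * from the number of bytes the device finished without error.
 */
static void my_driver_complete_rq(struct request *rq, unsigned int good_bytes)
{
	u8 prot_type = my_driver_protection_type(rq->q);	/* hypothetical */
	unsigned int intervals = good_bytes >> 9;		/* assumes 512-byte intervals */

	/* Restore the virtual ref_tags the block layer expects to verify. */
	if (blk_integrity_rq(rq))
		t10_pi_complete(rq, prot_type, intervals);

	blk_mq_end_request(rq, BLK_STS_OK);
}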