 	return bfqg;
 }
 
 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
 {
 	struct bfq_data *bfqd = bic_to_bfqd(bic);
 	struct bfq_group *bfqg = NULL;
 	uint64_t serial_nr;
 
 	rcu_read_lock();
-	serial_nr = bio_blkcg(bio)->css.serial_nr;
+	serial_nr = __bio_blkcg(bio)->css.serial_nr;
 
 	/*
 	 * Check whether blkcg has changed. The condition may trigger
 	 * spuriously on a newly created cic but there's no harm.
 	 */
 	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
 		goto out;
 
-	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+	bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
 	/*
 	 * Update blkg_path for bfq_log_* functions. We cache this
 	 * path, and update it here, for the following
 	 * reasons. Operations on blkg objects in blk-cgroup are
 	 * protected with the request_queue lock, and not with the
 	 * lock that protects the instances of this scheduler
 	 * (bfqd->lock). This exposes BFQ to the following sort of
 	 * race.
 	 *
 	 * The blkg_lookup performed in bfq_get_queue, protected