--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -173,21 +173,21 @@
 #define SPBFCR_TXTRG_1B         0x00    /* 31 bytes (1 byte available) */
 #define SPBFCR_TXTRG_32B        0x30    /* 0 byte (32 bytes available) */
 #define SPBFCR_RXTRG_1B         0x00    /* 1 byte (31 bytes available) */
 #define SPBFCR_RXTRG_32B        0x07    /* 32 bytes (0 byte available) */
 
 #define QSPI_BUFFER_SIZE        32u
 
 struct rspi_data {
         void __iomem *addr;
         u32 max_speed_hz;
-        struct spi_master *master;
+        struct spi_controller *ctlr;
         wait_queue_head_t wait;
         struct clk *clk;
         u16 spcmd;
         u8 spsr;
         u8 sppcr;
         int rx_irq, tx_irq;
         const struct spi_ops *ops;
 
         unsigned dma_callbacked:1;
         unsigned byte_access:1;
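
The only change in this hunk is the renamed back-pointer: struct spi_master is the legacy name for what the SPI core now calls struct spi_controller, one type covering both the host ("master") and device ("slave") roles, and the neutral ctlr naming follows it. For reference, here is a minimal sketch (a hypothetical foo driver, not from this patch) of how such a back-pointer is normally set up: the driver's private data is allocated in one block with the controller and retrieved via spi_controller_get_devdata().

    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    struct foo_priv {
            struct spi_controller *ctlr;    /* back-pointer, like rspi_data.ctlr */
            void __iomem *addr;
    };

    static int foo_probe(struct platform_device *pdev)
    {
            struct spi_controller *ctlr;
            struct foo_priv *priv;

            /* private data is allocated together with the controller */
            ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
            if (!ctlr)
                    return -ENOMEM;

            priv = spi_controller_get_devdata(ctlr);
            priv->ctlr = ctlr;
            /* ... configure and register ctlr ... */
            return 0;
    }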
@@ -230,22 +230,22 @@
 {
         if (rspi->byte_access)
                 return rspi_read8(rspi, RSPI_SPDR);
         else /* 16 bit */
                 return rspi_read16(rspi, RSPI_SPDR);
 }
 
 /* optional functions */
 struct spi_ops {
         int (*set_config_register)(struct rspi_data *rspi, int access_size);
-        int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
-                            struct spi_transfer *xfer);
+        int (*transfer_one)(struct spi_controller *ctlr,
+                            struct spi_device *spi, struct spi_transfer *xfer);
         u16 mode_bits;
         u16 flags;
         u16 fifo_size;
 };
 
 /*
  * functions for RSPI on legacy SH
  */
 static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
 {
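
The transfer_one hook whose signature changes above follows the SPI core's contract: the core asserts chip select (and, when can_dma agreed, maps the buffers) before calling in. A minimal sketch of that contract, reusing the foo_priv from the earlier sketch and with foo_hw_start() standing in for hardware-specific code:

    /*
     * Return 0 when the transfer finished synchronously, or 1 when it is
     * still in flight and spi_finalize_current_transfer() will be called
     * later from IRQ or DMA completion context.
     */
    static int foo_transfer_one(struct spi_controller *ctlr,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
    {
            struct foo_priv *priv = spi_controller_get_devdata(ctlr);

            foo_hw_start(priv, xfer->tx_buf, xfer->rx_buf, xfer->len);
            return 1;       /* completion reported asynchronously */
    }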
@@ -459,35 +459,35 @@
 
 static inline int rspi_wait_for_rx_full(struct rspi_data *rspi)
 {
         return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE);
 }
 
 static int rspi_data_out(struct rspi_data *rspi, u8 data)
 {
         int error = rspi_wait_for_tx_empty(rspi);
         if (error < 0) {
-                dev_err(&rspi->master->dev, "transmit timeout\n");
+                dev_err(&rspi->ctlr->dev, "transmit timeout\n");
                 return error;
         }
         rspi_write_data(rspi, data);
         return 0;
 }
 
 static int rspi_data_in(struct rspi_data *rspi)
 {
         int error;
         u8 data;
 
         error = rspi_wait_for_rx_full(rspi);
         if (error < 0) {
-                dev_err(&rspi->master->dev, "receive timeout\n");
+                dev_err(&rspi->ctlr->dev, "receive timeout\n");
                 return error;
         }
         data = rspi_read_data(rspi);
         return data;
 }
 
 static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx,
                              unsigned int n)
 {
         while (n-- > 0) {
@@ -519,42 +519,42 @@
                              struct sg_table *rx)
 {
         struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
         u8 irq_mask = 0;
         unsigned int other_irq = 0;
         dma_cookie_t cookie;
         int ret;
 
         /* First prepare and submit the DMA request(s), as this may fail */
         if (rx) {
-                desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
-                                        rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+                desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl,
+                                        rx->nents, DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                 if (!desc_rx) {
                         ret = -EAGAIN;
                         goto no_dma_rx;
                 }
 
                 desc_rx->callback = rspi_dma_complete;
                 desc_rx->callback_param = rspi;
                 cookie = dmaengine_submit(desc_rx);
                 if (dma_submit_error(cookie)) {
                         ret = cookie;
                         goto no_dma_rx;
                 }
 
                 irq_mask |= SPCR_SPRIE;
         }
 
         if (tx) {
-                desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
-                                        tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+                desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl,
+                                        tx->nents, DMA_MEM_TO_DEV,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                 if (!desc_tx) {
                         ret = -EAGAIN;
                         goto no_dma_tx;
                 }
 
                 if (rx) {
                         /* No callback */
                         desc_tx->callback = NULL;
                 } else {
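
Beyond the argument re-wrapping, this hunk is the driver's instance of the standard dmaengine slave sequence: prepare a descriptor, attach the callback, submit, and only then issue. Nothing starts on the wire until dma_async_issue_pending(); preparing and submitting first lets the driver fall back to PIO (-EAGAIN) before the hardware has been touched. A condensed sketch, assuming a channel chan and a completion handler foo_dma_done():

    #include <linux/dmaengine.h>

    static int foo_submit_rx(struct dma_chan *chan, struct scatterlist *sgl,
                             unsigned int nents, void *ctx)
    {
            struct dma_async_tx_descriptor *desc;
            dma_cookie_t cookie;

            desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!desc)
                    return -EAGAIN;         /* no descriptor: fall back to PIO */

            desc->callback = foo_dma_done;  /* assumed completion handler */
            desc->callback_param = ctx;

            cookie = dmaengine_submit(desc);        /* queue on the channel */
            if (dma_submit_error(cookie))
                    return cookie;

            dma_async_issue_pending(chan);  /* nothing moves until this call */
            return 0;
    }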
@@ -577,56 +577,56 @@
         if (tx)
                 disable_irq(other_irq = rspi->tx_irq);
         if (rx && rspi->rx_irq != other_irq)
                 disable_irq(rspi->rx_irq);
 
         rspi_enable_irq(rspi, irq_mask);
         rspi->dma_callbacked = 0;
 
         /* Now start DMA */
         if (rx)
-                dma_async_issue_pending(rspi->master->dma_rx);
+                dma_async_issue_pending(rspi->ctlr->dma_rx);
         if (tx)
-                dma_async_issue_pending(rspi->master->dma_tx);
+                dma_async_issue_pending(rspi->ctlr->dma_tx);
 
         ret = wait_event_interruptible_timeout(rspi->wait,
                                                rspi->dma_callbacked, HZ);
         if (ret > 0 && rspi->dma_callbacked) {
                 ret = 0;
         } else {
                 if (!ret) {
-                        dev_err(&rspi->master->dev, "DMA timeout\n");
+                        dev_err(&rspi->ctlr->dev, "DMA timeout\n");
                         ret = -ETIMEDOUT;
                 }
                 if (tx)
-                        dmaengine_terminate_all(rspi->master->dma_tx);
+                        dmaengine_terminate_all(rspi->ctlr->dma_tx);
                 if (rx)
-                        dmaengine_terminate_all(rspi->master->dma_rx);
+                        dmaengine_terminate_all(rspi->ctlr->dma_rx);
         }
 
         rspi_disable_irq(rspi, irq_mask);
 
         if (tx)
                 enable_irq(rspi->tx_irq);
         if (rx && rspi->rx_irq != other_irq)
                 enable_irq(rspi->rx_irq);
 
         return ret;
 
 no_dma_tx:
         if (rx)
-                dmaengine_terminate_all(rspi->master->dma_rx);
+                dmaengine_terminate_all(rspi->ctlr->dma_rx);
 no_dma_rx:
         if (ret == -EAGAIN) {
                 pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
-                             dev_driver_string(&rspi->master->dev),
-                             dev_name(&rspi->master->dev));
+                             dev_driver_string(&rspi->ctlr->dev),
+                             dev_name(&rspi->ctlr->dev));
         }
         return ret;
 }
 
 static void rspi_receive_init(const struct rspi_data *rspi)
 {
         u8 spsr;
 
         spsr = rspi_read8(rspi, RSPI_SPSR);
         if (spsr & SPSR_SPRF)
@@ -653,32 +653,32 @@
         rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
         rspi_write8(rspi, 0, QSPI_SPBFCR);
 }
 
 static bool __rspi_can_dma(const struct rspi_data *rspi,
                            const struct spi_transfer *xfer)
 {
         return xfer->len > rspi->ops->fifo_size;
 }
 
-static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
+static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
                          struct spi_transfer *xfer)
 {
-        struct rspi_data *rspi = spi_master_get_devdata(master);
+        struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
 
         return __rspi_can_dma(rspi, xfer);
 }
 
 static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
                                         struct spi_transfer *xfer)
 {
-        if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer))
+        if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer))
                 return -EAGAIN;
 
         /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
         return rspi_dma_transfer(rspi, &xfer->tx_sg,
                                  xfer->rx_buf ? &xfer->rx_sg : NULL);
 }
 
 static int rspi_common_transfer(struct rspi_data *rspi,
                                 struct spi_transfer *xfer)
 {
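
Setting ctlr->can_dma (done in rspi_request_dma() further down) has a side effect that is easy to miss: when the callback returns true for a transfer, the core DMA-maps tx_buf/rx_buf into xfer->tx_sg/rx_sg before calling transfer_one(), which is why the code above can hand scatterlists straight to the dmaengine. Here the policy is simply "larger than the FIFO". The same idea as a sketch, with an assumed FOO_FIFO_SIZE:

    #define FOO_FIFO_SIZE 32        /* assumed FIFO depth */

    static bool foo_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
                            struct spi_transfer *xfer)
    {
            /* PIO is cheaper than DMA setup for FIFO-sized transfers */
            return xfer->len > FOO_FIFO_SIZE;
    }

    /* in probe, only once the channels were successfully requested: */
    ctlr->can_dma = foo_can_dma;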
@@ -691,70 +691,70 @@
         ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
         if (ret < 0)
                 return ret;
 
         /* Wait for the last transmission */
         rspi_wait_for_tx_empty(rspi);
 
         return 0;
 }
 
-static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
-                             struct spi_transfer *xfer)
+static int rspi_transfer_one(struct spi_controller *ctlr,
+                             struct spi_device *spi, struct spi_transfer *xfer)
 {
-        struct rspi_data *rspi = spi_master_get_devdata(master);
+        struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
         u8 spcr;
 
         spcr = rspi_read8(rspi, RSPI_SPCR);
         if (xfer->rx_buf) {
                 rspi_receive_init(rspi);
                 spcr &= ~SPCR_TXMD;
         } else {
                 spcr |= SPCR_TXMD;
         }
         rspi_write8(rspi, spcr, RSPI_SPCR);
 
         return rspi_common_transfer(rspi, xfer);
 }
 
-static int rspi_rz_transfer_one(struct spi_master *master,
+static int rspi_rz_transfer_one(struct spi_controller *ctlr,
                                 struct spi_device *spi,
                                 struct spi_transfer *xfer)
 {
-        struct rspi_data *rspi = spi_master_get_devdata(master);
+        struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
 
         rspi_rz_receive_init(rspi);
 
         return rspi_common_transfer(rspi, xfer);
 }
 
 static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
                                         u8 *rx, unsigned int len)
 {
         unsigned int i, n;
         int ret;
 
         while (len > 0) {
                 n = qspi_set_send_trigger(rspi, len);
                 qspi_set_receive_trigger(rspi, len);
                 if (n == QSPI_BUFFER_SIZE) {
                         ret = rspi_wait_for_tx_empty(rspi);
                         if (ret < 0) {
-                                dev_err(&rspi->master->dev, "transmit timeout\n");
+                                dev_err(&rspi->ctlr->dev, "transmit timeout\n");
                                 return ret;
                         }
                         for (i = 0; i < n; i++)
                                 rspi_write_data(rspi, *tx++);
 
                         ret = rspi_wait_for_rx_full(rspi);
                         if (ret < 0) {
-                                dev_err(&rspi->master->dev, "receive timeout\n");
+                                dev_err(&rspi->ctlr->dev, "receive timeout\n");
                                 return ret;
                         }
                         for (i = 0; i < n; i++)
                                 *rx++ = rspi_read_data(rspi);
                 } else {
                         ret = rspi_pio_transfer(rspi, tx, rx, n);
                         if (ret < 0)
                                 return ret;
                 }
                 len -= n;
@@ -778,32 +778,32 @@
                                             xfer->rx_buf, xfer->len);
 }
 
 static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
 {
         const u8 *tx = xfer->tx_buf;
         unsigned int n = xfer->len;
         unsigned int i, len;
         int ret;
 
-        if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+        if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
                 ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
                 if (ret != -EAGAIN)
                         return ret;
         }
 
         while (n > 0) {
                 len = qspi_set_send_trigger(rspi, n);
                 if (len == QSPI_BUFFER_SIZE) {
                         ret = rspi_wait_for_tx_empty(rspi);
                         if (ret < 0) {
-                                dev_err(&rspi->master->dev, "transmit timeout\n");
+                                dev_err(&rspi->ctlr->dev, "transmit timeout\n");
                                 return ret;
                         }
                         for (i = 0; i < len; i++)
                                 rspi_write_data(rspi, *tx++);
                 } else {
                         ret = rspi_pio_transfer(rspi, tx, NULL, len);
                         if (ret < 0)
                                 return ret;
                 }
                 n -= len;
@@ -815,69 +815,69 @@
         return 0;
 }
 
 static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
 {
         u8 *rx = xfer->rx_buf;
         unsigned int n = xfer->len;
         unsigned int i, len;
         int ret;
 
-        if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+        if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
                 int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
                 if (ret != -EAGAIN)
                         return ret;
         }
 
         while (n > 0) {
                 len = qspi_set_receive_trigger(rspi, n);
                 if (len == QSPI_BUFFER_SIZE) {
                         ret = rspi_wait_for_rx_full(rspi);
                         if (ret < 0) {
-                                dev_err(&rspi->master->dev, "receive timeout\n");
+                                dev_err(&rspi->ctlr->dev, "receive timeout\n");
                                 return ret;
                         }
                         for (i = 0; i < len; i++)
                                 *rx++ = rspi_read_data(rspi);
                 } else {
                         ret = rspi_pio_transfer(rspi, NULL, rx, len);
                         if (ret < 0)
                                 return ret;
                 }
                 n -= len;
         }
 
         return 0;
 }
 
-static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
-                             struct spi_transfer *xfer)
+static int qspi_transfer_one(struct spi_controller *ctlr,
+                             struct spi_device *spi, struct spi_transfer *xfer)
 {
-        struct rspi_data *rspi = spi_master_get_devdata(master);
+        struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
 
         if (spi->mode & SPI_LOOP) {
                 return qspi_transfer_out_in(rspi, xfer);
         } else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
                 /* Quad or Dual SPI Write */
                 return qspi_transfer_out(rspi, xfer);
         } else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
                 /* Quad or Dual SPI Read */
                 return qspi_transfer_in(rspi, xfer);
         } else {
                 /* Single SPI Transfer */
                 return qspi_transfer_out_in(rspi, xfer);
         }
 }
 
 static int rspi_setup(struct spi_device *spi)
 {
-        struct rspi_data *rspi = spi_master_get_devdata(spi->master);
+        struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
 
         rspi->max_speed_hz = spi->max_speed_hz;
 
         rspi->spcmd = SPCMD_SSLKP;
         if (spi->mode & SPI_CPOL)
                 rspi->spcmd |= SPCMD_CPOL;
         if (spi->mode & SPI_CPHA)
                 rspi->spcmd |= SPCMD_CPHA;
 
         /* CMOS output mode and MOSI signal from previous transfer */
@@ -948,43 +948,43 @@
         }
         if (i) {
                 /* Set final transfer data length and sequence length */
                 rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
                 rspi_write8(rspi, i - 1, RSPI_SPSCR);
         }
 
         return 0;
 }
 
-static int rspi_prepare_message(struct spi_master *master,
+static int rspi_prepare_message(struct spi_controller *ctlr,
                                 struct spi_message *msg)
 {
-        struct rspi_data *rspi = spi_master_get_devdata(master);
+        struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
         int ret;
 
         if (msg->spi->mode &
             (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
                 /* Setup sequencer for messages with multiple transfer modes */
                 ret = qspi_setup_sequencer(rspi, msg);
                 if (ret < 0)
                         return ret;
         }
 
         /* Enable SPI function in master mode */
         rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
         return 0;
 }
 
-static int rspi_unprepare_message(struct spi_master *master,
+static int rspi_unprepare_message(struct spi_controller *ctlr,
                                   struct spi_message *msg)
 {
-        struct rspi_data *rspi = spi_master_get_devdata(master);
+        struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
 
         /* Disable SPI function */
         rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
 
         /* Reset sequencer for Single SPI Transfers */
         rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
         rspi_write8(rspi, 0, RSPI_SPSCR);
         return 0;
 }
 
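prepare_message() and unprepare_message() bracket each spi_message: the core calls the former before the first transfer and the latter after the last, on the same controller, and a negative return from prepare aborts the message. rspi uses the pair to program the QSPI sequencer for mixed-width messages and then restore the single-transfer setup. Sketched generically (foo_* helpers assumed):

    static int foo_prepare_message(struct spi_controller *ctlr,
                                   struct spi_message *msg)
    {
            struct foo_priv *priv = spi_controller_get_devdata(ctlr);

            return foo_hw_configure_for(priv, msg); /* <0 aborts the message */
    }

    static int foo_unprepare_message(struct spi_controller *ctlr,
                                     struct spi_message *msg)
    {
            struct foo_priv *priv = spi_controller_get_devdata(ctlr);

            foo_hw_restore_defaults(priv);
            return 0;
    }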
@@ -1074,214 +1074,214 @@
         ret = dmaengine_slave_config(chan, &cfg);
         if (ret) {
                 dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
                 dma_release_channel(chan);
                 return NULL;
         }
 
         return chan;
 }
 
-static int rspi_request_dma(struct device *dev, struct spi_master *master,
+static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
                             const struct resource *res)
 {
         const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
         unsigned int dma_tx_id, dma_rx_id;
 
         if (dev->of_node) {
                 /* In the OF case we will get the slave IDs from the DT */
                 dma_tx_id = 0;
                 dma_rx_id = 0;
         } else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) {
                 dma_tx_id = rspi_pd->dma_tx_id;
                 dma_rx_id = rspi_pd->dma_rx_id;
         } else {
                 /* The driver assumes no error. */
                 return 0;
         }
 
-        master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
-                                               res->start + RSPI_SPDR);
-        if (!master->dma_tx)
+        ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
+                                             res->start + RSPI_SPDR);
+        if (!ctlr->dma_tx)
                 return -ENODEV;
 
-        master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
-                                               res->start + RSPI_SPDR);
-        if (!master->dma_rx) {
-                dma_release_channel(master->dma_tx);
-                master->dma_tx = NULL;
+        ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
+                                             res->start + RSPI_SPDR);
+        if (!ctlr->dma_rx) {
+                dma_release_channel(ctlr->dma_tx);
+                ctlr->dma_tx = NULL;
                 return -ENODEV;
         }
 
-        master->can_dma = rspi_can_dma;
+        ctlr->can_dma = rspi_can_dma;
         dev_info(dev, "DMA available");
         return 0;
 }
 
-static void rspi_release_dma(struct spi_master *master)
+static void rspi_release_dma(struct spi_controller *ctlr)
 {
-        if (master->dma_tx)
-                dma_release_channel(master->dma_tx);
-        if (master->dma_rx)
-                dma_release_channel(master->dma_rx);
+        if (ctlr->dma_tx)
+                dma_release_channel(ctlr->dma_tx);
+        if (ctlr->dma_rx)
+                dma_release_channel(ctlr->dma_rx);
 }
 
 static int rspi_remove(struct platform_device *pdev)
 {
         struct rspi_data *rspi = platform_get_drvdata(pdev);
 
-        rspi_release_dma(rspi->master);
+        rspi_release_dma(rspi->ctlr);
         pm_runtime_disable(&pdev->dev);
 
         return 0;
 }
 
 static const struct spi_ops rspi_ops = {
         .set_config_register = rspi_set_config_register,
         .transfer_one = rspi_transfer_one,
         .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
-        .flags = SPI_MASTER_MUST_TX,
+        .flags = SPI_CONTROLLER_MUST_TX,
         .fifo_size = 8,
 };
 
 static const struct spi_ops rspi_rz_ops = {
         .set_config_register = rspi_rz_set_config_register,
         .transfer_one = rspi_rz_transfer_one,
         .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
-        .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+        .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
         .fifo_size = 8, /* 8 for TX, 32 for RX */
 };
 
 static const struct spi_ops qspi_ops = {
         .set_config_register = qspi_set_config_register,
         .transfer_one = qspi_transfer_one,
         .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
                      SPI_TX_DUAL | SPI_TX_QUAD |
                      SPI_RX_DUAL | SPI_RX_QUAD,
-        .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+        .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
         .fifo_size = 32,
 };
 
 #ifdef CONFIG_OF
 static const struct of_device_id rspi_of_match[] = {
         /* RSPI on legacy SH */
         { .compatible = "renesas,rspi", .data = &rspi_ops },
         /* RSPI on RZ/A1H */
         { .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
         /* QSPI on R-Car Gen2 */
         { .compatible = "renesas,qspi", .data = &qspi_ops },
         { /* sentinel */ }
 };
 
 MODULE_DEVICE_TABLE(of, rspi_of_match);
 
-static int rspi_parse_dt(struct device *dev, struct spi_master *master)
+static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
 {
         u32 num_cs;
         int error;
 
         /* Parse DT properties */
         error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
         if (error) {
                 dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
                 return error;
         }
 
-        master->num_chipselect = num_cs;
+        ctlr->num_chipselect = num_cs;
         return 0;
 }
 #else
 #define rspi_of_match NULL
-static inline int rspi_parse_dt(struct device *dev, struct spi_master *master)
+static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
 {
         return -EINVAL;
 }
 #endif /* CONFIG_OF */
 
 static int rspi_request_irq(struct device *dev, unsigned int irq,
                             irq_handler_t handler, const char *suffix,
                             void *dev_id)
 {
         const char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
                                           dev_name(dev), suffix);
         if (!name)
                 return -ENOMEM;
 
         return devm_request_irq(dev, irq, handler, 0, name, dev_id);
 }
 
 static int rspi_probe(struct platform_device *pdev)
 {
         struct resource *res;
-        struct spi_master *master;
+        struct spi_controller *ctlr;
         struct rspi_data *rspi;
         int ret;
         const struct rspi_plat_data *rspi_pd;
         const struct spi_ops *ops;
 
-        master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
-        if (master == NULL)
+        ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
+        if (ctlr == NULL)
                 return -ENOMEM;
 
         ops = of_device_get_match_data(&pdev->dev);
         if (ops) {
-                ret = rspi_parse_dt(&pdev->dev, master);
+                ret = rspi_parse_dt(&pdev->dev, ctlr);
                 if (ret)
                         goto error1;
         } else {
                 ops = (struct spi_ops *)pdev->id_entry->driver_data;
                 rspi_pd = dev_get_platdata(&pdev->dev);
                 if (rspi_pd && rspi_pd->num_chipselect)
-                        master->num_chipselect = rspi_pd->num_chipselect;
+                        ctlr->num_chipselect = rspi_pd->num_chipselect;
                 else
-                        master->num_chipselect = 2; /* default */
+                        ctlr->num_chipselect = 2; /* default */
         }
 
         /* ops parameter check */
         if (!ops->set_config_register) {
                 dev_err(&pdev->dev, "there is no set_config_register\n");
                 ret = -ENODEV;
                 goto error1;
         }
 
-        rspi = spi_master_get_devdata(master);
+        rspi = spi_controller_get_devdata(ctlr);
         platform_set_drvdata(pdev, rspi);
         rspi->ops = ops;
-        rspi->master = master;
+        rspi->ctlr = ctlr;
 
         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
         rspi->addr = devm_ioremap_resource(&pdev->dev, res);
         if (IS_ERR(rspi->addr)) {
                 ret = PTR_ERR(rspi->addr);
                 goto error1;
         }
 
         rspi->clk = devm_clk_get(&pdev->dev, NULL);
         if (IS_ERR(rspi->clk)) {
                 dev_err(&pdev->dev, "cannot get clock\n");
                 ret = PTR_ERR(rspi->clk);
                 goto error1;
         }
 
         pm_runtime_enable(&pdev->dev);
 
         init_waitqueue_head(&rspi->wait);
 
-        master->bus_num = pdev->id;
-        master->setup = rspi_setup;
-        master->auto_runtime_pm = true;
-        master->transfer_one = ops->transfer_one;
-        master->prepare_message = rspi_prepare_message;
-        master->unprepare_message = rspi_unprepare_message;
-        master->mode_bits = ops->mode_bits;
-        master->flags = ops->flags;
-        master->dev.of_node = pdev->dev.of_node;
+        ctlr->bus_num = pdev->id;
+        ctlr->setup = rspi_setup;
+        ctlr->auto_runtime_pm = true;
+        ctlr->transfer_one = ops->transfer_one;
+        ctlr->prepare_message = rspi_prepare_message;
+        ctlr->unprepare_message = rspi_unprepare_message;
+        ctlr->mode_bits = ops->mode_bits;
+        ctlr->flags = ops->flags;
+        ctlr->dev.of_node = pdev->dev.of_node;
 
         ret = platform_get_irq_byname(pdev, "rx");
         if (ret < 0) {
                 ret = platform_get_irq_byname(pdev, "mux");
                 if (ret < 0)
                         ret = platform_get_irq(pdev, 0);
                 if (ret >= 0)
                         rspi->rx_irq = rspi->tx_irq = ret;
         } else {
                 rspi->rx_irq = ret;
@@ -1304,66 +1304,66 @@
                                        "rx", rspi);
         if (!ret)
                 ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
                                        rspi_irq_tx, "tx", rspi);
         }
         if (ret < 0) {
                 dev_err(&pdev->dev, "request_irq error\n");
                 goto error2;
         }
 
-        ret = rspi_request_dma(&pdev->dev, master, res);
+        ret = rspi_request_dma(&pdev->dev, ctlr, res);
         if (ret < 0)
                 dev_warn(&pdev->dev, "DMA not available, using PIO\n");
 
-        ret = devm_spi_register_master(&pdev->dev, master);
+        ret = devm_spi_register_controller(&pdev->dev, ctlr);
         if (ret < 0) {
-                dev_err(&pdev->dev, "spi_register_master error.\n");
+                dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
                 goto error3;
         }
 
         dev_info(&pdev->dev, "probed\n");
 
         return 0;
 
 error3:
-        rspi_release_dma(master);
+        rspi_release_dma(ctlr);
 error2:
         pm_runtime_disable(&pdev->dev);
 error1:
-        spi_master_put(master);
+        spi_controller_put(ctlr);
 
         return ret;
 }
 
 static const struct platform_device_id spi_driver_ids[] = {
         { "rspi", (kernel_ulong_t)&rspi_ops },
         { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
         { "qspi", (kernel_ulong_t)&qspi_ops },
         {},
 };
 
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
 #ifdef CONFIG_PM_SLEEP
 static int rspi_suspend(struct device *dev)
 {
         struct rspi_data *rspi = dev_get_drvdata(dev);
 
-        return spi_master_suspend(rspi->master);
+        return spi_controller_suspend(rspi->ctlr);
 }
 
 static int rspi_resume(struct device *dev)
 {
         struct rspi_data *rspi = dev_get_drvdata(dev);
 
-        return spi_master_resume(rspi->master);
+        return spi_controller_resume(rspi->ctlr);
 }
 
 static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
 #define DEV_PM_OPS &rspi_pm_ops
 #else
 #define DEV_PM_OPS NULL
 #endif /* CONFIG_PM_SLEEP */
 
 static struct platform_driver rspi_driver = {
         .probe = rspi_probe,
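
One detail worth noting in the probe error handling above: the reference obtained from spi_alloc_master() belongs to the driver until devm_spi_register_controller() succeeds, so every failure path up to and including registration must end in spi_controller_put(); after successful registration, the devm action unregisters and drops the reference automatically. Reduced to its skeleton (hypothetical foo driver; foo_hw_init() is assumed):

    static int foo_probe(struct platform_device *pdev)
    {
            struct spi_controller *ctlr;
            int ret;

            ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
            if (!ctlr)
                    return -ENOMEM;

            ret = foo_hw_init(pdev, ctlr);          /* assumed setup helper */
            if (ret)
                    goto err_put;

            ret = devm_spi_register_controller(&pdev->dev, ctlr);
            if (ret)
                    goto err_put;           /* not registered: still our ref */

            return 0;                       /* devm handles unregister + put */

    err_put:
            spi_controller_put(ctlr);
            return ret;
    }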