Source
82
82
unsigned int tx_rem_bytes;
83
83
unsigned int rx_rem_bytes;
84
84
const struct spi_transfer *cur_xfer;
85
85
struct completion xfer_done;
86
86
unsigned int oversampling;
87
87
spinlock_t lock;
88
88
enum spi_m_cmd_opcode cur_mcmd;
89
89
int irq;
90
90
};
91
91
92
-
static void handle_fifo_timeout(struct spi_master *spi,
93
-
struct spi_message *msg);
94
-
95
92
/*
 * get_spi_clk_cfg() - configure the serial-engine source clock for a
 * requested SPI bus speed.
 *
 * NOTE(review): this span is a diff-extraction fragment; the bare numbers
 * between code lines are diff line-number artifacts, not code. The middle of
 * the function (diff lines 105..114) is elided here — presumably the lines
 * that derive sclk_freq, *clk_idx and *clk_div from speed_hz; confirm against
 * the full source before relying on this documentation.
 *
 * @speed_hz: requested SPI clock rate, in Hz.
 * @mas:      driver state; mas->oversampling scales the SE clock, mas->se
 *            holds the clock handle used below.
 * @clk_idx:  out: selected clock-table index (set in the elided section).
 * @clk_div:  out: selected clock divider (set in the elided section).
 *
 * Returns 0 on success or the negative error from clk_set_rate().
 */
static int get_spi_clk_cfg(unsigned int speed_hz,
96
93
struct spi_geni_master *mas,
97
94
unsigned int *clk_idx,
98
95
unsigned int *clk_div)
99
96
{
100
97
unsigned long sclk_freq;
101
98
unsigned int actual_hz;
102
99
struct geni_se *se = &mas->se;
103
100
int ret;
104
101
115
112
/* Effective bus rate actually achieved by the chosen freq/divider. */
actual_hz = sclk_freq / (mas->oversampling * *clk_div);
116
113
117
114
dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
118
115
actual_hz, sclk_freq, *clk_idx, *clk_div);
119
116
/* Program the source clock; failure is logged and propagated. */
ret = clk_set_rate(se->clk, sclk_freq);
120
117
if (ret)
121
118
dev_err(mas->dev, "clk_set_rate failed %d\n", ret);
122
119
return ret;
123
120
}
124
121
122
+
static void handle_fifo_timeout(struct spi_master *spi,
123
+
struct spi_message *msg)
124
+
{
125
+
struct spi_geni_master *mas = spi_master_get_devdata(spi);
126
+
unsigned long time_left, flags;
127
+
struct geni_se *se = &mas->se;
128
+
129
+
spin_lock_irqsave(&mas->lock, flags);
130
+
reinit_completion(&mas->xfer_done);
131
+
mas->cur_mcmd = CMD_CANCEL;
132
+
geni_se_cancel_m_cmd(se);
133
+
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
134
+
spin_unlock_irqrestore(&mas->lock, flags);
135
+
time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
136
+
if (time_left)
137
+
return;
138
+
139
+
spin_lock_irqsave(&mas->lock, flags);
140
+
reinit_completion(&mas->xfer_done);
141
+
geni_se_abort_m_cmd(se);
142
+
spin_unlock_irqrestore(&mas->lock, flags);
143
+
time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
144
+
if (!time_left)
145
+
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
146
+
}
147
+
125
148
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
126
149
{
127
150
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
128
151
struct spi_master *spi = dev_get_drvdata(mas->dev);
129
152
struct geni_se *se = &mas->se;
130
153
unsigned long time_left;
131
154
132
155
reinit_completion(&mas->xfer_done);
133
156
pm_runtime_get_sync(mas->dev);
134
157
if (!(slv->mode & SPI_CS_HIGH))
349
372
350
373
/*
351
374
* TX_WATERMARK_REG should be set after SPI configuration and
352
375
* setting up GENI SE engine, as driver starts data transfer
353
376
* for the watermark interrupt.
354
377
*/
355
378
if (m_cmd & SPI_TX_ONLY)
356
379
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
357
380
}
358
381
359
-
/*
 * NOTE(review): this span is the "-" (removed) side of a diff that relocates
 * handle_fifo_timeout() earlier in the file; the interleaved bare numbers and
 * "-" markers below are diff-rendering artifacts, not code.
 *
 * handle_fifo_timeout() - recover from a FIFO transfer timeout by cancelling
 * the in-flight master command, and escalating to an abort if the cancel is
 * not acknowledged within one second. The completion is re-armed under
 * mas->lock before each command is issued so the IRQ handler's signal cannot
 * be lost.
 */
static void handle_fifo_timeout(struct spi_master *spi,
360
-
struct spi_message *msg)
361
-
{
362
-
struct spi_geni_master *mas = spi_master_get_devdata(spi);
363
-
unsigned long time_left, flags;
364
-
struct geni_se *se = &mas->se;
365
-
366
-
/* Issue CANCEL under the lock; also zero the TX watermark to quiesce
 * further FIFO interrupts. */
spin_lock_irqsave(&mas->lock, flags);
367
-
reinit_completion(&mas->xfer_done);
368
-
mas->cur_mcmd = CMD_CANCEL;
369
-
geni_se_cancel_m_cmd(se);
370
-
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
371
-
spin_unlock_irqrestore(&mas->lock, flags);
372
-
/* Wait up to 1s (HZ jiffies) for the IRQ handler to complete us. */
time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
373
-
if (time_left)
374
-
return;
375
-
376
-
/* Cancel timed out: escalate to ABORT and wait once more. */
spin_lock_irqsave(&mas->lock, flags);
377
-
reinit_completion(&mas->xfer_done);
378
-
geni_se_abort_m_cmd(se);
379
-
spin_unlock_irqrestore(&mas->lock, flags);
380
-
time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
381
-
if (!time_left)
382
-
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
383
-
}
384
-
385
382
static int spi_geni_transfer_one(struct spi_master *spi,
386
383
struct spi_device *slv,
387
384
struct spi_transfer *xfer)
388
385
{
389
386
struct spi_geni_master *mas = spi_master_get_devdata(spi);
390
387
391
388
/* Terminate and return success for 0 byte length transfer */
392
389
if (!xfer->len)
393
390
return 0;
394
391