// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 MediaTek Inc. All Rights Reserved.
 *
 * Author: SkyLake.Huang <skylake.huang@mediatek.com>
 */

#include <clk.h>
#include <cpu_func.h>
#include <div64.h>
#include <dm.h>
#include <spi.h>
#include <spi-mem.h>
#include <stdbool.h>
#include <watchdog.h>
#include <dm/device.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/pinctrl.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_IRQ_REG			0x001c
#define SPI_STATUS_REG			0x0020
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030
#define SPI_CFG3_IPM_REG		0x0040

#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICKDLY_OFFSET	29

#define SPI_CFG1_GET_TICKDLY_MASK	GENMASK(31, 29)
#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK	GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET	0
#define SPI_CFG2_SCK_LOW_OFFSET		16
#define SPI_CFG2_SCK_HIGH_MASK		GENMASK(15, 0)
#define SPI_CFG2_SCK_LOW_MASK		GENMASK(31, 16)

#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE	BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP		BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET	22

#define SPI_CMD_IPM_GET_TICKDLY_MASK	GENMASK(24, 22)

#define PIN_MODE_CFG(x)			((x) / 2)

#define SPI_CFG3_IPM_PIN_MODE_OFFSET	0
#define SPI_CFG3_IPM_HALF_DUPLEX_DIR	BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN	BIT(3)
#define SPI_CFG3_IPM_XMODE_EN		BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG	BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET	8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
#define SPI_CFG3_IPM_DUMMY_BYTELEN_OFFSET 16

#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK	GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK	GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK	GENMASK(15, 12)
#define SPI_CFG3_IPM_DUMMY_BYTELEN_MASK	GENMASK(19, 16)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_MAX_FIFO_SIZE		32U
#define MTK_SPI_PACKET_SIZE		1024
#define MTK_SPI_IPM_PACKET_SIZE		SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP		SZ_256

#define MTK_SPI_32BITS_MASK		0xffffffff

#define DMA_ADDR_EXT_BITS		36
#define DMA_ADDR_DEF_BITS		32

#define CLK_TO_US(freq, clkcnt)		DIV_ROUND_UP((clkcnt), (freq) / 1000000)

/* struct mtk_spim_capability
 * @enhance_timing: Some IC designs adjust the cfg registers for better
 *                  timing accuracy
 * @dma_ext: Some ICs support DMA address extension
 * @ipm_design: The IPM IP design improves several features and supports
 *              dual/quad mode
 * @support_quad: Whether quad mode is supported
 */
struct mtk_spim_capability {
	bool enhance_timing;
	bool dma_ext;
	bool ipm_design;
	bool support_quad;
};

/* struct mtk_spim_priv
 * @base: Base address of the spi controller
 * @state: Controller state
 * @sel_clk: Pad clock
 * @spi_clk: Core clock
 * @xfer_len: Current length of data for transfer
 * @hw_cap: Controller capabilities
 * @tick_dly: Used to postpone SPI sampling time
 * @sample_sel: Sample edge of MISO
 * @dev: udevice of this spi controller
 * @tx_dma: Tx DMA address
 * @rx_dma: Rx DMA address
 */
struct mtk_spim_priv {
	void __iomem *base;
	u32 state;
	struct clk sel_clk, spi_clk;
	u32 xfer_len;
	struct mtk_spim_capability hw_cap;
	u32 tick_dly;
	u32 sample_sel;

	struct device *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};

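/**
 * mtk_spim_reset() - software-reset the controller
 * @priv: controller priv
 *
 * Pulse SPI_CMD_RST so any state left over from a previous transfer is
 * discarded before the next operation is programmed.
 */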
static void mtk_spim_reset(struct mtk_spim_priv *priv)
{
	/* set the software reset bit in SPI_CMD_REG. */
	setbits_le32(priv->base + SPI_CMD_REG, SPI_CMD_RST);
	clrbits_le32(priv->base + SPI_CMD_REG, SPI_CMD_RST);
}

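/**
 * mtk_spim_hw_init() - apply slave mode settings to the controller
 * @slave: the spi slave being configured
 *
 * Program CPOL/CPHA, bit order, CS polarity, sample edge and tick delay
 * from the slave mode flags and the controller capabilities. DMA and
 * deassert modes are left disabled here; mtk_spim_exec_op() enables DMA
 * per transfer.
 */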
static int mtk_spim_hw_init(struct spi_slave *slave)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);
	u16 cpha, cpol;
	u32 reg_val;

	cpha = slave->mode & SPI_CPHA ? 1 : 0;
	cpol = slave->mode & SPI_CPOL ? 1 : 0;

	if (priv->hw_cap.enhance_timing) {
		if (priv->hw_cap.ipm_design) {
			/*
			 * The CFG3 register is only used for spi-mem;
			 * set it to its default value here.
			 */
			writel(0x0, priv->base + SPI_CFG3_IPM_REG);
			clrsetbits_le32(priv->base + SPI_CMD_REG,
					SPI_CMD_IPM_GET_TICKDLY_MASK,
					priv->tick_dly <<
					SPI_CMD_IPM_GET_TICKDLY_OFFSET);
		} else {
			clrsetbits_le32(priv->base + SPI_CFG1_REG,
					SPI_CFG1_GET_TICKDLY_MASK,
					priv->tick_dly <<
					SPI_CFG1_GET_TICKDLY_OFFSET);
		}
	}

	reg_val = readl(priv->base + SPI_CMD_REG);
	if (priv->hw_cap.ipm_design) {
		/* SPI transfer without idle time until packet length done */
		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
		if (slave->mode & SPI_LOOP)
			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
		else
			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
	}

	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set tx/rx bit order: MSB first unless SPI_LSB_FIRST is set */
	if (slave->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* do not reverse tx/rx endian */
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;

	if (priv->hw_cap.enhance_timing) {
		/* set CS polarity */
		if (slave->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (priv->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, priv->base + SPI_CMD_REG);

	return 0;
}

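/**
 * mtk_spim_prepare_transfer() - program SCK and CS timing
 * @priv: controller priv
 * @speed_hz: requested SCK frequency
 *
 * Derive the clock divider from the parent clock rate (with a minimum
 * divider of 4) and split it into SCK high/low and CS setup/hold/idle
 * counts. Enhanced-timing controllers take 16-bit counts in CFG2/CFG0;
 * others use the 8-bit fields of CFG0.
 */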
static void mtk_spim_prepare_transfer(struct mtk_spim_priv *priv,
				      u32 speed_hz)
{
	u32 spi_clk_hz, div, sck_time, cs_time, reg_val;

	spi_clk_hz = clk_get_rate(&priv->spi_clk);
	if (speed_hz <= spi_clk_hz / 4)
		div = DIV_ROUND_UP(spi_clk_hz, speed_hz);
	else
		div = 4;

	sck_time = (div + 1) / 2;
	cs_time = sck_time * 2;

	if (priv->hw_cap.enhance_timing) {
		reg_val = ((sck_time - 1) & 0xffff)
			  << SPI_CFG2_SCK_HIGH_OFFSET;
		reg_val |= ((sck_time - 1) & 0xffff)
			   << SPI_CFG2_SCK_LOW_OFFSET;
		writel(reg_val, priv->base + SPI_CFG2_REG);

		reg_val = ((cs_time - 1) & 0xffff)
			  << SPI_ADJUST_CFG0_CS_HOLD_OFFSET;
		reg_val |= ((cs_time - 1) & 0xffff)
			   << SPI_ADJUST_CFG0_CS_SETUP_OFFSET;
		writel(reg_val, priv->base + SPI_CFG0_REG);
	} else {
		reg_val = ((sck_time - 1) & 0xff)
			  << SPI_CFG0_SCK_HIGH_OFFSET;
		reg_val |= ((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET;
		reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET;
		reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET;
		writel(reg_val, priv->base + SPI_CFG0_REG);
	}

	reg_val = readl(priv->base + SPI_CFG1_REG);
	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
	reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET;
	writel(reg_val, priv->base + SPI_CFG1_REG);
}

/**
 * mtk_spim_setup_packet() - setup packet format.
 * @priv: controller priv
 *
 * This controller sends/receives data in packets. The packet size is
 * configurable.
 *
 * This function calculates the maximum packet size available for the
 * current data, and the number of packets required to send/receive as
 * much of the data as possible.
 */
static void mtk_spim_setup_packet(struct mtk_spim_priv *priv)
{
	u32 packet_size, packet_loop, reg_val;

	/* Calculate maximum packet size */
	if (priv->hw_cap.ipm_design)
		packet_size = min_t(u32,
				    priv->xfer_len,
				    MTK_SPI_IPM_PACKET_SIZE);
	else
		packet_size = min_t(u32,
				    priv->xfer_len,
				    MTK_SPI_PACKET_SIZE);

	/* Calculate the number of packets to send/receive */
	packet_loop = priv->xfer_len / packet_size;

	reg_val = readl(priv->base + SPI_CFG1_REG);
	if (priv->hw_cap.ipm_design)
		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
	else
		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;

	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;

	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;

	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;

	writel(reg_val, priv->base + SPI_CFG1_REG);
}

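/**
 * mtk_spim_enable_transfer() - start or resume a transfer
 * @priv: controller priv
 *
 * Set SPI_CMD_ACT to start a new transfer when the controller is idle,
 * or SPI_CMD_RESUME to continue a paused one.
 */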
static void mtk_spim_enable_transfer(struct mtk_spim_priv *priv)
{
	u32 cmd;

	cmd = readl(priv->base + SPI_CMD_REG);
	if (priv->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, priv->base + SPI_CMD_REG);
}

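/**
 * mtk_spim_supports_op() - check whether an operation is supported
 * @slave: the spi slave
 * @op: the spi-mem operation to check
 *
 * Reject bus widths above 4 (above 2 without quad support), mismatched
 * address/dummy bus widths, address+dummy phases longer than 16 bytes,
 * and data lengths that cannot be split into whole IPM packets.
 */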
static bool mtk_spim_supports_op(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);

	if (op->cmd.buswidth == 0 || op->cmd.buswidth > 4 ||
	    op->addr.buswidth > 4 || op->dummy.buswidth > 4 ||
	    op->data.buswidth > 4)
		return false;

	if (!priv->hw_cap.support_quad && (op->cmd.buswidth > 2 ||
	    op->addr.buswidth > 2 || op->dummy.buswidth > 2 ||
	    op->data.buswidth > 2))
		return false;

	if (op->addr.nbytes && op->dummy.nbytes &&
	    op->addr.buswidth != op->dummy.buswidth)
		return false;

	if (op->addr.nbytes + op->dummy.nbytes > 16)
		return false;

	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
		    MTK_SPI_IPM_PACKET_LOOP ||
		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
			return false;
	}

	return true;
}

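/**
 * mtk_spim_setup_dma_xfer() - program the DMA addresses
 * @priv: controller priv
 * @op: the spi-mem operation being executed
 *
 * Write the lower 32 bits of the TX (and, for reads, RX) DMA addresses.
 * Controllers with extended DMA addressing also get the upper bits.
 */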
static void mtk_spim_setup_dma_xfer(struct mtk_spim_priv *priv,
				    const struct spi_mem_op *op)
{
	writel((u32)(priv->tx_dma & MTK_SPI_32BITS_MASK),
	       priv->base + SPI_TX_SRC_REG);

	if (priv->hw_cap.dma_ext)
		writel((u32)(priv->tx_dma >> 32),
		       priv->base + SPI_TX_SRC_REG_64);

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(priv->rx_dma & MTK_SPI_32BITS_MASK),
		       priv->base + SPI_RX_DST_REG);

		if (priv->hw_cap.dma_ext)
			writel((u32)(priv->rx_dma >> 32),
			       priv->base + SPI_RX_DST_REG_64);
	}
}

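/**
 * mtk_spim_transfer_wait() - poll for transfer completion
 * @slave: the spi slave
 * @op: the spi-mem operation in flight
 *
 * Estimate the transfer time from the effective bus clock and the data
 * length, add one second of tolerance, then poll SPI_STATUS_REG until
 * the finish bit is set or the timeout expires.
 */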
static int mtk_spim_transfer_wait(struct spi_slave *slave,
				  const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);
	u32 sck_l, sck_h, spi_bus_clk, clk_count, reg;
	ulong us = 1;
	int ret = 0;

	if (op->data.dir == SPI_MEM_NO_DATA)
		clk_count = 32;
	else
		clk_count = op->data.nbytes;

	spi_bus_clk = clk_get_rate(&priv->spi_clk);
	sck_l = readl(priv->base + SPI_CFG2_REG) >> SPI_CFG2_SCK_LOW_OFFSET;
	sck_h = readl(priv->base + SPI_CFG2_REG) & SPI_CFG2_SCK_HIGH_MASK;
	do_div(spi_bus_clk, sck_l + sck_h + 2);

	us = CLK_TO_US(spi_bus_clk, clk_count * 8);
	us += 1000 * 1000; /* 1s tolerance */

	if (us > UINT_MAX)
		us = UINT_MAX;

	ret = readl_poll_timeout(priv->base + SPI_STATUS_REG, reg,
				 reg & 0x1, us);
	if (ret < 0) {
		dev_err(priv->dev, "transfer timeout, val: 0x%lx\n", us);
		return -ETIMEDOUT;
	}

	return 0;
}

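/**
 * mtk_spim_exec_op() - execute an spi-mem operation
 * @slave: the spi slave
 * @op: the spi-mem operation to execute
 *
 * Configure the IPM opcode/address/dummy layout and bus widths, build a
 * DMA bounce buffer holding the opcode, address, dummy and (for writes)
 * data bytes, run the transfer, and for reads copy the result back
 * through an aligned bounce buffer if the caller's buffer is not
 * 4-byte aligned.
 */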
static int mtk_spim_exec_op(struct spi_slave *slave,
			    const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);
	u32 reg_val, nio = 1, tx_size;
	char *tx_tmp_buf;
	char *rx_tmp_buf;
	int i, ret = 0;

	mtk_spim_reset(priv);
	mtk_spim_hw_init(slave);
	mtk_spim_prepare_transfer(priv, slave->max_hz);

	reg_val = readl(priv->base + SPI_CFG3_IPM_REG);
	/* opcode byte len */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len */
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			   SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len */
	if (!op->data.nbytes) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, priv->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		priv->xfer_len = op->data.nbytes;
		mtk_spim_setup_packet(priv);
	}

	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;

	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, priv->base + SPI_CFG3_IPM_REG);

	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

	tx_size = max(tx_size, (u32)32);

	/* Fill up tx data */
	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL);
	if (!tx_tmp_buf) {
		ret = -ENOMEM;
		goto exit;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
					    (8 * (op->addr.nbytes - i - 1));
	}

	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1, 0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out, op->data.nbytes);
	/* Finish filling up tx data */

	priv->tx_dma = dma_map_single(tx_tmp_buf, tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, priv->tx_dma)) {
		ret = -ENOMEM;
		goto tx_free;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes, GFP_KERNEL);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto tx_unmap;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		priv->rx_dma = dma_map_single(rx_tmp_buf, op->data.nbytes,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, priv->rx_dma)) {
			ret = -ENOMEM;
			goto rx_free;
		}
	}

	reg_val = readl(priv->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;

	writel(reg_val, priv->base + SPI_CMD_REG);

	mtk_spim_setup_dma_xfer(priv, op);

	mtk_spim_enable_transfer(priv);

	/* Wait for the interrupt. */
	ret = mtk_spim_transfer_wait(slave, op);
	if (ret)
		goto rx_unmap;

	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);

rx_unmap:
	/* spi disable dma */
	reg_val = readl(priv->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, priv->base + SPI_CMD_REG);

	writel(0, priv->base + SPI_TX_SRC_REG);
	writel(0, priv->base + SPI_RX_DST_REG);

	if (op->data.dir == SPI_MEM_DATA_IN)
		dma_unmap_single(priv->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
rx_free:
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
tx_unmap:
	dma_unmap_single(priv->tx_dma,
			 tx_size, DMA_TO_DEVICE);
tx_free:
	kfree(tx_tmp_buf);
exit:
	return ret;
}

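/**
 * mtk_spim_adjust_op_size() - clamp an operation to one IPM packet
 * @slave: the spi slave
 * @op: the spi-mem operation to adjust
 *
 * Shrink the data phase so that opcode + address + dummy + data fit
 * within MTK_SPI_IPM_PACKET_SIZE, keeping the data length 4-byte
 * aligned for DMA.
 */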
static int mtk_spim_adjust_op_size(struct spi_slave *slave,
				   struct spi_mem_op *op)
{
	int opcode_len;

	if (!op->data.nbytes)
		return 0;

	if (op->data.dir != SPI_MEM_NO_DATA) {
		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
			op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
			/* force data buffer dma-aligned. */
			op->data.nbytes -= op->data.nbytes % 4;
		}
	}

	return 0;
}

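/**
 * mtk_spim_get_attr() - read controller capabilities from the device tree
 * @priv: controller priv
 * @dev: this controller's udevice
 *
 * Populate hw_cap from boolean properties and fetch the tick_dly and
 * sample_sel tuning values. A node using these properties might look
 * like this (illustrative sketch only, not a binding reference; the
 * unit address and clock phandles are made up):
 *
 *	spi@1100a000 {
 *		compatible = "mediatek,ipm-spi";
 *		reg = <0x1100a000 0x100>;
 *		clocks = <&topckgen 1>, <&topckgen 2>;
 *		clock-names = "sel-clk", "spi-clk";
 *		ipm_design;
 *		support_quad;
 *		tick_dly = <2>;
 *		sample_sel = <0>;
 *	};
 */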
static int mtk_spim_get_attr(struct mtk_spim_priv *priv, struct udevice *dev)
{
	int ret;

	priv->hw_cap.enhance_timing = dev_read_bool(dev, "enhance_timing");
	priv->hw_cap.dma_ext = dev_read_bool(dev, "dma_ext");
	priv->hw_cap.ipm_design = dev_read_bool(dev, "ipm_design");
	priv->hw_cap.support_quad = dev_read_bool(dev, "support_quad");

	ret = dev_read_u32(dev, "tick_dly", &priv->tick_dly);
	if (ret < 0)
		dev_err(priv->dev, "tick dly not set.\n");

	ret = dev_read_u32(dev, "sample_sel", &priv->sample_sel);
	if (ret < 0)
		dev_err(priv->dev, "sample sel not set.\n");

	return ret;
}

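/**
 * mtk_spim_probe() - probe the controller
 * @dev: this controller's udevice
 *
 * Map the register base, read the device tree attributes, then get and
 * enable the "sel-clk" (pad) and "spi-clk" (core) clocks.
 */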
static int mtk_spim_probe(struct udevice *dev)
{
	struct mtk_spim_priv *priv = dev_get_priv(dev);
	int ret;

	priv->base = (void __iomem *)devfdt_get_addr(dev);
	if (!priv->base)
		return -EINVAL;

	mtk_spim_get_attr(priv, dev);

	ret = clk_get_by_name(dev, "sel-clk", &priv->sel_clk);
	if (ret < 0) {
		dev_err(dev, "failed to get sel-clk\n");
		return ret;
	}

	ret = clk_get_by_name(dev, "spi-clk", &priv->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to get spi-clk\n");
		return ret;
	}

	clk_enable(&priv->sel_clk);
	clk_enable(&priv->spi_clk);

	return 0;
}

/*
 * The controller is fully reconfigured from slave->mode and slave->max_hz
 * at the start of each mtk_spim_exec_op(), so the uclass set_speed and
 * set_mode hooks are intentionally no-ops.
 */
static int mtk_spim_set_speed(struct udevice *dev, uint speed)
{
	return 0;
}

static int mtk_spim_set_mode(struct udevice *dev, uint mode)
{
	return 0;
}

static const struct spi_controller_mem_ops mtk_spim_mem_ops = {
	.adjust_op_size = mtk_spim_adjust_op_size,
	.supports_op = mtk_spim_supports_op,
	.exec_op = mtk_spim_exec_op
};

static const struct dm_spi_ops mtk_spim_ops = {
	.mem_ops = &mtk_spim_mem_ops,
	.set_speed = mtk_spim_set_speed,
	.set_mode = mtk_spim_set_mode,
};

static const struct udevice_id mtk_spim_ids[] = {
	{ .compatible = "mediatek,ipm-spi" },
	{}
};

U_BOOT_DRIVER(mtk_spim) = {
	.name = "mtk_spim",
	.id = UCLASS_SPI,
	.of_match = mtk_spim_ids,
	.ops = &mtk_spim_ops,
	.priv_auto = sizeof(struct mtk_spim_priv),
	.probe = mtk_spim_probe,
};