blob: b360eca2b915cf21f78cb0c2c61d72bba08a5d88 [file] [log] [blame]
Weijie Gaob34a2362022-09-09 19:59:45 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2022 MediaTek Inc. All Rights Reserved.
4 *
5 * Author: SkyLake.Huang <skylake.huang@mediatek.com>
6 */
7
8#include <clk.h>
9#include <cpu_func.h>
10#include <div64.h>
11#include <dm.h>
12#include <spi.h>
13#include <spi-mem.h>
14#include <stdbool.h>
15#include <watchdog.h>
16#include <dm/device.h>
17#include <dm/device_compat.h>
18#include <dm/devres.h>
19#include <dm/pinctrl.h>
20#include <linux/bitops.h>
21#include <linux/completion.h>
22#include <linux/dma-mapping.h>
23#include <linux/io.h>
24#include <linux/iopoll.h>
Tom Rini15713fc2022-10-28 20:27:08 -040025#include <linux/sizes.h>
Weijie Gaob34a2362022-09-09 19:59:45 +080026
27#define SPI_CFG0_REG 0x0000
28#define SPI_CFG1_REG 0x0004
29#define SPI_TX_SRC_REG 0x0008
30#define SPI_RX_DST_REG 0x000c
31#define SPI_TX_DATA_REG 0x0010
32#define SPI_RX_DATA_REG 0x0014
33#define SPI_CMD_REG 0x0018
34#define SPI_IRQ_REG 0x001c
35#define SPI_STATUS_REG 0x0020
36#define SPI_PAD_SEL_REG 0x0024
37#define SPI_CFG2_REG 0x0028
38#define SPI_TX_SRC_REG_64 0x002c
39#define SPI_RX_DST_REG_64 0x0030
40#define SPI_CFG3_IPM_REG 0x0040
41
42#define SPI_CFG0_SCK_HIGH_OFFSET 0
43#define SPI_CFG0_SCK_LOW_OFFSET 8
44#define SPI_CFG0_CS_HOLD_OFFSET 16
45#define SPI_CFG0_CS_SETUP_OFFSET 24
46#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
47#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16
48
49#define SPI_CFG1_CS_IDLE_OFFSET 0
50#define SPI_CFG1_PACKET_LOOP_OFFSET 8
51#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
52#define SPI_CFG1_GET_TICKDLY_OFFSET 29
53
54#define SPI_CFG1_GET_TICKDLY_MASK GENMASK(31, 29)
55#define SPI_CFG1_CS_IDLE_MASK 0xff
56#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
57#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
58#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
59#define SPI_CFG2_SCK_HIGH_OFFSET 0
60#define SPI_CFG2_SCK_LOW_OFFSET 16
61#define SPI_CFG2_SCK_HIGH_MASK GENMASK(15, 0)
62#define SPI_CFG2_SCK_LOW_MASK GENMASK(31, 16)
63
64#define SPI_CMD_ACT BIT(0)
65#define SPI_CMD_RESUME BIT(1)
66#define SPI_CMD_RST BIT(2)
67#define SPI_CMD_PAUSE_EN BIT(4)
68#define SPI_CMD_DEASSERT BIT(5)
69#define SPI_CMD_SAMPLE_SEL BIT(6)
70#define SPI_CMD_CS_POL BIT(7)
71#define SPI_CMD_CPHA BIT(8)
72#define SPI_CMD_CPOL BIT(9)
73#define SPI_CMD_RX_DMA BIT(10)
74#define SPI_CMD_TX_DMA BIT(11)
75#define SPI_CMD_TXMSBF BIT(12)
76#define SPI_CMD_RXMSBF BIT(13)
77#define SPI_CMD_RX_ENDIAN BIT(14)
78#define SPI_CMD_TX_ENDIAN BIT(15)
79#define SPI_CMD_FINISH_IE BIT(16)
80#define SPI_CMD_PAUSE_IE BIT(17)
81#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
82#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
83#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22
84
85#define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22)
86
87#define PIN_MODE_CFG(x) ((x) / 2)
88
89#define SPI_CFG3_IPM_PIN_MODE_OFFSET 0
90#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
91#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
92#define SPI_CFG3_IPM_XMODE_EN BIT(4)
93#define SPI_CFG3_IPM_NODATA_FLAG BIT(5)
94#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET 8
95#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
96#define SPI_CFG3_IPM_DUMMY_BYTELEN_OFFSET 16
97
98#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK GENMASK(1, 0)
99#define SPI_CFG3_IPM_CMD_BYTELEN_MASK GENMASK(11, 8)
100#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK GENMASK(15, 12)
101#define SPI_CFG3_IPM_DUMMY_BYTELEN_MASK GENMASK(19, 16)
102
103#define MT8173_SPI_MAX_PAD_SEL 3
104
105#define MTK_SPI_PAUSE_INT_STATUS 0x2
106
107#define MTK_SPI_IDLE 0
108#define MTK_SPI_PAUSED 1
109
110#define MTK_SPI_MAX_FIFO_SIZE 32U
111#define MTK_SPI_PACKET_SIZE 1024
112#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
113#define MTK_SPI_IPM_PACKET_LOOP SZ_256
114
115#define MTK_SPI_32BITS_MASK 0xffffffff
116
117#define DMA_ADDR_EXT_BITS 36
118#define DMA_ADDR_DEF_BITS 32
119
120#define CLK_TO_US(freq, clkcnt) DIV_ROUND_UP((clkcnt), (freq) / 1000000)
121
/* struct mtk_spim_capability
 * @enhance_timing: Some IC design adjust cfg register to enhance time accuracy
 * @dma_ext: Some IC support DMA addr extension (addresses wider than 32 bits)
 * @ipm_design: The IPM IP design improves some features, and supports dual/quad mode
 * @support_quad: Whether quad mode is supported
 */
struct mtk_spim_capability {
	bool enhance_timing;
	bool dma_ext;
	bool ipm_design;
	bool support_quad;
};
134
/* struct mtk_spim_priv
 * @base: Base address of the spi controller
 * @state: Controller state (MTK_SPI_IDLE or MTK_SPI_PAUSED)
 * @sel_clk: Pad clock
 * @spi_clk: Core clock
 * @parent_clk: Parent clock (needed for mediatek,spi-ipm, upstream DTSI)
 * @hclk: HCLK clock (needed for mediatek,spi-ipm, upstream DTSI)
 * @pll_clk_rate: Controller's PLL source clock rate, which is different
 *                from SPI bus clock rate
 * @xfer_len: Current length of data for transfer
 * @hw_cap: Controller capabilities
 * @tick_dly: Used to postpone SPI sampling time
 * @sample_sel: Sample edge of MISO
 * @dev: udevice of this spi controller
 *       (NOTE(review): declared as struct device *, and never assigned in
 *       this file before being passed to dev_err()/dma_mapping_error() —
 *       verify those helpers tolerate a NULL device)
 * @tx_dma: Tx DMA address
 * @rx_dma: Rx DMA address
 */
struct mtk_spim_priv {
	void __iomem *base;
	u32 state;
	struct clk sel_clk, spi_clk;
	struct clk parent_clk, hclk;
	u32 pll_clk_rate;
	u32 xfer_len;
	struct mtk_spim_capability hw_cap;
	u32 tick_dly;
	u32 sample_sel;

	struct device *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};
167
/* mtk_spim_reset() - soft-reset the controller by pulsing SPI_CMD_RST */
static void mtk_spim_reset(struct mtk_spim_priv *priv)
{
	/* set the software reset bit in SPI_CMD_REG, then release it */
	setbits_le32(priv->base + SPI_CMD_REG, SPI_CMD_RST);
	clrbits_le32(priv->base + SPI_CMD_REG, SPI_CMD_RST);
}
174
/* mtk_spim_hw_init() - program SPI_CMD_REG (and tick-delay fields) from the
 * slave's SPI mode: clock phase/polarity, bit order, CS polarity, sample
 * edge; interrupts, DMA and deassert mode are all disabled here.
 *
 * @slave: slave whose mode flags are applied to the controller
 * Return: always 0
 */
static int mtk_spim_hw_init(struct spi_slave *slave)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);
	u16 cpha, cpol;
	u32 reg_val;

	/* extract clock phase/polarity from the requested SPI mode */
	cpha = slave->mode & SPI_CPHA ? 1 : 0;
	cpol = slave->mode & SPI_CPOL ? 1 : 0;

	if (priv->hw_cap.enhance_timing) {
		if (priv->hw_cap.ipm_design) {
			/* CFG3 reg only used for spi-mem,
			 * here write to default value
			 */
			writel(0x0, priv->base + SPI_CFG3_IPM_REG);
			/* IPM design keeps the get-tick delay in SPI_CMD_REG */
			clrsetbits_le32(priv->base + SPI_CMD_REG,
					SPI_CMD_IPM_GET_TICKDLY_MASK,
					priv->tick_dly <<
					SPI_CMD_IPM_GET_TICKDLY_OFFSET);
		} else {
			/* non-IPM design keeps it in SPI_CFG1_REG instead */
			clrsetbits_le32(priv->base + SPI_CFG1_REG,
					SPI_CFG1_GET_TICKDLY_MASK,
					priv->tick_dly <<
					SPI_CFG1_GET_TICKDLY_OFFSET);
		}
	}

	reg_val = readl(priv->base + SPI_CMD_REG);
	if (priv->hw_cap.ipm_design) {
		/* SPI transfer without idle time until packet length done */
		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
		if (slave->mode & SPI_LOOP)
			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
		else
			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
	}

	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the mlsbx and mlsbtx (MSB/LSB-first for TX and RX) */
	if (slave->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* do not reverse tx/rx endian */
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;

	if (priv->hw_cap.enhance_timing) {
		/* set CS polarity */
		if (slave->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (priv->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* Disable interrupt enable for pause mode & normal mode */
	reg_val &= ~(SPI_CMD_PAUSE_IE | SPI_CMD_FINISH_IE);

	/* disable dma mode; re-enabled per transfer in exec_op */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, priv->base + SPI_CMD_REG);

	return 0;
}
261
/* mtk_spim_prepare_transfer() - program SCK divider and CS timing
 * @priv: controller priv
 * @speed_hz: requested SPI bus clock rate in Hz
 *
 * Derives SCK high/low tick counts from the controller's PLL source clock
 * rate and the requested speed, then programs them together with the CS
 * setup/hold/idle timings. The register layout differs depending on the
 * enhance_timing capability (16-bit fields in CFG2/CFG0 vs 8-bit fields
 * packed into CFG0).
 */
static void mtk_spim_prepare_transfer(struct mtk_spim_priv *priv,
				      u32 speed_hz)
{
	u32 div, sck_time, cs_time, reg_val;

	/* minimum divider is 4; otherwise round up so the bus clock never
	 * exceeds the requested speed
	 */
	if (speed_hz <= priv->pll_clk_rate / 4)
		div = DIV_ROUND_UP(priv->pll_clk_rate, speed_hz);
	else
		div = 4;

	/* split the divider into SCK high/low halves; CS timing is derived
	 * from a full SCK period
	 */
	sck_time = (div + 1) / 2;
	cs_time = sck_time * 2;

	if (priv->hw_cap.enhance_timing) {
		reg_val = ((sck_time - 1) & 0xffff)
			   << SPI_CFG2_SCK_HIGH_OFFSET;
		reg_val |= ((sck_time - 1) & 0xffff)
			   << SPI_CFG2_SCK_LOW_OFFSET;
		writel(reg_val, priv->base + SPI_CFG2_REG);

		reg_val = ((cs_time - 1) & 0xffff)
			   << SPI_ADJUST_CFG0_CS_HOLD_OFFSET;
		reg_val |= ((cs_time - 1) & 0xffff)
			   << SPI_ADJUST_CFG0_CS_SETUP_OFFSET;
		writel(reg_val, priv->base + SPI_CFG0_REG);
	} else {
		reg_val = ((sck_time - 1) & 0xff)
			   << SPI_CFG0_SCK_HIGH_OFFSET;
		reg_val |= ((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET;
		reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET;
		reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET;
		writel(reg_val, priv->base + SPI_CFG0_REG);
	}

	/* CS idle time lives in CFG1 for both register layouts */
	reg_val = readl(priv->base + SPI_CFG1_REG);
	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
	reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET;
	writel(reg_val, priv->base + SPI_CFG1_REG);
}
301
302/**
303 * mtk_spim_setup_packet() - setup packet format.
304 * @priv: controller priv
305 *
306 * This controller sents/receives data in packets. The packet size is
307 * configurable.
308 *
309 * This function calculates the maximum packet size available for current
310 * data, and calculates the number of packets required to sent/receive data
311 * as much as possible.
312 */
313static void mtk_spim_setup_packet(struct mtk_spim_priv *priv)
314{
315 u32 packet_size, packet_loop, reg_val;
316
317 /* Calculate maximum packet size */
318 if (priv->hw_cap.ipm_design)
319 packet_size = min_t(u32,
320 priv->xfer_len,
321 MTK_SPI_IPM_PACKET_SIZE);
322 else
323 packet_size = min_t(u32,
324 priv->xfer_len,
325 MTK_SPI_PACKET_SIZE);
326
327 /* Calculates number of packets to sent/receive */
328 packet_loop = priv->xfer_len / packet_size;
329
330 reg_val = readl(priv->base + SPI_CFG1_REG);
331 if (priv->hw_cap.ipm_design)
332 reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
333 else
334 reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
335
336 reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
337
338 reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
339
340 reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
341
342 writel(reg_val, priv->base + SPI_CFG1_REG);
343}
344
345static void mtk_spim_enable_transfer(struct mtk_spim_priv *priv)
346{
347 u32 cmd;
348
349 cmd = readl(priv->base + SPI_CMD_REG);
350 if (priv->state == MTK_SPI_IDLE)
351 cmd |= SPI_CMD_ACT;
352 else
353 cmd |= SPI_CMD_RESUME;
354 writel(cmd, priv->base + SPI_CMD_REG);
355}
356
357static bool mtk_spim_supports_op(struct spi_slave *slave,
358 const struct spi_mem_op *op)
359{
360 struct udevice *bus = dev_get_parent(slave->dev);
361 struct mtk_spim_priv *priv = dev_get_priv(bus);
362
363 if (op->cmd.buswidth == 0 || op->cmd.buswidth > 4 ||
364 op->addr.buswidth > 4 || op->dummy.buswidth > 4 ||
365 op->data.buswidth > 4)
366 return false;
367
368 if (!priv->hw_cap.support_quad && (op->cmd.buswidth > 2 ||
369 op->addr.buswidth > 2 || op->dummy.buswidth > 2 ||
370 op->data.buswidth > 2))
371 return false;
372
373 if (op->addr.nbytes && op->dummy.nbytes &&
374 op->addr.buswidth != op->dummy.buswidth)
375 return false;
376
377 if (op->addr.nbytes + op->dummy.nbytes > 16)
378 return false;
379
380 if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
381 if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
382 MTK_SPI_IPM_PACKET_LOOP ||
383 op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
384 return false;
385 }
386
387 return true;
388}
389
/* mtk_spim_setup_dma_xfer() - program the DMA source/destination registers
 * @priv: controller priv (tx_dma/rx_dma must already be mapped)
 * @op: current spi-mem operation; RX registers are only written for reads
 */
static void mtk_spim_setup_dma_xfer(struct mtk_spim_priv *priv,
				    const struct spi_mem_op *op)
{
	/* low 32 bits of the TX DMA source address */
	writel((u32)(priv->tx_dma & MTK_SPI_32BITS_MASK),
	       priv->base + SPI_TX_SRC_REG);

	/* controllers with extended DMA addressing take the high bits too */
	if (priv->hw_cap.dma_ext)
		writel((u32)(priv->tx_dma >> 32),
		       priv->base + SPI_TX_SRC_REG_64);

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(priv->rx_dma & MTK_SPI_32BITS_MASK),
		       priv->base + SPI_RX_DST_REG);

		if (priv->hw_cap.dma_ext)
			writel((u32)(priv->rx_dma >> 32),
			       priv->base + SPI_RX_DST_REG_64);
	}
}
409
/* mtk_spim_transfer_wait() - busy-wait for the current transfer to finish
 * @slave: slave the op was issued to
 * @op: current spi-mem operation (used to size the timeout)
 *
 * Computes a worst-case transfer time from the effective bus clock and the
 * data length, adds a 1 s tolerance, and polls SPI_STATUS_REG until bit 0
 * is set.
 *
 * Return: 0 on completion, -ETIMEDOUT if the status bit never set.
 */
static int mtk_spim_transfer_wait(struct spi_slave *slave,
				  const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);
	u32 pll_clk, sck_l, sck_h, clk_count, reg;
	ulong us = 1;
	int ret = 0;

	/* ops without a data phase still clock out command/address bytes */
	if (op->data.dir == SPI_MEM_NO_DATA)
		clk_count = 32;
	else
		clk_count = op->data.nbytes;

	/* effective bus clock = PLL rate / (sck_high + sck_low ticks) */
	pll_clk = priv->pll_clk_rate;
	sck_l = readl(priv->base + SPI_CFG2_REG) >> SPI_CFG2_SCK_LOW_OFFSET;
	sck_h = readl(priv->base + SPI_CFG2_REG) & SPI_CFG2_SCK_HIGH_MASK;
	/* NOTE(review): do_div() is normally defined for a 64-bit dividend
	 * but pll_clk is u32 here — confirm against the project's div64
	 * implementation that this is well-defined
	 */
	do_div(pll_clk, sck_l + sck_h + 2);

	/* 8 clocks per byte, plus a generous fixed tolerance */
	us = CLK_TO_US(pll_clk, clk_count * 8);
	us += 1000 * 1000; /* 1s tolerance */

	if (us > UINT_MAX)
		us = UINT_MAX;

	/* poll the completion bit (bit 0) of SPI_STATUS_REG */
	ret = readl_poll_timeout(priv->base + SPI_STATUS_REG, reg,
				 reg & 0x1, us);
	if (ret < 0) {
		dev_err(priv->dev, "transfer timeout, val: 0x%lx\n", us);
		return -ETIMEDOUT;
	}

	return 0;
}
444
/* mtk_spim_exec_op() - execute one spi-mem operation via DMA
 * @slave: slave to address
 * @op: operation to run (command, optional address/dummy/data phases)
 *
 * Builds a TX staging buffer holding opcode + address + dummy (+ data for
 * writes), programs the IPM CFG3 register for the op's layout and bus
 * widths, maps the buffers for DMA, runs the transfer, and copies RX data
 * back when the caller's buffer was not 4-byte aligned. Cleanup is handled
 * by the goto chain at the bottom; each label undoes one acquisition.
 *
 * Return: 0 on success, -ENOMEM on allocation/mapping failure,
 * -ETIMEDOUT if the transfer never completes.
 */
static int mtk_spim_exec_op(struct spi_slave *slave,
			    const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);
	u32 reg_val, nio = 1, tx_size;
	char *tx_tmp_buf;
	char *rx_tmp_buf;
	int i, ret = 0;

	/* start from a clean controller state for every op */
	mtk_spim_reset(priv);
	mtk_spim_hw_init(slave);
	mtk_spim_prepare_transfer(priv, slave->max_hz);

	reg_val = readl(priv->base + SPI_CFG3_IPM_REG);
	/* opcode byte len (always a single opcode byte) */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len */
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len */
	if (!op->data.nbytes) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, priv->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		priv->xfer_len = op->data.nbytes;
		mtk_spim_setup_packet(priv);
	}

	/* enable x-mode when addr/dummy run at single-bit width */
	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

	/* pick the widest IO width any phase requires (1, 2 or 4 lines) */
	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;

	/* half-duplex: direction bit selects read vs write */
	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, priv->base + SPI_CFG3_IPM_REG);

	/* TX staging buffer: opcode + addr + dummy (+ data when writing),
	 * padded to at least 32 bytes
	 */
	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

	tx_size = max(tx_size, (u32)32);

	/* Fill up tx data */
	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL);
	if (!tx_tmp_buf) {
		ret = -ENOMEM;
		goto exit;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

	/* address bytes are sent most-significant byte first */
	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
				(8 * (op->addr.nbytes - i - 1));
	}

	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1, 0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out, op->data.nbytes);
	/* Finish filling up tx data */

	priv->tx_dma = dma_map_single(tx_tmp_buf, tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, priv->tx_dma)) {
		ret = -ENOMEM;
		goto tx_free;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		/* DMA needs a 4-byte-aligned RX buffer; bounce if needed */
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes, GFP_KERNEL);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto tx_unmap;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		priv->rx_dma = dma_map_single(rx_tmp_buf, op->data.nbytes,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, priv->rx_dma)) {
			ret = -ENOMEM;
			goto rx_free;
		}
	}

	/* enable DMA mode for this transfer */
	reg_val = readl(priv->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;

	writel(reg_val, priv->base + SPI_CMD_REG);

	mtk_spim_setup_dma_xfer(priv, op);

	mtk_spim_enable_transfer(priv);

	/* Wait for the interrupt. */
	ret = mtk_spim_transfer_wait(slave, op);
	if (ret)
		goto rx_unmap;

	/* copy bounce buffer back to the caller's unaligned RX buffer */
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);

rx_unmap:
	/* spi disable dma */
	reg_val = readl(priv->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, priv->base + SPI_CMD_REG);

	writel(0, priv->base + SPI_TX_SRC_REG);
	writel(0, priv->base + SPI_RX_DST_REG);

	if (op->data.dir == SPI_MEM_DATA_IN)
		dma_unmap_single(priv->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
rx_free:
	/* the bounce buffer exists only for unaligned caller buffers */
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
tx_unmap:
	dma_unmap_single(priv->tx_dma,
			 tx_size, DMA_TO_DEVICE);
tx_free:
	kfree(tx_tmp_buf);
exit:
	return ret;
}
606
607static int mtk_spim_adjust_op_size(struct spi_slave *slave,
608 struct spi_mem_op *op)
609{
610 int opcode_len;
611
612 if (!op->data.nbytes)
613 return 0;
614
615 if (op->data.dir != SPI_MEM_NO_DATA) {
616 opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
617 if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
618 op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
619 /* force data buffer dma-aligned. */
620 op->data.nbytes -= op->data.nbytes % 4;
621 }
622 }
623
624 return 0;
625}
626
/* mtk_spim_get_attr() - read capability flags and timing tuning from the DT
 * @priv: controller priv to populate
 * @dev: device whose node carries the properties
 *
 * Capability flags are boolean properties; tick_dly and sample_sel are
 * u32 properties. Missing u32 properties are reported but not fatal — the
 * caller in probe ignores the return value.
 *
 * Return: 0, or the error from the last failing dev_read_u32().
 */
static int mtk_spim_get_attr(struct mtk_spim_priv *priv, struct udevice *dev)
{
	int ret;

	priv->hw_cap.enhance_timing = dev_read_bool(dev, "enhance_timing");
	priv->hw_cap.dma_ext = dev_read_bool(dev, "dma_ext");
	priv->hw_cap.ipm_design = dev_read_bool(dev, "ipm_design");
	priv->hw_cap.support_quad = dev_read_bool(dev, "support_quad");

	ret = dev_read_u32(dev, "tick_dly", &priv->tick_dly);
	if (ret < 0)
		dev_err(priv->dev, "tick dly not set.\n");

	ret = dev_read_u32(dev, "sample_sel", &priv->sample_sel);
	if (ret < 0)
		dev_err(priv->dev, "sample sel not set.\n");

	return ret;
}
646
/* mtk_spim_probe() - map registers, read capabilities, set up clocks
 * @dev: this controller's udevice
 *
 * Supports two bindings: the legacy "mediatek,ipm-spi" (capabilities and
 * tuning read from DT properties, clocks via assigned-clocks) and the
 * upstream "mediatek,spi-ipm" (all capabilities forced on, fixed tuning,
 * explicit parent/hclk clocks).
 *
 * Return: 0 on success, -EINVAL on missing regs or zero clock rate, or a
 * clock-lookup error.
 */
static int mtk_spim_probe(struct udevice *dev)
{
	struct mtk_spim_priv *priv = dev_get_priv(dev);
	int ret;

	priv->base = devfdt_get_addr_ptr(dev);
	if (!priv->base)
		return -EINVAL;

	/*
	 * Upstream linux driver for ipm design enables all the modes
	 * and sets up the calibration values directly in the driver with
	 * standard values.
	 */
	if (device_is_compatible(dev, "mediatek,spi-ipm")) {
		priv->hw_cap.enhance_timing = true;
		priv->hw_cap.dma_ext = true;
		priv->hw_cap.ipm_design = true;
		priv->hw_cap.support_quad = true;
		priv->sample_sel = 0;
		priv->tick_dly = 2;
	} else {
		mtk_spim_get_attr(priv, dev);
	}

	ret = clk_get_by_name(dev, "sel-clk", &priv->sel_clk);
	if (ret < 0) {
		dev_err(dev, "failed to get sel-clk\n");
		return ret;
	}

	ret = clk_get_by_name(dev, "spi-clk", &priv->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to get spi-clk\n");
		return ret;
	}

	/*
	 * Upstream DTSI uses a different compatible that provides additional
	 * clocks instead of the assigned-clock implementation.
	 */
	if (device_is_compatible(dev, "mediatek,spi-ipm")) {
		ret = clk_get_by_name(dev, "parent-clk", &priv->parent_clk);
		if (ret < 0) {
			dev_err(dev, "failed to get parent-clk\n");
			return ret;
		}

		ret = clk_get_by_name(dev, "hclk", &priv->hclk);
		if (ret < 0) {
			dev_err(dev, "failed to get hclk\n");
			return ret;
		}

		/* reparent the pad clock selector before enabling it */
		clk_enable(&priv->parent_clk);
		clk_set_parent(&priv->sel_clk, &priv->parent_clk);

		clk_enable(&priv->hclk);
	}

	clk_enable(&priv->spi_clk);
	clk_enable(&priv->sel_clk);

	/* cache the PLL source rate; used for divider and timeout maths */
	priv->pll_clk_rate = clk_get_rate(&priv->spi_clk);
	if (priv->pll_clk_rate == 0)
		return -EINVAL;

	return 0;
}
716
/* mtk_spim_set_speed() - no-op; the clock divider is programmed per op in
 * mtk_spim_prepare_transfer() from slave->max_hz instead
 */
static int mtk_spim_set_speed(struct udevice *dev, uint speed)
{
	return 0;
}
721
/* mtk_spim_set_mode() - no-op; the mode bits are applied per op in
 * mtk_spim_hw_init() from slave->mode instead
 */
static int mtk_spim_set_mode(struct udevice *dev, uint mode)
{
	return 0;
}
726
/* spi-mem operations: this driver only supports the spi-mem interface */
static const struct spi_controller_mem_ops mtk_spim_mem_ops = {
	.adjust_op_size = mtk_spim_adjust_op_size,
	.supports_op = mtk_spim_supports_op,
	.exec_op = mtk_spim_exec_op
};
732
/* DM SPI ops; set_speed/set_mode are stubs (per-op configuration instead) */
static const struct dm_spi_ops mtk_spim_ops = {
	.mem_ops = &mtk_spim_mem_ops,
	.set_speed = mtk_spim_set_speed,
	.set_mode = mtk_spim_set_mode,
};
738
/* legacy U-Boot binding and upstream Linux DTSI binding, respectively */
static const struct udevice_id mtk_spim_ids[] = {
	{ .compatible = "mediatek,ipm-spi" },
	{ .compatible = "mediatek,spi-ipm", },
	{}
};
744
/* driver model registration for the MediaTek SPI-MEM (IPM) controller */
U_BOOT_DRIVER(mtk_spim) = {
	.name = "mtk_spim",
	.id = UCLASS_SPI,
	.of_match = mtk_spim_ids,
	.ops = &mtk_spim_ops,
	.priv_auto = sizeof(struct mtk_spim_priv),
	.probe = mtk_spim_probe,
};