// SPDX-License-Identifier: GPL-2.0+

/*
 * Freescale QuadSPI driver.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Copyright (C) 2018 Bootlin
 * Copyright (C) 2018 exceet electronics GmbH
 * Copyright (C) 2018 Kontron Electronics GmbH
 * Copyright 2019-2020 NXP
 *
 * This driver is a ported version of the Linux Freescale QSPI driver, taken
 * from the v5.5-rc1 tag, which carries the following information.
 *
 * Transition to SPI MEM interface:
 * Authors:
 *     Boris Brezillon <bbrezillon@kernel.org>
 *     Frieder Schrempf <frieder.schrempf@kontron.de>
 *     Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
 *     Suresh Gupta <suresh.gupta@nxp.com>
 *
 * Based on the original fsl-quadspi.c spi-nor driver.
 * Transition to spi-mem in spi-fsl-qspi.c
 */

#include <common.h>
#include <log.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/libfdt.h>
#include <linux/sizes.h>
#include <linux/iopoll.h>
#include <dm.h>
#include <linux/err.h>
#include <spi.h>
#include <spi-mem.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * The driver uses only a single LUT entry, which is updated on
 * each call of exec_op(). Index 0 is preset at boot with a basic
 * read operation, so let's use the last entry (15).
 */
#define SEQID_LUT			15
#define SEQID_LUT_AHB			14

/* Registers used by the driver */
#define QUADSPI_MCR			0x00
#define QUADSPI_MCR_RESERVED_MASK	GENMASK(19, 16)
#define QUADSPI_MCR_MDIS_MASK		BIT(14)
#define QUADSPI_MCR_CLR_TXF_MASK	BIT(11)
#define QUADSPI_MCR_CLR_RXF_MASK	BIT(10)
#define QUADSPI_MCR_DDR_EN_MASK		BIT(7)
#define QUADSPI_MCR_END_CFG_MASK	GENMASK(3, 2)
#define QUADSPI_MCR_SWRSTHD_MASK	BIT(1)
#define QUADSPI_MCR_SWRSTSD_MASK	BIT(0)

#define QUADSPI_IPCR			0x08
#define QUADSPI_IPCR_SEQID(x)		((x) << 24)
#define QUADSPI_FLSHCR			0x0c
#define QUADSPI_FLSHCR_TCSS_MASK	GENMASK(3, 0)
#define QUADSPI_FLSHCR_TCSH_MASK	GENMASK(11, 8)
#define QUADSPI_FLSHCR_TDH_MASK		GENMASK(17, 16)

#define QUADSPI_BUF3CR			0x1c
#define QUADSPI_BUF3CR_ALLMST_MASK	BIT(31)
#define QUADSPI_BUF3CR_ADATSZ(x)	((x) << 8)
#define QUADSPI_BUF3CR_ADATSZ_MASK	GENMASK(15, 8)

#define QUADSPI_BFGENCR			0x20
#define QUADSPI_BFGENCR_SEQID(x)	((x) << 12)

#define QUADSPI_BUF0IND			0x30
#define QUADSPI_BUF1IND			0x34
#define QUADSPI_BUF2IND			0x38
#define QUADSPI_SFAR			0x100

#define QUADSPI_SMPR			0x108
#define QUADSPI_SMPR_DDRSMP_MASK	GENMASK(18, 16)
#define QUADSPI_SMPR_FSDLY_MASK		BIT(6)
#define QUADSPI_SMPR_FSPHS_MASK		BIT(5)
#define QUADSPI_SMPR_HSENA_MASK		BIT(0)

#define QUADSPI_RBCT			0x110
#define QUADSPI_RBCT_WMRK_MASK		GENMASK(4, 0)
#define QUADSPI_RBCT_RXBRD_USEIPS	BIT(8)

#define QUADSPI_TBDR			0x154

#define QUADSPI_SR			0x15c
#define QUADSPI_SR_IP_ACC_MASK		BIT(1)
#define QUADSPI_SR_AHB_ACC_MASK		BIT(2)

#define QUADSPI_FR			0x160
#define QUADSPI_FR_TFF_MASK		BIT(0)

#define QUADSPI_RSER			0x164
#define QUADSPI_RSER_TFIE		BIT(0)

#define QUADSPI_SPTRCLR			0x16c
#define QUADSPI_SPTRCLR_IPPTRC		BIT(8)
#define QUADSPI_SPTRCLR_BFPTRC		BIT(0)

#define QUADSPI_SFA1AD			0x180
#define QUADSPI_SFA2AD			0x184
#define QUADSPI_SFB1AD			0x188
#define QUADSPI_SFB2AD			0x18c
#define QUADSPI_RBDR(x)			(0x200 + ((x) * 4))

#define QUADSPI_LUTKEY			0x300
#define QUADSPI_LUTKEY_VALUE		0x5AF05AF0

#define QUADSPI_LCKCR			0x304
#define QUADSPI_LCKER_LOCK		BIT(0)
#define QUADSPI_LCKER_UNLOCK		BIT(1)

#define QUADSPI_LUT_BASE		0x310
#define QUADSPI_LUT_OFFSET		(SEQID_LUT * 4 * 4)
#define QUADSPI_LUT_REG(idx) \
	(QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4)

#define QUADSPI_AHB_LUT_OFFSET		(SEQID_LUT_AHB * 4 * 4)
#define QUADSPI_AHB_LUT_REG(idx) \
	(QUADSPI_LUT_BASE + QUADSPI_AHB_LUT_OFFSET + (idx) * 4)

/* Instruction set for the LUT register */
#define LUT_STOP		0
#define LUT_CMD			1
#define LUT_ADDR		2
#define LUT_DUMMY		3
#define LUT_MODE		4
#define LUT_MODE2		5
#define LUT_MODE4		6
#define LUT_FSL_READ		7
#define LUT_FSL_WRITE		8
#define LUT_JMP_ON_CS		9
#define LUT_ADDR_DDR		10
#define LUT_MODE_DDR		11
#define LUT_MODE2_DDR		12
#define LUT_MODE4_DDR		13
#define LUT_FSL_READ_DDR	14
#define LUT_FSL_WRITE_DDR	15
#define LUT_DATA_LEARN		16

/*
 * The PAD definitions for LUT register.
 *
 * The pad stands for the number of IO lines [0:3].
 * For example, the quad read needs four IO lines,
 * so you should use LUT_PAD(4).
 */
#define LUT_PAD(x)	(fls(x) - 1)

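/* For reference: LUT_PAD(1) == 0, LUT_PAD(2) == 1 and LUT_PAD(4) == 2. */
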
/*
 * Macro for constructing the LUT entries with the following
 * register layout:
 *
 *  ---------------------------------------------------
 *  | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
 *  ---------------------------------------------------
 */
#define LUT_DEF(idx, ins, pad, opr)					\
	((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16))

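/*
 * Illustrative expansion: LUT_DEF(0, LUT_CMD, LUT_PAD(1), 0x9f) evaluates
 * to (1 << 10) | (0 << 8) | 0x9f = 0x049f, i.e. INSTR0 = CMD, PAD0 = one
 * I/O line and OPRND0 = opcode 0x9f in the low half-word; an odd idx
 * shifts the same bit pattern into the INSTR1/PAD1/OPRND1 half-word.
 */
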
/* Controller needs driver to swap endianness */
#define QUADSPI_QUIRK_SWAP_ENDIAN	BIT(0)

/* Controller needs 4x internal clock */
#define QUADSPI_QUIRK_4X_INT_CLK	BIT(1)

/*
 * TKT253890, the controller needs the driver to fill the txfifo with
 * at least 16 bytes to trigger a data transfer, even though the extra
 * data won't be transferred.
 */
#define QUADSPI_QUIRK_TKT253890		BIT(2)

/* TKT245618, the controller cannot wake up from wait mode */
#define QUADSPI_QUIRK_TKT245618		BIT(3)

/*
 * Controller adds QSPI_AMBA_BASE (base address of the mapped memory)
 * internally. No need to add it when setting the SFXXAD and SFAR registers.
 */
#define QUADSPI_QUIRK_BASE_INTERNAL	BIT(4)

/*
 * Controller uses TDH bits in register QUADSPI_FLSHCR.
 * They need to be set in accordance with the DDR/SDR mode.
 */
#define QUADSPI_QUIRK_USE_TDH_SETTING	BIT(5)

/*
 * Controller only has two CS on flash A; there is no flash B port.
 */
#define QUADSPI_QUIRK_SINGLE_BUS	BIT(6)

struct fsl_qspi_devtype_data {
	unsigned int rxfifo;
	unsigned int txfifo;
	unsigned int ahb_buf_size;
	unsigned int quirks;
	bool little_endian;
};

static const struct fsl_qspi_devtype_data vybrid_data = {
	.rxfifo = SZ_128,
	.txfifo = SZ_64,
	.ahb_buf_size = SZ_1K,
	.quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
	.little_endian = true,
};

static const struct fsl_qspi_devtype_data imx6sx_data = {
	.rxfifo = SZ_128,
	.txfifo = SZ_512,
	.ahb_buf_size = SZ_1K,
	.quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
	.little_endian = true,
};

static const struct fsl_qspi_devtype_data imx7d_data = {
	.rxfifo = SZ_128,
	.txfifo = SZ_512,
	.ahb_buf_size = SZ_1K,
	.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
		  QUADSPI_QUIRK_USE_TDH_SETTING,
	.little_endian = true,
};

static const struct fsl_qspi_devtype_data imx6ul_data = {
	.rxfifo = SZ_128,
	.txfifo = SZ_512,
	.ahb_buf_size = SZ_1K,
	.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
		  QUADSPI_QUIRK_USE_TDH_SETTING,
	.little_endian = true,
};

static const struct fsl_qspi_devtype_data imx7ulp_data = {
	.rxfifo = SZ_64,
	.txfifo = SZ_64,
	.ahb_buf_size = SZ_128,
	.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
		  QUADSPI_QUIRK_USE_TDH_SETTING | QUADSPI_QUIRK_SINGLE_BUS,
	.little_endian = true,
};

static const struct fsl_qspi_devtype_data ls1021a_data = {
	.rxfifo = SZ_128,
	.txfifo = SZ_64,
	.ahb_buf_size = SZ_1K,
	.quirks = 0,
	.little_endian = false,
};

static const struct fsl_qspi_devtype_data ls1088a_data = {
	.rxfifo = SZ_128,
	.txfifo = SZ_128,
	.ahb_buf_size = SZ_1K,
	.quirks = QUADSPI_QUIRK_TKT253890,
	.little_endian = true,
};

static const struct fsl_qspi_devtype_data ls2080a_data = {
	.rxfifo = SZ_128,
	.txfifo = SZ_64,
	.ahb_buf_size = SZ_1K,
	.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
	.little_endian = true,
};

struct fsl_qspi {
	struct udevice *dev;
	void __iomem *iobase;
	void __iomem *ahb_addr;
	u32 memmap_phy;
	u32 memmap_size;
	const struct fsl_qspi_devtype_data *devtype_data;
	int selected;
};

static inline int needs_swap_endian(struct fsl_qspi *q)
{
	return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
}

static inline int needs_4x_clock(struct fsl_qspi *q)
{
	return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
}

static inline int needs_fill_txfifo(struct fsl_qspi *q)
{
	return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
}

static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
{
	return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
}

static inline int needs_amba_base_offset(struct fsl_qspi *q)
{
	return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
}

static inline int needs_tdh_setting(struct fsl_qspi *q)
{
	return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING;
}

static inline int needs_single_bus(struct fsl_qspi *q)
{
	return q->devtype_data->quirks & QUADSPI_QUIRK_SINGLE_BUS;
}

/*
 * An IC bug makes it necessary to rearrange the 32-bit data.
 * Later chips, such as IMX6SLX, have fixed this bug.
 */
static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
{
	return needs_swap_endian(q) ? __swab32(a) : a;
}

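/*
 * Illustrative effect of the swap on affected controllers (e.g. Vybrid,
 * which sets QUADSPI_QUIRK_SWAP_ENDIAN): the FIFO word 0x12345678 is
 * written to or read from the hardware as 0x78563412; on fixed controllers
 * the word passes through unchanged.
 */
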
/*
 * R/W functions for big- or little-endian registers:
 * The QSPI controller's endianness is independent of
 * the CPU core's endianness. Even when the CPU core is
 * little-endian, the QSPI controller can be wired as
 * either big-endian or little-endian.
 */
static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
{
	if (q->devtype_data->little_endian)
		out_le32(addr, val);
	else
		out_be32(addr, val);
}

static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
{
	if (q->devtype_data->little_endian)
		return in_le32(addr);

	return in_be32(addr);
}

static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
{
	switch (width) {
	case 1:
	case 2:
	case 4:
		return 0;
	}

	return -ENOTSUPP;
}

static bool fsl_qspi_supports_op(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct fsl_qspi *q = dev_get_priv(slave->dev->parent);
	int ret;

	ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);

	if (op->addr.nbytes)
		ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth);

	if (op->dummy.nbytes)
		ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth);

	if (op->data.nbytes)
		ret |= fsl_qspi_check_buswidth(q, op->data.buswidth);

	if (ret)
		return false;

	/*
	 * The number of instructions needed for the op needs
	 * to fit into a single LUT entry.
	 */
	if (op->addr.nbytes +
	   (op->dummy.nbytes ? 1 : 0) +
	   (op->data.nbytes ? 1 : 0) > 6)
		return false;

	/* Max 64 dummy clock cycles supported */
	if (op->dummy.nbytes &&
	    (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
		return false;

	/* Max data length, check controller limits and alignment */
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    (op->data.nbytes > q->devtype_data->ahb_buf_size ||
	     (op->data.nbytes > q->devtype_data->rxfifo - 4 &&
	      !IS_ALIGNED(op->data.nbytes, 8))))
		return false;

	if (op->data.dir == SPI_MEM_DATA_OUT &&
	    op->data.nbytes > q->devtype_data->txfifo)
		return false;

	return true;
}

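/*
 * Worked example of the LUT-slot budget checked above (illustrative): with
 * one slot taken by the command and one by the STOP instruction, six slots
 * remain. A fast read with a 3-byte address, one dummy byte and a data
 * phase needs 3 + 1 + 1 = 5 of them and is accepted; a 4-byte address
 * still fits exactly (6), but anything larger is rejected.
 */
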
static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
				 const struct spi_mem_op *op)
{
	void __iomem *base = q->iobase;
	u32 lutval[4] = {};
	int lutidx = 1, i;

	lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
			     op->cmd.opcode);

	if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
		if (op->addr.nbytes) {
			lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR,
					      LUT_PAD(op->addr.buswidth),
					      (op->addr.nbytes == 4) ? 0x20 : 0x18);
			lutidx++;
		}
	} else {
		/*
		 * For some unknown reason, using LUT_ADDR doesn't work in
		 * some cases (at least with only one-byte-long addresses),
		 * so let's use LUT_MODE to write the address bytes one by
		 * one.
		 */
		for (i = 0; i < op->addr.nbytes; i++) {
			u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1));

			lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE,
						      LUT_PAD(op->addr.buswidth),
						      addrbyte);
			lutidx++;
		}
	}

	if (op->dummy.nbytes) {
		lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
					      LUT_PAD(op->dummy.buswidth),
					      op->dummy.nbytes * 8 /
					      op->dummy.buswidth);
		lutidx++;
	}

	if (op->data.nbytes) {
		lutval[lutidx / 2] |= LUT_DEF(lutidx,
					      op->data.dir == SPI_MEM_DATA_IN ?
					      LUT_FSL_READ : LUT_FSL_WRITE,
					      LUT_PAD(op->data.buswidth),
					      0);
		lutidx++;
	}

	lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);

	/* unlock LUT */
	qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
	qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);

	dev_dbg(q->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x]\n",
		op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3]);

	/* fill LUT */
	for (i = 0; i < ARRAY_SIZE(lutval); i++)
		qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i));

	if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
		if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN &&
		    op->addr.nbytes) {
			for (i = 0; i < ARRAY_SIZE(lutval); i++)
				qspi_writel(q, lutval[i], base + QUADSPI_AHB_LUT_REG(i));
		}
	}

	/* lock LUT */
	qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
	qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
}

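/*
 * Illustrative result of fsl_qspi_prepare_lut() above, assuming
 * CONFIG_FSL_QSPI_AHB_FULL_MAP is disabled: a single-wire fast read with a
 * 3-byte address and one dummy byte is programmed as
 * CMD(opcode), MODE(addr[23:16]), MODE(addr[15:8]), MODE(addr[7:0]),
 * DUMMY(8 cycles), READ, STOP, packed two instructions per LUT register of
 * sequence SEQID_LUT.
 */
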
/*
 * If we have changed the content of the flash by writing or erasing, or if we
 * read from flash with a different offset into the page buffer, we need to
 * invalidate the AHB buffer. If we do not do so, we may read out the wrong
 * data. The spec tells us to reset the AHB domain and Serial Flash domain at
 * the same time.
 */
static void fsl_qspi_invalidate(struct fsl_qspi *q)
{
	u32 reg;

	reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
	reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
	qspi_writel(q, reg, q->iobase + QUADSPI_MCR);

	/*
	 * The minimum delay is 1 AHB clock + 2 SFCK clocks.
	 * A delay of 1 us is enough.
	 */
	udelay(1);

	reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
	qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
}

static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_slave *slave)
{
	struct dm_spi_slave_platdata *plat =
		dev_get_parent_platdata(slave->dev);

	if (q->selected == plat->cs)
		return;

	q->selected = plat->cs;
	fsl_qspi_invalidate(q);
}

static u32 fsl_qspi_memsize_per_cs(struct fsl_qspi *q)
{
	if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
		if (needs_single_bus(q))
			return q->memmap_size / 2;
		else
			return q->memmap_size / 4;
	} else {
		return ALIGN(q->devtype_data->ahb_buf_size, 0x400);
	}
}

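/*
 * Illustrative sizing (the 256 MiB figure is only an example): with a
 * 256 MiB "QuadSPI-memory" region and CONFIG_FSL_QSPI_AHB_FULL_MAP enabled,
 * each chip select gets 64 MiB of the map, or 128 MiB on single-bus
 * controllers such as the i.MX7ULP. Without the full map, each chip select
 * only gets ahb_buf_size rounded up to a 1 KiB boundary.
 */
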
static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
{
	void __iomem *ahb_read_addr = q->ahb_addr;

	if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
		if (op->addr.nbytes)
			ahb_read_addr += op->addr.val;
	}

	memcpy_fromio(op->data.buf.in,
		      ahb_read_addr + q->selected * fsl_qspi_memsize_per_cs(q),
		      op->data.nbytes);
}

static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
				 const struct spi_mem_op *op)
{
	void __iomem *base = q->iobase;
	int i;
	u32 val;

	for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
		memcpy(&val, op->data.buf.out + i, 4);
		val = fsl_qspi_endian_xchg(q, val);
		qspi_writel(q, val, base + QUADSPI_TBDR);
	}

	if (i < op->data.nbytes) {
		memcpy(&val, op->data.buf.out + i, op->data.nbytes - i);
		val = fsl_qspi_endian_xchg(q, val);
		qspi_writel(q, val, base + QUADSPI_TBDR);
	}

	if (needs_fill_txfifo(q)) {
		for (i = op->data.nbytes; i < 16; i += 4)
			qspi_writel(q, 0, base + QUADSPI_TBDR);
	}
}

static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
				 const struct spi_mem_op *op)
{
	void __iomem *base = q->iobase;
	int i;
	u8 *buf = op->data.buf.in;
	u32 val;

	for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
		val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
		val = fsl_qspi_endian_xchg(q, val);
		memcpy(buf + i, &val, 4);
	}

	if (i < op->data.nbytes) {
		val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
		val = fsl_qspi_endian_xchg(q, val);
		memcpy(buf + i, &val, op->data.nbytes - i);
	}
}

static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
				    u32 mask, u32 delay_us, u32 timeout_us)
{
	u32 reg;

	if (!q->devtype_data->little_endian)
		mask = (u32)cpu_to_be32(mask);

	return readl_poll_timeout(base, reg, !(reg & mask), timeout_us);
}

static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
{
	void __iomem *base = q->iobase;
	int err = 0;

	/*
	 * Always start the sequence at the same index since we update
	 * the LUT on each exec_op() call. Also specify the DATA length,
	 * since it has not been specified in the LUT.
	 */
	qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT),
		    base + QUADSPI_IPCR);

	/* wait for the controller to become ready */
	err = fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR,
				       (QUADSPI_SR_IP_ACC_MASK |
					QUADSPI_SR_AHB_ACC_MASK),
				       10, 1000);

	if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
		fsl_qspi_read_rxfifo(q, op);

	return err;
}

static int fsl_qspi_exec_op(struct spi_slave *slave,
			    const struct spi_mem_op *op)
{
	struct fsl_qspi *q = dev_get_priv(slave->dev->parent);
	void __iomem *base = q->iobase;
	u32 addr_offset = 0;
	int err = 0;

	/* wait for the controller to become ready */
	fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
				 QUADSPI_SR_AHB_ACC_MASK), 10, 1000);

	fsl_qspi_select_mem(q, slave);

	if (needs_amba_base_offset(q))
		addr_offset = q->memmap_phy;

	if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
		if (op->addr.nbytes)
			addr_offset += op->addr.val;
	}

	qspi_writel(q,
		    q->selected * fsl_qspi_memsize_per_cs(q) + addr_offset,
		    base + QUADSPI_SFAR);

	qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) |
		    QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK,
		    base + QUADSPI_MCR);

	qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
		    base + QUADSPI_SPTRCLR);

	fsl_qspi_prepare_lut(q, op);

	/*
	 * If we have large chunks of data, we read them through the AHB bus
	 * by accessing the mapped memory. In all other cases we use
	 * IP commands to access the flash.
	 */
	if (op->data.nbytes > (q->devtype_data->rxfifo - 4) &&
	    op->data.dir == SPI_MEM_DATA_IN) {
		fsl_qspi_read_ahb(q, op);
	} else {
		qspi_writel(q, QUADSPI_RBCT_WMRK_MASK |
			    QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT);

		if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
			fsl_qspi_fill_txfifo(q, op);

		err = fsl_qspi_do_op(q, op);
	}

	/* Invalidate the data in the AHB buffer. */
	fsl_qspi_invalidate(q);

	return err;
}

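/*
 * Illustrative read-path split for the check above: on a controller with a
 * 128-byte RX FIFO (e.g. i.MX7D), reads larger than 124 bytes go through
 * the memory-mapped AHB window, while smaller reads and all writes are
 * issued as IP commands through the RX/TX FIFOs.
 */
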
static int fsl_qspi_adjust_op_size(struct spi_slave *slave,
				   struct spi_mem_op *op)
{
	struct fsl_qspi *q = dev_get_priv(slave->dev->parent);

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		if (op->data.nbytes > q->devtype_data->txfifo)
			op->data.nbytes = q->devtype_data->txfifo;
	} else {
		if (op->data.nbytes > q->devtype_data->ahb_buf_size)
			op->data.nbytes = q->devtype_data->ahb_buf_size;
		else if (op->data.nbytes > (q->devtype_data->rxfifo - 4))
			op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
	}

	return 0;
}

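/*
 * Illustrative clipping performed above, using the i.MX7D limits
 * (txfifo = 512, rxfifo = 128, ahb_buf_size = 1 KiB): a 2 KiB read is
 * clipped to 1 KiB, a 131-byte read is rounded down to 128 bytes, and a
 * 600-byte write is clipped to 512 bytes, so callers can split larger
 * transfers into chunks of the adjusted size.
 */
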
static int fsl_qspi_default_setup(struct fsl_qspi *q)
{
	void __iomem *base = q->iobase;
	u32 reg, addr_offset = 0, memsize_cs;

	/* Reset the module */
	qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
		    base + QUADSPI_MCR);
	udelay(1);

	/* Disable the module */
	qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
		    base + QUADSPI_MCR);

	/*
	 * Previous boot stages (BootROM, bootloader) might have used DDR
	 * mode and did not clear the TDH bits. As we currently use SDR mode
	 * only, clear the TDH bits if necessary.
	 */
	if (needs_tdh_setting(q))
		qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) &
			    ~QUADSPI_FLSHCR_TDH_MASK,
			    base + QUADSPI_FLSHCR);

	reg = qspi_readl(q, base + QUADSPI_SMPR);
	qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
			| QUADSPI_SMPR_FSPHS_MASK
			| QUADSPI_SMPR_HSENA_MASK
			| QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);

	/* We only use buffer3 for AHB reads */
	qspi_writel(q, 0, base + QUADSPI_BUF0IND);
	qspi_writel(q, 0, base + QUADSPI_BUF1IND);
	qspi_writel(q, 0, base + QUADSPI_BUF2IND);

	if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP))
		qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT_AHB),
			    q->iobase + QUADSPI_BFGENCR);
	else
		qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT),
			    q->iobase + QUADSPI_BFGENCR);

	qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT);
	qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
		    QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8),
		    base + QUADSPI_BUF3CR);

	if (needs_amba_base_offset(q))
		addr_offset = q->memmap_phy;

	/*
	 * In HW there can be a maximum of four chips on two buses with
	 * two chip selects on each bus. We use four chip selects in SW
	 * to differentiate between the four chips.
	 * Each chip gets memsize_cs bytes of the memory map, and SFA1AD,
	 * SFA2AD, SFB1AD and SFB2AD are set accordingly.
	 */
	memsize_cs = fsl_qspi_memsize_per_cs(q);
	qspi_writel(q, memsize_cs + addr_offset,
		    base + QUADSPI_SFA1AD);
	qspi_writel(q, memsize_cs * 2 + addr_offset,
		    base + QUADSPI_SFA2AD);
	if (!needs_single_bus(q)) {
		qspi_writel(q, memsize_cs * 3 + addr_offset,
			    base + QUADSPI_SFB1AD);
		qspi_writel(q, memsize_cs * 4 + addr_offset,
			    base + QUADSPI_SFB2AD);
	}

	q->selected = -1;

	/* Enable the module */
	qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
		    base + QUADSPI_MCR);
	return 0;
}

static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
	.adjust_op_size = fsl_qspi_adjust_op_size,
	.supports_op = fsl_qspi_supports_op,
	.exec_op = fsl_qspi_exec_op,
};

static int fsl_qspi_probe(struct udevice *bus)
{
	struct dm_spi_bus *dm_bus = bus->uclass_priv;
	struct fsl_qspi *q = dev_get_priv(bus);
	const void *blob = gd->fdt_blob;
	int node = dev_of_offset(bus);
	struct fdt_resource res;
	int ret;

	q->dev = bus;
	q->devtype_data = (struct fsl_qspi_devtype_data *)
			   dev_get_driver_data(bus);

	/* find the resources */
	ret = fdt_get_named_resource(blob, node, "reg", "reg-names", "QuadSPI",
				     &res);
	if (ret) {
		dev_err(bus, "Can't get regs base address (ret = %d)!\n", ret);
		return -ENOMEM;
	}

	q->iobase = map_physmem(res.start, res.end - res.start, MAP_NOCACHE);

	ret = fdt_get_named_resource(blob, node, "reg", "reg-names",
				     "QuadSPI-memory", &res);
	if (ret) {
		dev_err(bus, "Can't get AMBA base address (ret = %d)!\n", ret);
		return -ENOMEM;
	}

	q->ahb_addr = map_physmem(res.start, res.end - res.start, MAP_NOCACHE);
	q->memmap_phy = res.start;
	q->memmap_size = res.end - res.start;

	dm_bus->max_hz = fdtdec_get_int(blob, node, "spi-max-frequency",
					66000000);

	fsl_qspi_default_setup(q);

	return 0;
}

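/*
 * Illustrative device tree node consumed by the probe routine above (the
 * unit address and reg values are placeholders, not taken from a specific
 * board):
 *
 *	qspi: spi@21e0000 {
 *		compatible = "fsl,imx6sx-qspi";
 *		reg = <0x021e0000 0x4000>, <0x60000000 0x10000000>;
 *		reg-names = "QuadSPI", "QuadSPI-memory";
 *		spi-max-frequency = <66000000>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */
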
static int fsl_qspi_xfer(struct udevice *dev, unsigned int bitlen,
			 const void *dout, void *din, unsigned long flags)
{
	return 0;
}

static int fsl_qspi_claim_bus(struct udevice *dev)
{
	return 0;
}

static int fsl_qspi_release_bus(struct udevice *dev)
{
	return 0;
}

static int fsl_qspi_set_speed(struct udevice *bus, uint speed)
{
	return 0;
}

static int fsl_qspi_set_mode(struct udevice *bus, uint mode)
{
	return 0;
}

static const struct dm_spi_ops fsl_qspi_ops = {
	.claim_bus	= fsl_qspi_claim_bus,
	.release_bus	= fsl_qspi_release_bus,
	.xfer		= fsl_qspi_xfer,
	.set_speed	= fsl_qspi_set_speed,
	.set_mode	= fsl_qspi_set_mode,
	.mem_ops	= &fsl_qspi_mem_ops,
};

static const struct udevice_id fsl_qspi_ids[] = {
	{ .compatible = "fsl,vf610-qspi", .data = (ulong)&vybrid_data, },
	{ .compatible = "fsl,imx6sx-qspi", .data = (ulong)&imx6sx_data, },
	{ .compatible = "fsl,imx6ul-qspi", .data = (ulong)&imx6ul_data, },
	{ .compatible = "fsl,imx7d-qspi", .data = (ulong)&imx7d_data, },
	{ .compatible = "fsl,imx7ulp-qspi", .data = (ulong)&imx7ulp_data, },
	{ .compatible = "fsl,ls1021a-qspi", .data = (ulong)&ls1021a_data, },
	{ .compatible = "fsl,ls1088a-qspi", .data = (ulong)&ls1088a_data, },
	{ .compatible = "fsl,ls2080a-qspi", .data = (ulong)&ls2080a_data, },
	{ }
};

U_BOOT_DRIVER(fsl_qspi) = {
	.name	= "fsl_qspi",
	.id	= UCLASS_SPI,
	.of_match = fsl_qspi_ids,
	.ops	= &fsl_qspi_ops,
	.priv_auto_alloc_size = sizeof(struct fsl_qspi),
	.probe	= fsl_qspi_probe,
};