// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017-2020 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 * Layerscape PCIe driver
 */

#include <common.h>
#include <dm.h>
#include <init.h>
#include <log.h>
#include <pci.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/io.h>
#include <errno.h>
#ifdef CONFIG_OF_BOARD_SETUP
#include <linux/libfdt.h>
#include <fdt_support.h>
#ifdef CONFIG_ARM
#include <asm/arch/clock.h>
#endif
#include <malloc.h>
#include <env.h>
#include "pcie_layerscape.h"
#include "pcie_layerscape_fixup_common.h"

static int fdt_pcie_get_nodeoffset(void *blob, struct ls_pcie_rc *pcie_rc)
{
	int nodeoffset;
	uint svr;
	char *compat = NULL;

	/* find pci controller node */
	nodeoffset = fdt_node_offset_by_compat_reg(blob, "fsl,ls-pcie",
						   pcie_rc->dbi_res.start);
	if (nodeoffset < 0) {
#ifdef CONFIG_FSL_PCIE_COMPAT /* Compatible with older version of dts node */
		svr = (get_svr() >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
		if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
		    svr == SVR_LS2048A || svr == SVR_LS2044A ||
		    svr == SVR_LS2081A || svr == SVR_LS2041A)
			compat = "fsl,ls2088a-pcie";
		else
			compat = CONFIG_FSL_PCIE_COMPAT;

		nodeoffset =
			fdt_node_offset_by_compat_reg(blob, compat,
						      pcie_rc->dbi_res.start);
#endif
	}

	return nodeoffset;
}

#if defined(CONFIG_FSL_LSCH3) || defined(CONFIG_FSL_LSCH2)
/*
 * Return next available LUT index.
 */
static int ls_pcie_next_lut_index(struct ls_pcie_rc *pcie_rc)
{
	if (pcie_rc->next_lut_index < PCIE_LUT_ENTRY_COUNT)
		return pcie_rc->next_lut_index++;
	else
		return -ENOSPC;  /* LUT is full */
}

static void lut_writel(struct ls_pcie_rc *pcie_rc, unsigned int value,
		       unsigned int offset)
{
	struct ls_pcie *pcie = pcie_rc->pcie;

	if (pcie->big_endian)
		out_be32(pcie->lut + offset, value);
	else
		out_le32(pcie->lut + offset, value);
}

/*
 * Program a single LUT entry
 */
static void ls_pcie_lut_set_mapping(struct ls_pcie_rc *pcie_rc, int index,
				    u32 devid, u32 streamid)
{
	/* leave mask as all zeroes, want to match all bits */
	lut_writel(pcie_rc, devid << 16, PCIE_LUT_UDR(index));
	lut_writel(pcie_rc, streamid | PCIE_LUT_ENABLE, PCIE_LUT_LDR(index));
}

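/*
 * Illustrative example for ls_pcie_lut_set_mapping(), with hypothetical
 * values: for LUT index 0, requester ID 0x100 (bus 1, devfn 0) and
 * stream ID 0x17, the two writes amount to
 *
 *	PCIE_LUT_UDR(0) = 0x100 << 16;	(requester ID in upper half, mask 0)
 *	PCIE_LUT_LDR(0) = 0x17 | PCIE_LUT_ENABLE;
 *
 * i.e. the upper register selects which requester ID to match and the
 * lower one holds the enabled stream ID it maps to.
 */
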
/*
 * An msi-map is a property to be added to the pci controller
 * node. It is a table, where each entry consists of 4 fields
 * e.g.:
 *
 *      msi-map = <[devid] [phandle-to-msi-ctrl] [stream-id] [count]
 *                 [devid] [phandle-to-msi-ctrl] [stream-id] [count]>;
 */
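/*
 * A concrete (hypothetical) instance of such a row as it ends up in the
 * fixed-up device tree, assuming the msi-parent is the GIC ITS node:
 *
 *      msi-map = <0x0000 &its 0x0011 0x1>;
 *
 * i.e. requester ID 0x0000 is assigned ITS device/stream ID 0x11, for a
 * single ID.
 */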
static void fdt_pcie_set_msi_map_entry_ls(void *blob,
					  struct ls_pcie_rc *pcie_rc,
					  u32 devid, u32 streamid)
{
	u32 *prop;
	u32 phandle;
	int nodeoffset;
	uint svr;
	char *compat = NULL;
	struct ls_pcie *pcie = pcie_rc->pcie;

	/* find pci controller node */
	nodeoffset = fdt_node_offset_by_compat_reg(blob, "fsl,ls-pcie",
						   pcie_rc->dbi_res.start);
	if (nodeoffset < 0) {
#ifdef CONFIG_FSL_PCIE_COMPAT /* Compatible with older version of dts node */
		svr = (get_svr() >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
		if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
		    svr == SVR_LS2048A || svr == SVR_LS2044A ||
		    svr == SVR_LS2081A || svr == SVR_LS2041A)
			compat = "fsl,ls2088a-pcie";
		else
			compat = CONFIG_FSL_PCIE_COMPAT;
		if (compat)
			nodeoffset = fdt_node_offset_by_compat_reg(blob,
					compat, pcie_rc->dbi_res.start);
#endif
		if (nodeoffset < 0)
			return;
	}

	/* get phandle to MSI controller */
	prop = (u32 *)fdt_getprop(blob, nodeoffset, "msi-parent", 0);
	if (prop == NULL) {
		debug("\n%s: ERROR: missing msi-parent: PCIe%d\n",
		      __func__, pcie->idx);
		return;
	}
	phandle = fdt32_to_cpu(*prop);

	/* set one msi-map row */
	fdt_appendprop_u32(blob, nodeoffset, "msi-map", devid);
	fdt_appendprop_u32(blob, nodeoffset, "msi-map", phandle);
	fdt_appendprop_u32(blob, nodeoffset, "msi-map", streamid);
	fdt_appendprop_u32(blob, nodeoffset, "msi-map", 1);
}

/*
 * An iommu-map is a property to be added to the pci controller
 * node. It is a table, where each entry consists of 4 fields
 * e.g.:
 *
 *      iommu-map = <[devid] [phandle-to-iommu-ctrl] [stream-id] [count]
 *                  [devid] [phandle-to-iommu-ctrl] [stream-id] [count]>;
 */
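/*
 * A concrete (hypothetical) instance, assuming the IOMMU is the SMMU node:
 *
 *      iommu-map = <0x0000 &smmu 0x0011 0x1>;
 *
 * i.e. requester ID 0x0000 is translated through SMMU stream ID 0x11,
 * for a single ID.
 */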
static void fdt_pcie_set_iommu_map_entry_ls(void *blob,
					    struct ls_pcie_rc *pcie_rc,
					    u32 devid, u32 streamid)
{
	u32 *prop;
	u32 iommu_map[4];
	int nodeoffset;
	int lenp;
	struct ls_pcie *pcie = pcie_rc->pcie;

	nodeoffset = fdt_pcie_get_nodeoffset(blob, pcie_rc);
	if (nodeoffset < 0)
		return;

	/* get phandle to iommu controller */
	prop = fdt_getprop_w(blob, nodeoffset, "iommu-map", &lenp);
	if (prop == NULL) {
		debug("\n%s: ERROR: missing iommu-map: PCIe%d\n",
		      __func__, pcie->idx);
		return;
	}

	/* set iommu-map row */
	iommu_map[0] = cpu_to_fdt32(devid);
	iommu_map[1] = *++prop;
	iommu_map[2] = cpu_to_fdt32(streamid);
	iommu_map[3] = cpu_to_fdt32(1);

	if (devid == 0) {
		fdt_setprop_inplace(blob, nodeoffset, "iommu-map",
				    iommu_map, 16);
	} else {
		fdt_appendprop(blob, nodeoffset, "iommu-map", iommu_map, 16);
	}
}

static int fdt_fixup_pcie_device_ls(void *blob, pci_dev_t bdf,
				    struct ls_pcie_rc *pcie_rc)
{
	int streamid, index;

	streamid = pcie_next_streamid(pcie_rc->stream_id_cur,
				      pcie_rc->pcie->idx);
	if (streamid < 0) {
		printf("ERROR: out of stream ids for BDF %d.%d.%d\n",
		       PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));
		return -ENOENT;
	}
	pcie_rc->stream_id_cur++;

	index = ls_pcie_next_lut_index(pcie_rc);
	if (index < 0) {
		printf("ERROR: out of LUT indexes for BDF %d.%d.%d\n",
		       PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));
		return -ENOENT;
	}

	/* map PCI b.d.f to streamID in LUT */
	ls_pcie_lut_set_mapping(pcie_rc, index, bdf >> 8, streamid);
	/* update msi-map in device tree */
	fdt_pcie_set_msi_map_entry_ls(blob, pcie_rc, bdf >> 8, streamid);
	/* update iommu-map in device tree */
	fdt_pcie_set_iommu_map_entry_ls(blob, pcie_rc, bdf >> 8, streamid);

	return 0;
}

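/*
 * One extra mapping request parsed from the "pci_iommu_extra" environment
 * variable or the "pci-iommu-extra" device tree property: either a slot
 * expected to be hot-plugged after the OS boots, or a PF for which
 * num_vfs SR-IOV VF mappings are to be created up front.
 */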
struct extra_iommu_entry {
	int action;
	pci_dev_t bdf;
	int num_vfs;
	bool noari;
};

#define EXTRA_IOMMU_ENTRY_HOTPLUG	1
#define EXTRA_IOMMU_ENTRY_VFS		2

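/*
 * Sketch of the string format accepted below, inferred from the parser
 * (documented in doc/README.pci_iommu_extra, if present in this tree).
 * In the environment variable each PCIe controller is selected by its
 * register base address:
 *
 *	pci_iommu_extra=pci@0x<ctrl-base>,<b>.<d>.<f>,<action>[,<b>.<d>.<f>,<action>...]
 *
 * where <action> is "hp" (hot-plug), "vfs=<n>" or "noari_vfs=<n>".
 * A hypothetical example:
 *
 *	setenv pci_iommu_extra pci@0x3400000,0.0.0,noari_vfs=2,2.0.0,hp
 *
 * The device tree property form carries only the part after the
 * "pci@0x...," prefix, since the property already lives in the
 * controller's own node.
 */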
static struct extra_iommu_entry *get_extra_iommu_ents(void *blob,
						      int nodeoffset,
						      phys_addr_t addr,
						      int *cnt)
{
	const char *s, *p, *tok;
	struct extra_iommu_entry *entries;
	int i = 0, b, d, f;

	/*
	 * Retrieve extra IOMMU configuration from env var or from device tree.
	 * Env var is given priority.
	 */
	s = env_get("pci_iommu_extra");
	if (!s) {
		s = fdt_getprop(blob, nodeoffset, "pci-iommu-extra", NULL);
	} else {
		phys_addr_t pci_base;
		char *endp;

		/*
		 * In env var case the config string has "pci@0x..." in
		 * addition. Parse this part and match it by address against
		 * the input pci controller's registers base address.
		 */
		tok = s;
		p = strchrnul(s + 1, ',');
		s = NULL;
		do {
			if (!strncmp(tok, "pci", 3)) {
				pci_base = simple_strtoul(tok + 4, &endp, 0);
				if (pci_base == addr) {
					s = endp + 1;
					break;
				}
			}
			p = strchrnul(p + 1, ',');
			tok = p + 1;
		} while (*p);
	}

	/*
	 * If no env var or device tree property found or pci register base
	 * address mismatches, bail out
	 */
	if (!s)
		return NULL;

	/*
	 * In order to find how many action entries to allocate, count number
	 * of actions by iterating through the pairs of bdfs and actions.
	 */
	*cnt = 0;
	p = s;
	while (*p && strncmp(p, "pci", 3)) {
		if (*p == ',')
			(*cnt)++;
		p++;
	}
	if (!(*p))
		(*cnt)++;

	if (!(*cnt) || (*cnt) % 2) {
		printf("ERROR: invalid or odd extra iommu token count %d\n",
		       *cnt);
		return NULL;
	}
	*cnt = (*cnt) / 2;

	entries = malloc((*cnt) * sizeof(*entries));
	if (!entries) {
300 printf("ERROR: fail to allocate extra iommu entries\n");
		return NULL;
	}

	/*
	 * Parse action entries one by one and store the information in the
	 * newly allocated actions array.
	 */
	p = s;
	while (p) {
		/* Extract BDF */
		b = simple_strtoul(p, (char **)&p, 0); p++;
		d = simple_strtoul(p, (char **)&p, 0); p++;
		f = simple_strtoul(p, (char **)&p, 0); p++;
		entries[i].bdf = PCI_BDF(b, d, f);

		/* Parse action */
		if (!strncmp(p, "hp", 2)) {
			/* Hot-plug entry */
			entries[i].action = EXTRA_IOMMU_ENTRY_HOTPLUG;
			p += 2;
		} else if (!strncmp(p, "vfs", 3) ||
			   !strncmp(p, "noari_vfs", 9)) {
			/* VFs or VFs with ARI disabled entry */
			entries[i].action = EXTRA_IOMMU_ENTRY_VFS;
			entries[i].noari = !strncmp(p, "noari_vfs", 9);

			/*
			 * Parse and store total number of VFs to allocate
			 * IOMMU entries for.
			 */
			p = strchr(p, '=');
			entries[i].num_vfs = simple_strtoul(p + 1, (char **)&p,
							    0);
			if (*p)
				p++;
		} else {
			printf("ERROR: invalid action in extra iommu entry\n");
			free(entries);

			return NULL;
		}

		if (!(*p) || !strncmp(p, "pci", 3))
			break;

		i++;
	}

	return entries;
}

static void get_vf_offset_and_stride(struct udevice *dev, int sriov_pos,
				     struct extra_iommu_entry *entry,
				     u16 *offset, u16 *stride)
{
	u16 tmp16;
	u32 tmp32;
	bool have_ari = false;
	int pos;
	struct udevice *pf_dev;

	dm_pci_read_config16(dev, sriov_pos + PCI_SRIOV_TOTAL_VF, &tmp16);
	if (entry->num_vfs > tmp16) {
		printf("WARN: requested no. of VFs %d exceeds total of %d\n",
		       entry->num_vfs, tmp16);
	}

	/*
	 * The code below implements the VF Discovery recommendations specified
	 * in PCIe base spec "9.2.1.2 VF Discovery", quoted below:
	 *
	 * VF Discovery
	 *
	 * The First VF Offset and VF Stride fields in the SR-IOV extended
	 * capability are 16-bit Routing ID offsets. These offsets are used to
	 * compute the Routing IDs for the VFs with the following restrictions:
	 *  - The value in NumVFs in a PF (Section 9.3.3.7) may affect the
	 *    values in First VF Offset (Section 9.3.3.9) and VF Stride
	 *    (Section 9.3.3.10) of that PF.
	 *  - The value in ARI Capable Hierarchy (Section 9.3.3.3.5) in the
	 *    lowest-numbered PF of the Device (for example PF0) may affect
	 *    the values in First VF Offset and VF Stride in all PFs of the
	 *    Device.
	 *  - NumVFs of a PF may only be changed when VF Enable
	 *    (Section 9.3.3.3.1) of that PF is Clear.
	 *  - ARI Capable Hierarchy (Section 9.3.3.3.5) may only be changed
	 *    when VF Enable is Clear in all PFs of a Device.
	 */
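	/*
	 * Putting those two values to use: per the SR-IOV spec the Routing ID
	 * of VF <n> (1-based) of a PF is
	 *
	 *	PF Routing ID + First VF Offset + (n - 1) * VF Stride
	 *
	 * which is how fdt_fixup_pci_vfs() below walks the VFs once the
	 * offset and stride have been read back.
	 */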

	/* Clear VF enable for all PFs */
	device_foreach_child(pf_dev, dev->parent) {
		dm_pci_read_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL,
				     &tmp16);
		tmp16 &= ~PCI_SRIOV_CTRL_VFE;
		dm_pci_write_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL,
				      tmp16);
	}

	/* Obtain a reference to PF0 device */
	if (dm_pci_bus_find_bdf(PCI_BDF(PCI_BUS(entry->bdf),
					PCI_DEV(entry->bdf), 0), &pf_dev)) {
		printf("WARN: failed to get PF0\n");
	}

	if (entry->noari)
		goto skip_ari;

	/* Check that connected downstream port supports ARI Forwarding */
	pos = dm_pci_find_capability(dev->parent, PCI_CAP_ID_EXP);
	dm_pci_read_config32(dev->parent, pos + PCI_EXP_DEVCAP2, &tmp32);
	if (!(tmp32 & PCI_EXP_DEVCAP2_ARI))
		goto skip_ari;

	/* Check that PF supports Alternate Routing ID */
	if (!dm_pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI))
		goto skip_ari;

	/* Set ARI Capable Hierarchy for PF0 */
	dm_pci_read_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL, &tmp16);
	tmp16 |= PCI_SRIOV_CTRL_ARI;
	dm_pci_write_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL, tmp16);
	have_ari = true;

skip_ari:
	if (!have_ari) {
		/*
		 * No ARI support or disabled so clear ARI Capable Hierarchy
		 * for PF0
		 */
		dm_pci_read_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL,
				     &tmp16);
		tmp16 &= ~PCI_SRIOV_CTRL_ARI;
		dm_pci_write_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL,
				      tmp16);
	}

	/* Set requested number of VFs */
	dm_pci_write_config16(dev, sriov_pos + PCI_SRIOV_NUM_VF,
			      entry->num_vfs);

	/* Read VF stride and offset with the configs just made */
	dm_pci_read_config16(dev, sriov_pos + PCI_SRIOV_VF_OFFSET, offset);
	dm_pci_read_config16(dev, sriov_pos + PCI_SRIOV_VF_STRIDE, stride);

	if (have_ari) {
		/* Reset to default ARI Capable Hierarchy bit for PF0 */
		dm_pci_read_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL,
				     &tmp16);
		tmp16 &= ~PCI_SRIOV_CTRL_ARI;
		dm_pci_write_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL,
				      tmp16);
	}
	/* Reset to default the number of VFs */
	dm_pci_write_config16(dev, sriov_pos + PCI_SRIOV_NUM_VF, 0);
}

static int fdt_fixup_pci_vfs(void *blob, struct extra_iommu_entry *entry,
			     struct ls_pcie_rc *pcie_rc)
{
	struct udevice *dev, *bus;
	u16 vf_offset, vf_stride;
	int i, sriov_pos;
	pci_dev_t bdf;

	if (dm_pci_bus_find_bdf(entry->bdf, &dev)) {
		printf("ERROR: BDF %d.%d.%d not found\n", PCI_BUS(entry->bdf),
		       PCI_DEV(entry->bdf), PCI_FUNC(entry->bdf));
		return 0;
	}

	sriov_pos = dm_pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!sriov_pos) {
		printf("WARN: trying to set VFs on non-SRIOV dev\n");
		return 0;
	}

	get_vf_offset_and_stride(dev, sriov_pos, entry, &vf_offset, &vf_stride);

	for (bus = dev; device_is_on_pci_bus(bus);)
		bus = bus->parent;

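	/*
	 * Convert the routing-ID based offset to a pci_dev_t: in U-Boot's
	 * encoding bus/dev/fn sit 8 bits higher than in a raw Routing ID,
	 * hence the << 8. The first VF's BDF is the PF's BDF (made relative
	 * to this controller's root bus) plus First VF Offset; each further
	 * VF advances by VF Stride.
	 */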
	bdf = entry->bdf - PCI_BDF(bus->seq, 0, 0) + (vf_offset << 8);

	for (i = 0; i < entry->num_vfs; i++) {
		if (fdt_fixup_pcie_device_ls(blob, bdf, pcie_rc) < 0)
			return -1;
		bdf += vf_stride << 8;
	}

	printf("Added %d iommu VF mappings for PF %d.%d.%d\n",
	       entry->num_vfs, PCI_BUS(entry->bdf),
	       PCI_DEV(entry->bdf), PCI_FUNC(entry->bdf));

	return 0;
}

static void fdt_fixup_pcie_ls(void *blob)
{
	struct udevice *dev, *bus;
	struct ls_pcie_rc *pcie_rc;
	pci_dev_t bdf;
	struct extra_iommu_entry *entries;
	int i, cnt, nodeoffset;

	/* Scan all known buses */
	for (pci_find_first_device(&dev);
	     dev;
	     pci_find_next_device(&dev)) {
		for (bus = dev; device_is_on_pci_bus(bus);)
			bus = bus->parent;

		/* Only do the fixups for layerscape PCIe controllers */
		if (!device_is_compatible(bus, "fsl,ls-pcie") &&
		    !device_is_compatible(bus, CONFIG_FSL_PCIE_COMPAT))
			continue;

		pcie_rc = dev_get_priv(bus);

		/* the DT fixup must be relative to the hose first_busno */
		bdf = dm_pci_get_bdf(dev) - PCI_BDF(bus->seq, 0, 0);

		if (fdt_fixup_pcie_device_ls(blob, bdf, pcie_rc) < 0)
			break;
	}

	if (!IS_ENABLED(CONFIG_PCI_IOMMU_EXTRA_MAPPINGS))
		goto skip;

	list_for_each_entry(pcie_rc, &ls_pcie_list, list) {
		nodeoffset = fdt_pcie_get_nodeoffset(blob, pcie_rc);
		if (nodeoffset < 0) {
			printf("ERROR: couldn't find pci node\n");
			continue;
		}

		entries = get_extra_iommu_ents(blob, nodeoffset,
					       pcie_rc->dbi_res.start, &cnt);
		if (!entries)
			continue;

		for (i = 0; i < cnt; i++) {
			if (entries[i].action == EXTRA_IOMMU_ENTRY_HOTPLUG) {
				bdf = entries[i].bdf;
				printf("Added iommu map for hotplug %d.%d.%d\n",
				       PCI_BUS(bdf), PCI_DEV(bdf),
				       PCI_FUNC(bdf));
				if (fdt_fixup_pcie_device_ls(blob, bdf,
							     pcie_rc) < 0) {
					free(entries);
					return;
				}
			} else if (entries[i].action == EXTRA_IOMMU_ENTRY_VFS) {
				if (fdt_fixup_pci_vfs(blob, &entries[i],
						      pcie_rc) < 0) {
					free(entries);
					return;
				}
			} else {
				printf("Invalid action %d for BDF %d.%d.%d\n",
				       entries[i].action,
				       PCI_BUS(entries[i].bdf),
				       PCI_DEV(entries[i].bdf),
				       PCI_FUNC(entries[i].bdf));
			}
		}
		free(entries);
	}

skip:
	pcie_board_fix_fdt(blob);
}
#endif

static void ft_pcie_rc_fix(void *blob, struct ls_pcie_rc *pcie_rc)
{
	int off;
	struct ls_pcie *pcie = pcie_rc->pcie;

	off = fdt_pcie_get_nodeoffset(blob, pcie_rc);
	if (off < 0)
		return;

	if (pcie_rc->enabled && pcie->mode == PCI_HEADER_TYPE_BRIDGE)
		fdt_set_node_status(blob, off, FDT_STATUS_OKAY, 0);
	else
		fdt_set_node_status(blob, off, FDT_STATUS_DISABLED, 0);
}

static void ft_pcie_ep_fix(void *blob, struct ls_pcie_rc *pcie_rc)
{
	int off;
	struct ls_pcie *pcie = pcie_rc->pcie;

	off = fdt_node_offset_by_compat_reg(blob, CONFIG_FSL_PCIE_EP_COMPAT,
					    pcie_rc->dbi_res.start);
	if (off < 0)
		return;

	if (pcie_rc->enabled && pcie->mode == PCI_HEADER_TYPE_NORMAL)
		fdt_set_node_status(blob, off, FDT_STATUS_OKAY, 0);
	else
		fdt_set_node_status(blob, off, FDT_STATUS_DISABLED, 0);
}

static void ft_pcie_ls_setup(void *blob, struct ls_pcie_rc *pcie_rc)
{
	ft_pcie_ep_fix(blob, pcie_rc);
	ft_pcie_rc_fix(blob, pcie_rc);
}

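/*
 * Note: on Layerscape boards this fixup is normally reached from the
 * board's ft_board_setup() via ft_pci_setup() in
 * pcie_layerscape_fixup_common.c; that call chain is the expected usage
 * rather than something enforced here.
 */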
/* Fixup Kernel DT for PCIe */
void ft_pci_setup_ls(void *blob, struct bd_info *bd)
{
	struct ls_pcie_rc *pcie_rc;

	list_for_each_entry(pcie_rc, &ls_pcie_list, list)
		ft_pcie_ls_setup(blob, pcie_rc);

#if defined(CONFIG_FSL_LSCH3) || defined(CONFIG_FSL_LSCH2)
	fdt_fixup_pcie_ls(blob);
#endif
}

#else /* !CONFIG_OF_BOARD_SETUP */
void ft_pci_setup_ls(void *blob, struct bd_info *bd)
{
}
#endif