// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 * IPD Support.
 */

#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-range.h>
#include <mach/cvmx-global-resources.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>

#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

cvmx_ipd_config_t cvmx_ipd_cfg = {
	.first_mbuf_skip = 184,
	.ipd_enable = 1,
	.cache_mode = CVMX_IPD_OPC_MODE_STT,
	.packet_pool = { 0, 2048, 0 },
	.wqe_pool = { 1, 128, 0 },
	.port_config = { CVMX_PIP_PORT_CFG_MODE_SKIPL2,
			 CVMX_POW_TAG_TYPE_ORDERED, CVMX_PIP_TAG_MODE_TUPLE,
			 .tag_fields = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 } }
};
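
/*
 * The settings above are the default IPD configuration consumed by the
 * helper initialization code. A board can override individual fields of
 * cvmx_ipd_cfg before the helpers run, for example (illustrative sketch
 * only, the values below are hypothetical):
 *
 *	cvmx_ipd_cfg.first_mbuf_skip = 256;	(larger first-buffer skip)
 *	cvmx_ipd_cfg.ipd_enable = 0;		(leave IPD disabled)
 */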

#define IPD_RED_AVG_DLY 1000
#define IPD_RED_PRB_DLY 1000
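
/**
 * Configure the IPD unit.
 *
 * Each argument is written directly into the corresponding IPD CSR field;
 * consult the hardware reference manual for the exact units of each field.
 *
 * @param mbuff_size            packet buffer (mbuf) size (IPD_PACKET_MBUFF_SIZE)
 * @param first_mbuff_skip      skip amount for the first buffer of a packet
 * @param not_first_mbuff_skip  skip amount for all following buffers
 * @param first_back            next-pointer "back" value for the first buffer
 * @param second_back           next-pointer "back" value for following buffers
 * @param wqe_fpa_pool          FPA pool that work queue entries are allocated from
 * @param cache_mode            cache mode written to IPD_CTL_STATUS[OPC_MODE]
 * @param back_pres_enable_flag enable port backpressure (IPD_CTL_STATUS[PBP_EN])
 */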
void cvmx_ipd_config(u64 mbuff_size, u64 first_mbuff_skip,
		     u64 not_first_mbuff_skip, u64 first_back, u64 second_back,
		     u64 wqe_fpa_pool, cvmx_ipd_mode_t cache_mode,
		     u64 back_pres_enable_flag)
{
	cvmx_ipd_1st_mbuff_skip_t first_skip;
	cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
	cvmx_ipd_packet_mbuff_size_t size;
	cvmx_ipd_1st_next_ptr_back_t first_back_struct;
	cvmx_ipd_second_next_ptr_back_t second_back_struct;
	cvmx_ipd_wqe_fpa_queue_t wqe_pool;
	cvmx_ipd_ctl_status_t ipd_ctl_reg;

	/* Enforce 1st skip minimum if WQE shares the buffer with packet */
	if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
		union cvmx_ipd_ctl_status ctl_status;

		ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
		if (ctl_status.s.no_wptr != 0 && first_mbuff_skip < 16)
			first_mbuff_skip = 16;
	}

	first_skip.u64 = 0;
	first_skip.s.skip_sz = first_mbuff_skip;
	csr_wr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);

	not_first_skip.u64 = 0;
	not_first_skip.s.skip_sz = not_first_mbuff_skip;
	csr_wr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);

	size.u64 = 0;
	size.s.mb_size = mbuff_size;
	csr_wr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);

	first_back_struct.u64 = 0;
	first_back_struct.s.back = first_back;
	csr_wr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);

	second_back_struct.u64 = 0;
	second_back_struct.s.back = second_back;
	csr_wr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);

	wqe_pool.u64 = 0;
	wqe_pool.s.wqe_pool = wqe_fpa_pool;
	csr_wr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);

	ipd_ctl_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
	ipd_ctl_reg.s.opc_mode = cache_mode;
	ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
	csr_wr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);

	/* Note: the example RED code is below */
}
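
/*
 * Illustrative call of cvmx_ipd_config() (a sketch only; the placeholder
 * variables below are hypothetical and would normally be derived from the
 * FPA packet-pool setup):
 *
 *	cvmx_ipd_config(mbuf_size, first_skip, not_first_skip,
 *			first_back, second_back, wqe_pool_num,
 *			CVMX_IPD_OPC_MODE_STT, 1);
 */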

/**
 * Enable IPD
 */
void cvmx_ipd_enable(void)
{
	cvmx_ipd_ctl_status_t ipd_reg;

	ipd_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);

	/*
	 * On CN68XX, busy-wait for IPD_CTL_STATUS[RST_DONE] to clear
	 * before enabling IPD.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		while (ipd_reg.s.rst_done != 0)
			ipd_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);

	if (ipd_reg.s.ipd_en)
		debug("Warning: Enabling IPD when IPD already enabled.\n");

	ipd_reg.s.ipd_en = 1;

	if (cvmx_ipd_cfg.enable_len_M8_fix)
		ipd_reg.s.len_m8 = 1;

	csr_wr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
}
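
/*
 * Typical bring-up order (illustrative only): program the IPD parameters,
 * either directly via cvmx_ipd_config() or through the helper code using
 * cvmx_ipd_cfg, and only then turn the unit on:
 *
 *	cvmx_ipd_config(...);
 *	cvmx_ipd_enable();
 */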