Merge: Update kernel's PCI subsystem to v6.11
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/5357

```
This series updates RHEL9's PCI subsystem with content from upstream v6.11:

- Merge tag 'pci-v6.11-fixes-4' of git://git.kernel.org/pub/scm/../pci/pci
  https://lkml.org/lkml/2024/9/13/
  commit b7718454f937f50f44f98c1222f5135eaef29132
  Merge: e936e7d4a83b fc8c818e7569

- Merge tag 'pci-v6.11-fixes-3' of git://git.kernel.org/pub/scm/../pci/pci
  https://lkml.org/lkml/2024/9/6/1405
  commit 487ee43bac846446fb3e832436bdedd7acb4fe46
  Merge: a86b83f77797 8f62819aaace
  4 files changed, 44 insertions(+), 5 deletions(-)

- Merge tag 'pci-v6.11-fixes-2' of git://git.kernel.org/pub/scm/../pci/pci
  https://lkml.org/lkml/2024/8/30/1561
  commit 8101b2766d5bfee43a4de737107b9592db251470
  Merge: 216d163165a9 150b572a7c1d
  3 files changed, 21 insertions(+), 2 deletions(-)

- Merge tag 'pci-v6.11-fixes-1' of git://git.kernel.org/pub/scm/../pci/pci
  https://lkml.org/lkml/2024/8/1/1278
  commit c0ecd6388360d930440cc5554026818895199923
  Merge: 183d46ff422e 5560a612c20d
  2 files changed, 11 insertions(+), 8 deletions(-)

- Merge tag 'pci-v6.11-changes' of git://git.kernel.org/pub/scm/../pci/pci
  https://lkml.org/lkml/2024/7/19/844
  commit 3f386cb8ee9f04ff4be164ca7a1d0ef3f81f7374
  Merge: 8e5c0abfa02d 45659274e608
  105 files changed, 5208 insertions(+), 1932 deletions(-)

All but three of the patches within the series back-ported cleanly. For the
remaining back-ports, changes were made to the originating upstream patch
because it either was not quite up to date with more recent changes, or
subsequent changes were made during its merge commit. All such occurrences
are noted in the back-port's commit message, with the same changes that
occurred upstream being made in the back-port to keep things in sync.

v2: Removed back-ports of merge commit df5dd337283a "Merge branch
'pci/controller/qcom'" due to prerequisite content that conflicts with other
MRs. A separate MR for df5dd337283a will be created once the dependent MRs
have merged.

JIRA: https://issues.redhat.com/browse/RHEL-59033

Signed-off-by: Myron Stowe <mstowe@redhat.com>
```

Approved-by: Andrew Halaney <ahalaney@redhat.com>
Approved-by: Jarod Wilson <jarod@redhat.com>
Approved-by: Mika Penttilä <mpenttil@redhat.com>
Approved-by: John W. Linville <linville@redhat.com>
Approved-by: Ivan Vecera <ivecera@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>

Merged-by: Rado Vrbovsky <rvrbovsk@redhat.com>
commit 67448d15b8
@@ -172,8 +172,8 @@ by the PCI endpoint function driver.

  * bind: ops to perform when a EPC device has been bound to EPF device
  * unbind: ops to perform when a binding has been lost between a EPC
    device and EPF device
  * linkup: ops to perform when the EPC device has established a
    connection with a host system
  * add_cfs: optional ops to create function specific configfs
    attributes

The PCI Function driver can then register the PCI EPF driver by using
pci_epf_register_driver().
@@ -139,7 +139,7 @@ driver data structure.

  static struct pcie_port_service_driver root_aerdrv = {
          .name = (char *)device_name,
          .id_table = &service_id[0],
          .id_table = service_id,

          .probe = aerdrv_load,
          .remove = aerdrv_unload,
@@ -13,6 +13,35 @@ description: |+
  MediaTek MT7621 PCIe subsys supports a single Root Complex (RC)
  with 3 Root Ports. Each Root Port supports a Gen1 1-lane Link

                        MT7621 PCIe HOST Topology

                               .-------.
                               |       |
                               |  CPU  |
                               |       |
                               '-------'
                                   |
                                   |
                                   v
                          .------------------.
              .-----------| HOST/PCI Bridge  |------------.
              |           '------------------'            |   Type1
       BUS0   |                   |                        |   Access
              v                   v                        v   On Bus0
       .-------------.     .-------------.          .-------------.
       | VIRTUAL P2P |     | VIRTUAL P2P |          | VIRTUAL P2P |
       |    BUS0     |     |    BUS0     |          |    BUS0     |
       |    DEV0     |     |    DEV1     |          |    DEV2     |
       '-------------'     '-------------'          '-------------'
        Type0 |             Type0 |                  Type0 |
       Access BUS1 |       Access BUS2 |            Access BUS3 |
       On Bus1     v       On Bus2     v            On Bus3     v
        .----------.        .----------.             .----------.
        | Device 0 |        | Device 0 |             | Device 0 |
        |  Func 0  |        |  Func 0  |             |  Func 0  |
        '----------'        '----------'             '----------'

allOf:
  - $ref: /schemas/pci/pci-bus.yaml#
@@ -69,6 +69,10 @@ properties:
      - const: msi6
      - const: msi7

  operating-points-v2: true
  opp-table:
    type: object

  resets:
    maxItems: 1
@@ -19,11 +19,10 @@ properties:
    const: qcom,pcie-x1e80100

  reg:
    minItems: 5
    minItems: 6
    maxItems: 6

  reg-names:
    minItems: 5
    items:
      - const: parf # Qualcomm specific registers
      - const: dbi # DesignWare PCIe registers
@@ -0,0 +1,126 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/pci/rockchip-dw-pcie-common.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: DesignWare based PCIe RC/EP controller on Rockchip SoCs

maintainers:
  - Shawn Lin <shawn.lin@rock-chips.com>
  - Simon Xue <xxm@rock-chips.com>
  - Heiko Stuebner <heiko@sntech.de>

description: |+
  Generic properties for the DesignWare based PCIe RC/EP controller on Rockchip
  SoCs.

properties:
  clocks:
    minItems: 5
    items:
      - description: AHB clock for PCIe master
      - description: AHB clock for PCIe slave
      - description: AHB clock for PCIe dbi
      - description: APB clock for PCIe
      - description: Auxiliary clock for PCIe
      - description: PIPE clock
      - description: Reference clock for PCIe

  clock-names:
    minItems: 5
    items:
      - const: aclk_mst
      - const: aclk_slv
      - const: aclk_dbi
      - const: pclk
      - const: aux
      - const: pipe
      - const: ref

  interrupts:
    minItems: 5
    items:
      - description:
          Combined system interrupt, which is used to signal the following
          interrupts - phy_link_up, dll_link_up, link_req_rst_not, hp_pme,
          hp, hp_msi, link_auto_bw, link_auto_bw_msi, bw_mgt, bw_mgt_msi,
          edma_wr, edma_rd, dpa_sub_upd, rbar_update, link_eq_req, ep_elbi_app
      - description:
          Combined PM interrupt, which is used to signal the following
          interrupts - linkst_in_l1sub, linkst_in_l1, linkst_in_l2,
          linkst_in_l0s, linkst_out_l1sub, linkst_out_l1, linkst_out_l2,
          linkst_out_l0s, pm_dstate_update
      - description:
          Combined message interrupt, which is used to signal the following
          interrupts - ven_msg, unlock_msg, ltr_msg, cfg_pme, cfg_pme_msi,
          pm_pme, pm_to_ack, pm_turnoff, obff_idle, obff_obff, obff_cpu_active
      - description:
          Combined legacy interrupt, which is used to signal the following
          interrupts - inta, intb, intc, intd, tx_inta, tx_intb, tx_intc,
          tx_intd
      - description:
          Combined error interrupt, which is used to signal the following
          interrupts - aer_rc_err, aer_rc_err_msi, rx_cpl_timeout,
          tx_cpl_timeout, cor_err_sent, nf_err_sent, f_err_sent, cor_err_rx,
          nf_err_rx, f_err_rx, radm_qoverflow
      - description:
          eDMA write channel 0 interrupt
      - description:
          eDMA write channel 1 interrupt
      - description:
          eDMA read channel 0 interrupt
      - description:
          eDMA read channel 1 interrupt

  interrupt-names:
    minItems: 5
    items:
      - const: sys
      - const: pmc
      - const: msg
      - const: legacy
      - const: err
      - const: dma0
      - const: dma1
      - const: dma2
      - const: dma3

  num-lanes: true

  phys:
    maxItems: 1

  phy-names:
    const: pcie-phy

  power-domains:
    maxItems: 1

  resets:
    minItems: 1
    maxItems: 2

  reset-names:
    oneOf:
      - const: pipe
      - items:
          - const: pwr
          - const: pipe

required:
  - compatible
  - reg
  - reg-names
  - clocks
  - clock-names
  - num-lanes
  - phys
  - phy-names
  - power-domains
  - resets
  - reset-names

additionalProperties: true

...
@@ -0,0 +1,95 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/pci/rockchip-dw-pcie-ep.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: DesignWare based PCIe Endpoint controller on Rockchip SoCs

maintainers:
  - Niklas Cassel <cassel@kernel.org>

description: |+
  RK3588 SoC PCIe Endpoint controller is based on the Synopsys DesignWare
  PCIe IP and thus inherits all the common properties defined in
  snps,dw-pcie-ep.yaml.

allOf:
  - $ref: /schemas/pci/snps,dw-pcie-ep.yaml#
  - $ref: /schemas/pci/rockchip-dw-pcie-common.yaml#

properties:
  compatible:
    enum:
      - rockchip,rk3568-pcie-ep
      - rockchip,rk3588-pcie-ep

  reg:
    items:
      - description: Data Bus Interface (DBI) registers
      - description: Data Bus Interface (DBI) shadow registers
      - description: Rockchip designed configuration registers
      - description: Memory region used to map remote RC address space
      - description: Internal Address Translation Unit (iATU) registers

  reg-names:
    items:
      - const: dbi
      - const: dbi2
      - const: apb
      - const: addr_space
      - const: atu

required:
  - interrupts
  - interrupt-names

unevaluatedProperties: false

examples:
  - |
    #include <dt-bindings/clock/rockchip,rk3588-cru.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/interrupt-controller/irq.h>
    #include <dt-bindings/power/rk3588-power.h>
    #include <dt-bindings/reset/rockchip,rk3588-cru.h>

    soc {
        #address-cells = <2>;
        #size-cells = <2>;

        pcie3x4_ep: pcie-ep@fe150000 {
            compatible = "rockchip,rk3588-pcie-ep";
            reg = <0xa 0x40000000 0x0 0x00100000>,
                  <0xa 0x40100000 0x0 0x00100000>,
                  <0x0 0xfe150000 0x0 0x00010000>,
                  <0x9 0x00000000 0x0 0x40000000>,
                  <0xa 0x40300000 0x0 0x00100000>;
            reg-names = "dbi", "dbi2", "apb", "addr_space", "atu";
            clocks = <&cru ACLK_PCIE_4L_MSTR>, <&cru ACLK_PCIE_4L_SLV>,
                     <&cru ACLK_PCIE_4L_DBI>, <&cru PCLK_PCIE_4L>,
                     <&cru CLK_PCIE_AUX0>, <&cru CLK_PCIE4L_PIPE>;
            clock-names = "aclk_mst", "aclk_slv",
                          "aclk_dbi", "pclk",
                          "aux", "pipe";
            interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH 0>,
                         <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH 0>,
                         <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH 0>,
                         <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH 0>,
                         <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH 0>,
                         <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH 0>,
                         <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH 0>,
                         <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH 0>,
                         <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH 0>;
            interrupt-names = "sys", "pmc", "msg", "legacy", "err",
                              "dma0", "dma1", "dma2", "dma3";
            max-link-speed = <3>;
            num-lanes = <4>;
            phys = <&pcie30phy>;
            phy-names = "pcie-phy";
            power-domains = <&power RK3588_PD_PCIE>;
            resets = <&cru SRST_PCIE0_POWER_UP>, <&cru SRST_P_PCIE0>;
            reset-names = "pwr", "pipe";
        };
    };
...
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/pci/rockchip-dw-pcie.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: DesignWare based PCIe controller on Rockchip SoCs
title: DesignWare based PCIe Root Complex controller on Rockchip SoCs

maintainers:
  - Shawn Lin <shawn.lin@rock-chips.com>

@@ -12,12 +12,13 @@ maintainers:
  - Heiko Stuebner <heiko@sntech.de>

description: |+
  RK3568 SoC PCIe host controller is based on the Synopsys DesignWare
  RK3568 SoC PCIe Root Complex controller is based on the Synopsys DesignWare
  PCIe IP and thus inherits all the common properties defined in
  snps,dw-pcie.yaml.

allOf:
  - $ref: /schemas/pci/snps,dw-pcie.yaml#
  - $ref: /schemas/pci/rockchip-dw-pcie-common.yaml#

properties:
  compatible:

@@ -40,61 +41,6 @@ properties:
      - const: apb
      - const: config

  clocks:
    minItems: 5
    items:
      - description: AHB clock for PCIe master
      - description: AHB clock for PCIe slave
      - description: AHB clock for PCIe dbi
      - description: APB clock for PCIe
      - description: Auxiliary clock for PCIe
      - description: PIPE clock
      - description: Reference clock for PCIe

  clock-names:
    minItems: 5
    items:
      - const: aclk_mst
      - const: aclk_slv
      - const: aclk_dbi
      - const: pclk
      - const: aux
      - const: pipe
      - const: ref

  interrupts:
    items:
      - description:
          Combined system interrupt, which is used to signal the following
          interrupts - phy_link_up, dll_link_up, link_req_rst_not, hp_pme,
          hp, hp_msi, link_auto_bw, link_auto_bw_msi, bw_mgt, bw_mgt_msi,
          edma_wr, edma_rd, dpa_sub_upd, rbar_update, link_eq_req, ep_elbi_app
      - description:
          Combined PM interrupt, which is used to signal the following
          interrupts - linkst_in_l1sub, linkst_in_l1, linkst_in_l2,
          linkst_in_l0s, linkst_out_l1sub, linkst_out_l1, linkst_out_l2,
          linkst_out_l0s, pm_dstate_update
      - description:
          Combined message interrupt, which is used to signal the following
          interrupts - ven_msg, unlock_msg, ltr_msg, cfg_pme, cfg_pme_msi,
          pm_pme, pm_to_ack, pm_turnoff, obff_idle, obff_obff, obff_cpu_active
      - description:
          Combined legacy interrupt, which is used to signal the following
          interrupts - inta, intb, intc, intd
      - description:
          Combined error interrupt, which is used to signal the following
          interrupts - aer_rc_err, aer_rc_err_msi, rx_cpl_timeout,
          tx_cpl_timeout, cor_err_sent, nf_err_sent, f_err_sent, cor_err_rx,
          nf_err_rx, f_err_rx, radm_qoverflow

  interrupt-names:
    items:
      - const: sys
      - const: pmc
      - const: msg
      - const: legacy
      - const: err

  legacy-interrupt-controller:
    description: Interrupt controller node for handling legacy PCI interrupts.
    type: object

@@ -119,47 +65,14 @@ properties:

  msi-map: true

  num-lanes: true

  phys:
    maxItems: 1

  phy-names:
    const: pcie-phy

  power-domains:
    maxItems: 1

  ranges:
    minItems: 2
    maxItems: 3

  resets:
    minItems: 1
    maxItems: 2

  reset-names:
    oneOf:
      - const: pipe
      - items:
          - const: pwr
          - const: pipe

  vpcie3v3-supply: true

required:
  - compatible
  - reg
  - reg-names
  - clocks
  - clock-names
  - msi-map
  - num-lanes
  - phys
  - phy-names
  - power-domains
  - resets
  - reset-names

unevaluatedProperties: false
@@ -100,7 +100,7 @@ properties:
          for new bindings.
        oneOf:
          - description: See native 'elbi/app' CSR region for details.
            enum: [ link, appl ]
            enum: [ apb, link, appl ]
          - description: See native 'atu' CSR region for details.
            enum: [ atu_dma ]
allOf:

@@ -151,12 +151,21 @@ properties:
            Application-specific IRQ raised depending on the vendor-specific
            events basis.
          const: app
        - description:
            Interrupts triggered when the controller itself (in Endpoint mode)
            has sent an Assert_INT{A,B,C,D}/Desassert_INT{A,B,C,D} message to
            the upstream device.
          pattern: "^tx_int(a|b|c|d)$"
        - description:
            Combined interrupt signal raised when the controller has sent an
            Assert_INT{A,B,C,D} message. See "^tx_int(a|b|c|d)$" for details.
          const: legacy
        - description:
            Vendor-specific IRQ names. Consider using the generic names above
            for new bindings.
          oneOf:
            - description: See native "app" IRQ for details
              enum: [ intr ]
              enum: [ intr, sys, pmc, msg, err ]

  max-functions:
    maximum: 32
@@ -90,7 +90,7 @@ examples:
                            <0 0 0 3 &pcie_intc_0 2>,
                            <0 0 0 4 &pcie_intc_0 3>;
            bus-range = <0x00 0xff>;
            ranges = <0x02000000 0x0 0xe0000000 0x0 0xe0000000 0x0 0x10000000>,
            ranges = <0x02000000 0x0 0xe0010000 0x0 0xe0010000 0x0 0x10000000>,
                     <0x43000000 0x80 0x00000000 0x80 0x00000000 0x0 0x80000000>;
            msi-map = <0x0 &its_gic 0x0 0x10000>;
            reg = <0x0 0xfca10000 0x0 0x1000>,
@@ -293,11 +293,6 @@ struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
}
EXPORT_SYMBOL_GPL(acpi_pci_find_root);

struct acpi_handle_node {
        struct list_head node;
        acpi_handle handle;
};

/**
 * acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev
 * @handle: the handle in question

@@ -1008,7 +1003,6 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
        int node = acpi_get_node(device->handle);
        struct pci_bus *bus;
        struct pci_host_bridge *host_bridge;
        union acpi_object *obj;

        info->root = root;
        info->bridge = device;

@@ -1050,17 +1044,6 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
        if (!(root->osc_ext_control_set & OSC_CXL_ERROR_REPORTING_CONTROL))
                host_bridge->native_cxl_error = 0;

        /*
         * Evaluate the "PCI Boot Configuration" _DSM Function. If it
         * exists and returns 0, we must preserve any PCI resource
         * assignments made by firmware for this host bridge.
         */
        obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 1,
                                      DSM_PCI_PRESERVE_BOOT_CONFIG, NULL, ACPI_TYPE_INTEGER);
        if (obj && obj->integer.value == 0)
                host_bridge->preserve_config = 1;
        ACPI_FREE(obj);

        acpi_dev_power_up_children_with_adr(device);

        pci_scan_child_bus(bus);
@@ -42,12 +42,11 @@ static int vbox_accel_init(struct vbox_private *vbox)
        /* Take a command buffer for each screen from the end of usable VRAM. */
        vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

        vbox->vbva_buffers = pci_iomap_range(pdev, 0,
                                             vbox->available_vram_size,
                                             vbox->num_crtcs *
                                             VBVA_MIN_BUFFER_SIZE);
        if (!vbox->vbva_buffers)
                return -ENOMEM;
        vbox->vbva_buffers = pcim_iomap_range(
                pdev, 0, vbox->available_vram_size,
                vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE);
        if (IS_ERR(vbox->vbva_buffers))
                return PTR_ERR(vbox->vbva_buffers);

        for (i = 0; i < vbox->num_crtcs; ++i) {
                vbva_setup_buffer_context(&vbox->vbva_info[i],

@@ -116,11 +115,10 @@ int vbox_hw_init(struct vbox_private *vbox)
        DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

        /* Map guest-heap at end of vram */
        vbox->guest_heap =
                pci_iomap_range(pdev, 0, GUEST_HEAP_OFFSET(vbox),
                                GUEST_HEAP_SIZE);
        if (!vbox->guest_heap)
                return -ENOMEM;
        vbox->guest_heap = pcim_iomap_range(pdev, 0,
                        GUEST_HEAP_OFFSET(vbox), GUEST_HEAP_SIZE);
        if (IS_ERR(vbox->guest_heap))
                return PTR_ERR(vbox->guest_heap);

        /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
        vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1,
@@ -7,6 +7,7 @@
 */

#include <linux/crc32.h>
#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>

@@ -71,6 +72,7 @@
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_TI_J721S2 0xb013
#define PCI_DEVICE_ID_LS1088A 0x80c0
#define PCI_DEVICE_ID_IMX8 0x0808

@@ -83,6 +85,9 @@
#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031

#define PCI_VENDOR_ID_ROCKCHIP 0x1d87
#define PCI_DEVICE_ID_ROCKCHIP_RK3588 0x3588

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \

@@ -139,18 +144,6 @@ static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
        writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
                                              int bar, int offset)
{
        return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
                                                int bar, u32 offset, u32 value)
{
        writel(value, test->bar[bar] + offset);
}

static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
        struct pci_endpoint_test *test = dev_id;
@@ -271,31 +264,60 @@ static const u32 bar_test_pattern[] = {
        0xA5A5A5A5,
};

static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
                                        enum pci_barno barno, int offset,
                                        void *write_buf, void *read_buf,
                                        int size)
{
        memset(write_buf, bar_test_pattern[barno], size);
        memcpy_toio(test->bar[barno] + offset, write_buf, size);

        memcpy_fromio(read_buf, test->bar[barno] + offset, size);

        return memcmp(write_buf, read_buf, size);
}

static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
                                  enum pci_barno barno)
{
        int j;
        u32 val;
        int size;
        int j, bar_size, buf_size, iters, remain;
        void *write_buf __free(kfree) = NULL;
        void *read_buf __free(kfree) = NULL;
        struct pci_dev *pdev = test->pdev;

        if (!test->bar[barno])
                return false;

        size = pci_resource_len(pdev, barno);
        bar_size = pci_resource_len(pdev, barno);

        if (barno == test->test_reg_bar)
                size = 0x4;
                bar_size = 0x4;

        for (j = 0; j < size; j += 4)
                pci_endpoint_test_bar_writel(test, barno, j,
                                             bar_test_pattern[barno]);
        /*
         * Allocate a buffer of max size 1MB, and reuse that buffer while
         * iterating over the whole BAR size (which might be much larger).
         */
        buf_size = min(SZ_1M, bar_size);

        for (j = 0; j < size; j += 4) {
                val = pci_endpoint_test_bar_readl(test, barno, j);
                if (val != bar_test_pattern[barno])
        write_buf = kmalloc(buf_size, GFP_KERNEL);
        if (!write_buf)
                return false;

        read_buf = kmalloc(buf_size, GFP_KERNEL);
        if (!read_buf)
                return false;

        iters = bar_size / buf_size;
        for (j = 0; j < iters; j++)
                if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * j,
                                                 write_buf, read_buf, buf_size))
                        return false;

        remain = bar_size % buf_size;
        if (remain)
                if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * iters,
                                                 write_buf, read_buf, remain))
                        return false;
        }

        return true;
}
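The rewritten test above walks an arbitrarily large BAR with one reusable bounce buffer. As a side note, a standalone sketch of the same chunking arithmetic, with hypothetical sizes that are not taken from the driver, shows how the full chunks and the odd-sized tail are covered:

```c
#include <stdio.h>

int main(void)
{
        /* Hypothetical sizes: a 5 MiB + 4 KiB BAR tested via a 1 MiB buffer. */
        const unsigned long bar_size = 5UL * 1024 * 1024 + 4096;
        const unsigned long buf_size = 1024 * 1024;
        unsigned long iters = bar_size / buf_size;   /* number of full chunks */
        unsigned long remain = bar_size % buf_size;  /* odd-sized tail */
        unsigned long j;

        for (j = 0; j < iters; j++)
                printf("compare chunk: offset=%lu len=%lu\n",
                       j * buf_size, buf_size);
        if (remain)
                printf("compare tail:  offset=%lu len=%lu\n",
                       iters * buf_size, remain);
        return 0;
}
```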
@@ -337,6 +359,22 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
        return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
}

static int pci_endpoint_test_validate_xfer_params(struct device *dev,
                struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
        if (!param->size) {
                dev_dbg(dev, "Data size is zero\n");
                return -EINVAL;
        }

        if (param->size > SIZE_MAX - alignment) {
                dev_dbg(dev, "Maximum transfer data size exceeded\n");
                return -EINVAL;
        }

        return 0;
}

static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
                                   unsigned long arg)
{
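The `SIZE_MAX - alignment` comparison in the new helper is an integer-overflow guard: callers go on to allocate `size + alignment` bytes, so any `size` above `SIZE_MAX - alignment` would make that sum wrap around. A minimal sketch of the same guard outside the driver, with a hypothetical helper name:

```c
#include <stdint.h>
#include <stddef.h>

/*
 * Hypothetical stand-in for the driver helper: reject sizes for which
 * "size + alignment" would wrap around, before any allocation happens.
 */
static int xfer_params_ok(size_t size, size_t alignment)
{
        if (size == 0)
                return 0;               /* nothing to transfer */
        if (size > SIZE_MAX - alignment)
                return 0;               /* size + alignment would overflow */
        return 1;
}
```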
@@ -368,9 +406,11 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
                return false;
        }

        err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
        if (err)
                return false;

        size = param.size;
        if (size > SIZE_MAX - alignment)
                goto err;

        use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
        if (use_dma)

@@ -502,9 +542,11 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
                return false;
        }

        err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
        if (err)
                return false;

        size = param.size;
        if (size > SIZE_MAX - alignment)
                goto err;

        use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
        if (use_dma)

@@ -600,9 +642,11 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
                return false;
        }

        err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
        if (err)
                return false;

        size = param.size;
        if (size > SIZE_MAX - alignment)
                goto err;

        use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
        if (use_dma)

@@ -801,11 +845,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
        init_completion(&test->irq_raised);
        mutex_init(&test->mutex);

        if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
                dev_err(dev, "Cannot set DMA mask\n");
                return -EINVAL;
        }
        dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

        err = pci_enable_device(pdev);
        if (err) {

@@ -957,6 +997,15 @@ static const struct pci_endpoint_test_data j721e_data = {
        .irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data rk3588_data = {
        .alignment = SZ_64K,
        .irq_type = IRQ_TYPE_MSI,
};

/*
 * If the controller's Vendor/Device ID are programmable, you may be able to
 * use one of the existing entries for testing instead of adding a new one.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
          .driver_data = (kernel_ulong_t)&default_data,

@@ -991,6 +1040,12 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
          .driver_data = (kernel_ulong_t)&j721e_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
          .driver_data = (kernel_ulong_t)&j721e_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
          .driver_data = (kernel_ulong_t)&rk3588_data,
        },
        { }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
@@ -5286,7 +5286,6 @@ err_load:
        ice_deinit(pf);
err_init:
        ice_adapter_put(pdev);
        pci_disable_device(pdev);
        return err;
}

@@ -5393,7 +5392,6 @@ static void ice_remove(struct pci_dev *pdev)
        ice_set_wake(pf);

        ice_adapter_put(pdev);
        pci_disable_device(pdev);
}

/**
@@ -1565,7 +1565,7 @@ static struct class_interface switchtec_interface = {

static int __init switchtec_ntb_init(void)
{
        switchtec_interface.class = switchtec_class;
        switchtec_interface.class = &switchtec_class;
        return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

@@ -175,10 +176,7 @@ static void pci_clip_resource_to_region(struct pci_bus *bus,
static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
                resource_size_t size, resource_size_t align,
                resource_size_t min, unsigned long type_mask,
                resource_size_t (*alignf)(void *,
                                          const struct resource *,
                                          resource_size_t,
                                          resource_size_t),
                resource_alignf alignf,
                void *alignf_data,
                struct pci_bus_region *region)
{

@@ -249,10 +247,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
                resource_size_t size, resource_size_t align,
                resource_size_t min, unsigned long type_mask,
                resource_size_t (*alignf)(void *,
                                          const struct resource *,
                                          resource_size_t,
                                          resource_size_t),
                resource_alignf alignf,
                void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT

@@ -332,6 +327,7 @@ void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }
 */
void pci_bus_add_device(struct pci_dev *dev)
{
        struct device_node *dn = dev->dev.of_node;
        int retval;

        /*

@@ -346,7 +342,7 @@ void pci_bus_add_device(struct pci_dev *dev)
        pci_proc_attach_device(dev);
        pci_bridge_d3_update(dev);

        dev->match_driver = true;
        dev->match_driver = !dn || of_device_is_available(dn);
        retval = device_attach(&dev->dev);
        if (retval < 0 && retval != -EPROBE_DEFER)
                pci_warn(dev, "device attach failed (%d)\n", retval);
@@ -13,11 +13,11 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>

@@ -113,9 +113,9 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
        writel(value, pcie->base + offset);
}

static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
        return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
        return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;
}

static int dra7xx_pcie_link_up(struct dw_pcie *pci)

@@ -474,7 +474,7 @@ static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
                return ret;
        }

        dw_pcie_ep_init_notify(ep);
        pci_epc_init_notify(ep->epc);

        return 0;
}
@@ -54,43 +54,11 @@
struct exynos_pcie {
        struct dw_pcie pci;
        void __iomem *elbi_base;
        struct clk *clk;
        struct clk *bus_clk;
        struct clk_bulk_data *clks;
        struct phy *phy;
        struct regulator_bulk_data supplies[2];
};

static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep)
{
        struct device *dev = ep->pci.dev;
        int ret;

        ret = clk_prepare_enable(ep->clk);
        if (ret) {
                dev_err(dev, "cannot enable pcie rc clock");
                return ret;
        }

        ret = clk_prepare_enable(ep->bus_clk);
        if (ret) {
                dev_err(dev, "cannot enable pcie bus clock");
                goto err_bus_clk;
        }

        return 0;

err_bus_clk:
        clk_disable_unprepare(ep->clk);

        return ret;
}

static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep)
{
        clk_disable_unprepare(ep->bus_clk);
        clk_disable_unprepare(ep->clk);
}

static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
{
        writel(val, base + reg);

@@ -332,17 +300,9 @@ static int exynos_pcie_probe(struct platform_device *pdev)
        if (IS_ERR(ep->elbi_base))
                return PTR_ERR(ep->elbi_base);

        ep->clk = devm_clk_get(dev, "pcie");
        if (IS_ERR(ep->clk)) {
                dev_err(dev, "Failed to get pcie rc clock\n");
                return PTR_ERR(ep->clk);
        }

        ep->bus_clk = devm_clk_get(dev, "pcie_bus");
        if (IS_ERR(ep->bus_clk)) {
                dev_err(dev, "Failed to get pcie bus clock\n");
                return PTR_ERR(ep->bus_clk);
        }
        ret = devm_clk_bulk_get_all_enable(dev, &ep->clks);
        if (ret < 0)
                return ret;

        ep->supplies[0].supply = "vdd18";
        ep->supplies[1].supply = "vdd10";

@@ -351,10 +311,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)
        if (ret)
                return ret;

        ret = exynos_pcie_init_clk_resources(ep);
        if (ret)
                return ret;

        ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
        if (ret)
                return ret;

@@ -369,7 +325,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)

fail_probe:
        phy_exit(ep->phy);
        exynos_pcie_deinit_clk_resources(ep);
        regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);

        return ret;

@@ -383,7 +338,6 @@ static void exynos_pcie_remove(struct platform_device *pdev)
        exynos_pcie_assert_core_reset(ep);
        phy_power_off(ep->phy);
        phy_exit(ep->phy);
        exynos_pcie_deinit_clk_resources(ep);
        regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
}

@@ -437,5 +391,6 @@ static struct platform_driver exynos_pcie_driver = {
        },
};
module_platform_driver(exynos_pcie_driver);
MODULE_DESCRIPTION("Samsung Exynos PCIe host controller driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
@@ -11,14 +11,13 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

@@ -107,8 +106,7 @@ struct imx6_pcie_drvdata {

struct imx6_pcie {
        struct dw_pcie *pci;
        int reset_gpio;
        bool gpio_active_high;
        struct gpio_desc *reset_gpiod;
        bool link_is_up;
        struct clk_bulk_data clks[IMX6_PCIE_MAX_CLKS];
        struct regmap *iomuxc_gpr;

@@ -721,9 +719,7 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
        }

        /* Some boards don't have PCIe reset GPIO. */
        if (gpio_is_valid(imx6_pcie->reset_gpio))
                gpio_set_value_cansleep(imx6_pcie->reset_gpio,
                                        imx6_pcie->gpio_active_high);
        gpiod_set_value_cansleep(imx6_pcie->reset_gpiod, 1);
}

static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)

@@ -771,10 +767,9 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
        }

        /* Some boards don't have PCIe reset GPIO. */
        if (gpio_is_valid(imx6_pcie->reset_gpio)) {
        if (imx6_pcie->reset_gpiod) {
                msleep(100);
                gpio_set_value_cansleep(imx6_pcie->reset_gpio,
                                        !imx6_pcie->gpio_active_high);
                gpiod_set_value_cansleep(imx6_pcie->reset_gpiod, 0);
                /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
                msleep(100);
        }

@@ -1131,7 +1126,7 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
                return ret;
        }

        dw_pcie_ep_init_notify(ep);
        pci_epc_init_notify(ep->epc);

        /* Start LTSSM. */
        imx6_pcie_ltssm_enable(dev);

@@ -1285,22 +1280,11 @@ static int imx6_pcie_probe(struct platform_device *pdev)
                return PTR_ERR(pci->dbi_base);

        /* Fetch GPIOs */
        imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
        imx6_pcie->gpio_active_high = of_property_read_bool(node,
                                                "reset-gpio-active-high");
        if (gpio_is_valid(imx6_pcie->reset_gpio)) {
                ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
                                imx6_pcie->gpio_active_high ?
                                GPIOF_OUT_INIT_HIGH :
                                GPIOF_OUT_INIT_LOW,
                                "PCIe reset");
                if (ret) {
                        dev_err(dev, "unable to get reset gpio\n");
                        return ret;
                }
        } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
                return imx6_pcie->reset_gpio;
        }
        imx6_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(imx6_pcie->reset_gpiod))
                return dev_err_probe(dev, PTR_ERR(imx6_pcie->reset_gpiod),
                                     "unable to get reset gpio\n");
        gpiod_set_consumer_name(imx6_pcie->reset_gpiod, "PCIe reset");

        if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS)
                return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
@@ -34,6 +34,11 @@
#define PCIE_DEVICEID_SHIFT 16

/* Application registers */
#define PID 0x000
#define RTL GENMASK(15, 11)
#define RTL_SHIFT 11
#define AM6_PCI_PG1_RTL_VER 0x15

#define CMD_STATUS 0x004
#define LTSSM_EN_VAL BIT(0)
#define OB_XLAT_EN_VAL BIT(1)

@@ -104,6 +109,8 @@

#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)

#define PCI_DEVICE_ID_TI_AM654X 0xb00c

struct ks_pcie_of_data {
        enum dw_pcie_device_mode mode;
        const struct dw_pcie_host_ops *host_ops;

@@ -245,8 +252,68 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
        .irq_unmask = ks_pcie_msi_unmask,
};

/**
 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 *           PCIe host controller driver information.
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
        u32 val;

        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
        val |= DBI_CS2;
        ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

        do {
                val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
        } while (!(val & DBI_CS2));
}

/**
 * ks_pcie_clear_dbi_mode() - Disable DBI mode
 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 *           PCIe host controller driver information.
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
        u32 val;

        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
        val &= ~DBI_CS2;
        ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

        do {
                val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
        } while (val & DBI_CS2);
}

static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

        /* Configure and set up BAR0 */
        ks_pcie_set_dbi_mode(ks_pcie);

        /* Enable BAR0 */
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);

        ks_pcie_clear_dbi_mode(ks_pcie);

        /*
         * For BAR0, just setting bus address for inbound writes (MSI) should
         * be sufficient. Use physical address to avoid any conflicts.
         */
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);

        pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
        return dw_pcie_allocate_domains(pp);
}
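Both helpers above rely on the same write-then-read-back idiom: because DBI_CS2 lives in a different clock domain, a write only counts once it is observed on read-back. A self-contained sketch of the idiom follows; the register access is simulated and the bit position is hypothetical, not taken from the driver:

```c
#include <stdint.h>
#include <stdio.h>

#define DBI_CS2 (1u << 5)              /* hypothetical bit position */

static uint32_t cmd_status;            /* simulated CMD_STATUS register */

static uint32_t app_readl(void)        { return cmd_status; }
static void app_writel(uint32_t val)   { cmd_status = val; }

static void set_dbi_mode(void)
{
        uint32_t val = app_readl();

        app_writel(val | DBI_CS2);
        do {
                /* poll until the other clock domain latches the new value */
                val = app_readl();
        } while (!(val & DBI_CS2));
}

int main(void)
{
        set_dbi_mode();
        printf("DBI_CS2 set: %#x\n", cmd_status);
        return 0;
}
```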
@@ -340,48 +407,6 @@ static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
        .xlate = irq_domain_xlate_onetwocell,
};

/**
 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 *           PCIe host controller driver information.
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
        u32 val;

        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
        val |= DBI_CS2;
        ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

        do {
                val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
        } while (!(val & DBI_CS2));
}

/**
 * ks_pcie_clear_dbi_mode() - Disable DBI mode
 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 *           PCIe host controller driver information.
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
        u32 val;

        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
        val &= ~DBI_CS2;
        ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

        do {
                val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
        } while (val & DBI_CS2);
}

static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
        u32 val;

@@ -452,44 +477,10 @@ static struct pci_ops ks_child_pcie_ops = {
        .write = pci_generic_config_write,
};

/**
 * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
 * @bus: A pointer to the PCI bus structure.
 *
 * This sets BAR0 to enable inbound access for MSI_IRQ register
 */
static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
{
        struct dw_pcie_rp *pp = bus->sysdata;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

        if (!pci_is_root_bus(bus))
                return 0;

        /* Configure and set up BAR0 */
        ks_pcie_set_dbi_mode(ks_pcie);

        /* Enable BAR0 */
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);

        ks_pcie_clear_dbi_mode(ks_pcie);

        /*
         * For BAR0, just setting bus address for inbound writes (MSI) should
         * be sufficient. Use physical address to avoid any conflicts.
         */
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);

        return 0;
}

static struct pci_ops ks_pcie_ops = {
        .map_bus = dw_pcie_own_conf_map_bus,
        .read = pci_generic_config_read,
        .write = pci_generic_config_write,
        .add_bus = ks_pcie_v3_65_add_bus,
};

/**

@@ -532,7 +523,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
static void ks_pcie_quirk(struct pci_dev *dev)
{
        struct pci_bus *bus = dev->bus;
        struct keystone_pcie *ks_pcie;
        struct device *bridge_dev;
        struct pci_dev *bridge;
        u32 val;

        static const struct pci_device_id rc_pci_devids[] = {
                { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
                  .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },

@@ -544,6 +539,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
                  .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
                { 0, },
        };
        static const struct pci_device_id am6_pci_devids[] = {
                { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
                  .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
                { 0, },
        };

        if (pci_is_root_bus(bus))
                bridge = dev;

@@ -565,10 +565,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
         */
        if (pci_match_id(rc_pci_devids, bridge)) {
                if (pcie_get_readrq(dev) > 256) {
                        dev_info(&dev->dev, "limiting MRRS to 256\n");
                        dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
                        pcie_set_readrq(dev, 256);
                }
        }

        /*
         * Memory transactions fail with PCI controller in AM654 PG1.0
         * when MRRS is set to more than 128 bytes. Force the MRRS to
         * 128 bytes in all downstream devices.
         */
        if (pci_match_id(am6_pci_devids, bridge)) {
                bridge_dev = pci_get_host_bridge_device(dev);
                if (!bridge_dev || !bridge_dev->parent)
                        return;

                ks_pcie = dev_get_drvdata(bridge_dev->parent);
                if (!ks_pcie)
                        return;

                val = ks_pcie_app_readl(ks_pcie, PID);
                val &= RTL;
                val >>= RTL_SHIFT;
                if (val != AM6_PCI_PG1_RTL_VER)
                        return;

                if (pcie_get_readrq(dev) > 128) {
                        dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
                        pcie_set_readrq(dev, 128);
                }
        }
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);

@@ -1303,7 +1329,7 @@ static int ks_pcie_probe(struct platform_device *pdev)
                        goto err_ep_init;
                }

                dw_pcie_ep_init_notify(&pci->ep);
                pci_epc_init_notify(pci->ep.epc);

                break;
        default:
@@ -104,7 +104,7 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
                dev_dbg(pci->dev, "Link up\n");
        } else if (val & PEX_PF0_PME_MES_DR_LDD) {
                dev_dbg(pci->dev, "Link down\n");
                pci_epc_linkdown(pci->ep.epc);
                dw_pcie_ep_linkdown(&pci->ep);
        } else if (val & PEX_PF0_PME_MES_DR_HRD) {
                dev_dbg(pci->dev, "Hot reset\n");
        }

@@ -286,7 +286,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
                return ret;
        }

        dw_pcie_ep_init_notify(&pci->ep);
        pci_epc_init_notify(pci->ep.epc);

        return ls_pcie_ep_interrupt_init(pcie, pdev);
}
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
|
|||
.write = pci_generic_config_write,
|
||||
};
|
||||
|
||||
static void al_pcie_config_prepare(struct al_pcie *pcie)
|
||||
static int al_pcie_config_prepare(struct al_pcie *pcie)
|
||||
{
|
||||
struct al_pcie_target_bus_cfg *target_bus_cfg;
|
||||
struct dw_pcie_rp *pp = &pcie->pci->pp;
|
||||
unsigned int ecam_bus_mask;
|
||||
struct resource_entry *ft;
|
||||
u32 cfg_control_offset;
|
||||
struct resource *bus;
|
||||
u8 subordinate_bus;
|
||||
u8 secondary_bus;
|
||||
u32 cfg_control;
|
||||
u32 reg;
|
||||
struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
|
||||
|
||||
ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
|
||||
if (!ft)
|
||||
return -ENODEV;
|
||||
|
||||
bus = ft->res;
|
||||
target_bus_cfg = &pcie->target_bus_cfg;
|
||||
|
||||
ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
|
||||
|
@ -287,6 +293,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
|
|||
FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
|
||||
|
||||
al_pcie_controller_writel(pcie, cfg_control_offset, reg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int al_pcie_host_init(struct dw_pcie_rp *pp)
|
||||
|
@ -305,7 +313,9 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp)
|
|||
if (rc)
|
||||
return rc;
|
||||
|
||||
al_pcie_config_prepare(pcie);
|
||||
rc = al_pcie_config_prepare(pcie);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -94,7 +94,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
        regmap_write(artpec6_pcie->regmap, offset, val);
}

static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
        struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
        struct dw_pcie_rp *pp = &pci->pp;

@@ -102,13 +102,13 @@ static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)

        switch (artpec6_pcie->mode) {
        case DW_PCIE_RC_TYPE:
                return pci_addr - pp->cfg0_base;
                return cpu_addr - pp->cfg0_base;
        case DW_PCIE_EP_TYPE:
                return pci_addr - ep->phys_base;
                return cpu_addr - ep->phys_base;
        default:
                dev_err(pci->dev, "UNKNOWN device type\n");
        }
        return pci_addr;
        return cpu_addr;
}

static int artpec6_pcie_establish_link(struct dw_pcie *pci)

@@ -452,7 +452,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
                        return ret;
                }

                dw_pcie_ep_init_notify(&pci->ep);
                pci_epc_init_notify(pci->ep.epc);

                break;
        default:
@@ -15,30 +15,6 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/**
 * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
 * @ep: DWC EP device
 */
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
        struct pci_epc *epc = ep->epc;

        pci_epc_linkup(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);

/**
 * dw_pcie_ep_init_notify - Notify EPF drivers about EPC initialization complete
 * @ep: DWC EP device
 */
void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
{
        struct pci_epc *epc = ep->epc;

        pci_epc_init_notify(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);

/**
 * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
 *                               the endpoint function

@@ -161,7 +137,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
        if (!ep->bar_to_atu[bar])
                free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
        else
                free_win = ep->bar_to_atu[bar];
                free_win = ep->bar_to_atu[bar] - 1;

        if (free_win >= pci->num_ib_windows) {
                dev_err(pci->dev, "No free inbound window\n");
@@ -175,15 +151,18 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
                return ret;
        }

        ep->bar_to_atu[bar] = free_win;
        /*
         * Always increment free_win before assignment, since value 0 is used to identify
         * unallocated mapping.
         */
        ep->bar_to_atu[bar] = free_win + 1;
        set_bit(free_win, ep->ib_window_map);

        return 0;
}

static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
                                   phys_addr_t phys_addr,
                                   u64 pci_addr, size_t size)
static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
                                   struct dw_pcie_ob_atu_cfg *atu)
{
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        u32 free_win;
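The `free_win + 1` store is a sentinel-biasing scheme: index 0 in `bar_to_atu[]` means "no window allocated", so real window numbers are stored biased by one and un-biased on read, as the `- 1` in the earlier hunk shows. A tiny illustration of the idea with hypothetical names, not taken from the driver:

```c
#include <stdbool.h>
#include <stdio.h>

static unsigned int bar_to_win[6];   /* 0 = unallocated, n + 1 = window n */

static void assign_window(int bar, unsigned int win) { bar_to_win[bar] = win + 1; }
static bool has_window(int bar)                      { return bar_to_win[bar] != 0; }
static unsigned int get_window(int bar)              { return bar_to_win[bar] - 1; }

int main(void)
{
        assign_window(2, 0);         /* window 0 stays distinguishable from "none" */
        printf("bar2 allocated=%d window=%u, bar3 allocated=%d\n",
               has_window(2), get_window(2), has_window(3));
        return 0;
}
```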
@ -195,13 +174,13 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
|
||||
phys_addr, pci_addr, size);
|
||||
atu->index = free_win;
|
||||
ret = dw_pcie_prog_outbound_atu(pci, atu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
set_bit(free_win, ep->ob_window_map);
|
||||
ep->outbound_addr[free_win] = phys_addr;
|
||||
ep->outbound_addr[free_win] = atu->cpu_addr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -212,7 +191,10 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 	enum pci_barno bar = epf_bar->barno;
-	u32 atu_index = ep->bar_to_atu[bar];
+	u32 atu_index = ep->bar_to_atu[bar] - 1;
+
+	if (!ep->bar_to_atu[bar])
+		return;
 
 	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
 
@@ -233,6 +215,13 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 	int ret, type;
 	u32 reg;
 
+	/*
+	 * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
+	 * 1 and 2 to form a 64-bit BAR.
+	 */
+	if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
+		return -EINVAL;
+
 	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
 
 	if (!(flags & PCI_BASE_ADDRESS_SPACE))
@@ -301,8 +290,14 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 	int ret;
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct dw_pcie_ob_atu_cfg atu = { 0 };
 
-	ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
+	atu.func_no = func_no;
+	atu.type = PCIE_ATU_TYPE_MEM;
+	atu.cpu_addr = addr;
+	atu.pci_addr = pci_addr;
+	atu.size = size;
+	ret = dw_pcie_ep_outbound_atu(ep, &atu);
 	if (ret) {
 		dev_err(pci->dev, "Failed to enable address\n");
 		return ret;
@@ -632,7 +627,6 @@ void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
 	dw_pcie_edma_remove(pci);
-	ep->epc->init_complete = false;
 }
 EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
 
@@ -674,6 +668,34 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
 	return 0;
 }
 
+static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
+{
+	unsigned int offset;
+	unsigned int nbars;
+	u32 reg, i;
+
+	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+
+	dw_pcie_dbi_ro_wr_en(pci);
+
+	if (offset) {
+		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+			PCI_REBAR_CTRL_NBAR_SHIFT;
+
+		/*
+		 * PCIe r6.0, sec 7.8.6.2 require us to support at least one
+		 * size in the range from 1 MB to 512 GB. Advertise support
+		 * for 1 MB BAR size only.
+		 */
+		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+	}
+
+	dw_pcie_setup(pci);
+	dw_pcie_dbi_ro_wr_dis(pci);
+}
+
 /**
  * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
  * @ep: DWC EP device
@@ -688,13 +710,11 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
 	struct dw_pcie_ep_func *ep_func;
 	struct device *dev = pci->dev;
 	struct pci_epc *epc = ep->epc;
-	unsigned int offset, ptm_cap_base;
-	unsigned int nbars;
+	u32 ptm_cap_base, reg;
 	u8 hdr_type;
 	u8 func_no;
-	int i, ret;
 	void *addr;
-	u32 reg;
+	int ret;
 
 	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
 		   PCI_HEADER_TYPE_MASK;
@@ -757,25 +777,8 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
 	if (ep->ops->init)
 		ep->ops->init(ep);
 
-	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
 	ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
 
 	dw_pcie_dbi_ro_wr_en(pci);
 
-	if (offset) {
-		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
-		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
-			PCI_REBAR_CTRL_NBAR_SHIFT;
-
-		/*
-		 * PCIe r6.0, sec 7.8.6.2 require us to support at least one
-		 * size in the range from 1 MB to 512 GB. Advertise support
-		 * for 1 MB BAR size only.
-		 */
-		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
-			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
-	}
-
 	/*
 	 * PTM responder capability can be disabled only after disabling
 	 * PTM root capability.
@@ -792,8 +795,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
 		dw_pcie_dbi_ro_wr_dis(pci);
 	}
 
-	dw_pcie_setup(pci);
-	dw_pcie_dbi_ro_wr_dis(pci);
+	dw_pcie_ep_init_non_sticky_registers(pci);
 
 	return 0;
 
@@ -804,6 +806,43 @@ err_remove_edma:
 }
 EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
 
+/**
+ * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
+ * @ep: DWC EP device
+ */
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+	struct pci_epc *epc = ep->epc;
+
+	pci_epc_linkup(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+/**
+ * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
+ * @ep: DWC EP device
+ *
+ * Non-sticky registers are also initialized before sending the notification to
+ * the EPF drivers. This is needed since the registers need to be initialized
+ * before the link comes back again.
+ */
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct pci_epc *epc = ep->epc;
+
+	/*
+	 * Initialize the non-sticky DWC registers as they would've reset post
+	 * Link Down. This is specifically needed for drivers not supporting
+	 * PERST# as they have no way to reinitialize the registers before the
+	 * link comes back again.
+	 */
+	dw_pcie_ep_init_non_sticky_registers(pci);
+
+	pci_epc_linkdown(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
+
 /**
  * dw_pcie_ep_init - Initialize the endpoint device
  * @ep: DWC EP device

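Editor's illustration (not part of the backport): the new dw_pcie_ep_linkdown() above is meant to be called from a glue driver's link-state interrupt path, as the Qualcomm EP driver does later in this series. A minimal sketch, assuming a hypothetical glue driver ("foo") with a threaded IRQ that detects Link Down:

```
/*
 * Sketch only: "foo" and foo_link_went_down() are hypothetical. The core
 * reinitializes the non-sticky DWC registers and then notifies the EPF
 * drivers via pci_epc_linkdown().
 */
static irqreturn_t foo_pcie_global_irq_thread(int irq, void *data)
{
	struct dw_pcie *pci = data;

	if (foo_link_went_down(pci))	/* hypothetical status check */
		dw_pcie_ep_linkdown(&pci->ep);

	return IRQ_HANDLED;
}
```
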
@@ -398,6 +398,32 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
 	return 0;
 }
 
+static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct resource_entry *win;
+	struct resource *res;
+
+	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+	if (win) {
+		res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
+		if (!res)
+			return;
+
+		/*
+		 * Allocate MSG TLP region of size 'region_align' at the end of
+		 * the host bridge window.
+		 */
+		res->start = win->res->end - pci->region_align + 1;
+		res->end = win->res->end;
+		res->name = "msg";
+		res->flags = win->res->flags | IORESOURCE_BUSY;
+
+		if (!devm_request_resource(pci->dev, win->res, res))
+			pp->msg_res = res;
+	}
+}
+
 int dw_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -484,6 +510,18 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
 
 	dw_pcie_iatu_detect(pci);
 
+	/*
+	 * Allocate the resource for MSG TLP before programming the iATU
+	 * outbound window in dw_pcie_setup_rc(). Since the allocation depends
+	 * on the value of 'region_align', this has to be done after
+	 * dw_pcie_iatu_detect().
+	 *
+	 * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
+	 * make use of the generic MSG TLP implementation.
+	 */
+	if (pp->use_atu_msg)
+		dw_pcie_host_request_msg_tlp_res(pp);
+
 	ret = dw_pcie_edma_detect(pci);
 	if (ret)
 		goto err_free_msi;
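Editor's illustration (not part of the backport): per the comment above, a glue driver opts in to the generic MSG TLP machinery by setting 'use_atu_msg' before calling dw_pcie_host_init(). A minimal sketch with a hypothetical glue driver:

```
/* Sketch only: "foo_pcie" is hypothetical. */
static int foo_pcie_probe_host(struct foo_pcie *foo)
{
	struct dw_pcie_rp *pp = &foo->pci.pp;

	/* Reserve an outbound iATU window for Msg TLPs (PME_Turn_Off). */
	pp->use_atu_msg = true;

	return dw_pcie_host_init(pp);
}
```
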
@@ -554,6 +592,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
 {
 	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct dw_pcie_ob_atu_cfg atu = { 0 };
 	int type, ret;
 	u32 busdev;
 
@@ -576,8 +615,12 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
 	else
 		type = PCIE_ATU_TYPE_CFG1;
 
-	ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
-					pp->cfg0_size);
+	atu.type = type;
+	atu.cpu_addr = pp->cfg0_base;
+	atu.pci_addr = busdev;
+	atu.size = pp->cfg0_size;
+
+	ret = dw_pcie_prog_outbound_atu(pci, &atu);
 	if (ret)
 		return NULL;
 
@@ -589,6 +632,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
 {
 	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct dw_pcie_ob_atu_cfg atu = { 0 };
 	int ret;
 
 	ret = pci_generic_config_read(bus, devfn, where, size, val);
@@ -596,9 +640,12 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
 		return ret;
 
 	if (pp->cfg0_io_shared) {
-		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
-						pp->io_base, pp->io_bus_addr,
-						pp->io_size);
+		atu.type = PCIE_ATU_TYPE_IO;
+		atu.cpu_addr = pp->io_base;
+		atu.pci_addr = pp->io_bus_addr;
+		atu.size = pp->io_size;
+
+		ret = dw_pcie_prog_outbound_atu(pci, &atu);
 		if (ret)
 			return PCIBIOS_SET_FAILED;
 	}
@@ -611,6 +658,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
 {
 	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct dw_pcie_ob_atu_cfg atu = { 0 };
 	int ret;
 
 	ret = pci_generic_config_write(bus, devfn, where, size, val);
@@ -618,9 +666,12 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
 		return ret;
 
 	if (pp->cfg0_io_shared) {
-		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
-						pp->io_base, pp->io_bus_addr,
-						pp->io_size);
+		atu.type = PCIE_ATU_TYPE_IO;
+		atu.cpu_addr = pp->io_base;
+		atu.pci_addr = pp->io_bus_addr;
+		atu.size = pp->io_size;
+
+		ret = dw_pcie_prog_outbound_atu(pci, &atu);
 		if (ret)
 			return PCIBIOS_SET_FAILED;
 	}
@@ -655,6 +706,7 @@ static struct pci_ops dw_pcie_ops = {
 static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct dw_pcie_ob_atu_cfg atu = { 0 };
 	struct resource_entry *entry;
 	int i, ret;
 
@@ -682,10 +734,19 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
 		if (pci->num_ob_windows <= ++i)
 			break;
 
-		ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
-						entry->res->start,
-						entry->res->start - entry->offset,
-						resource_size(entry->res));
+		atu.index = i;
+		atu.type = PCIE_ATU_TYPE_MEM;
+		atu.cpu_addr = entry->res->start;
+		atu.pci_addr = entry->res->start - entry->offset;
+
+		/* Adjust iATU size if MSG TLP region was allocated before */
+		if (pp->msg_res && pp->msg_res->parent == entry->res)
+			atu.size = resource_size(entry->res) -
+					resource_size(pp->msg_res);
+		else
+			atu.size = resource_size(entry->res);
+
+		ret = dw_pcie_prog_outbound_atu(pci, &atu);
 		if (ret) {
 			dev_err(pci->dev, "Failed to set MEM range %pr\n",
 				entry->res);
@@ -695,10 +756,13 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
 
 	if (pp->io_size) {
 		if (pci->num_ob_windows > ++i) {
-			ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
-							pp->io_base,
-							pp->io_bus_addr,
-							pp->io_size);
+			atu.index = i;
+			atu.type = PCIE_ATU_TYPE_IO;
+			atu.cpu_addr = pp->io_base;
+			atu.pci_addr = pp->io_bus_addr;
+			atu.size = pp->io_size;
+
+			ret = dw_pcie_prog_outbound_atu(pci, &atu);
 			if (ret) {
 				dev_err(pci->dev, "Failed to set IO range %pr\n",
 					entry->res);
@@ -713,6 +777,8 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
 		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
 			 pci->num_ob_windows);
 
+	pp->msg_atu_index = i;
+
 	i = 0;
 	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
 		if (resource_type(entry->res) != IORESOURCE_MEM)
@@ -818,11 +884,47 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
 }
 EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
 
+static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
+{
+	struct dw_pcie_ob_atu_cfg atu = { 0 };
+	void __iomem *mem;
+	int ret;
+
+	if (pci->num_ob_windows <= pci->pp.msg_atu_index)
+		return -ENOSPC;
+
+	if (!pci->pp.msg_res)
+		return -ENOSPC;
+
+	atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
+	atu.routing = PCIE_MSG_TYPE_R_BC;
+	atu.type = PCIE_ATU_TYPE_MSG;
+	atu.size = resource_size(pci->pp.msg_res);
+	atu.index = pci->pp.msg_atu_index;
+
+	atu.cpu_addr = pci->pp.msg_res->start;
+
+	ret = dw_pcie_prog_outbound_atu(pci, &atu);
+	if (ret)
+		return ret;
+
+	mem = ioremap(atu.cpu_addr, pci->region_align);
+	if (!mem)
+		return -ENOMEM;
+
+	/* A dummy write is converted to a Msg TLP */
+	writel(0, mem);
+
+	iounmap(mem);
+
+	return 0;
+}
+
 int dw_pcie_suspend_noirq(struct dw_pcie *pci)
 {
 	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 	u32 val;
-	int ret;
+	int ret = 0;
 
 	/*
 	 * If L1SS is supported, then do not put the link into L2 as some
@@ -834,10 +936,13 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
 	if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
 		return 0;
 
-	if (!pci->pp.ops->pme_turn_off)
-		return 0;
+	if (pci->pp.ops->pme_turn_off)
+		pci->pp.ops->pme_turn_off(&pci->pp);
+	else
+		ret = dw_pcie_pme_turn_off(pci);
 
-	pci->pp.ops->pme_turn_off(&pci->pp);
+	if (ret)
+		return ret;
 
 	ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
 				PCIE_PME_TO_L2_TIMEOUT_US/10,

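Editor's illustration (not part of the backport): with the fallback above, a glue driver that lacks a pme_turn_off callback can still support system suspend by delegating its noirq PM hooks to the DWC core helpers. A sketch under that assumption:

```
/* Sketch only: "foo" is hypothetical; dw_pcie_suspend_noirq() and
 * dw_pcie_resume_noirq() are the core helpers extended above.
 */
static int foo_pcie_suspend_noirq(struct device *dev)
{
	struct foo_pcie *foo = dev_get_drvdata(dev);

	return dw_pcie_suspend_noirq(&foo->pci);
}

static int foo_pcie_resume_noirq(struct device *dev)
{
	struct foo_pcie *foo = dev_get_drvdata(dev);

	return dw_pcie_resume_noirq(&foo->pci);
}

static const struct dev_pm_ops foo_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_pcie_suspend_noirq,
				  foo_pcie_resume_noirq)
};
```
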
@@ -154,7 +154,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
 			dw_pcie_ep_deinit(&pci->ep);
 		}
 
-		dw_pcie_ep_init_notify(&pci->ep);
+		pci_epc_init_notify(pci->ep.epc);
 
 		break;
 	default:

@@ -465,56 +465,61 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
 	return val | PCIE_ATU_TD;
 }
 
-static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
-				       int index, int type, u64 cpu_addr,
-				       u64 pci_addr, u64 size)
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+			      const struct dw_pcie_ob_atu_cfg *atu)
 {
+	u64 cpu_addr = atu->cpu_addr;
 	u32 retries, val;
 	u64 limit_addr;
 
 	if (pci->ops && pci->ops->cpu_addr_fixup)
 		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
 
-	limit_addr = cpu_addr + size - 1;
+	limit_addr = cpu_addr + atu->size - 1;
 
 	if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
 	    !IS_ALIGNED(cpu_addr, pci->region_align) ||
-	    !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+	    !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {
 		return -EINVAL;
 	}
 
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE,
 			      lower_32_bits(cpu_addr));
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE,
 			      upper_32_bits(cpu_addr));
 
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,
 			      lower_32_bits(limit_addr));
 	if (dw_pcie_ver_is_ge(pci, 460A))
-		dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+		dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_LIMIT,
 				      upper_32_bits(limit_addr));
 
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
-			      lower_32_bits(pci_addr));
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
-			      upper_32_bits(pci_addr));
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_TARGET,
+			      lower_32_bits(atu->pci_addr));
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_TARGET,
+			      upper_32_bits(atu->pci_addr));
 
-	val = type | PCIE_ATU_FUNC_NUM(func_no);
+	val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no);
 	if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
 	    dw_pcie_ver_is_ge(pci, 460A))
 		val |= PCIE_ATU_INCREASE_REGION_SIZE;
 	if (dw_pcie_ver_is(pci, 490A))
 		val = dw_pcie_enable_ecrc(val);
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val);
 
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+	val = PCIE_ATU_ENABLE;
+	if (atu->type == PCIE_ATU_TYPE_MSG) {
+		/* The data-less messages only for now */
+		val |= PCIE_ATU_INHIBIT_PAYLOAD | atu->code;
+	}
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2, val);
 
 	/*
 	 * Make sure ATU enable takes effect before any subsequent config
 	 * and I/O accesses.
 	 */
 	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
-		val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
+		val = dw_pcie_readl_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2);
 		if (val & PCIE_ATU_ENABLE)
 			return 0;
 
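Editor's illustration (not part of the backport): after this rework, every caller describes the outbound window in a struct dw_pcie_ob_atu_cfg instead of passing loose arguments. A minimal sketch with illustrative values:

```
/* Sketch only: addresses and index are illustrative. */
static int foo_map_mem_window(struct dw_pcie *pci)
{
	struct dw_pcie_ob_atu_cfg atu = { 0 };

	atu.index = 0;
	atu.type = PCIE_ATU_TYPE_MEM;
	atu.cpu_addr = 0x40000000;	/* CPU-side base */
	atu.pci_addr = 0x0;		/* PCI bus address */
	atu.size = SZ_1M;

	return dw_pcie_prog_outbound_atu(pci, &atu);
}
```
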
@@ -526,21 +531,6 @@ static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
 	return -ETIMEDOUT;
 }
 
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
-			      u64 cpu_addr, u64 pci_addr, u64 size)
-{
-	return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
-					   cpu_addr, pci_addr, size);
-}
-
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
-				 int type, u64 cpu_addr, u64 pci_addr,
-				 u64 size)
-{
-	return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
-					   cpu_addr, pci_addr, size);
-}
-
 static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
 {
 	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
@@ -655,7 +645,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
 		if (dw_pcie_link_up(pci))
 			break;
 
-		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+		msleep(LINK_WAIT_SLEEP_MS);
 	}
 
 	if (retries >= LINK_WAIT_MAX_RETRIES) {

@@ -63,14 +63,16 @@
 
 /* Parameters for the waiting for link up routine */
 #define LINK_WAIT_MAX_RETRIES		10
-#define LINK_WAIT_USLEEP_MIN		90000
-#define LINK_WAIT_USLEEP_MAX		100000
+#define LINK_WAIT_SLEEP_MS		90
 
 /* Parameters for the waiting for iATU enabled routine */
 #define LINK_WAIT_MAX_IATU_RETRIES	5
 #define LINK_WAIT_IATU			9
 
 /* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_FORCE			0x708
+#define PORT_FORCE_DO_DESKEW_FOR_SRIS	BIT(23)
+
 #define PCIE_PORT_AFR			0x70C
 #define PORT_AFR_N_FTS_MASK		GENMASK(15, 8)
 #define PORT_AFR_N_FTS(n)		FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
@@ -92,6 +94,9 @@
 #define PORT_LINK_MODE_4_LANES		PORT_LINK_MODE(0x7)
 #define PORT_LINK_MODE_8_LANES		PORT_LINK_MODE(0xf)
 
+#define PCIE_PORT_LANE_SKEW		0x714
+#define PORT_LANE_SKEW_INSERT_MASK	GENMASK(23, 0)
+
 #define PCIE_PORT_DEBUG0		0x728
 #define PORT_LOGIC_LTSSM_STATE_MASK	0x1f
 #define PORT_LOGIC_LTSSM_STATE_L0	0x11
@@ -148,11 +153,13 @@
 #define PCIE_ATU_TYPE_IO		0x2
 #define PCIE_ATU_TYPE_CFG0		0x4
 #define PCIE_ATU_TYPE_CFG1		0x5
+#define PCIE_ATU_TYPE_MSG		0x10
 #define PCIE_ATU_TD			BIT(8)
 #define PCIE_ATU_FUNC_NUM(pf)		((pf) << 20)
 #define PCIE_ATU_REGION_CTRL2		0x004
 #define PCIE_ATU_ENABLE			BIT(31)
 #define PCIE_ATU_BAR_MODE_ENABLE	BIT(30)
+#define PCIE_ATU_INHIBIT_PAYLOAD	BIT(22)
 #define PCIE_ATU_FUNC_NUM_MATCH_EN	BIT(19)
 #define PCIE_ATU_LOWER_BASE		0x008
 #define PCIE_ATU_UPPER_BASE		0x00C
@@ -286,6 +293,17 @@ enum dw_pcie_ltssm {
 	DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
 };
 
+struct dw_pcie_ob_atu_cfg {
+	int index;
+	int type;
+	u8 func_no;
+	u8 code;
+	u8 routing;
+	u64 cpu_addr;
+	u64 pci_addr;
+	u64 size;
+};
+
 struct dw_pcie_host_ops {
 	int (*init)(struct dw_pcie_rp *pp);
 	void (*deinit)(struct dw_pcie_rp *pp);
@@ -315,6 +333,9 @@ struct dw_pcie_rp {
 	struct pci_host_bridge *bridge;
 	raw_spinlock_t lock;
 	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+	bool use_atu_msg;
+	int msg_atu_index;
+	struct resource *msg_res;
 };
 
 struct dw_pcie_ep_ops {
@@ -420,10 +441,8 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
 int dw_pcie_link_up(struct dw_pcie *pci);
 void dw_pcie_upconfig_setup(struct dw_pcie *pci);
 int dw_pcie_wait_for_link(struct dw_pcie *pci);
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
-			      u64 cpu_addr, u64 pci_addr, u64 size);
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
-				 int type, u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+			      const struct dw_pcie_ob_atu_cfg *atu);
 int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
 			     u64 cpu_addr, u64 pci_addr, u64 size);
 int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
@@ -655,9 +674,9 @@ static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
 
 #ifdef CONFIG_PCIE_DW_EP
 void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep);
 int dw_pcie_ep_init(struct dw_pcie_ep *ep);
 int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep);
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
 void dw_pcie_ep_deinit(struct dw_pcie_ep *ep);
 void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep);
 int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
@@ -675,6 +694,10 @@ static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
 {
 }
 
+static inline void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+}
+
 static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 {
 	return 0;
@@ -685,10 +708,6 @@ static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
 	return 0;
 }
 
-static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
-{
-}
-
 static inline void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
 {
 }

@@ -49,25 +49,29 @@
 #define PCIE_LTSSM_STATUS_MASK		GENMASK(5, 0)
 
 struct rockchip_pcie {
-	struct dw_pcie pci;
-	void __iomem *apb_base;
-	struct phy *phy;
-	struct clk_bulk_data *clks;
-	unsigned int clk_cnt;
-	struct reset_control *rst;
-	struct gpio_desc *rst_gpio;
-	struct regulator *vpcie3v3;
-	struct irq_domain *irq_domain;
+	struct dw_pcie			pci;
+	void __iomem			*apb_base;
+	struct phy			*phy;
+	struct clk_bulk_data		*clks;
+	unsigned int			clk_cnt;
+	struct reset_control		*rst;
+	struct gpio_desc		*rst_gpio;
+	struct regulator		*vpcie3v3;
+	struct irq_domain		*irq_domain;
+	const struct rockchip_pcie_of_data *data;
 };
 
-static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
-				   u32 reg)
+struct rockchip_pcie_of_data {
+	enum dw_pcie_device_mode mode;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, u32 reg)
 {
 	return readl_relaxed(rockchip->apb_base + reg);
 }
 
-static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
-				     u32 val, u32 reg)
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, u32 val,
+				     u32 reg)
 {
 	writel_relaxed(val, rockchip->apb_base + reg);
 }
@@ -144,6 +148,11 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
 	return 0;
 }
 
+static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip)
+{
+	return rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+}
+
 static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
 {
 	rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
@@ -153,7 +162,7 @@ static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
 static int rockchip_pcie_link_up(struct dw_pcie *pci)
 {
 	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
-	u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+	u32 val = rockchip_pcie_get_ltssm(rockchip);
 
 	if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
 	    (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
@@ -191,7 +200,6 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
 	struct device *dev = rockchip->pci.dev;
-	u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
 	int irq, ret;
 
 	irq = of_irq_get_byname(dev->of_node, "legacy");
@@ -205,12 +213,6 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
 	irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
 					 rockchip);
 
-	/* LTSSM enable control mode */
-	rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
-
-	rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
-				 PCIE_CLIENT_GENERAL_CONTROL);
-
 	return 0;
 }
 
@@ -225,11 +227,15 @@ static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
 
 	ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
 	if (ret < 0)
-		return ret;
+		return dev_err_probe(dev, ret, "failed to get clocks\n");
 
 	rockchip->clk_cnt = ret;
 
-	return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+	ret = clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+	if (ret)
+		return dev_err_probe(dev, ret, "failed to enable clocks\n");
+
+	return 0;
 }
 
 static int rockchip_pcie_resource_get(struct platform_device *pdev,
@@ -237,12 +243,14 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
 {
 	rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
 	if (IS_ERR(rockchip->apb_base))
-		return PTR_ERR(rockchip->apb_base);
+		return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->apb_base),
+				     "failed to map apb registers\n");
 
 	rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
-						     GPIOD_OUT_HIGH);
+						     GPIOD_OUT_LOW);
 	if (IS_ERR(rockchip->rst_gpio))
-		return PTR_ERR(rockchip->rst_gpio);
+		return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst_gpio),
+				     "failed to get reset gpio\n");
 
 	rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);
 	if (IS_ERR(rockchip->rst))
@@ -284,13 +292,35 @@ static const struct dw_pcie_ops dw_pcie_ops = {
 	.start_link = rockchip_pcie_start_link,
 };
 
+static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip)
+{
+	struct dw_pcie_rp *pp;
+	u32 val;
+
+	/* LTSSM enable control mode */
+	val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+	rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+	rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
+				 PCIE_CLIENT_GENERAL_CONTROL);
+
+	pp = &rockchip->pci.pp;
+	pp->ops = &rockchip_pcie_host_ops;
+
+	return dw_pcie_host_init(pp);
+}
+
 static int rockchip_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct rockchip_pcie *rockchip;
-	struct dw_pcie_rp *pp;
+	const struct rockchip_pcie_of_data *data;
 	int ret;
 
+	data = of_device_get_match_data(dev);
+	if (!data)
+		return -EINVAL;
+
 	rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
 	if (!rockchip)
 		return -ENOMEM;
@@ -299,9 +329,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 
 	rockchip->pci.dev = dev;
 	rockchip->pci.ops = &dw_pcie_ops;
-
-	pp = &rockchip->pci.pp;
-	pp->ops = &rockchip_pcie_host_ops;
+	rockchip->data = data;
 
 	ret = rockchip_pcie_resource_get(pdev, rockchip);
 	if (ret)
@@ -320,10 +348,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 		rockchip->vpcie3v3 = NULL;
 	} else {
 		ret = regulator_enable(rockchip->vpcie3v3);
-		if (ret) {
-			dev_err(dev, "failed to enable vpcie3v3 regulator\n");
-			return ret;
-		}
+		if (ret)
+			return dev_err_probe(dev, ret,
+					     "failed to enable vpcie3v3 regulator\n");
 	}
 
 	ret = rockchip_pcie_phy_init(rockchip);
@@ -338,10 +365,21 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 	if (ret)
 		goto deinit_phy;
 
-	ret = dw_pcie_host_init(pp);
-	if (!ret)
-		return 0;
+	switch (data->mode) {
+	case DW_PCIE_RC_TYPE:
+		ret = rockchip_pcie_configure_rc(rockchip);
+		if (ret)
+			goto deinit_clk;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", data->mode);
+		ret = -EINVAL;
+		goto deinit_clk;
+	}
+
+	return 0;
 
+deinit_clk:
+	clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
 deinit_phy:
 	rockchip_pcie_phy_deinit(rockchip);
 
@@ -352,8 +390,15 @@ disable_regulator:
 	return ret;
 }
 
+static const struct rockchip_pcie_of_data rockchip_pcie_rc_of_data_rk3568 = {
+	.mode = DW_PCIE_RC_TYPE,
+};
+
 static const struct of_device_id rockchip_pcie_of_match[] = {
-	{ .compatible = "rockchip,rk3568-pcie", },
+	{
+		.compatible = "rockchip,rk3568-pcie",
+		.data = &rockchip_pcie_rc_of_data_rk3568,
+	},
 	{},
 };
 
@@ -442,7 +442,7 @@ static int keembay_pcie_probe(struct platform_device *pdev)
 			return ret;
 		}
 
-		dw_pcie_ep_init_notify(&pci->ep);
+		pci_epc_init_notify(pci->ep.epc);
 
 		break;
 	default:

@@ -12,12 +12,10 @@
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/err.h>
-#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/of_pci.h>
 #include <linux/phy/phy.h>
 #include <linux/pci.h>
@@ -78,16 +76,16 @@ struct kirin_pcie {
 	void		*phy_priv;	/* only for PCIE_KIRIN_INTERNAL_PHY */
 
 	/* DWC PERST# */
-	int		gpio_id_dwc_perst;
+	struct gpio_desc *id_dwc_perst_gpio;
 
 	/* Per-slot PERST# */
 	int		num_slots;
-	int		gpio_id_reset[MAX_PCI_SLOTS];
+	struct gpio_desc *id_reset_gpio[MAX_PCI_SLOTS];
 	const char	*reset_names[MAX_PCI_SLOTS];
 
 	/* Per-slot clkreq */
 	int		n_gpio_clkreq;
-	int		gpio_id_clkreq[MAX_PCI_SLOTS];
+	struct gpio_desc *id_clkreq_gpio[MAX_PCI_SLOTS];
 	const char	*clkreq_names[MAX_PCI_SLOTS];
 };
 
@@ -381,15 +379,20 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
 	pcie->n_gpio_clkreq = ret;
 
 	for (i = 0; i < pcie->n_gpio_clkreq; i++) {
-		pcie->gpio_id_clkreq[i] = of_get_named_gpio(dev->of_node,
-						    "hisilicon,clken-gpios", i);
-		if (pcie->gpio_id_clkreq[i] < 0)
-			return pcie->gpio_id_clkreq[i];
+		pcie->id_clkreq_gpio[i] = devm_gpiod_get_index(dev,
+						"hisilicon,clken", i,
+						GPIOD_OUT_LOW);
+		if (IS_ERR(pcie->id_clkreq_gpio[i]))
+			return dev_err_probe(dev, PTR_ERR(pcie->id_clkreq_gpio[i]),
+					     "unable to get a valid clken gpio\n");
 
 		pcie->clkreq_names[i] = devm_kasprintf(dev, GFP_KERNEL,
 						       "pcie_clkreq_%d", i);
 		if (!pcie->clkreq_names[i])
 			return -ENOMEM;
+
+		gpiod_set_consumer_name(pcie->id_clkreq_gpio[i],
					pcie->clkreq_names[i]);
 	}
 
 	return 0;
@@ -400,29 +403,33 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
 				 struct device_node *node)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *parent, *child;
 	int ret, slot, i;
 
-	for_each_available_child_of_node(node, parent) {
-		for_each_available_child_of_node(parent, child) {
+	for_each_available_child_of_node_scoped(node, parent) {
+		for_each_available_child_of_node_scoped(parent, child) {
 			i = pcie->num_slots;
 
-			pcie->gpio_id_reset[i] = of_get_named_gpio(child,
-							"reset-gpios", 0);
-			if (pcie->gpio_id_reset[i] < 0)
-				continue;
+			pcie->id_reset_gpio[i] = devm_fwnode_gpiod_get_index(dev,
							 of_fwnode_handle(child),
							 "reset", 0, GPIOD_OUT_LOW,
							 NULL);
+			if (IS_ERR(pcie->id_reset_gpio[i])) {
+				if (PTR_ERR(pcie->id_reset_gpio[i]) == -ENOENT)
+					continue;
+				return dev_err_probe(dev, PTR_ERR(pcie->id_reset_gpio[i]),
						     "unable to get a valid reset gpio\n");
+			}
 
 			pcie->num_slots++;
 			if (pcie->num_slots > MAX_PCI_SLOTS) {
 				dev_err(dev, "Too many PCI slots!\n");
-				ret = -EINVAL;
-				goto put_node;
+				return -EINVAL;
 			}
 
 			ret = of_pci_get_devfn(child);
 			if (ret < 0) {
 				dev_err(dev, "failed to parse devfn: %d\n", ret);
-				goto put_node;
+				return ret;
 			}
 
 			slot = PCI_SLOT(ret);
@@ -430,19 +437,15 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
 			pcie->reset_names[i] = devm_kasprintf(dev, GFP_KERNEL,
 							      "pcie_perst_%d",
 							      slot);
-			if (!pcie->reset_names[i]) {
-				ret = -ENOMEM;
-				goto put_node;
-			}
+			if (!pcie->reset_names[i])
+				return -ENOMEM;
+
+			gpiod_set_consumer_name(pcie->id_reset_gpio[i],
						pcie->reset_names[i]);
 		}
 	}
 
 	return 0;
-
-put_node:
-	of_node_put(child);
-	of_node_put(parent);
-	return ret;
 }
 
 static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
@@ -463,14 +466,11 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
 		return PTR_ERR(kirin_pcie->apb);
 
 	/* pcie internal PERST# gpio */
-	kirin_pcie->gpio_id_dwc_perst = of_get_named_gpio(dev->of_node,
-							  "reset-gpios", 0);
-	if (kirin_pcie->gpio_id_dwc_perst == -EPROBE_DEFER) {
-		return -EPROBE_DEFER;
-	} else if (!gpio_is_valid(kirin_pcie->gpio_id_dwc_perst)) {
-		dev_err(dev, "unable to get a valid gpio pin\n");
-		return -ENODEV;
-	}
+	kirin_pcie->id_dwc_perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(kirin_pcie->id_dwc_perst_gpio))
+		return dev_err_probe(dev, PTR_ERR(kirin_pcie->id_dwc_perst_gpio),
+				     "unable to get a valid gpio pin\n");
+	gpiod_set_consumer_name(kirin_pcie->id_dwc_perst_gpio, "pcie_perst_bridge");
 
 	ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev);
 	if (ret)
@@ -553,7 +553,7 @@ static int kirin_pcie_add_bus(struct pci_bus *bus)
 
 	/* Send PERST# to each slot */
 	for (i = 0; i < kirin_pcie->num_slots; i++) {
-		ret = gpio_direction_output(kirin_pcie->gpio_id_reset[i], 1);
+		ret = gpiod_direction_output_raw(kirin_pcie->id_reset_gpio[i], 1);
 		if (ret) {
 			dev_err(pci->dev, "PERST# %s error: %d\n",
 				kirin_pcie->reset_names[i], ret);
@@ -623,44 +623,6 @@ static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
 	return 0;
 }
 
-static int kirin_pcie_gpio_request(struct kirin_pcie *kirin_pcie,
-				   struct device *dev)
-{
-	int ret, i;
-
-	for (i = 0; i < kirin_pcie->num_slots; i++) {
-		if (!gpio_is_valid(kirin_pcie->gpio_id_reset[i])) {
-			dev_err(dev, "unable to get a valid %s gpio\n",
-				kirin_pcie->reset_names[i]);
-			return -ENODEV;
-		}
-
-		ret = devm_gpio_request(dev, kirin_pcie->gpio_id_reset[i],
-					kirin_pcie->reset_names[i]);
-		if (ret)
-			return ret;
-	}
-
-	for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) {
-		if (!gpio_is_valid(kirin_pcie->gpio_id_clkreq[i])) {
-			dev_err(dev, "unable to get a valid %s gpio\n",
-				kirin_pcie->clkreq_names[i]);
-			return -ENODEV;
-		}
-
-		ret = devm_gpio_request(dev, kirin_pcie->gpio_id_clkreq[i],
-					kirin_pcie->clkreq_names[i]);
-		if (ret)
-			return ret;
-
-		ret = gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 0);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
 static const struct dw_pcie_ops kirin_dw_pcie_ops = {
 	.read_dbi = kirin_pcie_read_dbi,
 	.write_dbi = kirin_pcie_write_dbi,
@@ -680,7 +642,7 @@ static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
 		return hi3660_pcie_phy_power_off(kirin_pcie);
 
 	for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++)
-		gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 1);
+		gpiod_direction_output_raw(kirin_pcie->id_clkreq_gpio[i], 1);
 
 	phy_power_off(kirin_pcie->phy);
 	phy_exit(kirin_pcie->phy);
@@ -707,10 +669,6 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
 	if (IS_ERR(kirin_pcie->phy))
 		return PTR_ERR(kirin_pcie->phy);
 
-	ret = kirin_pcie_gpio_request(kirin_pcie, dev);
-	if (ret)
-		return ret;
-
 	ret = phy_init(kirin_pcie->phy);
 	if (ret)
 		goto err;
@@ -723,11 +681,9 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
 	/* perst assert Endpoint */
 	usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
 
-	if (!gpio_request(kirin_pcie->gpio_id_dwc_perst, "pcie_perst_bridge")) {
-		ret = gpio_direction_output(kirin_pcie->gpio_id_dwc_perst, 1);
-		if (ret)
-			goto err;
-	}
+	ret = gpiod_direction_output_raw(kirin_pcie->id_dwc_perst_gpio, 1);
+	if (ret)
+		goto err;
 
 	usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
 
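Editor's illustration (not part of the backport): the conversion above replaces the legacy integer-GPIO calls with GPIO descriptors, folding lookup, request and initial level into a single devm call. A sketch contrasting the two patterns (error handling omitted):

```
/* Sketch only: contrasts the removed legacy pattern with its gpiod
 * replacement; "np"/"dev" are a device node and its struct device.
 */
static void foo_perst_example(struct device *dev, struct device_node *np)
{
	/* Legacy: three separate steps on an integer GPIO number. */
	int gpio = of_get_named_gpio(np, "reset-gpios", 0);

	devm_gpio_request(dev, gpio, "pcie_perst");
	gpio_direction_output(gpio, 1);

	/*
	 * gpiod: lookup, request and initial level in one call. The _raw
	 * direction variant drives the physical line high regardless of
	 * ACTIVE_LOW flags, matching gpio_direction_output() semantics.
	 */
	struct gpio_desc *desc = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);

	gpiod_direction_output_raw(desc, 1);
}
```
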
@@ -482,7 +482,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
 	val &= ~PARF_MSTR_AXI_CLK_EN;
 	writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
 
-	dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+	pci_epc_init_notify(pcie_ep->pci.ep.epc);
 
 	/* Enable LTSSM */
 	val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
@@ -507,6 +507,7 @@ static void qcom_pcie_perst_assert(struct dw_pcie *pci)
 		return;
 	}
 
+	pci_epc_deinit_notify(pci->ep.epc);
 	dw_pcie_ep_cleanup(&pci->ep);
 	qcom_pcie_disable_resources(pcie_ep);
 	pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
@@ -640,12 +641,12 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
 	if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
 		dev_dbg(dev, "Received Linkdown event\n");
 		pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
-		pci_epc_linkdown(pci->ep.epc);
+		dw_pcie_ep_linkdown(&pci->ep);
 	} else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
-		dev_dbg(dev, "Received BME event. Link is enabled!\n");
+		dev_dbg(dev, "Received Bus Master Enable event\n");
 		pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
 		qcom_pcie_ep_icc_update(pcie_ep);
-		pci_epc_bme_notify(pci->ep.epc);
+		pci_epc_bus_master_enable_notify(pci->ep.epc);
 	} else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
 		dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
 		val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);

@@ -20,7 +20,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <linux/platform_device.h>

@@ -2,11 +2,17 @@
 /*
  * PCIe controller driver for Renesas R-Car Gen4 Series SoCs
  * Copyright (C) 2022-2023 Renesas Electronics Corporation
+ *
+ * The r8a779g0 (R-Car V4H) controller requires a specific firmware to be
+ * provided, to initialize the PHY. Otherwise, the PCIe controller will not
+ * work.
  */
 
 #include <linux/delay.h>
+#include <linux/firmware.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/pci.h>
@@ -20,9 +26,10 @@
 /* Renesas-specific */
 /* PCIe Mode Setting Register 0 */
 #define PCIEMSR0		0x0000
-#define BIFUR_MOD_SET_ON	BIT(0)
+#define APP_SRIS_MODE		BIT(6)
 #define DEVICE_TYPE_EP		0
 #define DEVICE_TYPE_RC		BIT(4)
+#define BIFUR_MOD_SET_ON	BIT(0)
 
 /* PCIe Interrupt Status 0 */
 #define PCIEINTSTS0		0x0084
@@ -37,47 +44,49 @@
 #define PCIEDMAINTSTSEN		0x0314
 #define PCIEDMAINTSTSEN_INIT	GENMASK(15, 0)
 
+/* Port Logic Registers 89 */
+#define PRTLGC89		0x0b70
+
+/* Port Logic Registers 90 */
+#define PRTLGC90		0x0b74
+
 /* PCIe Reset Control Register 1 */
 #define PCIERSTCTRL1		0x0014
 #define APP_HOLD_PHY_RST	BIT(16)
 #define APP_LTSSM_ENABLE	BIT(0)
 
+/* PCIe Power Management Control */
+#define PCIEPWRMNGCTRL		0x0070
+#define APP_CLK_REQ_N		BIT(11)
+#define APP_CLK_PM_EN		BIT(10)
+
 #define RCAR_NUM_SPEED_CHANGE_RETRIES	10
 #define RCAR_MAX_LINK_SPEED	4
 
 #define RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET	0x1000
 #define RCAR_GEN4_PCIE_EP_FUNC_DBI2_OFFSET	0x800
 
+#define RCAR_GEN4_PCIE_FIRMWARE_NAME		"rcar_gen4_pcie.bin"
+#define RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR	0xc000
+MODULE_FIRMWARE(RCAR_GEN4_PCIE_FIRMWARE_NAME);
+
+struct rcar_gen4_pcie;
+struct rcar_gen4_pcie_drvdata {
+	void (*additional_common_init)(struct rcar_gen4_pcie *rcar);
+	int (*ltssm_control)(struct rcar_gen4_pcie *rcar, bool enable);
+	enum dw_pcie_device_mode mode;
+};
+
 struct rcar_gen4_pcie {
 	struct dw_pcie dw;
 	void __iomem *base;
+	void __iomem *phy_base;
 	struct platform_device *pdev;
-	enum dw_pcie_device_mode mode;
+	const struct rcar_gen4_pcie_drvdata *drvdata;
 };
 #define to_rcar_gen4_pcie(_dw)	container_of(_dw, struct rcar_gen4_pcie, dw)
 
 /* Common */
-static void rcar_gen4_pcie_ltssm_enable(struct rcar_gen4_pcie *rcar,
-					bool enable)
-{
-	u32 val;
-
-	val = readl(rcar->base + PCIERSTCTRL1);
-	if (enable) {
-		val |= APP_LTSSM_ENABLE;
-		val &= ~APP_HOLD_PHY_RST;
-	} else {
-		/*
-		 * Since the datasheet of R-Car doesn't mention how to assert
-		 * the APP_HOLD_PHY_RST, don't assert it again. Otherwise,
-		 * hang-up issue happened in the dw_edma_core_off() when
-		 * the controller didn't detect a PCI device.
-		 */
-		val &= ~APP_LTSSM_ENABLE;
-	}
-	writel(val, rcar->base + PCIERSTCTRL1);
-}
-
 static int rcar_gen4_pcie_link_up(struct dw_pcie *dw)
 {
 	struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
@@ -123,9 +132,13 @@ static int rcar_gen4_pcie_speed_change(struct dw_pcie *dw)
 static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)
 {
 	struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
-	int i, changes;
+	int i, changes, ret;
 
-	rcar_gen4_pcie_ltssm_enable(rcar, true);
+	if (rcar->drvdata->ltssm_control) {
+		ret = rcar->drvdata->ltssm_control(rcar, true);
+		if (ret)
+			return ret;
+	}
 
 	/*
 	 * Require direct speed change with retrying here if the link_gen is
@@ -137,7 +150,7 @@ static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)
 	 * Since dw_pcie_setup_rc() sets it once, PCIe Gen2 will be trained.
 	 * So, this needs remaining times for up to PCIe Gen4 if RC mode.
 	 */
-	if (changes && rcar->mode == DW_PCIE_RC_TYPE)
+	if (changes && rcar->drvdata->mode == DW_PCIE_RC_TYPE)
 		changes--;
 
 	for (i = 0; i < changes; i++) {
@@ -153,7 +166,8 @@ static void rcar_gen4_pcie_stop_link(struct dw_pcie *dw)
 {
 	struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
 
-	rcar_gen4_pcie_ltssm_enable(rcar, false);
+	if (rcar->drvdata->ltssm_control)
+		rcar->drvdata->ltssm_control(rcar, false);
 }
 
 static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
@@ -172,9 +186,9 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
 	reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
 
 	val = readl(rcar->base + PCIEMSR0);
-	if (rcar->mode == DW_PCIE_RC_TYPE) {
+	if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) {
 		val |= DEVICE_TYPE_RC;
-	} else if (rcar->mode == DW_PCIE_EP_TYPE) {
+	} else if (rcar->drvdata->mode == DW_PCIE_EP_TYPE) {
 		val |= DEVICE_TYPE_EP;
 	} else {
 		ret = -EINVAL;
@@ -190,6 +204,9 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
 	if (ret)
 		goto err_unprepare;
 
+	if (rcar->drvdata->additional_common_init)
+		rcar->drvdata->additional_common_init(rcar);
+
 	return 0;
 
 err_unprepare:
@@ -231,6 +248,10 @@ static void rcar_gen4_pcie_unprepare(struct rcar_gen4_pcie *rcar)
 
 static int rcar_gen4_pcie_get_resources(struct rcar_gen4_pcie *rcar)
 {
+	rcar->phy_base = devm_platform_ioremap_resource_byname(rcar->pdev, "phy");
+	if (IS_ERR(rcar->phy_base))
+		return PTR_ERR(rcar->phy_base);
+
 	/* Renesas-specific registers */
 	rcar->base = devm_platform_ioremap_resource_byname(rcar->pdev, "app");
 
@@ -437,7 +458,7 @@ static int rcar_gen4_add_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
 		rcar_gen4_pcie_ep_deinit(rcar);
 	}
 
-	dw_pcie_ep_init_notify(ep);
+	pci_epc_init_notify(ep->epc);
 
 	return ret;
 }
@@ -451,9 +472,11 @@ static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
 /* Common */
 static int rcar_gen4_add_dw_pcie(struct rcar_gen4_pcie *rcar)
 {
-	rcar->mode = (uintptr_t)of_device_get_match_data(&rcar->pdev->dev);
+	rcar->drvdata = of_device_get_match_data(&rcar->pdev->dev);
+	if (!rcar->drvdata)
+		return -EINVAL;
 
-	switch (rcar->mode) {
+	switch (rcar->drvdata->mode) {
 	case DW_PCIE_RC_TYPE:
 		return rcar_gen4_add_dw_pcie_rp(rcar);
 	case DW_PCIE_EP_TYPE:
@@ -494,7 +517,7 @@ err_unprepare:
 
 static void rcar_gen4_remove_dw_pcie(struct rcar_gen4_pcie *rcar)
 {
-	switch (rcar->mode) {
+	switch (rcar->drvdata->mode) {
 	case DW_PCIE_RC_TYPE:
 		rcar_gen4_remove_dw_pcie_rp(rcar);
 		break;
@@ -514,14 +537,227 @@ static void rcar_gen4_pcie_remove(struct platform_device *pdev)
 	rcar_gen4_pcie_unprepare(rcar);
 }
 
+static int r8a779f0_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+	u32 val;
+
+	val = readl(rcar->base + PCIERSTCTRL1);
+	if (enable) {
+		val |= APP_LTSSM_ENABLE;
+		val &= ~APP_HOLD_PHY_RST;
+	} else {
+		/*
+		 * Since the datasheet of R-Car doesn't mention how to assert
+		 * the APP_HOLD_PHY_RST, don't assert it again. Otherwise,
+		 * hang-up issue happened in the dw_edma_core_off() when
+		 * the controller didn't detect a PCI device.
+		 */
+		val &= ~APP_LTSSM_ENABLE;
+	}
+	writel(val, rcar->base + PCIERSTCTRL1);
+
+	return 0;
+}
+
+static void rcar_gen4_pcie_additional_common_init(struct rcar_gen4_pcie *rcar)
+{
+	struct dw_pcie *dw = &rcar->dw;
+	u32 val;
+
+	val = dw_pcie_readl_dbi(dw, PCIE_PORT_LANE_SKEW);
+	val &= ~PORT_LANE_SKEW_INSERT_MASK;
+	if (dw->num_lanes < 4)
+		val |= BIT(6);
+	dw_pcie_writel_dbi(dw, PCIE_PORT_LANE_SKEW, val);
+
+	val = readl(rcar->base + PCIEPWRMNGCTRL);
+	val |= APP_CLK_REQ_N | APP_CLK_PM_EN;
+	writel(val, rcar->base + PCIEPWRMNGCTRL);
+}
+
+static void rcar_gen4_pcie_phy_reg_update_bits(struct rcar_gen4_pcie *rcar,
+					       u32 offset, u32 mask, u32 val)
+{
+	u32 tmp;
+
+	tmp = readl(rcar->phy_base + offset);
+	tmp &= ~mask;
+	tmp |= val;
+	writel(tmp, rcar->phy_base + offset);
+}
+
+/*
+ * SoC datasheet suggests checking port logic register bits during firmware
+ * write. If read returns non-zero value, then this function returns -EAGAIN
+ * indicating that the write needs to be done again. If read returns zero,
+ * then return 0 to indicate success.
+ */
+static int rcar_gen4_pcie_reg_test_bit(struct rcar_gen4_pcie *rcar,
+				       u32 offset, u32 mask)
+{
+	struct dw_pcie *dw = &rcar->dw;
+
+	if (dw_pcie_readl_dbi(dw, offset) & mask)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int rcar_gen4_pcie_download_phy_firmware(struct rcar_gen4_pcie *rcar)
+{
+	/* The check_addr values are magical numbers in the datasheet */
+	const u32 check_addr[] = { 0x00101018, 0x00101118, 0x00101021, 0x00101121};
+	struct dw_pcie *dw = &rcar->dw;
+	const struct firmware *fw;
+	unsigned int i, timeout;
+	u32 data;
+	int ret;
+
+	ret = request_firmware(&fw, RCAR_GEN4_PCIE_FIRMWARE_NAME, dw->dev);
+	if (ret) {
+		dev_err(dw->dev, "Failed to load firmware (%s): %d\n",
+			RCAR_GEN4_PCIE_FIRMWARE_NAME, ret);
+		return ret;
+	}
+
+	for (i = 0; i < (fw->size / 2); i++) {
+		data = fw->data[(i * 2) + 1] << 8 | fw->data[i * 2];
+		timeout = 100;
+		do {
+			dw_pcie_writel_dbi(dw, PRTLGC89, RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR + i);
+			dw_pcie_writel_dbi(dw, PRTLGC90, data);
+			if (!rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30)))
+				break;
+			if (!(--timeout)) {
+				ret = -ETIMEDOUT;
+				goto exit;
+			}
+			usleep_range(100, 200);
+		} while (1);
+	}
+
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(17), BIT(17));
+
+	for (i = 0; i < ARRAY_SIZE(check_addr); i++) {
+		timeout = 100;
+		do {
+			dw_pcie_writel_dbi(dw, PRTLGC89, check_addr[i]);
+			ret = rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30));
+			ret |= rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC90, BIT(0));
+			if (!ret)
+				break;
+			if (!(--timeout)) {
+				ret = -ETIMEDOUT;
+				goto exit;
+			}
+			usleep_range(100, 200);
+		} while (1);
+	}
+
+exit:
+	release_firmware(fw);
+
+	return ret;
+}
+
+static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+	struct dw_pcie *dw = &rcar->dw;
+	u32 val;
+	int ret;
+
+	if (!enable) {
+		val = readl(rcar->base + PCIERSTCTRL1);
+		val &= ~APP_LTSSM_ENABLE;
+		writel(val, rcar->base + PCIERSTCTRL1);
+
+		return 0;
+	}
+
+	val = dw_pcie_readl_dbi(dw, PCIE_PORT_FORCE);
+	val |= PORT_FORCE_DO_DESKEW_FOR_SRIS;
+	dw_pcie_writel_dbi(dw, PCIE_PORT_FORCE, val);
+
+	val = readl(rcar->base + PCIEMSR0);
+	val |= APP_SRIS_MODE;
+	writel(val, rcar->base + PCIEMSR0);
+
+	/*
+	 * The R-Car Gen4 datasheet doesn't describe the PHY registers' name.
+	 * But, the initialization procedure describes these offsets. So,
+	 * this driver has magical offset numbers.
+	 */
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(28), 0);
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(20), 0);
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(12), 0);
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(4), 0);
+
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22));
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16));
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6));
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(11, 0));
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15));
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26));
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0);
+	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(19), BIT(19));
+
+	val = readl(rcar->base + PCIERSTCTRL1);
+	val &= ~APP_HOLD_PHY_RST;
+	writel(val, rcar->base + PCIERSTCTRL1);
+
+	ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, !(val & BIT(18)), 100, 10000);
+	if (ret < 0)
+		return ret;
+
+	ret = rcar_gen4_pcie_download_phy_firmware(rcar);
+	if (ret)
+		return ret;
+
+	val = readl(rcar->base + PCIERSTCTRL1);
+	val |= APP_LTSSM_ENABLE;
+	writel(val, rcar->base + PCIERSTCTRL1);
+
+	return 0;
+}
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie = {
+	.ltssm_control = r8a779f0_pcie_ltssm_control,
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie_ep = {
+	.ltssm_control = r8a779f0_pcie_ltssm_control,
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie = {
+	.additional_common_init = rcar_gen4_pcie_additional_common_init,
+	.ltssm_control = rcar_gen4_pcie_ltssm_control,
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie_ep = {
+	.additional_common_init = rcar_gen4_pcie_additional_common_init,
+	.ltssm_control = rcar_gen4_pcie_ltssm_control,
+	.mode = DW_PCIE_EP_TYPE,
+};
+
 static const struct of_device_id rcar_gen4_pcie_of_match[] = {
+	{
+		.compatible = "renesas,r8a779f0-pcie",
+		.data = &drvdata_r8a779f0_pcie,
+	},
+	{
+		.compatible = "renesas,r8a779f0-pcie-ep",
+		.data = &drvdata_r8a779f0_pcie_ep,
+	},
 	{
 		.compatible = "renesas,rcar-gen4-pcie",
-		.data = (void *)DW_PCIE_RC_TYPE,
+		.data = &drvdata_rcar_gen4_pcie,
 	},
 	{
 		.compatible = "renesas,rcar-gen4-pcie-ep",
-		.data = (void *)DW_PCIE_EP_TYPE,
+		.data = &drvdata_rcar_gen4_pcie_ep,
 	},
 	{},
 };

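Editor's illustration (not part of the backport): the firmware loader above assembles each 16-bit word from two consecutive little-endian bytes. The same packing can be expressed with the kernel's unaligned helpers:

```
#include <asm/unaligned.h>

/* Sketch only: equivalent to fw->data[(i * 2) + 1] << 8 | fw->data[i * 2]. */
static u16 foo_fw_word(const struct firmware *fw, unsigned int i)
{
	return get_unaligned_le16(fw->data + i * 2);
}
```
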
@@ -13,7 +13,6 @@
 #include <linux/clk.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/interconnect.h>
 #include <linux/interrupt.h>
@@ -21,7 +20,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/of_pci.h>
 #include <linux/pci.h>
 #include <linux/phy/phy.h>
@@ -308,10 +306,6 @@ static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
 	return readl_relaxed(pcie->appl_base + reg);
 }
 
-struct tegra_pcie_soc {
-	enum dw_pcie_device_mode mode;
-};
-
 static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
 {
 	struct dw_pcie *pci = &pcie->pci;
@@ -1715,6 +1709,7 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
 	if (ret)
 		dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
 
+	pci_epc_deinit_notify(pcie->pci.ep.epc);
 	dw_pcie_ep_cleanup(&pcie->pci.ep);
 
 	reset_control_assert(pcie->core_rst);
@@ -1903,7 +1898,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
 		goto fail_init_complete;
 	}
 
-	dw_pcie_ep_init_notify(ep);
+	pci_epc_init_notify(ep->epc);
 
 	/* Program the private control to allow sending LTR upstream */
 	if (pcie->of_data->has_ltr_req_fix) {
@@ -2015,6 +2010,7 @@ static const struct pci_epc_features tegra_pcie_epc_features = {
 	.bar[BAR_3] = { .type = BAR_RESERVED, },
 	.bar[BAR_4] = { .type = BAR_RESERVED, },
 	.bar[BAR_5] = { .type = BAR_RESERVED, },
+	.align = SZ_64K,
 };
 
 static const struct pci_epc_features*

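Editor's illustration (not part of the backport): the new '.align = SZ_64K' advertises the controller's BAR allocation granularity to endpoint function drivers, which consume it through pci_epf_alloc_space(). A sketch under the assumption of the v6.11 EPF API:

```
/* Sketch only: "foo" is hypothetical; pci_epf_alloc_space() rounds the
 * BAR allocation up to the alignment advertised in epc_features.
 */
static int foo_epf_alloc_bar(struct pci_epf *epf)
{
	const struct pci_epc_features *features;
	void *base;

	features = pci_epc_get_features(epf->epc, epf->func_no, epf->vfunc_no);
	base = pci_epf_alloc_space(epf, SZ_4K, BAR_0, features,
				   PRIMARY_INTERFACE);

	return base ? 0 : -ENOMEM;
}
```
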
@@ -410,7 +410,7 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	dw_pcie_ep_init_notify(&priv->pci.ep);
+	pci_epc_init_notify(priv->pci.ep.epc);
 
 	return 0;
 }

@@ -190,7 +190,7 @@ static void ls_g4_pcie_reset(struct work_struct *work)
 	ls_g4_pcie_enable_interrupt(pcie);
 }
 
-static struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
+static const struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
 	.interrupt_init = ls_g4_pcie_interrupt_init,
 };
 
@@ -151,7 +151,7 @@ struct mobiveil_rp_ops {
 struct mobiveil_root_port {
 	void __iomem *config_axi_slave_base;	/* endpoint config base */
 	struct resource *ob_io_res;
-	struct mobiveil_rp_ops *ops;
+	const struct mobiveil_rp_ops *ops;
 	int irq;
 	raw_spinlock_t intx_mask_lock;
 	struct irq_domain *intx_domain;

@@ -23,7 +23,6 @@
 #include <linux/platform_device.h>
 #include <linux/msi.h>
 #include <linux/of_address.h>
-#include <linux/of_gpio.h>
 #include <linux/of_pci.h>
 
 #include "../pci.h"

@@ -73,10 +73,6 @@ int pci_host_common_probe(struct platform_device *pdev)
 	if (IS_ERR(cfg))
 		return PTR_ERR(cfg);
 
-	/* Do not reassign resources if probe only */
-	if (!pci_has_flag(PCI_PROBE_ONLY))
-		pci_add_flags(PCI_REASSIGN_ALL_BUS);
-
 	bridge->sysdata = cfg;
 	bridge->ops = (struct pci_ops *)&ops->pci_ops;
 	bridge->msi_domain = true;
@@ -96,4 +92,5 @@ void pci_host_common_remove(struct platform_device *pdev)
 }
 EXPORT_SYMBOL_GPL(pci_host_common_remove);
 
+MODULE_DESCRIPTION("Generic PCI host common driver");
 MODULE_LICENSE("GPL v2");

@@ -86,4 +86,5 @@ static struct platform_driver gen_pci_driver = {
 };
 module_platform_driver(gen_pci_driver);
 
+MODULE_DESCRIPTION("Generic PCI host controller driver");
 MODULE_LICENSE("GPL v2");

@ -163,6 +163,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
|
|||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
|
||||
DEV_LS7A_HDMI, loongson_pci_pin_quirk);
|
||||
|
||||
static void loongson_pci_msi_quirk(struct pci_dev *dev)
|
||||
{
|
||||
u16 val, class = dev->class >> 8;
|
||||
|
||||
if (class != PCI_CLASS_BRIDGE_HOST)
|
||||
return;
|
||||
|
||||
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &val);
|
||||
val |= PCI_MSI_FLAGS_ENABLE;
|
||||
pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val);
|
||||
}
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk);
|
||||
|
||||
static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
|
||||
{
|
||||
struct pci_config_window *cfg;
|
||||
|
|
|
@ -1375,7 +1375,7 @@ static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
|
|||
struct tegra_pcie_port *port;
|
||||
int err;
|
||||
|
||||
if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
|
||||
if (!soc->has_gen2 || of_property_present(np, "phys"))
|
||||
return tegra_pcie_phys_get_legacy(pcie);
|
||||
|
||||
list_for_each_entry(port, &pcie->ports, list) {
|
||||
|
@ -1944,7 +1944,7 @@ static bool of_regulator_bulk_available(struct device_node *np,
|
|||
for (i = 0; i < num_supplies; i++) {
|
||||
snprintf(property, 32, "%s-supply", supplies[i].supply);
|
||||
|
||||
if (of_find_property(np, property, NULL) == NULL)
|
||||
if (!of_property_present(np, property))
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@ -290,4 +290,5 @@ static void __exit altera_msi_exit(void)
|
|||
subsys_initcall(altera_msi_init);
|
||||
MODULE_DEVICE_TABLE(of, altera_msi_of_match);
|
||||
module_exit(altera_msi_exit);
|
||||
MODULE_DESCRIPTION("Altera PCIe MSI support driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
|
|
@ -826,4 +826,5 @@ static struct platform_driver altera_pcie_driver = {
|
|||
|
||||
MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
|
||||
module_platform_driver(altera_pcie_driver);
|
||||
MODULE_DESCRIPTION("Altera PCIe host controller driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
|
|
@ -1091,4 +1091,5 @@ static struct platform_driver mtk_pcie_driver = {
|
|||
};
|
||||
|
||||
module_platform_driver(mtk_pcie_driver);
|
||||
MODULE_DESCRIPTION("MediaTek Gen3 PCIe host controller driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
|
|
@ -649,7 +649,7 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
|
|||
return err;
|
||||
}
|
||||
|
||||
if (of_find_property(dev->of_node, "interrupt-names", NULL))
|
||||
if (of_property_present(dev->of_node, "interrupt-names"))
|
||||
port->irq = platform_get_irq_byname(pdev, "pcie_irq");
|
||||
else
|
||||
port->irq = platform_get_irq(pdev, port->slot);
|
||||
|
@ -1252,4 +1252,5 @@ static struct platform_driver mtk_pcie_driver = {
|
|||
},
|
||||
};
|
||||
module_platform_driver(mtk_pcie_driver);
|
||||
MODULE_DESCRIPTION("MediaTek PCIe host controller driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
|
|
@ -549,4 +549,5 @@ static struct platform_driver mt7621_pcie_driver = {
|
|||
};
|
||||
builtin_platform_driver(mt7621_pcie_driver);
|
||||
|
||||
MODULE_DESCRIPTION("MediaTek MT7621 PCIe host controller driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
|
|
@ -78,7 +78,11 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
|
|||
writel(L1IATN, pcie_base + PMCTLR);
|
||||
ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
|
||||
val & L1FAEG, 10, 1000);
|
||||
WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
|
||||
if (ret) {
|
||||
dev_warn_ratelimited(pcie_dev,
|
||||
"Timeout waiting for L1 link state, ret=%d\n",
|
||||
ret);
|
||||
}
|
||||
writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
|
||||
}
|
||||
|
||||
|
|
|
@ -322,8 +322,11 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
|
|||
rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
|
||||
PCIE_CLIENT_CONFIG);
|
||||
|
||||
msleep(PCIE_T_PVPERL_MS);
|
||||
gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
|
||||
|
||||
msleep(PCIE_T_RRS_READY_MS);
|
||||
|
||||
/* 500ms timeout value should be enough for Gen1/2 training */
|
||||
err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
|
||||
status, PCIE_LINK_UP(status), 20,
|
||||
|
|
|
@ -121,7 +121,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
|
|||
|
||||
if (rockchip->is_rc) {
|
||||
rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
|
||||
GPIOD_OUT_HIGH);
|
||||
GPIOD_OUT_LOW);
|
||||
if (IS_ERR(rockchip->ep_gpio))
|
||||
return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
|
||||
"failed to get ep GPIO\n");
|
||||
|
|
|
@ -925,6 +925,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
|
|||
dev_set_msi_domain(&vmd->bus->dev,
|
||||
dev_get_msi_domain(&vmd->dev->dev));
|
||||
|
||||
WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
|
||||
"domain"), "Can't create symlink to domain\n");
|
||||
|
||||
vmd_acpi_begin();
|
||||
|
||||
pci_scan_child_bus(vmd->bus);
|
||||
|
@ -964,9 +967,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
|
|||
pci_bus_add_devices(vmd->bus);
|
||||
|
||||
vmd_acpi_end();
|
||||
|
||||
WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
|
||||
"domain"), "Can't create symlink to domain\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1042,8 +1042,8 @@ static void vmd_remove(struct pci_dev *dev)
|
|||
{
|
||||
struct vmd_dev *vmd = pci_get_drvdata(dev);
|
||||
|
||||
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
|
||||
pci_stop_root_bus(vmd->bus);
|
||||
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
|
||||
pci_remove_root_bus(vmd->bus);
|
||||
vmd_cleanup_srcu(vmd);
|
||||
vmd_detach_resources(vmd);
|
||||
|
@ -1128,5 +1128,6 @@ static struct pci_driver vmd_drv = {
|
|||
module_pci_driver(vmd_drv);
|
||||
|
||||
MODULE_AUTHOR("Intel Corporation");
|
||||
MODULE_DESCRIPTION("Volume Management Device driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_VERSION("0.6");
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -485,7 +485,7 @@ static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
|
|||
epf_mhi->dma_chan_rx = NULL;
|
||||
}
|
||||
|
||||
static int pci_epf_mhi_core_init(struct pci_epf *epf)
|
||||
static int pci_epf_mhi_epc_init(struct pci_epf *epf)
|
||||
{
|
||||
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
|
||||
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
|
||||
|
@ -522,9 +522,35 @@ static int pci_epf_mhi_core_init(struct pci_epf *epf)
|
|||
if (!epf_mhi->epc_features)
|
||||
return -ENODATA;
|
||||
|
||||
if (info->flags & MHI_EPF_USE_DMA) {
|
||||
ret = pci_epf_mhi_dma_init(epf_mhi);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to initialize DMA: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pci_epf_mhi_epc_deinit(struct pci_epf *epf)
|
||||
{
|
||||
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
|
||||
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
|
||||
struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
|
||||
struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
|
||||
struct pci_epc *epc = epf->epc;
|
||||
|
||||
if (mhi_cntrl->mhi_dev) {
|
||||
mhi_ep_power_down(mhi_cntrl);
|
||||
if (info->flags & MHI_EPF_USE_DMA)
|
||||
pci_epf_mhi_dma_deinit(epf_mhi);
|
||||
mhi_ep_unregister_controller(mhi_cntrl);
|
||||
}
|
||||
|
||||
pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
|
||||
}
|
||||
|
||||
static int pci_epf_mhi_link_up(struct pci_epf *epf)
|
||||
{
|
||||
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
|
||||
|
@ -534,14 +560,6 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
|
|||
struct device *dev = &epf->dev;
|
||||
int ret;
|
||||
|
||||
if (info->flags & MHI_EPF_USE_DMA) {
|
||||
ret = pci_epf_mhi_dma_init(epf_mhi);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to initialize DMA: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
mhi_cntrl->mmio = epf_mhi->mmio;
|
||||
mhi_cntrl->irq = epf_mhi->irq;
|
||||
mhi_cntrl->mru = info->mru;
|
||||
|
@ -587,7 +605,7 @@ static int pci_epf_mhi_link_down(struct pci_epf *epf)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int pci_epf_mhi_bme(struct pci_epf *epf)
|
||||
static int pci_epf_mhi_bus_master_enable(struct pci_epf *epf)
|
||||
{
|
||||
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
|
||||
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
|
||||
|
@ -650,8 +668,8 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
|
|||
|
||||
/*
|
||||
* Forcefully power down the MHI EP stack. Only way to bring the MHI EP
|
||||
* stack back to working state after successive bind is by getting BME
|
||||
* from host.
|
||||
* stack back to working state after successive bind is by getting Bus
|
||||
* Master Enable event from host.
|
||||
*/
|
||||
if (mhi_cntrl->mhi_dev) {
|
||||
mhi_ep_power_down(mhi_cntrl);
|
||||
|
@ -665,10 +683,11 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
|
|||
}
|
||||
|
||||
static const struct pci_epc_event_ops pci_epf_mhi_event_ops = {
|
||||
.core_init = pci_epf_mhi_core_init,
|
||||
.epc_init = pci_epf_mhi_epc_init,
|
||||
.epc_deinit = pci_epf_mhi_epc_deinit,
|
||||
.link_up = pci_epf_mhi_link_up,
|
||||
.link_down = pci_epf_mhi_link_down,
|
||||
.bme = pci_epf_mhi_bme,
|
||||
.bus_master_enable = pci_epf_mhi_bus_master_enable,
|
||||
};
|
||||
|
||||
static int pci_epf_mhi_probe(struct pci_epf *epf,
|
||||
|
|
|
@ -686,25 +686,6 @@ reset_handler:
|
|||
msecs_to_jiffies(1));
|
||||
}
|
||||
|
||||
static void pci_epf_test_unbind(struct pci_epf *epf)
|
||||
{
|
||||
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
|
||||
struct pci_epc *epc = epf->epc;
|
||||
int bar;
|
||||
|
||||
cancel_delayed_work(&epf_test->cmd_handler);
|
||||
pci_epf_test_clean_dma_chan(epf_test);
|
||||
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
|
||||
if (!epf_test->reg[bar])
|
||||
continue;
|
||||
|
||||
pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
|
||||
&epf->bar[bar]);
|
||||
pci_epf_free_space(epf, epf_test->reg[bar], bar,
|
||||
PRIMARY_INTERFACE);
|
||||
}
|
||||
}
|
||||
|
||||
static int pci_epf_test_set_bar(struct pci_epf *epf)
|
||||
{
|
||||
int bar, ret;
|
||||
|
@ -731,23 +712,36 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int pci_epf_test_core_init(struct pci_epf *epf)
|
||||
static void pci_epf_test_clear_bar(struct pci_epf *epf)
|
||||
{
|
||||
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
|
||||
struct pci_epc *epc = epf->epc;
|
||||
int bar;
|
||||
|
||||
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
|
||||
if (!epf_test->reg[bar])
|
||||
continue;
|
||||
|
||||
pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
|
||||
&epf->bar[bar]);
|
||||
}
|
||||
}
|
||||
|
||||
static int pci_epf_test_epc_init(struct pci_epf *epf)
|
||||
{
|
||||
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
|
||||
struct pci_epf_header *header = epf->header;
|
||||
const struct pci_epc_features *epc_features;
|
||||
const struct pci_epc_features *epc_features = epf_test->epc_features;
|
||||
struct pci_epc *epc = epf->epc;
|
||||
struct device *dev = &epf->dev;
|
||||
bool linkup_notifier = false;
|
||||
bool msix_capable = false;
|
||||
bool msi_capable = true;
|
||||
int ret;
|
||||
|
||||
epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
|
||||
if (epc_features) {
|
||||
msix_capable = epc_features->msix_capable;
|
||||
msi_capable = epc_features->msi_capable;
|
||||
}
|
||||
epf_test->dma_supported = true;
|
||||
|
||||
ret = pci_epf_test_init_dma_chan(epf_test);
|
||||
if (ret)
|
||||
epf_test->dma_supported = false;
|
||||
|
||||
if (epf->vfunc_no <= 1) {
|
||||
ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
|
||||
|
@ -761,7 +755,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (msi_capable) {
|
||||
if (epc_features->msi_capable) {
|
||||
ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
|
||||
epf->msi_interrupts);
|
||||
if (ret) {
|
||||
|
@ -770,7 +764,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
|
|||
}
|
||||
}
|
||||
|
||||
if (msix_capable) {
|
||||
if (epc_features->msix_capable) {
|
||||
ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
|
||||
epf->msix_interrupts,
|
||||
epf_test->test_reg_bar,
|
||||
|
@ -788,6 +782,15 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void pci_epf_test_epc_deinit(struct pci_epf *epf)
|
||||
{
|
||||
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
|
||||
|
||||
cancel_delayed_work(&epf_test->cmd_handler);
|
||||
pci_epf_test_clean_dma_chan(epf_test);
|
||||
pci_epf_test_clear_bar(epf);
|
||||
}
|
||||
|
||||
static int pci_epf_test_link_up(struct pci_epf *epf)
|
||||
{
|
||||
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
|
||||
|
@ -798,9 +801,20 @@ static int pci_epf_test_link_up(struct pci_epf *epf)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int pci_epf_test_link_down(struct pci_epf *epf)
|
||||
{
|
||||
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
|
||||
|
||||
cancel_delayed_work_sync(&epf_test->cmd_handler);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
|
||||
.core_init = pci_epf_test_core_init,
|
||||
.epc_init = pci_epf_test_epc_init,
|
||||
.epc_deinit = pci_epf_test_epc_deinit,
|
||||
.link_up = pci_epf_test_link_up,
|
||||
.link_down = pci_epf_test_link_down,
|
||||
};
|
||||
|
||||
static int pci_epf_test_alloc_space(struct pci_epf *epf)
|
||||
|
@ -810,19 +824,15 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
|
|||
size_t msix_table_size = 0;
|
||||
size_t test_reg_bar_size;
|
||||
size_t pba_size = 0;
|
||||
bool msix_capable;
|
||||
void *base;
|
||||
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
|
||||
enum pci_barno bar;
|
||||
const struct pci_epc_features *epc_features;
|
||||
const struct pci_epc_features *epc_features = epf_test->epc_features;
|
||||
size_t test_reg_size;
|
||||
|
||||
epc_features = epf_test->epc_features;
|
||||
|
||||
test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
|
||||
|
||||
msix_capable = epc_features->msix_capable;
|
||||
if (msix_capable) {
|
||||
if (epc_features->msix_capable) {
|
||||
msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
|
||||
epf_test->msix_table_offset = test_reg_bar_size;
|
||||
/* Align to QWORD or 8 Bytes */
|
||||
|
@ -857,6 +867,20 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void pci_epf_test_free_space(struct pci_epf *epf)
|
||||
{
|
||||
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
|
||||
int bar;
|
||||
|
||||
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
|
||||
if (!epf_test->reg[bar])
|
||||
continue;
|
||||
|
||||
pci_epf_free_space(epf, epf_test->reg[bar], bar,
|
||||
PRIMARY_INTERFACE);
|
||||
}
|
||||
}
|
||||
|
||||
static int pci_epf_test_bind(struct pci_epf *epf)
|
||||
{
|
||||
int ret;
|
||||
|
@ -885,15 +909,22 @@ static int pci_epf_test_bind(struct pci_epf *epf)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
epf_test->dma_supported = true;
|
||||
|
||||
ret = pci_epf_test_init_dma_chan(epf_test);
|
||||
if (ret)
|
||||
epf_test->dma_supported = false;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pci_epf_test_unbind(struct pci_epf *epf)
|
||||
{
|
||||
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
|
||||
struct pci_epc *epc = epf->epc;
|
||||
|
||||
cancel_delayed_work(&epf_test->cmd_handler);
|
||||
if (epc->init_complete) {
|
||||
pci_epf_test_clean_dma_chan(epf_test);
|
||||
pci_epf_test_clear_bar(epf);
|
||||
}
|
||||
pci_epf_test_free_space(epf);
|
||||
}
|
||||
|
||||
static const struct pci_epf_device_id pci_epf_test_ids[] = {
|
||||
{
|
||||
.name = "pci_epf_test",
|
||||
|
|
|
@ -799,8 +799,9 @@ err_config_interrupt:
|
|||
*/
|
||||
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
|
||||
{
|
||||
epf_ntb_db_bar_clear(ntb);
|
||||
epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
|
||||
epf_ntb_db_bar_clear(ntb);
|
||||
epf_ntb_config_sspad_bar_clear(ntb);
|
||||
}
|
||||
|
||||
#define EPF_NTB_R(_name) \
|
||||
|
@ -1018,8 +1019,10 @@ static int vpci_scan_bus(void *sysdata)
|
|||
struct epf_ntb *ndev = sysdata;
|
||||
|
||||
vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
|
||||
if (vpci_bus)
|
||||
pr_err("create pci bus\n");
|
||||
if (!vpci_bus) {
|
||||
pr_err("create pci bus failed\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pci_bus_add_devices(vpci_bus);
|
||||
|
||||
|
@ -1335,13 +1338,19 @@ static int epf_ntb_bind(struct pci_epf *epf)
|
|||
ret = pci_register_driver(&vntb_pci_driver);
|
||||
if (ret) {
|
||||
dev_err(dev, "failure register vntb pci driver\n");
|
||||
goto err_bar_alloc;
|
||||
goto err_epc_cleanup;
|
||||
}
|
||||
|
||||
vpci_scan_bus(ntb);
|
||||
ret = vpci_scan_bus(ntb);
|
||||
if (ret)
|
||||
goto err_unregister;
|
||||
|
||||
return 0;
|
||||
|
||||
err_unregister:
|
||||
pci_unregister_driver(&vntb_pci_driver);
|
||||
err_epc_cleanup:
|
||||
epf_ntb_epc_cleanup(ntb);
|
||||
err_bar_alloc:
|
||||
epf_ntb_config_spad_bar_free(ntb);
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@ struct pci_epf_group {
|
|||
struct config_group group;
|
||||
struct config_group primary_epc_group;
|
||||
struct config_group secondary_epc_group;
|
||||
struct config_group *type_group;
|
||||
struct delayed_work cfs_work;
|
||||
struct pci_epf *epf;
|
||||
int index;
|
||||
|
|
|
@ -14,7 +14,9 @@
|
|||
#include <linux/pci-epf.h>
|
||||
#include <linux/pci-ep-cfs.h>
|
||||
|
||||
static struct class *pci_epc_class;
|
||||
static const struct class pci_epc_class = {
|
||||
.name = "pci_epc",
|
||||
};
|
||||
|
||||
static void devm_pci_epc_release(struct device *dev, void *res)
|
||||
{
|
||||
|
@ -60,7 +62,7 @@ struct pci_epc *pci_epc_get(const char *epc_name)
|
|||
struct device *dev;
|
||||
struct class_dev_iter iter;
|
||||
|
||||
class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
|
||||
class_dev_iter_init(&iter, &pci_epc_class, NULL, NULL);
|
||||
while ((dev = class_dev_iter_next(&iter))) {
|
||||
if (strcmp(epc_name, dev_name(dev)))
|
||||
continue;
|
||||
|
@ -727,9 +729,9 @@ void pci_epc_linkdown(struct pci_epc *epc)
|
|||
EXPORT_SYMBOL_GPL(pci_epc_linkdown);
|
||||
|
||||
/**
|
||||
* pci_epc_init_notify() - Notify the EPF device that EPC device's core
|
||||
* initialization is completed.
|
||||
* @epc: the EPC device whose core initialization is completed
|
||||
* pci_epc_init_notify() - Notify the EPF device that EPC device initialization
|
||||
* is completed.
|
||||
* @epc: the EPC device whose initialization is completed
|
||||
*
|
||||
* Invoke to Notify the EPF device that the EPC device's initialization
|
||||
* is completed.
|
||||
|
@ -744,8 +746,8 @@ void pci_epc_init_notify(struct pci_epc *epc)
|
|||
mutex_lock(&epc->list_lock);
|
||||
list_for_each_entry(epf, &epc->pci_epf, list) {
|
||||
mutex_lock(&epf->lock);
|
||||
if (epf->event_ops && epf->event_ops->core_init)
|
||||
epf->event_ops->core_init(epf);
|
||||
if (epf->event_ops && epf->event_ops->epc_init)
|
||||
epf->event_ops->epc_init(epf);
|
||||
mutex_unlock(&epf->lock);
|
||||
}
|
||||
epc->init_complete = true;
|
||||
|
@ -756,7 +758,7 @@ EXPORT_SYMBOL_GPL(pci_epc_init_notify);
|
|||
/**
|
||||
* pci_epc_notify_pending_init() - Notify the pending EPC device initialization
|
||||
* complete to the EPF device
|
||||
* @epc: the EPC device whose core initialization is pending to be notified
|
||||
* @epc: the EPC device whose initialization is pending to be notified
|
||||
* @epf: the EPF device to be notified
|
||||
*
|
||||
* Invoke to notify the pending EPC device initialization complete to the EPF
|
||||
|
@ -767,22 +769,20 @@ void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
|
|||
{
|
||||
if (epc->init_complete) {
|
||||
mutex_lock(&epf->lock);
|
||||
if (epf->event_ops && epf->event_ops->core_init)
|
||||
epf->event_ops->core_init(epf);
|
||||
if (epf->event_ops && epf->event_ops->epc_init)
|
||||
epf->event_ops->epc_init(epf);
|
||||
mutex_unlock(&epf->lock);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);
|
||||
|
||||
/**
|
||||
* pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
|
||||
* the BME event from the Root complex
|
||||
* @epc: the EPC device that received the BME event
|
||||
* pci_epc_deinit_notify() - Notify the EPF device about EPC deinitialization
|
||||
* @epc: the EPC device whose deinitialization is completed
|
||||
*
|
||||
* Invoke to Notify the EPF device that the EPC device has received the Bus
|
||||
* Master Enable (BME) event from the Root complex
|
||||
* Invoke to notify the EPF device that the EPC deinitialization is completed.
|
||||
*/
|
||||
void pci_epc_bme_notify(struct pci_epc *epc)
|
||||
void pci_epc_deinit_notify(struct pci_epc *epc)
|
||||
{
|
||||
struct pci_epf *epf;
|
||||
|
||||
|
@ -792,13 +792,41 @@ void pci_epc_bme_notify(struct pci_epc *epc)
|
|||
mutex_lock(&epc->list_lock);
|
||||
list_for_each_entry(epf, &epc->pci_epf, list) {
|
||||
mutex_lock(&epf->lock);
|
||||
if (epf->event_ops && epf->event_ops->bme)
|
||||
epf->event_ops->bme(epf);
|
||||
if (epf->event_ops && epf->event_ops->epc_deinit)
|
||||
epf->event_ops->epc_deinit(epf);
|
||||
mutex_unlock(&epf->lock);
|
||||
}
|
||||
epc->init_complete = false;
|
||||
mutex_unlock(&epc->list_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pci_epc_deinit_notify);
|
||||
|
||||
/**
|
||||
* pci_epc_bus_master_enable_notify() - Notify the EPF device that the EPC
|
||||
* device has received the Bus Master
|
||||
* Enable event from the Root complex
|
||||
* @epc: the EPC device that received the Bus Master Enable event
|
||||
*
|
||||
* Notify the EPF device that the EPC device has generated the Bus Master Enable
|
||||
* event due to host setting the Bus Master Enable bit in the Command register.
|
||||
*/
|
||||
void pci_epc_bus_master_enable_notify(struct pci_epc *epc)
|
||||
{
|
||||
struct pci_epf *epf;
|
||||
|
||||
if (IS_ERR_OR_NULL(epc))
|
||||
return;
|
||||
|
||||
mutex_lock(&epc->list_lock);
|
||||
list_for_each_entry(epf, &epc->pci_epf, list) {
|
||||
mutex_lock(&epf->lock);
|
||||
if (epf->event_ops && epf->event_ops->bus_master_enable)
|
||||
epf->event_ops->bus_master_enable(epf);
|
||||
mutex_unlock(&epf->lock);
|
||||
}
|
||||
mutex_unlock(&epc->list_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
|
||||
EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);
|
||||
|
||||
/**
|
||||
* pci_epc_destroy() - destroy the EPC device
|
||||
|
@ -867,7 +895,7 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
|
|||
INIT_LIST_HEAD(&epc->pci_epf);
|
||||
|
||||
device_initialize(&epc->dev);
|
||||
epc->dev.class = pci_epc_class;
|
||||
epc->dev.class = &pci_epc_class;
|
||||
epc->dev.parent = dev;
|
||||
epc->dev.release = pci_epc_release;
|
||||
epc->ops = ops;
|
||||
|
@ -927,20 +955,13 @@ EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
|
|||
|
||||
static int __init pci_epc_init(void)
|
||||
{
|
||||
pci_epc_class = class_create("pci_epc");
|
||||
if (IS_ERR(pci_epc_class)) {
|
||||
pr_err("failed to create pci epc class --> %ld\n",
|
||||
PTR_ERR(pci_epc_class));
|
||||
return PTR_ERR(pci_epc_class);
|
||||
}
|
||||
|
||||
return 0;
|
||||
return class_register(&pci_epc_class);
|
||||
}
|
||||
module_init(pci_epc_init);
|
||||
|
||||
static void __exit pci_epc_exit(void)
|
||||
{
|
||||
class_destroy(pci_epc_class);
|
||||
class_unregister(&pci_epc_class);
|
||||
}
|
||||
module_exit(pci_epc_exit);
|
||||
|
||||
|
|
|
@ -278,7 +278,7 @@ int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (of_find_property(dn->parent, "ibm,drc-info", NULL))
|
||||
if (of_property_present(dn->parent, "ibm,drc-info"))
|
||||
return rpaphp_check_drc_props_v2(dn, drc_name, drc_type,
|
||||
be32_to_cpu(*my_index));
|
||||
else
|
||||
|
@ -440,7 +440,7 @@ int rpaphp_add_slot(struct device_node *dn)
|
|||
if (!of_node_name_eq(dn, "pci"))
|
||||
return 0;
|
||||
|
||||
if (of_find_property(dn, "ibm,drc-info", NULL))
|
||||
if (of_property_present(dn, "ibm,drc-info"))
|
||||
return rpaphp_drc_info_add_slot(dn);
|
||||
else
|
||||
return rpaphp_drc_add_slot(dn);
|
||||
|
|
|
@ -23,6 +23,10 @@
|
|||
*
|
||||
* @maxlen specifies the maximum length to map. If you want to get access to
|
||||
* the complete BAR from offset to the end, pass %0 here.
|
||||
*
|
||||
* NOTE:
|
||||
* This function is never managed, even if you initialized with
|
||||
* pcim_enable_device().
|
||||
* */
|
||||
void __iomem *pci_iomap_range(struct pci_dev *dev,
|
||||
int bar,
|
||||
|
@ -63,6 +67,10 @@ EXPORT_SYMBOL(pci_iomap_range);
|
|||
*
|
||||
* @maxlen specifies the maximum length to map. If you want to get access to
|
||||
* the complete BAR from offset to the end, pass %0 here.
|
||||
*
|
||||
* NOTE:
|
||||
* This function is never managed, even if you initialized with
|
||||
* pcim_enable_device().
|
||||
* */
|
||||
void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
|
||||
int bar,
|
||||
|
@ -106,6 +114,10 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
|
|||
*
|
||||
* @maxlen specifies the maximum length to map. If you want to get access to
|
||||
* the complete BAR without checking for its length first, pass %0 here.
|
||||
*
|
||||
* NOTE:
|
||||
* This function is never managed, even if you initialized with
|
||||
* pcim_enable_device(). If you need automatic cleanup, use pcim_iomap().
|
||||
* */
|
||||
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
|
||||
{
|
||||
|
@ -127,6 +139,10 @@ EXPORT_SYMBOL(pci_iomap);
|
|||
*
|
||||
* @maxlen specifies the maximum length to map. If you want to get access to
|
||||
* the complete BAR without checking for its length first, pass %0 here.
|
||||
*
|
||||
* NOTE:
|
||||
* This function is never managed, even if you initialized with
|
||||
* pcim_enable_device().
|
||||
* */
|
||||
void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
|
||||
{
|
||||
|
|
|
@ -34,11 +34,6 @@ int pci_set_of_node(struct pci_dev *dev)
|
|||
if (!node)
|
||||
return 0;
|
||||
|
||||
if (!of_device_is_available(node)) {
|
||||
of_node_put(node);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
device_set_node(&dev->dev, of_fwnode_handle(node));
|
||||
return 0;
|
||||
}
|
||||
|
@ -238,28 +233,62 @@ int of_get_pci_domain_nr(struct device_node *node)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
|
||||
|
||||
/**
|
||||
* of_pci_preserve_config - Return true if the boot configuration needs to
|
||||
* be preserved
|
||||
* @node: Device tree node.
|
||||
*
|
||||
* Look for "linux,pci-probe-only" property for a given PCI controller's
|
||||
* node and return true if found. Also look in the chosen node if the
|
||||
* property is not found in the given controller's node. Having this
|
||||
* property ensures that the kernel doesn't reconfigure the BARs and bridge
|
||||
* windows that are already done by the platform firmware.
|
||||
*
|
||||
* Return: true if the property exists; false otherwise.
|
||||
*/
|
||||
bool of_pci_preserve_config(struct device_node *node)
|
||||
{
|
||||
u32 val = 0;
|
||||
int ret;
|
||||
|
||||
if (!node) {
|
||||
pr_warn("device node is NULL, trying with of_chosen\n");
|
||||
node = of_chosen;
|
||||
}
|
||||
|
||||
retry:
|
||||
ret = of_property_read_u32(node, "linux,pci-probe-only", &val);
|
||||
if (ret) {
|
||||
if (ret == -ENODATA || ret == -EOVERFLOW) {
|
||||
pr_warn("Incorrect value for linux,pci-probe-only in %pOF, ignoring\n",
|
||||
node);
|
||||
return false;
|
||||
}
|
||||
if (ret == -EINVAL) {
|
||||
if (node == of_chosen)
|
||||
return false;
|
||||
|
||||
node = of_chosen;
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
|
||||
if (val)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
|
||||
* is present and valid
|
||||
*/
|
||||
void of_pci_check_probe_only(void)
|
||||
{
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
|
||||
if (ret) {
|
||||
if (ret == -ENODATA || ret == -EOVERFLOW)
|
||||
pr_warn("linux,pci-probe-only without valid value, ignoring\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (val)
|
||||
if (of_pci_preserve_config(of_chosen))
|
||||
pci_add_flags(PCI_PROBE_ONLY);
|
||||
else
|
||||
pci_clear_flags(PCI_PROBE_ONLY);
|
||||
|
||||
pr_info("PROBE_ONLY %s\n", val ? "enabled" : "disabled");
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
|
||||
|
||||
|
@ -429,7 +458,7 @@ failed:
|
|||
*/
|
||||
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
|
||||
{
|
||||
struct device_node *dn, *ppnode;
|
||||
struct device_node *dn, *ppnode = NULL;
|
||||
struct pci_dev *ppdev;
|
||||
__be32 laddr[3];
|
||||
u8 pin;
|
||||
|
@ -458,8 +487,14 @@ static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *
|
|||
if (pin == 0)
|
||||
return -ENODEV;
|
||||
|
||||
/* Local interrupt-map in the device node? Use it! */
|
||||
if (of_property_present(dn, "interrupt-map")) {
|
||||
pin = pci_swizzle_interrupt_pin(pdev, pin);
|
||||
ppnode = dn;
|
||||
}
|
||||
|
||||
/* Now we walk up the PCI tree */
|
||||
for (;;) {
|
||||
while (!ppnode) {
|
||||
/* Get the pci_dev of our parent */
|
||||
ppdev = pdev->bus->self;
|
||||
|
||||
|
|
|
@ -119,6 +119,28 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
|
|||
return (phys_addr_t)mcfg_addr;
|
||||
}
|
||||
|
||||
bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
|
||||
{
|
||||
if (ACPI_HANDLE(&host_bridge->dev)) {
|
||||
union acpi_object *obj;
|
||||
|
||||
/*
|
||||
* Evaluate the "PCI Boot Configuration" _DSM Function. If it
|
||||
* exists and returns 0, we must preserve any PCI resource
|
||||
* assignments made by firmware for this host bridge.
|
||||
*/
|
||||
obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(&host_bridge->dev),
|
||||
&pci_acpi_dsm_guid,
|
||||
1, DSM_PCI_PRESERVE_BOOT_CONFIG,
|
||||
NULL, ACPI_TYPE_INTEGER);
|
||||
if (obj && obj->integer.value == 0)
|
||||
return true;
|
||||
ACPI_FREE(obj);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* _HPX PCI Setting Record (Type 0); same as _HPP */
|
||||
struct hpx_type0 {
|
||||
u32 revision; /* Not present in _HPP */
|
||||
|
|
|
@ -38,8 +38,8 @@ pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
|
|||
* arch/x86/platform/intel-mid/pwr.c.
|
||||
*/
|
||||
static const struct x86_cpu_id lpss_cpu_ids[] = {
|
||||
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, NULL),
|
||||
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL),
|
||||
X86_MATCH_VFM(INTEL_ATOM_SALTWELL_MID, NULL),
|
||||
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, NULL),
|
||||
{}
|
||||
};
|
||||
|
||||
|
|
|
@ -39,4 +39,5 @@ static struct pci_driver pf_stub_driver = {
|
|||
};
|
||||
module_pci_driver(pf_stub_driver);
|
||||
|
||||
MODULE_DESCRIPTION("SR-IOV PF stub driver with no functionality");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -92,5 +92,6 @@ static void __exit pci_stub_exit(void)
|
|||
module_init(pci_stub_init);
|
||||
module_exit(pci_stub_exit);
|
||||
|
||||
MODULE_DESCRIPTION("VM device assignment stub driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Chris Wright <chrisw@sous-sol.org>");
|
||||
|
|
|
@ -2218,12 +2218,6 @@ void pci_disable_enabled_device(struct pci_dev *dev)
|
|||
*/
|
||||
void pci_disable_device(struct pci_dev *dev)
|
||||
{
|
||||
struct pci_devres *dr;
|
||||
|
||||
dr = find_pci_dr(dev);
|
||||
if (dr)
|
||||
dr->enabled = 0;
|
||||
|
||||
dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
|
||||
"disabling already-disabled device");
|
||||
|
||||
|
@ -3872,7 +3866,15 @@ EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
|
|||
*/
|
||||
void pci_release_region(struct pci_dev *pdev, int bar)
|
||||
{
|
||||
struct pci_devres *dr;
|
||||
/*
|
||||
* This is done for backwards compatibility, because the old PCI devres
|
||||
* API had a mode in which the function became managed if it had been
|
||||
* enabled with pcim_enable_device() instead of pci_enable_device().
|
||||
*/
|
||||
if (pci_is_managed(pdev)) {
|
||||
pcim_release_region(pdev, bar);
|
||||
return;
|
||||
}
|
||||
|
||||
if (pci_resource_len(pdev, bar) == 0)
|
||||
return;
|
||||
|
@ -3882,10 +3884,6 @@ void pci_release_region(struct pci_dev *pdev, int bar)
|
|||
else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
|
||||
release_mem_region(pci_resource_start(pdev, bar),
|
||||
pci_resource_len(pdev, bar));
|
||||
|
||||
dr = find_pci_dr(pdev);
|
||||
if (dr)
|
||||
dr->region_mask &= ~(1 << bar);
|
||||
}
|
||||
EXPORT_SYMBOL(pci_release_region);
|
||||
|
||||
|
@ -3896,6 +3894,8 @@ EXPORT_SYMBOL(pci_release_region);
|
|||
* @res_name: Name to be associated with resource.
|
||||
* @exclusive: whether the region access is exclusive or not
|
||||
*
|
||||
* Returns: 0 on success, negative error code on failure.
|
||||
*
|
||||
* Mark the PCI region associated with PCI device @pdev BAR @bar as
|
||||
* being reserved by owner @res_name. Do not access any
|
||||
* address inside the PCI regions unless this call returns
|
||||
|
@ -3911,7 +3911,12 @@ EXPORT_SYMBOL(pci_release_region);
|
|||
static int __pci_request_region(struct pci_dev *pdev, int bar,
|
||||
const char *res_name, int exclusive)
|
||||
{
|
||||
struct pci_devres *dr;
|
||||
if (pci_is_managed(pdev)) {
|
||||
if (exclusive == IORESOURCE_EXCLUSIVE)
|
||||
return pcim_request_region_exclusive(pdev, bar, res_name);
|
||||
|
||||
return pcim_request_region(pdev, bar, res_name);
|
||||
}
|
||||
|
||||
if (pci_resource_len(pdev, bar) == 0)
|
||||
return 0;
|
||||
|
@ -3927,10 +3932,6 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
|
|||
goto err_out;
|
||||
}
|
||||
|
||||
dr = find_pci_dr(pdev);
|
||||
if (dr)
|
||||
dr->region_mask |= 1 << bar;
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
|
@ -3945,6 +3946,8 @@ err_out:
|
|||
* @bar: BAR to be reserved
|
||||
* @res_name: Name to be associated with resource
|
||||
*
|
||||
* Returns: 0 on success, negative error code on failure.
|
||||
*
|
||||
* Mark the PCI region associated with PCI device @pdev BAR @bar as
|
||||
* being reserved by owner @res_name. Do not access any
|
||||
* address inside the PCI regions unless this call returns
|
||||
|
@ -3952,6 +3955,11 @@ err_out:
|
|||
*
|
||||
* Returns 0 on success, or %EBUSY on error. A warning
|
||||
* message is also printed on failure.
|
||||
*
|
||||
* NOTE:
|
||||
* This is a "hybrid" function: It's normally unmanaged, but becomes managed
|
||||
* when pcim_enable_device() has been called in advance. This hybrid feature is
|
||||
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
|
||||
*/
|
||||
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
|
||||
{
|
||||
|
@ -4002,6 +4010,13 @@ err_out:
|
|||
* @pdev: PCI device whose resources are to be reserved
|
||||
* @bars: Bitmask of BARs to be requested
|
||||
* @res_name: Name to be associated with resource
|
||||
*
|
||||
* Returns: 0 on success, negative error code on failure.
|
||||
*
|
||||
* NOTE:
|
||||
* This is a "hybrid" function: It's normally unmanaged, but becomes managed
|
||||
* when pcim_enable_device() has been called in advance. This hybrid feature is
|
||||
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
|
||||
*/
|
||||
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
|
||||
const char *res_name)
|
||||
|
@ -4010,6 +4025,19 @@ int pci_request_selected_regions(struct pci_dev *pdev, int bars,
|
|||
}
|
||||
EXPORT_SYMBOL(pci_request_selected_regions);
|
||||
|
||||
/**
|
||||
* pci_request_selected_regions_exclusive - Request regions exclusively
|
||||
* @pdev: PCI device to request regions from
|
||||
* @bars: bit mask of BARs to request
|
||||
* @res_name: name to be associated with the requests
|
||||
*
|
||||
* Returns: 0 on success, negative error code on failure.
|
||||
*
|
||||
* NOTE:
|
||||
* This is a "hybrid" function: It's normally unmanaged, but becomes managed
|
||||
* when pcim_enable_device() has been called in advance. This hybrid feature is
|
||||
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
|
||||
*/
|
||||
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
|
||||
const char *res_name)
|
||||
{
|
||||
|
@ -4027,7 +4055,6 @@ EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
|
|||
* successful call to pci_request_regions(). Call this function only
|
||||
* after all use of the PCI regions has ceased.
|
||||
*/
|
||||
|
||||
void pci_release_regions(struct pci_dev *pdev)
|
||||
{
|
||||
pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
|
||||
|
@ -4046,6 +4073,11 @@ EXPORT_SYMBOL(pci_release_regions);
|
|||
*
|
||||
* Returns 0 on success, or %EBUSY on error. A warning
|
||||
* message is also printed on failure.
|
||||
*
|
||||
* NOTE:
|
||||
* This is a "hybrid" function: It's normally unmanaged, but becomes managed
|
||||
* when pcim_enable_device() has been called in advance. This hybrid feature is
|
||||
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
|
||||
*/
|
||||
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
|
||||
{
|
||||
|
@ -4059,6 +4091,8 @@ EXPORT_SYMBOL(pci_request_regions);
|
|||
* @pdev: PCI device whose resources are to be reserved
|
||||
* @res_name: Name to be associated with resource.
|
||||
*
|
||||
* Returns: 0 on success, negative error code on failure.
|
||||
*
|
||||
* Mark all PCI regions associated with PCI device @pdev as being reserved
|
||||
* by owner @res_name. Do not access any address inside the PCI regions
|
||||
* unless this call returns successfully.
|
||||
|
@ -4068,6 +4102,11 @@ EXPORT_SYMBOL(pci_request_regions);
|
|||
*
|
||||
* Returns 0 on success, or %EBUSY on error. A warning message is also
|
||||
* printed on failure.
|
||||
*
|
||||
* NOTE:
|
||||
* This is a "hybrid" function: It's normally unmanaged, but becomes managed
|
||||
* when pcim_enable_device() has been called in advance. This hybrid feature is
|
||||
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
|
||||
*/
|
||||
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
|
||||
{
|
||||
|
@ -4399,6 +4438,11 @@ void pci_disable_parity(struct pci_dev *dev)
|
|||
* @enable: boolean: whether to enable or disable PCI INTx
|
||||
*
|
||||
* Enables/disables PCI INTx for device @pdev
|
||||
*
|
||||
* NOTE:
|
||||
* This is a "hybrid" function: It's normally unmanaged, but becomes managed
|
||||
* when pcim_enable_device() has been called in advance. This hybrid feature is
|
||||
* DEPRECATED! If you want managed cleanup, use pcim_intx() instead.
|
||||
*/
|
||||
void pci_intx(struct pci_dev *pdev, int enable)
|
||||
{
|
||||
|
@ -4412,15 +4456,13 @@ void pci_intx(struct pci_dev *pdev, int enable)
|
|||
new = pci_command | PCI_COMMAND_INTX_DISABLE;
|
||||
|
||||
if (new != pci_command) {
|
||||
struct pci_devres *dr;
|
||||
/* Preserve the "hybrid" behavior for backwards compatibility */
|
||||
if (pci_is_managed(pdev)) {
|
||||
WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
|
||||
return;
|
||||
}
|
||||
|
||||
pci_write_config_word(pdev, PCI_COMMAND, new);
|
||||
|
||||
dr = find_pci_dr(pdev);
|
||||
if (dr && !dr->restore_intx) {
|
||||
dr->restore_intx = 1;
|
||||
dr->orig_intx = !enable;
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pci_intx);
|
||||
|
@ -4883,6 +4925,9 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
|
|||
*/
|
||||
int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
|
||||
{
|
||||
if (!dev->block_cfg_access)
|
||||
pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
|
||||
__builtin_return_address(0));
|
||||
pcibios_reset_secondary_bus(dev);
|
||||
|
||||
return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
|
||||
|
|
|
@ -16,12 +16,40 @@
|
|||
/* Power stable to PERST# inactive from PCIe card Electromechanical Spec */
|
||||
#define PCIE_T_PVPERL_MS 100
|
||||
|
||||
/*
|
||||
* End of conventional reset (PERST# de-asserted) to first configuration
|
||||
* request (device able to respond with a "Request Retry Status" completion),
|
||||
* from PCIe r6.0, sec 6.6.1.
|
||||
*/
|
||||
#define PCIE_T_RRS_READY_MS 100
|
||||
|
||||
/*
|
||||
* PCIe r6.0, sec 5.3.3.2.1 <PME Synchronization>
|
||||
* Recommends 1ms to 10ms timeout to check L2 ready.
|
||||
*/
|
||||
#define PCIE_PME_TO_L2_TIMEOUT_US 10000
|
||||
|
||||
/* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */
|
||||
#define PCIE_MSG_TYPE_R_RC 0
|
||||
#define PCIE_MSG_TYPE_R_ADDR 1
|
||||
#define PCIE_MSG_TYPE_R_ID 2
|
||||
#define PCIE_MSG_TYPE_R_BC 3
|
||||
#define PCIE_MSG_TYPE_R_LOCAL 4
|
||||
#define PCIE_MSG_TYPE_R_GATHER 5
|
||||
|
||||
/* Power Management Messages; PCIe r6.0, sec 2.2.8.2 */
|
||||
#define PCIE_MSG_CODE_PME_TURN_OFF 0x19
|
||||
|
||||
/* INTx Mechanism Messages; PCIe r6.0, sec 2.2.8.1 */
|
||||
#define PCIE_MSG_CODE_ASSERT_INTA 0x20
|
||||
#define PCIE_MSG_CODE_ASSERT_INTB 0x21
|
||||
#define PCIE_MSG_CODE_ASSERT_INTC 0x22
|
||||
#define PCIE_MSG_CODE_ASSERT_INTD 0x23
|
||||
#define PCIE_MSG_CODE_DEASSERT_INTA 0x24
|
||||
#define PCIE_MSG_CODE_DEASSERT_INTB 0x25
|
||||
#define PCIE_MSG_CODE_DEASSERT_INTC 0x26
|
||||
#define PCIE_MSG_CODE_DEASSERT_INTD 0x27
|
||||
|
||||
extern const unsigned char pcie_link_speed[];
|
||||
extern bool pci_early_dump;
|
||||
|
||||
|
@ -657,6 +685,7 @@ int of_pci_get_max_link_speed(struct device_node *node);
|
|||
u32 of_pci_get_slot_power_limit(struct device_node *node,
|
||||
u8 *slot_power_limit_value,
|
||||
u8 *slot_power_limit_scale);
|
||||
bool of_pci_preserve_config(struct device_node *node);
|
||||
int pci_set_of_node(struct pci_dev *dev);
|
||||
void pci_release_of_node(struct pci_dev *dev);
|
||||
void pci_set_bus_of_node(struct pci_bus *bus);
|
||||
|
@ -695,6 +724,11 @@ of_pci_get_slot_power_limit(struct device_node *node,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline bool of_pci_preserve_config(struct device_node *node)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline int pci_set_of_node(struct pci_dev *dev) { return 0; }
|
||||
static inline void pci_release_of_node(struct pci_dev *dev) { }
|
||||
static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
|
||||
|
@ -741,6 +775,7 @@ static inline void pci_restore_aer_state(struct pci_dev *dev) { }
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
bool pci_acpi_preserve_config(struct pci_host_bridge *bridge);
|
||||
int pci_acpi_program_hp_params(struct pci_dev *dev);
|
||||
extern const struct attribute_group pci_dev_acpi_attr_group;
|
||||
void pci_set_acpi_fwnode(struct pci_dev *dev);
|
||||
|
@ -754,6 +789,10 @@ int acpi_pci_wakeup(struct pci_dev *dev, bool enable);
|
|||
bool acpi_pci_need_resume(struct pci_dev *dev);
|
||||
pci_power_t acpi_pci_choose_state(struct pci_dev *pdev);
|
||||
#else
|
||||
static inline bool pci_acpi_preserve_config(struct pci_host_bridge *bridge)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
|
||||
{
|
||||
return -ENOTTY;
|
||||
|
@ -819,26 +858,12 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
|
|||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
|
||||
* on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
|
||||
* there's no need to track it separately. pci_devres is initialized
|
||||
* when a device is enabled using managed PCI device enable interface.
|
||||
*
|
||||
* TODO: Struct pci_devres and find_pci_dr() only need to be here because
|
||||
* they're used in pci.c. Port or move these functions to devres.c and
|
||||
* then remove them from here.
|
||||
*/
|
||||
struct pci_devres {
|
||||
unsigned int enabled:1;
|
||||
unsigned int pinned:1;
|
||||
unsigned int orig_intx:1;
|
||||
unsigned int restore_intx:1;
|
||||
unsigned int mwi:1;
|
||||
u32 region_mask;
|
||||
};
|
||||
int pcim_intx(struct pci_dev *dev, int enable);
|
||||
|
||||
struct pci_devres *find_pci_dr(struct pci_dev *pdev);
|
||||
int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
|
||||
int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
|
||||
const char *name);
|
||||
void pcim_release_region(struct pci_dev *pdev, int bar);
|
||||
|
||||
/*
|
||||
* Config Address for PCI Configuration Mechanism #1
|
||||
|
|
|
@ -412,13 +412,44 @@ void pci_dpc_init(struct pci_dev *pdev)
|
|||
}
|
||||
}
|
||||
|
||||
static void dpc_enable(struct pcie_device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = dev->port;
|
||||
int dpc = pdev->dpc_cap;
|
||||
u16 ctl;
|
||||
|
||||
/*
|
||||
* Clear DPC Interrupt Status so we don't get an interrupt for an
|
||||
* old event when setting DPC Interrupt Enable.
|
||||
*/
|
||||
pci_write_config_word(pdev, dpc + PCI_EXP_DPC_STATUS,
|
||||
PCI_EXP_DPC_STATUS_INTERRUPT);
|
||||
|
||||
pci_read_config_word(pdev, dpc + PCI_EXP_DPC_CTL, &ctl);
|
||||
ctl &= ~PCI_EXP_DPC_CTL_EN_MASK;
|
||||
ctl |= PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
|
||||
pci_write_config_word(pdev, dpc + PCI_EXP_DPC_CTL, ctl);
|
||||
}
|
||||
|
||||
static void dpc_disable(struct pcie_device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = dev->port;
|
||||
int dpc = pdev->dpc_cap;
|
||||
u16 ctl;
|
||||
|
||||
/* Disable DPC triggering and DPC interrupts */
|
||||
pci_read_config_word(pdev, dpc + PCI_EXP_DPC_CTL, &ctl);
|
||||
ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
|
||||
pci_write_config_word(pdev, dpc + PCI_EXP_DPC_CTL, ctl);
|
||||
}
|
||||
|
||||
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
|
||||
static int dpc_probe(struct pcie_device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = dev->port;
|
||||
struct device *device = &dev->device;
|
||||
int status;
|
||||
u16 ctl, cap;
|
||||
u16 cap;
|
||||
|
||||
if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native)
|
||||
return -ENOTSUPP;
|
||||
|
@ -433,11 +464,7 @@ static int dpc_probe(struct pcie_device *dev)
|
|||
}
|
||||
|
||||
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
|
||||
|
||||
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
|
||||
ctl &= ~PCI_EXP_DPC_CTL_EN_MASK;
|
||||
ctl |= PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
|
||||
pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
|
||||
dpc_enable(dev);
|
||||
|
||||
pci_info(pdev, "enabled with IRQ %d\n", dev->irq);
|
||||
pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
|
||||
|
@ -450,14 +477,21 @@ static int dpc_probe(struct pcie_device *dev)
|
|||
return status;
|
||||
}
|
||||
|
||||
static int dpc_suspend(struct pcie_device *dev)
|
||||
{
|
||||
dpc_disable(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dpc_resume(struct pcie_device *dev)
|
||||
{
|
||||
dpc_enable(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dpc_remove(struct pcie_device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = dev->port;
|
||||
u16 ctl;
|
||||
|
||||
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
|
||||
ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
|
||||
pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
|
||||
dpc_disable(dev);
|
||||
}
|
||||
|
||||
static struct pcie_port_service_driver dpcdriver = {
|
||||
|
@ -465,6 +499,8 @@ static struct pcie_port_service_driver dpcdriver = {
|
|||
.port_type = PCIE_ANY_PORT,
|
||||
.service = PCIE_PORT_SERVICE_DPC,
|
||||
.probe = dpc_probe,
|
||||
.suspend = dpc_suspend,
|
||||
.resume = dpc_resume,
|
||||
.remove = dpc_remove,
|
||||
};
|
||||
|
||||
|
|
|
@ -786,7 +786,7 @@ static const struct pci_error_handlers pcie_portdrv_err_handler = {
|
|||
|
||||
static struct pci_driver pcie_portdriver = {
|
||||
.name = "pcieport",
|
||||
.id_table = &port_pci_ids[0],
|
||||
.id_table = port_pci_ids,
|
||||
|
||||
.probe = pcie_portdrv_probe,
|
||||
.remove = pcie_portdrv_remove,
|
||||
|
|
|
@ -889,6 +889,17 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
|
|||
dev_set_msi_domain(&bus->dev, d);
|
||||
}
|
||||
|
||||
static bool pci_preserve_config(struct pci_host_bridge *host_bridge)
|
||||
{
|
||||
if (pci_acpi_preserve_config(host_bridge))
|
||||
return true;
|
||||
|
||||
if (host_bridge->dev.parent && host_bridge->dev.parent->of_node)
|
||||
return of_pci_preserve_config(host_bridge->dev.parent->of_node);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int pci_register_host_bridge(struct pci_host_bridge *bridge)
|
||||
{
|
||||
struct device *parent = bridge->dev.parent;
|
||||
|
@ -983,6 +994,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
|
|||
if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
|
||||
dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");
|
||||
|
||||
/* Check if the boot configuration by FW needs to be preserved */
|
||||
bridge->preserve_config = pci_preserve_config(bridge);
|
||||
|
||||
/* Coalesce contiguous windows */
|
||||
resource_list_for_each_entry_safe(window, n, &resources) {
|
||||
if (list_is_last(&window->node, &resources))
|
||||
|
@ -3077,20 +3091,18 @@ int pci_host_probe(struct pci_host_bridge *bridge)
|
|||
|
||||
bus = bridge->bus;
|
||||
|
||||
/*
|
||||
* We insert PCI resources into the iomem_resource and
|
||||
* ioport_resource trees in either pci_bus_claim_resources()
|
||||
* or pci_bus_assign_resources().
|
||||
*/
|
||||
if (pci_has_flag(PCI_PROBE_ONLY)) {
|
||||
/* If we must preserve the resource configuration, claim now */
|
||||
if (bridge->preserve_config)
|
||||
pci_bus_claim_resources(bus);
|
||||
} else {
|
||||
pci_bus_size_bridges(bus);
|
||||
pci_bus_assign_resources(bus);
|
||||
|
||||
list_for_each_entry(child, &bus->children, node)
|
||||
pcie_bus_configure_settings(child);
|
||||
}
|
||||
/*
|
||||
* Assign whatever was left unassigned. If we didn't claim above,
|
||||
* this will reassign everything.
|
||||
*/
|
||||
pci_assign_unassigned_root_bus_resources(bus);
|
||||
|
||||
list_for_each_entry(child, &bus->children, node)
|
||||
pcie_bus_configure_settings(child);
|
||||
|
||||
pci_bus_add_devices(bus);
|
||||
return 0;
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
* tighter packing. Prefetchable range support.
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -21,6 +22,8 @@
|
|||
#include <linux/errno.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/cache.h>
|
||||
#include <linux/limits.h>
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/acpi.h>
|
||||
#include "pci.h"
|
||||
|
@ -829,11 +832,9 @@ static resource_size_t calculate_memsize(resource_size_t size,
|
|||
size = min_size;
|
||||
if (old_size == 1)
|
||||
old_size = 0;
|
||||
if (size < old_size)
|
||||
size = old_size;
|
||||
|
||||
size = ALIGN(max(size, add_size) + children_add_size, align);
|
||||
return size;
|
||||
size = max(size, add_size) + children_add_size;
|
||||
return ALIGN(max(size, old_size), align);
|
||||
}
|
||||
|
||||
resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
|
||||
|
@ -959,7 +960,7 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
|
|||
for (order = 0; order <= max_order; order++) {
|
||||
resource_size_t align1 = 1;
|
||||
|
||||
align1 <<= (order + 20);
|
||||
align1 <<= order + __ffs(SZ_1M);
|
||||
|
||||
if (!align)
|
||||
min_align = align1;
|
||||
|
@ -971,6 +972,67 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
|
|||
return min_align;
|
||||
}
|
||||
|
||||
/**
|
||||
* pbus_upstream_space_available - Check no upstream resource limits allocation
|
||||
* @bus: The bus
|
||||
* @mask: Mask the resource flag, then compare it with type
|
||||
* @type: The type of resource from bridge
|
||||
* @size: The size required from the bridge window
|
||||
* @align: Required alignment for the resource
|
||||
*
|
||||
* Checks that @size can fit inside the upstream bridge resources that are
|
||||
* already assigned.
|
||||
*
|
||||
* Return: %true if enough space is available on all assigned upstream
|
||||
* resources.
|
||||
*/
|
||||
static bool pbus_upstream_space_available(struct pci_bus *bus, unsigned long mask,
|
||||
unsigned long type, resource_size_t size,
|
||||
resource_size_t align)
|
||||
{
|
||||
struct resource_constraint constraint = {
|
||||
.max = RESOURCE_SIZE_MAX,
|
||||
.align = align,
|
||||
};
|
||||
struct pci_bus *downstream = bus;
|
||||
struct resource *r;
|
||||
|
||||
while ((bus = bus->parent)) {
|
||||
if (pci_is_root_bus(bus))
|
||||
break;
|
||||
|
||||
pci_bus_for_each_resource(bus, r) {
|
||||
if (!r || !r->parent || (r->flags & mask) != type)
|
||||
continue;
|
||||
|
||||
if (resource_size(r) >= size) {
|
||||
struct resource gap = {};
|
||||
|
||||
if (find_resource_space(r, &gap, size, &constraint) == 0) {
|
||||
gap.flags = type;
|
||||
pci_dbg(bus->self,
|
||||
"Assigned bridge window %pR to %pR free space at %pR\n",
|
||||
r, &bus->busn_res, &gap);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if (bus->self) {
|
||||
pci_info(bus->self,
|
||||
"Assigned bridge window %pR to %pR cannot fit 0x%llx required for %s bridging to %pR\n",
|
||||
r, &bus->busn_res,
|
||||
(unsigned long long)size,
|
||||
pci_name(downstream->self),
|
||||
&downstream->busn_res);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* pbus_size_mem() - Size the memory window of a given bus
|
||||
*
|
||||
|
@ -997,7 +1059,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
|
|||
struct list_head *realloc_head)
|
||||
{
|
||||
struct pci_dev *dev;
|
||||
resource_size_t min_align, align, size, size0, size1;
|
||||
resource_size_t min_align, win_align, align, size, size0, size1;
|
||||
resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */
|
||||
int order, max_order;
|
||||
struct resource *b_res = find_bus_resource_of_type(bus,
|
||||
|
@ -1049,7 +1111,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
|
|||
* resources.
|
||||
*/
|
||||
align = pci_resource_alignment(dev, r);
|
||||
order = __ffs(align) - 20;
|
||||
order = __ffs(align) - __ffs(SZ_1M);
|
||||
if (order < 0)
|
||||
order = 0;
|
||||
if (order >= ARRAY_SIZE(aligns)) {
|
||||
|
@ -1076,10 +1138,23 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
|
|||
}
|
||||
}
|
||||
|
||||
win_align = window_alignment(bus, b_res->flags);
|
||||
min_align = calculate_mem_align(aligns, max_order);
|
||||
min_align = max(min_align, window_alignment(bus, b_res->flags));
|
||||
min_align = max(min_align, win_align);
|
||||
size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
|
||||
add_align = max(min_align, add_align);
|
||||
|
||||
if (bus->self && size0 &&
|
||||
!pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type,
|
||||
size0, add_align)) {
|
||||
min_align = 1ULL << (max_order + __ffs(SZ_1M));
|
||||
min_align = max(min_align, win_align);
|
||||
size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), win_align);
|
||||
add_align = win_align;
|
||||
pci_info(bus->self, "bridge window %pR to %pR requires relaxed alignment rules\n",
|
||||
b_res, &bus->busn_res);
|
||||
}
|
||||
|
||||
size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
|
||||
calculate_memsize(size, min_size, add_size, children_add_size,
|
||||
resource_size(b_res), add_align);

@@ -37,7 +37,9 @@ MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful fo
 static dev_t switchtec_devt;
 static DEFINE_IDA(switchtec_minor_ida);

-struct class *switchtec_class;
+const struct class switchtec_class = {
+	.name = "switchtec",
+};
 EXPORT_SYMBOL_GPL(switchtec_class);

 enum mrpc_state {
@@ -1363,7 +1365,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)

 	dev = &stdev->dev;
 	device_initialize(dev);
-	dev->class = switchtec_class;
+	dev->class = &switchtec_class;
 	dev->parent = &pdev->dev;
 	dev->groups = switchtec_device_groups;
 	dev->release = stdev_release;
@@ -1851,11 +1853,9 @@ static int __init switchtec_init(void)
 	if (rc)
 		return rc;

-	switchtec_class = class_create("switchtec");
-	if (IS_ERR(switchtec_class)) {
-		rc = PTR_ERR(switchtec_class);
+	rc = class_register(&switchtec_class);
+	if (rc)
 		goto err_create_class;
-	}

 	rc = pci_register_driver(&switchtec_pci_driver);
 	if (rc)
@@ -1866,7 +1866,7 @@ static int __init switchtec_init(void)
 	return 0;

 err_pci_register:
-	class_destroy(switchtec_class);
+	class_unregister(&switchtec_class);

 err_create_class:
 	unregister_chrdev_region(switchtec_devt, max_devices);
@@ -1878,7 +1878,7 @@ module_init(switchtec_init);
 static void __exit switchtec_exit(void)
 {
 	pci_unregister_driver(&switchtec_pci_driver);
-	class_destroy(switchtec_class);
+	class_unregister(&switchtec_class);
 	unregister_chrdev_region(switchtec_devt, max_devices);
 	ida_destroy(&switchtec_minor_ida);
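
These switchtec hunks follow the driver core's move to constant, statically defined classes: `class_create()`/`class_destroy()` on a `struct class *` become a file-scope `const struct class` plus `class_register()`/`class_unregister()`. A minimal sketch of the same pattern in a hypothetical module (all names illustrative):

```c
#include <linux/device/class.h>
#include <linux/module.h>

/* Statically defined class; the driver core treats it as read-only. */
static const struct class demo_class = {
	.name = "demo",
};

static int __init demo_init(void)
{
	/* class_register() takes a const pointer and returns 0 or -errno */
	return class_register(&demo_class);
}

static void __exit demo_exit(void)
{
	class_unregister(&demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```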

@@ -231,7 +231,7 @@ static const struct pci_device_id cdnsp_pci_ids[] = {

 static struct pci_driver cdnsp_pci_driver = {
 	.name = "cdnsp-pci",
-	.id_table = &cdnsp_pci_ids[0],
+	.id_table = cdnsp_pci_ids,
 	.probe = cdnsp_pci_probe,
 	.remove = cdnsp_pci_remove,
 	.driver = {
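
The cdnsp change is purely cosmetic: an array expression decays to a pointer to its first element, so `&cdnsp_pci_ids[0]` and `cdnsp_pci_ids` are the same value; the latter matches the idiom used by other PCI drivers. A standalone illustration:

```c
#include <assert.h>

/* Local stand-in so the example builds outside the kernel tree. */
struct pci_device_id { unsigned int vendor, device; };

static const struct pci_device_id ids[] = {
	{ 0x1234, 0x5678 },	/* hypothetical VID/DID */
	{ 0, }			/* terminating entry */
};

int main(void)
{
	/* An array decays to a pointer to its first element. */
	assert(&ids[0] == ids);
	return 0;
}
```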

@@ -173,6 +173,11 @@ enum {
 #define DEFINE_RES_MEM(_start, _size) \
 	DEFINE_RES_MEM_NAMED((_start), (_size), NULL)

+#define DEFINE_RES_REG_NAMED(_start, _size, _name) \
+	DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_REG)
+#define DEFINE_RES_REG(_start, _size) \
+	DEFINE_RES_REG_NAMED((_start), (_size), NULL)
+
 #define DEFINE_RES_IRQ_NAMED(_irq, _name) \
 	DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
 #define DEFINE_RES_IRQ(_irq) \
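
The new `DEFINE_RES_REG*` helpers round out the `DEFINE_RES_*` family for `IORESOURCE_REG` register ranges, as used by MFD cell definitions. A hedged usage sketch with invented offsets:

```c
#include <linux/ioport.h>

/* Register block at offset 0x100, 0x40 bytes long (illustrative values). */
static const struct resource demo_ctrl_res =
	DEFINE_RES_REG_NAMED(0x100, 0x40, "demo-ctrl");

/* Unnamed variant, same flags (IORESOURCE_REG). */
static const struct resource demo_data_res = DEFINE_RES_REG(0x200, 0x10);
```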
@@ -183,6 +188,42 @@ enum {
 #define DEFINE_RES_DMA(_dma) \
 	DEFINE_RES_DMA_NAMED((_dma), NULL)

+/**
+ * typedef resource_alignf - Resource alignment callback
+ * @data:	Private data used by the callback
+ * @res:	Resource candidate range (an empty resource space)
+ * @size:	The minimum size of the empty space
+ * @align:	Alignment from the constraints
+ *
+ * Callback allows calculating resource placement and alignment beyond min,
+ * max, and align fields in the struct resource_constraint.
+ *
+ * Return: Start address for the resource.
+ */
+typedef resource_size_t (*resource_alignf)(void *data,
+					   const struct resource *res,
+					   resource_size_t size,
+					   resource_size_t align);
+
+/**
+ * struct resource_constraint - constraints to be met while searching empty
+ *				resource space
+ * @min:	The minimum address for the memory range
+ * @max:	The maximum address for the memory range
+ * @align:	Alignment for the start address of the empty space
+ * @alignf:	Additional alignment constraints callback
+ * @alignf_data: Data provided for @alignf callback
+ *
+ * Contains the range and alignment constraints that have to be met during
+ * find_resource_space(). @alignf can be NULL indicating no alignment beyond
+ * @align is necessary.
+ */
+struct resource_constraint {
+	resource_size_t min, max, align;
+	resource_alignf alignf;
+	void *alignf_data;
+};
+
 /* PC/ISA/whatever - the normal PC address spaces: IO and memory */
 extern struct resource ioport_resource;
 extern struct resource iomem_resource;
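
Given the typedef, an additional-placement policy is an ordinary callback. A hedged sketch of an `alignf` implementation that refuses space below a private floor address (names and policy are illustrative, loosely modeled on how pcibios_align_resource() is hooked in):

```c
#include <linux/align.h>
#include <linux/ioport.h>
#include <linux/types.h>

struct demo_align_data {
	resource_size_t floor;	/* hypothetical: lowest acceptable address */
};

/* Matches the resource_alignf typedef introduced above. */
static resource_size_t demo_alignf(void *data, const struct resource *res,
				   resource_size_t size, resource_size_t align)
{
	struct demo_align_data *d = data;
	resource_size_t start = res->start;

	if (start < d->floor)
		start = d->floor;

	/* The constraint's align field is still honored by the caller;
	 * rounding here keeps the returned start self-consistent. */
	return ALIGN(start, align ?: 1);
}
```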
@@ -202,10 +243,7 @@ extern void arch_remove_reservations(struct resource *avail);
 extern int allocate_resource(struct resource *root, struct resource *new,
 			     resource_size_t size, resource_size_t min,
 			     resource_size_t max, resource_size_t align,
-			     resource_size_t (*alignf)(void *,
-						       const struct resource *,
-						       resource_size_t,
-						       resource_size_t),
+			     resource_alignf alignf,
 			     void *alignf_data);
 struct resource *lookup_resource(struct resource *root, resource_size_t start);
 int adjust_resource(struct resource *res, resource_size_t start,
@@ -259,6 +297,9 @@ resource_union(struct resource *r1, struct resource *r2, struct resource *r)
 	return true;
 }

+int find_resource_space(struct resource *root, struct resource *new,
+			resource_size_t size, struct resource_constraint *constraint);
+
 /* Convenience shorthand with allocation */
 #define request_region(start,n,name)		__request_region(&ioport_resource, (start), (n), (name), 0)
 #define request_muxed_region(start,n,name)	__request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
@@ -326,6 +367,9 @@ extern int
 walk_system_ram_res(u64 start, u64 end, void *arg,
 		    int (*func)(struct resource *, void *));
 extern int
+walk_system_ram_res_rev(u64 start, u64 end, void *arg,
+			int (*func)(struct resource *, void *));
+extern int
 walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
 		    void *arg, int (*func)(struct resource *, void *));

@@ -197,6 +197,8 @@ struct pci_epc_features {

 #define to_pci_epc(device) container_of((device), struct pci_epc, dev)

+#ifdef CONFIG_PCI_ENDPOINT
+
 #define pci_epc_create(dev, ops) \
 		__pci_epc_create((dev), (ops), THIS_MODULE)
 #define devm_pci_epc_create(dev, ops) \
@@ -226,7 +228,8 @@ void pci_epc_linkup(struct pci_epc *epc);
 void pci_epc_linkdown(struct pci_epc *epc);
 void pci_epc_init_notify(struct pci_epc *epc);
 void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf);
-void pci_epc_bme_notify(struct pci_epc *epc);
+void pci_epc_deinit_notify(struct pci_epc *epc);
+void pci_epc_bus_master_enable_notify(struct pci_epc *epc);
 void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
 			enum pci_epc_interface_type type);
 int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
@@ -272,4 +275,14 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
 				     phys_addr_t *phys_addr, size_t size);
 void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
 			   void __iomem *virt_addr, size_t size);
+
+#else
+static inline void pci_epc_init_notify(struct pci_epc *epc)
+{
+}
+
+static inline void pci_epc_deinit_notify(struct pci_epc *epc)
+{
+}
+#endif /* CONFIG_PCI_ENDPOINT */
 #endif /* __LINUX_PCI_EPC_H */
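
For orientation, a controller driver forwards hardware events to bound functions through these helpers; the rename makes the Bus Master Enable notification self-describing. A hedged fragment of a hypothetical endpoint-controller event handler (the event bits are invented):

```c
#include <linux/bits.h>
#include <linux/pci-epc.h>

/* Hypothetical status bits for an imaginary controller. */
#define DEMO_EVENT_BME		BIT(0)
#define DEMO_EVENT_LINK_DOWN	BIT(1)

static void demo_epc_handle_event(struct pci_epc *epc, u32 event)
{
	if (event & DEMO_EVENT_BME)
		pci_epc_bus_master_enable_notify(epc);
	if (event & DEMO_EVENT_LINK_DOWN)
		pci_epc_linkdown(epc);
}
```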

@@ -70,16 +70,18 @@ struct pci_epf_ops {

 /**
  * struct pci_epc_event_ops - Callbacks for capturing the EPC events
- * @core_init: Callback for the EPC initialization complete event
+ * @epc_init: Callback for the EPC initialization complete event
+ * @epc_deinit: Callback for the EPC deinitialization event
  * @link_up: Callback for the EPC link up event
  * @link_down: Callback for the EPC link down event
- * @bme: Callback for the EPC BME (Bus Master Enable) event
+ * @bus_master_enable: Callback for the EPC Bus Master Enable event
  */
 struct pci_epc_event_ops {
-	int (*core_init)(struct pci_epf *epf);
+	int (*epc_init)(struct pci_epf *epf);
+	void (*epc_deinit)(struct pci_epf *epf);
 	int (*link_up)(struct pci_epf *epf);
 	int (*link_down)(struct pci_epf *epf);
-	int (*bme)(struct pci_epf *epf);
+	int (*bus_master_enable)(struct pci_epf *epf);
 };

 /**
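
An EPF driver opts into the renamed events by filling the ops table. A hedged sketch with stub handlers (all `demo_*` names are invented):

```c
#include <linux/pci-epf.h>

/* Hypothetical EPF driver stubs wired to the renamed callbacks. */
static int demo_epc_init(struct pci_epf *epf)
{
	/* (re)program BARs, interrupts, etc. on (re)initialization */
	return 0;
}

static int demo_bus_master_enable(struct pci_epf *epf)
{
	/* host set Bus Master Enable; safe to start DMA from here */
	return 0;
}

static const struct pci_epc_event_ops demo_event_ops = {
	.epc_init = demo_epc_init,
	.bus_master_enable = demo_bus_master_enable,
};
```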

@@ -372,10 +372,11 @@ struct pci_dev {
 					   this is D0-D3, D0 being fully
 					   functional, and D3 being off. */
 	u8		pm_cap;		/* PM capability offset */
-	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
 	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
 					   can be generated */
 	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
+	unsigned int	pinned:1;	/* Whether this dev is pinned */
+	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
 	unsigned int	d1_support:1;	/* Low power state D1 is supported */
 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
 	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
@@ -1599,10 +1600,7 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
 			struct resource *res, resource_size_t size,
 			resource_size_t align, resource_size_t min,
 			unsigned long type_mask,
-			resource_size_t (*alignf)(void *,
-						  const struct resource *,
-						  resource_size_t,
-						  resource_size_t),
+			resource_alignf alignf,
 			void *alignf_data);


@@ -2351,6 +2349,8 @@ int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
 				   const char *name);
 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
+void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
+			       unsigned long offset, unsigned long len);

 extern int pci_pci_problems;
 #define PCIPCI_FAIL		1	/* No PCI PCI DMA */
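
`pcim_iomap_range()` is the managed counterpart for mapping only a slice of a BAR rather than the whole region. A hedged probe-path sketch with invented offsets, assuming the new helper's documented convention that failures come back ERR_PTR-encoded:

```c
#include <linux/pci.h>

#define DEMO_REG_OFFSET	0x1000	/* hypothetical register block offset */
#define DEMO_REG_LEN	0x100	/* hypothetical register block length */

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Map only the registers we use from BAR 0; unmapped on detach. */
	regs = pcim_iomap_range(pdev, 0, DEMO_REG_OFFSET, DEMO_REG_LEN);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	return 0;
}
```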

@@ -521,6 +521,6 @@ static inline struct switchtec_dev *to_stdev(struct device *dev)
 	return container_of(dev, struct switchtec_dev, dev);
 }

-extern struct class *switchtec_class;
+extern const struct class switchtec_class;

 #endif

@@ -27,6 +27,8 @@
 #include <linux/mount.h>
 #include <linux/resource_ext.h>
 #include <uapi/linux/magic.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
 #include <asm/io.h>


@@ -46,51 +48,19 @@ struct resource iomem_resource = {
 };
 EXPORT_SYMBOL(iomem_resource);

-/* constraints to be met while allocating resources */
-struct resource_constraint {
-	resource_size_t min, max, align;
-	resource_size_t (*alignf)(void *, const struct resource *,
-				  resource_size_t, resource_size_t);
-	void *alignf_data;
-};
-
 static DEFINE_RWLOCK(resource_lock);

-/*
- * For memory hotplug, there is no way to free resource entries allocated
- * by boot mem after the system is up. So for reusing the resource entry
- * we need to remember the resource.
- */
-static struct resource *bootmem_resource_free;
-static DEFINE_SPINLOCK(bootmem_resource_lock);
-
-static struct resource *next_resource(struct resource *p)
+static struct resource *next_resource(struct resource *p, bool skip_children)
 {
-	if (p->child)
+	if (!skip_children && p->child)
 		return p->child;
 	while (!p->sibling && p->parent)
 		p = p->parent;
 	return p->sibling;
 }

-static struct resource *next_resource_skip_children(struct resource *p)
-{
-	while (!p->sibling && p->parent)
-		p = p->parent;
-	return p->sibling;
-}
-
 #define for_each_resource(_root, _p, _skip_children) \
-	for ((_p) = (_root)->child; (_p); \
-	     (_p) = (_skip_children) ? next_resource_skip_children(_p) : \
-				       next_resource(_p))
-
-static void *r_next(struct seq_file *m, void *v, loff_t *pos)
-{
-	struct resource *p = v;
-	(*pos)++;
-	return (void *)next_resource(p);
-}
+	for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))

 #ifdef CONFIG_PROC_FS
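
The rework folds the child-skipping iterator into `next_resource()` behind a `skip_children` flag, so `for_each_resource()` no longer needs a conditional step expression. Since the macro stays file-local, here is a sketch of a walker as it would look inside kernel/resource.c (the helper itself is invented):

```c
/* Count all descendants of a resource tree root, or only the top level
 * when skip_children is true; mirrors the for_each_resource() users in
 * kernel/resource.c. Caller must hold resource_lock for reading. */
static unsigned int demo_count_resources(struct resource *root,
					 bool skip_children)
{
	struct resource *p;
	unsigned int n = 0;

	for_each_resource(root, p, skip_children)
		n++;

	return n;
}
```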

@@ -99,14 +69,28 @@ enum { MAX_IORES_LEVEL = 5 };
 static void *r_start(struct seq_file *m, loff_t *pos)
 	__acquires(resource_lock)
 {
-	struct resource *p = pde_data(file_inode(m->file));
-	loff_t l = 0;
+	struct resource *root = pde_data(file_inode(m->file));
+	struct resource *p;
+	loff_t l = *pos;
+
 	read_lock(&resource_lock);
-	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
-		;
+	for_each_resource(root, p, false) {
+		if (l-- == 0)
+			break;
+	}
+
 	return p;
 }

+static void *r_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct resource *p = v;
+
+	(*pos)++;
+
+	return (void *)next_resource(p, false);
+}
+
 static void r_stop(struct seq_file *m, void *v)
 	__releases(resource_lock)
 {

@@ -160,36 +144,19 @@ __initcall(ioresources_init);

 static void free_resource(struct resource *res)
 {
-	if (!res)
-		return;
-
-	if (!PageSlab(virt_to_head_page(res))) {
-		spin_lock(&bootmem_resource_lock);
-		res->sibling = bootmem_resource_free;
-		bootmem_resource_free = res;
-		spin_unlock(&bootmem_resource_lock);
-	} else {
+	/**
+	 * If the resource was allocated using memblock early during boot
+	 * we'll leak it here: we can only return full pages back to the
+	 * buddy and trying to be smart and reusing them eventually in
+	 * alloc_resource() overcomplicates resource handling.
+	 */
+	if (res && PageSlab(virt_to_head_page(res)))
 		kfree(res);
-	}
 }

 static struct resource *alloc_resource(gfp_t flags)
 {
-	struct resource *res = NULL;
-
-	spin_lock(&bootmem_resource_lock);
-	if (bootmem_resource_free) {
-		res = bootmem_resource_free;
-		bootmem_resource_free = res->sibling;
-	}
-	spin_unlock(&bootmem_resource_lock);
-
-	if (res)
-		memset(res, 0, sizeof(struct resource));
-	else
-		res = kzalloc(sizeof(struct resource), flags);
-
-	return res;
+	return kzalloc(sizeof(struct resource), flags);
 }

 /* Return the conflict entry if you can't request it */
@@ -361,7 +328,7 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end,

 	read_lock(&resource_lock);

-	for (p = iomem_resource.child; p; p = next_resource(p)) {
+	for_each_resource(&iomem_resource, p, false) {
 		/* If we passed the resource we are looking for, stop */
 		if (p->start > end) {
 			p = NULL;
@@ -456,6 +423,61 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
 				     func);
 }

+/*
+ * This function, being a variant of walk_system_ram_res(), calls the @func
+ * callback against all memory ranges of type System RAM which are marked as
+ * IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY in reversed order, i.e., from
+ * higher to lower.
+ */
+int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
+			    int (*func)(struct resource *, void *))
+{
+	struct resource res, *rams;
+	int rams_size = 16, i;
+	unsigned long flags;
+	int ret = -1;
+
+	/* create a list */
+	rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
+	if (!rams)
+		return ret;
+
+	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+	i = 0;
+	while ((start < end) &&
+	       (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
+		if (i >= rams_size) {
+			/* re-alloc */
+			struct resource *rams_new;
+
+			rams_new = kvrealloc(rams, rams_size * sizeof(struct resource),
+					     (rams_size + 16) * sizeof(struct resource),
+					     GFP_KERNEL);
+			if (!rams_new)
+				goto out;
+
+			rams = rams_new;
+			rams_size += 16;
+		}
+
+		rams[i].start = res.start;
+		rams[i++].end = res.end;
+
+		start = res.end + 1;
+	}
+
+	/* go reverse */
+	for (i--; i >= 0; i--) {
+		ret = (*func)(&rams[i], arg);
+		if (ret)
+			break;
+	}
+
+out:
+	kvfree(rams);
+	return ret;
+}
+
 /*
  * This function calls the @func callback against all memory ranges, which
  * are ranges marked as IORESOURCE_MEM and IORESOUCE_BUSY.
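
Users such as kexec want the highest suitable System RAM range first, which is exactly what the reversed walk provides. A hedged caller sketch (helper names invented):

```c
#include <linux/ioport.h>
#include <linux/limits.h>
#include <linux/printk.h>

/* Print System RAM ranges from highest to lowest. */
static int demo_print_ram(struct resource *res, void *arg)
{
	unsigned int *count = arg;

	pr_info("RAM range %u: %pR\n", (*count)++, res);
	return 0;	/* a non-zero return would stop the walk */
}

static void demo_walk_ram_top_down(void)
{
	unsigned int count = 0;

	walk_system_ram_res_rev(0, U64_MAX, &count, demo_print_ram);
}
```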
@@ -580,14 +602,6 @@ void __weak arch_remove_reservations(struct resource *avail)
 {
 }

-static resource_size_t simple_align_resource(void *data,
-					     const struct resource *avail,
-					     resource_size_t size,
-					     resource_size_t align)
-{
-	return avail->start;
-}
-
 static void resource_clip(struct resource *res, resource_size_t min,
 			  resource_size_t max)
 {
@@ -598,16 +612,16 @@ static void resource_clip(struct resource *res, resource_size_t min,
 }

 /*
- * Find empty slot in the resource tree with the given range and
+ * Find empty space in the resource tree with the given range and
  * alignment constraints
  */
-static int __find_resource(struct resource *root, struct resource *old,
-			 struct resource *new,
-			 resource_size_t size,
-			 struct resource_constraint *constraint)
+static int __find_resource_space(struct resource *root, struct resource *old,
+				 struct resource *new, resource_size_t size,
+				 struct resource_constraint *constraint)
 {
 	struct resource *this = root->child;
 	struct resource tmp = *new, avail, alloc;
+	resource_alignf alignf = constraint->alignf;

 	tmp.start = root->start;
 	/*
@@ -636,8 +650,12 @@ static int __find_resource_space(struct resource *root, struct resource *old,
 		avail.flags = new->flags & ~IORESOURCE_UNSET;
 		if (avail.start >= tmp.start) {
 			alloc.flags = avail.flags;
-			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
-					size, constraint->align);
+			if (alignf) {
+				alloc.start = alignf(constraint->alignf_data,
+						     &avail, size, constraint->align);
+			} else {
+				alloc.start = avail.start;
+			}
 			alloc.end = alloc.start + size - 1;
 			if (alloc.start <= alloc.end &&
 			    resource_contains(&avail, &alloc)) {
@@ -657,15 +675,27 @@ next:	if (!this || this->end == root->end)
 	return -EBUSY;
 }

-/*
- * Find empty slot in the resource tree given range and alignment.
+/**
+ * find_resource_space - Find empty space in the resource tree
+ * @root:	Root resource descriptor
+ * @new:	Resource descriptor awaiting an empty resource space
+ * @size:	The minimum size of the empty space
+ * @constraint:	The range and alignment constraints to be met
+ *
+ * Finds an empty space under @root in the resource tree satisfying range and
+ * alignment @constraints.
+ *
+ * Return:
+ * * %0		- if successful, @new members start, end, and flags are altered.
+ * * %-EBUSY	- if no empty space was found.
  */
-static int find_resource(struct resource *root, struct resource *new,
+int find_resource_space(struct resource *root, struct resource *new,
 			resource_size_t size,
-			struct resource_constraint *constraint)
+			struct resource_constraint *constraint)
 {
-	return __find_resource(root, NULL, new, size, constraint);
+	return __find_resource_space(root, NULL, new, size, constraint);
 }
+EXPORT_SYMBOL_GPL(find_resource_space);

 /**
  * reallocate_resource - allocate a slot in the resource tree given range & alignment.
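
With the export in place, code outside kernel/resource.c, like `pbus_upstream_space_available()` in the setup-bus.c hunks above, can probe for a gap without claiming it. A hedged sketch (names invented):

```c
#include <linux/ioport.h>

/* Check whether a window resource still has a gap of @size bytes with
 * natural alignment; mirrors the probe in pbus_upstream_space_available(). */
static bool demo_has_gap(struct resource *win, resource_size_t size)
{
	struct resource gap = {};
	struct resource_constraint constraint = {
		.min = 0,
		.max = (resource_size_t)-1,
		.align = size,	/* illustrative: natural alignment */
		/* .alignf left NULL: no extra placement policy */
	};

	return find_resource_space(win, &gap, size, &constraint) == 0;
}
```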
@@ -687,7 +717,7 @@ static int reallocate_resource(struct resource *root, struct resource *old,

 	write_lock(&resource_lock);

-	if ((err = __find_resource(root, old, &new, newsize, constraint)))
+	if ((err = __find_resource_space(root, old, &new, newsize, constraint)))
 		goto out;

 	if (resource_contains(&new, old)) {
@@ -731,18 +761,12 @@ out:
 int allocate_resource(struct resource *root, struct resource *new,
 		      resource_size_t size, resource_size_t min,
 		      resource_size_t max, resource_size_t align,
-		      resource_size_t (*alignf)(void *,
-						const struct resource *,
-						resource_size_t,
-						resource_size_t),
+		      resource_alignf alignf,
 		      void *alignf_data)
 {
 	int err;
 	struct resource_constraint constraint;

-	if (!alignf)
-		alignf = simple_align_resource;
-
 	constraint.min = min;
 	constraint.max = max;
 	constraint.align = align;
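
Dropping the `simple_align_resource()` fallback means a `NULL` callback now flows into `__find_resource_space()`, which simply takes `avail.start`; callers with no placement policy pass `NULL` directly. A hedged call-site sketch (names and sizes invented):

```c
#include <linux/ioport.h>
#include <linux/sizes.h>

/* Carve a 64 KB, 64 KB-aligned block out of iomem_resource anywhere in
 * the low 4 GB; NULL alignf means "no extra placement policy". */
static int demo_alloc_block(struct resource *new)
{
	new->name = "demo-block";
	new->flags = IORESOURCE_MEM;

	return allocate_resource(&iomem_resource, new, SZ_64K,
				 0, SZ_4G - 1, SZ_64K, NULL, NULL);
}
```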
@@ -756,7 +780,7 @@ int allocate_resource(struct resource *root, struct resource *new,
 	}

 	write_lock(&resource_lock);
-	err = find_resource(root, new, size, &constraint);
+	err = find_resource_space(root, new, size, &constraint);
 	if (err >= 0 && __request_resource(root, new))
 		err = -EBUSY;
 	write_unlock(&resource_lock);
@@ -913,7 +937,7 @@ void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
 		if (conflict->end > new->end)
 			new->end = conflict->end;

-		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
+		pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
 	}
 	write_unlock(&resource_lock);
 }
@@ -1308,9 +1332,7 @@ void __release_region(struct resource *parent, resource_size_t start,

 	write_unlock(&resource_lock);

-	printk(KERN_WARNING "Trying to free nonexistent resource "
-	       "<%016llx-%016llx>\n", (unsigned long long)start,
-	       (unsigned long long)end);
+	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
 }
 EXPORT_SYMBOL(__release_region);
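
The printk conversions also switch from casting to `unsigned long long` to the `%pa` specifier, which prints a `phys_addr_t`/`resource_size_t` passed by reference, hence `&start`/`&end`. A minimal sketch:

```c
#include <linux/printk.h>
#include <linux/types.h>

static void demo_print_range(resource_size_t start, resource_size_t end)
{
	/* %pa dereferences its argument; pass pointers, not values. */
	pr_info("range <%pa-%pa>\n", &start, &end);
}
```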
@@ -1668,22 +1690,22 @@ __setup("reserve=", reserve_setup);
  */
 int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
 {
-	struct resource *p = &iomem_resource;
+	resource_size_t end = addr + size - 1;
+	struct resource *p;
 	int err = 0;
-	loff_t l;

 	read_lock(&resource_lock);
-	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
+	for_each_resource(&iomem_resource, p, false) {
 		/*
 		 * We can probably skip the resources without
 		 *	IORESOURCE_IO attribute?
 		 */
-		if (p->start >= addr + size)
+		if (p->start > end)
 			continue;
 		if (p->end < addr)
 			continue;
 		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
-		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
+		    PFN_DOWN(p->end) >= PFN_DOWN(end))
 			continue;
 		/*
 		 * if a resource is "BUSY", it's not a hardware resource
@@ -1694,10 +1716,8 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
 		if (p->flags & IORESOURCE_BUSY)
 			continue;

-		printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
-		       (unsigned long long)addr,
-		       (unsigned long long)(addr + size - 1),
-		       p->name, p);
+		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
+			&addr, &end, p->name, p);
 		err = -1;
 		break;
 	}
@@ -1875,8 +1895,8 @@ get_free_mem_region(struct device *dev, struct resource *base,

 	write_lock(&resource_lock);
 	for (addr = gfr_start(base, size, align, flags);
-	     gfr_continue(base, addr, size, flags);
-	     addr = gfr_next(addr, size, flags)) {
+	     gfr_continue(base, addr, align, flags);
+	     addr = gfr_next(addr, align, flags)) {
 		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
 		    REGION_DISJOINT)
 			continue;