1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * pci.h
4 *
5 * PCI defines and function prototypes
6 * Copyright 1994, Drew Eckhardt
7 * Copyright 1997--1999 Martin Mares <[email protected]>
8 *
9 * PCI Express ASPM defines and function prototypes
10 * Copyright (c) 2007 Intel Corp.
11 * Zhang Yanmin ([email protected])
12 * Shaohua Li ([email protected])
13 *
14 * For more information, please consult the following manuals (look at
15 * http://www.pcisig.com/ for how to get them):
16 *
17 * PCI BIOS Specification
18 * PCI Local Bus Specification
19 * PCI to PCI Bridge Specification
20 * PCI Express Specification
21 * PCI System Design Guide
22 */
23 #ifndef LINUX_PCI_H
24 #define LINUX_PCI_H
25
26 #include <linux/args.h>
27 #include <linux/mod_devicetable.h>
28
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <linux/msi_api.h>
42 #include <uapi/linux/pci.h>
43
44 #include <linux/pci_ids.h>
45
46 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
47 PCI_STATUS_SIG_SYSTEM_ERROR | \
48 PCI_STATUS_REC_MASTER_ABORT | \
49 PCI_STATUS_REC_TARGET_ABORT | \
50 PCI_STATUS_SIG_TARGET_ABORT | \
51 PCI_STATUS_PARITY)
52
53 /* Number of reset methods used in pci_reset_fn_methods array in pci.c */
54 #define PCI_NUM_RESET_METHODS 8
55
56 #define PCI_RESET_PROBE true
57 #define PCI_RESET_DO_RESET false
58
59 /*
60 * The PCI interface treats multi-function devices as independent
61 * devices. The slot/function address of each device is encoded
62 * in a single byte as follows:
63 *
64 * 7:3 = slot
65 * 2:0 = function
66 *
67 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
68 * In the interest of not exposing interfaces to user-space unnecessarily,
69 * the following kernel-only defines are being added here.
70 */
71 #define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
72 /* Return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
73 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
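/*
 * Illustrative sketch (not part of the original header): composing and
 * decomposing a 16-bit devid, assuming a device at bus 0x3a, slot 0x1c,
 * function 2.
 *
 *	u8  devfn = PCI_DEVFN(0x1c, 2);		// 0xe2
 *	u16 devid = PCI_DEVID(0x3a, devfn);	// 0x3ae2
 *	u8  bus   = PCI_BUS_NUM(devid);		// 0x3a
 *	u8  slot  = PCI_SLOT(devid & 0xff);	// 0x1c
 *	u8  func  = PCI_FUNC(devid & 0xff);	// 0x02
 */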
74
75 /* pci_slot represents a physical slot */
76 struct pci_slot {
77 struct pci_bus *bus; /* Bus this slot is on */
78 struct list_head list; /* Node in list of slots */
79 struct hotplug_slot *hotplug; /* Hotplug info (move here) */
80 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
81 struct kobject kobj;
82 };
83
84 static inline const char *pci_slot_name(const struct pci_slot *slot)
85 {
86 return kobject_name(&slot->kobj);
87 }
88
89 /* File state for mmap()s on /proc/bus/pci/X/Y */
90 enum pci_mmap_state {
91 pci_mmap_io,
92 pci_mmap_mem
93 };
94
95 /* For PCI devices, the region numbers are assigned this way: */
96 enum {
97 /* #0-5: standard PCI resources */
98 PCI_STD_RESOURCES,
99 PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
100
101 /* #6: expansion ROM resource */
102 PCI_ROM_RESOURCE,
103
104 /* Device-specific resources */
105 #ifdef CONFIG_PCI_IOV
106 PCI_IOV_RESOURCES,
107 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
108 #endif
109
110 /* PCI-to-PCI (P2P) bridge windows */
111 #define PCI_BRIDGE_IO_WINDOW (PCI_BRIDGE_RESOURCES + 0)
112 #define PCI_BRIDGE_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 1)
113 #define PCI_BRIDGE_PREF_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 2)
114
115 /* CardBus bridge windows */
116 #define PCI_CB_BRIDGE_IO_0_WINDOW (PCI_BRIDGE_RESOURCES + 0)
117 #define PCI_CB_BRIDGE_IO_1_WINDOW (PCI_BRIDGE_RESOURCES + 1)
118 #define PCI_CB_BRIDGE_MEM_0_WINDOW (PCI_BRIDGE_RESOURCES + 2)
119 #define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3)
120
121 /* Total number of bridge resources for P2P and CardBus */
122 #define PCI_BRIDGE_RESOURCE_NUM 4
123
124 /* Resources assigned to buses behind the bridge */
125 PCI_BRIDGE_RESOURCES,
126 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
127 PCI_BRIDGE_RESOURCE_NUM - 1,
128
129 /* Total resources associated with a PCI device */
130 PCI_NUM_RESOURCES,
131
132 /* Preserve this for compatibility */
133 DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
134 };
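/*
 * Illustrative sketch (assumes a bound "pdev"): walking the standard BAR
 * resources of a device by index using the enumeration above.
 *
 *	int bar;
 *
 *	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
 *		if (!pci_resource_len(pdev, bar))
 *			continue;	// BAR not implemented
 *		dev_info(&pdev->dev, "BAR %d: %pR\n", bar, &pdev->resource[bar]);
 *	}
 */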
135
136 /**
137 * enum pci_interrupt_pin - PCI INTx interrupt values
138 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
139 * @PCI_INTERRUPT_INTA: PCI INTA pin
140 * @PCI_INTERRUPT_INTB: PCI INTB pin
141 * @PCI_INTERRUPT_INTC: PCI INTC pin
142 * @PCI_INTERRUPT_INTD: PCI INTD pin
143 *
144 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
145 * PCI_INTERRUPT_PIN register.
146 */
147 enum pci_interrupt_pin {
148 PCI_INTERRUPT_UNKNOWN,
149 PCI_INTERRUPT_INTA,
150 PCI_INTERRUPT_INTB,
151 PCI_INTERRUPT_INTC,
152 PCI_INTERRUPT_INTD,
153 };
154
155 /* The number of legacy PCI INTx interrupts */
156 #define PCI_NUM_INTX 4
157
158 /*
159 * Reading from a device that doesn't respond typically returns ~0. A
160 * successful read from a device may also return ~0, so you need additional
161 * information to reliably identify errors.
162 */
163 #define PCI_ERROR_RESPONSE (~0ULL)
164 #define PCI_SET_ERROR_RESPONSE(val) (*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
165 #define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
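/*
 * Illustrative sketch (assumes a "pdev" at hand): using PCI_POSSIBLE_ERROR()
 * on a value read from config space. A register may legitimately contain all
 * ones, so this only indicates a *possible* error.
 *
 *	u32 id;
 *
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
 *	if (PCI_POSSIBLE_ERROR(id))
 *		return -ENODEV;		// device likely absent or unreachable
 */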
166
167 /*
168 * pci_power_t values must match the bits in the Capabilities PME_Support
169 * and Control/Status PowerState fields in the Power Management capability.
170 */
171 typedef int __bitwise pci_power_t;
172
173 #define PCI_D0 ((pci_power_t __force) 0)
174 #define PCI_D1 ((pci_power_t __force) 1)
175 #define PCI_D2 ((pci_power_t __force) 2)
176 #define PCI_D3hot ((pci_power_t __force) 3)
177 #define PCI_D3cold ((pci_power_t __force) 4)
178 #define PCI_UNKNOWN ((pci_power_t __force) 5)
179 #define PCI_POWER_ERROR ((pci_power_t __force) -1)
180
181 /* Remember to update this when the list above changes! */
182 extern const char *pci_power_names[];
183
184 static inline const char *pci_power_name(pci_power_t state)
185 {
186 return pci_power_names[1 + (__force int) state];
187 }
188
189 /**
190 * typedef pci_channel_state_t
191 *
192 * The pci_channel state describes connectivity between the CPU and
193 * the PCI device. If some PCI bus between here and the PCI device
194 * has crashed or locked up, this info is reflected here.
195 */
196 typedef unsigned int __bitwise pci_channel_state_t;
197
198 enum {
199 /* I/O channel is in normal state */
200 pci_channel_io_normal = (__force pci_channel_state_t) 1,
201
202 /* I/O to channel is blocked */
203 pci_channel_io_frozen = (__force pci_channel_state_t) 2,
204
205 /* PCI card is dead */
206 pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
207 };
208
209 typedef unsigned int __bitwise pcie_reset_state_t;
210
211 enum pcie_reset_state {
212 /* Reset is NOT asserted (Use to deassert reset) */
213 pcie_deassert_reset = (__force pcie_reset_state_t) 1,
214
215 /* Use #PERST to reset PCIe device */
216 pcie_warm_reset = (__force pcie_reset_state_t) 2,
217
218 /* Use PCIe Hot Reset to reset device */
219 pcie_hot_reset = (__force pcie_reset_state_t) 3
220 };
221
222 typedef unsigned short __bitwise pci_dev_flags_t;
223 enum pci_dev_flags {
224 /* INTX_DISABLE in PCI_COMMAND register disables MSI too */
225 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
226 /* Device configuration is irrevocably lost if disabled into D3 */
227 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
228 /* Provide indication device is assigned by a Virtual Machine Manager */
229 PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
230 /* Flag for quirk use to store if quirk-specific ACS is enabled */
231 PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
232 /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
233 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
234 /* Do not use bus resets for device */
235 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
236 /* Do not use PM reset even if device advertises NoSoftRst- */
237 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
238 /* Get VPD from function 0 VPD */
239 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
240 /* A non-root bridge where translation occurs, stop alias search here */
241 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
242 /* Do not use FLR even if device advertises PCI_AF_CAP */
243 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
244 /* Don't use Relaxed Ordering for TLPs directed at this device */
245 PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
246 /* Device does honor MSI masking despite saying otherwise */
247 PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
248 };
249
250 enum pci_irq_reroute_variant {
251 INTEL_IRQ_REROUTE_VARIANT = 1,
252 MAX_IRQ_REROUTE_VARIANTS = 3
253 };
254
255 typedef unsigned short __bitwise pci_bus_flags_t;
256 enum pci_bus_flags {
257 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
258 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
259 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
260 PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8,
261 };
262
263 /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
264 enum pcie_link_width {
265 PCIE_LNK_WIDTH_RESRV = 0x00,
266 PCIE_LNK_X1 = 0x01,
267 PCIE_LNK_X2 = 0x02,
268 PCIE_LNK_X4 = 0x04,
269 PCIE_LNK_X8 = 0x08,
270 PCIE_LNK_X12 = 0x0c,
271 PCIE_LNK_X16 = 0x10,
272 PCIE_LNK_X32 = 0x20,
273 PCIE_LNK_WIDTH_UNKNOWN = 0xff,
274 };
275
276 /* See matching string table in pci_speed_string() */
277 enum pci_bus_speed {
278 PCI_SPEED_33MHz = 0x00,
279 PCI_SPEED_66MHz = 0x01,
280 PCI_SPEED_66MHz_PCIX = 0x02,
281 PCI_SPEED_100MHz_PCIX = 0x03,
282 PCI_SPEED_133MHz_PCIX = 0x04,
283 PCI_SPEED_66MHz_PCIX_ECC = 0x05,
284 PCI_SPEED_100MHz_PCIX_ECC = 0x06,
285 PCI_SPEED_133MHz_PCIX_ECC = 0x07,
286 PCI_SPEED_66MHz_PCIX_266 = 0x09,
287 PCI_SPEED_100MHz_PCIX_266 = 0x0a,
288 PCI_SPEED_133MHz_PCIX_266 = 0x0b,
289 AGP_UNKNOWN = 0x0c,
290 AGP_1X = 0x0d,
291 AGP_2X = 0x0e,
292 AGP_4X = 0x0f,
293 AGP_8X = 0x10,
294 PCI_SPEED_66MHz_PCIX_533 = 0x11,
295 PCI_SPEED_100MHz_PCIX_533 = 0x12,
296 PCI_SPEED_133MHz_PCIX_533 = 0x13,
297 PCIE_SPEED_2_5GT = 0x14,
298 PCIE_SPEED_5_0GT = 0x15,
299 PCIE_SPEED_8_0GT = 0x16,
300 PCIE_SPEED_16_0GT = 0x17,
301 PCIE_SPEED_32_0GT = 0x18,
302 PCIE_SPEED_64_0GT = 0x19,
303 PCI_SPEED_UNKNOWN = 0xff,
304 };
305
306 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
307 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
308
309 struct pci_vpd {
310 struct mutex lock;
311 unsigned int len;
312 u8 cap;
313 };
314
315 struct irq_affinity;
316 struct pcie_bwctrl_data;
317 struct pcie_link_state;
318 struct pci_sriov;
319 struct pci_p2pdma;
320 struct rcec_ea;
321
322 /* struct pci_dev - describes a PCI device
323 *
324 * @supported_speeds: PCIe Supported Link Speeds Vector (+ reserved 0 at
325 * LSB). 0 when the supported speeds cannot be
326 * determined (e.g., for Root Complex Integrated
327 * Endpoints without the relevant Capability
328 * Registers).
329 */
330 struct pci_dev {
331 struct list_head bus_list; /* Node in per-bus list */
332 struct pci_bus *bus; /* Bus this device is on */
333 struct pci_bus *subordinate; /* Bus this device bridges to */
334
335 void *sysdata; /* Hook for sys-specific extension */
336 struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */
337 struct pci_slot *slot; /* Physical slot this device is in */
338
339 unsigned int devfn; /* Encoded device & function index */
340 unsigned short vendor;
341 unsigned short device;
342 unsigned short subsystem_vendor;
343 unsigned short subsystem_device;
344 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
345 u8 revision; /* PCI revision, low byte of class word */
346 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
347 #ifdef CONFIG_PCIEAER
348 u16 aer_cap; /* AER capability offset */
349 struct aer_stats *aer_stats; /* AER stats for this device */
350 #endif
351 #ifdef CONFIG_PCIEPORTBUS
352 struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */
353 struct pci_dev *rcec; /* Associated RCEC device */
354 #endif
355 u32 devcap; /* PCIe Device Capabilities */
356 u8 pcie_cap; /* PCIe capability offset */
357 u8 msi_cap; /* MSI capability offset */
358 u8 msix_cap; /* MSI-X capability offset */
359 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
360 u8 rom_base_reg; /* Config register controlling ROM */
361 u8 pin; /* Interrupt pin this device uses */
362 u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */
363 unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */
364
365 struct pci_driver *driver; /* Driver bound to this device */
366 u64 dma_mask; /* Mask of the bits of bus address this
367 device implements. Normally this is
368 0xffffffff. You only need to change
369 this if your device has broken DMA
370 or supports 64-bit transfers. */
371
372 struct device_dma_parameters dma_parms;
373
374 pci_power_t current_state; /* Current operating state. In ACPI,
375 this is D0-D3, D0 being fully
376 functional, and D3 being off. */
377 u8 pm_cap; /* PM capability offset */
378 unsigned int pme_support:5; /* Bitmask of states from which PME#
379 can be generated */
380 unsigned int pme_poll:1; /* Poll device's PME status bit */
381 unsigned int pinned:1; /* Whether this dev is pinned */
382 unsigned int config_rrs_sv:1; /* Config RRS software visibility */
383 unsigned int imm_ready:1; /* Supports Immediate Readiness */
384 unsigned int d1_support:1; /* Low power state D1 is supported */
385 unsigned int d2_support:1; /* Low power state D2 is supported */
386 unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
387 unsigned int no_d3cold:1; /* D3cold is forbidden */
388 unsigned int bridge_d3:1; /* Allow D3 for bridge */
389 unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
390 unsigned int mmio_always_on:1; /* Disallow turning off io/mem
391 decoding during BAR sizing */
392 unsigned int wakeup_prepared:1;
393 unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
394 unsigned int ignore_hotplug:1; /* Ignore hotplug events */
395 unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
396 controlled exclusively by
397 user sysfs */
398 unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
399 bit manually */
400 unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */
401 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
402
403 u16 l1ss; /* L1SS Capability pointer */
404 #ifdef CONFIG_PCIEASPM
405 struct pcie_link_state *link_state; /* ASPM link state */
406 unsigned int ltr_path:1; /* Latency Tolerance Reporting
407 supported from root to here */
408 #endif
409 unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
410 unsigned int eetlp_prefix_max:3; /* Max # of End-End TLP Prefixes, 0=not supported */
411
412 pci_channel_state_t error_state; /* Current connectivity state */
413 struct device dev; /* Generic device interface */
414
415 int cfg_size; /* Size of config space */
416
417 /*
418 * Instead of touching interrupt line and base address registers
419 * directly, use the values stored here. They might be different!
420 */
421 unsigned int irq;
422 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
423 struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
424
425 bool match_driver; /* Skip attaching driver */
426
427 unsigned int transparent:1; /* Subtractive decode bridge */
428 unsigned int io_window:1; /* Bridge has I/O window */
429 unsigned int pref_window:1; /* Bridge has pref mem window */
430 unsigned int pref_64_window:1; /* Pref mem window is 64-bit */
431 unsigned int multifunction:1; /* Multi-function device */
432
433 unsigned int is_busmaster:1; /* Is busmaster */
434 unsigned int no_msi:1; /* May not use MSI */
435 unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
436 unsigned int block_cfg_access:1; /* Config space access blocked */
437 unsigned int broken_parity_status:1; /* Generates false positive parity */
438 unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
439 unsigned int msi_enabled:1;
440 unsigned int msix_enabled:1;
441 unsigned int ari_enabled:1; /* ARI forwarding */
442 unsigned int ats_enabled:1; /* Address Translation Svc */
443 unsigned int pasid_enabled:1; /* Process Address Space ID */
444 unsigned int pri_enabled:1; /* Page Request Interface */
445 unsigned int tph_enabled:1; /* TLP Processing Hints */
446 unsigned int is_managed:1; /* Managed via devres */
447 unsigned int is_msi_managed:1; /* MSI release via devres installed */
448 unsigned int needs_freset:1; /* Requires fundamental reset */
449 unsigned int state_saved:1;
450 unsigned int is_physfn:1;
451 unsigned int is_virtfn:1;
452 unsigned int is_hotplug_bridge:1;
453 unsigned int shpc_managed:1; /* SHPC owned by shpchp */
454 unsigned int is_thunderbolt:1; /* Thunderbolt controller */
455 /*
456 	 * Devices marked as untrusted are those that can potentially mount
457 	 * DMA attacks and similar. They are typically connected through
458 	 * external ports such as Thunderbolt, but not limited to that.
459 	 * When an IOMMU is enabled, they should be given full IOMMU
460 	 * mappings to make sure they cannot access arbitrary memory.
461 */
462 unsigned int untrusted:1;
463 /*
464 * Info from the platform, e.g., ACPI or device tree, may mark a
465 * device as "external-facing". An external-facing device is
466 * itself internal but devices downstream from it are external.
467 */
468 unsigned int external_facing:1;
469 unsigned int broken_intx_masking:1; /* INTx masking can't be used */
470 unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
471 unsigned int irq_managed:1;
472 unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
473 unsigned int is_probed:1; /* Device probing in progress */
474 unsigned int link_active_reporting:1;/* Device capable of reporting link active */
475 unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
476 unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
477 unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
478 unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */
479 pci_dev_flags_t dev_flags;
480 atomic_t enable_cnt; /* pci_enable_device has been called */
481
482 spinlock_t pcie_cap_lock; /* Protects RMW ops in capability accessors */
483 u32 saved_config_space[16]; /* Config space saved at suspend time */
484 struct hlist_head saved_cap_space;
485 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
486 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
487
488 #ifdef CONFIG_HOTPLUG_PCI_PCIE
489 unsigned int broken_cmd_compl:1; /* No compl for some cmds */
490 #endif
491 #ifdef CONFIG_PCIE_PTM
492 u16 ptm_cap; /* PTM Capability */
493 unsigned int ptm_root:1;
494 unsigned int ptm_enabled:1;
495 u8 ptm_granularity;
496 #endif
497 #ifdef CONFIG_PCI_MSI
498 void __iomem *msix_base;
499 raw_spinlock_t msi_lock;
500 #endif
501 struct pci_vpd vpd;
502 #ifdef CONFIG_PCIE_DPC
503 u16 dpc_cap;
504 unsigned int dpc_rp_extensions:1;
505 u8 dpc_rp_log_size;
506 #endif
507 struct pcie_bwctrl_data *link_bwctrl;
508 #ifdef CONFIG_PCI_ATS
509 union {
510 struct pci_sriov *sriov; /* PF: SR-IOV info */
511 struct pci_dev *physfn; /* VF: related PF */
512 };
513 u16 ats_cap; /* ATS Capability offset */
514 u8 ats_stu; /* ATS Smallest Translation Unit */
515 #endif
516 #ifdef CONFIG_PCI_PRI
517 u16 pri_cap; /* PRI Capability offset */
518 u32 pri_reqs_alloc; /* Number of PRI requests allocated */
519 unsigned int pasid_required:1; /* PRG Response PASID Required */
520 #endif
521 #ifdef CONFIG_PCI_PASID
522 u16 pasid_cap; /* PASID Capability offset */
523 u16 pasid_features;
524 #endif
525 #ifdef CONFIG_PCI_P2PDMA
526 struct pci_p2pdma __rcu *p2pdma;
527 #endif
528 #ifdef CONFIG_PCI_DOE
529 struct xarray doe_mbs; /* Data Object Exchange mailboxes */
530 #endif
531 #ifdef CONFIG_PCI_NPEM
532 struct npem *npem; /* Native PCIe Enclosure Management */
533 #endif
534 u16 acs_cap; /* ACS Capability offset */
535 u8 supported_speeds; /* Supported Link Speeds Vector */
536 phys_addr_t rom; /* Physical address if not from BAR */
537 size_t romlen; /* Length if not from BAR */
538 /*
539 * Driver name to force a match. Do not set directly, because core
540 * frees it. Use driver_set_override() to set or clear it.
541 */
542 const char *driver_override;
543
544 unsigned long priv_flags; /* Private flags for the PCI driver */
545
546 /* These methods index pci_reset_fn_methods[] */
547 u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
548
549 #ifdef CONFIG_PCIE_TPH
550 u16 tph_cap; /* TPH capability offset */
551 u8 tph_mode; /* TPH mode */
552 u8 tph_req_type; /* TPH requester type */
553 #endif
554 };
555
556 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
557 {
558 #ifdef CONFIG_PCI_IOV
559 if (dev->is_virtfn)
560 dev = dev->physfn;
561 #endif
562 return dev;
563 }
564
565 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
566
567 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
568 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
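/*
 * Illustrative sketch (assumption): for_each_pci_dev() wraps pci_get_device(),
 * which drops the reference to the previous device and takes one on the next.
 * A caller that breaks out early still holds a reference to the current
 * device and must either keep it deliberately or drop it with pci_dev_put().
 *
 *	struct pci_dev *pdev = NULL;
 *
 *	for_each_pci_dev(pdev) {
 *		if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
 *			// use pdev, then release the reference we still hold
 *			pci_dev_put(pdev);
 *			break;
 *		}
 *	}
 */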
569
570 static inline int pci_channel_offline(struct pci_dev *pdev)
571 {
572 return (pdev->error_state != pci_channel_io_normal);
573 }
574
575 /*
576  * In the ACPI spec, the PCI Segment Group number for each PCI host
577  * bridge is limited to a 16-bit value, so (int)-1 is not a valid
578  * PCI domain number and can be used as a sentinel value indicating
579  * that ->domain_nr is not set by the driver (and
580  * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
581  * pci_bus_find_domain_nr()).
582 */
583 #define PCI_DOMAIN_NR_NOT_SET (-1)
584
585 struct pci_host_bridge {
586 struct device dev;
587 struct pci_bus *bus; /* Root bus */
588 struct pci_ops *ops;
589 struct pci_ops *child_ops;
590 void *sysdata;
591 int busnr;
592 int domain_nr;
593 struct list_head windows; /* resource_entry */
594 struct list_head dma_ranges; /* dma ranges resource list */
595 u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
596 int (*map_irq)(const struct pci_dev *, u8, u8);
597 void (*release_fn)(struct pci_host_bridge *);
598 int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
599 void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
600 void *release_data;
601 unsigned int ignore_reset_delay:1; /* For entire hierarchy */
602 unsigned int no_ext_tags:1; /* No Extended Tags */
603 unsigned int no_inc_mrrs:1; /* No Increase MRRS */
604 unsigned int native_aer:1; /* OS may use PCIe AER */
605 unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
606 unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
607 unsigned int native_pme:1; /* OS may use PCIe PME */
608 unsigned int native_ltr:1; /* OS may use PCIe LTR */
609 unsigned int native_dpc:1; /* OS may use PCIe DPC */
610 unsigned int native_cxl_error:1; /* OS may use CXL RAS/Events */
611 unsigned int preserve_config:1; /* Preserve FW resource setup */
612 unsigned int size_windows:1; /* Enable root bus sizing */
613 unsigned int msi_domain:1; /* Bridge wants MSI domain */
614
615 /* Resource alignment requirements */
616 resource_size_t (*align_resource)(struct pci_dev *dev,
617 const struct resource *res,
618 resource_size_t start,
619 resource_size_t size,
620 resource_size_t align);
621 unsigned long private[] ____cacheline_aligned;
622 };
623
624 #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
625
626 static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
627 {
628 return (void *)bridge->private;
629 }
630
631 static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
632 {
633 return container_of(priv, struct pci_host_bridge, private);
634 }
635
636 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
637 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
638 size_t priv);
639 void pci_free_host_bridge(struct pci_host_bridge *bridge);
640 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
641
642 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
643 void (*release_fn)(struct pci_host_bridge *),
644 void *release_data);
645
646 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
647
648 #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
649
650 struct pci_bus {
651 struct list_head node; /* Node in list of buses */
652 struct pci_bus *parent; /* Parent bus this bridge is on */
653 struct list_head children; /* List of child buses */
654 struct list_head devices; /* List of devices on this bus */
655 struct pci_dev *self; /* Bridge device as seen by parent */
656 struct list_head slots; /* List of slots on this bus;
657 protected by pci_slot_mutex */
658 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
659 struct list_head resources; /* Address space routed to this bus */
660 struct resource busn_res; /* Bus numbers routed to this bus */
661
662 struct pci_ops *ops; /* Configuration access functions */
663 void *sysdata; /* Hook for sys-specific extension */
664 struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */
665
666 unsigned char number; /* Bus number */
667 unsigned char primary; /* Number of primary bridge */
668 unsigned char max_bus_speed; /* enum pci_bus_speed */
669 unsigned char cur_bus_speed; /* enum pci_bus_speed */
670 #ifdef CONFIG_PCI_DOMAINS_GENERIC
671 int domain_nr;
672 #endif
673
674 char name[48];
675
676 unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */
677 pci_bus_flags_t bus_flags; /* Inherited by child buses */
678 struct device *bridge;
679 struct device dev;
680 struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
681 struct bin_attribute *legacy_mem; /* Legacy mem */
682 unsigned int is_added:1;
683 unsigned int unsafe_warn:1; /* warned about RW1C config write */
684 };
685
686 #define to_pci_bus(n) container_of(n, struct pci_bus, dev)
687
688 static inline u16 pci_dev_id(struct pci_dev *dev)
689 {
690 return PCI_DEVID(dev->bus->number, dev->devfn);
691 }
692
693 /*
694 * Returns true if the PCI bus is root (behind host-PCI bridge),
695 * false otherwise
696 *
697 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
698 * This is incorrect because "virtual" buses added for SR-IOV (via
699 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
700 */
701 static inline bool pci_is_root_bus(struct pci_bus *pbus)
702 {
703 return !(pbus->parent);
704 }
705
706 /**
707 * pci_is_bridge - check if the PCI device is a bridge
708 * @dev: PCI device
709 *
710  * Return true if the PCI device is a bridge, whether or not it has
711  * a subordinate bus.
712 */
713 static inline bool pci_is_bridge(struct pci_dev *dev)
714 {
715 return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
716 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
717 }
718
719 /**
720 * pci_is_vga - check if the PCI device is a VGA device
721 * @pdev: PCI device
722 *
723 * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
724 * VGA Base Class and Sub-Classes:
725 *
726 * 03 00 PCI_CLASS_DISPLAY_VGA VGA-compatible or 8514-compatible
727 * 00 01 PCI_CLASS_NOT_DEFINED_VGA VGA-compatible (before Class Code)
728 *
729 * Return true if the PCI device is a VGA device and uses the legacy VGA
730 * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
731 * aliases).
732 */
733 static inline bool pci_is_vga(struct pci_dev *pdev)
734 {
735 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
736 return true;
737
738 if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
739 return true;
740
741 return false;
742 }
743
744 #define for_each_pci_bridge(dev, bus) \
745 list_for_each_entry(dev, &bus->devices, bus_list) \
746 if (!pci_is_bridge(dev)) {} else
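/*
 * Illustrative sketch (assumes a "bus" pointer): for_each_pci_bridge()
 * iterates the devices on a bus and skips everything that is not a bridge.
 *
 *	struct pci_dev *bridge;
 *
 *	for_each_pci_bridge(bridge, bus)
 *		dev_info(&bridge->dev, "subordinate bus %02x\n",
 *			 bridge->subordinate ? bridge->subordinate->number : 0);
 */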
747
748 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
749 {
750 dev = pci_physfn(dev);
751 if (pci_is_root_bus(dev->bus))
752 return NULL;
753
754 return dev->bus->self;
755 }
756
757 #ifdef CONFIG_PCI_MSI
758 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
759 {
760 return pci_dev->msi_enabled || pci_dev->msix_enabled;
761 }
762 #else
763 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
764 #endif
765
766 /* Error values that may be returned by PCI functions */
767 #define PCIBIOS_SUCCESSFUL 0x00
768 #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
769 #define PCIBIOS_BAD_VENDOR_ID 0x83
770 #define PCIBIOS_DEVICE_NOT_FOUND 0x86
771 #define PCIBIOS_BAD_REGISTER_NUMBER 0x87
772 #define PCIBIOS_SET_FAILED 0x88
773 #define PCIBIOS_BUFFER_TOO_SMALL 0x89
774
775 /* Translate above to generic errno for passing back through non-PCI code */
776 static inline int pcibios_err_to_errno(int err)
777 {
778 if (err <= PCIBIOS_SUCCESSFUL)
779 return err; /* Assume already errno */
780
781 switch (err) {
782 case PCIBIOS_FUNC_NOT_SUPPORTED:
783 return -ENOENT;
784 case PCIBIOS_BAD_VENDOR_ID:
785 return -ENOTTY;
786 case PCIBIOS_DEVICE_NOT_FOUND:
787 return -ENODEV;
788 case PCIBIOS_BAD_REGISTER_NUMBER:
789 return -EFAULT;
790 case PCIBIOS_SET_FAILED:
791 return -EIO;
792 case PCIBIOS_BUFFER_TOO_SMALL:
793 return -ENOSPC;
794 }
795
796 return -ERANGE;
797 }
798
799 /* Low-level architecture-dependent routines */
800
801 struct pci_ops {
802 int (*add_bus)(struct pci_bus *bus);
803 void (*remove_bus)(struct pci_bus *bus);
804 void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
805 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
806 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
807 };
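/*
 * Illustrative sketch (an assumption, not a real driver): an ECAM-like host
 * controller can often implement struct pci_ops with only a map_bus()
 * callback plus the generic accessors declared further below. "foo" and its
 * cfg_base field are hypothetical.
 *
 *	static void __iomem *foo_map_bus(struct pci_bus *bus, unsigned int devfn,
 *					 int where)
 *	{
 *		struct foo *foo = bus->sysdata;
 *
 *		// Standard ECAM layout: bus[27:20], devfn[19:12], reg[11:0]
 *		return foo->cfg_base + (bus->number << 20) + (devfn << 12) + where;
 *	}
 *
 *	static struct pci_ops foo_pci_ops = {
 *		.map_bus = foo_map_bus,
 *		.read	 = pci_generic_config_read,
 *		.write	 = pci_generic_config_write,
 *	};
 */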
808
809 /*
810 * ACPI needs to be able to access PCI config space before we've done a
811 * PCI bus scan and created pci_bus structures.
812 */
813 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
814 int reg, int len, u32 *val);
815 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
816 int reg, int len, u32 val);
817
818 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
819 typedef u64 pci_bus_addr_t;
820 #else
821 typedef u32 pci_bus_addr_t;
822 #endif
823
824 struct pci_bus_region {
825 pci_bus_addr_t start;
826 pci_bus_addr_t end;
827 };
828
829 struct pci_dynids {
830 spinlock_t lock; /* Protects list, index */
831 struct list_head list; /* For IDs added at runtime */
832 };
833
834
835 /*
836 * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
837 * a set of callbacks in struct pci_error_handlers, that device driver
838 * will be notified of PCI bus errors, and will be driven to recovery
839 * when an error occurs.
840 */
841
842 typedef unsigned int __bitwise pci_ers_result_t;
843
844 enum pci_ers_result {
845 /* No result/none/not supported in device driver */
846 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
847
848 /* Device driver can recover without slot reset */
849 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
850
851 /* Device driver wants slot to be reset */
852 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
853
854 /* Device has completely failed, is unrecoverable */
855 PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
856
857 /* Device driver is fully recovered and operational */
858 PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
859
860 /* No AER capabilities registered for the driver */
861 PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
862 };
863
864 /* PCI bus error event callbacks */
865 struct pci_error_handlers {
866 /* PCI bus error detected on this device */
867 pci_ers_result_t (*error_detected)(struct pci_dev *dev,
868 pci_channel_state_t error);
869
870 /* MMIO has been re-enabled, but not DMA */
871 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
872
873 /* PCI slot has been reset */
874 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
875
876 /* PCI function reset prepare or completed */
877 void (*reset_prepare)(struct pci_dev *dev);
878 void (*reset_done)(struct pci_dev *dev);
879
880 /* Device driver may resume normal operations */
881 void (*resume)(struct pci_dev *dev);
882
883 /* Allow device driver to record more details of a correctable error */
884 void (*cor_error_detected)(struct pci_dev *dev);
885 };
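/*
 * Illustrative sketch (hypothetical "foo" driver): a minimal pair of error
 * handlers. See Documentation/PCI/pci-error-recovery.rst for the complete
 * recovery protocol.
 *
 *	static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
 *						   pci_channel_state_t state)
 *	{
 *		if (state == pci_channel_io_perm_failure)
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		// quiesce I/O here before asking for a reset
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 *
 *	static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
 *	{
 *		pci_restore_state(pdev);
 *		return PCI_ERS_RESULT_RECOVERED;
 *	}
 *
 *	static const struct pci_error_handlers foo_err_handler = {
 *		.error_detected	= foo_error_detected,
 *		.slot_reset	= foo_slot_reset,
 *	};
 */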
886
887
888 struct module;
889
890 /**
891 * struct pci_driver - PCI driver structure
892 * @name: Driver name.
893 * @id_table: Pointer to table of device IDs the driver is
894 * interested in. Most drivers should export this
895 * table using MODULE_DEVICE_TABLE(pci,...).
896 * @probe: This probing function gets called (during execution
897 * of pci_register_driver() for already existing
898 * devices or later if a new device gets inserted) for
899 * all PCI devices which match the ID table and are not
900 * "owned" by the other drivers yet. This function gets
901 * passed a "struct pci_dev \*" for each device whose
902 * entry in the ID table matches the device. The probe
903 * function returns zero when the driver chooses to
904 * take "ownership" of the device or an error code
905 * (negative number) otherwise.
906 * The probe function always gets called from process
907 * context, so it can sleep.
908 * @remove: The remove() function gets called whenever a device
909 * being handled by this driver is removed (either during
910 * deregistration of the driver or when it's manually
911 * pulled out of a hot-pluggable slot).
912 * The remove function always gets called from process
913 * context, so it can sleep.
914 * @suspend: Put device into low power state.
915 * @resume: Wake device from low power state.
916 * (Please see Documentation/power/pci.rst for descriptions
917 * of PCI Power Management and the related functions.)
918 * @shutdown: Hook into reboot_notifier_list (kernel/sys.c).
919 * Intended to stop any idling DMA operations.
920 * Useful for enabling wake-on-lan (NIC) or changing
921 * the power state of a device before reboot.
922 * e.g. drivers/net/e100.c.
923 * @sriov_configure: Optional driver callback to allow configuration of
924 * number of VFs to enable via sysfs "sriov_numvfs" file.
925 * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
926 * vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
927 * This will change MSI-X Table Size in the VF Message Control
928 * registers.
929 * @sriov_get_vf_total_msix: PF driver callback to get the total number of
930 * MSI-X vectors available for distribution to the VFs.
931 * @err_handler: See Documentation/PCI/pci-error-recovery.rst
932 * @groups: Sysfs attribute groups.
933 * @dev_groups: Attributes attached to the device that will be
934 * created once it is bound to the driver.
935 * @driver: Driver model structure.
936 * @dynids: List of dynamically added device IDs.
937 * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
938 * For most device drivers, no need to care about this flag
939 * as long as all DMAs are handled through the kernel DMA API.
940 * For some special ones, for example VFIO drivers, they know
941 * how to manage the DMA themselves and set this flag so that
942 * the IOMMU layer will allow them to setup and manage their
943 * own I/O address space.
944 */
945 struct pci_driver {
946 const char *name;
947 const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
948 int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
949 void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
950 int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
951 int (*resume)(struct pci_dev *dev); /* Device woken up */
952 void (*shutdown)(struct pci_dev *dev);
953 int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
954 int (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
955 u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf);
956 const struct pci_error_handlers *err_handler;
957 const struct attribute_group **groups;
958 const struct attribute_group **dev_groups;
959 struct device_driver driver;
960 struct pci_dynids dynids;
961 bool driver_managed_dma;
962 };
963
964 #define to_pci_driver(__drv) \
965 ( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
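/*
 * Illustrative sketch (hypothetical "foo" driver and 0x1234 device ID): the
 * minimum needed to register a PCI driver, using the PCI_DEVICE() helper
 * documented just below and module_pci_driver() from later in this header.
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT, 0x1234) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, foo_ids);
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		return pcim_enable_device(pdev);
 *	}
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *	};
 *	module_pci_driver(foo_driver);
 */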
966
967 /**
968 * PCI_DEVICE - macro used to describe a specific PCI device
969 * @vend: the 16 bit PCI Vendor ID
970 * @dev: the 16 bit PCI Device ID
971 *
972 * This macro is used to create a struct pci_device_id that matches a
973 * specific device. The subvendor and subdevice fields will be set to
974 * PCI_ANY_ID.
975 */
976 #define PCI_DEVICE(vend,dev) \
977 .vendor = (vend), .device = (dev), \
978 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
979
980 /**
981 * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
982 * override_only flags.
983 * @vend: the 16 bit PCI Vendor ID
984 * @dev: the 16 bit PCI Device ID
985 * @driver_override: the 32 bit PCI Device override_only
986 *
987 * This macro is used to create a struct pci_device_id that matches only a
988 * driver_override device. The subvendor and subdevice fields will be set to
989 * PCI_ANY_ID.
990 */
991 #define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
992 .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
993 .subdevice = PCI_ANY_ID, .override_only = (driver_override)
994
995 /**
996 * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
997 * "driver_override" PCI device.
998 * @vend: the 16 bit PCI Vendor ID
999 * @dev: the 16 bit PCI Device ID
1000 *
1001 * This macro is used to create a struct pci_device_id that matches a
1002 * specific device. The subvendor and subdevice fields will be set to
1003 * PCI_ANY_ID and the driver_override will be set to
1004 * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
1005 */
1006 #define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
1007 PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
1008
1009 /**
1010 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
1011 * @vend: the 16 bit PCI Vendor ID
1012 * @dev: the 16 bit PCI Device ID
1013 * @subvend: the 16 bit PCI Subvendor ID
1014 * @subdev: the 16 bit PCI Subdevice ID
1015 *
1016 * This macro is used to create a struct pci_device_id that matches a
1017 * specific device with subsystem information.
1018 */
1019 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
1020 .vendor = (vend), .device = (dev), \
1021 .subvendor = (subvend), .subdevice = (subdev)
1022
1023 /**
1024 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
1025 * @dev_class: the class, subclass, prog-if triple for this device
1026 * @dev_class_mask: the class mask for this device
1027 *
1028 * This macro is used to create a struct pci_device_id that matches a
1029 * specific PCI class. The vendor, device, subvendor, and subdevice
1030 * fields will be set to PCI_ANY_ID.
1031 */
1032 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
1033 .class = (dev_class), .class_mask = (dev_class_mask), \
1034 .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
1035 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1036
1037 /**
1038 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
1039 * @vend: the vendor name
1040 * @dev: the 16 bit PCI Device ID
1041 *
1042 * This macro is used to create a struct pci_device_id that matches a
1043 * specific PCI device. The subvendor, and subdevice fields will be set
1044 * to PCI_ANY_ID. The macro allows the next field to follow as the device
1045 * private data.
1046 */
1047 #define PCI_VDEVICE(vend, dev) \
1048 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1049 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
1050
1051 /**
1052 * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
1053 * @vend: the vendor name
1054 * @dev: the 16 bit PCI Device ID
1055 * @subvend: the 16 bit PCI Subvendor ID
1056 * @subdev: the 16 bit PCI Subdevice ID
1057 *
1058 * Generate the pci_device_id struct layout for the specific PCI
1059 * device/subdevice. Private data may follow the output.
1060 */
1061 #define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
1062 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1063 .subvendor = (subvend), .subdevice = (subdev), 0, 0
1064
1065 /**
1066 * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
1067 * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
1068 * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
1069 * @data: the driver data to be filled
1070 *
1071 * This macro is used to create a struct pci_device_id that matches a
1072 * specific PCI device. The subvendor, and subdevice fields will be set
1073 * to PCI_ANY_ID.
1074 */
1075 #define PCI_DEVICE_DATA(vend, dev, data) \
1076 .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
1077 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
1078 .driver_data = (kernel_ulong_t)(data)
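/*
 * Illustrative sketch (assumption): attaching per-device data to an ID table
 * entry and retrieving it in probe(). PCI_DEVICE_ID_REDHAT_FOO and the
 * foo_cfg structure are made up for the example.
 *
 *	struct foo_cfg { unsigned int num_queues; };
 *	static const struct foo_cfg foo_cfg_a = { .num_queues = 8 };
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE_DATA(REDHAT, FOO, &foo_cfg_a) },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		const struct foo_cfg *cfg = (const struct foo_cfg *)id->driver_data;
 *
 *		dev_info(&pdev->dev, "%u queues\n", cfg->num_queues);
 *		return 0;
 *	}
 */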
1079
1080 enum {
1081 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */
1082 PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */
1083 PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */
1084 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */
1085 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */
1086 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
1087 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
1088 };
1089
1090 #define PCI_IRQ_INTX (1 << 0) /* Allow INTx interrupts */
1091 #define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
1092 #define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
1093 #define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
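/*
 * Illustrative sketch (hypothetical handler and names): allocating between 1
 * and 8 vectors, preferring MSI-X, then MSI, then INTx. With a managed device
 * (pcim_enable_device()) the vectors are typically released automatically on
 * driver detach.
 *
 *	int nvecs, ret;
 *
 *	nvecs = pci_alloc_irq_vectors(pdev, 1, 8,
 *				      PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
 *	if (nvecs < 0)
 *		return nvecs;
 *
 *	// Linux IRQ number of vector 0:
 *	ret = request_irq(pci_irq_vector(pdev, 0), foo_irq_handler, 0, "foo", foo);
 */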
1094
1095 /* These external functions are only available when PCI support is enabled */
1096 #ifdef CONFIG_PCI
1097
1098 extern unsigned int pci_flags;
1099
1100 static inline void pci_set_flags(int flags) { pci_flags = flags; }
1101 static inline void pci_add_flags(int flags) { pci_flags |= flags; }
1102 static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
1103 static inline int pci_has_flag(int flag) { return pci_flags & flag; }
1104
1105 void pcie_bus_configure_settings(struct pci_bus *bus);
1106
1107 enum pcie_bus_config_types {
1108 PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */
1109 PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */
1110 PCIE_BUS_SAFE, /* Use largest MPS boot-time devices support */
1111 PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */
1112 PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */
1113 };
1114
1115 extern enum pcie_bus_config_types pcie_bus_config;
1116
1117 extern const struct bus_type pci_bus_type;
1118
1119 /* Do NOT directly access this variable unless you are arch-specific PCI
1120 * code, or PCI core code. */
1121 extern struct list_head pci_root_buses; /* List of all known PCI buses */
1122 /* Some device drivers need to know whether PCI has been initialized */
1123 int no_pci_devices(void);
1124
1125 void pcibios_resource_survey_bus(struct pci_bus *bus);
1126 void pcibios_bus_add_device(struct pci_dev *pdev);
1127 void pcibios_add_bus(struct pci_bus *bus);
1128 void pcibios_remove_bus(struct pci_bus *bus);
1129 void pcibios_fixup_bus(struct pci_bus *);
1130 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1131 /* Architecture-specific versions may override this (weak) */
1132 char *pcibios_setup(char *str);
1133
1134 /* Used only when drivers/pci/setup.c is used */
1135 resource_size_t pcibios_align_resource(void *, const struct resource *,
1136 resource_size_t,
1137 resource_size_t);
1138
1139 /* Weak but can be overridden by arch */
1140 void pci_fixup_cardbus(struct pci_bus *);
1141
1142 /* Generic PCI functions used internally */
1143
1144 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1145 struct resource *res);
1146 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1147 struct pci_bus_region *region);
1148 void pcibios_scan_specific_bus(int busn);
1149 struct pci_bus *pci_find_bus(int domain, int busnr);
1150 void pci_bus_add_devices(const struct pci_bus *bus);
1151 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1152 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1153 struct pci_ops *ops, void *sysdata,
1154 struct list_head *resources);
1155 int pci_host_probe(struct pci_host_bridge *bridge);
1156 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1157 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1158 void pci_bus_release_busn_res(struct pci_bus *b);
1159 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1160 struct pci_ops *ops, void *sysdata,
1161 struct list_head *resources);
1162 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1163 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1164 int busnr);
1165 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1166 const char *name,
1167 struct hotplug_slot *hotplug);
1168 void pci_destroy_slot(struct pci_slot *slot);
1169 #ifdef CONFIG_SYSFS
1170 void pci_dev_assign_slot(struct pci_dev *dev);
1171 #else
1172 static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
1173 #endif
1174 int pci_scan_slot(struct pci_bus *bus, int devfn);
1175 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1176 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1177 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1178 void pci_bus_add_device(struct pci_dev *dev);
1179 void pci_read_bridge_bases(struct pci_bus *child);
1180 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1181 struct resource *res);
1182 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1183 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1184 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1185 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1186 void pci_dev_put(struct pci_dev *dev);
1187 DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
1188 void pci_remove_bus(struct pci_bus *b);
1189 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1190 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1191 void pci_stop_root_bus(struct pci_bus *bus);
1192 void pci_remove_root_bus(struct pci_bus *bus);
1193 void pci_setup_cardbus(struct pci_bus *bus);
1194 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1195 void pci_sort_breadthfirst(void);
1196 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1197 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1198
1199 /* Generic PCI functions exported to card drivers */
1200
1201 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1202 u8 pci_find_capability(struct pci_dev *dev, int cap);
1203 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1204 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1205 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
1206 u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
1207 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
1208 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1209 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
1210 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
1211
1212 u64 pci_get_dsn(struct pci_dev *dev);
1213
1214 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1215 struct pci_dev *from);
1216 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1217 unsigned int ss_vendor, unsigned int ss_device,
1218 struct pci_dev *from);
1219 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1220 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1221 unsigned int devfn);
1222 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1223 struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
1224
1225 int pci_dev_present(const struct pci_device_id *ids);
1226
1227 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1228 int where, u8 *val);
1229 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1230 int where, u16 *val);
1231 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1232 int where, u32 *val);
1233 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1234 int where, u8 val);
1235 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1236 int where, u16 val);
1237 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1238 int where, u32 val);
1239
1240 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1241 int where, int size, u32 *val);
1242 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1243 int where, int size, u32 val);
1244 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1245 int where, int size, u32 *val);
1246 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1247 int where, int size, u32 val);
1248
1249 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1250
1251 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1252 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1253 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1254 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1255 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1256 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1257 void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
1258 u32 clear, u32 set);
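/*
 * Illustrative sketch (assumption; 0x40 is a made-up vendor-specific
 * register): the accessors return a PCIBIOS_* code (translatable with
 * pcibios_err_to_errno()), and values read from a broken device should be
 * screened with PCI_POSSIBLE_ERROR() where it matters.
 *
 *	u16 vendor;
 *	int ret;
 *
 *	ret = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *	if (ret)
 *		return pcibios_err_to_errno(ret);
 *
 *	// Read-modify-write of a dword: clear bit 0, set bit 1.
 *	pci_clear_and_set_config_dword(pdev, 0x40, BIT(0), BIT(1));
 */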
1259
1260 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1261 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1262 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1263 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1264 int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
1265 u16 clear, u16 set);
1266 int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
1267 u16 clear, u16 set);
1268 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1269 u32 clear, u32 set);
1270
1271 /**
1272 * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
1273 * @dev: PCI device structure of the PCI Express device
1274 * @pos: PCI Express Capability Register
1275 * @clear: Clear bitmask
1276 * @set: Set bitmask
1277 *
1278 * Perform a Read-Modify-Write (RMW) operation using @clear and @set
1279 * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
1280 * Capability Registers are accessed concurrently in RMW fashion, hence
1281 * require locking which is handled transparently to the caller.
1282 */
1283 static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
1284 int pos,
1285 u16 clear, u16 set)
1286 {
1287 switch (pos) {
1288 case PCI_EXP_LNKCTL:
1289 case PCI_EXP_LNKCTL2:
1290 case PCI_EXP_RTCTL:
1291 return pcie_capability_clear_and_set_word_locked(dev, pos,
1292 clear, set);
1293 default:
1294 return pcie_capability_clear_and_set_word_unlocked(dev, pos,
1295 clear, set);
1296 }
1297 }
1298
1299 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
1300 u16 set)
1301 {
1302 return pcie_capability_clear_and_set_word(dev, pos, 0, set);
1303 }
1304
1305 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
1306 u32 set)
1307 {
1308 return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
1309 }
1310
1311 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
1312 u16 clear)
1313 {
1314 return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
1315 }
1316
1317 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
1318 u32 clear)
1319 {
1320 return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
1321 }
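/*
 * Illustrative sketch (assumption; Link Control bits are normally owned by
 * PCI core code, this only demonstrates the accessors): setting and later
 * clearing Common Clock Configuration in the Link Control register.
 *
 *	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_CCC);
 *	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_CCC);
 */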
1322
1323 /* User-space driven config access */
1324 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1325 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1326 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1327 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1328 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1329 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1330
1331 int __must_check pci_enable_device(struct pci_dev *dev);
1332 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1333 int __must_check pci_reenable_device(struct pci_dev *);
1334 int __must_check pcim_enable_device(struct pci_dev *pdev);
1335 void pcim_pin_device(struct pci_dev *pdev);
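/*
 * Illustrative sketch (hypothetical "foo" driver): a common bring-up sequence
 * in probe(). With pcim_enable_device() the enable is managed and undone on
 * driver detach; pci_request_regions() is declared further down in this
 * header.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pcim_enable_device(pdev);
 *		if (err)
 *			return err;
 *
 *		err = pci_request_regions(pdev, "foo");
 *		if (err)
 *			return err;
 *
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */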
1336
1337 static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1338 {
1339 /*
1340 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1341 * writable and no quirk has marked the feature broken.
1342 */
1343 return !pdev->broken_intx_masking;
1344 }
1345
1346 static inline int pci_is_enabled(struct pci_dev *pdev)
1347 {
1348 return (atomic_read(&pdev->enable_cnt) > 0);
1349 }
1350
1351 static inline int pci_is_managed(struct pci_dev *pdev)
1352 {
1353 return pdev->is_managed;
1354 }
1355
1356 void pci_disable_device(struct pci_dev *dev);
1357
1358 extern unsigned int pcibios_max_latency;
1359 void pci_set_master(struct pci_dev *dev);
1360 void pci_clear_master(struct pci_dev *dev);
1361
1362 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1363 int pci_set_cacheline_size(struct pci_dev *dev);
1364 int __must_check pci_set_mwi(struct pci_dev *dev);
1365 int __must_check pcim_set_mwi(struct pci_dev *dev);
1366 int pci_try_set_mwi(struct pci_dev *dev);
1367 void pci_clear_mwi(struct pci_dev *dev);
1368 void pci_disable_parity(struct pci_dev *dev);
1369 void pci_intx(struct pci_dev *dev, int enable);
1370 bool pci_check_and_mask_intx(struct pci_dev *dev);
1371 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1372 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1373 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1374 int pcix_get_max_mmrbc(struct pci_dev *dev);
1375 int pcix_get_mmrbc(struct pci_dev *dev);
1376 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1377 int pcie_get_readrq(struct pci_dev *dev);
1378 int pcie_set_readrq(struct pci_dev *dev, int rq);
1379 int pcie_get_mps(struct pci_dev *dev);
1380 int pcie_set_mps(struct pci_dev *dev, int mps);
1381 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1382 enum pci_bus_speed *speed,
1383 enum pcie_link_width *width);
1384 int pcie_link_speed_mbps(struct pci_dev *pdev);
1385 void pcie_print_link_status(struct pci_dev *dev);
1386 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1387 int pcie_flr(struct pci_dev *dev);
1388 int __pci_reset_function_locked(struct pci_dev *dev);
1389 int pci_reset_function(struct pci_dev *dev);
1390 int pci_reset_function_locked(struct pci_dev *dev);
1391 int pci_try_reset_function(struct pci_dev *dev);
1392 int pci_probe_reset_slot(struct pci_slot *slot);
1393 int pci_probe_reset_bus(struct pci_bus *bus);
1394 int pci_reset_bus(struct pci_dev *dev);
1395 void pci_reset_secondary_bus(struct pci_dev *dev);
1396 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1397 void pci_update_resource(struct pci_dev *dev, int resno);
1398 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1399 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
1400 void pci_release_resource(struct pci_dev *dev, int resno);
1401 static inline int pci_rebar_bytes_to_size(u64 bytes)
1402 {
1403 bytes = roundup_pow_of_two(bytes);
1404
1405 /* Return BAR size as defined in the resizable BAR specification */
1406 return max(ilog2(bytes), 20) - 20;
1407 }
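/*
 * Worked example (illustrative only): the encoding above maps a byte count
 * to the Resizable BAR size value, where 0 means 1 MB and each increment
 * doubles the size:
 *
 *	pci_rebar_bytes_to_size(SZ_1M)   == 0
 *	pci_rebar_bytes_to_size(SZ_256M) == 8	(ilog2(SZ_256M) = 28, 28 - 20 = 8)
 *	pci_rebar_bytes_to_size(100000)  == 0	(rounded up, clamped to 1 MB)
 *
 * SZ_1M/SZ_256M are the usual <linux/sizes.h> constants.
 */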
1408
1409 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1410 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
1411 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1412 bool pci_device_is_present(struct pci_dev *pdev);
1413 void pci_ignore_hotplug(struct pci_dev *dev);
1414 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1415 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1416
1417 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1418 irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1419 const char *fmt, ...);
1420 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1421
1422 /* ROM control related routines */
1423 int pci_enable_rom(struct pci_dev *pdev);
1424 void pci_disable_rom(struct pci_dev *pdev);
1425 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1426 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1427
1428 /* Power management related routines */
1429 int pci_save_state(struct pci_dev *dev);
1430 void pci_restore_state(struct pci_dev *dev);
1431 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1432 int pci_load_saved_state(struct pci_dev *dev,
1433 struct pci_saved_state *state);
1434 int pci_load_and_free_saved_state(struct pci_dev *dev,
1435 struct pci_saved_state **state);
1436 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1437 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1438 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
1439 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1440 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1441 void pci_pme_active(struct pci_dev *dev, bool enable);
1442 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1443 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1444 int pci_prepare_to_sleep(struct pci_dev *dev);
1445 int pci_back_from_sleep(struct pci_dev *dev);
1446 bool pci_dev_run_wake(struct pci_dev *dev);
1447 void pci_d3cold_enable(struct pci_dev *dev);
1448 void pci_d3cold_disable(struct pci_dev *dev);
1449 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1450 void pci_resume_bus(struct pci_bus *bus);
1451 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
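/*
 * Possible usage of the PM helpers above in legacy-style driver callbacks
 * (a minimal sketch; the foo_*() names are illustrative, and most drivers
 * should use dev_pm_ops, where the PCI core handles this for them):
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_enable_wake(pdev, PCI_D3hot, false);
 *		pci_set_power_state(pdev, PCI_D3hot);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */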
1452
1453 /* For use by arch with custom probe code */
1454 void set_pcie_port_type(struct pci_dev *pdev);
1455 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1456
1457 /* Functions for PCI Hotplug drivers to use */
1458 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
1459 unsigned int pci_rescan_bus(struct pci_bus *bus);
1460 void pci_lock_rescan_remove(void);
1461 void pci_unlock_rescan_remove(void);
1462
1463 /* Vital Product Data routines */
1464 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1465 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1466 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1467 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1468
1469 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1470 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1471 void pci_bus_assign_resources(const struct pci_bus *bus);
1472 void pci_bus_claim_resources(struct pci_bus *bus);
1473 void pci_bus_size_bridges(struct pci_bus *bus);
1474 int pci_claim_resource(struct pci_dev *, int);
1475 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1476 void pci_assign_unassigned_resources(void);
1477 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1478 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1479 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1480 int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
1481 int pci_enable_resources(struct pci_dev *, int mask);
1482 void pci_assign_irq(struct pci_dev *dev);
1483 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1484 #define HAVE_PCI_REQ_REGIONS 2
1485 int __must_check pci_request_regions(struct pci_dev *, const char *);
1486 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1487 void pci_release_regions(struct pci_dev *);
1488 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1489 void pci_release_region(struct pci_dev *, int);
1490 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1491 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1492 void pci_release_selected_regions(struct pci_dev *, int);
1493
1494 static inline __must_check struct resource *
1495 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
1496 unsigned int len, const char *name)
1497 {
1498 return __request_region(&pdev->driver_exclusive_resource, offset, len,
1499 name, IORESOURCE_EXCLUSIVE);
1500 }
1501
1502 static inline void pci_release_config_region(struct pci_dev *pdev,
1503 unsigned int offset,
1504 unsigned int len)
1505 {
1506 __release_region(&pdev->driver_exclusive_resource, offset, len);
1507 }
1508
1509 /* drivers/pci/bus.c */
1510 void pci_add_resource(struct list_head *resources, struct resource *res);
1511 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1512 resource_size_t offset);
1513 void pci_free_resource_list(struct list_head *resources);
1514 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res);
1515 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1516 void pci_bus_remove_resources(struct pci_bus *bus);
1517 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1518 int devm_request_pci_bus_resources(struct device *dev,
1519 struct list_head *resources);
1520
1521 /* Temporary until new and working PCI SBR API in place */
1522 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1523
1524 #define __pci_bus_for_each_res0(bus, res, ...) \
1525 for (unsigned int __b = 0; \
1526 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1527 __b++)
1528
1529 #define __pci_bus_for_each_res1(bus, res, __b) \
1530 for (__b = 0; \
1531 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1532 __b++)
1533
1534 /**
1535 * pci_bus_for_each_resource - iterate over PCI bus resources
1536 * @bus: the PCI bus
1537 * @res: pointer to the current resource
1538 * @...: optional index of the current resource
1539 *
1540 * Iterate over PCI bus resources. First go over the PCI bus resource
1541 * array, which has at most %PCI_BRIDGE_RESOURCE_NUM entries, then
1542 * continue with the separate list of additional resources, if it is not
1543 * empty. That is why the logical OR is used in the helpers above.
1544 *
1545 * Possible usage:
1546 *
1547 * struct pci_bus *bus = ...;
1548 * struct resource *res;
1549 * unsigned int i;
1550 *
1551 * // With optional index
1552 * pci_bus_for_each_resource(bus, res, i)
1553 * pr_info("PCI bus resource[%u]: %pR\n", i, res);
1554 *
1555 * // Without index
1556 * pci_bus_for_each_resource(bus, res)
1557 * _do_something_(res);
1558 */
1559 #define pci_bus_for_each_resource(bus, res, ...) \
1560 CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
1561 (bus, res, __VA_ARGS__)
1562
1563 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1564 struct resource *res, resource_size_t size,
1565 resource_size_t align, resource_size_t min,
1566 unsigned long type_mask,
1567 resource_alignf alignf,
1568 void *alignf_data);
1569
1570
1571 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
1572 resource_size_t size);
1573 unsigned long pci_address_to_pio(phys_addr_t addr);
1574 phys_addr_t pci_pio_to_address(unsigned long pio);
1575 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1576 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1577 phys_addr_t phys_addr);
1578 void pci_unmap_iospace(struct resource *res);
1579 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1580 resource_size_t offset,
1581 resource_size_t size);
1582 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1583 struct resource *res);
1584
1585 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1586 {
1587 struct pci_bus_region region;
1588
1589 pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1590 return region.start;
1591 }
1592
1593 /* Proper probing supporting hot-pluggable devices */
1594 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1595 const char *mod_name);
1596
1597 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1598 #define pci_register_driver(driver) \
1599 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1600
1601 void pci_unregister_driver(struct pci_driver *dev);
1602
1603 /**
1604 * module_pci_driver() - Helper macro for registering a PCI driver
1605 * @__pci_driver: pci_driver struct
1606 *
1607 * Helper macro for PCI drivers which do not do anything special in module
1608 * init/exit. This eliminates a lot of boilerplate. Each module may only
1609 * use this macro once, and calling it replaces module_init() and module_exit()
1610 */
1611 #define module_pci_driver(__pci_driver) \
1612 module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
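/*
 * Possible usage (a minimal sketch; the "foo" names and device ID are
 * illustrative):
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT, 0x0001) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, foo_ids);
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_pci_driver(foo_driver);
 */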
1613
1614 /**
1615 * builtin_pci_driver() - Helper macro for registering a PCI driver
1616 * @__pci_driver: pci_driver struct
1617 *
1618 * Helper macro for PCI drivers which do not do anything special in their
1619 * init code. This eliminates a lot of boilerplate. Each driver may only
1620 * use this macro once, and calling it replaces device_initcall(...)
1621 */
1622 #define builtin_pci_driver(__pci_driver) \
1623 builtin_driver(__pci_driver, pci_register_driver)
1624
1625 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1626 int pci_add_dynid(struct pci_driver *drv,
1627 unsigned int vendor, unsigned int device,
1628 unsigned int subvendor, unsigned int subdevice,
1629 unsigned int class, unsigned int class_mask,
1630 unsigned long driver_data);
1631 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1632 struct pci_dev *dev);
1633 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1634 int pass);
1635
1636 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1637 void *userdata);
1638 int pci_cfg_space_size(struct pci_dev *dev);
1639 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1640 void pci_setup_bridge(struct pci_bus *bus);
1641 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1642 unsigned long type);
1643
1644 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1645 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1646
1647 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1648 unsigned int command_bits, u32 flags);
1649
1650 /*
1651 * Virtual interrupts allow for more interrupts to be allocated
1652 * than the device has interrupts for. These are not programmed
1653 * into the device's MSI-X table and must be handled by some
1654 * other driver means.
1655 */
1656 #define PCI_IRQ_VIRTUAL (1 << 4)
1657
1658 #define PCI_IRQ_ALL_TYPES (PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1659
1660 #include <linux/dmapool.h>
1661
1662 struct msix_entry {
1663 u32 vector; /* Kernel uses to write allocated vector */
1664 u16 entry; /* Driver uses to specify entry, OS writes */
1665 };
1666
1667 #ifdef CONFIG_PCI_MSI
1668 int pci_msi_vec_count(struct pci_dev *dev);
1669 void pci_disable_msi(struct pci_dev *dev);
1670 int pci_msix_vec_count(struct pci_dev *dev);
1671 void pci_disable_msix(struct pci_dev *dev);
1672 void pci_restore_msi_state(struct pci_dev *dev);
1673 int pci_msi_enabled(void);
1674 int pci_enable_msi(struct pci_dev *dev);
1675 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1676 int minvec, int maxvec);
1677 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1678 struct msix_entry *entries, int nvec)
1679 {
1680 int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1681 if (rc < 0)
1682 return rc;
1683 return 0;
1684 }
1685 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1686 unsigned int max_vecs, unsigned int flags);
1687 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1688 unsigned int max_vecs, unsigned int flags,
1689 struct irq_affinity *affd);
1690
1691 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1692 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1693 const struct irq_affinity_desc *affdesc);
1694 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1695
1696 void pci_free_irq_vectors(struct pci_dev *dev);
1697 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1698 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1699
1700 #else
1701 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1702 static inline void pci_disable_msi(struct pci_dev *dev) { }
1703 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1704 static inline void pci_disable_msix(struct pci_dev *dev) { }
1705 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
1706 static inline int pci_msi_enabled(void) { return 0; }
1707 static inline int pci_enable_msi(struct pci_dev *dev)
1708 { return -ENOSYS; }
1709 static inline int pci_enable_msix_range(struct pci_dev *dev,
1710 struct msix_entry *entries, int minvec, int maxvec)
1711 { return -ENOSYS; }
1712 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1713 struct msix_entry *entries, int nvec)
1714 { return -ENOSYS; }
1715
1716 static inline int
1717 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1718 unsigned int max_vecs, unsigned int flags,
1719 struct irq_affinity *aff_desc)
1720 {
1721 if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq)
1722 return 1;
1723 return -ENOSPC;
1724 }
1725 static inline int
1726 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1727 unsigned int max_vecs, unsigned int flags)
1728 {
1729 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
1730 flags, NULL);
1731 }
1732
1733 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
1734 { return false; }
1735 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1736 const struct irq_affinity_desc *affdesc)
1737 {
1738 struct msi_map map = { .index = -ENOSYS, };
1739
1740 return map;
1741 }
1742
1743 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
1744 {
1745 }
1746
1747 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1748 {
1749 }
1750
1751 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1752 {
1753 if (WARN_ON_ONCE(nr > 0))
1754 return -EINVAL;
1755 return dev->irq;
1756 }
1757 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1758 int vec)
1759 {
1760 return cpu_possible_mask;
1761 }
1762 #endif
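/*
 * Possible usage of the vector allocation API above (a minimal sketch;
 * foo_irq_handler() and the "foo" name are illustrative):
 *
 *	int nvec, err;
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
 *	if (nvec < 0)
 *		return nvec;
 *
 *	err = request_irq(pci_irq_vector(pdev, 0), foo_irq_handler, 0,
 *			  "foo", pdev);
 *	if (err) {
 *		pci_free_irq_vectors(pdev);
 *		return err;
 *	}
 *
 * With the !CONFIG_PCI_MSI stubs above, this degrades to a single INTx
 * vector when PCI_IRQ_INTX is included in the flags and the device has an
 * INTx IRQ.
 */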
1763
1764 /**
1765 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1766 * @d: the INTx IRQ domain
1767 * @node: the DT node for the device whose interrupt we're translating
1768 * @intspec: the interrupt specifier data from the DT
1769 * @intsize: the number of entries in @intspec
1770 * @out_hwirq: pointer at which to write the hwirq number
1771 * @out_type: pointer at which to write the interrupt type
1772 *
1773 * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1774 * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1775 * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1776 * INTx value to obtain the hwirq number.
1777 *
1778 * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1779 */
1780 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1781 struct device_node *node,
1782 const u32 *intspec,
1783 unsigned int intsize,
1784 unsigned long *out_hwirq,
1785 unsigned int *out_type)
1786 {
1787 const u32 intx = intspec[0];
1788
1789 if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1790 return -EINVAL;
1791
1792 *out_hwirq = intx - PCI_INTERRUPT_INTA;
1793 return 0;
1794 }
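/*
 * For example, an @intspec of { 1 } (INTA, as encoded in PCI_INTERRUPT_PIN)
 * yields *out_hwirq == 0, while { 4 } (INTD) yields *out_hwirq == 3. Host
 * bridge drivers typically plug this helper in as the .xlate callback of
 * their INTx IRQ domain.
 */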
1795
1796 #ifdef CONFIG_PCIEPORTBUS
1797 extern bool pcie_ports_disabled;
1798 extern bool pcie_ports_native;
1799
1800 int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
1801 bool use_lt);
1802 #else
1803 #define pcie_ports_disabled true
1804 #define pcie_ports_native false
1805
1806 static inline int pcie_set_target_speed(struct pci_dev *port,
1807 enum pci_bus_speed speed_req,
1808 bool use_lt)
1809 {
1810 return -EOPNOTSUPP;
1811 }
1812 #endif
1813
1814 #define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
1815 #define PCIE_LINK_STATE_L1 BIT(2) /* L1 state */
1816 #define PCIE_LINK_STATE_L1_1 BIT(3) /* ASPM L1.1 state */
1817 #define PCIE_LINK_STATE_L1_2 BIT(4) /* ASPM L1.2 state */
1818 #define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) /* PCI-PM L1.1 state */
1819 #define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) /* PCI-PM L1.2 state */
1820 #define PCIE_LINK_STATE_ASPM_ALL (PCIE_LINK_STATE_L0S |\
1821 PCIE_LINK_STATE_L1 |\
1822 PCIE_LINK_STATE_L1_1 |\
1823 PCIE_LINK_STATE_L1_2 |\
1824 PCIE_LINK_STATE_L1_1_PCIPM |\
1825 PCIE_LINK_STATE_L1_2_PCIPM)
1826 #define PCIE_LINK_STATE_CLKPM BIT(7)
1827 #define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_ASPM_ALL |\
1828 PCIE_LINK_STATE_CLKPM)
1829
1830 #ifdef CONFIG_PCIEASPM
1831 int pci_disable_link_state(struct pci_dev *pdev, int state);
1832 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1833 int pci_enable_link_state(struct pci_dev *pdev, int state);
1834 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1835 void pcie_no_aspm(void);
1836 bool pcie_aspm_support_enabled(void);
1837 bool pcie_aspm_enabled(struct pci_dev *pdev);
1838 #else
1839 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1840 { return 0; }
1841 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1842 { return 0; }
1843 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
1844 { return 0; }
1845 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1846 { return 0; }
1847 static inline void pcie_no_aspm(void) { }
1848 static inline bool pcie_aspm_support_enabled(void) { return false; }
1849 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1850 #endif
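/*
 * Possible usage (a minimal sketch): a driver whose device cannot tolerate
 * the exit latency of the L1 substates may ask the core to keep them
 * disabled on its link:
 *
 *	pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_1 |
 *				     PCIE_LINK_STATE_L1_2);
 *
 * This is a request, not a guarantee: it can fail, e.g. when the platform
 * firmware has retained control of ASPM.
 */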
1851
1852 #ifdef CONFIG_PCIEAER
1853 bool pci_aer_available(void);
1854 #else
1855 static inline bool pci_aer_available(void) { return false; }
1856 #endif
1857
1858 bool pci_ats_disabled(void);
1859
1860 #ifdef CONFIG_PCIE_PTM
1861 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1862 void pci_disable_ptm(struct pci_dev *dev);
1863 bool pcie_ptm_enabled(struct pci_dev *dev);
1864 #else
1865 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1866 { return -EINVAL; }
1867 static inline void pci_disable_ptm(struct pci_dev *dev) { }
1868 static inline bool pcie_ptm_enabled(struct pci_dev *dev)
1869 { return false; }
1870 #endif
1871
1872 void pci_cfg_access_lock(struct pci_dev *dev);
1873 bool pci_cfg_access_trylock(struct pci_dev *dev);
1874 void pci_cfg_access_unlock(struct pci_dev *dev);
1875
1876 void pci_dev_lock(struct pci_dev *dev);
1877 int pci_dev_trylock(struct pci_dev *dev);
1878 void pci_dev_unlock(struct pci_dev *dev);
1879 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
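/*
 * With the guard class defined above, <linux/cleanup.h> scope-based locking
 * can be used instead of pairing pci_dev_lock()/pci_dev_unlock() by hand
 * (a minimal sketch):
 *
 *	guard(pci_dev)(pdev);
 *	... pdev is locked here and unlocked when the scope is left ...
 */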
1880
1881 /*
1882 * PCI domain support. Sometimes called PCI segment (eg by ACPI),
1883 * a PCI domain is defined to be a set of PCI buses which share
1884 * configuration space.
1885 */
1886 #ifdef CONFIG_PCI_DOMAINS
1887 extern int pci_domains_supported;
1888 #else
1889 enum { pci_domains_supported = 0 };
1890 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1891 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1892 #endif /* CONFIG_PCI_DOMAINS */
1893
1894 /*
1895 * Generic implementation for PCI domain support. If your
1896 * architecture does not need custom management of PCI
1897 * domains then this implementation will be used
1898 */
1899 #ifdef CONFIG_PCI_DOMAINS_GENERIC
1900 static inline int pci_domain_nr(struct pci_bus *bus)
1901 {
1902 return bus->domain_nr;
1903 }
1904 #ifdef CONFIG_ACPI
1905 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1906 #else
1907 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1908 { return 0; }
1909 #endif
1910 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1911 void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
1912 #endif
1913
1914 /* Some architectures require additional setup to direct VGA traffic */
1915 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1916 unsigned int command_bits, u32 flags);
1917 void pci_register_set_vga_state(arch_set_vga_state_t func);
1918
1919 static inline int
1920 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1921 {
1922 return pci_request_selected_regions(pdev,
1923 pci_select_bars(pdev, IORESOURCE_IO), name);
1924 }
1925
1926 static inline void
1927 pci_release_io_regions(struct pci_dev *pdev)
1928 {
1929 return pci_release_selected_regions(pdev,
1930 pci_select_bars(pdev, IORESOURCE_IO));
1931 }
1932
1933 static inline int
1934 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
1935 {
1936 return pci_request_selected_regions(pdev,
1937 pci_select_bars(pdev, IORESOURCE_MEM), name);
1938 }
1939
1940 static inline void
1941 pci_release_mem_regions(struct pci_dev *pdev)
1942 {
1943 return pci_release_selected_regions(pdev,
1944 pci_select_bars(pdev, IORESOURCE_MEM));
1945 }
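/*
 * Possible usage in a probe path (a minimal sketch; "foo" is an illustrative
 * driver name):
 *
 *	int err;
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *
 *	err = pci_request_mem_regions(pdev, "foo");
 *	if (err) {
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 *
 * The matching pci_release_mem_regions(pdev) then belongs in the remove
 * path.
 */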
1946
1947 #else /* CONFIG_PCI is not enabled */
1948
1949 static inline void pci_set_flags(int flags) { }
1950 static inline void pci_add_flags(int flags) { }
1951 static inline void pci_clear_flags(int flags) { }
1952 static inline int pci_has_flag(int flag) { return 0; }
1953
1954 /*
1955 * If the system does not have PCI, clearly these return errors. Define
1956 * these as simple inline functions to avoid hair in drivers.
1957 */
1958 #define _PCI_NOP(o, s, t) \
1959 static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1960 int where, t val) \
1961 { return PCIBIOS_FUNC_NOT_SUPPORTED; }
1962
1963 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \
1964 _PCI_NOP(o, word, u16 x) \
1965 _PCI_NOP(o, dword, u32 x)
1966 _PCI_NOP_ALL(read, *)
1967 _PCI_NOP_ALL(write,)
1968
1969 static inline struct pci_dev *pci_get_device(unsigned int vendor,
1970 unsigned int device,
1971 struct pci_dev *from)
1972 { return NULL; }
1973
1974 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
1975 unsigned int device,
1976 unsigned int ss_vendor,
1977 unsigned int ss_device,
1978 struct pci_dev *from)
1979 { return NULL; }
1980
1981 static inline struct pci_dev *pci_get_class(unsigned int class,
1982 struct pci_dev *from)
1983 { return NULL; }
1984
1985 static inline struct pci_dev *pci_get_base_class(unsigned int class,
1986 struct pci_dev *from)
1987 { return NULL; }
1988
1989 static inline int pci_dev_present(const struct pci_device_id *ids)
1990 { return 0; }
1991
1992 #define no_pci_devices() (1)
1993 #define pci_dev_put(dev) do { } while (0)
1994
1995 static inline void pci_set_master(struct pci_dev *dev) { }
1996 static inline void pci_clear_master(struct pci_dev *dev) { }
1997 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
1998 static inline void pci_disable_device(struct pci_dev *dev) { }
1999 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
2000 static inline int pci_assign_resource(struct pci_dev *dev, int i)
2001 { return -EBUSY; }
2002 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
2003 struct module *owner,
2004 const char *mod_name)
2005 { return 0; }
2006 static inline int pci_register_driver(struct pci_driver *drv)
2007 { return 0; }
2008 static inline void pci_unregister_driver(struct pci_driver *drv) { }
2009 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
2010 { return 0; }
2011 static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap)
2012 { return 0; }
2013 static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
2014 { return 0; }
2015
2016 static inline u64 pci_get_dsn(struct pci_dev *dev)
2017 { return 0; }
2018
2019 /* Power management related routines */
2020 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
2021 static inline void pci_restore_state(struct pci_dev *dev) { }
2022 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
2023 { return 0; }
2024 static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
2025 { return 0; }
2026 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2027 { return 0; }
2028 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
2029 pm_message_t state)
2030 { return PCI_D0; }
2031 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
2032 int enable)
2033 { return 0; }
2034
2035 static inline struct resource *pci_find_resource(struct pci_dev *dev,
2036 struct resource *res)
2037 { return NULL; }
2038 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
2039 { return -EIO; }
2040 static inline void pci_release_regions(struct pci_dev *dev) { }
2041
2042 static inline int pci_register_io_range(const struct fwnode_handle *fwnode,
2043 phys_addr_t addr, resource_size_t size)
2044 { return -EINVAL; }
2045
2046 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
2047
2048 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
2049 { return NULL; }
2050 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
2051 unsigned int devfn)
2052 { return NULL; }
2053 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
2054 unsigned int bus, unsigned int devfn)
2055 { return NULL; }
2056
2057 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
2058 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
2059
2060 #define dev_is_pci(d) (false)
2061 #define dev_is_pf(d) (false)
2062 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2063 { return false; }
2064 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
2065 struct device_node *node,
2066 const u32 *intspec,
2067 unsigned int intsize,
2068 unsigned long *out_hwirq,
2069 unsigned int *out_type)
2070 { return -EINVAL; }
2071
2072 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
2073 struct pci_dev *dev)
2074 { return NULL; }
2075 static inline bool pci_ats_disabled(void) { return true; }
2076
2077 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
2078 {
2079 return -EINVAL;
2080 }
2081
2082 static inline int
2083 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
2084 unsigned int max_vecs, unsigned int flags,
2085 struct irq_affinity *aff_desc)
2086 {
2087 return -ENOSPC;
2088 }
2089 static inline int
2090 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
2091 unsigned int max_vecs, unsigned int flags)
2092 {
2093 return -ENOSPC;
2094 }
2095 #endif /* CONFIG_PCI */
2096
2097 /* Include architecture-dependent settings and functions */
2098
2099 #include <asm/pci.h>
2100
2101 /*
2102 * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
2103 * is expected to be an offset within that region.
2104 *
2105 */
2106 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2107 struct vm_area_struct *vma,
2108 enum pci_mmap_state mmap_state, int write_combine);
2109
2110 #ifndef arch_can_pci_mmap_wc
2111 #define arch_can_pci_mmap_wc() 0
2112 #endif
2113
2114 #ifndef arch_can_pci_mmap_io
2115 #define arch_can_pci_mmap_io() 0
2116 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
2117 #else
2118 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
2119 #endif
2120
2121 #ifndef pci_root_bus_fwnode
2122 #define pci_root_bus_fwnode(bus) NULL
2123 #endif
2124
2125 /*
2126 * These helpers provide future and backwards compatibility
2127 * for accessing popular PCI BAR info
2128 */
2129 #define pci_resource_n(dev, bar) (&(dev)->resource[(bar)])
2130 #define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start)
2131 #define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end)
2132 #define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags)
2133 #define pci_resource_len(dev,bar) \
2134 (pci_resource_end((dev), (bar)) ? \
2135 resource_size(pci_resource_n((dev), (bar))) : 0)
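/*
 * Possible usage (a minimal sketch): verify that BAR 0 is a memory BAR and
 * map it using the accessors above. In practice pci_ioremap_bar() or the
 * managed pcim_iomap() wrap this pattern:
 *
 *	void __iomem *base;
 *
 *	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
 *		return -ENODEV;
 *
 *	base = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!base)
 *		return -ENOMEM;
 */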
2136
2137 #define __pci_dev_for_each_res0(dev, res, ...) \
2138 for (unsigned int __b = 0; \
2139 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2140 __b++)
2141
2142 #define __pci_dev_for_each_res1(dev, res, __b) \
2143 for (__b = 0; \
2144 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2145 __b++)
2146
2147 #define pci_dev_for_each_resource(dev, res, ...) \
2148 CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
2149 (dev, res, __VA_ARGS__)
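/*
 * Possible usage, mirroring pci_bus_for_each_resource() above:
 *
 *	struct resource *res;
 *	unsigned int i;
 *
 *	// With optional index
 *	pci_dev_for_each_resource(pdev, res, i)
 *		pr_info("resource[%u]: %pR\n", i, res);
 *
 *	// Without index
 *	pci_dev_for_each_resource(pdev, res)
 *		_do_something_(res);
 */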
2150
2151 /*
2152 * Similar to the helpers above, these manipulate per-pci_dev
2153 * driver-specific data. They are really just a wrapper around
2154 * the generic device structure functions of these calls.
2155 */
2156 static inline void *pci_get_drvdata(struct pci_dev *pdev)
2157 {
2158 return dev_get_drvdata(&pdev->dev);
2159 }
2160
2161 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
2162 {
2163 dev_set_drvdata(&pdev->dev, data);
2164 }
2165
2166 static inline const char *pci_name(const struct pci_dev *pdev)
2167 {
2168 return dev_name(&pdev->dev);
2169 }
2170
2171 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2172 const struct resource *rsrc,
2173 resource_size_t *start, resource_size_t *end);
2174
2175 /*
2176 * The world is not perfect and supplies us with broken PCI devices.
2177 * For at least a part of these bugs we need a work-around, so both
2178 * generic (drivers/pci/quirks.c) and per-architecture code can define
2179 * fixup hooks to be called for particular buggy devices.
2180 */
2181
2182 struct pci_fixup {
2183 u16 vendor; /* Or PCI_ANY_ID */
2184 u16 device; /* Or PCI_ANY_ID */
2185 u32 class; /* Or PCI_ANY_ID */
2186 unsigned int class_shift; /* should be 0, 8, 16 */
2187 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2188 int hook_offset;
2189 #else
2190 void (*hook)(struct pci_dev *dev);
2191 #endif
2192 };
2193
2194 enum pci_fixup_pass {
2195 pci_fixup_early, /* Before probing BARs */
2196 pci_fixup_header, /* After reading configuration header */
2197 pci_fixup_final, /* Final phase of device fixups */
2198 pci_fixup_enable, /* pci_enable_device() time */
2199 pci_fixup_resume, /* pci_device_resume() */
2200 pci_fixup_suspend, /* pci_device_suspend() */
2201 pci_fixup_resume_early, /* pci_device_resume_early() */
2202 pci_fixup_suspend_late, /* pci_device_suspend_late() */
2203 };
2204
2205 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2206 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2207 class_shift, hook) \
2208 __ADDRESSABLE(hook) \
2209 asm(".section " #sec ", \"a\" \n" \
2210 ".balign 16 \n" \
2211 ".short " #vendor ", " #device " \n" \
2212 ".long " #class ", " #class_shift " \n" \
2213 ".long " #hook " - . \n" \
2214 ".previous \n");
2215
2216 /*
2217 * Clang's LTO may rename static functions in C, but has no way to
2218 * handle such renamings when referenced from inline asm. To work
2219 * around this, create global C stubs for these cases.
2220 */
2221 #ifdef CONFIG_LTO_CLANG
2222 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2223 class_shift, hook, stub) \
2224 void stub(struct pci_dev *dev); \
2225 void stub(struct pci_dev *dev) \
2226 { \
2227 hook(dev); \
2228 } \
2229 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2230 class_shift, stub)
2231 #else
2232 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2233 class_shift, hook, stub) \
2234 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2235 class_shift, hook)
2236 #endif
2237
2238 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2239 class_shift, hook) \
2240 __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2241 class_shift, hook, __UNIQUE_ID(hook))
2242 #else
2243 /* Anonymous variables would be nice... */
2244 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
2245 class_shift, hook) \
2246 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
2247 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
2248 = { vendor, device, class, class_shift, hook };
2249 #endif
2250
2251 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
2252 class_shift, hook) \
2253 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
2254 hook, vendor, device, class, class_shift, hook)
2255 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
2256 class_shift, hook) \
2257 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
2258 hook, vendor, device, class, class_shift, hook)
2259 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
2260 class_shift, hook) \
2261 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
2262 hook, vendor, device, class, class_shift, hook)
2263 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
2264 class_shift, hook) \
2265 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
2266 hook, vendor, device, class, class_shift, hook)
2267 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
2268 class_shift, hook) \
2269 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
2270 resume##hook, vendor, device, class, class_shift, hook)
2271 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
2272 class_shift, hook) \
2273 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
2274 resume_early##hook, vendor, device, class, class_shift, hook)
2275 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
2276 class_shift, hook) \
2277 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
2278 suspend##hook, vendor, device, class, class_shift, hook)
2279 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
2280 class_shift, hook) \
2281 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
2282 suspend_late##hook, vendor, device, class, class_shift, hook)
2283
2284 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
2285 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
2286 hook, vendor, device, PCI_ANY_ID, 0, hook)
2287 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
2288 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
2289 hook, vendor, device, PCI_ANY_ID, 0, hook)
2290 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
2291 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
2292 hook, vendor, device, PCI_ANY_ID, 0, hook)
2293 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
2294 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
2295 hook, vendor, device, PCI_ANY_ID, 0, hook)
2296 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
2297 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
2298 resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
2299 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
2300 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
2301 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
2302 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
2303 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
2304 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
2305 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
2306 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
2307 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
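/*
 * Possible usage (a minimal sketch; the vendor/device pair and the quirk
 * itself are illustrative, not a real workaround): flag every matching
 * device as unable to mask INTx before its BARs are probed:
 *
 *	static void quirk_foo_broken_intx(struct pci_dev *dev)
 *	{
 *		dev->broken_intx_masking = 1;
 *	}
 *	DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_REDHAT, 0x0001,
 *				quirk_foo_broken_intx);
 */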
2308
2309 #ifdef CONFIG_PCI_QUIRKS
2310 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
2311 #else
2312 static inline void pci_fixup_device(enum pci_fixup_pass pass,
2313 struct pci_dev *dev) { }
2314 #endif
2315
2316 int pcim_intx(struct pci_dev *pdev, int enabled);
2317 int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
2318 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2319 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
2320 const char *name);
2321 void pcim_iounmap_region(struct pci_dev *pdev, int bar);
2322 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2323 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2324 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
2325 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2326 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
2327 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
2328 unsigned long offset, unsigned long len);
2329
2330 extern int pci_pci_problems;
2331 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */
2332 #define PCIPCI_TRITON 2
2333 #define PCIPCI_NATOMA 4
2334 #define PCIPCI_VIAETBF 8
2335 #define PCIPCI_VSFX 16
2336 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
2337 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
2338
2339 extern unsigned long pci_cardbus_io_size;
2340 extern unsigned long pci_cardbus_mem_size;
2341 extern u8 pci_dfl_cache_line_size;
2342 extern u8 pci_cache_line_size;
2343
2344 /* Architecture-specific versions may override these (weak) */
2345 void pcibios_disable_device(struct pci_dev *dev);
2346 void pcibios_set_master(struct pci_dev *dev);
2347 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2348 enum pcie_reset_state state);
2349 int pcibios_device_add(struct pci_dev *dev);
2350 void pcibios_release_device(struct pci_dev *dev);
2351 #ifdef CONFIG_PCI
2352 void pcibios_penalize_isa_irq(int irq, int active);
2353 #else
2354 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2355 #endif
2356 int pcibios_alloc_irq(struct pci_dev *dev);
2357 void pcibios_free_irq(struct pci_dev *dev);
2358 resource_size_t pcibios_default_alignment(void);
2359
2360 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2361 extern int pci_create_resource_files(struct pci_dev *dev);
2362 extern void pci_remove_resource_files(struct pci_dev *dev);
2363 #endif
2364
2365 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2366 void __init pci_mmcfg_early_init(void);
2367 void __init pci_mmcfg_late_init(void);
2368 #else
2369 static inline void pci_mmcfg_early_init(void) { }
2370 static inline void pci_mmcfg_late_init(void) { }
2371 #endif
2372
2373 int pci_ext_cfg_avail(void);
2374
2375 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2376 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2377
2378 #ifdef CONFIG_PCI_IOV
2379 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2380 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2381 int pci_iov_vf_id(struct pci_dev *dev);
2382 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2383 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2384 void pci_disable_sriov(struct pci_dev *dev);
2385
2386 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2387 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2388 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2389 int pci_num_vf(struct pci_dev *dev);
2390 int pci_vfs_assigned(struct pci_dev *dev);
2391 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2392 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2393 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2394 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2395 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2396
2397 /* Arch may override these (weak) */
2398 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2399 int pcibios_sriov_disable(struct pci_dev *pdev);
2400 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2401 #else
2402 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
2403 {
2404 return -ENOSYS;
2405 }
2406 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
2407 {
2408 return -ENOSYS;
2409 }
2410
2411 static inline int pci_iov_vf_id(struct pci_dev *dev)
2412 {
2413 return -ENOSYS;
2414 }
2415
2416 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
2417 struct pci_driver *pf_driver)
2418 {
2419 return ERR_PTR(-EINVAL);
2420 }
2421
2422 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
2423 { return -ENODEV; }
2424
2425 static inline int pci_iov_sysfs_link(struct pci_dev *dev,
2426 struct pci_dev *virtfn, int id)
2427 {
2428 return -ENODEV;
2429 }
2430 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2431 {
2432 return -ENOSYS;
2433 }
2434 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
2435 int id) { }
2436 static inline void pci_disable_sriov(struct pci_dev *dev) { }
2437 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
2438 static inline int pci_vfs_assigned(struct pci_dev *dev)
2439 { return 0; }
2440 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
2441 { return 0; }
2442 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
2443 { return 0; }
2444 #define pci_sriov_configure_simple NULL
2445 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
2446 { return 0; }
2447 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2448 #endif
2449
2450 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
2451 void pci_hp_create_module_link(struct pci_slot *pci_slot);
2452 void pci_hp_remove_module_link(struct pci_slot *pci_slot);
2453 #endif
2454
2455 /**
2456 * pci_pcie_cap - get the saved PCIe capability offset
2457 * @dev: PCI device
2458 *
2459 * PCIe capability offset is calculated at PCI device initialization
2460 * time and saved in the data structure. This function returns saved
2461 * PCIe capability offset. Using this instead of pci_find_capability()
2462 * reduces unnecessary search in the PCI configuration space. If you
2463 * need to calculate the PCIe capability offset from the raw device for
2464 * some reason, please use pci_find_capability() instead.
2465 */
2466 static inline int pci_pcie_cap(struct pci_dev *dev)
2467 {
2468 return dev->pcie_cap;
2469 }
2470
2471 /**
2472 * pci_is_pcie - check if the PCI device is PCI Express capable
2473 * @dev: PCI device
2474 *
2475 * Returns: true if the PCI device is PCI Express capable, false otherwise.
2476 */
2477 static inline bool pci_is_pcie(struct pci_dev *dev)
2478 {
2479 return pci_pcie_cap(dev);
2480 }
2481
2482 /**
2483 * pcie_caps_reg - get the PCIe Capabilities Register
2484 * @dev: PCI device
2485 */
2486 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
2487 {
2488 return dev->pcie_flags_reg;
2489 }
2490
2491 /**
2492 * pci_pcie_type - get the PCIe device/port type
2493 * @dev: PCI device
2494 */
2495 static inline int pci_pcie_type(const struct pci_dev *dev)
2496 {
2497 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2498 }
2499
2500 /**
2501 * pcie_find_root_port - Get the PCIe root port device
2502 * @dev: PCI device
2503 *
2504 * Traverse up the parent chain and return the PCIe Root Port PCI Device
2505 * for a given PCI/PCIe Device.
2506 */
2507 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2508 {
2509 while (dev) {
2510 if (pci_is_pcie(dev) &&
2511 pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2512 return dev;
2513 dev = pci_upstream_bridge(dev);
2514 }
2515
2516 return NULL;
2517 }
2518
2519 static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
2520 {
2521 /*
2522 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
2523 * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
2524 * the value (e.g. inside the loop in pci_dev_wait()).
2525 */
2526 return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
2527 }
2528
2529 void pci_request_acs(void);
2530 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2531 bool pci_acs_path_enabled(struct pci_dev *start,
2532 struct pci_dev *end, u16 acs_flags);
2533 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2534
2535 #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
2536 #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
2537
2538 /* Large Resource Data Type Tag Item Names */
2539 #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
2540 #define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */
2541 #define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */
2542
2543 #define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
2544 #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
2545 #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
2546
2547 #define PCI_VPD_RO_KEYWORD_PARTNO "PN"
2548 #define PCI_VPD_RO_KEYWORD_SERIALNO "SN"
2549 #define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
2550 #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
2551 #define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
2552
2553 /**
2554 * pci_vpd_alloc - Allocate buffer and read VPD into it
2555 * @dev: PCI device
2556 * @size: pointer to field where VPD length is returned
2557 *
2558 * Returns pointer to allocated buffer or an ERR_PTR in case of failure
2559 */
2560 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
2561
2562 /**
2563 * pci_vpd_find_id_string - Locate id string in VPD
2564 * @buf: Pointer to buffered VPD data
2565 * @len: The length of the buffer area in which to search
2566 * @size: Pointer to field where length of id string is returned
2567 *
2568 * Returns the index of the id string or -ENOENT if not found.
2569 */
2570 int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
2571
2572 /**
2573 * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
2574 * @buf: Pointer to buffered VPD data
2575 * @len: The length of the buffer area in which to search
2576 * @kw: The keyword to search for
2577 * @size: Pointer to field where length of found keyword data is returned
2578 *
2579 * Returns the index of the information field keyword data or -ENOENT if
2580 * not found.
2581 */
2582 int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
2583 const char *kw, unsigned int *size);
2584
2585 /**
2586 * pci_vpd_check_csum - Check VPD checksum
2587 * @buf: Pointer to buffered VPD data
2588 * @len: VPD size
2589 *
2590 * Returns 1 if VPD has no checksum, otherwise 0 or an errno
2591 */
2592 int pci_vpd_check_csum(const void *buf, unsigned int len);
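/*
 * Possible usage of the VPD helpers above (a minimal sketch): read the whole
 * VPD image and locate the serial number keyword in the read-only section:
 *
 *	unsigned int vpd_len, kw_len;
 *	void *vpd;
 *	int off;
 *
 *	vpd = pci_vpd_alloc(pdev, &vpd_len);
 *	if (IS_ERR(vpd))
 *		return PTR_ERR(vpd);
 *
 *	off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
 *					   PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
 *	if (off >= 0)
 *		... kw_len bytes of serial number start at vpd + off ...
 *
 *	kfree(vpd);
 */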
2593
2594 /* PCI <-> OF binding helpers */
2595 #ifdef CONFIG_OF
2596 struct device_node;
2597 struct irq_domain;
2598 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2599 bool pci_host_of_has_msi_map(struct device *dev);
2600
2601 /* Arch may override this (weak) */
2602 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2603
2604 #else /* CONFIG_OF */
2605 static inline struct irq_domain *
2606 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
2607 static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
2608 #endif /* CONFIG_OF */
2609
2610 static inline struct device_node *
2611 pci_device_to_OF_node(const struct pci_dev *pdev)
2612 {
2613 return pdev ? pdev->dev.of_node : NULL;
2614 }
2615
2616 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2617 {
2618 return bus ? bus->dev.of_node : NULL;
2619 }
2620
2621 #ifdef CONFIG_ACPI
2622 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2623
2624 void
2625 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2626 bool pci_pr3_present(struct pci_dev *pdev);
2627 #else
2628 static inline struct irq_domain *
2629 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
2630 static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
2631 #endif
2632
2633 #if defined(CONFIG_X86) && defined(CONFIG_ACPI)
2634 bool arch_pci_dev_is_removable(struct pci_dev *pdev);
2635 #else
2636 static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; }
2637 #endif
2638
2639 #ifdef CONFIG_EEH
2640 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
2641 {
2642 return pdev->dev.archdata.edev;
2643 }
2644 #endif
2645
2646 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned int nr_devfns);
2647 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
2648 int pci_for_each_dma_alias(struct pci_dev *pdev,
2649 int (*fn)(struct pci_dev *pdev,
2650 u16 alias, void *data), void *data);
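/*
 * Minimal callback sketch for pci_for_each_dma_alias(); my_iommu_data and
 * my_iommu_attach() are hypothetical names. @fn is called for the device
 * itself and for every DMA alias; iteration stops at the first nonzero
 * return value, which pci_for_each_dma_alias() then returns.
 *
 *	static int attach_alias(struct pci_dev *pdev, u16 alias, void *data)
 *	{
 *		struct my_iommu_data *iommu = data;
 *
 *		return my_iommu_attach(iommu, PCI_BUS_NUM(alias),
 *				       alias & 0xff);
 *	}
 *
 *	err = pci_for_each_dma_alias(pdev, attach_alias, iommu);
 */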
2651
2652 /* Helpers for operating on the PCI_DEV_FLAGS_ASSIGNED device flag */
2653 static inline void pci_set_dev_assigned(struct pci_dev *pdev)
2654 {
2655 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
2656 }
2657 static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
2658 {
2659 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
2660 }
2661 static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
2662 {
2663 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
2664 }
2665
2666 /**
2667 * pci_ari_enabled - query ARI forwarding status
2668 * @bus: the PCI bus
2669 *
2670 * Returns true if ARI forwarding is enabled.
2671 */
2672 static inline bool pci_ari_enabled(struct pci_bus *bus)
2673 {
2674 return bus->self && bus->self->ari_enabled;
2675 }
2676
2677 /**
2678 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
2679 * @pdev: PCI device to check
2680 *
2681 * Walk upwards from @pdev and check whether each encountered bridge is part
2682 * of a Thunderbolt controller. Reaching the host bridge means @pdev is not
2683 * Thunderbolt-attached (it is usually soldered to the mainboard instead).
2684 */
2685 static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2686 {
2687 struct pci_dev *parent = pdev;
2688
2689 if (pdev->is_thunderbolt)
2690 return true;
2691
2692 while ((parent = pci_upstream_bridge(parent)))
2693 if (parent->is_thunderbolt)
2694 return true;
2695
2696 return false;
2697 }
2698
2699 #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
2700 void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
2701 #endif
2702
2703 #include <linux/dma-mapping.h>
2704
2705 #define pci_printk(level, pdev, fmt, arg...) \
2706 dev_printk(level, &(pdev)->dev, fmt, ##arg)
2707
2708 #define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
2709 #define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
2710 #define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
2711 #define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
2712 #define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
2713 #define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
2714 #define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
2715 #define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
2716 #define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
2717
2718 #define pci_notice_ratelimited(pdev, fmt, arg...) \
2719 dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
2720
2721 #define pci_info_ratelimited(pdev, fmt, arg...) \
2722 dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
2723
2724 #define pci_WARN(pdev, condition, fmt, arg...) \
2725 WARN(condition, "%s %s: " fmt, \
2726 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
2727
2728 #define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
2729 WARN_ONCE(condition, "%s %s: " fmt, \
2730 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
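/*
 * Usage sketch for the pci_*() logging wrappers above (illustrative values):
 * they expand to the corresponding dev_*() helpers, so messages are prefixed
 * with the driver name and the device's bus/device/function address.
 *
 *	pci_info(pdev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
 *	pci_err(pdev, "BAR %d: failed to map registers\n", bar);
 *	pci_WARN_ONCE(pdev, nvec < 1, "invalid vector count %d\n", nvec);
 */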
2731
2732 #endif /* LINUX_PCI_H */
2733