Diffstat (limited to 'include')
151 files changed, 3036 insertions, 975 deletions
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 9f0681bf1e87..66260777d644 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -22,17 +22,6 @@ #include <asm-generic/qspinlock_types.h> /** - * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock - * @lock : Pointer to queued spinlock structure - * - * There is a very slight possibility of live-lock if the lockers keep coming - * and the waiter is just unfortunate enough to not see any unlock state. - */ -#ifndef queued_spin_unlock_wait -extern void queued_spin_unlock_wait(struct qspinlock *lock); -#endif - -/** * queued_spin_is_locked - is the spinlock locked? * @lock: Pointer to queued spinlock structure * Return: 1 if it is locked, 0 otherwise @@ -41,8 +30,6 @@ extern void queued_spin_unlock_wait(struct qspinlock *lock); static __always_inline int queued_spin_is_locked(struct qspinlock *lock) { /* - * See queued_spin_unlock_wait(). - * * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL * isn't immediately observable. */ @@ -135,6 +122,5 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) #define arch_spin_trylock(l) queued_spin_trylock(l) #define arch_spin_unlock(l) queued_spin_unlock(l) #define arch_spin_lock_flags(l, f) queued_spin_lock(l) -#define arch_spin_unlock_wait(l) queued_spin_unlock_wait(l) #endif /* __ASM_GENERIC_QSPINLOCK_H */ diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index 532372c6cf15..e5da44eddd2f 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -27,6 +27,8 @@ * __kprobes_text_start, __kprobes_text_end * __entry_text_start, __entry_text_end * __ctors_start, __ctors_end + * __irqentry_text_start, __irqentry_text_end + * __softirqentry_text_start, __softirqentry_text_end */ extern char _text[], _stext[], _etext[]; extern char _data[], _sdata[], _edata[]; @@ -39,6 +41,8 @@ extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; extern char __kprobes_text_start[], __kprobes_text_end[]; extern char __entry_text_start[], __entry_text_end[]; extern char __start_rodata[], __end_rodata[]; +extern char __irqentry_text_start[], __irqentry_text_end[]; +extern char __softirqentry_text_start[], __softirqentry_text_end[]; /* Start and end of .ctors section - used for constructor calls. 
*/ extern char __ctors_start[], __ctors_end[]; diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 8afa4335e5b2..faddde44de8c 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -112,10 +112,11 @@ struct mmu_gather { #define HAVE_GENERIC_MMU_GATHER -void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); +void arch_tlb_gather_mmu(struct mmu_gather *tlb, + struct mm_struct *mm, unsigned long start, unsigned long end); void tlb_flush_mmu(struct mmu_gather *tlb); -void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, - unsigned long end); +void arch_tlb_finish_mmu(struct mmu_gather *tlb, + unsigned long start, unsigned long end, bool force); extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size); diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index fc824e2828f3..5d2add1a6c96 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -48,7 +48,11 @@ #define parent_node(node) ((void)(node),0) #endif #ifndef cpumask_of_node -#define cpumask_of_node(node) ((void)node, cpu_online_mask) + #ifdef CONFIG_NEED_MULTIPLE_NODES + #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask) + #else + #define cpumask_of_node(node) ((void)node, cpu_online_mask) + #endif #endif #ifndef pcibus_to_node #define pcibus_to_node(bus) ((void)(bus), -1) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index fffc9bdae025..9fdb54a95976 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -60,6 +60,22 @@ #define ALIGN_FUNCTION() . = ALIGN(8) /* + * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which + * generates .data.identifier sections, which need to be pulled in with + * .data. We don't want to pull in .data..other sections, which Linux + * has defined. Same for text and bss. + */ +#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION +#define TEXT_MAIN .text .text.[0-9a-zA-Z_]* +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* +#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* +#else +#define TEXT_MAIN .text +#define DATA_MAIN .data +#define BSS_MAIN .bss +#endif + +/* * Align to a 32 byte boundary equal to the * alignment gcc 4.5 uses for a struct */ @@ -198,12 +214,9 @@ /* * .data section - * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates - * .data.identifier which needs to be pulled in with .data, but don't want to - * pull in .data..stuff which has its own requirements. Same for bss. */ #define DATA_DATA \ - *(.data .data.[0-9a-zA-Z_]*) \ + *(DATA_MAIN) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ MEM_KEEP(init.data) \ @@ -434,16 +447,17 @@ VMLINUX_SYMBOL(__security_initcall_end) = .; \ } -/* .text section. Map to function alignment to avoid address changes +/* + * .text section. Map to function alignment to avoid address changes * during second ld run in second ld pass when generating System.map - * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates - * .text.identifier which needs to be pulled in with .text , but some - * architectures define .text.foo which is not intended to be pulled in here. 
- * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have - * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */ + * + * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead + * code elimination is enabled, so these sections should be converted + * to use ".." first. + */ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ - *(.text.hot .text .text.fixup .text.unlikely) \ + *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ *(.ref.text) \ MEM_KEEP(init.text) \ MEM_KEEP(exit.text) \ @@ -483,25 +497,17 @@ *(.entry.text) \ VMLINUX_SYMBOL(__entry_text_end) = .; -#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) #define IRQENTRY_TEXT \ ALIGN_FUNCTION(); \ VMLINUX_SYMBOL(__irqentry_text_start) = .; \ *(.irqentry.text) \ VMLINUX_SYMBOL(__irqentry_text_end) = .; -#else -#define IRQENTRY_TEXT -#endif -#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) #define SOFTIRQENTRY_TEXT \ ALIGN_FUNCTION(); \ VMLINUX_SYMBOL(__softirqentry_text_start) = .; \ *(.softirqentry.text) \ VMLINUX_SYMBOL(__softirqentry_text_end) = .; -#else -#define SOFTIRQENTRY_TEXT -#endif /* Section used for early init (in .S files) */ #define HEAD_TEXT *(.head.text) @@ -613,7 +619,7 @@ BSS_FIRST_SECTIONS \ *(.bss..page_aligned) \ *(.dynbss) \ - *(.bss .bss.[0-9a-zA-Z_]*) \ + *(BSS_MAIN) \ *(COMMON) \ } diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h new file mode 100644 index 000000000000..9b30fec302c8 --- /dev/null +++ b/include/drm/bridge/dw_mipi_dsi.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) STMicroelectronics SA 2017 + * + * Authors: Philippe Cornu <philippe.cornu@st.com> + * Yannick Fertre <yannick.fertre@st.com> + * + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef __DW_MIPI_DSI__ +#define __DW_MIPI_DSI__ + +struct dw_mipi_dsi_phy_ops { + int (*init)(void *priv_data); + int (*get_lane_mbps)(void *priv_data, struct drm_display_mode *mode, + unsigned long mode_flags, u32 lanes, u32 format, + unsigned int *lane_mbps); +}; + +struct dw_mipi_dsi_plat_data { + void __iomem *base; + unsigned int max_data_lanes; + + enum drm_mode_status (*mode_valid)(void *priv_data, + const struct drm_display_mode *mode); + + const struct dw_mipi_dsi_phy_ops *phy_ops; + + void *priv_data; +}; + +int dw_mipi_dsi_probe(struct platform_device *pdev, + const struct dw_mipi_dsi_plat_data *plat_data); +void dw_mipi_dsi_remove(struct platform_device *pdev); +int dw_mipi_dsi_bind(struct platform_device *pdev, struct drm_encoder *encoder, + const struct dw_mipi_dsi_plat_data *plat_data); +void dw_mipi_dsi_unbind(struct device *dev); + +#endif /* __DW_MIPI_DSI__ */ diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 39df16af7a4a..7277783a4ff0 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -82,19 +82,10 @@ #include <drm/drm_sysfs.h> #include <drm/drm_vblank.h> #include <drm/drm_irq.h> - +#include <drm/drm_device.h> struct module; -struct drm_device; -struct drm_agp_head; -struct drm_local_map; -struct drm_device_dma; -struct drm_gem_object; -struct drm_master; -struct drm_vblank_crtc; -struct drm_vma_offset_manager; - struct device_node; struct videomode; struct reservation_object; @@ -306,143 +297,6 @@ struct pci_controller; /** - * DRM device structure. This structure represent a complete card that - * may contain multiple heads. 
- */ -struct drm_device { - struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ - int if_version; /**< Highest interface version set */ - - /** \name Lifetime Management */ - /*@{ */ - struct kref ref; /**< Object ref-count */ - struct device *dev; /**< Device structure of bus-device */ - struct drm_driver *driver; /**< DRM driver managing the device */ - void *dev_private; /**< DRM driver private data */ - struct drm_minor *control; /**< Control node */ - struct drm_minor *primary; /**< Primary node */ - struct drm_minor *render; /**< Render node */ - bool registered; - - /* currently active master for this device. Protected by master_mutex */ - struct drm_master *master; - - atomic_t unplugged; /**< Flag whether dev is dead */ - struct inode *anon_inode; /**< inode for private address-space */ - char *unique; /**< unique name of the device */ - /*@} */ - - /** \name Locks */ - /*@{ */ - struct mutex struct_mutex; /**< For others */ - struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */ - /*@} */ - - /** \name Usage Counters */ - /*@{ */ - int open_count; /**< Outstanding files open, protected by drm_global_mutex. */ - spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */ - int buf_use; /**< Buffers in use -- cannot alloc */ - atomic_t buf_alloc; /**< Buffer allocation in progress */ - /*@} */ - - struct mutex filelist_mutex; - struct list_head filelist; - - /** \name Memory management */ - /*@{ */ - struct list_head maplist; /**< Linked list of regions */ - struct drm_open_hash map_hash; /**< User token hash table for maps */ - - /** \name Context handle management */ - /*@{ */ - struct list_head ctxlist; /**< Linked list of context handles */ - struct mutex ctxlist_mutex; /**< For ctxlist */ - - struct idr ctx_idr; - - struct list_head vmalist; /**< List of vmas (for debugging) */ - - /*@} */ - - /** \name DMA support */ - /*@{ */ - struct drm_device_dma *dma; /**< Optional pointer for DMA support */ - /*@} */ - - /** \name Context support */ - /*@{ */ - - __volatile__ long context_flag; /**< Context swapping flag */ - int last_context; /**< Last current context */ - /*@} */ - - /** - * @irq_enabled: - * - * Indicates that interrupt handling is enabled, specifically vblank - * handling. Drivers which don't use drm_irq_install() need to set this - * to true manually. - */ - bool irq_enabled; - int irq; - - /* - * If true, vblank interrupt will be disabled immediately when the - * refcount drops to zero, as opposed to via the vblank disable - * timer. - * This can be set to true it the hardware has a working vblank - * counter and the driver uses drm_vblank_on() and drm_vblank_off() - * appropriately. 
- */ - bool vblank_disable_immediate; - - /* array of size num_crtcs */ - struct drm_vblank_crtc *vblank; - - spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ - spinlock_t vbl_lock; - - u32 max_vblank_count; /**< size of vblank counter register */ - - /** - * List of events - */ - struct list_head vblank_event_list; - spinlock_t event_lock; - - /*@} */ - - struct drm_agp_head *agp; /**< AGP data */ - - struct pci_dev *pdev; /**< PCI device structure */ -#ifdef __alpha__ - struct pci_controller *hose; -#endif - - struct drm_sg_mem *sg; /**< Scatter gather memory */ - unsigned int num_crtcs; /**< Number of CRTCs on this device */ - - struct { - int context; - struct drm_hw_lock *lock; - } sigdata; - - struct drm_local_map *agp_buffer_map; - unsigned int agp_buffer_token; - - struct drm_mode_config mode_config; /**< Current mode config */ - - /** \name GEM information */ - /*@{ */ - struct mutex object_name_lock; - struct idr object_name_idr; - struct drm_vma_offset_manager *vma_offset_manager; - /*@} */ - int switch_power_state; -}; - -/** * drm_drv_uses_atomic_modeset - check if the driver implements * atomic_commit() * @dev: DRM device @@ -466,19 +320,6 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev, return ((dev->driver->driver_features & feature) ? 1 : 0); } -static inline void drm_device_set_unplugged(struct drm_device *dev) -{ - smp_wmb(); - atomic_set(&dev->unplugged, 1); -} - -static inline int drm_device_is_unplugged(struct drm_device *dev) -{ - int ret = atomic_read(&dev->unplugged); - smp_rmb(); - return ret; -} - /******************************************************************/ /** \name Internal function definitions */ /*@{*/ diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 0196f264a418..8a5808eb5628 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -154,6 +154,9 @@ struct __drm_connnectors_state { struct drm_connector_state *state, *old_state, *new_state; }; +struct drm_private_obj; +struct drm_private_state; + /** * struct drm_private_state_funcs - atomic state functions for private objects * @@ -166,7 +169,7 @@ struct __drm_connnectors_state { */ struct drm_private_state_funcs { /** - * @duplicate_state: + * @atomic_duplicate_state: * * Duplicate the current state of the private object and return it. It * is an error to call this before obj->state has been initialized. @@ -176,29 +179,30 @@ struct drm_private_state_funcs { * Duplicated atomic state or NULL when obj->state is not * initialized or allocation failed. */ - void *(*duplicate_state)(struct drm_atomic_state *state, void *obj); + struct drm_private_state *(*atomic_duplicate_state)(struct drm_private_obj *obj); /** - * @swap_state: + * @atomic_destroy_state: * - * This function swaps the existing state of a private object @obj with - * it's newly created state, the pointer to which is passed as - * @obj_state_ptr. + * Frees the private object state created with @atomic_duplicate_state. */ - void (*swap_state)(void *obj, void **obj_state_ptr); + void (*atomic_destroy_state)(struct drm_private_obj *obj, + struct drm_private_state *state); +}; - /** - * @destroy_state: - * - * Frees the private object state created with @duplicate_state. 
- */ - void (*destroy_state)(void *obj_state); +struct drm_private_obj { + struct drm_private_state *state; + + const struct drm_private_state_funcs *funcs; +}; + +struct drm_private_state { + struct drm_atomic_state *state; }; struct __drm_private_objs_state { - void *obj; - void *obj_state; - const struct drm_private_state_funcs *funcs; + struct drm_private_obj *ptr; + struct drm_private_state *state, *old_state, *new_state; }; /** @@ -207,6 +211,7 @@ struct __drm_private_objs_state { * @dev: parent DRM device * @allow_modeset: allow full modeset * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics + * @async_update: hint for asynchronous plane update * @planes: pointer to array of structures with per-plane data * @crtcs: pointer to array of CRTC pointers * @num_connector: size of the @connectors and @connector_states arrays @@ -221,6 +226,7 @@ struct drm_atomic_state { struct drm_device *dev; bool allow_modeset : 1; bool legacy_cursor_update : 1; + bool async_update : 1; struct __drm_planes_state *planes; struct __drm_crtcs_state *crtcs; int num_connector; @@ -309,20 +315,18 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, struct drm_plane_state * __must_check drm_atomic_get_plane_state(struct drm_atomic_state *state, struct drm_plane *plane); -int drm_atomic_plane_set_property(struct drm_plane *plane, - struct drm_plane_state *state, struct drm_property *property, - uint64_t val); struct drm_connector_state * __must_check drm_atomic_get_connector_state(struct drm_atomic_state *state, struct drm_connector *connector); -int drm_atomic_connector_set_property(struct drm_connector *connector, - struct drm_connector_state *state, struct drm_property *property, - uint64_t val); -void * __must_check +void drm_atomic_private_obj_init(struct drm_private_obj *obj, + struct drm_private_state *state, + const struct drm_private_state_funcs *funcs); +void drm_atomic_private_obj_fini(struct drm_private_obj *obj); + +struct drm_private_state * __must_check drm_atomic_get_private_obj_state(struct drm_atomic_state *state, - void *obj, - const struct drm_private_state_funcs *funcs); + struct drm_private_obj *obj); /** * drm_atomic_get_existing_crtc_state - get crtc state, if it exists @@ -541,8 +545,6 @@ int __must_check drm_atomic_add_affected_planes(struct drm_atomic_state *state, struct drm_crtc *crtc); -void drm_atomic_legacy_backoff(struct drm_atomic_state *state); - void drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret); @@ -809,43 +811,63 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); for_each_if (plane) /** - * __for_each_private_obj - iterate over all private objects + * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update * @__state: &struct drm_atomic_state pointer - * @obj: private object iteration cursor - * @obj_state: private object state iteration cursor + * @obj: &struct drm_private_obj iteration cursor + * @old_obj_state: &struct drm_private_state iteration cursor for the old state + * @new_obj_state: &struct drm_private_state iteration cursor for the new state * @__i: int iteration cursor, for macro-internal use - * @__funcs: &struct drm_private_state_funcs iteration cursor * - * This macro iterates over the array containing private object data in atomic - * state + * This iterates over all private objects in an atomic update, tracking both + * old and new state. 
This is useful in places where the state delta needs + * to be considered, for example in atomic check functions. */ -#define __for_each_private_obj(__state, obj, obj_state, __i, __funcs) \ - for ((__i) = 0; \ - (__i) < (__state)->num_private_objs && \ - ((obj) = (__state)->private_objs[__i].obj, \ - (__funcs) = (__state)->private_objs[__i].funcs, \ - (obj_state) = (__state)->private_objs[__i].obj_state, \ - 1); \ - (__i)++) \ +#define for_each_oldnew_private_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (old_obj_state) = (__state)->private_objs[__i].old_state, \ + (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ + (__i)++) \ + for_each_if (obj) /** - * for_each_private_obj - iterate over a specify type of private object + * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update * @__state: &struct drm_atomic_state pointer - * @obj_funcs: &struct drm_private_state_funcs function table to filter - * private objects - * @obj: private object iteration cursor - * @obj_state: private object state iteration cursor + * @obj: &struct drm_private_obj iteration cursor + * @old_obj_state: &struct drm_private_state iteration cursor for the old state * @__i: int iteration cursor, for macro-internal use - * @__funcs: &struct drm_private_state_funcs iteration cursor * - * This macro iterates over the private objects state array while filtering the - * objects based on the vfunc table that is passed as @obj_funcs. New macros - * can be created by passing in the vfunc table associated with a specific - * private object. + * This iterates over all private objects in an atomic update, tracking only + * the old state. This is useful in disable functions, where we need the old + * state the hardware is still in. + */ +#define for_each_old_private_obj_in_state(__state, obj, old_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \ + (__i)++) \ + for_each_if (obj) + +/** + * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update + * @__state: &struct drm_atomic_state pointer + * @obj: &struct drm_private_obj iteration cursor + * @new_obj_state: &struct drm_private_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all private objects in an atomic update, tracking only + * the new state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. 
*/ -#define for_each_private_obj(__state, obj_funcs, obj, obj_state, __i, __funcs) \ - __for_each_private_obj(__state, obj, obj_state, __i, __funcs) \ - for_each_if (__funcs == obj_funcs) +#define for_each_new_private_obj_in_state(__state, obj, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ + (__i)++) \ + for_each_if (obj) /** * drm_atomic_crtc_needs_modeset - compute combined modeset need @@ -853,7 +875,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); * * To give drivers flexibility &struct drm_crtc_state has 3 booleans to track * whether the state CRTC changed enough to need a full modeset cycle: - * planes_changed, mode_changed and active_changed. This helper simply + * mode_changed, active_changed and connectors_changed. This helper simply * combines these three to compute the overall need for a modeset for @state. * * The atomic helper code sets these booleans, but drivers can and should diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index f0a8678ae98e..d2b56cc657e9 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -33,6 +33,8 @@ #include <drm/drm_modeset_helper.h> struct drm_atomic_state; +struct drm_private_obj; +struct drm_private_state; int drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state); @@ -41,9 +43,14 @@ int drm_atomic_helper_check_planes(struct drm_device *dev, int drm_atomic_helper_check(struct drm_device *dev, struct drm_atomic_state *state); void drm_atomic_helper_commit_tail(struct drm_atomic_state *state); +void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state); int drm_atomic_helper_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock); +int drm_atomic_helper_async_check(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_async_commit(struct drm_device *dev, + struct drm_atomic_state *state); int drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state, @@ -52,6 +59,9 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev, void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, struct drm_atomic_state *old_state); +void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, + struct drm_atomic_state *old_state); + void drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, struct drm_atomic_state *old_state); @@ -77,8 +87,8 @@ void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state, bool atomic); -void drm_atomic_helper_swap_state(struct drm_atomic_state *state, - bool stall); +int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state, + bool stall); /* nonblocking commit helpers */ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, @@ -114,15 +124,6 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, int drm_atomic_helper_resume(struct drm_device *dev, struct drm_atomic_state *state); -int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc, - struct drm_property *property, - uint64_t val); -int drm_atomic_helper_plane_set_property(struct drm_plane *plane, - struct drm_property *property, - uint64_t val); -int drm_atomic_helper_connector_set_property(struct drm_connector *connector, - struct drm_property *property, - uint64_t val); int 
drm_atomic_helper_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, @@ -135,8 +136,6 @@ int drm_atomic_helper_page_flip_target( uint32_t flags, uint32_t target, struct drm_modeset_acquire_ctx *ctx); -int drm_atomic_helper_connector_dpms(struct drm_connector *connector, - int mode); struct drm_encoder * drm_atomic_helper_best_encoder(struct drm_connector *connector); @@ -178,6 +177,8 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size, struct drm_modeset_acquire_ctx *ctx); +void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj, + struct drm_private_state *state); /** * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 1dc94d5392e2..6522d4cbc9d9 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -268,6 +268,9 @@ void drm_bridge_enable(struct drm_bridge *bridge); struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel, u32 connector_type); void drm_panel_bridge_remove(struct drm_bridge *bridge); +struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev, + struct drm_panel *panel, + u32 connector_type); #endif #endif diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index ae5b7dc316c8..ea8da401c93c 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -135,6 +135,28 @@ struct drm_scdc { struct drm_hdmi_info { /** @scdc: sink's scdc support and capabilities */ struct drm_scdc scdc; + + /** + * @y420_vdb_modes: bitmap of modes which can support ycbcr420 + * output only (not normal RGB/YCBCR444/422 outputs). There are a total + * of 107 VICs defined by the CEA-861-F spec, so the size is 128 bits to + * map up to 128 VICs. + */ + unsigned long y420_vdb_modes[BITS_TO_LONGS(128)]; + + /** + * @y420_cmdb_modes: bitmap of modes which can support ycbcr420 + * output also, along with normal HDMI outputs. There are a total of 107 + * VICs defined by the CEA-861-F spec, so the size is 128 bits to map up + * to 128 VICs. + */ + unsigned long y420_cmdb_modes[BITS_TO_LONGS(128)]; + + /** @y420_cmdb_map: bitmap of SVD index, to extract VCB modes */ + u64 y420_cmdb_map; + + /** @y420_dc_modes: bitmap of deep color support index */ + u8 y420_dc_modes; }; /** @@ -198,6 +220,7 @@ struct drm_display_info { #define DRM_COLOR_FORMAT_RGB444 (1<<0) #define DRM_COLOR_FORMAT_YCRCB444 (1<<1) #define DRM_COLOR_FORMAT_YCRCB422 (1<<2) +#define DRM_COLOR_FORMAT_YCRCB420 (1<<3) /** * @color_formats: HDMI Color formats, selects between RGB and YCrCb @@ -359,8 +382,8 @@ struct drm_connector_funcs { * implement the 4 level DPMS support on the connector any more, but * instead only have an on/off "ACTIVE" property on the CRTC object. * - * Drivers implementing atomic modeset should use - * drm_atomic_helper_connector_dpms() to implement this hook. + * This hook is not used by atomic drivers; remapping of the legacy DPMS + * property is handled entirely in the DRM core. * * RETURNS: * @@ -457,11 +480,9 @@ struct drm_connector_funcs { * This is the legacy entry point to update a property attached to the * connector. * - * Drivers implementing atomic modeset should use - * drm_atomic_helper_connector_set_property() to implement this hook. - * * This callback is optional if the driver does not support any legacy - * driver-private properties. + * driver-private properties.
For atomic drivers it is not used because + * property handling is done entirely in the DRM core. * * RETURNS: * @@ -726,6 +747,15 @@ struct drm_connector { bool interlace_allowed; bool doublescan_allowed; bool stereo_allowed; + + /** + * @ycbcr_420_allowed: This bool indicates if this connector is + * capable of handling YCBCR 420 output. While parsing the EDID + * blocks it is very helpful to know whether the source is capable of + * handling YCBCR 420 outputs. + */ + bool ycbcr_420_allowed; + /** * @registered: Is this connector exposed (registered) with userspace? * Protected by @mutex. diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 629a5fe075b3..1a642020e306 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -358,14 +358,6 @@ struct drm_crtc_funcs { * drm_crtc_enable_color_mgmt(), which then supports the legacy gamma * interface through the drm_atomic_helper_legacy_gamma_set() * compatibility implementation. - * - * NOTE: - * - * Drivers that support gamma tables and also fbdev emulation through - * the provided helper library need to take care to fill out the gamma - * hooks for both. Currently there's a bit an unfortunate duplication - * going on, which should eventually be unified to just one set of - * hooks. */ int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size, @@ -481,11 +473,9 @@ struct drm_crtc_funcs { * This is the legacy entry point to update a property attached to the * CRTC. * - * Drivers implementing atomic modeset should use - * drm_atomic_helper_crtc_set_property() to implement this hook. - * * This callback is optional if the driver does not support any legacy - * driver-private properties. + * driver-private properties. For atomic drivers it is not used because + * property handling is done entirely in the DRM core. * * RETURNS: * @@ -685,6 +675,9 @@ struct drm_crtc_funcs { * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or * enabling a CRTC. * + * See also &drm_device.vblank_disable_immediate and + * &drm_device.max_vblank_count. + * * Returns: * * Raw vblank counter value. diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h new file mode 100644 index 000000000000..e21af87a2f3c --- /dev/null +++ b/include/drm/drm_device.h @@ -0,0 +1,190 @@ +#ifndef _DRM_DEVICE_H_ +#define _DRM_DEVICE_H_ + +#include <linux/list.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/idr.h> + +#include <drm/drm_hashtab.h> +#include <drm/drm_mode_config.h> + +struct drm_driver; +struct drm_minor; +struct drm_master; +struct drm_device_dma; +struct drm_vblank_crtc; +struct drm_sg_mem; +struct drm_local_map; +struct drm_vma_offset_manager; + +struct inode; + +struct pci_dev; +struct pci_controller; + +/** + * DRM device structure. This structure represents a complete card that + * may contain multiple heads. + */ +struct drm_device { + struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ + int if_version; /**< Highest interface version set */ + + /** \name Lifetime Management */ + /*@{ */ + struct kref ref; /**< Object ref-count */ + struct device *dev; /**< Device structure of bus-device */ + struct drm_driver *driver; /**< DRM driver managing the device */ + void *dev_private; /**< DRM driver private data */ + struct drm_minor *control; /**< Control node */ + struct drm_minor *primary; /**< Primary node */ + struct drm_minor *render; /**< Render node */ + bool registered; + + /* currently active master for this device.
Protected by master_mutex */ + struct drm_master *master; + + atomic_t unplugged; /**< Flag whether dev is dead */ + struct inode *anon_inode; /**< inode for private address-space */ + char *unique; /**< unique name of the device */ + /*@} */ + + /** \name Locks */ + /*@{ */ + struct mutex struct_mutex; /**< For others */ + struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */ + /*@} */ + + /** \name Usage Counters */ + /*@{ */ + int open_count; /**< Outstanding files open, protected by drm_global_mutex. */ + spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */ + int buf_use; /**< Buffers in use -- cannot alloc */ + atomic_t buf_alloc; /**< Buffer allocation in progress */ + /*@} */ + + struct mutex filelist_mutex; + struct list_head filelist; + + /** \name Memory management */ + /*@{ */ + struct list_head maplist; /**< Linked list of regions */ + struct drm_open_hash map_hash; /**< User token hash table for maps */ + + /** \name Context handle management */ + /*@{ */ + struct list_head ctxlist; /**< Linked list of context handles */ + struct mutex ctxlist_mutex; /**< For ctxlist */ + + struct idr ctx_idr; + + struct list_head vmalist; /**< List of vmas (for debugging) */ + + /*@} */ + + /** \name DMA support */ + /*@{ */ + struct drm_device_dma *dma; /**< Optional pointer for DMA support */ + /*@} */ + + /** \name Context support */ + /*@{ */ + + __volatile__ long context_flag; /**< Context swapping flag */ + int last_context; /**< Last current context */ + /*@} */ + + /** + * @irq_enabled: + * + * Indicates that interrupt handling is enabled, specifically vblank + * handling. Drivers which don't use drm_irq_install() need to set this + * to true manually. + */ + bool irq_enabled; + int irq; + + /** + * @vblank_disable_immediate: + * + * If true, vblank interrupt will be disabled immediately when the + * refcount drops to zero, as opposed to via the vblank disable + * timer. + * + * This can be set to true if the hardware has a working vblank counter + * with high-precision timestamping (otherwise there are races) and the + * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off() + * appropriately. See also @max_vblank_count and + * &drm_crtc_funcs.get_vblank_counter. + */ + bool vblank_disable_immediate; + + /** + * @vblank: + * + * Array of vblank tracking structures, one per &struct drm_crtc. For + * historical reasons (vblank support predates kernel modesetting) this + * is free-standing and not part of &struct drm_crtc itself. It must be + * initialized explicitly by calling drm_vblank_init(). + */ + struct drm_vblank_crtc *vblank; + + spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ + spinlock_t vbl_lock; + + /** + * @max_vblank_count: + * + * Maximum value of the vblank registers. This value +1 will result in a + * wrap-around of the vblank register. It is used by the vblank core to + * handle wrap-arounds. + * + * If set to zero the vblank core will try to guess the elapsed vblanks + * between times when the vblank interrupt is disabled through + * high-precision timestamps. That approach suffers from small + * races and imprecision over longer time periods, hence exposing a + * hardware vblank counter is always recommended. + * + * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
+ */ + u32 max_vblank_count; /**< size of vblank counter register */ + + /** + * List of events + */ + struct list_head vblank_event_list; + spinlock_t event_lock; + + /*@} */ + + struct drm_agp_head *agp; /**< AGP data */ + + struct pci_dev *pdev; /**< PCI device structure */ +#ifdef __alpha__ + struct pci_controller *hose; +#endif + + struct drm_sg_mem *sg; /**< Scatter gather memory */ + unsigned int num_crtcs; /**< Number of CRTCs on this device */ + + struct { + int context; + struct drm_hw_lock *lock; + } sigdata; + + struct drm_local_map *agp_buffer_map; + unsigned int agp_buffer_token; + + struct drm_mode_config mode_config; /**< Current mode config */ + + /** \name GEM information */ + /*@{ */ + struct mutex object_name_lock; + struct idr object_name_idr; + struct drm_vma_offset_manager *vma_offset_manager; + /*@} */ + int switch_power_state; +}; + +#endif diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 177ab6f86855..d55abb75f29a 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -404,12 +404,17 @@ struct drm_dp_payload { int vcpi; }; +#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base) + struct drm_dp_mst_topology_state { + struct drm_private_state base; int avail_slots; struct drm_atomic_state *state; struct drm_dp_mst_topology_mgr *mgr; }; +#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) + /** * struct drm_dp_mst_topology_mgr - DisplayPort MST manager * @@ -419,6 +424,11 @@ struct drm_dp_mst_topology_state { */ struct drm_dp_mst_topology_mgr { /** + * @base: Base private object for atomic + */ + struct drm_private_obj base; + + /** * @dev: device pointer for adding i2c devices etc. */ struct drm_device *dev; diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index d855f9ae41a8..71bbaaec836d 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -30,7 +30,8 @@ #include <linux/list.h> #include <linux/irqreturn.h> -struct drm_device; +#include <drm/drm_device.h> + struct drm_file; struct drm_gem_object; struct drm_master; @@ -173,8 +174,6 @@ struct drm_driver { */ void (*release) (struct drm_device *); - int (*set_busid)(struct drm_device *dev, struct drm_master *master); - /** * @get_vblank_counter: * @@ -392,6 +391,11 @@ struct drm_driver { */ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv); + /** + * @debugfs_init: + * + * Allows drivers to create driver-specific debugfs files. 
+ */ int (*debugfs_init)(struct drm_minor *minor); /** @@ -410,7 +414,18 @@ struct drm_driver { */ void (*gem_free_object_unlocked) (struct drm_gem_object *obj); + /** + * @gem_open_object: + * + * Driver hook called upon gem handle creation. + */ int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); + + /** + * @gem_close_object: + * + * Driver hook called upon gem handle release. + */ void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); /** @@ -423,19 +438,34 @@ struct drm_driver { size_t size); /* prime: */ - /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */ + /** + * @prime_handle_to_fd: + * + * export handle -> fd (see drm_gem_prime_handle_to_fd() helper) + */ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); - /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */ + /** + * @prime_fd_to_handle: + * + * import fd -> handle (see drm_gem_prime_fd_to_handle() helper) + */ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); - /* export GEM -> dmabuf */ + /** + * @gem_prime_export: + * + * export GEM -> dmabuf + */ struct dma_buf * (*gem_prime_export)(struct drm_device *dev, struct drm_gem_object *obj, int flags); - /* import dmabuf -> GEM */ + /** + * @gem_prime_import: + * + * import dmabuf -> GEM + */ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, struct dma_buf *dma_buf); - /* low-level interface used by drm_gem_prime_{import,export} */ int (*gem_prime_pin)(struct drm_gem_object *obj); void (*gem_prime_unpin)(struct drm_gem_object *obj); struct reservation_object * (*gem_prime_res_obj)( @@ -507,19 +537,46 @@ struct drm_driver { struct drm_device *dev, uint32_t handle); - /* Driver private ops for this object */ + /** + * @gem_vm_ops: Driver private ops for this object + */ const struct vm_operations_struct *gem_vm_ops; + /** @major: driver major number */ int major; + /** @minor: driver minor number */ int minor; + /** @patchlevel: driver patch level */ int patchlevel; + /** @name: driver name */ char *name; + /** @desc: driver description */ char *desc; + /** @date: driver date */ char *date; + /** @driver_features: driver features */ u32 driver_features; + + /** + * @ioctls: + * + * Array of driver-private IOCTL description entries. See the chapter on + * :ref:`IOCTL support in the userland interfaces + * chapter<drm_driver_ioctl>` for the full details. + */ + const struct drm_ioctl_desc *ioctls; + /** @num_ioctls: Number of entries in @ioctls. */ int num_ioctls; + + /** + * @fops: + * + * File operations for the DRM device node. See the discussion in + * :ref:`file operations<drm_driver_fops>` for in-depth coverage and + * some examples. + */ const struct file_operations *fops; /* Everything below here is for legacy driver, never use! */ @@ -557,7 +614,24 @@ void drm_dev_unregister(struct drm_device *dev); void drm_dev_ref(struct drm_device *dev); void drm_dev_unref(struct drm_device *dev); void drm_put_dev(struct drm_device *dev); -void drm_unplug_dev(struct drm_device *dev); +void drm_dev_unplug(struct drm_device *dev); + +/** + * drm_dev_is_unplugged - is a DRM device unplugged + * @dev: DRM device + * + * This function can be called to check whether a hotpluggable device is + * unplugged. Unplugging itself is signalled through drm_dev_unplug(). If a + * device is unplugged, these two functions guarantee that any store before + * calling drm_dev_unplug() is visible to callers of this function after it + * completes. + */ +static inline int drm_dev_is_unplugged(struct drm_device *dev) +{ + int ret = atomic_read(&dev->unplugged); + smp_rmb(); + return ret; +} + int drm_dev_set_unique(struct drm_device *dev, const char *name); diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 7b9f48b62e07..1e1908a6b1d6 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -213,6 +213,14 @@ struct detailed_timing { #define DRM_EDID_HDMI_DC_30 (1 << 4) #define DRM_EDID_HDMI_DC_Y444 (1 << 3) +/* YCBCR 420 deep color modes */ +#define DRM_EDID_YCBCR420_DC_48 (1 << 6) +#define DRM_EDID_YCBCR420_DC_36 (1 << 5) +#define DRM_EDID_YCBCR420_DC_30 (1 << 4) +#define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \ + DRM_EDID_YCBCR420_DC_36 | \ + DRM_EDID_YCBCR420_DC_30) + /* ELD Header Block */ #define DRM_ELD_HEADER_BLOCK_SIZE 4 @@ -343,7 +351,8 @@ drm_load_edid_firmware(struct drm_connector *connector) int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, - const struct drm_display_mode *mode); + const struct drm_display_mode *mode, + bool is_hdmi2_sink); int drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, const struct drm_display_mode *mode); diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index 199a63f48659..a323781afc3f 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -24,9 +24,9 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); -void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state); +void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state); void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma, - int state); + bool state); void drm_fb_cma_destroy(struct drm_framebuffer *fb); int drm_fb_cma_create_handle(struct drm_framebuffer *fb, diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 119e5e4609c7..33fe95927742 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -85,38 +85,6 @@ struct drm_fb_helper_surface_size { */ struct drm_fb_helper_funcs { /** - * @gamma_set: - * - * Set the given gamma LUT register on the given CRTC. - * - * This callback is optional. - * - * FIXME: - * - * This callback is functionally redundant with the core gamma table - * support and simply exists because the fbdev hasn't yet been - * refactored to use the core gamma table interfaces. - */ - void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, - u16 blue, int regno); - /** - * @gamma_get: - * - * Read the given gamma LUT register on the given CRTC, used to save the - * current LUT when force-restoring the fbdev for e.g. kdbg. - * - * This callback is optional. - * - * FIXME: - * - * This callback is functionally redundant with the core gamma table - * support and simply exists because the fbdev hasn't yet been - * refactored to use the core gamma table interfaces. - */ - void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, int regno); - - /** * @fb_probe: * * Driver callback to allocate and initialize the fbdev info structure.
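As a usage sketch for the drm_dev_unplug() / drm_dev_is_unplugged() pair added to drm_drv.h above: a driver entry point can refuse to touch hardware once the device is gone. The foo_* ioctl below is hypothetical; only the two DRM calls and the usual -ENODEV convention are taken from the tree.

/* Hypothetical driver ioctl: bail out early on an unplugged device.
 * Per the kernel-doc above, any store made before drm_dev_unplug()
 * is guaranteed to be visible once this check has completed. */
static int foo_do_something_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	/* ... safe to access the foo device's registers here ... */
	return 0;
}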
@@ -169,7 +137,6 @@ struct drm_fb_helper_connector { * @crtc_info: per-CRTC helper state (mode, x/y offset, etc.) * @connector_count: number of connected connectors * @connector_info_alloc_count: size of connector_info - * @connector_info: array of per-connector information * @funcs: driver callbacks for fb helper * @fbdev: emulated fbdev device info struct * @pseudo_palette: fake palette of 16 colors @@ -191,6 +158,12 @@ struct drm_fb_helper { struct drm_fb_helper_crtc *crtc_info; int connector_count; int connector_info_alloc_count; + /** + * @connector_info: + * + * Array of per-connector information. Do not iterate directly, but use + * drm_fb_helper_for_each_connector. + */ struct drm_fb_helper_connector **connector_info; const struct drm_fb_helper_funcs *funcs; struct fb_info *fbdev; @@ -201,6 +174,18 @@ struct drm_fb_helper { struct work_struct resume_work; /** + * @lock: + * + * Top-level FBDEV helper lock. This protects all internal data + * structures and lists, such as @connector_info and @crtc_info. + * + * FIXME: fbdev emulation locking is a mess and long term we want to + * protect all helper internal state with this lock as well as reduce + * core KMS locking as much as possible. + */ + struct mutex lock; + + /** * @kernel_fb_list: * * Entry on the global kernel_fb_helper_list, used for kgdb entry/exit. @@ -215,6 +200,29 @@ struct drm_fb_helper { * needs to be reprobed when fbdev is in control again. */ bool delayed_hotplug; + + /** + * @deferred_setup: + * + * If no outputs are connected (disconnected or unknown) the FB helper + * code will defer setup until at least one of the outputs shows up. + * This field keeps track of the status so that setup can be retried + * at every hotplug event until it succeeds eventually. + * + * Protected by @lock. + */ + bool deferred_setup; + + /** + * @preferred_bpp: + * + * Temporary storage for the driver's preferred BPP setting passed to + * FB helper initialization. This needs to be tracked so that deferred + * FB helper setup can pass this on. + * + * See also: @deferred_setup + */ + int preferred_bpp; }; /** diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 5244f059d23a..b6996ddb19d6 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -190,6 +190,13 @@ struct drm_framebuffer { * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. */ struct list_head filp_head; + /** + * @obj: GEM objects backing the framebuffer, one per plane (optional). + * + * This is used by the GEM framebuffer helpers, see e.g. + * drm_gem_fb_create(). + */ + struct drm_gem_object *obj[4]; }; #define obj_to_fb(x) container_of(x, struct drm_framebuffer, base) diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 663d80358057..9c55c2acaa2b 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -131,21 +131,6 @@ struct drm_gem_object { uint32_t write_domain; /** - * @pending_read_domains: - * - * While validating an exec operation, the - * new read/write domain values are computed here. - * They will be transferred to the above values - * at the point that any cache flushing occurs - */ - uint32_t pending_read_domains; - - /** - * @pending_write_domain: Write domain similar to @pending_read_domains. - */ - uint32_t pending_write_domain; - - /** - * @dma_buf: - * - * dma-buf associated with this GEM object.
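To illustrate the new &drm_framebuffer.obj[] field together with the GEM framebuffer helpers declared in drm_gem_framebuffer_helper.h in the next hunk: a driver whose framebuffers are backed by one GEM object per plane can point its .fb_create hook at drm_gem_fb_create() and fetch the object back with drm_gem_fb_get_obj(), with no driver-private bookkeeping. The foo_* names are hypothetical sketches; the helper signatures match the declarations in this diff.

#include <drm/drm_gem_framebuffer_helper.h>

/* Hypothetical mode_config wiring: the helper allocates the framebuffer
 * and stores the per-plane GEM objects in fb->obj[]. */
static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create = drm_gem_fb_create,
};

/* Hypothetical scanout programming using the stored object of plane 0. */
static void foo_program_scanout(struct drm_plane_state *state)
{
	struct drm_gem_object *obj = drm_gem_fb_get_obj(state->fb, 0);

	/* ... translate obj into a DMA address and write it to hardware ... */
}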
@@ -317,6 +302,8 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty, bool accessed); struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); +int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + u32 handle, u64 *offset); int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, uint32_t handle); diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index b42529e0fae0..58a739bf15f1 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -73,11 +73,6 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *drm, struct drm_mode_create_dumb *args); -/* map memory region for DRM framebuffer to user space */ -int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *drm, u32 handle, - u64 *offset); - /* set vm_flags and we can change the VM attribute to other one at here */ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma); diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h new file mode 100644 index 000000000000..db9cfa07235e --- /dev/null +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -0,0 +1,37 @@ +#ifndef __DRM_GEM_FB_HELPER_H__ +#define __DRM_GEM_FB_HELPER_H__ + +struct drm_device; +struct drm_file; +struct drm_fb_helper_surface_size; +struct drm_framebuffer; +struct drm_framebuffer_funcs; +struct drm_gem_object; +struct drm_mode_fb_cmd2; +struct drm_plane; +struct drm_plane_state; + +struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb, + unsigned int plane); +void drm_gem_fb_destroy(struct drm_framebuffer *fb); +int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file, + unsigned int *handle); + +struct drm_framebuffer * +drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_framebuffer_funcs *funcs); +struct drm_framebuffer * +drm_gem_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd); + +int drm_gem_fb_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state); + +struct drm_framebuffer * +drm_gem_fbdev_fb_create(struct drm_device *dev, + struct drm_fb_helper_surface_size *sizes, + unsigned int pitch_align, struct drm_gem_object *obj, + const struct drm_framebuffer_funcs *funcs); + +#endif diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 42981711189b..1b37368416c8 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -757,6 +757,12 @@ struct drm_mode_config { */ bool allow_fb_modifiers; + /** + * @modifiers_property: Plane property to list supported modifier/format + * combinations.
+ */ + struct drm_property *modifiers_property; + /* cursor size */ uint32_t cursor_width, cursor_height; diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 94ac771fe460..9f3421c8efcd 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -80,6 +80,7 @@ struct videomode; * @MODE_ONE_SIZE: only one resolution is supported * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking * @MODE_NO_STEREO: stereo modes not supported + * @MODE_NO_420: ycbcr 420 modes not supported * @MODE_STALE: mode has become stale * @MODE_BAD: unspecified reason * @MODE_ERROR: error condition @@ -124,6 +125,7 @@ enum drm_mode_status { MODE_ONE_SIZE, MODE_NO_REDUCED, MODE_NO_STEREO, + MODE_NO_420, MODE_STALE = -3, MODE_BAD = -2, MODE_ERROR = -1 @@ -450,6 +452,12 @@ int drm_mode_convert_umode(struct drm_display_mode *out, const struct drm_mode_modeinfo *in); void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); +bool drm_mode_is_420_only(const struct drm_display_info *display, + const struct drm_display_mode *mode); +bool drm_mode_is_420_also(const struct drm_display_info *display, + const struct drm_display_mode *mode); +bool drm_mode_is_420(const struct drm_display_info *display, + const struct drm_display_mode *mode); struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, @@ -496,6 +504,9 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode); enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode, int maxX, int maxY); +enum drm_mode_status +drm_mode_validate_ycbcr420(const struct drm_display_mode *mode, + struct drm_connector *connector); void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose); void drm_mode_sort(struct list_head *mode_list); diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 85984b208218..c55cf3ff6847 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -71,7 +71,7 @@ struct drm_crtc_helper_funcs { * This callback is used by the legacy CRTC helpers. Atomic helpers * also support using this hook for enabling and disabling a CRTC to * facilitate transitions to atomic, but it is deprecated. Instead - * @enable and @disable should be used. + * @atomic_enable and @atomic_disable should be used. */ void (*dpms)(struct drm_crtc *crtc, int mode); @@ -85,8 +85,8 @@ struct drm_crtc_helper_funcs { * * This callback is used by the legacy CRTC helpers. Atomic helpers * also support using this hook for disabling a CRTC to facilitate - * transitions to atomic, but it is deprecated. Instead @disable should - * be used. + * transitions to atomic, but it is deprecated. Instead @atomic_disable + * should be used. */ void (*prepare)(struct drm_crtc *crtc); @@ -100,8 +100,8 @@ struct drm_crtc_helper_funcs { * * This callback is used by the legacy CRTC helpers. Atomic helpers * also support using this hook for enabling a CRTC to facilitate - * transitions to atomic, but it is deprecated. Instead @enable should - * be used. + * transitions to atomic, but it is deprecated. Instead @atomic_enable + * should be used. 
*/ void (*commit)(struct drm_crtc *crtc); @@ -222,7 +222,7 @@ struct drm_crtc_helper_funcs { * pipeline is suspended using either DPMS or the new "ACTIVE" property. * Which means register values set in this callback might get reset when * the CRTC is suspended, but not restored. Such drivers should instead - * move all their CRTC setup into the @enable callback. + * move all their CRTC setup into the @atomic_enable callback. * * This callback is optional. */ @@ -267,22 +267,6 @@ struct drm_crtc_helper_funcs { enum mode_set_atomic); /** - * @load_lut: - * - * Load a LUT prepared with the &drm_fb_helper_funcs.gamma_set vfunc. - * - * This callback is optional and is only used by the fbdev emulation - * helpers. - * - * FIXME: - * - * This callback is functionally redundant with the core gamma table - * support and simply exists because the fbdev hasn't yet been - * refactored to use the core gamma table interfaces. - */ - void (*load_lut)(struct drm_crtc *crtc); - - /** * @disable: * * This callback should be used to disable the CRTC. With the atomic @@ -297,7 +281,7 @@ struct drm_crtc_helper_funcs { * Atomic drivers don't need to implement it if there's no need to * disable anything at the CRTC level. To ensure that runtime PM * handling (using either DPMS or the new "ACTIVE" property) works - * @disable must be the inverse of @enable for atomic drivers. + * @disable must be the inverse of @atomic_enable for atomic drivers. * Atomic drivers should consider to use @atomic_disable instead of * this one. * @@ -316,24 +300,6 @@ struct drm_crtc_helper_funcs { void (*disable)(struct drm_crtc *crtc); /** - * @enable: - * - * This callback should be used to enable the CRTC. With the atomic - * drivers it is called before all encoders connected to this CRTC are - * enabled through the encoder's own &drm_encoder_helper_funcs.enable - * hook. If that sequence is too simple drivers can just add their own - * hooks and call it from this CRTC callback here by looping over all - * encoders connected to it using for_each_encoder_on_crtc(). - * - * This hook is used only by atomic helpers, for symmetry with @disable. - * Atomic drivers don't need to implement it if there's no need to - * enable anything at the CRTC level. To ensure that runtime PM handling - * (using either DPMS or the new "ACTIVE" property) works - * @enable must be the inverse of @disable for atomic drivers. - */ - void (*enable)(struct drm_crtc *crtc); - - /** * @atomic_check: * * Drivers should check plane-update related CRTC constraints in this @@ -433,6 +399,30 @@ struct drm_crtc_helper_funcs { struct drm_crtc_state *old_crtc_state); /** + * @atomic_enable: + * + * This callback should be used to enable the CRTC. With the atomic + * drivers it is called before all encoders connected to this CRTC are + * enabled through the encoder's own &drm_encoder_helper_funcs.enable + * hook. If that sequence is too simple drivers can just add their own + * hooks and call it from this CRTC callback here by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers, for symmetry with + * @atomic_disable. Atomic drivers don't need to implement it if there's + * no need to enable anything at the CRTC level. To ensure that runtime + * PM handling (using either DPMS or the new "ACTIVE" property) works + * @atomic_enable must be the inverse of @atomic_disable for atomic + * drivers. 
+ * + * Drivers can use the @old_crtc_state input parameter if the operations + * needed to enable the CRTC don't depend solely on the new state but + * also on the transition between the old state and the new state. + */ + void (*atomic_enable)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); + + /** * @atomic_disable: * * This callback should be used to disable the CRTC. With the atomic @@ -1129,6 +1119,56 @@ struct drm_plane_helper_funcs { */ void (*atomic_disable)(struct drm_plane *plane, struct drm_plane_state *old_state); + + /** + * @atomic_async_check: + * + * Drivers should set this function pointer to check if the plane state + * can be updated in an async fashion. Here async means "not vblank + * synchronized". + * + * This hook is called by drm_atomic_async_check() to establish if a + * given update can be committed asynchronously, that is, if it can + * jump ahead of the state currently queued for update. + * + * RETURNS: + * + * Return 0 on success; any error returned indicates that the update + * cannot be applied in an asynchronous manner. + */ + int (*atomic_async_check)(struct drm_plane *plane, + struct drm_plane_state *state); + + /** + * @atomic_async_update: + * + * Drivers should set this function pointer to perform asynchronous + * updates of planes, that is, jump ahead of the currently queued + * state and update the plane. Here async means "not vblank + * synchronized". + * + * This hook is called by drm_atomic_helper_async_commit(). + * + * An async update will happen on legacy cursor updates. An async + * update won't happen if there is an outstanding commit modifying + * the same plane. + * + * Note that unlike &drm_plane_helper_funcs.atomic_update this hook + * takes the new &drm_plane_state as parameter. When doing async_update + * drivers shouldn't replace the &drm_plane_state but update the + * current one with the new plane configurations in the new + * plane_state. + * + * FIXME: + * - It only works for single plane updates + * - Async Pageflips are not supported yet + * - Some hw might still scan out the old buffer until the next + * vblank, however we let go of the fb references as soon as + * we run this hook. For now drivers must implement their own workers + * for deferring if needed, until a common solution is created. + */ + void (*atomic_async_update)(struct drm_plane *plane, + struct drm_plane_state *new_state); }; /** @@ -1169,7 +1209,8 @@ struct drm_mode_config_helper_funcs { * After the atomic update is committed to the hardware this hook needs * to call drm_atomic_helper_commit_hw_done(). Then wait for the update * to be executed by the hardware, for example using - * drm_atomic_helper_wait_for_vblanks(), and then clean up the old + * drm_atomic_helper_wait_for_vblanks() or + * drm_atomic_helper_wait_for_flip_done(), and then clean up the old * framebuffers using drm_atomic_helper_cleanup_planes().
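The rename of the CRTC-level @enable hook to @atomic_enable and the new asynchronous plane-update pair deserve a concrete shape. Below is a hedged sketch of how a driver might wire them up; the foo_* driver, its power helpers, and the 64x64 cursor limit are all hypothetical, only the vtable fields and hook signatures come from the hunks above:

	/* A minimal sketch, assuming a hypothetical foo driver. */
	static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
					   struct drm_crtc_state *old_crtc_state)
	{
		/* old_crtc_state is available for transition-dependent setup */
		foo_pipe_power_on(crtc);	/* hypothetical helper */
	}

	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_crtc_state *old_crtc_state)
	{
		/* must be the inverse of foo_crtc_atomic_enable */
		foo_pipe_power_off(crtc);	/* hypothetical helper */
	}

	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
		.atomic_enable	= foo_crtc_atomic_enable,
		.atomic_disable	= foo_crtc_atomic_disable,
	};

	static int foo_plane_atomic_async_check(struct drm_plane *plane,
						struct drm_plane_state *state)
	{
		/* only small, cursor-like updates may jump the queued state */
		if (state->crtc_w > 64 || state->crtc_h > 64)
			return -EINVAL;
		return 0;
	}

	static void foo_plane_atomic_async_update(struct drm_plane *plane,
						  struct drm_plane_state *new_state)
	{
		/* update the current state in place, don't swap the pointer */
		plane->state->crtc_x = new_state->crtc_x;
		plane->state->crtc_y = new_state->crtc_y;
		foo_plane_flush(plane);		/* hypothetical flush-to-hw */
	}

	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
		.atomic_async_check	= foo_plane_atomic_async_check,
		.atomic_async_update	= foo_plane_atomic_async_update,
	};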
 * * When disabling a CRTC this hook _must_ stall for the commit to diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h index 4579fac1080c..674599025d7d 100644 --- a/include/drm/drm_pci.h +++ b/include/drm/drm_pci.h @@ -43,13 +43,12 @@ struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align); void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah); -int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); -void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); +int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); +void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); #ifdef CONFIG_PCI int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); -int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master); #else static inline int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, @@ -57,12 +56,6 @@ static inline int drm_get_pci_dev(struct pci_dev *pdev, { return -ENOSYS; } - -static inline int drm_pci_set_busid(struct drm_device *dev, - struct drm_master *master) -{ - return -ENOSYS; -} #endif #define DRM_PCIE_SPEED_25 1 diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 9ab3e7044812..73f90f9d057f 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -233,11 +233,9 @@ struct drm_plane_funcs { * This is the legacy entry point to update a property attached to the * plane. * - * Drivers implementing atomic modeset should use - * drm_atomic_helper_plane_set_property() to implement this hook. - * * This callback is optional if the driver does not support any legacy - * driver-private properties. + * driver-private properties. For atomic drivers it is not used because + * property handling is done entirely in the DRM core. * * RETURNS: * @@ -392,6 +390,22 @@ struct drm_plane_funcs { */ void (*atomic_print_state)(struct drm_printer *p, const struct drm_plane_state *state); + + /** + * @format_mod_supported: + * + * This optional hook is used by the DRM core to determine if the given + * format/modifier combination is valid for the plane. This allows the + * DRM core to generate the correct format bitmask (which formats apply to + * which modifier). + * + * Returns: + * + * True if the given modifier is valid for that format on the plane. + * False otherwise.
+ */ + bool (*format_mod_supported)(struct drm_plane *plane, uint32_t format, + uint64_t modifier); }; /** @@ -487,6 +501,9 @@ struct drm_plane { unsigned int format_count; bool format_default; + uint64_t *modifiers; + unsigned int modifier_count; + struct drm_crtc *crtc; struct drm_framebuffer *fb; @@ -527,13 +544,14 @@ struct drm_plane { #define obj_to_plane(x) container_of(x, struct drm_plane, base) -__printf(8, 9) +__printf(9, 10) int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, uint32_t possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, + const uint64_t *format_modifiers, enum drm_plane_type type, const char *name, ...); int drm_plane_init(struct drm_device *dev, diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index 619868dc08d8..37355c623e6c 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -273,6 +273,8 @@ int drm_property_replace_global_blob(struct drm_device *dev, const void *data, struct drm_mode_object *obj_holds_id, struct drm_property *prop_holds_id); +bool drm_property_replace_blob(struct drm_property_blob **blob, + struct drm_property_blob *new_blob); struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob); void drm_property_blob_put(struct drm_property_blob *blob); diff --git a/include/drm/drm_scdc_helper.h b/include/drm/drm_scdc_helper.h index c25122bb490a..f92eb2094d6b 100644 --- a/include/drm/drm_scdc_helper.h +++ b/include/drm/drm_scdc_helper.h @@ -131,31 +131,6 @@ static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset, bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter); -/** - * drm_scdc_set_scrambling - enable scrambling - * @adapter: I2C adapter for DDC channel - * @enable: bool to indicate if scrambling is to be enabled/disabled - * - * Writes the TMDS config register over SCDC channel, and: - * enables scrambling when enable = 1 - * disables scrambling when enable = 0 - * - * Returns: - * True if scrambling is set/reset successfully, false otherwise. - */ bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable); - -/** - * drm_scdc_set_high_tmds_clock_ratio - set TMDS clock ratio - * @adapter: I2C adapter for DDC channel - * @set: ret or reset the high clock ratio - * - * Writes to the TMDS config register over SCDC channel, and: - * sets TMDS clock ratio to 1/40 when set = 1 - * sets TMDS clock ratio to 1/10 when set = 0 - * - * Returns: - * True if write is successful, false otherwise. - */ bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set); #endif diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h index 2d36538e4a17..6d9adbb46293 100644 --- a/include/drm/drm_simple_kms_helper.h +++ b/include/drm/drm_simple_kms_helper.h @@ -122,6 +122,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev, struct drm_simple_display_pipe *pipe, const struct drm_simple_display_pipe_funcs *funcs, const uint32_t *formats, unsigned int format_count, + const uint64_t *format_modifiers, struct drm_connector *connector); #endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */ diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index 89976da542b1..c00fee539822 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -28,6 +28,8 @@ #include "linux/dma-fence.h" +struct drm_syncobj_cb; + /** * struct drm_syncobj - sync object. 
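The new @format_mod_supported hook works hand in hand with the format_modifiers array that drm_universal_plane_init() now takes: the core builds its format/modifier advertisement by querying the hook for every pair. A hedged sketch with a hypothetical foo driver and an arbitrary, linear-only modifier policy (the array must be terminated with DRM_FORMAT_MOD_INVALID):

	static const uint32_t foo_formats[] = {
		DRM_FORMAT_XRGB8888,
		DRM_FORMAT_ARGB8888,
	};

	static const uint64_t foo_modifiers[] = {
		DRM_FORMAT_MOD_LINEAR,
		DRM_FORMAT_MOD_INVALID,		/* terminator */
	};

	static bool foo_format_mod_supported(struct drm_plane *plane,
					     uint32_t format, uint64_t modifier)
	{
		/* hypothetical rule: this hw scans out any listed format, linearly */
		return modifier == DRM_FORMAT_MOD_LINEAR;
	}

	static const struct drm_plane_funcs foo_plane_funcs = {
		/* ...the usual update/disable/destroy hooks... */
		.format_mod_supported = foo_format_mod_supported,
	};

	static int foo_plane_init(struct drm_device *dev, struct drm_plane *plane)
	{
		/* the modifier list is the new argument before the plane type */
		return drm_universal_plane_init(dev, plane, 1, &foo_plane_funcs,
						foo_formats, ARRAY_SIZE(foo_formats),
						foo_modifiers,
						DRM_PLANE_TYPE_PRIMARY, "primary");
	}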
 * @@ -43,15 +45,47 @@ struct drm_syncobj { /** * @fence: * NULL or a pointer to the fence bound to this object. + * + * This field should not be used directly. Use drm_syncobj_fence_get + * and drm_syncobj_replace_fence instead. */ struct dma_fence *fence; /** + * @cb_list: + * List of callbacks to call when the fence gets replaced + */ + struct list_head cb_list; + /** + * @lock: + * locks cb_list and write-locks fence. + */ + spinlock_t lock; + /** * @file: * a file backing for this syncobj. */ struct file *file; }; +typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb); + +/** + * struct drm_syncobj_cb - callback for drm_syncobj_add_callback + * @node: used by drm_syncobj_add_callback to append this struct to + * syncobj::cb_list + * @func: drm_syncobj_func_t to call + * + * This struct will be initialized by drm_syncobj_add_callback; additional + * data can be passed along by embedding drm_syncobj_cb in another struct. + * The callback will get called the next time drm_syncobj_replace_fence is + * called. + */ +struct drm_syncobj_cb { + struct list_head node; + drm_syncobj_func_t func; +}; + void drm_syncobj_free(struct kref *kref); /** @@ -77,13 +111,30 @@ drm_syncobj_put(struct drm_syncobj *obj) kref_put(&obj->refcount, drm_syncobj_free); } +static inline struct dma_fence * +drm_syncobj_fence_get(struct drm_syncobj *syncobj) +{ + struct dma_fence *fence; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&syncobj->fence); + rcu_read_unlock(); + + return fence; +} + struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, u32 handle); +void drm_syncobj_add_callback(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb, + drm_syncobj_func_t func); +void drm_syncobj_remove_callback(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb); void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, struct dma_fence *fence); -int drm_syncobj_fence_get(struct drm_file *file_private, - u32 handle, - struct dma_fence **fence); +int drm_syncobj_find_fence(struct drm_file *file_private, + u32 handle, + struct dma_fence **fence); void drm_syncobj_free(struct kref *kref); #endif diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index 4cde47332dfa..7fba9efe4951 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -168,8 +168,7 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); void drm_crtc_vblank_off(struct drm_crtc *crtc); void drm_crtc_vblank_reset(struct drm_crtc *crtc); void drm_crtc_vblank_on(struct drm_crtc *crtc); -void drm_vblank_cleanup(struct drm_device *dev); -u32 drm_accurate_vblank_count(struct drm_crtc *crtc); +u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, unsigned int pipe, int *max_error, diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h index d137b16ee873..83346ddb9dba 100644 --- a/include/drm/tinydrm/mipi-dbi.h +++ b/include/drm/tinydrm/mipi-dbi.h @@ -62,11 +62,7 @@ mipi_dbi_from_tinydrm(struct tinydrm_device *tdev) } int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi, - struct gpio_desc *dc, - const struct drm_simple_display_pipe_funcs *pipe_funcs, - struct drm_driver *driver, - const struct drm_display_mode *mode, - unsigned int rotation); + struct gpio_desc *dc); int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi, const struct drm_simple_display_pipe_funcs *pipe_funcs, struct drm_driver *driver, diff --git a/include/drm/tinydrm/tinydrm-helpers.h
b/include/drm/tinydrm/tinydrm-helpers.h index 9b9b6cfe3ba5..d554ded60ee9 100644 --- a/include/drm/tinydrm/tinydrm-helpers.h +++ b/include/drm/tinydrm/tinydrm-helpers.h @@ -43,6 +43,8 @@ void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb, void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr, struct drm_framebuffer *fb, struct drm_clip_rect *clip, bool swap); +void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_clip_rect *clip); struct backlight_device *tinydrm_of_find_backlight(struct device *dev); int tinydrm_enable_backlight(struct backlight_device *backlight); diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h index 00b800df4d1b..4774fe3d4273 100644 --- a/include/drm/tinydrm/tinydrm.h +++ b/include/drm/tinydrm/tinydrm.h @@ -56,9 +56,7 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe) .gem_prime_vmap = drm_gem_cma_prime_vmap, \ .gem_prime_vunmap = drm_gem_cma_prime_vunmap, \ .gem_prime_mmap = drm_gem_cma_prime_mmap, \ - .dumb_create = drm_gem_cma_dumb_create, \ - .dumb_map_offset = drm_gem_cma_dumb_map_offset, \ - .dumb_destroy = drm_gem_dumb_destroy + .dumb_create = drm_gem_cma_dumb_create /** * TINYDRM_MODE - tinydrm display mode diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 990d529f823c..5f821a9b3a1f 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -229,13 +229,14 @@ struct ttm_mem_type_manager_func { * struct ttm_mem_type_manager member debug * * @man: Pointer to a memory type manager. - * @prefix: Prefix to be used in printout to identify the caller. + * @printer: Printer to emit the state of the memory type manager to. * * This function is called to print out the state of the memory * type manager to aid debugging of out-of-memory conditions. * It may not be called from within atomic context. */ - void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); + void (*debug)(struct ttm_mem_type_manager *man, + struct drm_printer *printer); }; /** @@ -472,6 +473,23 @@ struct ttm_bo_driver { */ unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo, unsigned long page_offset); + + /** + * Read/write memory buffers for ptrace access + * + * @bo: the BO to access + * @offset: the offset from the start of the BO + * @buf: pointer to source/destination buffer + * @len: number of bytes to copy + * @write: whether to read (0) from or write (non-0) to BO + * + * If successful, this function should return the number of + * bytes copied, -EIO otherwise. If the number of bytes + * returned is < len, the function may be called again with + * the remainder of the buffer to copy.
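To make that contract concrete, here is a hedged sketch of the new hook for a driver whose buffers stay kernel-mapped; foo_bo_vaddr() is hypothetical, only the signature and the partial-copy return convention come from the comment above:

	static int foo_bo_access_memory(struct ttm_buffer_object *bo,
					unsigned long offset, void *buf,
					int len, int write)
	{
		void *vaddr = foo_bo_vaddr(bo);	/* hypothetical kernel mapping */

		if (!vaddr)
			return -EIO;

		/* clamp to the BO; ptrace will call again with the remainder */
		len = min_t(unsigned long, len,
			    bo->num_pages * PAGE_SIZE - offset);

		if (write)
			memcpy(vaddr + offset, buf, len);
		else
			memcpy(buf, vaddr + offset, len);

		return len;
	}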
+ */ + int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset, + void *buf, int len, int write); }; /** diff --git a/include/linux/acpi.h b/include/linux/acpi.h index c749eef1daa1..27b4b6615263 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1209,6 +1209,7 @@ static inline bool acpi_has_watchdog(void) { return false; } #endif #ifdef CONFIG_ACPI_SPCR_TABLE +extern bool qdf2400_e44_present; int parse_spcr(bool earlycon); #else static inline int parse_spcr(bool earlycon) { return 0; } diff --git a/include/linux/ata.h b/include/linux/ata.h index e65ae4b2ed48..c7a353825450 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -60,7 +60,8 @@ enum { ATA_ID_FW_REV = 23, ATA_ID_PROD = 27, ATA_ID_MAX_MULTSECT = 47, - ATA_ID_DWORD_IO = 48, + ATA_ID_DWORD_IO = 48, /* before ATA-8 */ + ATA_ID_TRUSTED = 48, /* ATA-8 and later */ ATA_ID_CAPABILITY = 49, ATA_ID_OLD_PIO_MODES = 51, ATA_ID_OLD_DMA_MODES = 52, @@ -889,6 +890,13 @@ static inline bool ata_id_has_dword_io(const u16 *id) return id[ATA_ID_DWORD_IO] & (1 << 0); } +static inline bool ata_id_has_trusted(const u16 *id) +{ + if (ata_id_major_version(id) <= 7) + return false; + return id[ATA_ID_TRUSTED] & (1 << 0); +} + static inline bool ata_id_has_unload(const u16 *id) { if (ata_id_major_version(id) >= 7 && diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h new file mode 100644 index 000000000000..b4ade198007d --- /dev/null +++ b/include/linux/blk-mq-rdma.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_BLK_MQ_RDMA_H +#define _LINUX_BLK_MQ_RDMA_H + +struct blk_mq_tag_set; +struct ib_device; + +int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set, + struct ib_device *dev, int first_vec); + +#endif /* _LINUX_BLK_MQ_RDMA_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 25f6a0cb27d3..2a5d52fa90f5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -568,7 +568,6 @@ struct request_queue { #if defined(CONFIG_BLK_DEV_BSG) bsg_job_fn *bsg_job_fn; - int bsg_job_size; struct bsg_class_device bsg_dev; #endif diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index e34dde2da0ef..637a20cfb237 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -24,6 +24,7 @@ #define _BLK_BSG_ #include <linux/blkdev.h> +#include <scsi/scsi_request.h> struct request; struct device; @@ -37,6 +38,7 @@ struct bsg_buffer { }; struct bsg_job { + struct scsi_request sreq; struct device *dev; struct request *req; diff --git a/include/linux/compat.h b/include/linux/compat.h index 5a6a109b4a50..3fc433303d7a 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -27,7 +27,7 @@ #endif #ifndef __SC_DELOUSE -#define __SC_DELOUSE(t,v) ((t)(unsigned long)(v)) +#define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v)) #endif #define COMPAT_SYSCALL_DEFINE0(name) \ @@ -365,10 +365,10 @@ asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd, compat_ulong_t vlen, u32 pos_low, u32 pos_high); asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd, const struct compat_iovec __user *vec, - compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags); + compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags); asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd, const struct compat_iovec __user *vec, - compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags); + compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags); #ifdef __ARCH_WANT_COMPAT_SYS_PREADV64 asmlinkage long compat_sys_preadv64(unsigned long fd, @@ -382,6 +382,18 @@ asmlinkage long 
compat_sys_pwritev64(unsigned long fd, unsigned long vlen, loff_t pos); #endif +#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2 +asmlinkage long compat_sys_preadv64v2(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos, rwf_t flags); +#endif + +#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2 +asmlinkage long compat_sys_pwritev64v2(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos, rwf_t flags); +#endif + asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int); asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 109b1460ecee..e786337cf5a7 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -543,7 +543,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s # define __compiletime_error_fallback(condition) do { } while (0) #endif -#define __compiletime_assert(condition, msg, prefix, suffix) \ +#ifdef __OPTIMIZE__ +# define __compiletime_assert(condition, msg, prefix, suffix) \ do { \ bool __cond = !(condition); \ extern void prefix ## suffix(void) __compiletime_error(msg); \ @@ -551,6 +552,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s prefix ## suffix(); \ __compiletime_error_fallback(__cond); \ } while (0) +#else +# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) +#endif #define _compiletime_assert(condition, msg, prefix, suffix) \ __compiletime_assert(condition, msg, prefix, suffix) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 1473455d0341..4f2b3b2076c4 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -549,46 +549,29 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); *---------------------------------------------------------------*/ #define DM_NAME "device-mapper" -#ifdef CONFIG_PRINTK -extern struct ratelimit_state dm_ratelimit_state; - -#define dm_ratelimit() __ratelimit(&dm_ratelimit_state) -#else -#define dm_ratelimit() 0 -#endif +#define DM_RATELIMIT(pr_func, fmt, ...) \ +do { \ + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&rs)) \ + pr_func(DM_FMT(fmt), ##__VA_ARGS__); \ +} while (0) #define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n" #define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__) #define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__) -#define DMERR_LIMIT(fmt, ...) \ -do { \ - if (dm_ratelimit()) \ - DMERR(fmt, ##__VA_ARGS__); \ -} while (0) - +#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__) #define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__) -#define DMWARN_LIMIT(fmt, ...) \ -do { \ - if (dm_ratelimit()) \ - DMWARN(fmt, ##__VA_ARGS__); \ -} while (0) - +#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__) #define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__) -#define DMINFO_LIMIT(fmt, ...) \ -do { \ - if (dm_ratelimit()) \ - DMINFO(fmt, ##__VA_ARGS__); \ -} while (0) +#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__) #ifdef CONFIG_DM_DEBUG #define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__) -#define DMDEBUG_LIMIT(fmt, ...) \ -do { \ - if (dm_ratelimit()) \ - DMDEBUG(fmt, ##__VA_ARGS__); \ -} while (0) +#define DMDEBUG_LIMIT(fmt, ...)
DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__) #else #define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__) #define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__) diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h index 277ab9af9ac2..100cb4343763 100644 --- a/include/linux/devpts_fs.h +++ b/include/linux/devpts_fs.h @@ -19,6 +19,7 @@ struct pts_fs_info; +struct vfsmount *devpts_mntget(struct file *, struct pts_fs_info *); struct pts_fs_info *devpts_acquire(struct file *); void devpts_release(struct pts_fs_info *); @@ -32,6 +33,15 @@ void *devpts_get_priv(struct dentry *); /* unlink */ void devpts_pty_kill(struct dentry *); +/* in pty.c */ +int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags); + +#else +static inline int +ptm_open_peer(struct file *master, struct tty_struct *tty, int flags) +{ + return -EIO; +} #endif diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 0a186c4f3981..171895072435 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -338,6 +338,19 @@ dma_fence_is_signaled(struct dma_fence *fence) } /** + * __dma_fence_is_later - return if f1 is chronologically later than f2 + * @f1: [in] the first fence's seqno + * @f2: [in] the second fence's seqno from the same context + * + * Returns true if f1 is chronologically later than f2. Both fences must be + * from the same context, since a seqno is not common across contexts. + */ +static inline bool __dma_fence_is_later(u32 f1, u32 f2) +{ + return (int)(f1 - f2) > 0; +} + +/** * dma_fence_is_later - return if f1 is chronologically later than f2 * @f1: [in] the first fence from the same context * @f2: [in] the second fence from the same context @@ -351,7 +364,7 @@ static inline bool dma_fence_is_later(struct dma_fence *f1, if (WARN_ON(f1->context != f2->context)) return false; - return (int)(f1->seqno - f2->seqno) > 0; + return __dma_fence_is_later(f1->seqno, f2->seqno); } /** @@ -418,8 +431,8 @@ int dma_fence_get_status(struct dma_fence *fence); static inline void dma_fence_set_error(struct dma_fence *fence, int error) { - BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); - BUG_ON(error >= 0 || error < -MAX_ERRNO); + WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); + WARN_ON(error >= 0 || error < -MAX_ERRNO); fence->error = error; } diff --git a/include/linux/fs.h b/include/linux/fs.h index 6e1fd5d21248..2625fc47c7e5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -72,6 +72,8 @@ extern int leases_enable, lease_break_time; extern int sysctl_protected_symlinks; extern int sysctl_protected_hardlinks; +typedef __kernel_rwf_t rwf_t; + struct buffer_head; typedef int (get_block_t)(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); @@ -907,9 +909,9 @@ static inline struct file *get_file(struct file *f) /* Page cache limit. The filesystems should put that into their s_maxbytes limits, otherwise bad things can happen in VM. 
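The factored-out __dma_fence_is_later() above leans on signed wraparound arithmetic, which is worth spelling out: casting the u32 difference to int keeps the comparison correct across seqno wraparound, as long as the two compared values were produced less than 2^31 apart on the same context. A quick worked check, written as a comment since it documents behaviour rather than new API:

	/*
	 * Wraparound behaviour of __dma_fence_is_later(f1, f2),
	 * i.e. (int)(f1 - f2) > 0:
	 *
	 *   __dma_fence_is_later(2, 1)           -> true  ((int)1 > 0)
	 *   __dma_fence_is_later(1, 0xffffffff)  -> true  ((int)2 > 0, mod 2^32)
	 *   __dma_fence_is_later(0xffffffff, 1)  -> false ((int)-2 < 0)
	 *
	 * so seqnos may wrap freely, provided the two fences being
	 * compared are never more than 2^31 apart.
	 */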
*/ #if BITS_PER_LONG==32 -#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1) +#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT) #elif BITS_PER_LONG==64 -#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) +#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX) #endif #define FL_POSIX 1 @@ -1758,9 +1760,9 @@ extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *) extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t vfs_readv(struct file *, const struct iovec __user *, - unsigned long, loff_t *, int); + unsigned long, loff_t *, rwf_t); extern ssize_t vfs_writev(struct file *, const struct iovec __user *, - unsigned long, loff_t *, int); + unsigned long, loff_t *, rwf_t); extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, loff_t, size_t, unsigned int); extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, @@ -2874,9 +2876,9 @@ extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, - int flags); + rwf_t flags); ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos, - int flags); + rwf_t flags); /* fs/block_dev.c */ extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to); @@ -3143,7 +3145,7 @@ static inline int iocb_flags(struct file *file) return res; } -static inline int kiocb_set_rw_flags(struct kiocb *ki, int flags) +static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) { if (unlikely(flags & ~RWF_SUPPORTED)) return -EOPNOTSUPP; diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 29d4385903d4..6dfec4d638df 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -38,12 +38,13 @@ struct device_node; struct gen_pool; /** - * Allocation callback function type definition + * typedef genpool_algo_t: Allocation callback function type definition * @map: Pointer to bitmap * @size: The bitmap size in bits * @start: The bitnumber to start searching at * @nr: The number of zeroed bits we're looking for - * @data: optional additional data used by @genpool_algo_t + * @data: optional additional data used by the callback + * @pool: the pool being allocated from */ typedef unsigned long (*genpool_algo_t)(unsigned long *map, unsigned long size, diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 497f2b3a5a62..97f1b465d04f 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -105,6 +105,11 @@ struct st_sensor_fullscale { struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX]; }; +struct st_sensor_sim { + u8 addr; + u8 value; +}; + /** * struct st_sensor_bdu - ST sensor device block data update * @addr: address of the register. @@ -197,6 +202,7 @@ struct st_sensor_transfer_function { * @bdu: Block data update register. * @das: Data Alignment Selection register. * @drdy_irq: Data ready register of the sensor. + * @sim: SPI serial interface mode register of the sensor. * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. * @bootime: samples to discard when sensor passing from power-down to power-up. 
*/ @@ -213,6 +219,7 @@ struct st_sensor_settings { struct st_sensor_bdu bdu; struct st_sensor_das das; struct st_sensor_data_ready_irq drdy_irq; + struct st_sensor_sim sim; bool multi_read_bit; unsigned int bootime; }; diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index d68bec297a45..c380daa40c0e 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -535,7 +535,7 @@ struct iio_buffer_setup_ops { * @scan_timestamp: [INTERN] set if any buffers have requested timestamp * @scan_index_timestamp:[INTERN] cache of the index to the timestamp * @trig: [INTERN] current device trigger (buffer modes) - * @trig_readonly [INTERN] mark the current trigger immutable + * @trig_readonly: [INTERN] mark the current trigger immutable * @pollfunc: [DRIVER] function run on trigger being received * @pollfunc_event: [DRIVER] function run on events trigger being received * @channels: [DRIVER] channel specification structure table diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index ea08302f2d7b..7142d8d6e470 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -144,8 +144,8 @@ void devm_iio_trigger_unregister(struct device *dev, /** * iio_trigger_set_immutable() - set an immutable trigger on destination * - * @indio_dev - IIO device structure containing the device - * @trig - trigger to assign to device + * @indio_dev: IIO device structure containing the device + * @trig: trigger to assign to device * **/ int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig); diff --git a/include/linux/init_task.h b/include/linux/init_task.h index a2f6707e9fc0..0e849715e5be 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -126,17 +126,11 @@ extern struct group_info init_groups; #endif #ifdef CONFIG_PREEMPT_RCU -#define INIT_TASK_RCU_TREE_PREEMPT() \ - .rcu_blocked_node = NULL, -#else -#define INIT_TASK_RCU_TREE_PREEMPT(tsk) -#endif -#ifdef CONFIG_PREEMPT_RCU #define INIT_TASK_RCU_PREEMPT(tsk) \ .rcu_read_lock_nesting = 0, \ .rcu_read_unlock_special.s = 0, \ .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ - INIT_TASK_RCU_TREE_PREEMPT() + .rcu_blocked_node = NULL, #else #define INIT_TASK_RCU_PREEMPT(tsk) #endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index a2fddddb0d60..59ba11661b6e 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -18,6 +18,7 @@ #include <linux/atomic.h> #include <asm/ptrace.h> #include <asm/irq.h> +#include <asm/sections.h> /* * These correspond to the IORESOURCE_IRQ_* defines in @@ -726,7 +727,6 @@ extern int early_irq_init(void); extern int arch_probe_nr_irqs(void); extern int arch_early_irq_init(void); -#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) /* * We want to know which function is an entrypoint of a hardirq or a softirq. 
*/ @@ -734,16 +734,4 @@ extern int arch_early_irq_init(void); #define __softirq_entry \ __attribute__((__section__(".softirqentry.text"))) -/* Limits of hardirq entrypoints */ -extern char __irqentry_text_start[]; -extern char __irqentry_text_end[]; -/* Limits of softirq entrypoints */ -extern char __softirqentry_text_start[]; -extern char __softirqentry_text_end[]; - -#else -#define __irq_entry -#define __softirq_entry -#endif - #endif diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 2cb54adc4a33..176f7569d874 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -240,7 +240,7 @@ struct iommu_device { struct list_head list; const struct iommu_ops *ops; struct fwnode_handle *fwnode; - struct device dev; + struct device *dev; }; int iommu_device_register(struct iommu_device *iommu); @@ -265,6 +265,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu, iommu->fwnode = fwnode; } +static inline struct iommu_device *dev_to_iommu_device(struct device *dev) +{ + return (struct iommu_device *)dev_get_drvdata(dev); +} + #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ @@ -589,6 +594,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu, { } +static inline struct iommu_device *dev_to_iommu_device(struct device *dev) +{ + return NULL; +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 77d427974f57..bae11c7e7bf3 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -61,6 +61,7 @@ extern int memblock_debug; #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK #define __init_memblock __meminit #define __initdata_memblock __meminitdata +void memblock_discard(void); #else #define __init_memblock #define __initdata_memblock @@ -74,8 +75,6 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, int nid, ulong flags); phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align); -phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); -phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr); void memblock_allow_resize(void); int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); int memblock_add(phys_addr_t base, phys_addr_t size); @@ -110,6 +109,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags, void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, phys_addr_t *out_end); +void __memblock_free_early(phys_addr_t base, phys_addr_t size); +void __memblock_free_late(phys_addr_t base, phys_addr_t size); + /** * for_each_mem_range - iterate through memblock areas from type_a and not * included in type_b. Or just type_a if type_b is NULL. 
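The two get_allocated_memblock_*_regions_info() accessors are folded into a single memblock_discard() entry point. A hedged sketch of the intended call-site shape; foo_arch_free_bootmem() is a hypothetical arch hook (the real caller lives in the page allocator's boot path):

	void __init foo_arch_free_bootmem(void)
	{
		/* ...hand all free memblock ranges over to the buddy allocator... */

	#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
		/* one call now frees memblock's own region arrays */
		memblock_discard();
	#endif
	}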
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 3914e3dd6168..9b15a4bcfa77 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -484,7 +484,8 @@ bool mem_cgroup_oom_synchronize(bool wait); extern int do_swap_account; #endif -void lock_page_memcg(struct page *page); +struct mem_cgroup *lock_page_memcg(struct page *page); +void __unlock_page_memcg(struct mem_cgroup *memcg); void unlock_page_memcg(struct page *page); static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, @@ -809,7 +810,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) { } -static inline void lock_page_memcg(struct page *page) +static inline struct mem_cgroup *lock_page_memcg(struct page *page) +{ + return NULL; +} + +static inline void __unlock_page_memcg(struct mem_cgroup *memcg) { } diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h index ce9230af09c2..ae5b663836d0 100644 --- a/include/linux/mfd/da9052/da9052.h +++ b/include/linux/mfd/da9052/da9052.h @@ -45,6 +45,12 @@ #define DA9052_ADC_TJUNC 8 #define DA9052_ADC_VBBAT 9 +/* TSI channel has its own 4 channel mux */ +#define DA9052_ADC_TSI_XP 70 +#define DA9052_ADC_TSI_XN 71 +#define DA9052_ADC_TSI_YP 72 +#define DA9052_ADC_TSI_YN 73 + #define DA9052_IRQ_DCIN 0 #define DA9052_IRQ_VBUS 1 #define DA9052_IRQ_DCINREM 2 diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h index 5010f978725c..76780ea8849c 100644 --- a/include/linux/mfd/da9052/reg.h +++ b/include/linux/mfd/da9052/reg.h @@ -690,7 +690,10 @@ /* TSI CONTROL REGISTER B BITS */ #define DA9052_TSICONTB_ADCREF 0X80 #define DA9052_TSICONTB_TSIMAN 0X40 -#define DA9052_TSICONTB_TSIMUX 0X30 +#define DA9052_TSICONTB_TSIMUX_XP 0X00 +#define DA9052_TSICONTB_TSIMUX_YP 0X10 +#define DA9052_TSICONTB_TSIMUX_XN 0X20 +#define DA9052_TSICONTB_TSIMUX_YN 0X30 #define DA9052_TSICONTB_TSISEL3 0X08 #define DA9052_TSICONTB_TSISEL2 0X04 #define DA9052_TSICONTB_TSISEL1 0X02 @@ -705,8 +708,14 @@ /* TSI CO-ORDINATE LSB RESULT REGISTER BITS */ #define DA9052_TSILSB_PENDOWN 0X40 #define DA9052_TSILSB_TSIZL 0X30 +#define DA9052_TSILSB_TSIZL_SHIFT 4 +#define DA9052_TSILSB_TSIZL_BITS 2 #define DA9052_TSILSB_TSIYL 0X0C +#define DA9052_TSILSB_TSIYL_SHIFT 2 +#define DA9052_TSILSB_TSIYL_BITS 2 #define DA9052_TSILSB_TSIXL 0X03 +#define DA9052_TSILSB_TSIXL_SHIFT 0 +#define DA9052_TSILSB_TSIXL_BITS 2 /* TSI Z MEASUREMENT MSB RESULT REGISTER BIT */ #define DA9052_TSIZMSB_TSIZM 0XFF diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index b54517c05e9a..c8a63e148a98 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -428,6 +428,12 @@ enum mlx4_steer_type { MLX4_NUM_STEERS }; +enum mlx4_resource_usage { + MLX4_RES_USAGE_NONE, + MLX4_RES_USAGE_DRIVER, + MLX4_RES_USAGE_USER_VERBS, +}; + enum { MLX4_NUM_FEXCH = 64 * 1024, }; @@ -749,6 +755,7 @@ struct mlx4_cq { } tasklet_ctx; int reset_notify_added; struct list_head reset_notify; + u8 usage; }; struct mlx4_qp { @@ -758,6 +765,7 @@ struct mlx4_qp { atomic_t refcount; struct completion free; + u8 usage; }; struct mlx4_srq { @@ -1121,7 +1129,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, unsigned vector, int collapsed, int timestamp_en); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, - int *base, u8 flags); + int *base, u8 flags, u8 usage); void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int 
cnt); int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); @@ -1418,7 +1426,7 @@ int mlx4_get_phys_port_id(struct mlx4_dev *dev); int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); -int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); +int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage); void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index f31a0b5377e1..c13d71deaeca 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -290,6 +290,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, + MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22, MLX5_EVENT_TYPE_PPS_EVENT = 0x25, MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, @@ -305,6 +306,10 @@ enum mlx5_event { }; enum { + MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1, +}; + +enum { MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1, MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4, MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5, @@ -968,7 +973,7 @@ enum mlx5_cap_type { MLX5_CAP_ATOMIC, MLX5_CAP_ROCE, MLX5_CAP_IPOIB_OFFLOADS, - MLX5_CAP_EOIB_OFFLOADS, + MLX5_CAP_IPOIB_ENHANCED_OFFLOADS, MLX5_CAP_FLOW_TABLE, MLX5_CAP_ESWITCH_FLOW_TABLE, MLX5_CAP_ESWITCH, @@ -1011,6 +1016,10 @@ enum mlx5_mcam_feature_groups { MLX5_GET(per_protocol_networking_offload_caps,\ mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) +#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \ + MLX5_GET(per_protocol_networking_offload_caps,\ + mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap) + #define MLX5_CAP_ROCE(mdev, cap) \ MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index df6ce59a1f95..b3fc9d586a9f 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -162,6 +162,13 @@ enum dbg_rsc_type { MLX5_DBG_RSC_CQ, }; +enum port_state_policy { + MLX5_POLICY_DOWN = 0, + MLX5_POLICY_UP = 1, + MLX5_POLICY_FOLLOW = 2, + MLX5_POLICY_INVALID = 0xffffffff +}; + struct mlx5_field_desc { struct dentry *dent; int i; @@ -185,6 +192,7 @@ enum mlx5_dev_event { MLX5_DEV_EVENT_GUID_CHANGE, MLX5_DEV_EVENT_CLIENT_REREG, MLX5_DEV_EVENT_PPS, + MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, }; enum mlx5_port_status { @@ -291,7 +299,7 @@ struct mlx5_cmd { struct semaphore pages_sem; int mode; struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; - struct pci_pool *pool; + struct dma_pool *pool; struct mlx5_cmd_debug dbg; struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; int checksum_disabled; @@ -410,6 +418,7 @@ enum mlx5_res_type { MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ, MLX5_RES_SRQ = 3, MLX5_RES_XSRQ = 4, + MLX5_RES_XRQ = 5, }; struct mlx5_core_rsc_common { @@ -525,6 +534,9 @@ struct mlx5_mkey_table { struct mlx5_vf_context { int enabled; + u64 port_guid; + u64 node_guid; + enum port_state_policy policy; }; struct mlx5_core_sriov { @@ -534,7 +546,6 @@ struct mlx5_core_sriov { }; struct mlx5_irq_info { - cpumask_var_t mask; char name[MLX5_MAX_IRQ_NAME]; }; @@ -597,7 +608,6 @@ struct mlx5_port_module_event_stats { struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; - struct msix_entry *msix_arr; struct mlx5_irq_info *irq_info; /* pages stuff */ @@ -673,9 +683,7 @@ enum mlx5_device_state { }; enum mlx5_interface_state { - MLX5_INTERFACE_STATE_DOWN = BIT(0), - 
MLX5_INTERFACE_STATE_UP = BIT(1), - MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2), + MLX5_INTERFACE_STATE_UP = BIT(0), }; enum mlx5_pci_status { @@ -842,13 +850,6 @@ struct mlx5_pas { u8 log_sz; }; -enum port_state_policy { - MLX5_POLICY_DOWN = 0, - MLX5_POLICY_UP = 1, - MLX5_POLICY_FOLLOW = 2, - MLX5_POLICY_INVALID = 0xffffffff -}; - enum phy_port_state { MLX5_AAA_111 }; @@ -1091,7 +1092,7 @@ enum { }; enum { - MAX_UMR_CACHE_ENTRY = 20, + MR_CACHE_LAST_STD_ENTRY = 20, MLX5_IMR_MTT_CACHE_ENTRY, MLX5_IMR_KSM_CACHE_ENTRY, MAX_MR_CACHE_ENTRIES @@ -1185,4 +1186,10 @@ enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; +static inline const struct cpumask * +mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) +{ + return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector); +} + #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 3030121b4746..b7338a21c780 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -200,6 +200,7 @@ enum { MLX5_CMD_OP_QUERY_SQ = 0x907, MLX5_CMD_OP_CREATE_RQ = 0x908, MLX5_CMD_OP_MODIFY_RQ = 0x909, + MLX5_CMD_OP_SET_DELAY_DROP_PARAMS = 0x910, MLX5_CMD_OP_DESTROY_RQ = 0x90a, MLX5_CMD_OP_QUERY_RQ = 0x90b, MLX5_CMD_OP_CREATE_RMP = 0x90c, @@ -294,8 +295,10 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 inner_tcp_dport[0x1]; u8 inner_tcp_flags[0x1]; u8 reserved_at_37[0x9]; + u8 reserved_at_40[0x1a]; + u8 bth_dst_qp[0x1]; - u8 reserved_at_40[0x40]; + u8 reserved_at_5b[0x25]; }; struct mlx5_ifc_flow_table_prop_layout_bits { @@ -431,7 +434,9 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 reserved_at_100[0xc]; u8 inner_ipv6_flow_label[0x14]; - u8 reserved_at_120[0xe0]; + u8 reserved_at_120[0x28]; + u8 bth_dst_qp[0x18]; + u8 reserved_at_160[0xa0]; }; struct mlx5_ifc_cmd_pas_bits { @@ -599,7 +604,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 rss_ind_tbl_cap[0x4]; u8 reg_umr_sq[0x1]; u8 scatter_fcs[0x1]; - u8 reserved_at_1a[0x1]; + u8 enhanced_multi_pkt_send_wqe[0x1]; u8 tunnel_lso_const_out_ip_id[0x1]; u8 reserved_at_1c[0x2]; u8 tunnel_statless_gre[0x1]; @@ -840,7 +845,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 retransmission_q_counters[0x1]; u8 reserved_at_183[0x1]; u8 modify_rq_counter_set_id[0x1]; - u8 reserved_at_185[0x1]; + u8 rq_delay_drop[0x1]; u8 max_qp_cnt[0xa]; u8 pkey_table_size[0x10]; @@ -857,7 +862,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 pcam_reg[0x1]; u8 local_ca_ack_delay[0x5]; u8 port_module_event[0x1]; - u8 reserved_at_1b1[0x1]; + u8 enhanced_error_q_counters[0x1]; u8 ports_check[0x1]; u8 reserved_at_1b3[0x1]; u8 disable_link_up[0x1]; @@ -873,7 +878,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_tc[0x4]; u8 reserved_at_1d0[0x1]; u8 dcbx[0x1]; - u8 reserved_at_1d2[0x3]; + u8 general_notification_event[0x1]; + u8 reserved_at_1d3[0x2]; u8 fpga[0x1]; u8 rol_s[0x1]; u8 rol_g[0x1]; @@ -1016,7 +1022,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; - u8 reserved_at_3e1[0xa]; + u8 disable_local_lb[0x1]; + u8 reserved_at_3e2[0x9]; u8 log_max_vlan_list[0x5]; u8 reserved_at_3f0[0x3]; u8 log_max_current_mc_list[0x5]; @@ -1187,7 +1194,8 @@ struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { u8 reserved_at_c0[0x12]; u8 cnp_dscp[0x6]; - u8 reserved_at_d8[0x5]; + u8 reserved_at_d8[0x4]; + u8 cnp_prio_mode[0x1]; u8 cnp_802p_prio[0x3]; u8 reserved_at_e0[0x720]; @@ -2015,6 +2023,10 @@ enum { }; enum { + MLX5_QPC_OFFLOAD_TYPE_RNDV = 0x1, +}; + +enum { MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0, 
MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1, }; @@ -2057,7 +2069,8 @@ struct mlx5_ifc_qpc_bits { u8 st[0x8]; u8 reserved_at_10[0x3]; u8 pm_state[0x2]; - u8 reserved_at_15[0x7]; + u8 reserved_at_15[0x3]; + u8 offload_type[0x4]; u8 end_padding_mode[0x2]; u8 reserved_at_1e[0x2]; @@ -2437,7 +2450,7 @@ struct mlx5_ifc_sqc_bits { u8 cd_master[0x1]; u8 fre[0x1]; u8 flush_in_error_en[0x1]; - u8 reserved_at_4[0x1]; + u8 allow_multi_pkt_send_wqe[0x1]; u8 min_wqe_inline_mode[0x3]; u8 state[0x4]; u8 reg_umr[0x1]; @@ -2515,7 +2528,7 @@ enum { struct mlx5_ifc_rqc_bits { u8 rlky[0x1]; - u8 reserved_at_1[0x1]; + u8 delay_drop_en[0x1]; u8 scatter_fcs[0x1]; u8 vsd[0x1]; u8 mem_rq_type[0x4]; @@ -2562,7 +2575,9 @@ struct mlx5_ifc_rmpc_bits { struct mlx5_ifc_nic_vport_context_bits { u8 reserved_at_0[0x5]; u8 min_wqe_inline_mode[0x3]; - u8 reserved_at_8[0x17]; + u8 reserved_at_8[0x15]; + u8 disable_mc_local_lb[0x1]; + u8 disable_uc_local_lb[0x1]; u8 roce_en[0x1]; u8 arm_change_event[0x1]; @@ -3000,7 +3015,7 @@ struct mlx5_ifc_xrqc_bits { struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; - u8 reserved_at_180[0x880]; + u8 reserved_at_180[0x280]; struct mlx5_ifc_wq_bits wq; }; @@ -3947,7 +3962,47 @@ struct mlx5_ifc_query_q_counter_out_bits { u8 local_ack_timeout_err[0x20]; - u8 reserved_at_320[0x4e0]; + u8 reserved_at_320[0xa0]; + + u8 resp_local_length_error[0x20]; + + u8 req_local_length_error[0x20]; + + u8 resp_local_qp_error[0x20]; + + u8 local_operation_error[0x20]; + + u8 resp_local_protection[0x20]; + + u8 req_local_protection[0x20]; + + u8 resp_cqe_error[0x20]; + + u8 req_cqe_error[0x20]; + + u8 req_mw_binding[0x20]; + + u8 req_bad_response[0x20]; + + u8 req_remote_invalid_request[0x20]; + + u8 resp_remote_invalid_request[0x20]; + + u8 req_remote_access_errors[0x20]; + + u8 resp_remote_access_errors[0x20]; + + u8 req_remote_operation_errors[0x20]; + + u8 req_transport_retries_exceeded[0x20]; + + u8 cq_overflow[0x20]; + + u8 resp_cqe_flush_error[0x20]; + + u8 req_cqe_flush_error[0x20]; + + u8 reserved_at_620[0x1e0]; }; struct mlx5_ifc_query_q_counter_in_bits { @@ -5229,7 +5284,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits { }; struct mlx5_ifc_modify_nic_vport_field_select_bits { - u8 reserved_at_0[0x16]; + u8 reserved_at_0[0x14]; + u8 disable_uc_local_lb[0x1]; + u8 disable_mc_local_lb[0x1]; u8 node_guid[0x1]; u8 port_guid[0x1]; u8 min_inline[0x1]; @@ -5847,6 +5904,28 @@ struct mlx5_ifc_destroy_rq_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_set_delay_drop_params_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x10]; + u8 delay_drop_timeout[0x10]; +}; + +struct mlx5_ifc_set_delay_drop_params_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_destroy_rmp_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index f378dc0e7eaf..66d19b611fe4 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -560,6 +560,9 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, u32 *out, int outlen); +int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev, + u32 timeout_usec); + int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); void 
mlx5_init_qp_table(struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h index 1cde0fd53f90..24ff23e27c8a 100644 --- a/include/linux/mlx5/srq.h +++ b/include/linux/mlx5/srq.h @@ -38,6 +38,7 @@ enum { MLX5_SRQ_FLAG_ERR = (1 << 0), MLX5_SRQ_FLAG_WQ_SIG = (1 << 1), + MLX5_SRQ_FLAG_RNDV = (1 << 2), }; struct mlx5_srq_attr { @@ -56,6 +57,10 @@ struct mlx5_srq_attr { u32 user_index; u64 db_record; __be64 *pas; + u32 tm_log_list_size; + u32 tm_next_tag; + u32 tm_hw_phase_cnt; + u32 tm_sw_phase_cnt; }; struct mlx5_core_dev; diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 656c70b65dd2..aaa0bb9e7655 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -114,5 +114,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, int vf, struct mlx5_hca_vport_context *req); - +int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable); +int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status); #endif /* __MLX5_VPORT_H__ */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 46b9ac5e8569..c1f6c95f3496 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1260,6 +1260,7 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src, void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); int follow_pte_pmd(struct mm_struct *mm, unsigned long address, + unsigned long *start, unsigned long *end, pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 7f384bb62d8e..3cadee0a3508 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -487,14 +487,12 @@ struct mm_struct { /* numa_scan_seq prevents two threads setting pte_numa */ int numa_scan_seq; #endif -#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) /* * An operation with batched TLB flushing is going on. Anything that * can move process memory needs to flush the TLB when moving a * PROT_NONE or PROT_NUMA mapped page. */ - bool tlb_flush_pending; -#endif + atomic_t tlb_flush_pending; #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH /* See flush_tlb_batched_pending() */ bool tlb_flush_batched; @@ -522,46 +520,60 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm) return mm->cpu_vm_mask_var; } -#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) +struct mmu_gather; +extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void tlb_finish_mmu(struct mmu_gather *tlb, + unsigned long start, unsigned long end); + /* * Memory barriers to keep this state in sync are graciously provided by * the page table locks, outside of which no page table modifications happen. - * The barriers below prevent the compiler from re-ordering the instructions - * around the memory barriers that are already present in the code. + * The barriers are used to ensure that tlb_flush_pending updates, which + * happen while the lock is not taken, and the PTE updates, which happen + * while the lock is taken, are serialized. */ static inline bool mm_tlb_flush_pending(struct mm_struct *mm) { - barrier(); - return mm->tlb_flush_pending; + return atomic_read(&mm->tlb_flush_pending) > 0; +} + +/* + * Returns true if more than one TLB batching thread is running in parallel.
+ */ +static inline bool mm_tlb_flush_nested(struct mm_struct *mm) +{ + return atomic_read(&mm->tlb_flush_pending) > 1; +} + +static inline void init_tlb_flush_pending(struct mm_struct *mm) +{ + atomic_set(&mm->tlb_flush_pending, 0); } -static inline void set_tlb_flush_pending(struct mm_struct *mm) + +static inline void inc_tlb_flush_pending(struct mm_struct *mm) { - mm->tlb_flush_pending = true; + atomic_inc(&mm->tlb_flush_pending); /* - * Guarantee that the tlb_flush_pending store does not leak into the + * Guarantee that the tlb_flush_pending increase does not leak into the * critical section updating the page tables */ smp_mb__before_spinlock(); } + /* Clearing is done after a TLB flush, which also provides a barrier. */ -static inline void clear_tlb_flush_pending(struct mm_struct *mm) -{ - barrier(); - mm->tlb_flush_pending = false; -} -#else -static inline bool mm_tlb_flush_pending(struct mm_struct *mm) -{ - return false; -} -static inline void set_tlb_flush_pending(struct mm_struct *mm) -{ -} -static inline void clear_tlb_flush_pending(struct mm_struct *mm) +static inline void dec_tlb_flush_pending(struct mm_struct *mm) { + /* + * Guarantee that the tlb_flush_pending does not leak into the + * critical section, since we must order the PTE change and changes to + * the pending TLB flush indication. We could have relied on TLB flush + * as a memory barrier, but this behavior is not clearly documented. + */ + smp_mb__before_atomic(); + atomic_dec(&mm->tlb_flush_pending); } -#endif struct vm_fault; diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index c91b3bcd158f..7b2e31b1745a 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -95,17 +95,6 @@ struct mmu_notifier_ops { pte_t pte); /* - * Before this is invoked any secondary MMU is still ok to - * read/write to the page previously pointed to by the Linux - * pte because the page hasn't been freed yet and it won't be - * freed until this returns. If required set_page_dirty has to - * be called internally to this method. - */ - void (*invalidate_page)(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long address); - - /* * invalidate_range_start() and invalidate_range_end() must be * paired and are called only when the mmap_sem and/or the * locks protecting the reverse maps are held.
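The inc/dec pair above encodes an ordering contract rather than a lock. A hedged sketch of the pattern a batched-flush path is expected to follow; foo_batched_unmap() is hypothetical, and in-tree callers pick up the increment/decrement through tlb_gather_mmu()/tlb_finish_mmu() rather than calling the helpers directly:

	static void foo_batched_unmap(struct mm_struct *mm)
	{
		inc_tlb_flush_pending(mm);	/* visible before any PTE change */

		/* ...clear PTEs under the page table lock... */

		flush_tlb_mm(mm);		/* flush before dropping the count */
		dec_tlb_flush_pending(mm);

		/*
		 * A racing thread that takes the page table lock sees
		 * mm_tlb_flush_pending(mm) > 0 and knows it must flush
		 * itself before trusting the old TLB contents.
		 */
	}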
If the subsystem @@ -220,8 +209,6 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm, unsigned long address); extern void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte); -extern void __mmu_notifier_invalidate_page(struct mm_struct *mm, - unsigned long address); extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end); extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, @@ -268,13 +255,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm, __mmu_notifier_change_pte(mm, address, pte); } -static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, - unsigned long address) -{ - if (mm_has_notifiers(mm)) - __mmu_notifier_invalidate_page(mm, address); -} - static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end) { @@ -442,11 +422,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm, { } -static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, - unsigned long address) -{ -} - static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end) { diff --git a/include/linux/net.h b/include/linux/net.h index dda2cc939a53..ebeb48c92005 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -37,7 +37,7 @@ struct net; /* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected. - * Eventually all flags will be in sk->sk_wq_flags. + * Eventually all flags will be in sk->sk_wq->flags. */ #define SOCKWQ_ASYNC_NOSPACE 0 #define SOCKWQ_ASYNC_WAITDATA 1 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 779b23595596..c99ba7914c0a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3866,6 +3866,8 @@ int netdev_walk_all_upper_dev_rcu(struct net_device *dev, bool netdev_has_upper_dev_all_rcu(struct net_device *dev, struct net_device *upper_dev); +bool netdev_has_any_upper_dev(struct net_device *dev); + void *netdev_lower_get_next_private(struct net_device *dev, struct list_head **iter); void *netdev_lower_get_next_private_rcu(struct net_device *dev, diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 8aa01fd859fb..a36abe2da13e 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -168,6 +168,14 @@ extern int sysctl_hardlockup_all_cpu_backtrace; #define sysctl_softlockup_all_cpu_backtrace 0 #define sysctl_hardlockup_all_cpu_backtrace 0 #endif + +#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ + defined(CONFIG_HARDLOCKUP_DETECTOR) +void watchdog_update_hrtimer_threshold(u64 period); +#else +static inline void watchdog_update_hrtimer_threshold(u64 period) { } +#endif + extern bool is_hardlockup(void); struct ctl_table; extern int proc_watchdog(struct ctl_table *, int , diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 6c8c5d8041b7..2591878c1d48 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -346,6 +346,11 @@ struct nvme_fc_remote_port { * indicating an FC transport Aborted status. * Entrypoint is Mandatory. * + * @defer_rcv: Called by the transport to signal the LLDD that it has + * begun processing of a previously received NVME CMD IU. The LLDD + * is now free to re-use the rcv buffer associated with the + * nvmefc_tgt_fcp_req.
+ * * @max_hw_queues: indicates the maximum number of hw queues the LLDD * supports for cpu affinitization. * Value is Mandatory. Must be at least 1. @@ -846,6 +851,8 @@ struct nvmet_fc_target_template { struct nvmefc_tgt_fcp_req *fcpreq); void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq); + void (*defer_rcv)(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *fcpreq); u32 max_hw_queues; u16 max_sgl_segments; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 25d8225dbd04..8efff888bd9b 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -254,7 +254,7 @@ enum { NVME_CTRL_VWC_PRESENT = 1 << 0, NVME_CTRL_OACS_SEC_SUPP = 1 << 0, NVME_CTRL_OACS_DIRECTIVES = 1 << 5, - NVME_CTRL_OACS_DBBUF_SUPP = 1 << 7, + NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8, }; struct nvme_lbaf { diff --git a/include/linux/oom.h b/include/linux/oom.h index 8a266e2be5a6..76aac4ce39bc 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -6,6 +6,8 @@ #include <linux/types.h> #include <linux/nodemask.h> #include <uapi/linux/oom.h> +#include <linux/sched/coredump.h> /* MMF_* */ +#include <linux/mm.h> /* VM_FAULT* */ struct zonelist; struct notifier_block; @@ -63,6 +65,26 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk) return tsk->signal->oom_mm; } +/* + * Checks whether a page fault on the given mm is still reliable. + * This is no longer true if the oom reaper started to reap the + * address space which is reflected by MMF_UNSTABLE flag set in + * the mm. At that moment any !shared mapping would lose the content + * and could cause a memory corruption (zero pages instead of the + * original content). + * + * User should call this before establishing a page table entry for + * a !shared mapping and under the proper page table lock. + * + * Return 0 when the PF is safe, VM_FAULT_SIGBUS otherwise. + */ +static inline int check_stable_address_space(struct mm_struct *mm) +{ + if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags))) + return VM_FAULT_SIGBUS; + return 0; +} + extern unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages); diff --git a/include/linux/pci.h b/include/linux/pci.h index 4869e66dd659..da05e5db06ac 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -188,6 +188,8 @@ enum pci_dev_flags { * the direct_complete optimization.
*/ PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11), + /* Don't use Relaxed Ordering for TLPs directed at this device */ + PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 12), }; enum pci_irq_reroute_variant { @@ -729,6 +731,7 @@ struct pci_driver { void (*shutdown) (struct pci_dev *dev); int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */ const struct pci_error_handlers *err_handler; + const struct attribute_group **groups; struct device_driver driver; struct pci_dynids dynids; }; @@ -1067,6 +1070,7 @@ void pcie_flr(struct pci_dev *dev); int __pci_reset_function(struct pci_dev *dev); int __pci_reset_function_locked(struct pci_dev *dev); int pci_reset_function(struct pci_dev *dev); +int pci_reset_function_locked(struct pci_dev *dev); int pci_try_reset_function(struct pci_dev *dev); int pci_probe_reset_slot(struct pci_slot *slot); int pci_reset_slot(struct pci_slot *slot); @@ -1125,6 +1129,7 @@ bool pci_check_pme_status(struct pci_dev *dev); void pci_pme_wakeup_bus(struct pci_bus *bus); void pci_d3cold_enable(struct pci_dev *dev); void pci_d3cold_disable(struct pci_dev *dev); +bool pcie_relaxed_ordering_enabled(struct pci_dev *dev); /* PCI Virtual Channel */ int pci_save_vc_state(struct pci_dev *dev); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a3b873fc59e4..718ba163c1b9 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -147,9 +147,6 @@ struct hw_perf_event { struct list_head cqm_groups_entry; struct list_head cqm_group_entry; }; - struct { /* itrace */ - int itrace_started; - }; struct { /* amd_power */ u64 pwr_acc; u64 ptsc; @@ -310,8 +307,8 @@ struct pmu { * Notification that the event was mapped or unmapped. Called * in the context of the mapping task. */ - void (*event_mapped) (struct perf_event *event); /*optional*/ - void (*event_unmapped) (struct perf_event *event); /*optional*/ + void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ + void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ /* * Flags for ->add()/->del()/ ->start()/->stop(). 
There are @@ -541,6 +538,7 @@ struct swevent_hlist { #define PERF_ATTACH_GROUP 0x02 #define PERF_ATTACH_TASK 0x04 #define PERF_ATTACH_TASK_DATA 0x08 +#define PERF_ATTACH_ITRACE 0x10 struct perf_cgroup; struct ring_buffer; @@ -864,6 +862,7 @@ extern int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size); extern void *perf_get_aux(struct perf_output_handle *handle); extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags); +extern void perf_event_itrace_started(struct perf_event *event); extern int perf_pmu_register(struct pmu *pmu, const char *name, int type); extern void perf_pmu_unregister(struct pmu *pmu); @@ -944,6 +943,8 @@ struct perf_sample_data { struct perf_regs regs_intr; u64 stack_user_size; + + u64 phys_addr; } ____cacheline_aligned; /* default value for data source */ @@ -1201,7 +1202,7 @@ extern void perf_event_init(void); extern void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, struct pt_regs *regs, struct hlist_head *head, int rctx, - struct task_struct *task); + struct task_struct *task, struct perf_event *event); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags diff --git a/include/linux/pid.h b/include/linux/pid.h index 4d179316e431..719582744a2e 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -8,7 +8,9 @@ enum pid_type PIDTYPE_PID, PIDTYPE_PGID, PIDTYPE_SID, - PIDTYPE_MAX + PIDTYPE_MAX, + /* only valid to __task_pid_nr_ns() */ + __PIDTYPE_TGID }; /* diff --git a/include/linux/platform_data/omap_drm.h b/include/linux/platform_data/omap_drm.h deleted file mode 100644 index f4e4a237ebd2..000000000000 --- a/include/linux/platform_data/omap_drm.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * DRM/KMS platform data for TI OMAP platforms - * - * Copyright (C) 2012 Texas Instruments - * Author: Rob Clark <rob.clark@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef __PLATFORM_DATA_OMAP_DRM_H__ -#define __PLATFORM_DATA_OMAP_DRM_H__ - -/* - * Optional platform data to configure the default configuration of which - * pipes/overlays/CRTCs are used.. if this is not provided, then instead the - * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to - * one manager, with priority given to managers that are connected to - * detected devices. Remaining overlays are used as video planes. This - * should be a good default behavior for most cases, but yet there still - * might be times when you wish to do something different. 
- */ -struct omap_kms_platform_data { - /* overlays to use as CRTCs: */ - int ovl_cnt; - const int *ovl_ids; - - /* overlays to use as video planes: */ - int pln_cnt; - const int *pln_ids; - - int mgr_cnt; - const int *mgr_ids; - - int dev_cnt; - const char **dev_names; -}; - -struct omap_drm_platform_data { - uint32_t omaprev; - struct omap_kms_platform_data *kms_pdata; -}; - -#endif /* __PLATFORM_DATA_OMAP_DRM_H__ */ diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h index 79b0e4cdb814..f8274b0c6888 100644 --- a/include/linux/platform_data/st_sensors_pdata.h +++ b/include/linux/platform_data/st_sensors_pdata.h @@ -17,10 +17,12 @@ * Available only for accelerometer and pressure sensors. * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). * @open_drain: set the interrupt line to be open drain if possible. + * @spi_3wire: enable spi-3wire mode. */ struct st_sensors_platform_data { u8 drdy_int_pin; bool open_drain; + bool spi_3wire; }; #endif /* ST_SENSORS_PDATA_H */ diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index d8c97ec8a8e6..37b4bb2545b3 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -436,9 +436,9 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, __PTR_RING_PEEK_CALL_v; \ }) -static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp) +static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) { - return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp); + return kcalloc(size, sizeof(void *), gfp); } static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) @@ -582,7 +582,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, * In particular if you consume ring in interrupt or BH context, you must * disable interrupts/BH when doing so. */ -static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, +static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, + unsigned int nrings, int size, gfp_t gfp, void (*destroy)(void *)) { @@ -590,7 +591,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, void ***queues; int i; - queues = kmalloc(nrings * sizeof *queues, gfp); + queues = kmalloc_array(nrings, sizeof(*queues), gfp); if (!queues) goto noqueues; diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index f816fc72b51e..96f1baf62ab8 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -58,8 +58,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func); void call_rcu_bh(struct rcu_head *head, rcu_callback_t func); void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); void synchronize_sched(void); -void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); -void synchronize_rcu_tasks(void); void rcu_barrier_tasks(void); #ifdef CONFIG_PREEMPT_RCU @@ -105,11 +103,13 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); +extern int rcu_scheduler_active __read_mostly; void rcu_sched_qs(void); void rcu_bh_qs(void); void rcu_check_callbacks(int user); void rcu_report_dead(unsigned int cpu); void rcu_cpu_starting(unsigned int cpu); +void rcutree_migrate_callbacks(int cpu); #ifdef CONFIG_RCU_STALL_COMMON void rcu_sysrq_start(void); @@ -164,8 +164,6 @@ static inline void rcu_init_nohz(void) { } * macro rather than an inline function to avoid #include hell. 
*/ #ifdef CONFIG_TASKS_RCU -#define TASKS_RCU(x) x -extern struct srcu_struct tasks_rcu_exit_srcu; #define rcu_note_voluntary_context_switch_lite(t) \ do { \ if (READ_ONCE((t)->rcu_tasks_holdout)) \ @@ -176,10 +174,17 @@ extern struct srcu_struct tasks_rcu_exit_srcu; rcu_all_qs(); \ rcu_note_voluntary_context_switch_lite(t); \ } while (0) +void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); +void synchronize_rcu_tasks(void); +void exit_tasks_rcu_start(void); +void exit_tasks_rcu_finish(void); #else /* #ifdef CONFIG_TASKS_RCU */ -#define TASKS_RCU(x) do { } while (0) #define rcu_note_voluntary_context_switch_lite(t) do { } while (0) #define rcu_note_voluntary_context_switch(t) rcu_all_qs() +#define call_rcu_tasks call_rcu_sched +#define synchronize_rcu_tasks synchronize_sched +static inline void exit_tasks_rcu_start(void) { } +static inline void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU */ /** diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 5becbbccb998..b3dbf9502fd0 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -116,13 +116,11 @@ static inline void rcu_irq_exit_irqson(void) { } static inline void rcu_irq_enter_irqson(void) { } static inline void rcu_irq_exit(void) { } static inline void exit_rcu(void) { } - -#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) -extern int rcu_scheduler_active __read_mostly; +#ifdef CONFIG_SRCU void rcu_scheduler_starting(void); -#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */ +#else /* #ifndef CONFIG_SRCU */ static inline void rcu_scheduler_starting(void) { } -#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */ +#endif /* #else #ifndef CONFIG_SRCU */ static inline void rcu_end_inkernel_boot(void) { } static inline bool rcu_is_watching(void) { return true; } diff --git a/include/linux/reservation.h b/include/linux/reservation.h index 156cfd330b66..21fc84d82d41 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -254,6 +254,9 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, unsigned *pshared_count, struct dma_fence ***pshared); +int reservation_object_copy_fences(struct reservation_object *dst, + struct reservation_object *src); + long reservation_object_wait_timeout_rcu(struct reservation_object *obj, bool wait_all, bool intr, unsigned long timeout); diff --git a/include/linux/sched.h b/include/linux/sched.h index 8337e2db0bb2..e5fbce866073 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -589,9 +589,10 @@ struct task_struct { #ifdef CONFIG_TASKS_RCU unsigned long rcu_tasks_nvcsw; - bool rcu_tasks_holdout; - struct list_head rcu_tasks_holdout_list; + u8 rcu_tasks_holdout; + u8 rcu_tasks_idx; int rcu_tasks_idle_cpu; + struct list_head rcu_tasks_holdout_list; #endif /* #ifdef CONFIG_TASKS_RCU */ struct sched_info sched_info; @@ -1163,13 +1164,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk) return tsk->tgid; } -extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); - -static inline pid_t task_tgid_vnr(struct task_struct *tsk) -{ - return pid_vnr(task_tgid(tsk)); -} - /** * pid_alive - check that a task structure is not stale * @p: Task structure to be checked. 
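A usage sketch for the CONFIG_TASKS_RCU fallbacks added to rcupdate.h above (hypothetical caller; struct my_tramp and its publication via xchg() are assumptions, not part of this patch). Because the !CONFIG_TASKS_RCU build maps call_rcu_tasks()/synchronize_rcu_tasks() onto their sched-RCU equivalents, callers no longer need #ifdefs of their own:

	/* Hypothetical trampoline reclamation built on tasks RCU. */
	static void replace_trampoline(struct my_tramp **slot, struct my_tramp *new)
	{
		struct my_tramp *old = xchg(slot, new);

		/* Wait until no task can still be executing inside 'old'. */
		synchronize_rcu_tasks();
		kfree(old);
	}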
@@ -1185,23 +1179,6 @@ static inline int pid_alive(const struct task_struct *p) return p->pids[PIDTYPE_PID].pid != NULL; } -static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) -{ - pid_t pid = 0; - - rcu_read_lock(); - if (pid_alive(tsk)) - pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); - rcu_read_unlock(); - - return pid; -} - -static inline pid_t task_ppid_nr(const struct task_struct *tsk) -{ - return task_ppid_nr_ns(tsk, &init_pid_ns); -} - static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); @@ -1223,12 +1200,52 @@ static inline pid_t task_session_vnr(struct task_struct *tsk) return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); } +static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns); +} + +static inline pid_t task_tgid_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL); +} + +static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) +{ + pid_t pid = 0; + + rcu_read_lock(); + if (pid_alive(tsk)) + pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); + rcu_read_unlock(); + + return pid; +} + +static inline pid_t task_ppid_nr(const struct task_struct *tsk) +{ + return task_ppid_nr_ns(tsk, &init_pid_ns); +} + /* Obsolete, do not use: */ static inline pid_t task_pgrp_nr(struct task_struct *tsk) { return task_pgrp_nr_ns(tsk, &init_pid_ns); } +static inline char task_state_to_char(struct task_struct *task) +{ + const char stat_nam[] = TASK_STATE_TO_CHAR_STR; + unsigned long state = task->state; + + state = state ? __ffs(state) + 1 : 0; + + /* Make sure the string lines up properly with the number of task states: */ + BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1); + + return state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'; +} + /** * is_global_init - check if a task structure is init. Since init * is free to have sub-threads we need to check tgid. 
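As a usage sketch for the new task_state_to_char() helper above (hypothetical caller, not part of this patch), replacing the open-coded TASK_STATE_TO_CHAR_STR indexing that callers previously carried:

	/* Hypothetical one-line task dump using the new helper. */
	static void show_one_task(struct task_struct *p)
	{
		pr_info("%-15s %5d %c\n", p->comm, task_pid_nr(p),
			task_state_to_char(p));
	}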
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h index e0eaee54c5a4..5d58d49e9f87 100644 --- a/include/linux/sched/debug.h +++ b/include/linux/sched/debug.h @@ -6,6 +6,7 @@ */ struct task_struct; +struct pid_namespace; extern void dump_cpu_task(int cpu); @@ -34,7 +35,8 @@ extern void sched_show_task(struct task_struct *p); #ifdef CONFIG_SCHED_DEBUG struct seq_file; -extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); +extern void proc_sched_show_task(struct task_struct *p, + struct pid_namespace *ns, struct seq_file *m); extern void proc_sched_set_task(struct task_struct *p); #endif diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index c97e5f096927..79a2a744648d 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -30,7 +30,6 @@ extern int lockdep_tasklist_lock_is_held(void); extern asmlinkage void schedule_tail(struct task_struct *prev); extern void init_idle(struct task_struct *idle, int cpu); -extern void init_idle_bootup_task(struct task_struct *idle); extern int sched_fork(unsigned long clone_flags, struct task_struct *p); extern void sched_dead(struct task_struct *p); diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 7d065abc7a47..d7b6dab956ec 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -71,6 +71,14 @@ struct sched_domain_shared { atomic_t ref; atomic_t nr_busy_cpus; int has_idle_cores; + + /* + * Some variables from the most recent sd_lb_stats for this domain, + * used by wake_affine(). + */ + unsigned long nr_running; + unsigned long load; + unsigned long capacity; }; struct sched_domain { diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index 35226cd4efb0..8621ffdeecbf 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -193,7 +193,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) } static inline int skb_array_resize_multiple(struct skb_array **rings, - int nrings, int size, gfp_t gfp) + int nrings, unsigned int size, + gfp_t gfp) { BUILD_BUG_ON(offsetof(struct skb_array, ring)); return ptr_ring_resize_multiple((struct ptr_ring **)rings, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index dbe29b6c9bd6..d67a8182e5eb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -973,7 +973,23 @@ int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len); int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); -int skb_pad(struct sk_buff *skb, int pad); +int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error); + +/** + * skb_pad - zero pad the tail of an skb + * @skb: buffer to pad + * @pad: space to pad + * + * Ensure that a buffer is followed by a padding area that is zero + * filled. Used by network drivers which may DMA or transfer data + * beyond the buffer end onto the wire. + * + * May return error in out of memory cases. The skb is freed on error. 
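+ *
+ * An illustrative (hypothetical) caller of the underlying __skb_pad():
+ * passing free_on_error == false keeps the skb owned by the caller, so
+ * it can be requeued instead of dropped:
+ *
+ *	if (skb->len < ETH_ZLEN &&
+ *	    __skb_pad(skb, ETH_ZLEN - skb->len, false))
+ *		return -ENOBUFS;	(skb is still valid here)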
+ */ +static inline int skb_pad(struct sk_buff *skb, int pad) +{ + return __skb_pad(skb, pad, true); +} #define dev_kfree_skb(a) consume_skb(a) int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, @@ -2825,25 +2841,42 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) * skb_put_padto - increase size and pad an skbuff up to a minimal size * @skb: buffer to pad * @len: minimal length + * @free_on_error: free buffer on error * * Pads up a buffer to ensure the trailing bytes exist and are * blanked. If the buffer already contains sufficient data it * is untouched. Otherwise it is extended. Returns zero on - * success. The skb is freed on error. + * success. The skb is freed on error if @free_on_error is true. */ -static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, + bool free_on_error) { unsigned int size = skb->len; if (unlikely(size < len)) { len -= size; - if (skb_pad(skb, len)) + if (__skb_pad(skb, len, free_on_error)) return -ENOMEM; __skb_put(skb, len); } return 0; } +/** + * skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. + */ +static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + return __skb_put_padto(skb, len, true); +} + static inline int skb_add_data(struct sk_buff *skb, struct iov_iter *from, int copy) { diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index d9510e8522d4..ef018a6e4985 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -130,12 +130,6 @@ do { \ #define smp_mb__before_spinlock() smp_wmb() #endif -/** - * raw_spin_unlock_wait - wait until the spinlock gets unlocked - * @lock: the spinlock in question. - */ -#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) - #ifdef CONFIG_DEBUG_SPINLOCK extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock) @@ -369,31 +363,6 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock) raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ }) -/** - * spin_unlock_wait - Interpose between successive critical sections - * @lock: the spinlock whose critical sections are to be interposed. - * - * Semantically this is equivalent to a spin_lock() immediately - * followed by a spin_unlock(). However, most architectures have - * more efficient implementations in which the spin_unlock_wait() - * cannot block concurrent lock acquisition, and in some cases - * where spin_unlock_wait() does not write to the lock variable. - * Nevertheless, spin_unlock_wait() can have high overhead, so if - * you feel the need to use it, please check to see if there is - * a better way to get your job done. - * - * The ordering guarantees provided by spin_unlock_wait() are: - * - * 1. All accesses preceding the spin_unlock_wait() happen before - * any accesses in later critical sections for this same lock. - * 2. All accesses following the spin_unlock_wait() happen after - * any accesses in earlier critical sections for this same lock. 
- */ -static __always_inline void spin_unlock_wait(spinlock_t *lock) -{ - raw_spin_unlock_wait(&lock->rlock); -} - static __always_inline int spin_is_locked(spinlock_t *lock) { return raw_spin_is_locked(&lock->rlock); diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 0d9848de677d..612fb530af41 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -26,11 +26,6 @@ #ifdef CONFIG_DEBUG_SPINLOCK #define arch_spin_is_locked(x) ((x)->slock == 0) -static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) -{ - smp_cond_load_acquire(&lock->slock, VAL); -} - static inline void arch_spin_lock(arch_spinlock_t *lock) { lock->slock = 0; @@ -73,7 +68,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) #else /* DEBUG_SPINLOCK */ #define arch_spin_is_locked(lock) ((void)(lock), 0) -#define arch_spin_unlock_wait(lock) do { barrier(); (void)(lock); } while (0) /* for sched/core.c and kernel_lock.c: */ # define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) # define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index cfbfc540cafc..261471f407a5 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -87,4 +87,17 @@ static inline void srcu_barrier(struct srcu_struct *sp) synchronize_srcu(sp); } +/* Defined here to avoid size increase for non-torture kernels. */ +static inline void srcu_torture_stats_print(struct srcu_struct *sp, + char *tt, char *tf) +{ + int idx; + + idx = READ_ONCE(sp->srcu_idx) & 0x1; + pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n", + tt, tf, idx, + READ_ONCE(sp->srcu_lock_nesting[!idx]), + READ_ONCE(sp->srcu_lock_nesting[idx])); +} + #endif diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 42973f787e7e..a949f4f9e4d7 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -104,8 +104,6 @@ struct srcu_struct { #define SRCU_STATE_SCAN1 1 #define SRCU_STATE_SCAN2 2 -void process_srcu(struct work_struct *work); - #define __SRCU_STRUCT_INIT(name) \ { \ .sda = &name##_srcu_data, \ @@ -141,5 +139,6 @@ void process_srcu(struct work_struct *work); void synchronize_srcu_expedited(struct srcu_struct *sp); void srcu_barrier(struct srcu_struct *sp); +void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf); #endif diff --git a/include/linux/swait.h b/include/linux/swait.h index c1f9c62a8a50..4a4e180d0a35 100644 --- a/include/linux/swait.h +++ b/include/linux/swait.h @@ -169,4 +169,59 @@ do { \ __ret; \ }) +#define __swait_event_idle(wq, condition) \ + (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule()) + +/** + * swait_event_idle - wait without system load contribution + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_IDLE) until the @condition evaluates to + * true. The @condition is checked each time the waitqueue @wq is woken up. + * + * This function is mostly used when a kthread or workqueue waits for some + * condition and doesn't want to contribute to system load. Signals are + * ignored. 
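+ *
+ * Illustrative (hypothetical) kthread loop; my_swq, work_ready and
+ * do_work() are assumed names:
+ *
+ *	while (!kthread_should_stop()) {
+ *		swait_event_idle(my_swq, READ_ONCE(work_ready) ||
+ *				 kthread_should_stop());
+ *		do_work();
+ *	}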
+ */ +#define swait_event_idle(wq, condition) \ +do { \ + if (condition) \ + break; \ + __swait_event_idle(wq, condition); \ +} while (0) + +#define __swait_event_idle_timeout(wq, condition, timeout) \ + ___swait_event(wq, ___wait_cond_timeout(condition), \ + TASK_IDLE, timeout, \ + __ret = schedule_timeout(__ret)) + +/** + * swait_event_idle_timeout - wait up to timeout without load contribution + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * @timeout: timeout at which we'll give up in jiffies + * + * The process is put to sleep (TASK_IDLE) until the @condition evaluates to + * true. The @condition is checked each time the waitqueue @wq is woken up. + * + * This function is mostly used when a kthread or workqueue waits for some + * condition and doesn't want to contribute to system load. Signals are + * ignored. + * + * Returns: + * 0 if the @condition evaluated to %false after the @timeout elapsed, + * 1 if the @condition evaluated to %true after the @timeout elapsed, + * or the remaining jiffies (at least 1) if the @condition evaluated + * to %true before the @timeout elapsed. + */ +#define swait_event_idle_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + if (!___wait_cond_timeout(condition)) \ + __ret = __swait_event_idle_timeout(wq, \ + condition, timeout); \ + __ret; \ +}) + #endif /* _LINUX_SWAIT_H */ diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index 5726107963b2..0ad87c434ae6 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h @@ -43,12 +43,13 @@ struct sync_file { #endif wait_queue_head_t wq; + unsigned long flags; struct dma_fence *fence; struct dma_fence_cb cb; }; -#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS +#define POLL_ENABLED 0 struct sync_file *sync_file_create(struct dma_fence *fence); struct dma_fence *sync_file_get_fence(int fd); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 3cb15ea48aee..138c94535864 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -100,11 +100,12 @@ union bpf_attr; #define __MAP(n,...) 
__MAP##n(__VA_ARGS__) #define __SC_DECL(t, a) t a -#define __TYPE_IS_L(t) (__same_type((t)0, 0L)) -#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL)) -#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL)) +#define __TYPE_AS(t, v) __same_type((__force t)0, v) +#define __TYPE_IS_L(t) (__TYPE_AS(t, 0L)) +#define __TYPE_IS_UL(t) (__TYPE_AS(t, 0UL)) +#define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL)) #define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a -#define __SC_CAST(t, a) (t) a +#define __SC_CAST(t, a) (__force t) a #define __SC_ARGS(t, a) a #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long)) @@ -578,12 +579,12 @@ asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h); asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h, - int flags); + rwf_t flags); asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h); asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h, - int flags); + rwf_t flags); asmlinkage long sys_getcwd(char __user *buf, unsigned long size); asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode); asmlinkage long sys_chdir(const char __user *filename); diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 536c80ff7ad9..5012b524283d 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -508,9 +508,9 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, static inline void perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, u64 count, struct pt_regs *regs, void *head, - struct task_struct *task) + struct task_struct *task, struct perf_event *event) { - perf_tp_event(type, count, raw_data, size, regs, head, rctx, task); + perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event); } #endif diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index acdd6f915a8d..20ef8e6ec2db 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -156,7 +156,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n) } #ifdef CONFIG_COMPAT static __always_inline unsigned long __must_check -copy_in_user(void __user *to, const void *from, unsigned long n) +copy_in_user(void __user *to, const void __user *from, unsigned long n) { might_fault(); if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n)) diff --git a/include/linux/wait.h b/include/linux/wait.h index 5b74e36c0ca8..dc19880c02f5 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -757,6 +757,43 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *); __ret; \ }) +#define __wait_event_killable_timeout(wq_head, condition, timeout) \ + ___wait_event(wq_head, ___wait_cond_timeout(condition), \ + TASK_KILLABLE, 0, timeout, \ + __ret = schedule_timeout(__ret)) + +/** + * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses + * @wq_head: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * @timeout: timeout, in jiffies + * + * The process is put to sleep (TASK_KILLABLE) until the + * @condition evaluates to true or a kill signal is received. 
+ * The @condition is checked each time the waitqueue @wq_head is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + * Returns: + * 0 if the @condition evaluated to %false after the @timeout elapsed, + * 1 if the @condition evaluated to %true after the @timeout elapsed, + * the remaining jiffies (at least 1) if the @condition evaluated + * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was + * interrupted by a kill signal. + * + * Only kill signals interrupt this process. + */ +#define wait_event_killable_timeout(wq_head, condition, timeout) \ +({ \ + long __ret = timeout; \ + might_sleep(); \ + if (!___wait_cond_timeout(condition)) \ + __ret = __wait_event_killable_timeout(wq_head, \ + condition, timeout); \ + __ret; \ +}) + #define __wait_event_lock_irq(wq_head, condition, lock, cmd) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ diff --git a/include/media/vsp1.h b/include/media/vsp1.h index c837383b2013..68a8abe4fac5 100644 --- a/include/media/vsp1.h +++ b/include/media/vsp1.h @@ -34,11 +34,12 @@ struct vsp1_du_lif_config { unsigned int width; unsigned int height; - void (*callback)(void *); + void (*callback)(void *, bool); void *callback_data; }; -int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg); +int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index, + const struct vsp1_du_lif_config *cfg); struct vsp1_du_atomic_config { u32 pixelformat; @@ -50,10 +51,11 @@ struct vsp1_du_atomic_config { unsigned int zpos; }; -void vsp1_du_atomic_begin(struct device *dev); -int vsp1_du_atomic_update(struct device *dev, unsigned int rpf, +void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index); +int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index, + unsigned int rpf, const struct vsp1_du_atomic_config *cfg); -void vsp1_du_atomic_flush(struct device *dev); +void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index); int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt); void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt); diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 6df79e96a780..f44ff2476758 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -336,6 +336,16 @@ static inline void in6_dev_put(struct inet6_dev *idev) in6_dev_finish_destroy(idev); } +static inline void in6_dev_put_clear(struct inet6_dev **pidev) +{ + struct inet6_dev *idev = *pidev; + + if (idev) { + in6_dev_put(idev); + *pidev = NULL; + } +} + static inline void __in6_dev_put(struct inet6_dev *idev) { refcount_dec(&idev->refcnt); diff --git a/include/net/bonding.h b/include/net/bonding.h index b00508d22e0a..b2e68657a216 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -277,6 +277,11 @@ static inline bool bond_is_lb(const struct bonding *bond) BOND_MODE(bond) == BOND_MODE_ALB; } +static inline bool bond_needs_speed_duplex(const struct bonding *bond) +{ + return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond); +} + static inline bool bond_is_nondyn_tlb(const struct bonding *bond) { return (BOND_MODE(bond) == BOND_MODE_TLB) && diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 8ffd434676b7..71c72a939bf8 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -29,18 +29,18 @@ #include <linux/sched/signal.h> #include <net/ip.h> -#ifdef CONFIG_NET_RX_BUSY_POLL - -struct napi_struct; -extern unsigned int sysctl_net_busy_read 
__read_mostly; -extern unsigned int sysctl_net_busy_poll __read_mostly; - /* 0 - Reserved to indicate value not set * 1..NR_CPUS - Reserved for sender_cpu * NR_CPUS+1..~0 - Region available for NAPI IDs */ #define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1)) +#ifdef CONFIG_NET_RX_BUSY_POLL + +struct napi_struct; +extern unsigned int sysctl_net_busy_read __read_mostly; +extern unsigned int sysctl_net_busy_poll __read_mostly; + static inline bool net_busy_loop_on(void) { return sysctl_net_busy_poll; diff --git a/include/net/ip.h b/include/net/ip.h index 821cedcc8e73..0cf7f5a65fe6 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -352,7 +352,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, !forwarding) return dst_mtu(dst); - return min(dst->dev->mtu, IP_MAX_MTU); + return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); } static inline unsigned int ip_skb_dst_mtu(struct sock *sk, @@ -364,7 +364,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk, return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); } - return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU); + return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); } u32 ip_idents_reserve(u32 hash, int segs); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 1a88008cc6f5..af509f801084 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -70,6 +70,7 @@ struct fib6_node { __u16 fn_flags; int fn_sernum; struct rt6_info *rr_ptr; + struct rcu_head rcu; }; #ifndef CONFIG_IPV6_SUBTREES @@ -104,7 +105,7 @@ struct rt6_info { * the same cache line. */ struct fib6_table *rt6i_table; - struct fib6_node *rt6i_node; + struct fib6_node __rcu *rt6i_node; struct in6_addr rt6i_gateway; @@ -167,13 +168,40 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout) rt0->rt6i_flags |= RTF_EXPIRES; } +/* Function to safely get fn->sernum for passed in rt + * and store result in passed in cookie. + * Return true if we can get cookie safely, + * false if not + */ +static inline bool rt6_get_cookie_safe(const struct rt6_info *rt, + u32 *cookie) +{ + struct fib6_node *fn; + bool status = false; + + rcu_read_lock(); + fn = rcu_dereference(rt->rt6i_node); + + if (fn) { + *cookie = fn->fn_sernum; + status = true; + } + + rcu_read_unlock(); + return status; +} + static inline u32 rt6_get_cookie(const struct rt6_info *rt) { + u32 cookie = 0; + if (rt->rt6i_flags & RTF_PCPU || (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from)) rt = (struct rt6_info *)(rt->dst.from); - return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; + rt6_get_cookie_safe(rt, &cookie); + + return cookie; } static inline void ip6_rt_put(struct rt6_info *rt) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index b2b5419467cc..f8149ca192b4 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -5499,6 +5499,21 @@ static inline void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif, ieee80211_manage_rx_ba_offl(vif, addr, tid + IEEE80211_NUM_TIDS); } +/** + * ieee80211_rx_ba_timer_expired - stop a Rx BA session due to timeout + * + * Some device drivers do not offload AddBa/DelBa negotiation, but handle rx + * buffer reordering internally, and therefore also handle the session timer. + * + * Trigger the timeout flow, which sends a DelBa.
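+ *
+ * An illustrative (hypothetical) call from a driver's reorder-timer
+ * handler:
+ *
+ *	ieee80211_rx_ba_timer_expired(priv->vif, sta->addr, tid);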
+ * + * @vif: &struct ieee80211_vif pointer from the add_interface callback + * @addr: station mac address + * @tid: the rx tid + */ +void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif, + const u8 *addr, unsigned int tid); + /* Rate control API */ /** diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 1c123e2b2415..c1109cdbbfa6 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -101,6 +101,13 @@ struct Qdisc { spinlock_t busylock ____cacheline_aligned_in_smp; }; +static inline void qdisc_refcount_inc(struct Qdisc *qdisc) +{ + if (qdisc->flags & TCQ_F_BUILTIN) + return; + refcount_inc(&qdisc->refcnt); +} + static inline bool qdisc_is_running(const struct Qdisc *qdisc) { return (raw_read_seqcount(&qdisc->running) & 1) ? true : false; @@ -806,8 +813,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, old = *pold; *pold = new; if (old != NULL) { - qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); + unsigned int qlen = old->q.qlen; + unsigned int backlog = old->qstats.backlog; + qdisc_reset(old); + qdisc_tree_reduce_backlog(old, qlen, backlog); } sch_tree_unlock(sch); diff --git a/include/net/sock.h b/include/net/sock.h index 7c0632c7e870..aeeec62992ca 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -507,9 +507,7 @@ int sk_set_peek_off(struct sock *sk, int val); static inline int sk_peek_offset(struct sock *sk, int flags) { if (unlikely(flags & MSG_PEEK)) { - s32 off = READ_ONCE(sk->sk_peek_off); - if (off >= 0) - return off; + return READ_ONCE(sk->sk_peek_off); } return 0; diff --git a/include/net/tcp.h b/include/net/tcp.h index ada65e767b28..f642a39f9eee 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1004,9 +1004,7 @@ void tcp_get_default_congestion_control(char *name); void tcp_get_available_congestion_control(char *buf, size_t len); void tcp_get_allowed_congestion_control(char *buf, size_t len); int tcp_set_allowed_congestion_control(char *allowed); -int tcp_set_congestion_control(struct sock *sk, const char *name, bool load); -void tcp_reinit_congestion_control(struct sock *sk, - const struct tcp_congestion_ops *ca); +int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit); u32 tcp_slow_start(struct tcp_sock *tp, u32 acked); void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked); diff --git a/include/net/udp.h b/include/net/udp.h index cc8036987dcb..626c2d8a70c5 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -260,7 +260,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags, } void udp_v4_early_demux(struct sk_buff *skb); -void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); +bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); int udp_get_port(struct sock *sk, unsigned short snum, int (*saddr_cmp)(const struct sock *, const struct sock *)); @@ -366,12 +366,13 @@ static inline bool udp_skb_is_linear(struct sk_buff *skb) static inline int copy_linear_skb(struct sk_buff *skb, int len, int off, struct iov_iter *to) { - int n, copy = len - off; + int n; - n = copy_to_iter(skb->data + off, copy, to); - if (n == copy) + n = copy_to_iter(skb->data + off, len, to); + if (n == len) return 0; + iov_iter_revert(to, n); return -EFAULT; } diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index b73a14edc85e..ec5008cf5d51 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -172,7 +172,8 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union 
ib_gid *gid) (struct in6_addr *)gid); break; case AF_INET6: - memcpy(gid->raw, &((struct sockaddr_in6 *)addr)->sin6_addr, 16); + *(struct in6_addr *)&gid->raw = + ((struct sockaddr_in6 *)addr)->sin6_addr; break; default: return -EINVAL; @@ -304,7 +305,13 @@ static inline void rdma_get_ll_mac(struct in6_addr *addr, u8 *mac) static inline int rdma_is_multicast_addr(struct in6_addr *addr) { - return addr->s6_addr[0] == 0xff; + u32 ipv4_addr; + + if (addr->s6_addr[0] == 0xff) + return 1; + + memcpy(&ipv4_addr, addr->s6_addr + 12, 4); + return (ipv6_addr_v4mapped(addr) && ipv4_is_multicast(ipv4_addr)); } static inline void rdma_get_mcast_mac(struct in6_addr *addr, u8 *mac) diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h index 5519f31f043a..c124d515f7d5 100644 --- a/include/rdma/ib_hdrs.h +++ b/include/rdma/ib_hdrs.h @@ -193,8 +193,11 @@ static inline void put_ib_ateth_compare(u64 val, struct ib_atomic_eth *ateth) #define IB_LNH_MASK 3 #define IB_SC_MASK 0xf #define IB_SC_SHIFT 12 +#define IB_SC5_MASK 0x10 #define IB_SL_MASK 0xf #define IB_SL_SHIFT 4 +#define IB_LVER_MASK 0xf +#define IB_LVER_SHIFT 8 static inline u8 ib_get_lnh(struct ib_header *hdr) { @@ -206,6 +210,11 @@ static inline u8 ib_get_sc(struct ib_header *hdr) return ((be16_to_cpu(hdr->lrh[0]) >> IB_SC_SHIFT) & IB_SC_MASK); } +static inline bool ib_is_sc5(u16 sc5) +{ + return !!(sc5 & IB_SC5_MASK); +} + static inline u8 ib_get_sl(struct ib_header *hdr) { return ((be16_to_cpu(hdr->lrh[0]) >> IB_SL_SHIFT) & IB_SL_MASK); @@ -221,6 +230,27 @@ static inline u16 ib_get_slid(struct ib_header *hdr) return (be16_to_cpu(hdr->lrh[3])); } +static inline u8 ib_get_lver(struct ib_header *hdr) +{ + return (u8)((be16_to_cpu(hdr->lrh[0]) >> IB_LVER_SHIFT) & + IB_LVER_MASK); +} + +static inline u16 ib_get_len(struct ib_header *hdr) +{ + return (u16)(be16_to_cpu(hdr->lrh[2])); +} + +static inline u32 ib_get_qkey(struct ib_other_headers *ohdr) +{ + return be32_to_cpu(ohdr->u.ud.deth[0]); +} + +static inline u32 ib_get_sqpn(struct ib_other_headers *ohdr) +{ + return ((be32_to_cpu(ohdr->u.ud.deth[1])) & IB_QPN_MASK); +} + /* * BTH */ @@ -229,6 +259,14 @@ static inline u16 ib_get_slid(struct ib_header *hdr) #define IB_BTH_PAD_MASK 3 #define IB_BTH_PKEY_MASK 0xffff #define IB_BTH_PAD_SHIFT 20 +#define IB_BTH_A_MASK 1 +#define IB_BTH_A_SHIFT 31 +#define IB_BTH_M_MASK 1 +#define IB_BTH_M_SHIFT 22 +#define IB_BTH_SE_MASK 1 +#define IB_BTH_SE_SHIFT 23 +#define IB_BTH_TVER_MASK 0xf +#define IB_BTH_TVER_SHIFT 16 static inline u8 ib_bth_get_pad(struct ib_other_headers *ohdr) { @@ -247,4 +285,50 @@ static inline u8 ib_bth_get_opcode(struct ib_other_headers *ohdr) IB_BTH_OPCODE_MASK); } +static inline u8 ib_bth_get_ackreq(struct ib_other_headers *ohdr) +{ + return (u8)((be32_to_cpu(ohdr->bth[2]) >> IB_BTH_A_SHIFT) & + IB_BTH_A_MASK); +} + +static inline u8 ib_bth_get_migreq(struct ib_other_headers *ohdr) +{ + return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_M_SHIFT) & + IB_BTH_M_MASK); +} + +static inline u8 ib_bth_get_se(struct ib_other_headers *ohdr) +{ + return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_SE_SHIFT) & + IB_BTH_SE_MASK); +} + +static inline u32 ib_bth_get_psn(struct ib_other_headers *ohdr) +{ + return (u32)(be32_to_cpu(ohdr->bth[2])); +} + +static inline u32 ib_bth_get_qpn(struct ib_other_headers *ohdr) +{ + return (u32)((be32_to_cpu(ohdr->bth[1])) & IB_QPN_MASK); +} + +static inline u8 ib_bth_get_becn(struct ib_other_headers *ohdr) +{ + return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_BECN_SHIFT) & + IB_BECN_MASK); +}
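+
+/*
+ * Illustrative sketch (not part of this patch): with the accessors above
+ * and below, a receive path can decode the common BTH fields without
+ * open-coded shifts and masks, e.g.
+ *
+ *	u8  opcode = ib_bth_get_opcode(ohdr);
+ *	u32 qpn    = ib_bth_get_qpn(ohdr);
+ *	u32 psn    = ib_bth_get_psn(ohdr);
+ *
+ * with congestion bits checked via ib_bth_get_becn()/ib_bth_get_fecn().
+ */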
+ +static inline u8 ib_bth_get_fecn(struct ib_other_headers *ohdr) +{ + return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_FECN_SHIFT) & + IB_FECN_MASK); +} + +static inline u8 ib_bth_get_tver(struct ib_other_headers *ohdr) +{ + return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_TVER_SHIFT) & + IB_BTH_TVER_MASK); +} + #endif /* IB_HDRS_H */ diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h index 68cef3bd50fb..8ebf84ae9ed1 100644 --- a/include/rdma/ib_marshall.h +++ b/include/rdma/ib_marshall.h @@ -38,10 +38,12 @@ #include <rdma/ib_user_verbs.h> #include <rdma/ib_user_sa.h> -void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, +void ib_copy_qp_attr_to_user(struct ib_device *device, + struct ib_uverbs_qp_attr *dst, struct ib_qp_attr *src); -void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, +void ib_copy_ah_attr_to_user(struct ib_device *device, + struct ib_uverbs_ah_attr *dst, struct rdma_ah_attr *src); void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index b5732432bb29..e6df68048517 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -64,6 +64,8 @@ #include <linux/cgroup_rdma.h> #include <uapi/rdma/ib_user_verbs.h> +#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN + extern struct workqueue_struct *ib_wq; extern struct workqueue_struct *ib_comp_wq; @@ -168,7 +170,7 @@ enum ib_device_cap_flags { IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6), IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7), IB_DEVICE_SHUTDOWN_PORT = (1 << 8), - IB_DEVICE_INIT_TYPE = (1 << 9), + /* Not in use, former INIT_TYPE = (1 << 9),*/ IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10), IB_DEVICE_SYS_IMAGE_GUID = (1 << 11), IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12), @@ -183,7 +185,7 @@ enum ib_device_cap_flags { * which will always contain a usable lkey. */ IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15), - IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16), + /* Reserved, old SEND_W_INV = (1 << 16),*/ IB_DEVICE_MEM_WINDOW = (1 << 17), /* * Devices should set IB_DEVICE_UD_IP_SUM if they support @@ -218,7 +220,7 @@ enum ib_device_cap_flags { * of I/O operations with single completion queue managed * by hardware. 
*/ - IB_DEVICE_CROSS_CHANNEL = (1 << 27), + IB_DEVICE_CROSS_CHANNEL = (1 << 27), IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), @@ -278,6 +280,24 @@ struct ib_rss_caps { u32 max_rwq_indirection_table_size; }; +enum ib_tm_cap_flags { + /* Support tag matching on RC transport */ + IB_TM_CAP_RC = 1 << 0, +}; + +struct ib_xrq_caps { + /* Max size of RNDV header */ + u32 max_rndv_hdr_size; + /* Max number of entries in tag matching list */ + u32 max_num_tags; + /* From enum ib_tm_cap_flags */ + u32 flags; + /* Max number of outstanding list operations */ + u32 max_ops; + /* Max number of SGE in tag matching entry */ + u32 max_sge; +}; + enum ib_cq_creation_flags { IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1, @@ -338,6 +358,7 @@ struct ib_device_attr { struct ib_rss_caps rss_caps; u32 max_wq_type_rq; u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ + struct ib_xrq_caps xrq_caps; }; enum ib_mtu { @@ -549,8 +570,8 @@ struct ib_port_attr { u32 bad_pkey_cntr; u32 qkey_viol_cntr; u16 pkey_tbl_len; - u16 lid; - u16 sm_lid; + u32 sm_lid; + u32 lid; u8 lmc; u8 max_vl_num; u8 sm_sl; @@ -577,7 +598,8 @@ struct ib_device_modify { enum ib_port_modify_flags { IB_PORT_SHUTDOWN = 1, IB_PORT_INIT_TYPE = (1<<2), - IB_PORT_RESET_QKEY_CNTR = (1<<3) + IB_PORT_RESET_QKEY_CNTR = (1<<3), + IB_PORT_OPA_MASK_CHG = (1<<4) }; struct ib_port_modify { @@ -664,6 +686,8 @@ union rdma_network_hdr { }; }; +#define IB_QPN_MASK 0xFFFFFF + enum { IB_MULTICAST_QPN = 0xffffff }; @@ -859,6 +883,7 @@ struct roce_ah_attr { struct opa_ah_attr { u32 dlid; u8 src_path_bits; + bool make_grd; }; struct rdma_ah_attr { @@ -948,7 +973,7 @@ struct ib_wc { u32 src_qp; int wc_flags; u16 pkey_index; - u16 slid; + u32 slid; u8 sl; u8 dlid_path_bits; u8 port_num; /* valid only for DR SMPs on switches */ @@ -966,9 +991,16 @@ enum ib_cq_notify_flags { enum ib_srq_type { IB_SRQT_BASIC, - IB_SRQT_XRC + IB_SRQT_XRC, + IB_SRQT_TM, }; +static inline bool ib_srq_has_cq(enum ib_srq_type srq_type) +{ + return srq_type == IB_SRQT_XRC || + srq_type == IB_SRQT_TM; +} + enum ib_srq_attr_mask { IB_SRQ_MAX_WR = 1 << 0, IB_SRQ_LIMIT = 1 << 1, @@ -986,11 +1018,17 @@ struct ib_srq_init_attr { struct ib_srq_attr attr; enum ib_srq_type srq_type; - union { - struct { - struct ib_xrcd *xrcd; - struct ib_cq *cq; - } xrc; + struct { + struct ib_cq *cq; + union { + struct { + struct ib_xrcd *xrcd; + } xrc; + + struct { + u32 max_num_tags; + } tag_matching; + }; } ext; }; @@ -1059,6 +1097,7 @@ enum ib_qp_create_flags { /* FREE = 1 << 7, */ IB_QP_CREATE_SCATTER_FCS = 1 << 8, IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9, + IB_QP_CREATE_SOURCE_QPN = 1 << 10, /* reserve bits 26-31 for low level drivers' internal use */ IB_QP_CREATE_RESERVED_START = 1 << 26, IB_QP_CREATE_RESERVED_END = 1 << 31, @@ -1086,6 +1125,7 @@ struct ib_qp_init_attr { */ u8 port_num; struct ib_rwq_ind_table *rwq_ind_tbl; + u32 source_qpn; }; struct ib_qp_open_attr { @@ -1527,12 +1567,14 @@ struct ib_srq { enum ib_srq_type srq_type; atomic_t usecnt; - union { - struct { - struct ib_xrcd *xrcd; - struct ib_cq *cq; - u32 srq_num; - } xrc; + struct { + struct ib_cq *cq; + union { + struct { + struct ib_xrcd *xrcd; + u32 srq_num; + } xrc; + }; } ext; }; @@ -1546,6 +1588,10 @@ enum ib_raw_packet_caps { IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1), /* Checksum offloads are supported (for both send and receive). 
*/ IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2), + /* When a packet is received for an RQ with no receive WQEs, the + * packet processing is delayed. + */ + IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3), }; enum ib_wq_type { @@ -1574,6 +1620,7 @@ struct ib_wq { enum ib_wq_flags { IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0, IB_WQ_FLAGS_SCATTER_FCS = 1 << 1, + IB_WQ_FLAGS_DELAY_DROP = 1 << 2, }; struct ib_wq_init_attr { @@ -1683,6 +1730,7 @@ struct ib_qp { enum ib_qp_type qp_type; struct ib_rwq_ind_table *rwq_ind_tbl; struct ib_qp_security *qp_sec; + u8 port; }; struct ib_mr { @@ -2288,6 +2336,8 @@ struct ib_device { struct rdmacg_device cg_device; #endif + u32 index; + /** * The following mandatory functions are used only at device * registration. Keep functions such as these at the end of this @@ -2295,7 +2345,11 @@ struct ib_device { * in fast paths. */ int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); - void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len); + void (*get_dev_fw_str)(struct ib_device *, char *str); + const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev, + int comp_vector); + + struct uverbs_root_spec *specs_root; }; struct ib_client { @@ -2331,7 +2385,7 @@ struct ib_client { struct ib_device *ib_alloc_device(size_t size); void ib_dealloc_device(struct ib_device *device); -void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len); +void ib_get_device_fw_str(struct ib_device *device, char *str); int ib_register_device(struct ib_device *device, int (*port_callback)(struct ib_device *, @@ -2395,8 +2449,8 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, enum ib_qp_type type, enum ib_qp_attr_mask mask, enum rdma_link_layer ll); -int ib_register_event_handler (struct ib_event_handler *event_handler); -int ib_unregister_event_handler(struct ib_event_handler *event_handler); +void ib_register_event_handler(struct ib_event_handler *event_handler); +void ib_unregister_event_handler(struct ib_event_handler *event_handler); void ib_dispatch_event(struct ib_event *event); int ib_query_port(struct ib_device *device, @@ -3555,6 +3609,7 @@ void ib_drain_qp(struct ib_qp *qp); int ib_resolve_eth_dmac(struct ib_device *device, struct rdma_ah_attr *ah_attr); +int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width); static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) { @@ -3608,6 +3663,20 @@ static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr) return 0; } +static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr, + bool make_grd) +{ + if (attr->type == RDMA_AH_ATTR_TYPE_OPA) + attr->opa.make_grd = make_grd; +} + +static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr) +{ + if (attr->type == RDMA_AH_ATTR_TYPE_OPA) + return attr->opa.make_grd; + return false; +} + static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num) { attr->port_num = port_num; @@ -3706,4 +3775,52 @@ static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev, else return RDMA_AH_ATTR_TYPE_IB; } + +/** + * ib_lid_cpu16 - Return lid in 16bit CPU encoding. + * In the current implementation the only way to + * get the 32bit lid is from other sources for OPA. + * For IB, lids will always be 16bits so cast the + * value accordingly.
+ * + * @lid: A 32bit LID + */ +static inline u16 ib_lid_cpu16(u32 lid) +{ + WARN_ON_ONCE(lid & 0xFFFF0000); + return (u16)lid; +} + +/** + * ib_lid_be16 - Return lid in 16bit BE encoding. + * + * @lid: A 32bit LID + */ +static inline __be16 ib_lid_be16(u32 lid) +{ + WARN_ON_ONCE(lid & 0xFFFF0000); + return cpu_to_be16((u16)lid); +} + +/** + * ib_get_vector_affinity - Get the affinity mappings of a given completion + * vector + * @device: the rdma device + * @comp_vector: index of completion vector + * + * Returns NULL if @comp_vector is out of range or if the device driver + * does not implement get_vector_affinity; otherwise returns the cpu map + * of the completion vector. + */ +static inline const struct cpumask * +ib_get_vector_affinity(struct ib_device *device, int comp_vector) +{ + if (comp_vector < 0 || comp_vector >= device->num_comp_vectors || + !device->get_vector_affinity) + return NULL; + + return device->get_vector_affinity(device, comp_vector); + +} + #endif /* IB_VERBS_H */ diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h index eace28f1555d..e6e90f18e6d5 100644 --- a/include/rdma/opa_addr.h +++ b/include/rdma/opa_addr.h @@ -48,8 +48,21 @@ #ifndef OPA_ADDR_H #define OPA_ADDR_H +#include <rdma/opa_smi.h> + #define OPA_SPECIAL_OUI (0x00066AULL) #define OPA_MAKE_ID(x) (cpu_to_be64(OPA_SPECIAL_OUI << 40 | (x))) +#define OPA_TO_IB_UCAST_LID(x) (((x) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) \ + ? 0 : x) +#define OPA_GID_INDEX 0x1 +/** + * 0xF8 - 4 bits of multicast range and 1 bit for collective range + * Example: For 24 bit LID space, + * Multicast range: 0xF00000 to 0xF7FFFF + * Collective range: 0xF80000 to 0xFFFFFE + */ +#define OPA_MCAST_NR 0x4 /* Number of top bits set */ +#define OPA_COLLECTIVE_NR 0x1 /* Number of bits after MCAST_NR */ /** * ib_is_opa_gid: Returns true if the top 24 bits of the gid * * @gid: The Global identifier */ -static inline bool ib_is_opa_gid(union ib_gid *gid) +static inline bool ib_is_opa_gid(const union ib_gid *gid) { return ((be64_to_cpu(gid->global.interface_id) >> 40) == OPA_SPECIAL_OUI); @@ -72,8 +85,33 @@ static inline bool ib_is_opa_gid(union ib_gid *gid) * * @gid: The Global identifier */ -static inline u32 opa_get_lid_from_gid(union ib_gid *gid) +static inline u32 opa_get_lid_from_gid(const union ib_gid *gid) { return be64_to_cpu(gid->global.interface_id) & 0xFFFFFFFF; } + +/** + * opa_is_extended_lid: Returns true if dlid or slid are + * extended.
+ * + * @dlid: The DLID + * @slid: The SLID + */ +static inline bool opa_is_extended_lid(u32 dlid, u32 slid) +{ + if ((be32_to_cpu(dlid) >= + be16_to_cpu(IB_MULTICAST_LID_BASE)) || + (be32_to_cpu(slid) >= + be16_to_cpu(IB_MULTICAST_LID_BASE))) + return true; + else + return false; +} + +/* Get multicast lid base */ +static inline u32 opa_get_mcast_base(u32 nr_top_bits) +{ + return (be32_to_cpu(OPA_LID_PERMISSIVE) << (32 - nr_top_bits)); +} + #endif /* OPA_ADDR_H */ diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h index 39d6890616a6..0c07a70bd7f6 100644 --- a/include/rdma/opa_vnic.h +++ b/include/rdma/opa_vnic.h @@ -54,9 +54,6 @@ #include <rdma/ib_verbs.h> -/* VNIC uses 16B header format */ -#define OPA_VNIC_L2_TYPE 0x2 - /* 16 header bytes + 2 reserved bytes */ #define OPA_VNIC_L2_HDR_LEN (16 + 2) diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h index 348c102cb5f6..2d878596b1e0 100644 --- a/include/rdma/rdma_netlink.h +++ b/include/rdma/rdma_netlink.h @@ -5,29 +5,43 @@ #include <linux/netlink.h> #include <uapi/rdma/rdma_netlink.h> -struct ibnl_client_cbs { +struct rdma_nl_cbs { + int (*doit)(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack); int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb); - struct module *module; + u8 flags; }; +enum rdma_nl_flags { + /* Require CAP_NET_ADMIN */ + RDMA_NL_ADMIN_PERM = 1 << 0, +}; + +/* Define this module as providing netlink services for NETLINK_RDMA, with + * index _index. Since the client indexes were set up in a uapi header as an + * enum and we do not want to change that, the user must supply the expanded + * constant as well and the compiler checks they are the same. + */ +#define MODULE_ALIAS_RDMA_NETLINK(_index, _val) \ + static inline void __chk_##_index(void) \ + { \ + BUILD_BUG_ON(_index != _val); \ + } \ + MODULE_ALIAS("rdma-netlink-subsys-" __stringify(_val)) + /** - * Add a a client to the list of IB netlink exporters. + * Register client in RDMA netlink. * @index: Index of the added client - * @nops: Number of supported ops by the added client. * @cb_table: A table for op->callback - * - * Returns 0 on success or a negative error code. */ -int ibnl_add_client(int index, int nops, - const struct ibnl_client_cbs cb_table[]); +void rdma_nl_register(unsigned int index, + const struct rdma_nl_cbs cb_table[]); /** * Remove a client from IB netlink. * @index: Index of the removed IB client. - * - * Returns 0 on success or a negative error code. */ -int ibnl_remove_client(int index); +void rdma_nl_unregister(unsigned int index); /** * Put a new message in a supplied skb. @@ -56,22 +70,32 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, /** * Send the supplied skb to a specific userspace PID. * @skb: The netlink skb - * @nlh: Header of the netlink message to send * @pid: Userspace netlink process ID * Returns 0 on success or a negative error code. */ -int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh, - __u32 pid); +int rdma_nl_unicast(struct sk_buff *skb, u32 pid); + +/** + * Send, with wait/1 retry, the supplied skb to a specific userspace PID. + * @skb: The netlink skb + * @pid: Userspace netlink process ID + * Returns 0 on success or a negative error code. + */ +int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid); /** * Send the supplied skb to a netlink group. * @skb: The netlink skb - * @nlh: Header of the netlink message to send * @group: Netlink group ID * @flags: allocation flags * Returns 0 on success or a negative error code.
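Aside: a hedged sketch of the registration flow above; RDMA_NL_EXAMPLE, its expanded value 5 and the callback are hypothetical placeholders, not part of the patch:

static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	return 0;	/* handle a single request message */
}

static const struct rdma_nl_cbs example_cbs[] = {
	[0] = { .doit = example_doit, .flags = RDMA_NL_ADMIN_PERM },
};

/* at module init; rdma_nl_register() returns void and cannot fail */
rdma_nl_register(RDMA_NL_EXAMPLE, example_cbs);
MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_EXAMPLE, 5);	/* compile-time index check */
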
*/ -int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh, - unsigned int group, gfp_t flags); +int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags); +/** + * Check if there are any listeners to the netlink group + * @group: the netlink group ID + * Returns 0 on success or a negative for no listeners. + */ +int rdma_nl_chk_listeners(unsigned int group); #endif /* _RDMA_NETLINK_H */ diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 55af69271053..1ba84a78f1c5 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -57,11 +57,21 @@ #include <linux/list.h> #include <linux/hash.h> #include <rdma/ib_verbs.h> +#include <rdma/ib_mad.h> #include <rdma/rdmavt_mr.h> #include <rdma/rdmavt_qp.h> #define RVT_MAX_PKEY_VALUES 16 +#define RVT_MAX_TRAP_LEN 100 /* Limit pending trap list */ +#define RVT_MAX_TRAP_LISTS 5 /*((IB_NOTICE_TYPE_INFO & 0x0F) + 1)*/ +#define RVT_TRAP_TIMEOUT 4096 /* 4.096 usec */ + +struct trap_list { + u32 list_len; + struct list_head list; +}; + struct rvt_ibport { struct rvt_qp __rcu *qp[2]; struct ib_mad_agent *send_agent; /* agent for SMI (traps) */ @@ -75,12 +85,13 @@ struct rvt_ibport { __be64 mkey; u64 tid; u32 port_cap_flags; + u16 port_cap3_flags; u32 pma_sample_start; u32 pma_sample_interval; __be16 pma_counter_select[5]; u16 pma_tag; u16 mkey_lease_period; - u16 sm_lid; + u32 sm_lid; u8 sm_sl; u8 mkeyprot; u8 subnet_timeout; @@ -127,6 +138,13 @@ struct rvt_ibport { u16 *pkey_table; struct rvt_ah *sm_ah; + + /* + * Keep a list of traps that have not been repressed. They will be + * resent based on trap_timer. + */ + struct trap_list trap_lists[RVT_MAX_TRAP_LISTS]; + struct timer_list trap_timer; }; #define RVT_CQN_MAX 16 /* maximum length of cq name */ @@ -514,7 +532,8 @@ int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey); int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, u32 len, u64 vaddr, u32 rkey, int acc); int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, - struct rvt_sge *isge, struct ib_sge *sge, int acc); + struct rvt_sge *isge, struct rvt_sge *last_sge, + struct ib_sge *sge, int acc); struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid, u16 lid); diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h index f418bd5571a5..72a3856d4057 100644 --- a/include/rdma/rdmavt_mr.h +++ b/include/rdma/rdmavt_mr.h @@ -191,4 +191,7 @@ static inline void rvt_skip_sge(struct rvt_sge_state *ss, u32 length, } } +bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey); +bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey); + #endif /* DEF_RDMAVT_INCMRH */ diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index d664d2e76280..0eed3d8752fa 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -277,7 +277,6 @@ struct rvt_qp { unsigned long timeout_jiffies; /* computed from timeout */ - enum ib_mtu path_mtu; int srate_mbps; /* s_srate (below) converted to Mbit/s */ pid_t pid; /* pid for user mode QPs */ u32 remote_qpn; @@ -396,7 +395,7 @@ struct rvt_srq { #define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) #define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE) #define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1) -#define RVT_QPN_MASK 0xFFFFFF +#define RVT_QPN_MASK IB_QPN_MASK /* * QPN-map pages start out as NULL, they get allocated upon @@ -674,4 +673,34 @@ void rvt_del_timers_sync(struct rvt_qp *qp); void rvt_stop_rc_timers(struct rvt_qp *qp); void rvt_add_retry_timer(struct rvt_qp *qp); +/** + * struct rvt_qp_iter - the 
iterator for QPs + * @qp - the current QP + * + * This structure defines the current iterator + * state for sequenced access to all QPs relative + * to an rvt_dev_info. + */ +struct rvt_qp_iter { + struct rvt_qp *qp; + /* private: backpointer */ + struct rvt_dev_info *rdi; + /* private: callback routine */ + void (*cb)(struct rvt_qp *qp, u64 v); + /* private: for arg to callback routine */ + u64 v; + /* private: number of SMI,GSI QPs for device */ + int specials; + /* private: current iterator index */ + int n; +}; + +struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi, + u64 v, + void (*cb)(struct rvt_qp *qp, u64 v)); +int rvt_qp_iter_next(struct rvt_qp_iter *iter); +void rvt_qp_iter(struct rvt_dev_info *rdi, + u64 v, + void (*cb)(struct rvt_qp *qp, u64 v)); +void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey); #endif /* DEF_RDMAVT_INCQP_H */ diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h new file mode 100644 index 000000000000..6da44079aa58 --- /dev/null +++ b/include/rdma/uverbs_ioctl.h @@ -0,0 +1,438 @@ +/* + * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _UVERBS_IOCTL_ +#define _UVERBS_IOCTL_ + +#include <rdma/uverbs_types.h> +#include <linux/uaccess.h> +#include <rdma/rdma_user_ioctl.h> +#include <rdma/ib_user_ioctl_verbs.h> + +/* + * ======================================= + * Verbs action specifications + * ======================================= + */ + +enum uverbs_attr_type { + UVERBS_ATTR_TYPE_NA, + UVERBS_ATTR_TYPE_PTR_IN, + UVERBS_ATTR_TYPE_PTR_OUT, + UVERBS_ATTR_TYPE_IDR, + UVERBS_ATTR_TYPE_FD, +}; + +enum uverbs_obj_access { + UVERBS_ACCESS_READ, + UVERBS_ACCESS_WRITE, + UVERBS_ACCESS_NEW, + UVERBS_ACCESS_DESTROY +}; + +enum { + UVERBS_ATTR_SPEC_F_MANDATORY = 1U << 0, + /* Support extending attributes by length */ + UVERBS_ATTR_SPEC_F_MIN_SZ = 1U << 1, +}; + +struct uverbs_attr_spec { + enum uverbs_attr_type type; + union { + u16 len; + struct { + /* + * higher bits mean the namespace and lower bits mean + * the type id within the namespace. 
+ */ + u16 obj_type; + u8 access; + } obj; + }; + /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */ + u8 flags; +}; + +struct uverbs_attr_spec_hash { + size_t num_attrs; + unsigned long *mandatory_attrs_bitmask; + struct uverbs_attr_spec attrs[0]; +}; + +struct uverbs_attr_bundle; +struct ib_uverbs_file; + +enum { + /* + * Action marked with this flag creates a context (or root for all + * objects). + */ + UVERBS_ACTION_FLAG_CREATE_ROOT = 1U << 0, +}; + +struct uverbs_method_spec { + /* Combination of bits from enum UVERBS_ACTION_FLAG_XXXX */ + u32 flags; + size_t num_buckets; + size_t num_child_attrs; + int (*handler)(struct ib_device *ib_dev, struct ib_uverbs_file *ufile, + struct uverbs_attr_bundle *ctx); + struct uverbs_attr_spec_hash *attr_buckets[0]; +}; + +struct uverbs_method_spec_hash { + size_t num_methods; + struct uverbs_method_spec *methods[0]; +}; + +struct uverbs_object_spec { + const struct uverbs_obj_type *type_attrs; + size_t num_buckets; + struct uverbs_method_spec_hash *method_buckets[0]; +}; + +struct uverbs_object_spec_hash { + size_t num_objects; + struct uverbs_object_spec *objects[0]; +}; + +struct uverbs_root_spec { + size_t num_buckets; + struct uverbs_object_spec_hash *object_buckets[0]; +}; + +/* + * ======================================= + * Verbs definitions + * ======================================= + */ + +struct uverbs_attr_def { + u16 id; + struct uverbs_attr_spec attr; +}; + +struct uverbs_method_def { + u16 id; + /* Combination of bits from enum UVERBS_ACTION_FLAG_XXXX */ + u32 flags; + size_t num_attrs; + const struct uverbs_attr_def * const (*attrs)[]; + int (*handler)(struct ib_device *ib_dev, struct ib_uverbs_file *ufile, + struct uverbs_attr_bundle *ctx); +}; + +struct uverbs_object_def { + u16 id; + const struct uverbs_obj_type *type_attrs; + size_t num_methods; + const struct uverbs_method_def * const (*methods)[]; +}; + +struct uverbs_object_tree_def { + size_t num_objects; + const struct uverbs_object_def * const (*objects)[]; +}; + +#define UA_FLAGS(_flags) .flags = _flags +#define __UVERBS_ATTR0(_id, _len, _type, ...) \ + ((const struct uverbs_attr_def) \ + {.id = _id, .attr = {.type = _type, {.len = _len}, .flags = 0, } }) +#define __UVERBS_ATTR1(_id, _len, _type, _flags) \ + ((const struct uverbs_attr_def) \ + {.id = _id, .attr = {.type = _type, {.len = _len}, _flags, } }) +#define __UVERBS_ATTR(_id, _len, _type, _flags, _n, ...) \ + __UVERBS_ATTR##_n(_id, _len, _type, _flags) +/* + * In new compiler, UVERBS_ATTR could be simplified by declaring it as + * [_id] = {.type = _type, .len = _len, ##__VA_ARGS__} + * But since we support older compilers too, we need the more complex code. + */ +#define UVERBS_ATTR(_id, _len, _type, ...) \ + __UVERBS_ATTR(_id, _len, _type, ##__VA_ARGS__, 1, 0) +#define UVERBS_ATTR_PTR_IN_SZ(_id, _len, ...) \ + UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_IN, ##__VA_ARGS__) +/* If sizeof(_type) <= sizeof(u64), this will be inlined rather than a pointer */ +#define UVERBS_ATTR_PTR_IN(_id, _type, ...) \ + UVERBS_ATTR_PTR_IN_SZ(_id, sizeof(_type), ##__VA_ARGS__) +#define UVERBS_ATTR_PTR_OUT_SZ(_id, _len, ...) \ + UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_OUT, ##__VA_ARGS__) +#define UVERBS_ATTR_PTR_OUT(_id, _type, ...) 
\ + UVERBS_ATTR_PTR_OUT_SZ(_id, sizeof(_type), ##__VA_ARGS__) + +/* + * In new compiler, UVERBS_ATTR_IDR (and FD) could be simplified by declaring + * it as + * {.id = _id, \ + * .attr {.type = __obj_class, \ + * .obj = {.obj_type = _idr_type, \ + * .access = _access \ + * }, ##__VA_ARGS__ } } + * But since we support older compilers too, we need the more complex code. + */ +#define ___UVERBS_ATTR_OBJ0(_id, _obj_class, _obj_type, _access, ...)\ + ((const struct uverbs_attr_def) \ + {.id = _id, \ + .attr = {.type = _obj_class, \ + {.obj = {.obj_type = _obj_type, .access = _access } },\ + .flags = 0} }) +#define ___UVERBS_ATTR_OBJ1(_id, _obj_class, _obj_type, _access, _flags)\ + ((const struct uverbs_attr_def) \ + {.id = _id, \ + .attr = {.type = _obj_class, \ + {.obj = {.obj_type = _obj_type, .access = _access} }, \ + _flags} }) +#define ___UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, _flags, \ + _n, ...) \ + ___UVERBS_ATTR_OBJ##_n(_id, _obj_class, _obj_type, _access, _flags) +#define __UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, ...) \ + ___UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, \ + ##__VA_ARGS__, 1, 0) +#define UVERBS_ATTR_IDR(_id, _idr_type, _access, ...) \ + __UVERBS_ATTR_OBJ(_id, UVERBS_ATTR_TYPE_IDR, _idr_type, _access,\ + ##__VA_ARGS__) +#define UVERBS_ATTR_FD(_id, _fd_type, _access, ...) \ + __UVERBS_ATTR_OBJ(_id, UVERBS_ATTR_TYPE_FD, _fd_type, \ + (_access) + BUILD_BUG_ON_ZERO( \ + (_access) != UVERBS_ACCESS_NEW && \ + (_access) != UVERBS_ACCESS_READ), \ + ##__VA_ARGS__) +#define DECLARE_UVERBS_ATTR_SPEC(_name, ...) \ + const struct uverbs_attr_def _name = __VA_ARGS__ + +#define _UVERBS_METHOD_ATTRS_SZ(...) \ + (sizeof((const struct uverbs_attr_def * const []){__VA_ARGS__}) /\ + sizeof(const struct uverbs_attr_def *)) +#define _UVERBS_METHOD(_id, _handler, _flags, ...) \ + ((const struct uverbs_method_def) { \ + .id = _id, \ + .flags = _flags, \ + .handler = _handler, \ + .num_attrs = _UVERBS_METHOD_ATTRS_SZ(__VA_ARGS__), \ + .attrs = &(const struct uverbs_attr_def * const []){__VA_ARGS__} }) +#define DECLARE_UVERBS_METHOD(_name, _id, _handler, ...) \ + const struct uverbs_method_def _name = \ + _UVERBS_METHOD(_id, _handler, 0, ##__VA_ARGS__) +#define DECLARE_UVERBS_CTX_METHOD(_name, _id, _handler, _flags, ...) \ + const struct uverbs_method_def _name = \ + _UVERBS_METHOD(_id, _handler, \ + UVERBS_ACTION_FLAG_CREATE_ROOT, \ + ##__VA_ARGS__) +#define _UVERBS_OBJECT_METHODS_SZ(...) \ + (sizeof((const struct uverbs_method_def * const []){__VA_ARGS__}) / \ + sizeof(const struct uverbs_method_def *)) +#define _UVERBS_OBJECT(_id, _type_attrs, ...) \ + ((const struct uverbs_object_def) { \ + .id = _id, \ + .type_attrs = _type_attrs, \ + .num_methods = _UVERBS_OBJECT_METHODS_SZ(__VA_ARGS__), \ + .methods = &(const struct uverbs_method_def * const []){__VA_ARGS__} }) +#define DECLARE_UVERBS_OBJECT(_name, _id, _type_attrs, ...) \ + const struct uverbs_object_def _name = \ + _UVERBS_OBJECT(_id, _type_attrs, ##__VA_ARGS__) +#define _UVERBS_TREE_OBJECTS_SZ(...) \ + (sizeof((const struct uverbs_object_def * const []){__VA_ARGS__}) / \ + sizeof(const struct uverbs_object_def *)) +#define _UVERBS_OBJECT_TREE(...) \ + ((const struct uverbs_object_tree_def) { \ + .num_objects = _UVERBS_TREE_OBJECTS_SZ(__VA_ARGS__), \ + .objects = &(const struct uverbs_object_def * const []){__VA_ARGS__} }) +#define DECLARE_UVERBS_OBJECT_TREE(_name, ...) 
\ + const struct uverbs_object_tree_def _name = \ + _UVERBS_OBJECT_TREE(__VA_ARGS__) + +/* ================================================= + * Parsing infrastructure + * ================================================= + */ + +struct uverbs_ptr_attr { + union { + u64 data; + void __user *ptr; + }; + u16 len; + /* Combination of bits from enum UVERBS_ATTR_F_XXXX */ + u16 flags; +}; + +struct uverbs_obj_attr { + /* pointer to the kernel descriptor -> type, access, etc */ + const struct uverbs_obj_type *type; + struct ib_uobject *uobject; + /* fd or id in idr of this object */ + int id; +}; + +struct uverbs_attr { + /* + * pointer to the user-space given attribute, in order to write the + * new uobject's id or update flags. + */ + struct ib_uverbs_attr __user *uattr; + union { + struct uverbs_ptr_attr ptr_attr; + struct uverbs_obj_attr obj_attr; + }; +}; + +struct uverbs_attr_bundle_hash { + /* if bit i is set, it means attrs[i] contains valid information */ + unsigned long *valid_bitmap; + size_t num_attrs; + /* + * arrays of attributes, each element corresponds to the specification + * of the attribute in the same index. + */ + struct uverbs_attr *attrs; +}; + +struct uverbs_attr_bundle { + size_t num_buckets; + struct uverbs_attr_bundle_hash hash[]; +}; + +static inline bool uverbs_attr_is_valid_in_hash(const struct uverbs_attr_bundle_hash *attrs_hash, + unsigned int idx) +{ + return test_bit(idx, attrs_hash->valid_bitmap); +} + +static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_bundle, + unsigned int idx) +{ + u16 idx_bucket = idx >> UVERBS_ID_NS_SHIFT; + + if (attrs_bundle->num_buckets <= idx_bucket) + return false; + + return uverbs_attr_is_valid_in_hash(&attrs_bundle->hash[idx_bucket], + idx & ~UVERBS_ID_NS_MASK); +} + +static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr_bundle *attrs_bundle, + u16 idx) +{ + u16 idx_bucket = idx >> UVERBS_ID_NS_SHIFT; + + if (!uverbs_attr_is_valid(attrs_bundle, idx)) + return ERR_PTR(-ENOENT); + + return &attrs_bundle->hash[idx_bucket].attrs[idx & ~UVERBS_ID_NS_MASK]; +} + +static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, const void *from) +{ + const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); + u16 flags; + + if (IS_ERR(attr)) + return PTR_ERR(attr); + + flags = attr->ptr_attr.flags | UVERBS_ATTR_F_VALID_OUTPUT; + return (!copy_to_user(attr->ptr_attr.ptr, from, attr->ptr_attr.len) && + !put_user(flags, &attr->uattr->flags)) ? 0 : -EFAULT; +} + +static inline int _uverbs_copy_from(void *to, size_t to_size, + const struct uverbs_attr_bundle *attrs_bundle, + size_t idx) +{ + const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); + + if (IS_ERR(attr)) + return PTR_ERR(attr); + + if (to_size <= sizeof(((struct ib_uverbs_attr *)0)->data)) + memcpy(to, &attr->ptr_attr.data, attr->ptr_attr.len); + else if (copy_from_user(to, attr->ptr_attr.ptr, attr->ptr_attr.len)) + return -EFAULT; + + return 0; +} + +#define uverbs_copy_from(to, attrs_bundle, idx) \ + _uverbs_copy_from(to, sizeof(*(to)), attrs_bundle, idx) + +/* ================================================= + * Definitions -> Specs infrastructure + * ================================================= + */ + +/* + * uverbs_alloc_spec_tree - Merges different common and driver-specific features + * into one parsing tree that every uverbs command will be parsed upon. + * + * @num_trees: Number of trees in the array @trees.
+ * @trees: Array of pointers to tree root definitions to merge. Each such tree + * possibly contains object, method and attribute definitions. + * + * Returns: + * uverbs_root_spec *: The root of the merged parsing tree. + * On error, we return an error code. Error is checked via IS_ERR. + * + * The following merges could take place: + * a. Two trees representing the same method with different handler + * -> We take the handler from the tree whose handler != NULL + * and whose index in the trees array is greater. The incentive for that + * is that developers are expected to first merge common trees and then + * merge trees that specialize the behaviour. + * b. Two trees representing the same object with different + * type_attrs (struct uverbs_obj_type): + * -> We take the type_attrs from the tree whose type_attrs != NULL + * and whose index in the trees array is greater. This could be used + * in order to override the free function, allocation size, etc. + * c. Two trees representing the same method attribute (same id but possibly + * different attributes): + * -> ERROR (-ENOENT), we believe that's not the programmer's intent. + * + * An object without any methods is considered invalid and will abort the + * function with an -ENOENT error. + */ +#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) +struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees, + const struct uverbs_object_tree_def **trees); +void uverbs_free_spec_tree(struct uverbs_root_spec *root); +#else +static inline struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees, + const struct uverbs_object_tree_def **trees) +{ + return NULL; +} + +static inline void uverbs_free_spec_tree(struct uverbs_root_spec *root) +{ +} +#endif + +#endif diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h index 7771ce966952..5f8e20bbd67c 100644 --- a/include/rdma/uverbs_std_types.h +++ b/include/rdma/uverbs_std_types.h @@ -34,19 +34,35 @@ #define _UVERBS_STD_TYPES__ #include <rdma/uverbs_types.h> - -extern const struct uverbs_obj_fd_type uverbs_type_attrs_comp_channel; -extern const struct uverbs_obj_idr_type uverbs_type_attrs_cq; -extern const struct uverbs_obj_idr_type uverbs_type_attrs_qp; -extern const struct uverbs_obj_idr_type uverbs_type_attrs_rwq_ind_table; -extern const struct uverbs_obj_idr_type uverbs_type_attrs_wq; -extern const struct uverbs_obj_idr_type uverbs_type_attrs_srq; -extern const struct uverbs_obj_idr_type uverbs_type_attrs_ah; -extern const struct uverbs_obj_idr_type uverbs_type_attrs_flow; -extern const struct uverbs_obj_idr_type uverbs_type_attrs_mr; -extern const struct uverbs_obj_idr_type uverbs_type_attrs_mw; -extern const struct uverbs_obj_idr_type uverbs_type_attrs_pd; -extern const struct uverbs_obj_idr_type uverbs_type_attrs_xrcd; +#include <rdma/uverbs_ioctl.h> +#include <rdma/ib_user_ioctl_verbs.h> + +#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) +extern const struct uverbs_object_def uverbs_object_comp_channel; +extern const struct uverbs_object_def uverbs_object_cq; +extern const struct uverbs_object_def uverbs_object_qp; +extern const struct uverbs_object_def uverbs_object_rwq_ind_table; +extern const struct uverbs_object_def uverbs_object_wq; +extern const struct uverbs_object_def uverbs_object_srq; +extern const struct uverbs_object_def uverbs_object_ah; +extern const struct uverbs_object_def uverbs_object_flow; +extern const struct uverbs_object_def uverbs_object_mr; +extern const struct uverbs_object_def uverbs_object_mw; +extern const struct
uverbs_object_def uverbs_object_pd; +extern const struct uverbs_object_def uverbs_object_xrcd; +extern const struct uverbs_object_def uverbs_object_device; + +extern const struct uverbs_object_tree_def uverbs_default_objects; +static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void) +{ + return &uverbs_default_objects; +} +#else +static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void) +{ + return NULL; +} +#endif static inline struct ib_uobject *__uobj_get(const struct uverbs_obj_type *type, bool write, @@ -56,22 +72,22 @@ static inline struct ib_uobject *__uobj_get(const struct uverbs_obj_type *type, return rdma_lookup_get_uobject(type, ucontext, id, write); } -#define uobj_get_type(_type) uverbs_type_attrs_##_type.type +#define uobj_get_type(_object) uverbs_object_##_object.type_attrs #define uobj_get_read(_type, _id, _ucontext) \ - __uobj_get(&(_type), false, _ucontext, _id) + __uobj_get(_type, false, _ucontext, _id) -#define uobj_get_obj_read(_type, _id, _ucontext) \ +#define uobj_get_obj_read(_object, _id, _ucontext) \ ({ \ - struct ib_uobject *uobj = \ - __uobj_get(&uobj_get_type(_type), \ + struct ib_uobject *__uobj = \ + __uobj_get(uverbs_object_##_object.type_attrs, \ false, _ucontext, _id); \ \ - (struct ib_##_type *)(IS_ERR(uobj) ? NULL : uobj->object); \ + (struct ib_##_object *)(IS_ERR(__uobj) ? NULL : __uobj->object);\ }) #define uobj_get_write(_type, _id, _ucontext) \ - __uobj_get(&(_type), true, _ucontext, _id) + __uobj_get(_type, true, _ucontext, _id) static inline void uobj_put_read(struct ib_uobject *uobj) { @@ -108,7 +124,7 @@ static inline struct ib_uobject *__uobj_alloc(const struct uverbs_obj_type *type } #define uobj_alloc(_type, ucontext) \ - __uobj_alloc(&(_type), ucontext) + __uobj_alloc(_type, ucontext) #endif diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h index 351ea185df44..cc04ec65588d 100644 --- a/include/rdma/uverbs_types.h +++ b/include/rdma/uverbs_types.h @@ -129,6 +129,7 @@ struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type, void rdma_alloc_abort_uobject(struct ib_uobject *uobj); int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj); int rdma_alloc_commit_uobject(struct ib_uobject *uobj); +int rdma_explicit_destroy(struct ib_uobject *uobject); struct uverbs_obj_fd_type { /* @@ -151,22 +152,30 @@ extern const struct uverbs_obj_type_class uverbs_fd_class; #define UVERBS_BUILD_BUG_ON(cond) (sizeof(char[1 - 2 * !!(cond)]) - \ sizeof(char)) -#define UVERBS_TYPE_ALLOC_FD(_size, _order) \ - { \ - .destroy_order = _order, \ - .type_class = &uverbs_fd_class, \ - .obj_size = (_size) + \ - UVERBS_BUILD_BUG_ON((_size) < \ - sizeof(struct ib_uobject_file)),\ - } -#define UVERBS_TYPE_ALLOC_IDR_SZ(_size, _order) \ - { \ +#define UVERBS_TYPE_ALLOC_FD(_order, _obj_size, _context_closed, _fops, _name, _flags)\ + ((&((const struct uverbs_obj_fd_type) \ + {.type = { \ + .destroy_order = _order, \ + .type_class = &uverbs_fd_class, \ + .obj_size = (_obj_size) + \ + UVERBS_BUILD_BUG_ON((_obj_size) < sizeof(struct ib_uobject_file)), \ + }, \ + .context_closed = _context_closed, \ + .fops = _fops, \ + .name = _name, \ + .flags = _flags}))->type) +#define UVERBS_TYPE_ALLOC_IDR_SZ(_size, _order, _destroy_object) \ + ((&((const struct uverbs_obj_idr_type) \ + {.type = { \ .destroy_order = _order, \ .type_class = &uverbs_idr_class, \ .obj_size = (_size) + \ - UVERBS_BUILD_BUG_ON((_size) < \ - sizeof(struct ib_uobject)), \ - } -#define 
UVERBS_TYPE_ALLOC_IDR(_order) \ - UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uobject), _order) + UVERBS_BUILD_BUG_ON((_size) < \ + sizeof(struct ib_uobject)) \ + }, \ + .destroy_object = _destroy_object,}))->type) +#define UVERBS_TYPE_ALLOC_IDR(_order, _destroy_object) \ + UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uobject), _order, \ + _destroy_object) + #endif diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index a1266d318c85..6af198d8120b 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -57,6 +57,7 @@ struct scsi_pointer { /* for scmd->flags */ #define SCMD_TAGGED (1 << 0) #define SCMD_UNCHECKED_ISA_DMA (1 << 1) +#define SCMD_ZONE_WRITE_LOCK (1 << 2) struct scsi_cmnd { struct scsi_request req; diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h index 1df2ff61a4dd..0e495ed8872e 100644 --- a/include/sound/omap-hdmi-audio.h +++ b/include/sound/omap-hdmi-audio.h @@ -39,7 +39,7 @@ struct omap_hdmi_audio_ops { /* HDMI audio initialization data */ struct omap_hdmi_audio_pdata { struct device *dev; - enum omapdss_version dss_version; + unsigned int version; phys_addr_t audio_dma_addr; const struct omap_hdmi_audio_ops *ops; diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 0ca1fb08805b..fb87d32f5e51 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -786,6 +786,7 @@ struct iscsi_np { int np_sock_type; enum np_thread_state_table np_thread_state; bool enabled; + atomic_t np_reset_count; enum iscsi_timer_flags_table np_login_timer_flags; u32 np_exports; enum np_flags_table np_flags; diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 91dc089d65b7..e91ae1f2290d 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -703,6 +703,7 @@ TRACE_EVENT(rcu_batch_end, * at the beginning and end of the read, respectively. Note that the * callback address can be NULL.
*/ +#define RCUTORTURENAME_LEN 8 TRACE_EVENT(rcu_torture_read, TP_PROTO(const char *rcutorturename, struct rcu_head *rhp, @@ -711,7 +712,7 @@ TRACE_EVENT(rcu_torture_read, TP_ARGS(rcutorturename, rhp, secs, c_old, c), TP_STRUCT__entry( - __field(const char *, rcutorturename) + __field(char, rcutorturename[RCUTORTURENAME_LEN]) __field(struct rcu_head *, rhp) __field(unsigned long, secs) __field(unsigned long, c_old) @@ -719,7 +720,9 @@ TRACE_EVENT(rcu_torture_read, ), TP_fast_assign( - __entry->rcutorturename = rcutorturename; + strncpy(__entry->rcutorturename, rcutorturename, + RCUTORTURENAME_LEN); + __entry->rcutorturename[RCUTORTURENAME_LEN - 1] = 0; __entry->rhp = rhp; __entry->secs = secs; __entry->c_old = c_old; diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h index 72e326f9c7de..0cb932416cfe 100644 --- a/include/uapi/drm/armada_drm.h +++ b/include/uapi/drm/armada_drm.h @@ -23,27 +23,27 @@ extern "C" { DRM_##dir(DRM_COMMAND_BASE + DRM_ARMADA_##name, struct drm_armada_##str) struct drm_armada_gem_create { - uint32_t handle; - uint32_t size; + __u32 handle; + __u32 size; }; #define DRM_IOCTL_ARMADA_GEM_CREATE \ ARMADA_IOCTL(IOWR, GEM_CREATE, gem_create) struct drm_armada_gem_mmap { - uint32_t handle; - uint32_t pad; - uint64_t offset; - uint64_t size; - uint64_t addr; + __u32 handle; + __u32 pad; + __u64 offset; + __u64 size; + __u64 addr; }; #define DRM_IOCTL_ARMADA_GEM_MMAP \ ARMADA_IOCTL(IOWR, GEM_MMAP, gem_mmap) struct drm_armada_gem_pwrite { - uint64_t ptr; - uint32_t handle; - uint32_t offset; - uint32_t size; + __u64 ptr; + __u32 handle; + __u32 offset; + __u32 size; }; #define DRM_IOCTL_ARMADA_GEM_PWRITE \ ARMADA_IOCTL(IOW, GEM_PWRITE, gem_pwrite) diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 101593ab10ac..97677cd6964d 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -700,6 +700,7 @@ struct drm_prime_handle { struct drm_syncobj_create { __u32 handle; +#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0) __u32 flags; }; @@ -718,6 +719,24 @@ struct drm_syncobj_handle { __u32 pad; }; +#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) +#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1) +struct drm_syncobj_wait { + __u64 handles; + /* absolute timeout */ + __s64 timeout_nsec; + __u32 count_handles; + __u32 flags; + __u32 first_signaled; /* only valid when not waiting all */ + __u32 pad; +}; + +struct drm_syncobj_array { + __u64 handles; + __u32 count_handles; + __u32 pad; +}; + #if defined(__cplusplus) } #endif @@ -840,6 +859,9 @@ extern "C" { #define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy) #define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) #define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle) +#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait) +#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array) +#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array) /** * Device specific ioctls should only be in their respective headers diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 7586c46f68bf..3ad838d3f93f 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -185,6 +185,8 @@ extern "C" { #define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07 /* add more to the end as needed */ +#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1) + #define fourcc_mod_code(vendor, val) \ ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 
0x00ffffffffffffffULL)) @@ -197,6 +199,15 @@ extern "C" { */ /* + * Invalid Modifier + * + * This modifier can be used as a sentinel to terminate the format modifiers + * list, or to initialize a variable with an invalid modifier. It might also be + * used to report an error back to userspace for certain APIs. + */ +#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED) + +/* * Linear Layout * * Just plain linear layout. Note that this is different from not specifying any @@ -253,6 +264,26 @@ extern "C" { #define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3) /* + * Intel color control surface (CCS) for render compression + * + * The framebuffer format must be one of the 8:8:8:8 RGB formats. + * The main surface will be plane index 0 and must be Y/Yf-tiled, + * the CCS will be plane index 1. + * + * Each CCS tile matches a 1024x512 pixel area of the main surface. + * To match certain aspects of the 3D hardware the CCS is + * considered to be made up of normal 128Bx32 Y tiles. Thus + * the CCS pitch must be specified in multiples of 128 bytes. + * + * In reality the CCS tile appears to be a 64Bx64 Y tile, composed + * of QWORD (8 bytes) chunks instead of OWORD (16 bytes) chunks. + * But that fact is not relevant unless the memory is accessed + * directly. + */ +#define I915_FORMAT_MOD_Y_TILED_CCS fourcc_mod_code(INTEL, 4) +#define I915_FORMAT_MOD_Yf_TILED_CCS fourcc_mod_code(INTEL, 5) + +/* * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks * * Macroblocks are laid in a Z-shape, and each pixel data is following the diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 403339f98a92..54fc38c3c3f1 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -712,6 +712,56 @@ struct drm_mode_atomic { __u64 user_data; }; +struct drm_format_modifier_blob { +#define FORMAT_BLOB_CURRENT 1 + /* Version of this blob format */ + __u32 version; + + /* Flags */ + __u32 flags; + + /* Number of fourcc formats supported */ + __u32 count_formats; + + /* Where in this blob the formats exist (in bytes) */ + __u32 formats_offset; + + /* Number of drm_format_modifiers */ + __u32 count_modifiers; + + /* Where in this blob the modifiers exist (in bytes) */ + __u32 modifiers_offset; + + /* __u32 formats[] */ + /* struct drm_format_modifier modifiers[] */ +}; + +struct drm_format_modifier { + /* Bitmask of formats in get_plane format list this info applies to. The + * offset allows a sliding window of which 64 formats (bits). + * + * Some examples: + * In today's world with < 65 formats, where formats 0 and 2 are + * supported + * 0x0000000000000005 + * ^-offset = 0, formats = 5 + * + * If the number of formats grew to 128, and formats 98-102 are + * supported with the modifier: + * + * 0x0000003c00000000 0000000000000000 + * ^ + * |__offset = 64, formats = 0x3c00000000 + * + */ + __u64 formats; + __u32 offset; + __u32 pad; + + /* The modifier that applies to the >get_plane format list bitmask. */ + __u64 modifier; +}; + /** * Create a new 'blob' data property, copying length bytes from data pointer, * and returning new blob ID.
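Aside: the formats/offset pair in drm_format_modifier describes a sliding 64-bit window into the blob's format array. A hedged sketch of the decode; modifier_covers_format() is a hypothetical helper, not part of the patch:

static bool modifier_covers_format(const struct drm_format_modifier *mod,
				   unsigned int fmt_idx)
{
	/* the bitmask covers format indexes [offset, offset + 64) */
	if (fmt_idx < mod->offset || fmt_idx >= mod->offset + 64)
		return false;
	return mod->formats & (1ULL << (fmt_idx - mod->offset));
}
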
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 7ccbd6a2bbe0..6598fb76d2c2 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -260,6 +260,8 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 #define DRM_I915_PERF_OPEN 0x36 +#define DRM_I915_PERF_ADD_CONFIG 0x37 +#define DRM_I915_PERF_REMOVE_CONFIG 0x38 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -315,6 +317,8 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) #define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) +#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config) +#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -431,6 +435,11 @@ typedef struct drm_i915_irq_wait { */ #define I915_PARAM_HAS_EXEC_BATCH_FIRST 48 +/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of + * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY. + */ +#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49 + typedef struct drm_i915_getparam { __s32 param; /* @@ -812,6 +821,17 @@ struct drm_i915_gem_exec_object2 { __u64 rsvd2; }; +struct drm_i915_gem_exec_fence { + /** + * User's handle for a drm_syncobj to wait on or signal. + */ + __u32 handle; + +#define I915_EXEC_FENCE_WAIT (1<<0) +#define I915_EXEC_FENCE_SIGNAL (1<<1) + __u32 flags; +}; + struct drm_i915_gem_execbuffer2 { /** * List of gem_exec_object2 structs @@ -826,7 +846,11 @@ struct drm_i915_gem_execbuffer2 { __u32 DR1; __u32 DR4; __u32 num_cliprects; - /** This is a struct drm_clip_rect *cliprects */ + /** + * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY + * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a + * struct drm_i915_gem_exec_fence *fences. + */ __u64 cliprects_ptr; #define I915_EXEC_RING_MASK (7<<0) #define I915_EXEC_DEFAULT (0<<0) @@ -927,7 +951,14 @@ struct drm_i915_gem_execbuffer2 { * element). */ #define I915_EXEC_BATCH_FIRST (1<<18) -#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_BATCH_FIRST<<1)) + +/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr + * define an array of drm_i915_gem_exec_fence structures which specify a set of + * dma fences to wait upon or signal. + */ +#define I915_EXEC_FENCE_ARRAY (1<<19) + +#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1)) #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) #define i915_execbuffer2_set_context_id(eb2, context) \ @@ -1467,6 +1498,22 @@ enum drm_i915_perf_record_type { DRM_I915_PERF_RECORD_MAX /* non-ABI */ }; +/** + * Structure to upload perf dynamic configuration into the kernel.
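Aside: a hedged userspace sketch of the I915_EXEC_FENCE_ARRAY path described above; wait_handle and signal_handle are assumed to be drm_syncobj handles obtained elsewhere (for example via DRM_IOCTL_SYNCOBJ_CREATE), and the remaining execbuf fields are elided:

struct drm_i915_gem_exec_fence fences[2] = {
	{ .handle = wait_handle,   .flags = I915_EXEC_FENCE_WAIT },
	{ .handle = signal_handle, .flags = I915_EXEC_FENCE_SIGNAL },
};
struct drm_i915_gem_execbuffer2 eb = {
	/* buffers_ptr, buffer_count and batch fields elided */
	.cliprects_ptr = (__u64)(uintptr_t)fences,	/* reused as fence array */
	.num_cliprects = 2,				/* reused as fence count */
	.flags = I915_EXEC_FENCE_ARRAY,
};
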
+ */ +struct drm_i915_perf_oa_config { + /** String formatted like "%08x-%04x-%04x-%04x-%012x" */ + char uuid[36]; + + __u32 n_mux_regs; + __u32 n_boolean_regs; + __u32 n_flex_regs; + + __u64 __user mux_regs_ptr; + __u64 __user boolean_regs_ptr; + __u64 __user flex_regs_ptr; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 26c54f6d595d..ad4eb2863e70 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd { __u32 size; /* in, cmdstream size */ __u32 pad; __u32 nr_relocs; /* in, number of submit_reloc's */ - __u64 __user relocs; /* in, ptr to array of submit_reloc's */ + __u64 relocs; /* in, ptr to array of submit_reloc's */ }; /* Each buffer referenced elsewhere in the cmdstream submit (ie. the @@ -215,8 +215,8 @@ struct drm_msm_gem_submit { __u32 fence; /* out */ __u32 nr_bos; /* in, number of submit_bo's */ __u32 nr_cmds; /* in, number of submit_cmd's */ - __u64 __user bos; /* in, ptr to array of submit_bo's */ - __u64 __user cmds; /* in, ptr to array of submit_cmd's */ + __u64 bos; /* in, ptr to array of submit_bo's */ + __u64 cmds; /* in, ptr to array of submit_cmd's */ __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */ }; diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h index 7eef42213051..880999d2d863 100644 --- a/include/uapi/drm/qxl_drm.h +++ b/include/uapi/drm/qxl_drm.h @@ -80,8 +80,8 @@ struct drm_qxl_reloc { }; struct drm_qxl_command { - __u64 __user command; /* void* */ - __u64 __user relocs; /* struct drm_qxl_reloc* */ + __u64 command; /* void* */ + __u64 relocs; /* struct drm_qxl_reloc* */ __u32 type; __u32 command_size; __u32 relocs_num; @@ -91,7 +91,7 @@ struct drm_qxl_command { struct drm_qxl_execbuffer { __u32 flags; /* for future use */ __u32 commands_num; - __u64 __user commands; /* struct drm_qxl_command* */ + __u64 commands; /* struct drm_qxl_command* */ }; struct drm_qxl_update_area { diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h index 6ac4c5c014cb..afae87004963 100644 --- a/include/uapi/drm/vc4_drm.h +++ b/include/uapi/drm/vc4_drm.h @@ -40,6 +40,7 @@ extern "C" { #define DRM_VC4_GET_PARAM 0x07 #define DRM_VC4_SET_TILING 0x08 #define DRM_VC4_GET_TILING 0x09 +#define DRM_VC4_LABEL_BO 0x0a #define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl) #define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno) @@ -51,6 +52,7 @@ extern "C" { #define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param) #define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling) #define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling) +#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo) struct drm_vc4_submit_rcl_surface { __u32 hindex; /* Handle index, or ~0 if not present. */ @@ -153,6 +155,16 @@ struct drm_vc4_submit_cl { __u32 pad:24; #define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0) +/* By default, the kernel gets to choose the order that the tiles are + * rendered in. If this is set, then the tiles will be rendered in a + * raster order, with the right-to-left vs left-to-right and + * top-to-bottom vs bottom-to-top dictated by + * VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. 
This allows overlapping + * blits to be implemented using the 3D engine. + */ +#define VC4_SUBMIT_CL_FIXED_RCL_ORDER (1 << 1) +#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X (1 << 2) +#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y (1 << 3) __u32 flags; /* Returned value of the seqno of this render job (for the @@ -292,6 +304,7 @@ struct drm_vc4_get_hang_state { #define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3 #define DRM_VC4_PARAM_SUPPORTS_ETC1 4 #define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5 +#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6 struct drm_vc4_get_param { __u32 param; @@ -311,6 +324,15 @@ struct drm_vc4_set_tiling { __u64 modifier; }; +/** + * struct drm_vc4_label_bo - Attach a name to a BO for debug purposes. + */ +struct drm_vc4_label_bo { + __u32 handle; + __u32 len; + __u64 name; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index d9dfde9aa757..0bc784f5e0db 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -297,13 +297,17 @@ union drm_vmw_surface_reference_arg { * @version: Allows expanding the execbuf ioctl parameters without breaking * backwards compatibility, since user-space will always tell the kernel * which version it uses. - * @flags: Execbuf flags. None currently. + * @flags: Execbuf flags. + * @imported_fence_fd: FD for a fence imported from another device * * Argument to the DRM_VMW_EXECBUF Ioctl. */ #define DRM_VMW_EXECBUF_VERSION 2 +#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0) +#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1) + struct drm_vmw_execbuf_arg { __u64 commands; __u32 command_size; @@ -312,7 +316,7 @@ struct drm_vmw_execbuf_arg { __u32 version; __u32 flags; __u32 context_handle; - __u32 pad64; + __s32 imported_fence_fd; }; /** @@ -328,6 +332,7 @@ struct drm_vmw_execbuf_arg { * @passed_seqno: The highest seqno number processed by the hardware * so far. This can be used to mark user-space fence objects as signaled, and * to determine whether a fence seqno might be stale. + * @fd: FD associated with the fence, -1 if not exported * @error: This member should've been set to -EFAULT on submission. * The following actions should be taken on completion: * error == -EFAULT: Fence communication failed. The host is synchronized. @@ -345,7 +350,7 @@ struct drm_vmw_fence_rep { __u32 mask; __u32 seqno; __u32 passed_seqno; - __u32 pad64; + __s32 fd; __s32 error; }; diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h index a2d4a8ac94ca..a04adbc70ddf 100644 --- a/include/uapi/linux/aio_abi.h +++ b/include/uapi/linux/aio_abi.h @@ -28,6 +28,7 @@ #define __LINUX__AIO_ABI_H #include <linux/types.h> +#include <linux/fs.h> #include <asm/byteorder.h> typedef __kernel_ulong_t aio_context_t; @@ -62,14 +63,6 @@ struct io_event { __s64 res2; /* secondary result */ }; -#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) -#define PADDED(x,y) x, y -#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) -#define PADDED(x,y) y, x -#else -#error edit for your odd byteorder. -#endif - /* * we always use a 64bit off_t when communicating * with userland. it's up to libraries to do the @@ -79,8 +72,16 @@ struct iocb { /* these are internal to the kernel/libc. */ __u64 aio_data; /* data to be returned in event's data */ - __u32 PADDED(aio_key, aio_rw_flags); - /* the kernel sets aio_key to the req # */ + +#if defined(__BYTE_ORDER) ?
__BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) + __u32 aio_key; /* the kernel sets aio_key to the req # */ + __kernel_rwf_t aio_rw_flags; /* RWF_* flags */ +#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) + __kernel_rwf_t aio_rw_flags; /* RWF_* flags */ + __u32 aio_key; /* the kernel sets aio_key to the req # */ +#else +#error edit for your odd byteorder. +#endif /* common fields */ __u16 aio_lio_opcode; /* see IOCB_CMD_ above */ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index b7495d05e8de..56235dddea7d 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -358,13 +358,25 @@ struct fscrypt_key { #define SYNC_FILE_RANGE_WRITE 2 #define SYNC_FILE_RANGE_WAIT_AFTER 4 -/* flags for preadv2/pwritev2: */ -#define RWF_HIPRI 0x00000001 /* high priority request, poll if possible */ -#define RWF_DSYNC 0x00000002 /* per-IO O_DSYNC */ -#define RWF_SYNC 0x00000004 /* per-IO O_SYNC */ -#define RWF_NOWAIT 0x00000008 /* per-IO, return -EAGAIN if operation would block */ - -#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC |\ - RWF_NOWAIT) +/* + * Flags for preadv2/pwritev2: + */ + +typedef int __bitwise __kernel_rwf_t; + +/* high priority request, poll if possible */ +#define RWF_HIPRI ((__force __kernel_rwf_t)0x00000001) + +/* per-IO O_DSYNC */ +#define RWF_DSYNC ((__force __kernel_rwf_t)0x00000002) + +/* per-IO O_SYNC */ +#define RWF_SYNC ((__force __kernel_rwf_t)0x00000004) + +/* per-IO, return -EAGAIN if operation would block */ +#define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008) + +/* mask of flags supported by the kernel */ +#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT) #endif /* _UAPI_LINUX_FS_H */ diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index d6833426fdef..7b4567bacfc2 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -232,6 +232,35 @@ struct kfd_ioctl_wait_events_args { uint32_t wait_result; /* from KFD */ }; +struct kfd_ioctl_set_scratch_backing_va_args { + uint64_t va_addr; /* to KFD */ + uint32_t gpu_id; /* to KFD */ + uint32_t pad; +}; + +struct kfd_ioctl_get_tile_config_args { + /* to KFD: pointer to tile array */ + uint64_t tile_config_ptr; + /* to KFD: pointer to macro tile array */ + uint64_t macro_tile_config_ptr; + /* to KFD: array size allocated by user mode + * from KFD: array size filled by kernel + */ + uint32_t num_tile_configs; + /* to KFD: array size allocated by user mode + * from KFD: array size filled by kernel + */ + uint32_t num_macro_tile_configs; + + uint32_t gpu_id; /* to KFD */ + uint32_t gb_addr_config; /* from KFD */ + uint32_t num_banks; /* from KFD */ + uint32_t num_ranks; /* from KFD */ + /* struct size can be extended later if needed + * without breaking ABI compatibility + */ +}; + #define AMDKFD_IOCTL_BASE 'K' #define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) #define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) @@ -286,7 +315,13 @@ struct kfd_ioctl_wait_events_args { #define AMDKFD_IOC_DBG_WAVE_CONTROL \ AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args) +#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \ + AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args) + +#define AMDKFD_IOC_GET_TILE_CONFIG \ + AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args) + #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x11 +#define AMDKFD_COMMAND_END 0x13 #endif diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h index 
a3960f98679c..c8125ec1f4f2 100644 --- a/include/uapi/linux/loop.h +++ b/include/uapi/linux/loop.h @@ -22,7 +22,6 @@ enum { LO_FLAGS_AUTOCLEAR = 4, LO_FLAGS_PARTSCAN = 8, LO_FLAGS_DIRECT_IO = 16, - LO_FLAGS_BLOCKSIZE = 32, }; #include <asm/posix_types.h> /* for __kernel_old_dev_t */ @@ -60,8 +59,6 @@ struct loop_info64 { __u64 lo_init[2]; }; -#define LO_INFO_BLOCKSIZE(l) (l)->lo_init[0] - /* * Loop filter types */ diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h index e0b108bd2624..6d47b3249d8a 100644 --- a/include/uapi/linux/membarrier.h +++ b/include/uapi/linux/membarrier.h @@ -40,14 +40,33 @@ * (non-running threads are de facto in such a * state). This covers threads from all processes * running on the system. This command returns 0. + * @MEMBARRIER_CMD_PRIVATE_EXPEDITED: + * Execute a memory barrier on each running + * thread belonging to the same process as the current + * thread. Upon return from system call, the + * caller thread is ensured that all its running + * thread siblings have passed through a state + * where all memory accesses to user-space + * addresses match program order between entry + * to and return from the system call + * (non-running threads are de facto in such a + * state). This only covers threads from the + * same processes as the caller thread. This + * command returns 0. The "expedited" commands + * complete faster than the non-expedited ones; + * they never block, but have the downside of + * causing extra overhead. * * Command to be passed to the membarrier system call. The commands need to * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to * the value 0. */ enum membarrier_cmd { - MEMBARRIER_CMD_QUERY = 0, - MEMBARRIER_CMD_SHARED = (1 << 0), + MEMBARRIER_CMD_QUERY = 0, + MEMBARRIER_CMD_SHARED = (1 << 0), + /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */ + /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */ + MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3), }; #endif /* _UAPI_LINUX_MEMBARRIER_H */ diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index 6d3c54264d8e..3f03567631cb 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -145,43 +145,6 @@ struct nd_cmd_clear_error { __u64 cleared; } __packed; -struct nd_cmd_trans_spa { - __u64 spa; - __u32 status; - __u8 flags; - __u8 _reserved[3]; - __u64 trans_length; - __u32 num_nvdimms; - struct nd_nvdimm_device { - __u32 nfit_device_handle; - __u32 _reserved; - __u64 dpa; - } __packed devices[0]; - -} __packed; - -struct nd_cmd_ars_err_inj { - __u64 err_inj_spa_range_base; - __u64 err_inj_spa_range_length; - __u8 err_inj_options; - __u32 status; -} __packed; - -struct nd_cmd_ars_err_inj_clr { - __u64 err_inj_clr_spa_range_base; - __u64 err_inj_clr_spa_range_length; - __u32 status; -} __packed; - -struct nd_cmd_ars_err_inj_stat { - __u32 status; - __u32 inj_err_rec_count; - struct nd_error_stat_query_record { - __u64 err_inj_stat_spa_range_base; - __u64 err_inj_stat_spa_range_length; - } __packed record[0]; -} __packed; - enum { ND_CMD_IMPLEMENTED = 0, diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index b1c0b187acfe..140ae638cfd6 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -139,8 +139,9 @@ enum perf_event_sample_format { PERF_SAMPLE_IDENTIFIER = 1U << 16, PERF_SAMPLE_TRANSACTION = 1U << 17, PERF_SAMPLE_REGS_INTR = 1U << 18, + PERF_SAMPLE_PHYS_ADDR = 1U << 19, - PERF_SAMPLE_MAX = 1U << 19, /* non-ABI */ + PERF_SAMPLE_MAX = 1U << 20,
/* non-ABI */ }; /* @@ -174,6 +175,8 @@ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */ PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */ + PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */ + PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; @@ -198,9 +201,30 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, + PERF_SAMPLE_BRANCH_TYPE_SAVE = + 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; +/* + * Common flow change classification + */ +enum { + PERF_BR_UNKNOWN = 0, /* unknown */ + PERF_BR_COND = 1, /* conditional */ + PERF_BR_UNCOND = 2, /* unconditional */ + PERF_BR_IND = 3, /* indirect */ + PERF_BR_CALL = 4, /* function call */ + PERF_BR_IND_CALL = 5, /* indirect function call */ + PERF_BR_RET = 6, /* function return */ + PERF_BR_SYSCALL = 7, /* syscall */ + PERF_BR_SYSRET = 8, /* syscall return */ + PERF_BR_COND_CALL = 9, /* conditional function call */ + PERF_BR_COND_RET = 10, /* conditional function return */ + PERF_BR_MAX, +}; + #define PERF_SAMPLE_BRANCH_PLM_ALL \ (PERF_SAMPLE_BRANCH_USER|\ PERF_SAMPLE_BRANCH_KERNEL|\ @@ -791,6 +815,7 @@ enum perf_event_type { * { u64 transaction; } && PERF_SAMPLE_TRANSACTION * { u64 abi; # enum perf_sample_regs_abi * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR + * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR * }; */ PERF_RECORD_SAMPLE = 9, @@ -931,14 +956,20 @@ union perf_mem_data_src { mem_snoop:5, /* snoop mode */ mem_lock:2, /* lock instr */ mem_dtlb:7, /* tlb access */ - mem_rsvd:31; + mem_lvl_num:4, /* memory hierarchy level number */ + mem_remote:1, /* remote */ + mem_snoopx:2, /* snoop mode, ext */ + mem_rsvd:24; }; }; #elif defined(__BIG_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { - __u64 mem_rsvd:31, + __u64 mem_rsvd:24, + mem_snoopx:2, /* snoop mode, ext */ + mem_remote:1, /* remote */ + mem_lvl_num:4, /* memory hierarchy level number */ mem_dtlb:7, /* tlb access */ mem_lock:2, /* lock instr */ mem_snoop:5, /* snoop mode */ @@ -975,6 +1006,22 @@ union perf_mem_data_src { #define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */ #define PERF_MEM_LVL_SHIFT 5 +#define PERF_MEM_REMOTE_REMOTE 0x01 /* Remote */ +#define PERF_MEM_REMOTE_SHIFT 37 + +#define PERF_MEM_LVLNUM_L1 0x01 /* L1 */ +#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */ +#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */ +#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */ +/* 5-0xa available */ +#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */ +#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */ +#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */ +#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */ +#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */ + +#define PERF_MEM_LVLNUM_SHIFT 33 + /* snoop mode */ #define PERF_MEM_SNOOP_NA 0x01 /* not available */ #define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */ @@ -983,6 +1030,10 @@ union perf_mem_data_src { #define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */ #define PERF_MEM_SNOOP_SHIFT 19 +#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ +/* 1 free */ +#define PERF_MEM_SNOOPX_SHIFT 37 + /* locked instruction */ #define PERF_MEM_LOCK_NA 0x01 /* not available */ #define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */ @@ -1015,6 +1066,7 @@ union perf_mem_data_src { * in_tx: running in a hardware transaction * abort: aborting a hardware transaction * cycles: cycles from last branch (or 0 if not supported) + * type: branch 
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
new file mode 100644
index 000000000000..842792eae383
--- /dev/null
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IB_USER_IOCTL_VERBS_H
+#define IB_USER_IOCTL_VERBS_H
+
+#include <rdma/rdma_user_ioctl.h>
+
+#define UVERBS_UDATA_DRIVER_DATA_NS	1
+#define UVERBS_UDATA_DRIVER_DATA_FLAG	(1UL << UVERBS_ID_NS_SHIFT)
+
+enum uverbs_default_objects {
+	UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */
+	UVERBS_OBJECT_PD,
+	UVERBS_OBJECT_COMP_CHANNEL,
+	UVERBS_OBJECT_CQ,
+	UVERBS_OBJECT_QP,
+	UVERBS_OBJECT_SRQ,
+	UVERBS_OBJECT_AH,
+	UVERBS_OBJECT_MR,
+	UVERBS_OBJECT_MW,
+	UVERBS_OBJECT_FLOW,
+	UVERBS_OBJECT_XRCD,
+	UVERBS_OBJECT_RWQ_IND_TBL,
+	UVERBS_OBJECT_WQ,
+	UVERBS_OBJECT_LAST,
+};
+
+enum {
+	UVERBS_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG,
+	UVERBS_UHW_OUT,
+};
+
+enum uverbs_create_cq_cmd_attr_ids {
+	CREATE_CQ_HANDLE,
+	CREATE_CQ_CQE,
+	CREATE_CQ_USER_HANDLE,
+	CREATE_CQ_COMP_CHANNEL,
+	CREATE_CQ_COMP_VECTOR,
+	CREATE_CQ_FLAGS,
+	CREATE_CQ_RESP_CQE,
+};
+
+enum uverbs_destroy_cq_cmd_attr_ids {
+	DESTROY_CQ_HANDLE,
+	DESTROY_CQ_RESP,
+};
+
+enum uverbs_actions_cq_ops {
+	UVERBS_CQ_CREATE,
+	UVERBS_CQ_DESTROY,
+};
+
+#endif
+
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 270c350bedc6..9a0b6479fe0c 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -236,6 +236,20 @@ struct ib_uverbs_rss_caps {
 	__u32 reserved;
 };
 
+struct ib_uverbs_tm_caps {
+	/* Max size of rendezvous request message */
+	__u32 max_rndv_hdr_size;
+	/* Max number of entries in tag matching list */
+	__u32 max_num_tags;
+	/* TM flags */
+	__u32 flags;
+	/* Max number of outstanding list operations */
+	__u32 max_ops;
+	/* Max number of SGE in tag matching entry */
+	__u32 max_sge;
+	__u32 reserved;
+};
+
 struct ib_uverbs_ex_query_device_resp {
 	struct ib_uverbs_query_device_resp base;
 	__u32 comp_mask;
@@ -247,6 +261,7 @@ struct ib_uverbs_ex_query_device_resp {
 	struct ib_uverbs_rss_caps rss_caps;
 	__u32 max_wq_type_rq;
 	__u32 raw_packet_caps;
+	struct ib_uverbs_tm_caps xrq_caps;
 };
 
 struct ib_uverbs_query_port {
@@ -578,7 +593,7 @@ struct ib_uverbs_ex_create_qp {
 	__u32 comp_mask;
 	__u32 create_flags;
 	__u32 rwq_ind_tbl_handle;
-	__u32 reserved1;
+	__u32 source_qpn;
 };
 
 struct ib_uverbs_open_qp {
@@ -1024,7 +1039,7 @@ struct ib_uverbs_create_xsrq {
 	__u32 max_wr;
 	__u32 max_sge;
 	__u32 srq_limit;
-	__u32 reserved;
+	__u32 max_num_tags;
 	__u32 xrcd_handle;
 	__u32 cq_handle;
 	__u64 driver_data[0];
diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h
index af431752655c..c55f60e05f86 100644
--- a/include/uapi/rdma/mlx4-abi.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -95,13 +95,63 @@ struct mlx4_ib_create_srq_resp {
 	__u32 reserved;
 };
 
+struct mlx4_ib_create_qp_rss {
+	__u64 rx_hash_fields_mask;
+	__u8 rx_hash_function;
+	__u8 reserved[7];
+	__u8 rx_hash_key[40];
+	__u32 comp_mask;
+	__u32 reserved1;
+};
+
 struct mlx4_ib_create_qp {
 	__u64 buf_addr;
 	__u64 db_addr;
 	__u8 log_sq_bb_count;
 	__u8 log_sq_stride;
 	__u8 sq_no_prefetch;
-	__u8 reserved[5];
+	__u8 reserved;
+	__u32 inl_recv_sz;
+};
+
+struct mlx4_ib_create_wq {
+	__u64 buf_addr;
+	__u64 db_addr;
+	__u8 log_range_size;
+	__u8 reserved[3];
+	__u32 comp_mask;
+};
+
+struct mlx4_ib_modify_wq {
+	__u32 comp_mask;
+	__u32 reserved;
+};
+
+struct mlx4_ib_create_rwq_ind_tbl_resp {
+	__u32 response_length;
+	__u32 reserved;
+};
+
+/* RX Hash function flags */
+enum mlx4_ib_rx_hash_function_flags {
+	MLX4_IB_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
+};
+
+/*
+ * RX Hash flags. These flags select which fields of an incoming packet
+ * participate in RX Hash. Each flag represents one packet field; when
+ * the flag is set, that field is included in the RX Hash calculation.
+ */
+enum mlx4_ib_rx_hash_fields {
+	MLX4_IB_RX_HASH_SRC_IPV4 = 1 << 0,
+	MLX4_IB_RX_HASH_DST_IPV4 = 1 << 1,
+	MLX4_IB_RX_HASH_SRC_IPV6 = 1 << 2,
+	MLX4_IB_RX_HASH_DST_IPV6 = 1 << 3,
+	MLX4_IB_RX_HASH_SRC_PORT_TCP = 1 << 4,
+	MLX4_IB_RX_HASH_DST_PORT_TCP = 1 << 5,
+	MLX4_IB_RX_HASH_SRC_PORT_UDP = 1 << 6,
+	MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7
 };
 
 #endif /* MLX4_ABI_USER_H */
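Provider libraries hand this layout to the kernel when creating an RSS QP. A hypothetical sketch of filling the new command; the helper name and key source are illustrative, not part of the ABI:

	#include <string.h>
	#include <rdma/mlx4-abi.h>

	/*
	 * Illustrative helper: select Toeplitz hashing over the IPv4/TCP
	 * 4-tuple. The 40-byte key is caller-provided, as is usual for
	 * Toeplitz RSS.
	 */
	static void mlx4_fill_rss_cmd(struct mlx4_ib_create_qp_rss *cmd,
				      const __u8 key[40])
	{
		memset(cmd, 0, sizeof(*cmd));
		cmd->rx_hash_function = MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
		cmd->rx_hash_fields_mask = MLX4_IB_RX_HASH_SRC_IPV4 |
					   MLX4_IB_RX_HASH_DST_IPV4 |
					   MLX4_IB_RX_HASH_SRC_PORT_TCP |
					   MLX4_IB_RX_HASH_DST_PORT_TCP;
		memcpy(cmd->rx_hash_key, key, sizeof(cmd->rx_hash_key));
	}

Which field combinations the device can actually hash on is hardware-dependent; the kernel rejects masks it cannot honor.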
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 0b3d30837a9f..1791bf123ba9 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -168,6 +168,28 @@ struct mlx5_packet_pacing_caps {
 	__u32 reserved;
 };
 
+enum mlx5_ib_mpw_caps {
+	MPW_RESERVED = 1 << 0,
+	MLX5_IB_ALLOW_MPW = 1 << 1,
+	MLX5_IB_SUPPORT_EMPW = 1 << 2,
+};
+
+enum mlx5_ib_sw_parsing_offloads {
+	MLX5_IB_SW_PARSING = 1 << 0,
+	MLX5_IB_SW_PARSING_CSUM = 1 << 1,
+	MLX5_IB_SW_PARSING_LSO = 1 << 2,
+};
+
+struct mlx5_ib_sw_parsing_caps {
+	__u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */
+
+	/* Corresponding bit will be set if qp type from
+	 * 'enum ib_qp_type' is supported, e.g.
+	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
+	 */
+	__u32 supported_qpts;
+};
+
 struct mlx5_ib_query_device_resp {
 	__u32 comp_mask;
 	__u32 response_length;
@@ -177,6 +199,7 @@ struct mlx5_ib_query_device_resp {
 	struct mlx5_packet_pacing_caps packet_pacing_caps;
 	__u32 mlx5_ib_support_multi_pkt_send_wqes;
 	__u32 reserved;
+	struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
 };
 
 struct mlx5_ib_create_cq {
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index 75c270d839c8..54b64357ab24 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -49,6 +49,9 @@ struct qedr_alloc_ucontext_resp {
 	__u32 sges_per_recv_wr;
 	__u32 sges_per_srq_wr;
 	__u32 max_cqes;
+	__u8 dpm_enabled;
+	__u8 wids_enabled;
+	__u16 wid_count;
 };
 
 struct qedr_alloc_pd_ureq {
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 02fe8390c18f..861440a87e7c 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -8,7 +8,7 @@ enum {
 	RDMA_NL_IWCM,
 	RDMA_NL_RSVD,
 	RDMA_NL_LS, /* RDMA Local Services */
-	RDMA_NL_I40IW,
+	RDMA_NL_NLDEV, /* RDMA device interface */
 	RDMA_NL_NUM_CLIENTS
 };
@@ -222,4 +222,86 @@ struct rdma_nla_ls_gid {
 	__u8 gid[16];
 };
 
+enum rdma_nldev_command {
+	RDMA_NLDEV_CMD_UNSPEC,
+
+	RDMA_NLDEV_CMD_GET, /* can dump */
+	RDMA_NLDEV_CMD_SET,
+	RDMA_NLDEV_CMD_NEW,
+	RDMA_NLDEV_CMD_DEL,
+
+	RDMA_NLDEV_CMD_PORT_GET, /* can dump */
+	RDMA_NLDEV_CMD_PORT_SET,
+	RDMA_NLDEV_CMD_PORT_NEW,
+	RDMA_NLDEV_CMD_PORT_DEL,
+
+	RDMA_NLDEV_NUM_OPS
+};
+
+enum rdma_nldev_attr {
+	/* don't change the order or add anything between, this is ABI! */
+	RDMA_NLDEV_ATTR_UNSPEC,
+
+	/* Identifier for ib_device */
+	RDMA_NLDEV_ATTR_DEV_INDEX, /* u32 */
+
+	RDMA_NLDEV_ATTR_DEV_NAME, /* string */
+	/*
+	 * Device index together with port index are identifiers
+	 * for port/link properties.
+	 *
+	 * For the RDMA_NLDEV_CMD_GET command, port index will return the
+	 * number of available ports in the ib_device, while for
+	 * port-specific operations it will be the real port index as it
+	 * appears in sysfs. Port index follows sysfs notation and starts
+	 * from 1 for the first port.
+	 */
+	RDMA_NLDEV_ATTR_PORT_INDEX, /* u32 */
+
+	/*
+	 * Device and port capabilities
+	 */
+	RDMA_NLDEV_ATTR_CAP_FLAGS, /* u64 */
+
+	/*
+	 * FW version
+	 */
+	RDMA_NLDEV_ATTR_FW_VERSION, /* string */
+
+	/*
+	 * Node GUID (in host byte order) associated with the RDMA device.
+	 */
+	RDMA_NLDEV_ATTR_NODE_GUID, /* u64 */
+
+	/*
+	 * System image GUID (in host byte order) associated with
+	 * this RDMA device and other devices which are part of a
+	 * single system.
+	 */
+	RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, /* u64 */
+
+	/*
+	 * Subnet prefix (in host byte order)
+	 */
+	RDMA_NLDEV_ATTR_SUBNET_PREFIX, /* u64 */
+
+	/*
+	 * Local Identifier (LID).
+	 * According to the IB specification, it is a 16-bit address
+	 * assigned by the Subnet Manager. Extended to be 32-bit for
+	 * OmniPath users.
+	 */
+	RDMA_NLDEV_ATTR_LID, /* u32 */
+	RDMA_NLDEV_ATTR_SM_LID, /* u32 */
+
+	/*
+	 * LID mask control (LMC)
+	 */
+	RDMA_NLDEV_ATTR_LMC, /* u8 */
+
+	RDMA_NLDEV_ATTR_PORT_STATE, /* u8 */
+	RDMA_NLDEV_ATTR_PORT_PHYS_STATE, /* u8 */
+
+	RDMA_NLDEV_ATTR_DEV_NODE_TYPE, /* u8 */
+
+	RDMA_NLDEV_ATTR_MAX
+};
 #endif /* _UAPI_RDMA_NETLINK_H */
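User space reaches the new nldev operations over the existing NETLINK_RDMA socket; the client and op are packed into nlmsg_type by the RDMA_NL_GET_TYPE() helper defined earlier in rdma_netlink.h, outside the hunk shown. An illustrative request for a dump of all RDMA devices, with reply parsing omitted:

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <rdma/rdma_netlink.h>

	/* Send a device-dump request; the caller recv()s replies on the
	 * returned socket. Returns -1 on failure. */
	static int nldev_request_dump(void)
	{
		struct nlmsghdr nlh;
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);

		if (fd < 0)
			return -1;

		memset(&nlh, 0, sizeof(nlh));
		nlh.nlmsg_len = NLMSG_LENGTH(0);
		nlh.nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						  RDMA_NLDEV_CMD_GET);
		nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;

		if (send(fd, &nlh, nlh.nlmsg_len, 0) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}

Replies come back as RDMA_NLDEV_ATTR_* netlink attributes, in the fixed order the enum above declares as ABI.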
diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h
index 9388125ad51b..165a27e969d5 100644
--- a/include/uapi/rdma/rdma_user_ioctl.h
+++ b/include/uapi/rdma/rdma_user_ioctl.h
@@ -43,6 +43,39 @@
 /* Legacy name, for user-space applications which already use it */
 #define IB_IOCTL_MAGIC RDMA_IOCTL_MAGIC
 
+#define RDMA_VERBS_IOCTL \
+	_IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
+
+#define UVERBS_ID_NS_MASK 0xF000
+#define UVERBS_ID_NS_SHIFT 12
+
+enum {
+	/* User input */
+	UVERBS_ATTR_F_MANDATORY = 1U << 0,
+	/*
+	 * Valid output bit should be ignored and considered set in
+	 * mandatory fields. This bit is kernel output.
+	 */
+	UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
+};
+
+struct ib_uverbs_attr {
+	__u16 attr_id;	/* command specific type attribute */
+	__u16 len;	/* only for pointers */
+	__u16 flags;	/* combination of UVERBS_ATTR_F_XXXX */
+	__u16 reserved;
+	__u64 data;	/* ptr to command, inline data or idr/fd */
+};
+
+struct ib_uverbs_ioctl_hdr {
+	__u16 length;
+	__u16 object_id;
+	__u16 method_id;
+	__u16 num_attrs;
+	__u64 reserved;
+	struct ib_uverbs_attr attrs[0];
+};
+
 /*
  * General blocks assignments.
  * It is closed on purpose; do not expose it to user space.
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index c8c1d2d6df4d..c6569b0032ec 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -125,7 +125,8 @@ enum pvrdma_wc_flags {
 	PVRDMA_WC_IP_CSUM_OK = 1 << 3,
 	PVRDMA_WC_WITH_SMAC = 1 << 4,
 	PVRDMA_WC_WITH_VLAN = 1 << 5,
-	PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_VLAN,
+	PVRDMA_WC_WITH_NETWORK_HDR_TYPE = 1 << 6,
+	PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
 };
 
 struct pvrdma_alloc_ucontext_resp {
@@ -283,7 +284,8 @@ struct pvrdma_cqe {
 	__u8 dlid_path_bits;
 	__u8 port_num;
 	__u8 smac[6];
-	__u8 reserved2[7]; /* Pad to next power of 2 (64). */
+	__u8 network_hdr_type;
+	__u8 reserved2[6]; /* Pad to next power of 2 (64). */
 };
 
 #endif /* __VMW_PVRDMA_ABI_H__ */
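Putting the ioctl pieces together: a verbs ioctl is a fixed ib_uverbs_ioctl_hdr followed by a flat attribute array, with object, method and attribute ids taken from ib_user_ioctl_verbs.h. The sketch below is illustrative only; the struct and function names are ours, and whether a given kernel accepts this method depends on how much of the uverbs ioctl dispatcher is actually wired up at this point in the series.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <rdma/rdma_user_ioctl.h>
	#include <rdma/ib_user_ioctl_verbs.h>

	/* Header plus two inline attributes; relies on attrs[0] being a
	 * zero-length array so header and array stay contiguous. */
	struct create_cq_msg {
		struct ib_uverbs_ioctl_hdr hdr;
		struct ib_uverbs_attr attrs[2];
	};

	static int create_cq(int cmd_fd, __u32 cqe)
	{
		struct create_cq_msg msg;

		memset(&msg, 0, sizeof(msg));
		msg.hdr.length = sizeof(msg);
		msg.hdr.object_id = UVERBS_OBJECT_CQ;
		msg.hdr.method_id = UVERBS_CQ_CREATE;
		msg.hdr.num_attrs = 2;

		msg.attrs[0].attr_id = CREATE_CQ_CQE;
		msg.attrs[0].flags = UVERBS_ATTR_F_MANDATORY;
		msg.attrs[0].data = cqe;	/* small values ride inline in .data */

		msg.attrs[1].attr_id = CREATE_CQ_COMP_VECTOR;
		msg.attrs[1].flags = UVERBS_ATTR_F_MANDATORY;
		msg.attrs[1].data = 0;

		return ioctl(cmd_fd, RDMA_VERBS_IOCTL, &msg);
	}

Note how the UVERBS_ID_NS_MASK/UVERBS_ID_NS_SHIFT split reserves the top four bits of each 16-bit id as a namespace, which is what lets UVERBS_UDATA_DRIVER_DATA_FLAG mark driver-specific attributes without colliding with the common ids.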