Lines Matching full:work in include/linux/workqueue.h

3  * workqueue.h --- work queue handling for Linux.
20 * The first word is the work queue pointer and the flags rolled into one.
23 #define work_data_bits(work) ((unsigned long *)(&(work)->data))
26 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
27 WORK_STRUCT_INACTIVE_BIT, /* work item is inactive */
29 WORK_STRUCT_LINKED_BIT, /* next work is linked to this one */
66 * When a work item is off queue, the high bits encode off-queue flags and the last pool it was on.
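Taken together, these fragments describe how struct work_struct packs its ->data word: the low bits carry status flags such as WORK_STRUCT_PENDING_BIT, and the remaining bits hold queue or off-queue information. A minimal sketch of testing a flag through this layout, mirroring what the work_pending() macro shown further down does:

    #include <linux/workqueue.h>

    /* Test the pending flag exactly as work_pending() does. */
    static bool my_work_is_pending(struct work_struct *work)
    {
        return test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work));
    }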
114 struct work_struct work;
117 /* target workqueue and CPU ->timer uses to queue ->work */
123 struct work_struct work;
126 /* target workqueue ->rcu uses to queue ->work */
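These member fragments belong to struct delayed_work and struct rcu_work. Both embed a plain struct work_struct next to their deferral mechanism, a timer or an RCU head. A sketch of the shapes implied by the fragments (reconstructed from the mainline header; field layout may differ across kernel versions):

    struct delayed_work {
        struct work_struct work;
        struct timer_list timer;

        /* target workqueue and CPU ->timer uses to queue ->work */
        struct workqueue_struct *wq;
        int cpu;
    };

    struct rcu_work {
        struct work_struct work;
        struct rcu_head rcu;

        /* target workqueue ->rcu uses to queue ->work */
        struct workqueue_struct *wq;
    };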
155 * Work items in this workqueue are affine to these CPUs and not allowed to execute on other CPUs.
196 * CPU pods are used to improve execution locality of unbound work items.
206 * @ordered: work items must be executed one by one in queueing order
211 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
213 return container_of(work, struct delayed_work, work);
216 static inline struct rcu_work *to_rcu_work(struct work_struct *work)
218 return container_of(work, struct rcu_work, work);
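Both helpers are container_of() downcasts and are only valid when the work_struct really is embedded in the named wrapper type. A hypothetical callback showing the usual two-step recovery (my_device and its dwork member are illustrative, not from the header):

    struct my_device {
        struct delayed_work dwork;
        unsigned long poll_count;
    };

    static void my_device_poll(struct work_struct *work)
    {
        /* Recover the delayed_work, then the enclosing device. */
        struct delayed_work *dwork = to_delayed_work(work);
        struct my_device *dev = container_of(dwork, struct my_device, dwork);

        dev->poll_count++;
    }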
222 struct work_struct work;
245 .work = __WORK_INITIALIZER((n).work, (f)), \
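This __WORK_INITIALIZER() fragment is what the static declaration macros expand to. For illustration, compile-time work items are typically declared like this (names illustrative):

    static void flush_logs(struct work_struct *work);

    /* Statically initialized; no runtime INIT_WORK() call is needed. */
    static DECLARE_WORK(log_work, flush_logs);
    static DECLARE_DELAYED_WORK(log_flush_dwork, flush_logs);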
260 extern void __init_work(struct work_struct *work, int onstack);
261 extern void destroy_work_on_stack(struct work_struct *work);
262 extern void destroy_delayed_work_on_stack(struct delayed_work *work);
263 static inline unsigned int work_static(struct work_struct *work)
265 return *work_data_bits(work) & WORK_STRUCT_STATIC;
268 static inline void __init_work(struct work_struct *work, int onstack) { }
269 static inline void destroy_work_on_stack(struct work_struct *work) { }
270 static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
271 static inline unsigned int work_static(struct work_struct *work) { return 0; }
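These are the CONFIG_DEBUG_OBJECTS_WORK and stub builds of the same helpers: with debug objects enabled, a work item living on the stack must be initialized with an _ONSTACK variant and destroyed explicitly. A sketch of the pattern these calls exist for:

    static void once_fn(struct work_struct *work)
    {
        /* Work body; must finish before the caller's stack frame dies. */
    }

    static void run_work_from_stack(void)
    {
        struct work_struct w;

        INIT_WORK_ONSTACK(&w, once_fn);
        schedule_work(&w);
        flush_work(&w);             /* wait until once_fn() returns */
        destroy_work_on_stack(&w);  /* pairs with the _ONSTACK init */
    }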
275 * initialize all of a work item in one go
278 * assignment of the work data initializer allows the compiler to generate better code.
318 INIT_WORK(&(_work)->work, (_func)); \
326 INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
345 INIT_WORK(&(_work)->work, (_func))
348 INIT_WORK_ONSTACK(&(_work)->work, (_func))
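The INIT_WORK() family handles runtime initialization; as the fragments above show, INIT_DELAYED_WORK() and INIT_RCU_WORK() simply initialize the embedded work item plus their timer or RCU head. A typical driver-side use (my_ctx and my_probe are illustrative):

    struct my_ctx {
        struct work_struct work;
        struct delayed_work dwork;
    };

    static void my_work_fn(struct work_struct *work) { }
    static void my_dwork_fn(struct work_struct *work) { }

    static void my_probe(struct my_ctx *ctx)
    {
        INIT_WORK(&ctx->work, my_work_fn);
        INIT_DELAYED_WORK(&ctx->dwork, my_dwork_fn);
    }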
351 * work_pending - Find out whether a work item is currently pending
352 * @work: The work item in question
354 #define work_pending(work) \
355 test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
358 * delayed_work_pending - Find out whether a delayable work item is currently pending
360 * @w: The work item in question
363 work_pending(&(w)->work)
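Both predicates test only WORK_STRUCT_PENDING_BIT, so the answer can be stale by the time the caller acts on it; queueing an already-pending item is a no-op anyway, which makes the check an optimization rather than a correctness tool. Illustrative use:

    /* Benign if we race: a redundant queue attempt just returns false. */
    static void kick_refresh(struct delayed_work *dwork)
    {
        if (!delayed_work_pending(dwork))
            schedule_delayed_work(dwork, msecs_to_jiffies(100));
    }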
387 * however, for example, a per-cpu work item scheduled from an interrupt handler on an idle CPU will force the scheduler to
389 * execute the work item on that CPU breaking the idleness, which in turn may lead to more scheduling choices which are sub-optimal in terms of power consumption.
435 * system_highpri_wq is similar to system_wq but for work items which require WQ_HIGHPRI.
455 * system_bh[_highpri]_wq are convenience interfaces to softirq. BH work items are executed in the queueing CPU's BH context in the queueing order.
475 * @max_active: max in-flight work items, 0 for default
478 * For a per-cpu workqueue, @max_active limits the number of in-flight work items for each CPU, e.g. @max_active of 1 indicates that each CPU can be executing at most one work item for the workqueue.
482 * For unbound workqueues, @max_active limits the number of in-flight work items for the whole system, e.g. @max_active of 16 indicates that there can be at most 16 work items executing for the workqueue in the whole system.
494 * of interdependent work items for the workqueue.
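Putting the two @max_active rules together, a sketch of allocating one workqueue of each flavor (names illustrative; error handling abbreviated):

    static struct workqueue_struct *percpu_wq, *unbound_wq;

    static int __init my_wq_init(void)
    {
        /* Per-cpu: @max_active of 1 => at most one in-flight item per CPU. */
        percpu_wq = alloc_workqueue("my_percpu", 0, 1);

        /* Unbound: @max_active of 16 => at most 16 in flight system-wide. */
        unbound_wq = alloc_workqueue("my_unbound", WQ_UNBOUND, 16);

        if (!percpu_wq || !unbound_wq)
            return -ENOMEM;
        return 0;
    }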
515 * @max_active: max in-flight work items, 0 for default
558 * An ordered workqueue executes at most one work item at any given time in the queued order. They are implemented as unbound workqueues with @max_active of one.
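The usual way to get such a workqueue is alloc_ordered_workqueue(); the single in-flight item is what gives strict queueing-order execution. Sketch:

    static struct workqueue_struct *ordered_wq;

    static int __init my_ordered_init(void)
    {
        /* Work items run strictly one at a time, in queueing order. */
        ordered_wq = alloc_ordered_workqueue("my_ordered", 0);
        return ordered_wq ? 0 : -ENOMEM;
    }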
587 extern bool queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work);
589 extern bool queue_work_node(int node, struct workqueue_struct *wq, struct work_struct *work);
591 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay);
603 extern bool flush_work(struct work_struct *work);
604 extern bool cancel_work(struct work_struct *work);
605 extern bool cancel_work_sync(struct work_struct *work);
611 extern bool disable_work(struct work_struct *work);
612 extern bool disable_work_sync(struct work_struct *work);
613 extern bool enable_work(struct work_struct *work);
628 extern unsigned int work_busy(struct work_struct *work);
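These flush/cancel/disable primitives carry the usual teardown discipline: nothing may still be pending or running when the memory backing the work item goes away. A sketch reusing the illustrative my_ctx from above:

    static void my_remove(struct my_ctx *ctx)
    {
        /*
         * cancel_work_sync() removes a pending item and waits for a
         * running callback to return, so freeing ctx afterwards is safe.
         */
        cancel_work_sync(&ctx->work);
        cancel_delayed_work_sync(&ctx->dwork);
        kfree(ctx);
    }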
637 * queue_work - queue work on a workqueue
639 * @work: work to queue
641 * Returns %false if @work was already on a queue, %true otherwise.
643 * We queue the work to the CPU on which it was submitted, but if the CPU dies it can be processed by another CPU.
648 * Memory-ordering properties: if queue_work() returns %true, all stores preceding the call in program order are guaranteed to be visible from the CPU which will execute @work by the time such work executes, e.g.:
654 *   CPU0: WRITE_ONCE(x, 1);            CPU1: [ @work is being executed ]
655 *   CPU0: r0 = queue_work(wq, work);   CPU1: r1 = READ_ONCE(x);
660 static inline bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
662 return queue_work_on(WORK_CPU_UNBOUND, wq, work);
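The guarantee spelled out above means a producer needs no extra barriers: plain stores made before a queue_work() that returns %true are visible to the callback. Sketch (shared_value and consume_fn are illustrative, and consume_work is assumed to have been INIT_WORK'd to consume_fn):

    static int shared_value;
    static struct work_struct consume_work;

    static void producer(struct workqueue_struct *wq)
    {
        WRITE_ONCE(shared_value, 42);
        /*
         * If this returns true, the store above is guaranteed visible
         * to whichever CPU ends up running consume_fn().
         */
        queue_work(wq, &consume_work);
    }

    static void consume_fn(struct work_struct *work)
    {
        pr_info("saw %d\n", READ_ONCE(shared_value));
    }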
666 * queue_delayed_work - queue work on a workqueue after delay
668 * @dwork: delayable work to queue
681 * mod_delayed_work - modify delay of or queue a delayed work
683 * @dwork: work to queue
696 * schedule_work_on - put work task on a specific cpu
697 * @cpu: cpu to put the work task on
698 * @work: job to be done
702 static inline bool schedule_work_on(int cpu, struct work_struct *work)
704 return queue_work_on(cpu, system_wq, work);
708 * schedule_work - put work task in global workqueue
709 * @work: job to be done
711 * Returns %false if @work was already on the kernel-global workqueue and %true otherwise.
721 static inline bool schedule_work(struct work_struct *work)
723 return queue_work(system_wq, work);
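schedule_work() and schedule_work_on() are thin wrappers that pick system_wq (and optionally a CPU) for the caller. A minimal bottom-half sketch, deferring from a hard-irq handler (my_irq is illustrative):

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    static void bottom_half_fn(struct work_struct *work)
    {
        /* Runs later in process context on system_wq. */
    }
    static DECLARE_WORK(bottom_half, bottom_half_fn);

    static irqreturn_t my_irq(int irq, void *dev_id)
    {
        schedule_work(&bottom_half);    /* safe from hard-irq context */
        return IRQ_HANDLED;
    }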
727 * enable_and_queue_work - Enable and queue a work item on a specific workqueue
729 * @work: The work item to be enabled and queued
732 * providing a convenient way to enable and queue a work item in a single call.
733 * It invokes enable_work() on @work and then queues it if the disable depth
734 * reached 0. Returns %true if the disable depth reached 0 and @work is queued, %false otherwise.
737 * Note that @work is always queued when disable depth reaches zero. If the
738 * desired behavior is queueing only if certain events took place while @work is disabled, the caller should track those events itself and queue conditionally.
743 static inline bool enable_and_queue_work(struct workqueue_struct *wq, struct work_struct *work)
745 if (enable_work(work)) {
746 queue_work(wq, work);
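The disable depth referenced here is a per-work counter: disable_work() raises it and cancels any pending instance, enable_work() lowers it, and enable_and_queue_work() queues unconditionally the moment the depth returns to zero. A sketch of pausing a work item around a reconfiguration:

    static void reconfigure(struct workqueue_struct *wq,
                            struct work_struct *work)
    {
        disable_work_sync(work);    /* depth 1: queueing is now a no-op */

        /* ... safely change state the callback depends on ... */

        /* Depth back to 0: re-enable and queue in a single call. */
        enable_and_queue_work(wq, work);
    }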
792 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
807 * schedule_delayed_work - put work task in global workqueue after delay
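Both schedule_delayed_work() variants queue onto system_wq after @delay jiffies; msecs_to_jiffies() is the usual way to express the interval. A self-rearming poll loop sketch that also exercises to_delayed_work() from earlier:

    static void poll_fn(struct work_struct *work)
    {
        struct delayed_work *dwork = to_delayed_work(work);

        /* ... poll the device ... then rearm after 500 ms. */
        schedule_delayed_work(dwork, msecs_to_jiffies(500));
    }

    static DECLARE_DELAYED_WORK(poll_work, poll_fn);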
833 * A new key is defined for each caller to make sure the work
847 * A new key is defined for each caller to make sure the work