NOTE: At the moment DMA_ATTR_ALLOC_SINGLE_PAGES is only implemented on ARM,
though ARM64 patches will likely be posted soon.
+
+DMA_ATTR_NO_WARN
+----------------
+
+This tells the DMA-mapping subsystem to suppress allocation failure reports
+(similarly to __GFP_NOWARN).
+
+On some architectures allocation failures are reported with error messages
+to the system logs. Although this can help to identify and debug problems,
+drivers which handle failures (eg, by retrying later) gain nothing from
+those reports, and depending on the implementation of the retry mechanism
+the messages can flood the system logs even though nothing is actually wrong.
+
+So, this provides a way for drivers to avoid those error messages on calls
+where allocation failures are not a problem, and shouldn't bother the logs.
+
+NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
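A minimal, hypothetical sketch of a driver that implements its own retry
path and therefore opts out of the failure reports (dma_map_sg_attrs() is
the attrs-aware mapping helper; everything apart from the attribute itself
is illustrative):

	#include <linux/dma-mapping.h>

	static int example_map(struct device *dev, struct scatterlist *sgl,
			       int count)
	{
		int nents;

		/* Mapping failures are handled by retrying later, so stay quiet. */
		nents = dma_map_sg_attrs(dev, sgl, count, DMA_TO_DEVICE,
					 DMA_ATTR_NO_WARN);
		if (!nents)
			return -EAGAIN;	/* caller retries */

		return nents;
	}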
[<ffffffff817db154>] kernel_thread_helper+0x4/0x10
[<ffffffff81066430>] ? finish_task_switch+0x80/0x110
[<ffffffff817d9c04>] ? retint_restore_args+0xe/0xe
- [<ffffffff81097510>] ? __init_kthread_worker+0x70/0x70
+ [<ffffffff81097510>] ? __kthread_init_worker+0x70/0x70
[<ffffffff817db150>] ? gs_change+0xb/0xb
Line 2776 of block/cfq-iosched.c in v3.0-rc5 is as follows:
- ``kmemleak_alloc_recursive`` - as kmemleak_alloc but checks the recursiveness
- ``kmemleak_free_recursive`` - as kmemleak_free but checks the recursiveness
+The following functions take a physical address as the object pointer
+and only perform the corresponding action if the address has a lowmem
+mapping:
+
+- ``kmemleak_alloc_phys``
+- ``kmemleak_free_part_phys``
+- ``kmemleak_not_leak_phys``
+- ``kmemleak_ignore_phys``
+
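As a brief, hedged illustration (the four-argument kmemleak_alloc_phys()
prototype is assumed here), code that hands out raw physical memory from an
early allocator would inform kmemleak along these lines:

	#include <linux/kmemleak.h>

	/* phys/size come from an early physical allocator (illustrative) */
	kmemleak_alloc_phys(phys, size, 0, GFP_KERNEL);

	/* later, if the region is intentionally never freed */
	kmemleak_ignore_phys(phys);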
Dealing with false positives/negatives
--------------------------------------
* including this struct */
__s32 ioctlfd; /* automount command fd */
- __u32 arg1; /* Command parameters */
- __u32 arg2;
+ union {
+ struct args_protover protover;
+ struct args_protosubver protosubver;
+ struct args_openmount openmount;
+ struct args_ready ready;
+ struct args_fail fail;
+ struct args_setpipefd setpipefd;
+ struct args_timeout timeout;
+ struct args_requester requester;
+ struct args_expire expire;
+ struct args_askumount askumount;
+ struct args_ismountpoint ismountpoint;
+ };
char path[0];
};
mount point file descriptor, and when requesting the uid and gid of the
last successful mount on a directory within the autofs file system.
-The fields arg1 and arg2 are used to communicate parameters and results of
-calls made as described below.
+The union is used to communicate parameters and results of calls made
+as described below.
The path field is used to pass a path where it is needed and the size field
is used to account for the increased structure length when translating the
Get the major and minor version of the autofs4 protocol version understood
by the loaded module. This call requires an initialized struct autofs_dev_ioctl
with the ioctlfd field set to a valid autofs mount point descriptor
-and sets the requested version number in structure field arg1. These
-commands return 0 on success or one of the negative error codes if
-validation fails.
+and sets the requested version number in the version field of struct
+args_protover or the sub_version field of struct args_protosubver. These
+commands return 0 on success or one of the negative error codes if
+validation fails.
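A hedged userspace sketch of the protocol version query, reading the union
member instead of the old arg1 field (the AUTOFS_DEV_IOCTL_PROTOVER request
macro and the <linux/auto_dev-ioctl.h>, <sys/ioctl.h> and <string.h> headers
are assumed; devfd is an open descriptor on /dev/autofs):

	int get_proto_version(int devfd, int ioctlfd)
	{
		struct autofs_dev_ioctl param;

		memset(&param, 0, sizeof(param));
		param.ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
		param.ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
		param.size = sizeof(param);
		param.ioctlfd = ioctlfd;	/* valid autofs mount point fd */

		if (ioctl(devfd, AUTOFS_DEV_IOCTL_PROTOVER, &param) < 0)
			return -1;

		return param.protover.version;
	}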
AUTOFS_DEV_IOCTL_OPENMOUNT and AUTOFS_DEV_IOCTL_CLOSEMOUNT
Obtain and release a file descriptor for an autofs managed mount point
path. The open call requires an initialized struct autofs_dev_ioctl with
the path field set and the size field adjusted appropriately as well
-as the arg1 field set to the device number of the autofs mount. The
-device number can be obtained from the mount options shown in
-/proc/mounts. The close call requires an initialized struct
+as the devid field of struct args_openmount set to the device number of
+the autofs mount. The device number can be obtained from the mount options
+shown in /proc/mounts. The close call requires an initialized struct
autofs_dev_ioctl with the ioctlfd field set to the descriptor obtained
from the open call. The release of the file descriptor can also be done
with close(2) so any open descriptors will also be closed at process exit.
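A hedged sketch of the open call, showing the path being appended and the
size field adjusted as described above (macro names, the args_openmount
layout used here and the required <stdlib.h>/<string.h>/<sys/ioctl.h>
headers are assumptions):

	int open_ioctlfd(int devfd, const char *path, __u32 devid)
	{
		size_t size = sizeof(struct autofs_dev_ioctl) + strlen(path) + 1;
		struct autofs_dev_ioctl *param = calloc(1, size);
		int ioctlfd = -1;

		if (!param)
			return -1;

		param->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
		param->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
		param->size = size;			/* includes the path */
		param->ioctlfd = -1;
		param->openmount.devid = devid;		/* from /proc/mounts */
		strcpy(param->path, path);

		if (ioctl(devfd, AUTOFS_DEV_IOCTL_OPENMOUNT, param) == 0)
			ioctlfd = param->ioctlfd;

		free(param);
		return ioctlfd;
	}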
Return mount and expire result status from user space to the kernel.
Both of these calls require an initialized struct autofs_dev_ioctl
with the ioctlfd field set to the descriptor obtained from the open
-call and the arg1 field set to the wait queue token number, received
-by user space in the foregoing mount or expire request. The arg2 field
-is set to the status to be returned. For the ready call this is always
-0 and for the fail call it is set to the errno of the operation.
+call and the token field of struct args_ready or struct args_fail set
+to the wait queue token number, received by user space in the foregoing
+mount or expire request. For the fail call the status field of struct
+args_fail is set to the errno of the operation; the ready call reports
+success and needs no status.
AUTOFS_DEV_IOCTL_SETPIPEFD_CMD
The call requires an initialized struct autofs_dev_ioctl with the
ioctlfd field set to the descriptor obtained from the open call and
-the arg1 field set to descriptor of the pipe. On success the call
-also sets the process group id used to identify the controlling process
-(eg. the owning automount(8) daemon) to the process group of the caller.
+the pipefd field of struct args_setpipefd set to the descriptor of the pipe.
+On success the call also sets the process group id used to identify the
+controlling process (eg. the owning automount(8) daemon) to the process
+group of the caller.
AUTOFS_DEV_IOCTL_CATATONIC_CMD
The call requires an initialized struct autofs_dev_ioctl with the path
field set to the mount point in question and the size field adjusted
-appropriately as well as the arg1 field set to the device number of the
-containing autofs mount. Upon return the struct field arg1 contains the
-uid and arg2 the gid.
+appropriately. Upon return the uid field of struct args_requester contains
+the uid and the gid field contains the gid.
When reconstructing an autofs mount tree with active mounts we need to
re-connect to mounts that may have used the original process uid and
The call requires an initialized struct autofs_dev_ioctl with the
ioctlfd field set to the descriptor obtained from the open call. In
addition an immediate expire, independent of the mount timeout, can be
-requested by setting the arg1 field to 1. If no expire candidates can
-be found the ioctl returns -1 with errno set to EAGAIN.
+requested by setting the how field of struct args_expire to 1. If no
+expire candidates can be found the ioctl returns -1 with errno set to
+EAGAIN.
This call causes the kernel module to check the mount corresponding
to the given ioctlfd for mounts that can be expired, issues an expire
The call requires an initialized struct autofs_dev_ioctl with the
ioctlfd field set to the descriptor obtained from the open call and
-it returns the result in the arg1 field, 1 for busy and 0 otherwise.
+it returns the result in the may_umount field of struct args_askumount,
+1 for busy and 0 otherwise.
AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD
possible variations. Both use the path field set to the path of the mount
point to check and the size field adjusted appropriately. One uses the
ioctlfd field to identify a specific mount point to check while the other
-variation uses the path and optionally arg1 set to an autofs mount type.
-The call returns 1 if this is a mount point and sets arg1 to the device
-number of the mount and field arg2 to the relevant super block magic
-number (described below) or 0 if it isn't a mountpoint. In both cases
-the the device number (as returned by new_encode_dev()) is returned
-in field arg1.
+variation uses the path and optionally the in.type field of struct
+args_ismountpoint set to an autofs mount type. The call returns 1 if this
+is a mount point, setting the out.devid field to the device number of the
+mount and the out.magic field to the relevant super block magic number
+(described below), or 0 if it isn't a mountpoint. In both cases the device
+number (as returned by new_encode_dev()) is returned in the out.devid field.
If supplied with a file descriptor we're looking for a specific mount,
not necessarily at the top of the mounted stack. In this case the path
Mountpoint expiry
-----------------
-The VFS has a mechansim for automatically expiring unused mounts,
+The VFS has a mechanism for automatically expiring unused mounts,
much as it can expire any unused dentry information from the dcache.
-This is guided by the MNT_SHRINKABLE flag. This only applies to
+This is guided by the MNT_SHRINKABLE flag. This only applies to
mounts that were created by `d_automount()` returning a filesystem to be
mounted. As autofs doesn't return such a filesystem but leaves the
mounting to the automount daemon, it must involve the automount daemon
autofs knows whether a process requesting some operation is the daemon
or not based on its process-group id number (see getpgid(1)).
-When an autofs filesystem it mounted the pgid of the mounting
+When an autofs filesystem is mounted the pgid of the mounting
processes is recorded unless the "pgrp=" option is given, in which
case that number is recorded instead. Any request arriving from a
process in that process group is considered to come from the daemon.
numbers for existing filesystems can be found in
`/proc/self/mountinfo`.
- **AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD**: same as `close(ioctlfd)`.
-- **AUTOFS_DEV_IOCTL_SETPIPEFD_CMD**: if the filesystem is in
+- **AUTOFS_DEV_IOCTL_SETPIPEFD_CMD**: if the filesystem is in
catatonic mode, this can provide the write end of a new pipe
in `arg1` to re-establish communication with a daemon. The
process group of the calling process is used to identify the
Double-quotes can be used to protect spaces in values, e.g.:
param="spaces in here"
+cpu lists:
+----------
+
+Some kernel parameters take a list of CPUs as a value, e.g. isolcpus,
+nohz_full, irqaffinity, rcu_nocbs. The format of this list is:
+
+ <cpu number>,...,<cpu number>
+
+or
+
+ <cpu number>-<cpu number>
+ (must be a positive range in ascending order)
+
+or a mixture
+
+	<cpu number>,...,<cpu number>-<cpu number>
+
+Note that for the special case of a range one can split the range into
+equal-sized groups and, within each group, use only a given number of CPUs
+from the beginning of that group:
+
+	<cpu number>-<cpu number>:<used size>/<group size>
+
+For example one can add the following parameter to the command line:
+
+ isolcpus=1,2,10-20,100-2000:2/25
+
+where the final item represents CPUs 100,101,125,126,150,151,...
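A purely illustrative loop makes the group arithmetic concrete; the final
item above steps through 100-2000 in groups of 25 and keeps the first 2
CPUs of each group:

	int base, i;

	/* illustrative only: expand "100-2000:2/25" */
	for (base = 100; base <= 2000; base += 25)
		for (i = 0; i < 2 && base + i <= 2000; i++)
			printf("%d ", base + i);  /* 100 101 125 126 150 151 ... */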
+
+
+
This document may not be entirely up to date and comprehensive. The command
"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
module. Loadable modules, after being loaded into the running kernel, also
See Documentation/filesystems/nfs/nfsroot.txt.
irqaffinity= [SMP] Set the default irq affinity mask
- Format:
- <cpu number>,...,<cpu number>
- or
- <cpu number>-<cpu number>
- (must be a positive range in ascending order)
- or a mixture
- <cpu number>,...,<cpu number>-<cpu number>
+ The argument is a cpu list, as described above.
irqfixup [HW]
When an interrupt is not handled search all handlers
Format: <RDP>,<reset>,<pci_scan>,<verbosity>
isolcpus= [KNL,SMP] Isolate CPUs from the general scheduler.
- Format:
- <cpu number>,...,<cpu number>
- or
- <cpu number>-<cpu number>
- (must be a positive range in ascending order)
- or a mixture
- <cpu number>,...,<cpu number>-<cpu number>
+ The argument is a cpu list, as described above.
This option can be used to specify one or more CPUs
to isolate from the general SMP balancing and scheduling
Default: on
nohz_full= [KNL,BOOT]
+ The argument is a cpu list, as described above.
In kernels built with CONFIG_NO_HZ_FULL=y, set
the specified list of CPUs whose tick will be stopped
whenever possible. The boot CPU will be forced outside
See Documentation/blockdev/ramdisk.txt.
rcu_nocbs= [KNL]
+ The argument is a cpu list, as described above.
+
In kernels built with CONFIG_RCU_NOCB_CPU=y, set
the specified list of CPUs to be no-callback CPUs.
Invocation of these CPUs' RCU callbacks will
#ifndef __ASM_ARM_TRUSTED_FOUNDATIONS_H
#define __ASM_ARM_TRUSTED_FOUNDATIONS_H
-#include <linux/kconfig.h>
#include <linux/printk.h>
#include <linux/bug.h>
#include <linux/of.h>
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
- unsigned long range_end = mm->brk + 0x02000000;
- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+ return randomize_page(mm->brk, 0x02000000);
}
#ifdef CONFIG_MMU
#ifndef __ASSEMBLY__
#include <linux/init.h>
-#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
- unsigned long range_end = mm->brk;
-
if (is_compat_task())
- range_end += 0x02000000;
+ return randomize_page(mm->brk, 0x02000000);
else
- range_end += 0x40000000;
-
- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+ return randomize_page(mm->brk, 0x40000000);
}
default_machine_crash_shutdown(regs);
}
+#ifdef CONFIG_SMP
+void octeon_crash_smp_send_stop(void)
+{
+ int cpu;
+
+ /* disable watchdogs */
+ for_each_online_cpu(cpu)
+ cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+}
+#endif
+
#endif /* CONFIG_KEXEC */
#ifdef CONFIG_CAVIUM_RESERVE32
_machine_kexec_shutdown = octeon_shutdown;
_machine_crash_shutdown = octeon_crash_shutdown;
_machine_kexec_prepare = octeon_kexec_prepare;
+#ifdef CONFIG_SMP
+ _crash_smp_send_stop = octeon_crash_smp_send_stop;
+#endif
#endif
octeon_user_io_init();
extern unsigned long secondary_kexec_args[4];
extern void (*relocated_kexec_smp_wait) (void *);
extern atomic_t kexec_ready_to_reboot;
+extern void (*_crash_smp_send_stop)(void);
#endif
#endif
#include <linux/io.h>
#include <linux/init.h>
#include <linux/irq.h>
-#include <linux/kconfig.h>
#include <boot_param.h>
/* loongson internal northbridge initialization */
static void crash_kexec_prepare_cpus(void)
{
+ static int cpus_stopped;
unsigned int msecs;
+ unsigned int ncpus;
- unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+ if (cpus_stopped)
+ return;
+
+ ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
dump_send_ipi(crash_shutdown_secondary);
smp_wmb();
cpu_relax();
mdelay(1);
}
+
+ cpus_stopped = 1;
+}
+
+/* Override the weak function in kernel/panic.c */
+void crash_smp_send_stop(void)
+{
+ if (_crash_smp_send_stop)
+ _crash_smp_send_stop();
+
+ crash_kexec_prepare_cpus();
}
#else /* !defined(CONFIG_SMP) */
#ifdef CONFIG_SMP
void (*relocated_kexec_smp_wait) (void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+void (*_crash_smp_send_stop)(void) = NULL;
#endif
int
*/
#include <linux/sched.h>
#include <linux/debugfs.h>
-#include <linux/kconfig.h>
#include <linux/percpu-defs.h>
#include <linux/perf_event.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
-#include <linux/kconfig.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
/* Handle failure */
if (unlikely(entry == DMA_ERROR_CODE)) {
- if (printk_ratelimit())
+ if (!(attrs & DMA_ATTR_NO_WARN) &&
+ printk_ratelimit())
dev_info(dev, "iommu_alloc failed, tbl %p "
"vaddr %lx npages %lu\n", tbl, vaddr,
npages);
mask >> tbl->it_page_shift, align,
attrs);
if (dma_handle == DMA_ERROR_CODE) {
- if (printk_ratelimit()) {
+ if (!(attrs & DMA_ATTR_NO_WARN) &&
+ printk_ratelimit()) {
dev_info(dev, "iommu_alloc failed, tbl %p "
"vaddr %p npages %d\n", tbl, vaddr,
npages);
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
- unsigned long range_end = mm->brk + 0x02000000;
- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+ return randomize_page(mm->brk, 0x02000000);
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
- unsigned long range_end = mm->brk + 0x02000000;
- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+ return randomize_page(mm->brk, 0x02000000);
}
/*
typedef void crash_vmclear_fn(void);
extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+extern void kdump_nmi_shootdown_cpus(void);
#endif /* __ASSEMBLY__ */
void (*smp_cpus_done)(unsigned max_cpus);
void (*stop_other_cpus)(int wait);
+ void (*crash_stop_other_cpus)(void);
void (*smp_send_reschedule)(int cpu);
int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
disable_local_APIC();
}
-static void kdump_nmi_shootdown_cpus(void)
+void kdump_nmi_shootdown_cpus(void)
{
nmi_shootdown_cpus(kdump_nmi_callback);
disable_local_APIC();
}
+/* Override the weak function in kernel/panic.c */
+void crash_smp_send_stop(void)
+{
+ static int cpus_stopped;
+
+ if (cpus_stopped)
+ return;
+
+ if (smp_ops.crash_stop_other_cpus)
+ smp_ops.crash_stop_other_cpus();
+ else
+ smp_send_stop();
+
+ cpus_stopped = 1;
+}
+
#else
-static void kdump_nmi_shootdown_cpus(void)
+void crash_smp_send_stop(void)
{
/* There are no cpus to shootdown */
}
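For reference, the generic fallback that these architecture hooks override
lives in kernel/panic.c; a minimal sketch, assuming it simply defers to
smp_send_stop() behind the same one-shot guard, looks like:

	void __weak crash_smp_send_stop(void)
	{
		/* may be called twice on the panic path; only act once */
		static int cpus_stopped;

		if (cpus_stopped)
			return;

		smp_send_stop();
		cpus_stopped = 1;
	}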
/* The kernel is broken so disable interrupts */
local_irq_disable();
- kdump_nmi_shootdown_cpus();
+ crash_smp_send_stop();
/*
* VMCLEAR VMCSs loaded on this cpu if needed.
#endif
vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
kaslr_offset());
+ VMCOREINFO_PAGE_OFFSET(PAGE_OFFSET);
+ VMCOREINFO_VMALLOC_START(VMALLOC_START);
+ VMCOREINFO_VMEMMAP_START(VMEMMAP_START);
}
/* arch-dependent functionality related to kexec file-based syscall */
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
- unsigned long range_end = mm->brk + 0x02000000;
- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+ return randomize_page(mm->brk, 0x02000000);
}
/*
#include <asm/nmi.h>
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
+#include <asm/kexec.h>
+
/*
* Some notes on x86 processor bugs affecting SMP operation:
*
.smp_cpus_done = native_smp_cpus_done,
.stop_other_cpus = native_stop_other_cpus,
+#if defined(CONFIG_KEXEC_CORE)
+ .crash_stop_other_cpus = kdump_nmi_shootdown_cpus,
+#endif
.smp_send_reschedule = native_smp_send_reschedule,
.cpu_up = native_cpu_up,
unsigned long *end)
{
if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
- unsigned long new_begin;
/* This is usually needed to map code in small
model, so it needs to be in the first 31bit. Limit
it to that. This means we need to move the
*begin = 0x40000000;
*end = 0x80000000;
if (current->flags & PF_RANDOMIZE) {
- new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
- if (new_begin)
- *begin = new_begin;
+ *begin = randomize_page(*begin, 0x02000000);
}
} else {
*begin = current->mm->mmap_legacy_base;
*/
smp_mb();
if (atomic_dec_if_positive(&ps->pending) > 0)
- queue_kthread_work(&pit->worker, &pit->expired);
+ kthread_queue_work(&pit->worker, &pit->expired);
}
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
static void destroy_pit_timer(struct kvm_pit *pit)
{
hrtimer_cancel(&pit->pit_state.timer);
- flush_kthread_work(&pit->expired);
+ kthread_flush_work(&pit->expired);
}
static void pit_do_work(struct kthread_work *work)
if (atomic_read(&ps->reinject))
atomic_inc(&ps->pending);
- queue_kthread_work(&pt->worker, &pt->expired);
+ kthread_queue_work(&pt->worker, &pt->expired);
if (ps->is_periodic) {
hrtimer_add_expires_ns(&ps->timer, ps->period);
/* TODO The new value only affected after the retriggered */
hrtimer_cancel(&ps->timer);
- flush_kthread_work(&pit->expired);
+ kthread_flush_work(&pit->expired);
ps->period = interval;
ps->is_periodic = is_period;
pid_nr = pid_vnr(pid);
put_pid(pid);
- init_kthread_worker(&pit->worker);
+ kthread_init_worker(&pit->worker);
pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
"kvm-pit/%d", pid_nr);
if (IS_ERR(pit->worker_task))
goto fail_kthread;
- init_kthread_work(&pit->expired, pit_do_work);
+ kthread_init_work(&pit->expired, pit_do_work);
pit->kvm = kvm;
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
kvm_pit_set_reinject(pit, false);
hrtimer_cancel(&pit->pit_state.timer);
- flush_kthread_work(&pit->expired);
+ kthread_flush_work(&pit->expired);
kthread_stop(pit->worker_task);
kvm_free_irq_source_id(kvm, pit->irq_source_id);
kfree(pit);
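The kvm-pit hunks above, and the similar renames in the crypto-engine,
loop, dm, spi, rdmavt, ivtv and serial hunks below, are all the same
mechanical switch from the old init_kthread_*()/queue_kthread_work() names
to the kthread_*() prefixed ones. A minimal sketch of the renamed API,
following the usage pattern seen in these drivers (names are illustrative):

	static struct kthread_worker worker;
	static struct kthread_work work;

	static void work_fn(struct kthread_work *w)
	{
		/* deferred processing runs in the worker thread */
	}

	static int example_start(void)
	{
		struct task_struct *task;

		kthread_init_worker(&worker);
		kthread_init_work(&work, work_fn);

		task = kthread_run(kthread_worker_fn, &worker, "example");
		if (IS_ERR(task))
			return PTR_ERR(task);

		kthread_queue_work(&worker, &work);
		kthread_flush_worker(&worker);	/* wait for queued work */
		kthread_stop(task);
		return 0;
	}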
unsigned int granularity;
enum req_op op;
int alignment;
+ sector_t bs_mask;
if (!q)
return -ENXIO;
op = REQ_OP_DISCARD;
}
+ bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+ if ((sector | nr_sects) & bs_mask)
+ return -EINVAL;
+
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
unsigned int max_write_same_sectors;
struct bio *bio = NULL;
int ret = 0;
+ sector_t bs_mask;
if (!q)
return -ENXIO;
+ bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+ if ((sector | nr_sects) & bs_mask)
+ return -EINVAL;
+
/* Ensure that max_write_same_sectors doesn't overflow bi_size */
max_write_same_sectors = UINT_MAX >> 9;
int ret;
struct bio *bio = NULL;
unsigned int sz;
+ sector_t bs_mask;
+
+ bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+ if ((sector | nr_sects) & bs_mask)
+ return -EINVAL;
while (nr_sects != 0) {
bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
unsigned long arg)
{
uint64_t range[2];
- uint64_t start, len;
+ struct address_space *mapping;
+ uint64_t start, end, len;
if (!(mode & FMODE_WRITE))
return -EBADF;
start = range[0];
len = range[1];
+ end = start + len - 1;
if (start & 511)
return -EINVAL;
if (len & 511)
return -EINVAL;
- start >>= 9;
- len >>= 9;
-
- if (start + len > (i_size_read(bdev->bd_inode) >> 9))
+ if (end >= (uint64_t)i_size_read(bdev->bd_inode))
+ return -EINVAL;
+ if (end < start)
return -EINVAL;
- return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL, false);
+ /* Invalidate the page cache, including dirty pages */
+ mapping = bdev->bd_inode->i_mapping;
+ truncate_inode_pages_range(mapping, start, end);
+
+ return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
+ false);
}
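BLKZEROOUT still takes a byte-based {start, length} pair (each a multiple
of 512), but now invalidates cached pages over the range first, and the
underlying helpers reject requests not aligned to the logical block size.
A hedged userspace sketch (the device path is a placeholder):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(void)
	{
		uint64_t range[2] = { 4096, 1048576 };	/* start, len in bytes */
		int fd = open("/dev/sdX", O_WRONLY);	/* placeholder device */

		if (fd < 0 || ioctl(fd, BLKZEROOUT, range) < 0)
			perror("BLKZEROOUT");

		return 0;
	}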
static int put_ushort(unsigned long arg, unsigned short val)
/* If another context is idling then defer */
if (engine->idling) {
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
goto out;
}
/* Only do teardown in the thread */
if (!in_kthread) {
- queue_kthread_work(&engine->kworker,
+ kthread_queue_work(&engine->kworker,
&engine->pump_requests);
goto out;
}
ret = ablkcipher_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
ret = ahash_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
req->base.complete(&req->base, err);
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
req->base.complete(&req->base, err);
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
engine->running = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
return 0;
}
crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
spin_lock_init(&engine->queue_lock);
- init_kthread_worker(&engine->kworker);
+ kthread_init_worker(&engine->kworker);
engine->kworker_task = kthread_run(kthread_worker_fn,
&engine->kworker, "%s",
engine->name);
dev_err(dev, "failed to create crypto request pump task\n");
return NULL;
}
- init_kthread_work(&engine->pump_requests, crypto_pump_work);
+ kthread_init_work(&engine->pump_requests, crypto_pump_work);
if (engine->rt) {
dev_info(dev, "will run requests pump with realtime priority\n");
if (ret)
return ret;
- flush_kthread_worker(&engine->kworker);
+ kthread_flush_worker(&engine->kworker);
kthread_stop(engine->kworker_task);
return 0;
static void loop_unprepare_queue(struct loop_device *lo)
{
- flush_kthread_worker(&lo->worker);
+ kthread_flush_worker(&lo->worker);
kthread_stop(lo->worker_task);
}
static int loop_prepare_queue(struct loop_device *lo)
{
- init_kthread_worker(&lo->worker);
+ kthread_init_worker(&lo->worker);
lo->worker_task = kthread_run(kthread_worker_fn,
&lo->worker, "loop%d", lo->lo_number);
if (IS_ERR(lo->worker_task))
break;
}
- queue_kthread_work(&lo->worker, &cmd->work);
+ kthread_queue_work(&lo->worker, &cmd->work);
return BLK_MQ_RQ_QUEUE_OK;
}
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->rq = rq;
- init_kthread_work(&cmd->work, loop_queue_work);
+ kthread_init_work(&cmd->work, loop_queue_work);
return 0;
}
}
EXPORT_SYMBOL(get_random_long);
-/*
- * randomize_range() returns a start address such that
+/**
+ * randomize_page - Generate a random, page aligned address
+ * @start: The smallest acceptable address the caller will take.
+ * @range: The size of the area, starting at @start, within which the
+ * random address must fall.
+ *
+ * If @start + @range would overflow, @range is capped.
*
- * [...... <range> .....]
- * start end
+ * NOTE: Historical use of randomize_range, which this replaces, presumed that
+ * @start was already page aligned. We now align it regardless.
*
- * a <range> with size "len" starting at the return value is inside in the
- * area defined by [start, end], but is otherwise randomized.
+ * Return: A page aligned address within [start, start + range). On error,
+ * @start is returned.
*/
unsigned long
-randomize_range(unsigned long start, unsigned long end, unsigned long len)
+randomize_page(unsigned long start, unsigned long range)
{
- unsigned long range = end - len - start;
+ if (!PAGE_ALIGNED(start)) {
+ range -= PAGE_ALIGN(start) - start;
+ start = PAGE_ALIGN(start);
+ }
- if (end <= start + len)
- return 0;
- return PAGE_ALIGN(get_random_int() % range + start);
+ if (start > ULONG_MAX - range)
+ range = ULONG_MAX - start;
+
+ range >>= PAGE_SHIFT;
+
+ if (range == 0)
+ return start;
+
+ return start + (get_random_long() % range << PAGE_SHIFT);
}
/* Interface for in-kernel drivers of true hardware RNGs.
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
-#include <linux/kconfig.h>
#include "../tty/hvc/hvc_console.h"
#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
if (likely(worker)) {
cq->notify = RVT_CQ_NONE;
cq->triggered++;
- queue_kthread_work(worker, &cq->comptask);
+ kthread_queue_work(worker, &cq->comptask);
}
}
cq->ibcq.cqe = entries;
cq->notify = RVT_CQ_NONE;
spin_lock_init(&cq->lock);
- init_kthread_work(&cq->comptask, send_complete);
+ kthread_init_work(&cq->comptask, send_complete);
cq->queue = wc;
ret = &cq->ibcq;
struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
struct rvt_dev_info *rdi = cq->rdi;
- flush_kthread_work(&cq->comptask);
+ kthread_flush_work(&cq->comptask);
spin_lock(&rdi->n_cqs_lock);
rdi->n_cqs_allocated--;
spin_unlock(&rdi->n_cqs_lock);
rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
if (!rdi->worker)
return -ENOMEM;
- init_kthread_worker(rdi->worker);
+ kthread_init_worker(rdi->worker);
task = kthread_create_on_node(
kthread_worker_fn,
rdi->worker,
/* blocks future queuing from send_complete() */
rdi->worker = NULL;
smp_wmb(); /* See rdi_cq_enter */
- flush_kthread_worker(worker);
+ kthread_flush_worker(worker);
kthread_stop(worker->task);
kfree(worker);
}
#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/pm.h>
#include <linux/rmi.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/fs.h>
-#include <linux/kconfig.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
*/
#include <linux/kernel.h>
-#include <linux/kconfig.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/input.h>
#include <linux/input/mt.h>
-#include <linux/kconfig.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
-#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
-#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/kconfig.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/of.h>
if (!md->init_tio_pdu)
memset(&tio->info, 0, sizeof(tio->info));
if (md->kworker_task)
- init_kthread_work(&tio->work, map_tio_request);
+ kthread_init_work(&tio->work, map_tio_request);
}
static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
tio = tio_from_request(rq);
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
- queue_kthread_work(&md->kworker, &tio->work);
+ kthread_queue_work(&md->kworker, &tio->work);
BUG_ON(!irqs_disabled());
}
}
blk_queue_prep_rq(md->queue, dm_old_prep_fn);
/* Initialize the request-based DM worker thread */
- init_kthread_worker(&md->kworker);
+ kthread_init_worker(&md->kworker);
md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
"kdmwork-%s", dm_device_name(md));
if (IS_ERR(md->kworker_task))
spin_unlock_irq(q->queue_lock);
if (dm_request_based(md) && md->kworker_task)
- flush_kthread_worker(&md->kworker);
+ kthread_flush_worker(&md->kworker);
/*
* Take suspend_lock so that presuspend and postsuspend methods
if (dm_request_based(md)) {
dm_stop_queue(md->queue);
if (md->kworker_task)
- flush_kthread_worker(&md->kworker);
+ kthread_flush_worker(&md->kworker);
}
flush_workqueue(md->wq);
#ifndef AF9013_H
#define AF9013_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
/* AF9013/5 GPIOs (mostly guessed)
#ifndef AF9033_H
#define AF9033_H
-#include <linux/kconfig.h>
-
/*
* I2C address (TODO: are these in 8-bit format?)
* 0x38, 0x3a, 0x3c, 0x3e
#ifndef __DVB_ASCOT2E_H__
#define __DVB_ASCOT2E_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/i2c.h>
#ifndef __ATBM8830_H__
#define __ATBM8830_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/i2c.h>
#ifndef __AU8522_H__
#define __AU8522_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
enum au8522_if_freq {
#ifndef CX22702_H
#define CX22702_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct cx22702_config {
#ifndef CX24113_H
#define CX24113_H
-#include <linux/kconfig.h>
-
struct dvb_frontend;
struct cx24113_config {
#ifndef CX24116_H
#define CX24116_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct cx24116_config {
#ifndef CX24117_H
#define CX24117_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct cx24117_config {
#ifndef CX24120_H
#define CX24120_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/firmware.h>
#ifndef CX24123_H
#define CX24123_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct cx24123_config {
#ifndef CXD2820R_H
#define CXD2820R_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#define CXD2820R_GPIO_D (0 << 0) /* disable */
#ifndef CXD2841ER_H
#define CXD2841ER_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
enum cxd2841er_xtal {
#ifndef DIB3000MC_H
#define DIB3000MC_H
-#include <linux/kconfig.h>
-
#include "dibx000_common.h"
struct dib3000mc_config {
#ifndef DIB7000M_H
#define DIB7000M_H
-#include <linux/kconfig.h>
-
#include "dibx000_common.h"
struct dib7000m_config {
#ifndef DIB7000P_H
#define DIB7000P_H
-#include <linux/kconfig.h>
-
#include "dibx000_common.h"
struct dib7000p_config {
#ifndef _DRXD_H_
#define _DRXD_H_
-#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/i2c.h>
#ifndef _DRXK_H_
#define _DRXK_H_
-#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/i2c.h>
#ifndef DS3000_H
#define DS3000_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct ds3000_config {
#ifndef DVB_DUMMY_FE_H
#define DVB_DUMMY_FE_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
#ifndef EC100_H
#define EC100_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct ec100_config {
#ifndef HD29L2_H
#define HD29L2_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct hd29l2_config {
#ifndef __DVB_HELENE_H__
#define __DVB_HELENE_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/i2c.h>
#ifndef __DVB_HORUS3A_H__
#define __DVB_HORUS3A_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/i2c.h>
#ifndef DVB_IX2505V_H
#define DVB_IX2505V_H
-#include <linux/kconfig.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
#ifndef _LG2160_H_
#define _LG2160_H_
-#include <linux/kconfig.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
#ifndef _LGDT3305_H_
#define _LGDT3305_H_
-#include <linux/kconfig.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
#ifndef LGS8GL5_H
#define LGS8GL5_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct lgs8gl5_config {
#ifndef __LGS8GXX_H__
#define __LGS8GXX_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/i2c.h>
#ifndef _LNBH24_H
#define _LNBH24_H
-#include <linux/kconfig.h>
-
/* system register bits */
#define LNBH24_OLF 0x01
#define LNBH24_OTF 0x02
#define LNBH25_H
#include <linux/i2c.h>
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
/* 22 kHz tone enabled. Tone output controlled by DSQIN pin */
#ifndef _LNBP21_H
#define _LNBP21_H
-#include <linux/kconfig.h>
-
/* system register bits */
/* [RO] 0=OK; 1=over current limit flag */
#define LNBP21_OLF 0x01
#ifndef _LNBP22_H
#define _LNBP22_H
-#include <linux/kconfig.h>
-
/* Enable */
#define LNBP22_EN 0x10
/* Voltage selection */
#ifndef M88RS2000_H
#define M88RS2000_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
#ifndef MB86A20S_H
#define MB86A20S_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
/**
#ifndef __S5H1409_H__
#define __S5H1409_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct s5h1409_config {
#ifndef __S5H1411_H__
#define __S5H1411_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#define S5H1411_I2C_TOP_ADDR (0x32 >> 1)
#ifndef __S5H1432_H__
#define __S5H1432_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#define S5H1432_I2C_TOP_ADDR (0x02 >> 1)
#ifndef S921_H
#define S921_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct s921_config {
#ifndef SI21XX_H
#define SI21XX_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
#ifndef SP2_H
#define SP2_H
-#include <linux/kconfig.h>
#include "dvb_ca_en50221.h"
/*
#ifndef __DVB_STB6000_H__
#define __DVB_STB6000_H__
-#include <linux/kconfig.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
#ifndef STV0288_H
#define STV0288_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
#ifndef STV0367_H
#define STV0367_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
#ifndef STV0900_H
#define STV0900_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
#ifndef __DVB_STV6110_H__
#define __DVB_STV6110_H__
-#include <linux/kconfig.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
#ifndef TDA10048_H
#define TDA10048_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/firmware.h>
#ifndef _TDA18271C2DD_H_
#define _TDA18271C2DD_H_
-#include <linux/kconfig.h>
-
#if IS_REACHABLE(CONFIG_DVB_TDA18271C2DD)
struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, u8 adr);
#ifndef TS2020_H
#define TS2020_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct ts2020_config {
#ifndef DVB_ZL10036_H
#define DVB_ZL10036_H
-#include <linux/kconfig.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
#ifndef ZL10039_H
#define ZL10039_H
-#include <linux/kconfig.h>
-
#if IS_REACHABLE(CONFIG_DVB_ZL10039)
struct dvb_frontend *zl10039_attach(struct dvb_frontend *fe,
u8 i2c_addr,
#ifndef __ALTERA_CI_H
#define __ALTERA_CI_H
-#include <linux/kconfig.h>
-
#define ALT_DATA 0x000000ff
#define ALT_TDI 0x00008000
#define ALT_TDO 0x00004000
spin_lock_init(&itv->lock);
spin_lock_init(&itv->dma_reg_lock);
- init_kthread_worker(&itv->irq_worker);
+ kthread_init_worker(&itv->irq_worker);
itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker,
"%s", itv->v4l2_dev.name);
if (IS_ERR(itv->irq_worker_task)) {
/* must use the FIFO scheduler as it is realtime sensitive */
sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param);
- init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
+ kthread_init_work(&itv->irq_work, ivtv_irq_work_handler);
/* Initial settings */
itv->cxhdl.port = CX2341X_PORT_MEMORY;
del_timer_sync(&itv->dma_timer);
/* Kill irq worker */
- flush_kthread_worker(&itv->irq_worker);
+ kthread_flush_worker(&itv->irq_worker);
kthread_stop(itv->irq_worker_task);
ivtv_streams_cleanup(itv);
}
if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
- queue_kthread_work(&itv->irq_worker, &itv->irq_work);
+ kthread_queue_work(&itv->irq_worker, &itv->irq_work);
}
spin_unlock(&itv->dma_reg_lock);
#ifndef LINUX_FC0011_H_
#define LINUX_FC0011_H_
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
#ifndef _FC0012_H_
#define _FC0012_H_
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
#include "fc001x-common.h"
#ifndef _FC0013_H_
#define _FC0013_H_
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
#include "fc001x-common.h"
#ifndef __MAX2165_H__
#define __MAX2165_H__
-#include <linux/kconfig.h>
-
struct dvb_frontend;
struct i2c_adapter;
#ifndef MC44S803_H
#define MC44S803_H
-#include <linux/kconfig.h>
-
struct dvb_frontend;
struct i2c_adapter;
#ifndef __MXL5005S_H
#define __MXL5005S_H
-#include <linux/kconfig.h>
-
#include <linux/i2c.h>
#include "dvb_frontend.h"
#ifndef R820T_H
#define R820T_H
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
enum r820t_chip {
#ifndef SI2157_H
#define SI2157_H
-#include <linux/kconfig.h>
#include <media/media-device.h>
#include "dvb_frontend.h"
#ifndef TDA18212_H
#define TDA18212_H
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
struct tda18212_config {
#ifndef TDA18218_H
#define TDA18218_H
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
struct tda18218_config {
#ifndef __XC5000_H__
#define __XC5000_H__
-#include <linux/kconfig.h>
#include <linux/firmware.h>
struct dvb_frontend;
#ifndef __MXL111SF_DEMOD_H__
#define __MXL111SF_DEMOD_H__
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
#include "mxl111sf.h"
#ifndef __MXL111SF_TUNER_H__
#define __MXL111SF_TUNER_H__
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
#include "mxl111sf.h"
* see Documentation/dvb/README.dvb-usb for more information
*/
-#include <linux/kconfig.h>
#include "dibusb.h"
/* Max transfer size done by I2C transfer functions */
*/
#include <linux/kernel.h>
-#include <linux/kconfig.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
-#include <linux/kconfig.h>
#include <linux/leds.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
-#include <linux/kconfig.h>
#include "mtdcore.h"
*/
#include <linux/kernel.h>
-#include <linux/kconfig.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/platform_device.h>
}
if (oldfilter != priv->rxfilter)
- queue_kthread_work(&priv->kworker, &priv->setrx_work);
+ kthread_queue_work(&priv->kworker, &priv->setrx_work);
}
static void encx24j600_hw_tx(struct encx24j600_priv *priv)
/* Remember the skb for deferred processing */
priv->tx_skb = skb;
- queue_kthread_work(&priv->kworker, &priv->tx_work);
+ kthread_queue_work(&priv->kworker, &priv->tx_work);
return NETDEV_TX_OK;
}
goto out_free;
}
- init_kthread_worker(&priv->kworker);
- init_kthread_work(&priv->tx_work, encx24j600_tx_proc);
- init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc);
+ kthread_init_worker(&priv->kworker);
+ kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
+ kthread_init_work(&priv->setrx_work, encx24j600_setrx_proc);
priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
"encx24j600");
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
-#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/kconfig.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/kconfig.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
goto out;
ret = BLK_MQ_RQ_QUEUE_BUSY;
- if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
+ if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
+ DMA_ATTR_NO_WARN))
goto out;
if (!nvme_setup_prps(dev, req, size))
config NTP_PPS
bool "PPS kernel consumer support"
- depends on !NO_HZ
+ depends on !NO_HZ_COMMON
help
This option adds support for direct in-kernel time
synchronization using an external PPS signal.
{
struct rio_cm_msg msg;
void *buf;
- int ret = 0;
+ int ret;
if (copy_from_user(&msg, arg, sizeof(msg)))
return -EFAULT;
if (msg.size > RIO_MAX_MSG_SIZE)
return -EINVAL;
- buf = kmalloc(msg.size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) {
- ret = -EFAULT;
- goto out;
- }
+ buf = memdup_user((void __user *)(uintptr_t)msg.msg, msg.size);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
ret = riocm_ch_send(msg.ch_num, buf, msg.size);
-out:
+
kfree(buf);
return ret;
}
/* If another context is idling the device then defer */
if (master->idling) {
- queue_kthread_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
return;
}
/* Only do teardown in the thread */
if (!in_kthread) {
- queue_kthread_work(&master->kworker,
+ kthread_queue_work(&master->kworker,
&master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
return;
master->running = false;
master->busy = false;
- init_kthread_worker(&master->kworker);
+ kthread_init_worker(&master->kworker);
master->kworker_task = kthread_run(kthread_worker_fn,
&master->kworker, "%s",
dev_name(&master->dev));
dev_err(&master->dev, "failed to create message pump task\n");
return PTR_ERR(master->kworker_task);
}
- init_kthread_work(&master->pump_messages, spi_pump_messages);
+ kthread_init_work(&master->pump_messages, spi_pump_messages);
/*
* Master config will indicate if this controller should run the
spin_lock_irqsave(&master->queue_lock, flags);
master->cur_msg = NULL;
master->cur_msg_prepared = false;
- queue_kthread_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
trace_spi_message_done(mesg);
master->cur_msg = NULL;
spin_unlock_irqrestore(&master->queue_lock, flags);
- queue_kthread_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&master->kworker, &master->pump_messages);
return 0;
}
ret = spi_stop_queue(master);
/*
- * flush_kthread_worker will block until all work is done.
+ * kthread_flush_worker will block until all work is done.
* If the reason that stop_queue timed out is that the work will never
* finish, then it does no good to call flush/stop thread, so
* return anyway.
return ret;
}
- flush_kthread_worker(&master->kworker);
+ kthread_flush_worker(&master->kworker);
kthread_stop(master->kworker_task);
return 0;
list_add_tail(&msg->queue, &master->queue);
if (!master->busy && need_pump)
- queue_kthread_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
return 0;
obj->vob_discard_page_warned = 0;
} else {
SetPageError(vmpage);
- if (ioret == -ENOSPC)
- set_bit(AS_ENOSPC, &inode->i_mapping->flags);
- else
- set_bit(AS_EIO, &inode->i_mapping->flags);
+ mapping_set_error(inode->i_mapping, ioret);
if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
obj->vob_discard_page_warned == 0) {
{
struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
- queue_kthread_work(&s->kworker, &s->irq_work);
+ kthread_queue_work(&s->kworker, &s->irq_work);
return IRQ_HANDLED;
}
one->config.flags |= SC16IS7XX_RECONF_IER;
one->config.ier_clear |= bit;
- queue_kthread_work(&s->kworker, &one->reg_work);
+ kthread_queue_work(&s->kworker, &one->reg_work);
}
static void sc16is7xx_stop_tx(struct uart_port *port)
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
- queue_kthread_work(&s->kworker, &one->tx_work);
+ kthread_queue_work(&s->kworker, &one->tx_work);
}
static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
one->config.flags |= SC16IS7XX_RECONF_MD;
- queue_kthread_work(&s->kworker, &one->reg_work);
+ kthread_queue_work(&s->kworker, &one->reg_work);
}
static void sc16is7xx_break_ctl(struct uart_port *port, int break_state)
port->rs485 = *rs485;
one->config.flags |= SC16IS7XX_RECONF_RS485;
- queue_kthread_work(&s->kworker, &one->reg_work);
+ kthread_queue_work(&s->kworker, &one->reg_work);
return 0;
}
sc16is7xx_power(port, 0);
- flush_kthread_worker(&s->kworker);
+ kthread_flush_worker(&s->kworker);
}
static const char *sc16is7xx_type(struct uart_port *port)
s->devtype = devtype;
dev_set_drvdata(dev, s);
- init_kthread_worker(&s->kworker);
- init_kthread_work(&s->irq_work, sc16is7xx_ist);
+ kthread_init_worker(&s->kworker);
+ kthread_init_work(&s->irq_work, sc16is7xx_ist);
s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
"sc16is7xx");
if (IS_ERR(s->kworker_task)) {
SC16IS7XX_EFCR_RXDISABLE_BIT |
SC16IS7XX_EFCR_TXDISABLE_BIT);
/* Initialize kthread work structs */
- init_kthread_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
- init_kthread_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
+ kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
+ kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
/* Register port */
uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
sc16is7xx_power(&s->p[i].port, 0);
}
- flush_kthread_worker(&s->kworker);
+ kthread_flush_worker(&s->kworker);
kthread_stop(s->kworker_task);
if (!IS_ERR(s->clk))
#include <linux/usb/ehci_def.h>
#include <linux/delay.h>
#include <linux/serial_core.h>
-#include <linux/kconfig.h>
#include <linux/kgdb.h>
#include <linux/kthread.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
*/
#include <linux/types.h>
-#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
switch (ret) {
case -EDQUOT:
case -ENOSPC:
- set_bit(AS_ENOSPC,
- &wb->vnode->vfs_inode.i_mapping->flags);
+ mapping_set_error(wb->vnode->vfs_inode.i_mapping, -ENOSPC);
break;
case -EROFS:
case -EIO:
case -ENOMEDIUM:
case -ENXIO:
afs_kill_pages(wb->vnode, true, first, last);
- set_bit(AS_EIO, &wb->vnode->vfs_inode.i_mapping->flags);
+ mapping_set_error(wb->vnode->vfs_inode.i_mapping, -EIO);
break;
case -EACCES:
case -EPERM:
#define AUTOFS_IOC_COUNT 32
#define AUTOFS_DEV_IOCTL_IOC_FIRST (AUTOFS_DEV_IOCTL_VERSION)
-#define AUTOFS_DEV_IOCTL_IOC_COUNT (AUTOFS_IOC_COUNT - 11)
+#define AUTOFS_DEV_IOCTL_IOC_COUNT \
+ (AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD - AUTOFS_DEV_IOCTL_VERSION_CMD)
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <linux/uaccess.h>
-/* #define DEBUG */
-
#ifdef pr_fmt
#undef pr_fmt
#endif
int max_proto;
unsigned long exp_timeout;
unsigned int type;
- int reghost_enabled;
- int needs_reghost;
struct super_block *sb;
struct mutex wq_mutex;
struct mutex pipe_mutex;
}
}
-extern void autofs4_kill_sb(struct super_block *);
+void autofs4_kill_sb(struct super_block *);
if ((param->ver_major != AUTOFS_DEV_IOCTL_VERSION_MAJOR) ||
(param->ver_minor > AUTOFS_DEV_IOCTL_VERSION_MINOR)) {
pr_warn("ioctl control interface version mismatch: "
- "kernel(%u.%u), user(%u.%u), cmd(%d)\n",
+ "kernel(%u.%u), user(%u.%u), cmd(0x%08x)\n",
AUTOFS_DEV_IOCTL_VERSION_MAJOR,
AUTOFS_DEV_IOCTL_VERSION_MINOR,
param->ver_major, param->ver_minor, cmd);
return sbi;
}
+/* Return autofs dev ioctl version */
+static int autofs_dev_ioctl_version(struct file *fp,
+ struct autofs_sb_info *sbi,
+ struct autofs_dev_ioctl *param)
+{
+ /* This should have already been set. */
+ param->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
+ param->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
+ return 0;
+}
+
/* Return autofs module protocol version */
static int autofs_dev_ioctl_protover(struct file *fp,
struct autofs_sb_info *sbi,
static ioctl_fn lookup_dev_ioctl(unsigned int cmd)
{
- static struct {
- int cmd;
- ioctl_fn fn;
- } _ioctls[] = {
- {cmd_idx(AUTOFS_DEV_IOCTL_VERSION_CMD), NULL},
- {cmd_idx(AUTOFS_DEV_IOCTL_PROTOVER_CMD),
- autofs_dev_ioctl_protover},
- {cmd_idx(AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD),
- autofs_dev_ioctl_protosubver},
- {cmd_idx(AUTOFS_DEV_IOCTL_OPENMOUNT_CMD),
- autofs_dev_ioctl_openmount},
- {cmd_idx(AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD),
- autofs_dev_ioctl_closemount},
- {cmd_idx(AUTOFS_DEV_IOCTL_READY_CMD),
- autofs_dev_ioctl_ready},
- {cmd_idx(AUTOFS_DEV_IOCTL_FAIL_CMD),
- autofs_dev_ioctl_fail},
- {cmd_idx(AUTOFS_DEV_IOCTL_SETPIPEFD_CMD),
- autofs_dev_ioctl_setpipefd},
- {cmd_idx(AUTOFS_DEV_IOCTL_CATATONIC_CMD),
- autofs_dev_ioctl_catatonic},
- {cmd_idx(AUTOFS_DEV_IOCTL_TIMEOUT_CMD),
- autofs_dev_ioctl_timeout},
- {cmd_idx(AUTOFS_DEV_IOCTL_REQUESTER_CMD),
- autofs_dev_ioctl_requester},
- {cmd_idx(AUTOFS_DEV_IOCTL_EXPIRE_CMD),
- autofs_dev_ioctl_expire},
- {cmd_idx(AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD),
- autofs_dev_ioctl_askumount},
- {cmd_idx(AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD),
- autofs_dev_ioctl_ismountpoint}
+ static ioctl_fn _ioctls[] = {
+ autofs_dev_ioctl_version,
+ autofs_dev_ioctl_protover,
+ autofs_dev_ioctl_protosubver,
+ autofs_dev_ioctl_openmount,
+ autofs_dev_ioctl_closemount,
+ autofs_dev_ioctl_ready,
+ autofs_dev_ioctl_fail,
+ autofs_dev_ioctl_setpipefd,
+ autofs_dev_ioctl_catatonic,
+ autofs_dev_ioctl_timeout,
+ autofs_dev_ioctl_requester,
+ autofs_dev_ioctl_expire,
+ autofs_dev_ioctl_askumount,
+ autofs_dev_ioctl_ismountpoint,
};
unsigned int idx = cmd_idx(cmd);
- return (idx >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[idx].fn;
+ return (idx >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[idx];
}
/* ioctl dispatcher */
cmd = _IOC_NR(command);
if (_IOC_TYPE(command) != _IOC_TYPE(AUTOFS_DEV_IOCTL_IOC_FIRST) ||
- cmd - cmd_first >= AUTOFS_DEV_IOCTL_IOC_COUNT) {
+ cmd - cmd_first > AUTOFS_DEV_IOCTL_IOC_COUNT) {
return -ENOTTY;
}
if (err)
goto out;
- /* The validate routine above always sets the version */
- if (cmd == AUTOFS_DEV_IOCTL_VERSION_CMD)
- goto done;
-
fn = lookup_dev_ioctl(cmd);
if (!fn) {
pr_warn("unknown command 0x%08x\n", command);
- return -ENOTTY;
+ err = -ENOTTY;
+ goto out;
}
fp = NULL;
/*
* For obvious reasons the openmount can't have a file
* descriptor yet. We don't take a reference to the
- * file during close to allow for immediate release.
+ * file during close to allow for immediate release,
+ * and the same for retrieving ioctl version.
*/
- if (cmd != AUTOFS_DEV_IOCTL_OPENMOUNT_CMD &&
+ if (cmd != AUTOFS_DEV_IOCTL_VERSION_CMD &&
+ cmd != AUTOFS_DEV_IOCTL_OPENMOUNT_CMD &&
cmd != AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD) {
fp = fget(param->ioctlfd);
if (!fp) {
if (fp)
fput(fp);
-done:
if (err >= 0 && copy_to_user(user, param, AUTOFS_DEV_IOCTL_SIZE))
err = -EFAULT;
out:
goto fail_dput;
}
+ /* Test versions first */
+ if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
+ sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
+ pr_err("kernel does not match daemon version "
+ "daemon (%d, %d) kernel (%d, %d)\n",
+ sbi->min_proto, sbi->max_proto,
+ AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
+ goto fail_dput;
+ }
+
+ /* Establish highest kernel protocol version */
+ if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION)
+ sbi->version = AUTOFS_MAX_PROTO_VERSION;
+ else
+ sbi->version = sbi->max_proto;
+ sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
+
if (pgrp_set) {
sbi->oz_pgrp = find_get_pid(pgrp);
if (!sbi->oz_pgrp) {
root_inode->i_fop = &autofs4_root_operations;
root_inode->i_op = &autofs4_dir_inode_operations;
- /* Couldn't this be tested earlier? */
- if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
- sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
- pr_err("kernel does not match daemon version "
- "daemon (%d, %d) kernel (%d, %d)\n",
- sbi->min_proto, sbi->max_proto,
- AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
- goto fail_dput;
- }
-
- /* Establish highest kernel protocol version */
- if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION)
- sbi->version = AUTOFS_MAX_PROTO_VERSION;
- else
- sbi->version = sbi->max_proto;
- sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
-
pr_debug("pipe fd = %d, pgrp = %u\n", pipefd, pid_nr(sbi->oz_pgrp));
pipe = fget(pipefd);
if (!pipe) {
pr_err("could not open pipe file descriptor\n");
- goto fail_dput;
+ goto fail_put_pid;
}
ret = autofs_prepare_pipe(pipe);
if (ret < 0)
fail_fput:
pr_err("pipe file descriptor does not contain proper ops\n");
fput(pipe);
- /* fall through */
+fail_put_pid:
+ put_pid(sbi->oz_pgrp);
fail_dput:
dput(root);
goto fail_free;
fail_ino:
- kfree(ino);
+ autofs4_free_ino(ino);
fail_free:
- put_pid(sbi->oz_pgrp);
kfree(sbi);
s->s_fs_info = NULL;
return ret;
inode->i_fop = &autofs4_dir_operations;
} else if (S_ISLNK(mode)) {
inode->i_op = &autofs4_symlink_inode_operations;
- }
+ } else
+ WARN_ON(1);
return inode;
}
inode = autofs4_get_inode(dir->i_sb, S_IFLNK | 0555);
if (!inode) {
kfree(cp);
- if (!dentry->d_fsdata)
- kfree(ino);
return -ENOMEM;
}
inode->i_private = cp;
if (may_umount(mnt))
status = 1;
- pr_debug("returning %d\n", status);
+ pr_debug("may umount %d\n", status);
status = put_user(status, p);
#include <linux/cleancache.h>
#include <linux/dax.h>
#include <linux/badblocks.h>
+#include <linux/falloc.h>
#include <asm/uaccess.h>
#include "internal.h"
.is_dirty_writeback = buffer_check_dirty_writeback,
};
+#define BLKDEV_FALLOC_FL_SUPPORTED \
+ (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
+ FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
+
+static long blkdev_fallocate(struct file *file, int mode, loff_t start,
+ loff_t len)
+{
+ struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct address_space *mapping;
+ loff_t end = start + len - 1;
+ loff_t isize;
+ int error;
+
+ /* Fail if we don't recognize the flags. */
+ if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
+ return -EOPNOTSUPP;
+
+ /* Don't go off the end of the device. */
+ isize = i_size_read(bdev->bd_inode);
+ if (start >= isize)
+ return -EINVAL;
+ if (end >= isize) {
+ if (mode & FALLOC_FL_KEEP_SIZE) {
+ len = isize - start;
+ end = start + len - 1;
+ } else
+ return -EINVAL;
+ }
+
+ /*
+ * Don't allow IO that isn't aligned to logical block size.
+ */
+ if ((start | len) & (bdev_logical_block_size(bdev) - 1))
+ return -EINVAL;
+
+ /* Invalidate the page cache, including dirty pages. */
+ mapping = bdev->bd_inode->i_mapping;
+ truncate_inode_pages_range(mapping, start, end);
+
+ switch (mode) {
+ case FALLOC_FL_ZERO_RANGE:
+ case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
+ error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
+ GFP_KERNEL, false);
+ break;
+ case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
+ /* Only punch if the device can do zeroing discard. */
+ if (!blk_queue_discard(q) || !q->limits.discard_zeroes_data)
+ return -EOPNOTSUPP;
+ error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
+ GFP_KERNEL, 0);
+ break;
+ case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
+ error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
+ GFP_KERNEL, 0);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (error)
+ return error;
+
+ /*
+ * Invalidate again; if someone wandered in and dirtied a page,
+ * the caller will be given -EBUSY. The third argument is
+ * inclusive, so the rounding here is safe.
+ */
+ return invalidate_inode_pages2_range(mapping,
+ start >> PAGE_SHIFT,
+ end >> PAGE_SHIFT);
+}
+
const struct file_operations def_blk_fops = {
.open = blkdev_open,
.release = blkdev_close,
#endif
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
+ .fallocate = blkdev_fallocate,
};
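
For illustration only (not part of the change above): with the new .fallocate hook in place, userspace can punch or zero ranges of a block device with the ordinary fallocate(2) call. A hedged sketch, assuming the offsets used are multiples of the device's logical block size and that /dev/sdX is a placeholder for a device that may safely be written:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <linux/falloc.h>

int main(void)
{
        int fd = open("/dev/sdX", O_RDWR);      /* placeholder device node */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* zero 1 MiB starting at 4 KiB without changing the device size */
        if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
                      4096, 1024 * 1024) < 0)
                perror("fallocate");
        return 0;
}

The mode bits map directly onto the switch in blkdev_fallocate() above; anything outside BLKDEV_FALLOC_FL_SUPPORTED is rejected with -EOPNOTSUPP.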
int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
set_buffer_uptodate(bh);
} else {
buffer_io_error(bh, ", lost async page write");
- set_bit(AS_EIO, &page->mapping->flags);
+ mapping_set_error(page->mapping, -EIO);
set_buffer_write_io_error(bh);
clear_buffer_uptodate(bh);
SetPageError(page);
bh = head;
do {
if (buffer_write_io_error(bh) && page->mapping)
- set_bit(AS_EIO, &page->mapping->flags);
+ mapping_set_error(page->mapping, -EIO);
if (buffer_busy(bh))
goto failed;
bh = bh->b_this_page;
fail:
EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
inode->i_ino, page->index, ret);
- set_bit(AS_EIO, &page->mapping->flags);
+ mapping_set_error(page->mapping, -EIO);
unlock_page(page);
return ret;
}
if (bio->bi_error) {
SetPageError(page);
- set_bit(AS_EIO, &page->mapping->flags);
+ mapping_set_error(page->mapping, -EIO);
}
bh = head = page_buffers(page);
/*
fscrypt_pullback_bio_page(&page, true);
if (unlikely(bio->bi_error)) {
- set_bit(AS_EIO, &page->mapping->flags);
+ mapping_set_error(page->mapping, -EIO);
f2fs_stop_checkpoint(sbi, true);
}
end_page_writeback(page);
* filemap_fdatawait_range(), set it again so
* that user process can get -EIO from fsync().
*/
- set_bit(AS_EIO,
- &jinode->i_vfs_inode->i_mapping->flags);
+ mapping_set_error(jinode->i_vfs_inode->i_mapping, -EIO);
if (!ret)
ret = err;
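
The hunks above all replace open-coded set_bit(AS_EIO, &mapping->flags) with mapping_set_error(). For reference, the helper they switch to (already present in include/linux/pagemap.h) looks roughly like this, so -ENOSPC is recorded separately instead of being collapsed into AS_EIO:

static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (unlikely(error)) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}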
#ifndef _LOCKD_PROCFS_H
#define _LOCKD_PROCFS_H
-#include <linux/kconfig.h>
-
#if IS_ENABLED(CONFIG_PROC_FS)
int lockd_create_procfs(void);
void lockd_remove_procfs(void);
migrate->new_master,
migrate->master);
+ if (ret < 0)
+ kmem_cache_free(dlm_mle_cache, mle);
+
spin_unlock(&dlm->master_lock);
unlock:
spin_unlock(&dlm->spinlock);
* Let individual file system decide if it supports preallocation
* for directories or not.
*/
- if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
+ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) &&
+ !S_ISBLK(inode->i_mode))
return -ENODEV;
/* Check for wrap through zero too */
return retval;
}
-static void account_pipe_buffers(struct pipe_inode_info *pipe,
+static unsigned long account_pipe_buffers(struct user_struct *user,
unsigned long old, unsigned long new)
{
- atomic_long_add(new - old, &pipe->user->pipe_bufs);
+ return atomic_long_add_return(new - old, &user->pipe_bufs);
}
-static bool too_many_pipe_buffers_soft(struct user_struct *user)
+static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
- return pipe_user_pages_soft &&
- atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
+ return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft;
}
-static bool too_many_pipe_buffers_hard(struct user_struct *user)
+static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
- return pipe_user_pages_hard &&
- atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
+ return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard;
}
struct pipe_inode_info *alloc_pipe_info(void)
{
struct pipe_inode_info *pipe;
+ unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
+ struct user_struct *user = get_current_user();
+ unsigned long user_bufs;
pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
- if (pipe) {
- unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
- struct user_struct *user = get_current_user();
-
- if (!too_many_pipe_buffers_hard(user)) {
- if (too_many_pipe_buffers_soft(user))
- pipe_bufs = 1;
- pipe->bufs = kcalloc(pipe_bufs,
- sizeof(struct pipe_buffer),
- GFP_KERNEL_ACCOUNT);
- }
+ if (pipe == NULL)
+ goto out_free_uid;
- if (pipe->bufs) {
- init_waitqueue_head(&pipe->wait);
- pipe->r_counter = pipe->w_counter = 1;
- pipe->buffers = pipe_bufs;
- pipe->user = user;
- account_pipe_buffers(pipe, 0, pipe_bufs);
- mutex_init(&pipe->mutex);
- return pipe;
- }
- free_uid(user);
- kfree(pipe);
+ if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
+ pipe_bufs = pipe_max_size >> PAGE_SHIFT;
+
+ user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
+
+ if (too_many_pipe_buffers_soft(user_bufs)) {
+ user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
+ pipe_bufs = 1;
+ }
+
+ if (too_many_pipe_buffers_hard(user_bufs))
+ goto out_revert_acct;
+
+ pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
+ GFP_KERNEL_ACCOUNT);
+
+ if (pipe->bufs) {
+ init_waitqueue_head(&pipe->wait);
+ pipe->r_counter = pipe->w_counter = 1;
+ pipe->buffers = pipe_bufs;
+ pipe->user = user;
+ mutex_init(&pipe->mutex);
+ return pipe;
}
+out_revert_acct:
+ (void) account_pipe_buffers(user, pipe_bufs, 0);
+ kfree(pipe);
+out_free_uid:
+ free_uid(user);
return NULL;
}
{
int i;
- account_pipe_buffers(pipe, pipe->buffers, 0);
+ (void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
free_uid(pipe->user);
for (i = 0; i < pipe->buffers; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
.fasync = pipe_fasync,
};
+/*
+ * Currently we rely on the pipe array holding a power-of-2 number
+ * of pages.
+ */
+static inline unsigned int round_pipe_size(unsigned int size)
+{
+ unsigned long nr_pages;
+
+ nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
+}
+
/*
* Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
*/
-static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
+static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
struct pipe_buffer *bufs;
+ unsigned int size, nr_pages;
+ unsigned long user_bufs;
+ long ret = 0;
+
+ size = round_pipe_size(arg);
+ nr_pages = size >> PAGE_SHIFT;
+
+ if (!nr_pages)
+ return -EINVAL;
+
+ /*
+ * If trying to increase the pipe capacity, check that an
+ * unprivileged user is not trying to exceed various limits
+ * (soft limit check here, hard limit check just below).
+ * Decreasing the pipe capacity is always permitted, even
+ * if the user is currently over a limit.
+ */
+ if (nr_pages > pipe->buffers &&
+ size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);
+
+ if (nr_pages > pipe->buffers &&
+ (too_many_pipe_buffers_hard(user_bufs) ||
+ too_many_pipe_buffers_soft(user_bufs)) &&
+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto out_revert_acct;
+ }
/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
* contains more buffers than arg, then return busy.
*/
- if (nr_pages < pipe->nrbufs)
- return -EBUSY;
+ if (nr_pages < pipe->nrbufs) {
+ ret = -EBUSY;
+ goto out_revert_acct;
+ }
bufs = kcalloc(nr_pages, sizeof(*bufs),
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
- if (unlikely(!bufs))
- return -ENOMEM;
+ if (unlikely(!bufs)) {
+ ret = -ENOMEM;
+ goto out_revert_acct;
+ }
/*
* The pipe array wraps around, so just start the new one at zero
memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
}
- account_pipe_buffers(pipe, pipe->buffers, nr_pages);
pipe->curbuf = 0;
kfree(pipe->bufs);
pipe->bufs = bufs;
pipe->buffers = nr_pages;
return nr_pages * PAGE_SIZE;
-}
-
-/*
- * Currently we rely on the pipe array holding a power-of-2 number
- * of pages.
- */
-static inline unsigned int round_pipe_size(unsigned int size)
-{
- unsigned long nr_pages;
- nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
+out_revert_acct:
+ (void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
+ return ret;
}
/*
__pipe_lock(pipe);
switch (cmd) {
- case F_SETPIPE_SZ: {
- unsigned int size, nr_pages;
-
- size = round_pipe_size(arg);
- nr_pages = size >> PAGE_SHIFT;
-
- ret = -EINVAL;
- if (!nr_pages)
- goto out;
-
- if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
- ret = -EPERM;
- goto out;
- } else if ((too_many_pipe_buffers_hard(pipe->user) ||
- too_many_pipe_buffers_soft(pipe->user)) &&
- !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
- ret = -EPERM;
- goto out;
- }
- ret = pipe_set_size(pipe, nr_pages);
+ case F_SETPIPE_SZ:
+ ret = pipe_set_size(pipe, arg);
break;
- }
case F_GETPIPE_SZ:
ret = pipe->buffers * PAGE_SIZE;
break;
break;
}
-out:
__pipe_unlock(pipe);
return ret;
}
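
A hedged userspace illustration (not part of the patch) of the path exercised above. The kernel rounds the request up to a power-of-two number of pages via round_pipe_size(), so F_GETPIPE_SZ may report more than was asked for:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fds[2];

        if (pipe(fds) < 0)
                return 1;
        /* ask for 192 KiB; an unprivileged caller is bounded by pipe-max-size */
        if (fcntl(fds[1], F_SETPIPE_SZ, 192 * 1024) < 0)
                perror("F_SETPIPE_SZ");
        printf("pipe capacity: %d bytes\n", fcntl(fds[1], F_GETPIPE_SZ));
        return 0;
}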
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
+#include <linux/vmalloc.h>
#include <asm/uaccess.h>
fd_set_bits fds;
void *bits;
int ret, max_fds;
- unsigned int size;
+ size_t size, alloc_size;
struct fdtable *fdt;
/* Allocate small arguments on the stack to save memory and be faster */
long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
if (size > sizeof(stack_fds) / 6) {
/* Not enough space in on-stack array; must use kmalloc */
ret = -ENOMEM;
- bits = kmalloc(6 * size, GFP_KERNEL);
+ if (size > (SIZE_MAX / 6))
+ goto out_nofds;
+
+ alloc_size = 6 * size;
+ bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
+ if (!bits && alloc_size > PAGE_SIZE)
+ bits = vmalloc(alloc_size);
+
if (!bits)
goto out_nofds;
}
out:
if (bits != stack_fds)
- kfree(bits);
+ kvfree(bits);
out_nofds:
return ret;
}
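
The select() change above is an instance of a common pattern: try kmalloc() with __GFP_NOWARN, fall back to vmalloc() for large requests, and free with kvfree() either way. A generic sketch of the pattern; my_alloc_fallback() is a made-up name, not something this patch adds:

static void *my_alloc_fallback(size_t size)
{
        void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

        /* only fall back to vmalloc() when a large kmalloc() failed */
        if (!p && size > PAGE_SIZE)
                p = vmalloc(size);
        return p;               /* release with kvfree() */
}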
#ifndef _LINUX_AUTO_DEV_IOCTL_H
#define _LINUX_AUTO_DEV_IOCTL_H
-#include <linux/auto_fs.h>
-#include <linux/string.h>
-
-#define AUTOFS_DEVICE_NAME "autofs"
-
-#define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1
-#define AUTOFS_DEV_IOCTL_VERSION_MINOR 0
-
-#define AUTOFS_DEVID_LEN 16
-
-#define AUTOFS_DEV_IOCTL_SIZE sizeof(struct autofs_dev_ioctl)
-
-/*
- * An ioctl interface for autofs mount point control.
- */
-
-struct args_protover {
- __u32 version;
-};
-
-struct args_protosubver {
- __u32 sub_version;
-};
-
-struct args_openmount {
- __u32 devid;
-};
-
-struct args_ready {
- __u32 token;
-};
-
-struct args_fail {
- __u32 token;
- __s32 status;
-};
-
-struct args_setpipefd {
- __s32 pipefd;
-};
-
-struct args_timeout {
- __u64 timeout;
-};
-
-struct args_requester {
- __u32 uid;
- __u32 gid;
-};
-
-struct args_expire {
- __u32 how;
-};
-
-struct args_askumount {
- __u32 may_umount;
-};
-
-struct args_ismountpoint {
- union {
- struct args_in {
- __u32 type;
- } in;
- struct args_out {
- __u32 devid;
- __u32 magic;
- } out;
- };
-};
-
-/*
- * All the ioctls use this structure.
- * When sending a path size must account for the total length
- * of the chunk of memory otherwise is is the size of the
- * structure.
- */
-
-struct autofs_dev_ioctl {
- __u32 ver_major;
- __u32 ver_minor;
- __u32 size; /* total size of data passed in
- * including this struct */
- __s32 ioctlfd; /* automount command fd */
-
- /* Command parameters */
-
- union {
- struct args_protover protover;
- struct args_protosubver protosubver;
- struct args_openmount openmount;
- struct args_ready ready;
- struct args_fail fail;
- struct args_setpipefd setpipefd;
- struct args_timeout timeout;
- struct args_requester requester;
- struct args_expire expire;
- struct args_askumount askumount;
- struct args_ismountpoint ismountpoint;
- };
-
- char path[0];
-};
-
-static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
-{
- memset(in, 0, sizeof(struct autofs_dev_ioctl));
- in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
- in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
- in->size = sizeof(struct autofs_dev_ioctl);
- in->ioctlfd = -1;
-}
-
-/*
- * If you change this make sure you make the corresponding change
- * to autofs-dev-ioctl.c:lookup_ioctl()
- */
-enum {
- /* Get various version info */
- AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
- AUTOFS_DEV_IOCTL_PROTOVER_CMD,
- AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,
-
- /* Open mount ioctl fd */
- AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,
-
- /* Close mount ioctl fd */
- AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,
-
- /* Mount/expire status returns */
- AUTOFS_DEV_IOCTL_READY_CMD,
- AUTOFS_DEV_IOCTL_FAIL_CMD,
-
- /* Activate/deactivate autofs mount */
- AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
- AUTOFS_DEV_IOCTL_CATATONIC_CMD,
-
- /* Expiry timeout */
- AUTOFS_DEV_IOCTL_TIMEOUT_CMD,
-
- /* Get mount last requesting uid and gid */
- AUTOFS_DEV_IOCTL_REQUESTER_CMD,
-
- /* Check for eligible expire candidates */
- AUTOFS_DEV_IOCTL_EXPIRE_CMD,
-
- /* Request busy status */
- AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,
-
- /* Check if path is a mountpoint */
- AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
-};
-
-#define AUTOFS_IOCTL 0x93
-
-#define AUTOFS_DEV_IOCTL_VERSION \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_PROTOVER \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_OPENMOUNT \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_READY \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_FAIL \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_SETPIPEFD \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_CATATONIC \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_TIMEOUT \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_REQUESTER \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_EXPIRE \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
-
+#include <uapi/linux/auto_dev-ioctl.h>
#endif /* _LINUX_AUTO_DEV_IOCTL_H */
#define _LINUX_AUTO_FS_H
#include <linux/fs.h>
-#include <linux/limits.h>
#include <linux/ioctl.h>
#include <uapi/linux/auto_fs.h>
#endif /* _LINUX_AUTO_FS_H */
#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
-#define isdigit(c) ((__ismask(c)&(_D)) != 0)
+static inline int isdigit(int c)
+{
+ return '0' <= c && c <= '9';
+}
#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
* that gives better TLB efficiency.
*/
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
+/*
+ * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
+ * allocation failure reports (similarly to __GFP_NOWARN).
+ */
+#define DMA_ATTR_NO_WARN (1UL << 8)
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform.
#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
-#include <linux/kconfig.h>
#include <generated/autoksyms.h>
#define __EXPORT_SYMBOL(sym, sec) \
unsigned long nrexceptional;
pgoff_t writeback_index;/* writeback starts here */
const struct address_space_operations *a_ops; /* methods */
- unsigned long flags; /* error bits/gfp mask */
+ unsigned long flags; /* error bits */
spinlock_t private_lock; /* for use by the address_space */
+ gfp_t gfp_mask; /* implicit gfp mask for allocations */
struct list_head private_list; /* ditto */
void *private_data; /* ditto */
} __attribute__((aligned(sizeof(long))));
#include <linux/irqdomain.h>
#include <linux/lockdep.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/kconfig.h>
struct gpio_desc;
struct of_phandle_args;
vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
#define VMCOREINFO_CONFIG(name) \
vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
+#define VMCOREINFO_PAGE_OFFSET(value) \
+ vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value)
+#define VMCOREINFO_VMALLOC_START(value) \
+ vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value)
+#define VMCOREINFO_VMEMMAP_START(value) \
+ vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value)
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
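
A short sketch (hedged; the exact symbols exported are architecture specific) of how an architecture's arch_crash_save_vmcoreinfo() might use the new macros so that dump tools can translate kernel virtual addresses:

void arch_crash_save_vmcoreinfo(void)
{
        /* export the layout constants makedumpfile needs */
        VMCOREINFO_PAGE_OFFSET(PAGE_OFFSET);
        VMCOREINFO_VMALLOC_START(VMALLOC_START);
        VMCOREINFO_VMEMMAP_START(VMEMMAP_START);
}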
extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
+extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+ gfp_t gfp) __ref;
+extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
+extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
+extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
int min_count, unsigned long flags,
static inline void kmemleak_no_scan(const void *ptr)
{
}
+static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
+ int min_count, gfp_t gfp)
+{
+}
+static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+{
+}
+static inline void kmemleak_not_leak_phys(phys_addr_t phys)
+{
+}
+static inline void kmemleak_ignore_phys(phys_addr_t phys)
+{
+}
#endif /* CONFIG_DEBUG_KMEMLEAK */
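
A minimal, hedged sketch of a caller for the new *_phys variants; my_reserve_region()/my_release_head() and the physical range they receive are stand-ins, not part of this patch:

#include <linux/gfp.h>
#include <linux/kmemleak.h>

static void my_reserve_region(phys_addr_t phys, size_t size)
{
        /* track the block; this is a no-op unless phys has a lowmem mapping */
        kmemleak_alloc_phys(phys, size, 0, GFP_NOWAIT);
}

static void my_release_head(phys_addr_t phys, size_t size)
{
        /* stop tracking the part handed back to the system */
        kmemleak_free_part_phys(phys, size);
}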
int node,
const char namefmt[], ...);
+/**
+ * kthread_create - create a kthread on the current node
+ * @threadfn: the function to run in the thread
+ * @data: data pointer for @threadfn()
+ * @namefmt: printf-style format string for the thread name
+ * @...: arguments for @namefmt.
+ *
+ * This macro will create a kthread on the current node, leaving it in
+ * the stopped state. This is just a helper for kthread_create_on_node();
+ * see the documentation there for more details.
+ */
#define kthread_create(threadfn, data, namefmt, arg...) \
kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
bool kthread_should_park(void);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_data(struct task_struct *k);
-void *probe_kthread_data(struct task_struct *k);
+void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
* Simple work processor based on kthread.
*
* This provides easier way to make use of kthreads. A kthread_work
- * can be queued and flushed using queue/flush_kthread_work()
+ * can be queued and flushed using kthread_queue_work() and kthread_flush_work()
* respectively. Queued kthread_works are processed by a kthread
* running kthread_worker_fn().
*/
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);
+void kthread_delayed_work_timer_fn(unsigned long __data);
+
+enum {
+ KTW_FREEZABLE = 1 << 0, /* freeze during suspend */
+};
struct kthread_worker {
+ unsigned int flags;
spinlock_t lock;
struct list_head work_list;
+ struct list_head delayed_work_list;
struct task_struct *task;
struct kthread_work *current_work;
};
struct list_head node;
kthread_work_func_t func;
struct kthread_worker *worker;
+ /* Number of canceling calls that are running at the moment. */
+ int canceling;
+};
+
+struct kthread_delayed_work {
+ struct kthread_work work;
+ struct timer_list timer;
};
#define KTHREAD_WORKER_INIT(worker) { \
.lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
.work_list = LIST_HEAD_INIT((worker).work_list), \
+ .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
}
#define KTHREAD_WORK_INIT(work, fn) { \
.func = (fn), \
}
+#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) { \
+ .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
+ .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn, \
+ 0, (unsigned long)&(dwork), \
+ TIMER_IRQSAFE), \
+ }
+
#define DEFINE_KTHREAD_WORKER(worker) \
struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
#define DEFINE_KTHREAD_WORK(work, fn) \
struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
+#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn) \
+ struct kthread_delayed_work dwork = \
+ KTHREAD_DELAYED_WORK_INIT(dwork, fn)
+
/*
* kthread_worker.lock needs its own lockdep class key when defined on
* stack with lockdep enabled. Use the following macros in such cases.
*/
#ifdef CONFIG_LOCKDEP
# define KTHREAD_WORKER_INIT_ONSTACK(worker) \
- ({ init_kthread_worker(&worker); worker; })
+ ({ kthread_init_worker(&worker); worker; })
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \
struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
#else
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
#endif
-extern void __init_kthread_worker(struct kthread_worker *worker,
+extern void __kthread_init_worker(struct kthread_worker *worker,
const char *name, struct lock_class_key *key);
-#define init_kthread_worker(worker) \
+#define kthread_init_worker(worker) \
do { \
static struct lock_class_key __key; \
- __init_kthread_worker((worker), "("#worker")->lock", &__key); \
+ __kthread_init_worker((worker), "("#worker")->lock", &__key); \
} while (0)
-#define init_kthread_work(work, fn) \
+#define kthread_init_work(work, fn) \
do { \
memset((work), 0, sizeof(struct kthread_work)); \
INIT_LIST_HEAD(&(work)->node); \
(work)->func = (fn); \
} while (0)
+#define kthread_init_delayed_work(dwork, fn) \
+ do { \
+ kthread_init_work(&(dwork)->work, (fn)); \
+ __setup_timer(&(dwork)->timer, \
+ kthread_delayed_work_timer_fn, \
+ (unsigned long)(dwork), \
+ TIMER_IRQSAFE); \
+ } while (0)
+
int kthread_worker_fn(void *worker_ptr);
-bool queue_kthread_work(struct kthread_worker *worker,
+__printf(2, 3)
+struct kthread_worker *
+kthread_create_worker(unsigned int flags, const char namefmt[], ...);
+
+struct kthread_worker *
+kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+ const char namefmt[], ...);
+
+bool kthread_queue_work(struct kthread_worker *worker,
struct kthread_work *work);
-void flush_kthread_work(struct kthread_work *work);
-void flush_kthread_worker(struct kthread_worker *worker);
+
+bool kthread_queue_delayed_work(struct kthread_worker *worker,
+ struct kthread_delayed_work *dwork,
+ unsigned long delay);
+
+bool kthread_mod_delayed_work(struct kthread_worker *worker,
+ struct kthread_delayed_work *dwork,
+ unsigned long delay);
+
+void kthread_flush_work(struct kthread_work *work);
+void kthread_flush_worker(struct kthread_worker *worker);
+
+bool kthread_cancel_work_sync(struct kthread_work *work);
+bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
+
+void kthread_destroy_worker(struct kthread_worker *worker);
#endif /* _LINUX_KTHREAD_H */
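
A hedged usage sketch of the worker API declared above; the my_* names are invented for the example and error handling is trimmed:

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/printk.h>

static struct kthread_worker *my_worker;
static struct kthread_delayed_work my_dwork;

static void my_work_fn(struct kthread_work *work)
{
        pr_info("delayed kthread work ran\n");
}

static int my_init(void)
{
        my_worker = kthread_create_worker(0, "mydrv");
        if (IS_ERR(my_worker))
                return PTR_ERR(my_worker);

        kthread_init_delayed_work(&my_dwork, my_work_fn);
        kthread_queue_delayed_work(my_worker, &my_dwork,
                                   msecs_to_jiffies(100));
        return 0;
}

static void my_exit(void)
{
        kthread_cancel_delayed_work_sync(&my_dwork);
        kthread_destroy_worker(my_worker);
}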
#include <linux/hugetlb_inline.h>
/*
- * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
- * allocation mode flags.
+ * Bits in mapping->flags.
*/
enum mapping_flags {
- AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */
- AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
- AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
- AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
- AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
+ AS_EIO = 0, /* IO error on async write */
+ AS_ENOSPC = 1, /* ENOSPC on async write */
+ AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */
+ AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */
+ AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
- AS_NO_WRITEBACK_TAGS = __GFP_BITS_SHIFT + 5,
+ AS_NO_WRITEBACK_TAGS = 5,
};
static inline void mapping_set_error(struct address_space *mapping, int error)
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
- return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
+ return mapping->gfp_mask;
}
/* Restricts the given gfp_mask to what the mapping allows. */
*/
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
- m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
- (__force unsigned long)mask;
+ m->gfp_mask = mask;
}
void release_pages(struct page **pages, int nr, bool cold);
*
* This function updates @iter->index in the case of a successful lookup.
* For tagged lookup it also eats @iter->tags.
+ *
+ * There are several cases where 'slot' can be passed in as NULL to this
+ * function. These cases result from the use of radix_tree_iter_next() or
+ * radix_tree_iter_retry(). In these cases we don't end up dereferencing
+ * 'slot' because either:
+ * a) we are doing tagged iteration and iter->tags has been set to 0, or
+ * b) we are doing non-tagged iteration, and iter->index and iter->next_index
+ * have been set up so that radix_tree_chunk_size() returns 1 or 0.
*/
static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
unsigned int get_random_int(void);
unsigned long get_random_long(void);
-unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
+unsigned long randomize_page(unsigned long start, unsigned long range);
u32 prandom_u32(void);
void prandom_bytes(void *buf, size_t nbytes);
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/list.h>
+#include <linux/irq_work.h>
#include <linux/bug.h>
#include <linux/fs.h>
#include <linux/poll.h>
size_t subbufs_consumed; /* count of sub-buffers consumed */
struct rchan *chan; /* associated channel */
wait_queue_head_t read_wait; /* reader wait queue */
- struct timer_list timer; /* reader wake-up timer */
+ struct irq_work wakeup_work; /* reader wakeup */
struct dentry *dentry; /* channel file dentry */
struct kref kref; /* channel buffer refcount */
struct page **page_array; /* array of current buffer pages */
struct list_head list_id; /* undo requests on this array */
int sem_nsems; /* no. of semaphores in array */
int complex_count; /* pending complex operations */
+ bool complex_mode; /* no parallel simple ops */
};
#ifdef CONFIG_SYSVIPC
--- /dev/null
+/*
+ * Copyright 2008 Red Hat, Inc. All rights reserved.
+ * Copyright 2008 Ian Kent <raven@themaw.net>
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ */
+
+#ifndef _UAPI_LINUX_AUTO_DEV_IOCTL_H
+#define _UAPI_LINUX_AUTO_DEV_IOCTL_H
+
+#include <linux/auto_fs.h>
+#include <linux/string.h>
+
+#define AUTOFS_DEVICE_NAME "autofs"
+
+#define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1
+#define AUTOFS_DEV_IOCTL_VERSION_MINOR 0
+
+#define AUTOFS_DEV_IOCTL_SIZE sizeof(struct autofs_dev_ioctl)
+
+/*
+ * An ioctl interface for autofs mount point control.
+ */
+
+struct args_protover {
+ __u32 version;
+};
+
+struct args_protosubver {
+ __u32 sub_version;
+};
+
+struct args_openmount {
+ __u32 devid;
+};
+
+struct args_ready {
+ __u32 token;
+};
+
+struct args_fail {
+ __u32 token;
+ __s32 status;
+};
+
+struct args_setpipefd {
+ __s32 pipefd;
+};
+
+struct args_timeout {
+ __u64 timeout;
+};
+
+struct args_requester {
+ __u32 uid;
+ __u32 gid;
+};
+
+struct args_expire {
+ __u32 how;
+};
+
+struct args_askumount {
+ __u32 may_umount;
+};
+
+struct args_ismountpoint {
+ union {
+ struct args_in {
+ __u32 type;
+ } in;
+ struct args_out {
+ __u32 devid;
+ __u32 magic;
+ } out;
+ };
+};
+
+/*
+ * All the ioctls use this structure.
+ * When sending a path size must account for the total length
+ * of the chunk of memory otherwise is is the size of the
+ * structure.
+ */
+
+struct autofs_dev_ioctl {
+ __u32 ver_major;
+ __u32 ver_minor;
+ __u32 size; /* total size of data passed in
+ * including this struct */
+ __s32 ioctlfd; /* automount command fd */
+
+ /* Command parameters */
+
+ union {
+ struct args_protover protover;
+ struct args_protosubver protosubver;
+ struct args_openmount openmount;
+ struct args_ready ready;
+ struct args_fail fail;
+ struct args_setpipefd setpipefd;
+ struct args_timeout timeout;
+ struct args_requester requester;
+ struct args_expire expire;
+ struct args_askumount askumount;
+ struct args_ismountpoint ismountpoint;
+ };
+
+ char path[0];
+};
+
+static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
+{
+ memset(in, 0, sizeof(struct autofs_dev_ioctl));
+ in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
+ in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
+ in->size = sizeof(struct autofs_dev_ioctl);
+ in->ioctlfd = -1;
+}
+
+/*
+ * If you change this make sure you make the corresponding change
+ * to autofs-dev-ioctl.c:lookup_ioctl()
+ */
+enum {
+ /* Get various version info */
+ AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
+ AUTOFS_DEV_IOCTL_PROTOVER_CMD,
+ AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,
+
+ /* Open mount ioctl fd */
+ AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,
+
+ /* Close mount ioctl fd */
+ AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,
+
+ /* Mount/expire status returns */
+ AUTOFS_DEV_IOCTL_READY_CMD,
+ AUTOFS_DEV_IOCTL_FAIL_CMD,
+
+ /* Activate/deactivate autofs mount */
+ AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
+ AUTOFS_DEV_IOCTL_CATATONIC_CMD,
+
+ /* Expiry timeout */
+ AUTOFS_DEV_IOCTL_TIMEOUT_CMD,
+
+ /* Get mount last requesting uid and gid */
+ AUTOFS_DEV_IOCTL_REQUESTER_CMD,
+
+ /* Check for eligible expire candidates */
+ AUTOFS_DEV_IOCTL_EXPIRE_CMD,
+
+ /* Request busy status */
+ AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,
+
+ /* Check if path is a mountpoint */
+ AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
+};
+
+#define AUTOFS_IOCTL 0x93
+
+#define AUTOFS_DEV_IOCTL_VERSION \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOVER \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_OPENMOUNT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_READY \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_FAIL \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_SETPIPEFD \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CATATONIC \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_TIMEOUT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_REQUESTER \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_EXPIRE \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
+
+#endif /* _UAPI_LINUX_AUTO_DEV_IOCTL_H */
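
For orientation, a hedged userspace sketch of talking to the control device using the header above; it assumes the autofs module is loaded so /dev/autofs exists:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/auto_dev-ioctl.h>

int main(void)
{
        struct autofs_dev_ioctl param;
        int devfd = open("/dev/" AUTOFS_DEVICE_NAME, O_RDONLY);

        if (devfd < 0) {
                perror("open /dev/autofs");
                return 1;
        }
        init_autofs_dev_ioctl(&param);
        if (ioctl(devfd, AUTOFS_DEV_IOCTL_VERSION, &param) == 0)
                printf("autofs dev ioctl version %u.%u\n",
                       param.ver_major, param.ver_minor);
        return 0;
}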
#define _UAPI_LINUX_AUTO_FS_H
#include <linux/types.h>
+#include <linux/limits.h>
#ifndef __KERNEL__
#include <sys/ioctl.h>
#endif /* __KERNEL__ */
config RELAY
bool "Kernel->user space relay support (formerly relayfs)"
+ select IRQ_WORK
help
This option enables support for relay interface support in
certain file systems (such as debugfs).
long r_msgtype;
long r_maxsize;
- /*
- * Mark r_msg volatile so that the compiler
- * does not try to get smart and optimize
- * it. We rely on this for the lockless
- * receive algorithm.
- */
- struct msg_msg *volatile r_msg;
+ struct msg_msg *r_msg;
};
/* one msg_sender for each sleeping sender */
struct msg_sender {
struct list_head list;
struct task_struct *tsk;
+ size_t msgsz;
};
#define SEARCH_ANY 1
return msq->q_perm.id;
}
-static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
+static inline bool msg_fits_inqueue(struct msg_queue *msq, size_t msgsz)
+{
+ return msgsz + msq->q_cbytes <= msq->q_qbytes &&
+ 1 + msq->q_qnum <= msq->q_qbytes;
+}
+
+static inline void ss_add(struct msg_queue *msq,
+ struct msg_sender *mss, size_t msgsz)
{
mss->tsk = current;
+ mss->msgsz = msgsz;
__set_current_state(TASK_INTERRUPTIBLE);
list_add_tail(&mss->list, &msq->q_senders);
}
static inline void ss_del(struct msg_sender *mss)
{
- if (mss->list.next != NULL)
+ if (mss->list.next)
list_del(&mss->list);
}
-static void ss_wakeup(struct list_head *h, int kill)
+static void ss_wakeup(struct msg_queue *msq,
+ struct wake_q_head *wake_q, bool kill)
{
struct msg_sender *mss, *t;
+ struct task_struct *stop_tsk = NULL;
+ struct list_head *h = &msq->q_senders;
list_for_each_entry_safe(mss, t, h, list) {
if (kill)
mss->list.next = NULL;
- wake_up_process(mss->tsk);
+
+ /*
+ * Stop at the first task we don't wakeup,
+ * we've already iterated the original
+ * sender queue.
+ */
+ else if (stop_tsk == mss->tsk)
+ break;
+ /*
+ * We are not in an EIDRM scenario here, therefore
+ * verify that we really need to wakeup the task.
+ * To maintain current semantics and wakeup order,
+ * move the sender to the tail on behalf of the
+ * blocked task.
+ */
+ else if (!msg_fits_inqueue(msq, mss->msgsz)) {
+ if (!stop_tsk)
+ stop_tsk = mss->tsk;
+
+ list_move_tail(&mss->list, &msq->q_senders);
+ continue;
+ }
+
+ wake_q_add(wake_q, mss->tsk);
}
}
-static void expunge_all(struct msg_queue *msq, int res)
+static void expunge_all(struct msg_queue *msq, int res,
+ struct wake_q_head *wake_q)
{
struct msg_receiver *msr, *t;
list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
- msr->r_msg = NULL; /* initialize expunge ordering */
- wake_up_process(msr->r_tsk);
- /*
- * Ensure that the wakeup is visible before setting r_msg as
- * the receiving end depends on it: either spinning on a nil,
- * or dealing with -EAGAIN cases. See lockless receive part 1
- * and 2 in do_msgrcv().
- */
- smp_wmb(); /* barrier (B) */
- msr->r_msg = ERR_PTR(res);
+ wake_q_add(wake_q, msr->r_tsk);
+ WRITE_ONCE(msr->r_msg, ERR_PTR(res));
}
}
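
For orientation, the conversion above follows the usual wake_q pattern: queue the task for wakeup and publish the result while still holding the ipc lock, then wake it only after the lock is dropped. A condensed sketch; receiver stands in for the msg_receiver being woken:

        WAKE_Q(wake_q);                         /* on-stack wake queue */

        ipc_lock_object(&msq->q_perm);
        wake_q_add(&wake_q, receiver->r_tsk);   /* defer the actual wakeup */
        WRITE_ONCE(receiver->r_msg, msg);       /* publish the result */
        ipc_unlock_object(&msq->q_perm);

        wake_up_q(&wake_q);                     /* wake without the lock held */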
{
struct msg_msg *msg, *t;
struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
+ WAKE_Q(wake_q);
- expunge_all(msq, -EIDRM);
- ss_wakeup(&msq->q_senders, 1);
+ expunge_all(msq, -EIDRM, &wake_q);
+ ss_wakeup(msq, &wake_q, true);
msg_rmid(ns, msq);
ipc_unlock_object(&msq->q_perm);
+ wake_up_q(&wake_q);
rcu_read_unlock();
list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
freeque(ns, ipcp);
goto out_up;
case IPC_SET:
+ {
+ WAKE_Q(wake_q);
+
if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
!capable(CAP_SYS_RESOURCE)) {
err = -EPERM;
msq->q_qbytes = msqid64.msg_qbytes;
msq->q_ctime = get_seconds();
- /* sleeping receivers might be excluded by
+ /*
+ * Sleeping receivers might be excluded by
* stricter permissions.
*/
- expunge_all(msq, -EAGAIN);
- /* sleeping senders might be able to send
+ expunge_all(msq, -EAGAIN, &wake_q);
+ /*
+ * Sleeping senders might be able to send
* due to a larger queue size.
*/
- ss_wakeup(&msq->q_senders, 0);
- break;
+ ss_wakeup(msq, &wake_q, false);
+ ipc_unlock_object(&msq->q_perm);
+ wake_up_q(&wake_q);
+
+ goto out_unlock1;
+ }
default:
err = -EINVAL;
goto out_unlock1;
return 0;
}
-static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
+static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
+ struct wake_q_head *wake_q)
{
struct msg_receiver *msr, *t;
list_del(&msr->r_list);
if (msr->r_maxsize < msg->m_ts) {
- /* initialize pipelined send ordering */
- msr->r_msg = NULL;
- wake_up_process(msr->r_tsk);
- /* barrier (B) see barrier comment below */
- smp_wmb();
- msr->r_msg = ERR_PTR(-E2BIG);
+ wake_q_add(wake_q, msr->r_tsk);
+ WRITE_ONCE(msr->r_msg, ERR_PTR(-E2BIG));
} else {
- msr->r_msg = NULL;
msq->q_lrpid = task_pid_vnr(msr->r_tsk);
msq->q_rtime = get_seconds();
- wake_up_process(msr->r_tsk);
- /*
- * Ensure that the wakeup is visible before
- * setting r_msg, as the receiving can otherwise
- * exit - once r_msg is set, the receiver can
- * continue. See lockless receive part 1 and 2
- * in do_msgrcv(). Barrier (B).
- */
- smp_wmb();
- msr->r_msg = msg;
+ wake_q_add(wake_q, msr->r_tsk);
+ WRITE_ONCE(msr->r_msg, msg);
return 1;
}
}
struct msg_msg *msg;
int err;
struct ipc_namespace *ns;
+ WAKE_Q(wake_q);
ns = current->nsproxy->ipc_ns;
if (err)
goto out_unlock0;
- if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
- 1 + msq->q_qnum <= msq->q_qbytes) {
+ if (msg_fits_inqueue(msq, msgsz))
break;
- }
/* queue full, wait: */
if (msgflg & IPC_NOWAIT) {
}
/* enqueue the sender and prepare to block */
- ss_add(msq, &s);
+ ss_add(msq, &s, msgsz);
if (!ipc_rcu_getref(msq)) {
err = -EIDRM;
err = -EIDRM;
goto out_unlock0;
}
-
ss_del(&s);
if (signal_pending(current)) {
}
}
+
msq->q_lspid = task_tgid_vnr(current);
msq->q_stime = get_seconds();
- if (!pipelined_send(msq, msg)) {
+ if (!pipelined_send(msq, msg, &wake_q)) {
/* no one is waiting for this message, enqueue it */
list_add_tail(&msg->m_list, &msq->q_messages);
msq->q_cbytes += msgsz;
out_unlock0:
ipc_unlock_object(&msq->q_perm);
+ wake_up_q(&wake_q);
out_unlock1:
rcu_read_unlock();
if (msg != NULL)
struct msg_queue *msq;
struct ipc_namespace *ns;
struct msg_msg *msg, *copy = NULL;
+ WAKE_Q(wake_q);
ns = current->nsproxy->ipc_ns;
msq->q_cbytes -= msg->m_ts;
atomic_sub(msg->m_ts, &ns->msg_bytes);
atomic_dec(&ns->msg_hdrs);
- ss_wakeup(&msq->q_senders, 0);
+ ss_wakeup(msq, &wake_q, false);
goto out_unlock0;
}
rcu_read_unlock();
schedule();
- /* Lockless receive, part 1:
- * Disable preemption. We don't hold a reference to the queue
- * and getting a reference would defeat the idea of a lockless
- * operation, thus the code relies on rcu to guarantee the
- * existence of msq:
+ /*
+ * Lockless receive, part 1:
+ * We don't hold a reference to the queue and getting a
+ * reference would defeat the idea of a lockless operation,
+ * thus the code relies on rcu to guarantee the existence of
+ * msq:
	 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
	 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
- * rcu_read_lock() prevents preemption between reading r_msg
- * and acquiring the q_perm.lock in ipc_lock_object().
*/
rcu_read_lock();
- /* Lockless receive, part 2:
- * Wait until pipelined_send or expunge_all are outside of
- * wake_up_process(). There is a race with exit(), see
- * ipc/mqueue.c for the details. The correct serialization
- * ensures that a receiver cannot continue without the wakeup
- * being visibible _before_ setting r_msg:
- *
- * CPU 0 CPU 1
- * <loop receiver>
- * smp_rmb(); (A) <-- pair -. <waker thread>
- * <load ->r_msg> | msr->r_msg = NULL;
- * | wake_up_process();
- * <continue> `------> smp_wmb(); (B)
- * msr->r_msg = msg;
+ /*
+ * Lockless receive, part 2:
+ * The work in pipelined_send() and expunge_all():
+ * - Set pointer to message
+ * - Queue the receiver task for later wakeup
+ * - Wake up the process after the lock is dropped.
*
- * Where (A) orders the message value read and where (B) orders
- * the write to the r_msg -- done in both pipelined_send and
- * expunge_all.
- */
- for (;;) {
- /*
- * Pairs with writer barrier in pipelined_send
- * or expunge_all.
- */
- smp_rmb(); /* barrier (A) */
- msg = (struct msg_msg *)msr_d.r_msg;
- if (msg)
- break;
-
- /*
- * The cpu_relax() call is a compiler barrier
- * which forces everything in this loop to be
- * re-loaded.
- */
- cpu_relax();
- }
-
- /* Lockless receive, part 3:
- * If there is a message or an error then accept it without
- * locking.
+ * Should the process wake up before this wakeup (due to a
+ * signal) it will either see the message and continue ...
*/
+ msg = READ_ONCE(msr_d.r_msg);
if (msg != ERR_PTR(-EAGAIN))
goto out_unlock1;
- /* Lockless receive, part 3:
- * Acquire the queue spinlock.
- */
+ /*
+ * ... or see -EAGAIN, acquire the lock to check the message
+ * again.
+ */
ipc_lock_object(&msq->q_perm);
- /* Lockless receive, part 4:
- * Repeat test after acquiring the spinlock.
- */
- msg = (struct msg_msg *)msr_d.r_msg;
+ msg = msr_d.r_msg;
if (msg != ERR_PTR(-EAGAIN))
goto out_unlock0;
out_unlock0:
ipc_unlock_object(&msq->q_perm);
+ wake_up_q(&wake_q);
out_unlock1:
rcu_read_unlock();
if (IS_ERR(msg)) {
/*
* Locking:
+ * a) global sem_lock() for read/write
* sem_undo.id_next,
* sem_array.complex_count,
- * sem_array.pending{_alter,_cont},
- * sem_array.sem_undo: global sem_lock() for read/write
- * sem_undo.proc_next: only "current" is allowed to read/write that field.
+ * sem_array.complex_mode
+ * sem_array.pending{_alter,_const},
+ * sem_array.sem_undo
*
+ * b) global or semaphore sem_lock() for read/write:
* sem_array.sem_base[i].pending_{const,alter}:
- * global or semaphore sem_lock() for read/write
+ * sem_array.complex_mode (for read)
+ *
+ * c) special:
+ * sem_undo_list.list_proc:
+ * * undo_list->lock for write
+ * * rcu for read
*/
#define sc_semmsl sem_ctls[0]
}
/*
- * Wait until all currently ongoing simple ops have completed.
+ * Enter the mode suitable for non-simple operations:
* Caller must own sem_perm.lock.
- * New simple ops cannot start, because simple ops first check
- * that sem_perm.lock is free.
- * that a) sem_perm.lock is free and b) complex_count is 0.
*/
-static void sem_wait_array(struct sem_array *sma)
+static void complexmode_enter(struct sem_array *sma)
{
int i;
struct sem *sem;
- if (sma->complex_count) {
- /* The thread that increased sma->complex_count waited on
- * all sem->lock locks. Thus we don't need to wait again.
- */
+ if (sma->complex_mode) {
+ /* We are already in complex_mode. Nothing to do */
return;
}
+	/* We need a full barrier after setting complex_mode:
+ * The write to complex_mode must be visible
+ * before we read the first sem->lock spinlock state.
+ */
+ smp_store_mb(sma->complex_mode, true);
+
for (i = 0; i < sma->sem_nsems; i++) {
sem = sma->sem_base + i;
spin_unlock_wait(&sem->lock);
}
+ /*
+	 * spin_unlock_wait() is not a memory barrier, it is only a
+	 * control barrier. The code must pair with spin_unlock(&sem->lock),
+	 * thus the control barrier alone is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+ smp_rmb();
}
+/*
+ * Try to leave the mode that disallows simple operations:
+ * Caller must own sem_perm.lock.
+ */
+static void complexmode_tryleave(struct sem_array *sma)
+{
+ if (sma->complex_count) {
+ /* Complex ops are sleeping.
+ * We must stay in complex mode
+ */
+ return;
+ }
+ /*
+ * Immediately after setting complex_mode to false,
+ * a simple op can start. Thus: all memory writes
+ * performed by the current operation must be visible
+ * before we set complex_mode to false.
+ */
+ smp_store_release(&sma->complex_mode, false);
+}
+
+#define SEM_GLOBAL_LOCK (-1)
/*
* If the request contains only one semaphore operation, and there are
* no complex transactions pending, lock only the semaphore involved.
/* Complex operation - acquire a full lock */
ipc_lock_object(&sma->sem_perm);
- /* And wait until all simple ops that are processed
- * right now have dropped their locks.
- */
- sem_wait_array(sma);
- return -1;
+ /* Prevent parallel simple ops */
+ complexmode_enter(sma);
+ return SEM_GLOBAL_LOCK;
}
/*
* Only one semaphore affected - try to optimize locking.
- * The rules are:
- * - optimized locking is possible if no complex operation
- * is either enqueued or processed right now.
- * - The test for enqueued complex ops is simple:
- * sma->complex_count != 0
- * - Testing for complex ops that are processed right now is
- * a bit more difficult. Complex ops acquire the full lock
- * and first wait that the running simple ops have completed.
- * (see above)
- * Thus: If we own a simple lock and the global lock is free
- * and complex_count is now 0, then it will stay 0 and
- * thus just locking sem->lock is sufficient.
+ * Optimized locking is possible if no complex operation
+ * is either enqueued or processed right now.
+ *
+ * Both facts are tracked by complex_mode.
*/
sem = sma->sem_base + sops->sem_num;
- if (sma->complex_count == 0) {
+ /*
+ * Initial check for complex_mode. Just an optimization,
+ * no locking, no memory barrier.
+ */
+ if (!sma->complex_mode) {
/*
* It appears that no complex operation is around.
* Acquire the per-semaphore lock.
*/
spin_lock(&sem->lock);
- /* Then check that the global lock is free */
- if (!spin_is_locked(&sma->sem_perm.lock)) {
- /*
- * We need a memory barrier with acquire semantics,
- * otherwise we can race with another thread that does:
- * complex_count++;
- * spin_unlock(sem_perm.lock);
- */
- smp_acquire__after_ctrl_dep();
+ /*
+ * See 51d7d5205d33
+ * ("powerpc: Add smp_mb() to arch_spin_is_locked()"):
+ * A full barrier is required: the write of sem->lock
+ * must be visible before the read is executed
+ */
+ smp_mb();
- /*
- * Now repeat the test of complex_count:
- * It can't change anymore until we drop sem->lock.
- * Thus: if is now 0, then it will stay 0.
- */
- if (sma->complex_count == 0) {
- /* fast path successful! */
- return sops->sem_num;
- }
+ if (!smp_load_acquire(&sma->complex_mode)) {
+ /* fast path successful! */
+ return sops->sem_num;
}
spin_unlock(&sem->lock);
}
/* Not a false alarm, thus complete the sequence for a
* full lock.
*/
- sem_wait_array(sma);
- return -1;
+ complexmode_enter(sma);
+ return SEM_GLOBAL_LOCK;
}
}
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
- if (locknum == -1) {
+ if (locknum == SEM_GLOBAL_LOCK) {
unmerge_queues(sma);
+ complexmode_tryleave(sma);
ipc_unlock_object(&sma->sem_perm);
} else {
struct sem *sem = sma->sem_base + locknum;
}
sma->complex_count = 0;
+ sma->complex_mode = true; /* dropped by sem_unlock below */
INIT_LIST_HEAD(&sma->pending_alter);
INIT_LIST_HEAD(&sma->pending_const);
INIT_LIST_HEAD(&sma->list_id);
struct list_head tasks;
int semid, i;
+ cond_resched();
+
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
struct sem_undo, list_proc);
/*
* The proc interface isn't aware of sem_lock(), it calls
* ipc_lock_object() directly (in sysvipc_find_ipc).
- * In order to stay compatible with sem_lock(), we must wait until
- * all simple semop() calls have left their critical regions.
+ * In order to stay compatible with sem_lock(), we must
+ * enter / leave complex_mode.
*/
- sem_wait_array(sma);
+ complexmode_enter(sma);
sem_otime = get_semotime(sma);
sem_otime,
sma->sem_ctime);
+ complexmode_tryleave(sma);
+
return 0;
}
#endif
CONFIG_ARMV8_DEPRECATED=y
CONFIG_ASHMEM=y
CONFIG_AUDIT=y
-CONFIG_BLK_DEV_DM=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_SCHED=y
CONFIG_CP15_BARRIER_EMULATION=y
-CONFIG_DM_CRYPT=y
-CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_FEC=y
+CONFIG_DEFAULT_SECURITY_SELINUX=y
CONFIG_EMBEDDED=y
CONFIG_FB=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_QUOTA=y
CONFIG_RTC_CLASS=y
CONFIG_RT_GROUP_SCHED=y
+CONFIG_SECCOMP=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
# CONFIG_PM_WAKELOCKS_GC is not set
# CONFIG_VT is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BLK_DEV_DM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_COMPACTION=y
CONFIG_DEBUG_RODATA=y
+CONFIG_DM_CRYPT=y
CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
CONFIG_DRAGONRISE_FF=y
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_EXT4_FS=y
trace_sched_process_hang(t);
- if (!sysctl_hung_task_warnings)
+ if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic)
return;
- if (sysctl_hung_task_warnings > 0)
- sysctl_hung_task_warnings--;
-
/*
* Ok, the task did not get scheduled for more than 2 minutes,
* complain:
*/
- pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
- t->comm, t->pid, timeout);
- pr_err(" %s %s %.*s\n",
- print_tainted(), init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
- pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
- " disables this message.\n");
- sched_show_task(t);
- debug_show_all_locks();
+ if (sysctl_hung_task_warnings) {
+ sysctl_hung_task_warnings--;
+ pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
+ t->comm, t->pid, timeout);
+ pr_err(" %s %s %.*s\n",
+ print_tainted(), init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
+ " disables this message.\n");
+ sched_show_task(t);
+ debug_show_all_locks();
+ }
touch_nmi_watchdog();
#include <linux/cpu.h>
#include <linux/jump_label.h>
-#include <asm-generic/sections.h>
+#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
}
/**
- * probe_kthread_data - speculative version of kthread_data()
+ * kthread_probe_data - speculative version of kthread_data()
* @task: possible kthread task in question
*
* @task could be a kthread task. Return the data value specified when it
* inaccessible for any reason, %NULL is returned. This function requires
* that @task itself is safe to dereference.
*/
-void *probe_kthread_data(struct task_struct *task)
+void *kthread_probe_data(struct task_struct *task)
{
struct kthread *kthread = to_kthread(task);
void *data = NULL;
}
}
-/**
- * kthread_create_on_node - create a kthread.
- * @threadfn: the function to run until signal_pending(current).
- * @data: data ptr for @threadfn.
- * @node: task and thread structures for the thread are allocated on this node
- * @namefmt: printf-style name for the thread.
- *
- * Description: This helper function creates and names a kernel
- * thread. The thread will be stopped: use wake_up_process() to start
- * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
- * is affine to all CPUs.
- *
- * If thread is going to be bound on a particular cpu, give its node
- * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
- * When woken, the thread will run @threadfn() with @data as its
- * argument. @threadfn() can either call do_exit() directly if it is a
- * standalone thread for which no one will call kthread_stop(), or
- * return when 'kthread_should_stop()' is true (which means
- * kthread_stop() has been called). The return value should be zero
- * or a negative error number; it will be passed to kthread_stop().
- *
- * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
- */
-struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
- void *data, int node,
- const char namefmt[],
- ...)
+static struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
+ void *data, int node,
+ const char namefmt[],
+ va_list args)
{
DECLARE_COMPLETION_ONSTACK(done);
struct task_struct *task;
task = create->result;
if (!IS_ERR(task)) {
static const struct sched_param param = { .sched_priority = 0 };
- va_list args;
- va_start(args, namefmt);
vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
- va_end(args);
/*
* root may have changed our (kthreadd's) priority or CPU mask.
* The kernel thread should not inherit these properties.
kfree(create);
return task;
}
+
+/**
+ * kthread_create_on_node - create a kthread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @node: task and thread structures for the thread are allocated on this node
+ * @namefmt: printf-style name for the thread.
+ *
+ * Description: This helper function creates and names a kernel
+ * thread. The thread will be stopped: use wake_up_process() to start
+ * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
+ * is affine to all CPUs.
+ *
+ * If thread is going to be bound on a particular cpu, give its node
+ * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
+ * When woken, the thread will run @threadfn() with @data as its
+ * argument. @threadfn() can either call do_exit() directly if it is a
+ * standalone thread for which no one will call kthread_stop(), or
+ * return when 'kthread_should_stop()' is true (which means
+ * kthread_stop() has been called). The return value should be zero
+ * or a negative error number; it will be passed to kthread_stop().
+ *
+ * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
+ */
+struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
+ void *data, int node,
+ const char namefmt[],
+ ...)
+{
+ struct task_struct *task;
+ va_list args;
+
+ va_start(args, namefmt);
+ task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
+ va_end(args);
+
+ return task;
+}
EXPORT_SYMBOL(kthread_create_on_node);
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
cpu);
if (IS_ERR(p))
return p;
+ kthread_bind(p, cpu);
+ /* CPU hotplug need to bind once again when unparking the thread. */
set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
to_kthread(p)->cpu = cpu;
- /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
- kthread_park(p);
return p;
}
* which might be about to be cleared.
*/
if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+ /*
+ * Newly created kthread was parked when the CPU was offline.
+ * The binding was lost and we need to set it again.
+ */
if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
__kthread_bind(k, kthread->cpu, TASK_PARKED);
wake_up_state(k, TASK_PARKED);
return 0;
}
-void __init_kthread_worker(struct kthread_worker *worker,
+void __kthread_init_worker(struct kthread_worker *worker,
const char *name,
struct lock_class_key *key)
{
+ memset(worker, 0, sizeof(struct kthread_worker));
spin_lock_init(&worker->lock);
lockdep_set_class_and_name(&worker->lock, key, name);
INIT_LIST_HEAD(&worker->work_list);
- worker->task = NULL;
+ INIT_LIST_HEAD(&worker->delayed_work_list);
}
-EXPORT_SYMBOL_GPL(__init_kthread_worker);
+EXPORT_SYMBOL_GPL(__kthread_init_worker);
/**
* kthread_worker_fn - kthread function to process kthread_worker
* @worker_ptr: pointer to initialized kthread_worker
*
- * This function can be used as @threadfn to kthread_create() or
- * kthread_run() with @worker_ptr argument pointing to an initialized
- * kthread_worker. The started kthread will process work_list until
- * the it is stopped with kthread_stop(). A kthread can also call
- * this function directly after extra initialization.
+ * This function implements the main cycle of a kthread worker. It processes
+ * work_list until it is stopped with kthread_stop(). It sleeps when the queue
+ * is empty.
+ *
+ * The works must not hold any locks or keep preemption or interrupts disabled
+ * when they return. A safe point for freezing is provided after one work
+ * finishes and before the next one is started.
*
- * Different kthreads can be used for the same kthread_worker as long
- * as there's only one kthread attached to it at any given time. A
- * kthread_worker without an attached kthread simply collects queued
- * kthread_works.
+ * A work must not be processed by more than one worker at the same time;
+ * see also kthread_queue_work().
*/
int kthread_worker_fn(void *worker_ptr)
{
struct kthread_worker *worker = worker_ptr;
struct kthread_work *work;
- WARN_ON(worker->task);
+ /*
+ * FIXME: Update the check and remove the assignment when all kthread
+ * worker users are created using kthread_create_worker*() functions.
+ */
+ WARN_ON(worker->task && worker->task != current);
worker->task = current;
+
+ if (worker->flags & KTW_FREEZABLE)
+ set_freezable();
+
repeat:
set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
-/* insert @work before @pos in @worker */
-static void insert_kthread_work(struct kthread_worker *worker,
- struct kthread_work *work,
- struct list_head *pos)
+static struct kthread_worker *
+__kthread_create_worker(int cpu, unsigned int flags,
+ const char namefmt[], va_list args)
+{
+ struct kthread_worker *worker;
+ struct task_struct *task;
+
+ worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+ if (!worker)
+ return ERR_PTR(-ENOMEM);
+
+ kthread_init_worker(worker);
+
+ if (cpu >= 0) {
+ char name[TASK_COMM_LEN];
+
+ /*
+		 * kthread_create_worker_on_cpu() allows passing a generic
+		 * namefmt, unlike kthread_create_on_cpu(). We need
+ * to format it here.
+ */
+ vsnprintf(name, sizeof(name), namefmt, args);
+ task = kthread_create_on_cpu(kthread_worker_fn, worker,
+ cpu, name);
+ } else {
+ task = __kthread_create_on_node(kthread_worker_fn, worker,
+ -1, namefmt, args);
+ }
+
+ if (IS_ERR(task))
+ goto fail_task;
+
+ worker->flags = flags;
+ worker->task = task;
+ wake_up_process(task);
+ return worker;
+
+fail_task:
+ kfree(worker);
+ return ERR_CAST(task);
+}
+
+/**
+ * kthread_create_worker - create a kthread worker
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the kthread worker (task).
+ *
+ * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
+ * when the needed structures could not be allocated, and ERR_PTR(-EINTR)
+ * when the worker was SIGKILLed.
+ */
+struct kthread_worker *
+kthread_create_worker(unsigned int flags, const char namefmt[], ...)
+{
+ struct kthread_worker *worker;
+ va_list args;
+
+ va_start(args, namefmt);
+ worker = __kthread_create_worker(-1, flags, namefmt, args);
+ va_end(args);
+
+ return worker;
+}
+EXPORT_SYMBOL(kthread_create_worker);
+
+/**
+ * kthread_create_worker_on_cpu - create a kthread worker and bind it
+ *	to a given CPU and the associated NUMA node.
+ * @cpu: CPU number
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the kthread worker (task).
+ *
+ * Use a valid CPU number if you want to bind the kthread worker
+ * to the given CPU and the associated NUMA node.
+ *
+ * It is good practice to also include the CPU number in the worker name.
+ * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
+ *
+ * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
+ * when the needed structures could not be allocated, and ERR_PTR(-EINTR)
+ * when the worker was SIGKILLed.
+ */
+struct kthread_worker *
+kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+ const char namefmt[], ...)
+{
+ struct kthread_worker *worker;
+ va_list args;
+
+ va_start(args, namefmt);
+ worker = __kthread_create_worker(cpu, flags, namefmt, args);
+ va_end(args);
+
+ return worker;
+}
+EXPORT_SYMBOL(kthread_create_worker_on_cpu);
+
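
A minimal usage sketch of the new worker API (illustrative only, not part of the patch; the example_* names are made up). It creates a dedicated worker, initializes a work item with kthread_init_work() and queues it:

	#include <linux/err.h>
	#include <linux/kthread.h>

	static struct kthread_worker *example_worker;
	static struct kthread_work example_work;

	static void example_fn(struct kthread_work *work)
	{
		/* runs in the worker's own kthread context */
	}

	static int example_start(void)
	{
		example_worker = kthread_create_worker(0, "example");
		if (IS_ERR(example_worker))
			return PTR_ERR(example_worker);

		kthread_init_work(&example_work, example_fn);
		kthread_queue_work(example_worker, &example_work);
		return 0;
	}
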
+/*
+ * Returns true when the work cannot be queued at the moment.
+ * This happens when it is already pending in a worker list
+ * or when it is being cancelled.
+ */
+static inline bool queuing_blocked(struct kthread_worker *worker,
+ struct kthread_work *work)
+{
+ lockdep_assert_held(&worker->lock);
+
+ return !list_empty(&work->node) || work->canceling;
+}
+
+static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
+ struct kthread_work *work)
{
lockdep_assert_held(&worker->lock);
+ WARN_ON_ONCE(!list_empty(&work->node));
+ /* Do not use a work with >1 worker, see kthread_queue_work() */
+ WARN_ON_ONCE(work->worker && work->worker != worker);
+}
+
+/* insert @work before @pos in @worker */
+static void kthread_insert_work(struct kthread_worker *worker,
+ struct kthread_work *work,
+ struct list_head *pos)
+{
+ kthread_insert_work_sanity_check(worker, work);
list_add_tail(&work->node, pos);
work->worker = worker;
}
/**
- * queue_kthread_work - queue a kthread_work
+ * kthread_queue_work - queue a kthread_work
* @worker: target kthread_worker
* @work: kthread_work to queue
*
* Queue @work to work processor @task for async execution. @task
* must have been created with kthread_worker_create(). Returns %true
* if @work was successfully queued, %false if it was already pending.
+ *
+ * Reinitialize the work if it needs to be used by another worker,
+ * for example when the worker was stopped and started again.
*/
-bool queue_kthread_work(struct kthread_worker *worker,
+bool kthread_queue_work(struct kthread_worker *worker,
struct kthread_work *work)
{
bool ret = false;
unsigned long flags;
spin_lock_irqsave(&worker->lock, flags);
- if (list_empty(&work->node)) {
- insert_kthread_work(worker, work, &worker->work_list);
+ if (!queuing_blocked(worker, work)) {
+ kthread_insert_work(worker, work, &worker->work_list);
+ ret = true;
+ }
+ spin_unlock_irqrestore(&worker->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kthread_queue_work);
+
+/**
+ * kthread_delayed_work_timer_fn - callback that queues the associated kthread
+ * delayed work when the timer expires.
+ * @__data: pointer to the data associated with the timer
+ *
+ * The format of the function is defined by struct timer_list.
+ * It is called from an irq-safe timer, so interrupts are already disabled.
+ */
+void kthread_delayed_work_timer_fn(unsigned long __data)
+{
+ struct kthread_delayed_work *dwork =
+ (struct kthread_delayed_work *)__data;
+ struct kthread_work *work = &dwork->work;
+ struct kthread_worker *worker = work->worker;
+
+ /*
+ * This might happen when a pending work is reinitialized.
+	 * It means that the work is being used the wrong way.
+ */
+ if (WARN_ON_ONCE(!worker))
+ return;
+
+ spin_lock(&worker->lock);
+ /* Work must not be used with >1 worker, see kthread_queue_work(). */
+ WARN_ON_ONCE(work->worker != worker);
+
+ /* Move the work from worker->delayed_work_list. */
+ WARN_ON_ONCE(list_empty(&work->node));
+ list_del_init(&work->node);
+ kthread_insert_work(worker, work, &worker->work_list);
+
+ spin_unlock(&worker->lock);
+}
+EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
+
+void __kthread_queue_delayed_work(struct kthread_worker *worker,
+ struct kthread_delayed_work *dwork,
+ unsigned long delay)
+{
+ struct timer_list *timer = &dwork->timer;
+ struct kthread_work *work = &dwork->work;
+
+ WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
+ timer->data != (unsigned long)dwork);
+
+ /*
+ * If @delay is 0, queue @dwork->work immediately. This is for
+ * both optimization and correctness. The earliest @timer can
+ * expire is on the closest next tick and delayed_work users depend
+ * on that there's no such delay when @delay is 0.
+ */
+ if (!delay) {
+ kthread_insert_work(worker, work, &worker->work_list);
+ return;
+ }
+
+	/* Be paranoid and try to detect possible races right away. */
+ kthread_insert_work_sanity_check(worker, work);
+
+ list_add(&work->node, &worker->delayed_work_list);
+ work->worker = worker;
+ timer_stats_timer_set_start_info(&dwork->timer);
+ timer->expires = jiffies + delay;
+ add_timer(timer);
+}
+
+/**
+ * kthread_queue_delayed_work - queue the associated kthread work
+ * after a delay.
+ * @worker: target kthread_worker
+ * @dwork: kthread_delayed_work to queue
+ * @delay: number of jiffies to wait before queuing
+ *
+ * If the work is not already pending, it starts a timer that will queue
+ * the work after the given @delay. If @delay is zero, it queues the
+ * work immediately.
+ *
+ * Return: %false if the @work was already pending, meaning that either
+ * the timer was running or the work was queued. It returns %true
+ * otherwise.
+ */
+bool kthread_queue_delayed_work(struct kthread_worker *worker,
+ struct kthread_delayed_work *dwork,
+ unsigned long delay)
+{
+ struct kthread_work *work = &dwork->work;
+ unsigned long flags;
+ bool ret = false;
+
+ spin_lock_irqsave(&worker->lock, flags);
+
+ if (!queuing_blocked(worker, work)) {
+ __kthread_queue_delayed_work(worker, dwork, delay);
ret = true;
}
+
spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(queue_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
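
A sketch of the delayed variant (illustrative; it assumes the kthread_init_delayed_work() initializer that accompanies this API and reuses the hypothetical example_worker and example_fn from the sketch above):

	static struct kthread_delayed_work example_dwork;

	/* during setup, once the worker exists */
	kthread_init_delayed_work(&example_dwork, example_fn);

	/* run example_fn() in the worker roughly one second from now */
	kthread_queue_delayed_work(example_worker, &example_dwork, HZ);
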
struct kthread_flush_work {
struct kthread_work work;
}
/**
- * flush_kthread_work - flush a kthread_work
+ * kthread_flush_work - flush a kthread_work
* @work: work to flush
*
* If @work is queued or executing, wait for it to finish execution.
*/
-void flush_kthread_work(struct kthread_work *work)
+void kthread_flush_work(struct kthread_work *work)
{
struct kthread_flush_work fwork = {
KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
struct kthread_worker *worker;
bool noop = false;
-retry:
worker = work->worker;
if (!worker)
return;
spin_lock_irq(&worker->lock);
- if (work->worker != worker) {
- spin_unlock_irq(&worker->lock);
- goto retry;
- }
+ /* Work must not be used with >1 worker, see kthread_queue_work(). */
+ WARN_ON_ONCE(work->worker != worker);
if (!list_empty(&work->node))
- insert_kthread_work(worker, &fwork.work, work->node.next);
+ kthread_insert_work(worker, &fwork.work, work->node.next);
else if (worker->current_work == work)
- insert_kthread_work(worker, &fwork.work, worker->work_list.next);
+ kthread_insert_work(worker, &fwork.work,
+ worker->work_list.next);
else
noop = true;
if (!noop)
wait_for_completion(&fwork.done);
}
-EXPORT_SYMBOL_GPL(flush_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_flush_work);
+
+/*
+ * This function removes the work from the worker queue. It also makes sure
+ * that the work won't get queued again later via the delayed work's timer.
+ *
+ * The work might still be in use when this function finishes. See the
+ * current_work processed by the worker.
+ *
+ * Return: %true if @work was pending and successfully canceled,
+ * %false if @work was not pending
+ */
+static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
+ unsigned long *flags)
+{
+ /* Try to cancel the timer if exists. */
+ if (is_dwork) {
+ struct kthread_delayed_work *dwork =
+ container_of(work, struct kthread_delayed_work, work);
+ struct kthread_worker *worker = work->worker;
+
+ /*
+ * del_timer_sync() must be called to make sure that the timer
+		 * callback is not running. The lock must be temporarily released
+ * to avoid a deadlock with the callback. In the meantime,
+ * any queuing is blocked by setting the canceling counter.
+ */
+ work->canceling++;
+ spin_unlock_irqrestore(&worker->lock, *flags);
+ del_timer_sync(&dwork->timer);
+ spin_lock_irqsave(&worker->lock, *flags);
+ work->canceling--;
+ }
+
+ /*
+ * Try to remove the work from a worker list. It might either
+ * be from worker->work_list or from worker->delayed_work_list.
+ */
+ if (!list_empty(&work->node)) {
+ list_del_init(&work->node);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
+ * @worker: kthread worker to use
+ * @dwork: kthread delayed work to queue
+ * @delay: number of jiffies to wait before queuing
+ *
+ * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
+ * modify @dwork's timer so that it expires after @delay. If @delay is zero,
+ * @work is guaranteed to be queued immediately.
+ *
+ * Return: %true if @dwork was pending and its timer was modified,
+ * %false otherwise.
+ *
+ * A special case is when the work is being canceled in parallel.
+ * It might be caused either by the real kthread_cancel_delayed_work_sync()
+ * or yet another kthread_mod_delayed_work() call. We let the other command
+ * win and return %false here. The caller is supposed to synchronize these
+ * operations in a reasonable way.
+ *
+ * This function is safe to call from any context including IRQ handler.
+ * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
+ * for details.
+ */
+bool kthread_mod_delayed_work(struct kthread_worker *worker,
+ struct kthread_delayed_work *dwork,
+ unsigned long delay)
+{
+ struct kthread_work *work = &dwork->work;
+ unsigned long flags;
+ int ret = false;
+
+ spin_lock_irqsave(&worker->lock, flags);
+
+ /* Do not bother with canceling when never queued. */
+ if (!work->worker)
+ goto fast_queue;
+
+ /* Work must not be used with >1 worker, see kthread_queue_work() */
+ WARN_ON_ONCE(work->worker != worker);
+
+ /* Do not fight with another command that is canceling this work. */
+ if (work->canceling)
+ goto out;
+
+ ret = __kthread_cancel_work(work, true, &flags);
+fast_queue:
+ __kthread_queue_delayed_work(worker, dwork, delay);
+out:
+ spin_unlock_irqrestore(&worker->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
+
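
A sketch of the usual re-arm pattern (illustrative, continuing the hypothetical example above). Unlike kthread_queue_delayed_work(), this call also postpones a timer that is already running:

	/* (re)start the one-second timeout; safe from IRQ context as well */
	kthread_mod_delayed_work(example_worker, &example_dwork, HZ);
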
+static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
+{
+ struct kthread_worker *worker = work->worker;
+ unsigned long flags;
+ int ret = false;
+
+ if (!worker)
+ goto out;
+
+ spin_lock_irqsave(&worker->lock, flags);
+ /* Work must not be used with >1 worker, see kthread_queue_work(). */
+ WARN_ON_ONCE(work->worker != worker);
+
+ ret = __kthread_cancel_work(work, is_dwork, &flags);
+
+ if (worker->current_work != work)
+ goto out_fast;
+
+ /*
+ * The work is in progress and we need to wait with the lock released.
+ * In the meantime, block any queuing by setting the canceling counter.
+ */
+ work->canceling++;
+ spin_unlock_irqrestore(&worker->lock, flags);
+ kthread_flush_work(work);
+ spin_lock_irqsave(&worker->lock, flags);
+ work->canceling--;
+
+out_fast:
+ spin_unlock_irqrestore(&worker->lock, flags);
+out:
+ return ret;
+}
+
+/**
+ * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
+ * @work: the kthread work to cancel
+ *
+ * Cancel @work and wait for its execution to finish. This function
+ * can be used even if the work re-queues itself. On return from this
+ * function, @work is guaranteed to be not pending or executing on any CPU.
+ *
+ * kthread_cancel_work_sync(&delayed_work->work) must not be used for
+ * delayed works. Use kthread_cancel_delayed_work_sync() instead.
+ *
+ * The caller must ensure that the worker on which @work was last
+ * queued can't be destroyed before this function returns.
+ *
+ * Return: %true if @work was pending, %false otherwise.
+ */
+bool kthread_cancel_work_sync(struct kthread_work *work)
+{
+ return __kthread_cancel_work_sync(work, false);
+}
+EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
+
+/**
+ * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
+ * wait for it to finish.
+ * @dwork: the kthread delayed work to cancel
+ *
+ * This is kthread_cancel_work_sync() for delayed works.
+ *
+ * Return: %true if @dwork was pending, %false otherwise.
+ */
+bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
+{
+ return __kthread_cancel_work_sync(&dwork->work, true);
+}
+EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
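
A teardown sketch using the same hypothetical objects as above; after these calls neither the timer nor the works can fire again:

	kthread_cancel_delayed_work_sync(&example_dwork);
	kthread_cancel_work_sync(&example_work);
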
/**
- * flush_kthread_worker - flush all current works on a kthread_worker
+ * kthread_flush_worker - flush all current works on a kthread_worker
* @worker: worker to flush
*
* Wait until all currently executing or pending works on @worker are
* finished.
*/
-void flush_kthread_worker(struct kthread_worker *worker)
+void kthread_flush_worker(struct kthread_worker *worker)
{
struct kthread_flush_work fwork = {
KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
COMPLETION_INITIALIZER_ONSTACK(fwork.done),
};
- queue_kthread_work(worker, &fwork.work);
+ kthread_queue_work(worker, &fwork.work);
wait_for_completion(&fwork.done);
}
-EXPORT_SYMBOL_GPL(flush_kthread_worker);
+EXPORT_SYMBOL_GPL(kthread_flush_worker);
+
+/**
+ * kthread_destroy_worker - destroy a kthread worker
+ * @worker: worker to be destroyed
+ *
+ * Flush and destroy @worker. The simple flush is enough because the kthread
+ * worker API is used only in trivial scenarios. No multi-step state
+ * machines are needed.
+ */
+void kthread_destroy_worker(struct kthread_worker *worker)
+{
+ struct task_struct *task;
+
+ task = worker->task;
+ if (WARN_ON(!task))
+ return;
+
+ kthread_flush_worker(worker);
+ kthread_stop(task);
+ WARN_ON(!list_empty(&worker->work_list));
+ kfree(worker);
+}
+EXPORT_SYMBOL(kthread_destroy_worker);
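
Finally, the hypothetical example can stop its worker like this sketch; kthread_destroy_worker() flushes pending works itself, so the caller only has to make sure that nothing queues new works afterwards:

	kthread_destroy_worker(example_worker);
	example_worker = NULL;
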
panic_smp_self_stop();
}
+/*
+ * Stop other CPUs in panic. Architecture-dependent code may override this
+ * with a more suitable version. For example, if the architecture supports
+ * crash dump, it should save registers of each stopped CPU and disable
+ * per-CPU features such as virtualization extensions.
+ */
+void __weak crash_smp_send_stop(void)
+{
+ static int cpus_stopped;
+
+ /*
+	 * This function can be called twice in the panic path, but we
+	 * obviously execute it only once.
+ */
+ if (cpus_stopped)
+ return;
+
+ /*
+ * Note smp_send_stop is the usual smp shutdown function, which
+ * unfortunately means it may not be hardened to work in a panic
+ * situation.
+ */
+ smp_send_stop();
+ cpus_stopped = 1;
+}
+
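
A sketch of what an architecture-specific override might look like (purely illustrative; arch_crash_stop_other_cpus() is a made-up helper standing in for whatever IPI/NMI mechanism the architecture provides for saving registers and disabling virtualization extensions):

	void crash_smp_send_stop(void)
	{
		static int cpus_stopped;

		if (cpus_stopped)
			return;

		/* hypothetical: stop the other CPUs and save their state */
		arch_crash_stop_other_cpus();
		cpus_stopped = 1;
	}
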
atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
/*
if (!_crash_kexec_post_notifiers) {
printk_nmi_flush_on_panic();
__crash_kexec(NULL);
- }
- /*
- * Note smp_send_stop is the usual smp shutdown function, which
- * unfortunately means it may not be hardened to work in a panic
- * situation.
- */
- smp_send_stop();
+ /*
+ * Note smp_send_stop is the usual smp shutdown function, which
+ * unfortunately means it may not be hardened to work in a
+ * panic situation.
+ */
+ smp_send_stop();
+ } else {
+ /*
+		 * If we want to do a crash dump after notifier calls and
+		 * kmsg_dump, we need extra architecture-dependent work
+		 * in addition to stopping other CPUs.
+ */
+ crash_smp_send_stop();
+ }
/*
* Run any panic handlers, including those that might need to
{
BUG_ON(!child->ptrace);
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+
child->parent = child->real_parent;
list_del_init(&child->ptrace_entry);
/* Architecture-specific hardware disable .. */
ptrace_disable(child);
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
write_lock_irq(&tasklist_lock);
/*
/**
* wakeup_readers - wake up readers waiting on a channel
- * @data: contains the channel buffer
+ * @work: contains the channel buffer
*
- * This is the timer function used to defer reader waking.
+ * This is the function used to defer reader waking.
*/
-static void wakeup_readers(unsigned long data)
+static void wakeup_readers(struct irq_work *work)
{
- struct rchan_buf *buf = (struct rchan_buf *)data;
+ struct rchan_buf *buf;
+
+ buf = container_of(work, struct rchan_buf, wakeup_work);
wake_up_interruptible(&buf->read_wait);
}
if (init) {
init_waitqueue_head(&buf->read_wait);
kref_init(&buf->kref);
- setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
- } else
- del_timer_sync(&buf->timer);
+ init_irq_work(&buf->wakeup_work, wakeup_readers);
+ } else {
+ irq_work_sync(&buf->wakeup_work);
+ }
buf->subbufs_produced = 0;
buf->subbufs_consumed = 0;
static void relay_close_buf(struct rchan_buf *buf)
{
buf->finalized = 1;
- del_timer_sync(&buf->timer);
+ irq_work_sync(&buf->wakeup_work);
buf->chan->cb->remove_buf_file(buf->dentry);
kref_put(&buf->kref, relay_remove_buf);
}
buf->early_bytes += buf->chan->subbuf_size -
buf->padding[old_subbuf];
smp_mb();
- if (waitqueue_active(&buf->read_wait))
+ if (waitqueue_active(&buf->read_wait)) {
/*
* Calling wake_up_interruptible() from here
* will deadlock if we happen to be logging
* from the scheduler (trying to re-grab
* rq->lock), so defer it.
*/
- mod_timer(&buf->timer, jiffies + 1);
+ irq_work_queue(&buf->wakeup_work);
+ }
}
old = buf->data;
kfree(td);
return PTR_ERR(tsk);
}
+ /*
+	 * Park the thread so that it can start right on the CPU
+	 * as soon as the CPU comes online.
+ */
+ kthread_park(tsk);
get_task_struct(tsk);
*per_cpu_ptr(ht->store, cpu) = tsk;
if (ht->create) {
* This function is called without any synchronization and @task
* could be in any state. Be careful with dereferences.
*/
- worker = probe_kthread_data(task);
+ worker = kthread_probe_data(task);
/*
* Carefully copy the associated workqueue's workfn and name. Keep
obj-$(CONFIG_STACKDEPOT) += stackdepot.o
KASAN_SANITIZE_stackdepot.o := n
+KCOV_INSTRUMENT_stackdepot.o := n
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
fdt_empty_tree.o
* ranges. Consecutively set bits are shown as two hyphen-separated
* decimal numbers, the smallest and largest bit numbers set in
* the range.
+ * Optionally, each range can be postfixed to denote that only parts of it
+ * should be set. The range is then divided into groups of a specific size,
+ * and from each group only the given number of bits is used.
+ * Syntax: range:used_size/group_size
+ * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
*
* Returns 0 on success, -errno on invalid input strings.
* Error values:
int is_user, unsigned long *maskp,
int nmaskbits)
{
- unsigned a, b;
+ unsigned int a, b, old_a, old_b;
+ unsigned int group_size, used_size;
int c, old_c, totaldigits, ndigits;
const char __user __force *ubuf = (const char __user __force *)buf;
- int at_start, in_range;
+ int at_start, in_range, in_partial_range;
totaldigits = c = 0;
+ old_a = old_b = 0;
+ group_size = used_size = 0;
bitmap_zero(maskp, nmaskbits);
do {
at_start = 1;
in_range = 0;
+ in_partial_range = 0;
a = b = 0;
ndigits = totaldigits;
if ((totaldigits != ndigits) && isspace(old_c))
return -EINVAL;
+ if (c == '/') {
+ used_size = a;
+ at_start = 1;
+ in_range = 0;
+ a = b = 0;
+ continue;
+ }
+
+ if (c == ':') {
+ old_a = a;
+ old_b = b;
+ at_start = 1;
+ in_range = 0;
+ in_partial_range = 1;
+ a = b = 0;
+ continue;
+ }
+
if (c == '-') {
if (at_start || in_range)
return -EINVAL;
}
if (ndigits == totaldigits)
continue;
+ if (in_partial_range) {
+ group_size = a;
+ a = old_a;
+ b = old_b;
+ old_a = old_b = 0;
+ }
/* if no digit is after '-', it's wrong*/
if (at_start && in_range)
return -EINVAL;
- if (!(a <= b))
+ if (!(a <= b) || !(used_size <= group_size))
return -EINVAL;
if (b >= nmaskbits)
return -ERANGE;
while (a <= b) {
- set_bit(a, maskp);
+ if (in_partial_range) {
+ static int pos_in_group = 1;
+
+ if (pos_in_group <= used_size)
+ set_bit(a, maskp);
+
+ if (a == b || ++pos_in_group > group_size)
+ pos_in_group = 1;
+ } else
+ set_bit(a, maskp);
a++;
}
} while (buflen && c == ',');
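
For illustration, the new group syntax as seen through the public bitmap_parselist() wrapper (a sketch, not part of the patch):

	DECLARE_BITMAP(mask, 1024);
	int err;

	/* on success, bits 0,1,256,257,512,513,768,769 are set */
	err = bitmap_parselist("0-1023:2/256", mask, 1024);
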
{
unsigned long long res;
unsigned int rv;
- int overflow;
res = 0;
rv = 0;
- overflow = 0;
while (*s) {
unsigned int val;
*/
if (unlikely(res & (~0ull << 60))) {
if (res > div_u64(ULLONG_MAX - val, base))
- overflow = 1;
+ rv |= KSTRTOX_OVERFLOW;
}
res = res * base + val;
rv++;
s++;
}
*p = res;
- if (overflow)
- rv |= KSTRTOX_OVERFLOW;
return rv;
}
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/kasan-checks.h>
+#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/errno.h>
long retval;
kasan_check_write(dst, count);
+ check_object_size(dst, count, false);
user_access_begin();
retval = do_strncpy_from_user(dst, src, count, max);
user_access_end();
{
unsigned long cursor, end;
- kmemleak_free_part(__va(physaddr), size);
+ kmemleak_free_part_phys(physaddr, size);
cursor = PFN_UP(physaddr);
end = PFN_DOWN(physaddr + size);
{
unsigned long start, end;
- kmemleak_free_part(__va(physaddr), size);
+ kmemleak_free_part_phys(physaddr, size);
start = PFN_UP(physaddr);
end = PFN_DOWN(physaddr + size);
{
unsigned long start, end;
- kmemleak_free_part(__va(physaddr), size);
+ kmemleak_free_part_phys(physaddr, size);
start = PFN_UP(physaddr);
end = PFN_DOWN(physaddr + size);
* kmemleak scans/reads tracked objects for pointers to other
* objects but this address isn't mapped and accessible
*/
- kmemleak_ignore(phys_to_virt(addr));
+ kmemleak_ignore_phys(addr);
base = addr;
}
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
+#include <linux/bootmem.h>
+#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
}
EXPORT_SYMBOL(kmemleak_no_scan);
+/**
+ * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
+ * address argument
+ */
+void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+ gfp_t gfp)
+{
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_alloc(__va(phys), size, min_count, gfp);
+}
+EXPORT_SYMBOL(kmemleak_alloc_phys);
+
+/**
+ * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
+ * physical address argument
+ */
+void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+{
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_free_part(__va(phys), size);
+}
+EXPORT_SYMBOL(kmemleak_free_part_phys);
+
+/**
+ * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
+ * address argument
+ */
+void __ref kmemleak_not_leak_phys(phys_addr_t phys)
+{
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_not_leak(__va(phys));
+}
+EXPORT_SYMBOL(kmemleak_not_leak_phys);
+
+/**
+ * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
+ * address argument
+ */
+void __ref kmemleak_ignore_phys(phys_addr_t phys)
+{
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_ignore(__va(phys));
+}
+EXPORT_SYMBOL(kmemleak_ignore_phys);
+
/*
* Update an object's checksum and return true if it was modified.
*/
(unsigned long long)base + size - 1,
(void *)_RET_IP_);
- kmemleak_free_part(__va(base), size);
+ kmemleak_free_part_phys(base, size);
return memblock_remove_range(&memblock.reserved, base, size);
}
* The min_count is set to 0 so that memblock allocations are
* never reported as leaks.
*/
- kmemleak_alloc(__va(found), size, 0, 0);
+ kmemleak_alloc_phys(found, size, 0, 0);
return found;
}
return 0;
memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
__func__, (u64)base, (u64)base + size - 1,
(void *)_RET_IP_);
- kmemleak_free_part(__va(base), size);
+ kmemleak_free_part_phys(base, size);
memblock_remove_range(&memblock.reserved, base, size);
}
memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
__func__, (u64)base, (u64)base + size - 1,
(void *)_RET_IP_);
- kmemleak_free_part(__va(base), size);
+ kmemleak_free_part_phys(base, size);
cursor = PFN_UP(base);
end = PFN_DOWN(base + size);
{
unsigned long cursor, end;
- kmemleak_free_part(__va(addr), size);
+ kmemleak_free_part_phys(addr, size);
cursor = PFN_UP(addr);
end = PFN_DOWN(addr + size);
#include "main.h"
-#include <linux/kconfig.h>
-
struct net_device;
#define BATADV_DEBUGFS_SUBDIR "batman_adv"
my $spelling_file = "$D/spelling.txt";
my $codespell = 0;
my $codespellfile = "/usr/share/codespell/dictionary.txt";
+my $conststructsfile = "$D/const_structs.checkpatch";
my $color = 1;
my $allow_c99_comments = 1;
["module_param_array_named", 5],
["debugfs_create_(?:file|u8|u16|u32|u64|x8|x16|x32|x64|size_t|atomic_t|bool|blob|regset32|u32_array)", 2],
["proc_create(?:_data|)", 2],
- ["(?:CLASS|DEVICE|SENSOR)_ATTR", 2],
+ ["(?:CLASS|DEVICE|SENSOR|SENSOR_DEVICE|IIO_DEVICE)_ATTR", 2],
+ ["IIO_DEV_ATTR_[A-Z_]+", 1],
+ ["SENSOR_(?:DEVICE_|)ATTR_2", 2],
+ ["SENSOR_TEMPLATE(?:_2|)", 3],
+ ["__ATTR", 2],
);
#Create a search pattern for all these functions to speed up a loop below
0[0-7][0-7][2367]
}x;
+our %mode_permission_string_types = (
+ "S_IRWXU" => 0700,
+ "S_IRUSR" => 0400,
+ "S_IWUSR" => 0200,
+ "S_IXUSR" => 0100,
+ "S_IRWXG" => 0070,
+ "S_IRGRP" => 0040,
+ "S_IWGRP" => 0020,
+ "S_IXGRP" => 0010,
+ "S_IRWXO" => 0007,
+ "S_IROTH" => 0004,
+ "S_IWOTH" => 0002,
+ "S_IXOTH" => 0001,
+ "S_IRWXUGO" => 0777,
+ "S_IRUGO" => 0444,
+ "S_IWUGO" => 0222,
+ "S_IXUGO" => 0111,
+);
+
+#Create a search pattern for all these strings to speed up a loop below
+our $mode_perms_string_search = "";
+foreach my $entry (keys %mode_permission_string_types) {
+ $mode_perms_string_search .= '|' if ($mode_perms_string_search ne "");
+ $mode_perms_string_search .= $entry;
+}
+
our $allowed_asm_includes = qr{(?x:
irq|
memory|
$misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix;
+my $const_structs = "";
+if (open(my $conststructs, '<', $conststructsfile)) {
+ while (<$conststructs>) {
+ my $line = $_;
+
+ $line =~ s/\s*\n?$//g;
+ $line =~ s/^\s*//g;
+
+ next if ($line =~ m/^\s*#/);
+ next if ($line =~ m/^\s*$/);
+ if ($line =~ /\s/) {
+ print("$conststructsfile: '$line' invalid - ignored\n");
+ next;
+ }
+
+ $const_structs .= '|' if ($const_structs ne "");
+ $const_structs .= $line;
+ }
+	close($conststructs);
+} else {
+ warn "No structs that should be const will be found - file '$conststructsfile': $!\n";
+}
+
sub build_types {
my $mods = "(?x: \n" . join("|\n ", (@modifierList, @modifierListFile)) . "\n)";
my $all = "(?x: \n" . join("|\n ", (@typeList, @typeListFile)) . "\n)";
}
}
+sub is_maintained_obsolete {
+ my ($filename) = @_;
+
+ return 0 if (!(-e "$root/scripts/get_maintainer.pl"));
+
+ my $status = `perl $root/scripts/get_maintainer.pl --status --nom --nol --nogit --nogit-fallback -f $filename 2>&1`;
+
+ return $status =~ /obsolete/i;
+}
+
my $camelcase_seeded = 0;
sub seed_camelcase_includes {
return if ($camelcase_seeded);
}
if ($found_file) {
+ if (is_maintained_obsolete($realfile)) {
+ WARN("OBSOLETE",
+ "$realfile is marked as 'obsolete' in the MAINTAINERS hierarchy. No unnecessary modifications please.\n");
+ }
if ($realfile =~ m@^(?:drivers/net/|net/|drivers/staging/)@) {
$check = 1;
} else {
"Block comments use a trailing */ on a separate line\n" . $herecurr);
}
+# Block comment * alignment
+ if ($prevline =~ /$;[ \t]*$/ && #ends in comment
+ $line =~ /^\+[ \t]*$;/ && #leading comment
+ $rawline =~ /^\+[ \t]*\*/ && #leading *
+ (($prevrawline =~ /^\+.*?\/\*/ && #leading /*
+ $prevrawline !~ /\*\/[ \t]*$/) || #no trailing */
+ $prevrawline =~ /^\+[ \t]*\*/)) { #leading *
+ my $oldindent;
+ $prevrawline =~ m@^\+([ \t]*/?)\*@;
+ if (defined($1)) {
+ $oldindent = expand_tabs($1);
+ } else {
+ $prevrawline =~ m@^\+(.*/?)\*@;
+ $oldindent = expand_tabs($1);
+ }
+ $rawline =~ m@^\+([ \t]*)\*@;
+ my $newindent = $1;
+ $newindent = expand_tabs($newindent);
+ if (length($oldindent) ne length($newindent)) {
+ WARN("BLOCK_COMMENT_STYLE",
+ "Block comments should align the * on each line\n" . $hereprev);
+ }
+ }
+
# check for missing blank lines after struct/union declarations
# with exceptions for various attributes and macros
if ($prevline =~ /^[\+ ]};?\s*$/ &&
$has_flow_statement = 1 if ($ctx =~ /\b(goto|return)\b/);
$has_arg_concat = 1 if ($ctx =~ /\#\#/ && $ctx !~ /\#\#\s*(?:__VA_ARGS__|args)\b/);
- $dstat =~ s/^.\s*\#\s*define\s+$Ident(?:\([^\)]*\))?\s*//;
+ $dstat =~ s/^.\s*\#\s*define\s+$Ident(\([^\)]*\))?\s*//;
+ my $define_args = $1;
+ my $define_stmt = $dstat;
+ my @def_args = ();
+
+ if (defined $define_args && $define_args ne "") {
+ $define_args = substr($define_args, 1, length($define_args) - 2);
+ $define_args =~ s/\s*//g;
+ @def_args = split(",", $define_args);
+ }
+
$dstat =~ s/$;//g;
$dstat =~ s/\\\n.//g;
$dstat =~ s/^\s*//s;
^\[
}x;
#print "REST<$rest> dstat<$dstat> ctx<$ctx>\n";
+
+ $ctx =~ s/\n*$//;
+ my $herectx = $here . "\n";
+ my $stmt_cnt = statement_rawlines($ctx);
+
+ for (my $n = 0; $n < $stmt_cnt; $n++) {
+ $herectx .= raw_line($linenr, $n) . "\n";
+ }
+
if ($dstat ne '' &&
$dstat !~ /^(?:$Ident|-?$Constant),$/ && # 10, // foo(),
$dstat !~ /^(?:$Ident|-?$Constant);$/ && # foo();
$dstat !~ /^\(\{/ && # ({...
$ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/)
{
- $ctx =~ s/\n*$//;
- my $herectx = $here . "\n";
- my $cnt = statement_rawlines($ctx);
-
- for (my $n = 0; $n < $cnt; $n++) {
- $herectx .= raw_line($linenr, $n) . "\n";
- }
if ($dstat =~ /;/) {
ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
ERROR("COMPLEX_MACRO",
"Macros with complex values should be enclosed in parentheses\n" . "$herectx");
}
+
+ }
+
+ # Make $define_stmt single line, comment-free, etc
+ my @stmt_array = split('\n', $define_stmt);
+ my $first = 1;
+ $define_stmt = "";
+ foreach my $l (@stmt_array) {
+ $l =~ s/\\$//;
+ if ($first) {
+ $define_stmt = $l;
+ $first = 0;
+ } elsif ($l =~ /^[\+ ]/) {
+ $define_stmt .= substr($l, 1);
+ }
+ }
+ $define_stmt =~ s/$;//g;
+ $define_stmt =~ s/\s+/ /g;
+ $define_stmt = trim($define_stmt);
+
+# check if any macro arguments are reused (ignore '...' and 'type')
+ foreach my $arg (@def_args) {
+ next if ($arg =~ /\.\.\./);
+ next if ($arg =~ /^type$/i);
+ my $tmp = $define_stmt;
+ $tmp =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g;
+ $tmp =~ s/\#+\s*$arg\b//g;
+ $tmp =~ s/\b$arg\s*\#\#//g;
+ my $use_cnt = $tmp =~ s/\b$arg\b//g;
+ if ($use_cnt > 1) {
+ CHK("MACRO_ARG_REUSE",
+ "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx");
+ }
+# check if any macro arguments may have other precedence issues
+ if ($define_stmt =~ m/($Operators)?\s*\b$arg\b\s*($Operators)?/m &&
+ ((defined($1) && $1 ne ',') ||
+ (defined($2) && $2 ne ','))) {
+ CHK("MACRO_ARG_PRECEDENCE",
+ "Macro argument '$arg' may be better as '($arg)' to avoid precedence issues\n" . "$herectx");
+ }
}
# check for macros with flow control, but without ## concatenation
}
# Check for memcpy(foo, bar, ETH_ALEN) that could be ether_addr_copy(foo, bar)
- if ($^V && $^V ge 5.10.0 &&
- defined $stat &&
- $stat =~ /^\+(?:.*?)\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
- if (WARN("PREFER_ETHER_ADDR_COPY",
- "Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2)\n" . "$here\n$stat\n") &&
- $fix) {
- $fixed[$fixlinenr] =~ s/\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/ether_addr_copy($2, $7)/;
- }
- }
+# if ($^V && $^V ge 5.10.0 &&
+# defined $stat &&
+# $stat =~ /^\+(?:.*?)\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
+# if (WARN("PREFER_ETHER_ADDR_COPY",
+# "Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2)\n" . "$here\n$stat\n") &&
+# $fix) {
+# $fixed[$fixlinenr] =~ s/\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/ether_addr_copy($2, $7)/;
+# }
+# }
# Check for memcmp(foo, bar, ETH_ALEN) that could be ether_addr_equal*(foo, bar)
- if ($^V && $^V ge 5.10.0 &&
- defined $stat &&
- $stat =~ /^\+(?:.*?)\bmemcmp\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
- WARN("PREFER_ETHER_ADDR_EQUAL",
- "Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp()\n" . "$here\n$stat\n")
- }
+# if ($^V && $^V ge 5.10.0 &&
+# defined $stat &&
+# $stat =~ /^\+(?:.*?)\bmemcmp\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
+# WARN("PREFER_ETHER_ADDR_EQUAL",
+# "Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp()\n" . "$here\n$stat\n")
+# }
# check for memset(foo, 0x0, ETH_ALEN) that could be eth_zero_addr
# check for memset(foo, 0xFF, ETH_ALEN) that could be eth_broadcast_addr
- if ($^V && $^V ge 5.10.0 &&
- defined $stat &&
- $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
-
- my $ms_val = $7;
-
- if ($ms_val =~ /^(?:0x|)0+$/i) {
- if (WARN("PREFER_ETH_ZERO_ADDR",
- "Prefer eth_zero_addr over memset()\n" . "$here\n$stat\n") &&
- $fix) {
- $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_zero_addr($2)/;
- }
- } elsif ($ms_val =~ /^(?:0xff|255)$/i) {
- if (WARN("PREFER_ETH_BROADCAST_ADDR",
- "Prefer eth_broadcast_addr() over memset()\n" . "$here\n$stat\n") &&
- $fix) {
- $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_broadcast_addr($2)/;
- }
- }
- }
+# if ($^V && $^V ge 5.10.0 &&
+# defined $stat &&
+# $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
+#
+# my $ms_val = $7;
+#
+# if ($ms_val =~ /^(?:0x|)0+$/i) {
+# if (WARN("PREFER_ETH_ZERO_ADDR",
+# "Prefer eth_zero_addr over memset()\n" . "$here\n$stat\n") &&
+# $fix) {
+# $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_zero_addr($2)/;
+# }
+# } elsif ($ms_val =~ /^(?:0xff|255)$/i) {
+# if (WARN("PREFER_ETH_BROADCAST_ADDR",
+# "Prefer eth_broadcast_addr() over memset()\n" . "$here\n$stat\n") &&
+# $fix) {
+# $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_broadcast_addr($2)/;
+# }
+# }
+# }
# typecasts on min/max could be min_t/max_t
if ($^V && $^V ge 5.10.0 &&
"externs should be avoided in .c files\n" . $herecurr);
}
+ if ($realfile =~ /\.[ch]$/ && defined $stat &&
+ $stat =~ /^.\s*(?:extern\s+)?$Type\s*$Ident\s*\(\s*([^{]+)\s*\)\s*;/s &&
+ $1 ne "void") {
+ my $args = trim($1);
+ while ($args =~ m/\s*($Type\s*(?:$Ident|\(\s*\*\s*$Ident?\s*\)\s*$balanced_parens)?)/g) {
+ my $arg = trim($1);
+ if ($arg =~ /^$Type$/ && $arg !~ /enum\s+$Ident$/) {
+ WARN("FUNCTION_ARGUMENTS",
+ "function definition argument '$arg' should also have an identifier name\n" . $herecurr);
+ }
+ }
+ }
+
# checks for new __setup's
if ($rawline =~ /\b__setup\("([^"]*)"/) {
my $name = $1;
}
# check for various structs that are normally const (ops, kgdb, device_tree)
- my $const_structs = qr{
- acpi_dock_ops|
- address_space_operations|
- backlight_ops|
- block_device_operations|
- dentry_operations|
- dev_pm_ops|
- dma_map_ops|
- extent_io_ops|
- file_lock_operations|
- file_operations|
- hv_ops|
- ide_dma_ops|
- intel_dvo_dev_ops|
- item_operations|
- iwl_ops|
- kgdb_arch|
- kgdb_io|
- kset_uevent_ops|
- lock_manager_operations|
- microcode_ops|
- mtrr_ops|
- neigh_ops|
- nlmsvc_binding|
- of_device_id|
- pci_raw_ops|
- pipe_buf_operations|
- platform_hibernation_ops|
- platform_suspend_ops|
- proto_ops|
- rpc_pipe_ops|
- seq_operations|
- snd_ac97_build_ops|
- soc_pcmcia_socket_ops|
- stacktrace_ops|
- sysfs_ops|
- tty_operations|
- uart_ops|
- usb_mon_operations|
- wd_ops}x;
if ($line !~ /\bconst\b/ &&
$line =~ /\bstruct\s+($const_structs)\b/) {
WARN("CONST_STRUCT",
# Mode permission misuses where it seems decimal should be octal
# This uses a shortcut match to avoid unnecessary uses of a slow foreach loop
if ($^V && $^V ge 5.10.0 &&
+ defined $stat &&
$line =~ /$mode_perms_search/) {
foreach my $entry (@mode_permission_funcs) {
my $func = $entry->[0];
my $arg_pos = $entry->[1];
+ my $lc = $stat =~ tr@\n@@;
+ $lc = $lc + $linenr;
+ my $stat_real = raw_line($linenr, 0);
+ for (my $count = $linenr + 1; $count <= $lc; $count++) {
+ $stat_real = $stat_real . "\n" . raw_line($count, 0);
+ }
+
my $skip_args = "";
if ($arg_pos > 1) {
$arg_pos--;
$skip_args = "(?:\\s*$FuncArg\\s*,\\s*){$arg_pos,$arg_pos}";
}
- my $test = "\\b$func\\s*\\(${skip_args}([\\d]+)\\s*[,\\)]";
- if ($line =~ /$test/) {
+ my $test = "\\b$func\\s*\\(${skip_args}($FuncArg(?:\\|\\s*$FuncArg)*)\\s*[,\\)]";
+ if ($stat =~ /$test/) {
my $val = $1;
$val = $6 if ($skip_args ne "");
-
- if ($val !~ /^0$/ &&
- (($val =~ /^$Int$/ && $val !~ /^$Octal$/) ||
- length($val) ne 4)) {
+ if (($val =~ /^$Int$/ && $val !~ /^$Octal$/) ||
+ ($val =~ /^$Octal$/ && length($val) ne 4)) {
ERROR("NON_OCTAL_PERMISSIONS",
- "Use 4 digit octal (0777) not decimal permissions\n" . $herecurr);
- } elsif ($val =~ /^$Octal$/ && (oct($val) & 02)) {
+ "Use 4 digit octal (0777) not decimal permissions\n" . "$here\n" . $stat_real);
+ }
+ if ($val =~ /^$Octal$/ && (oct($val) & 02)) {
ERROR("EXPORTED_WORLD_WRITABLE",
- "Exporting writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
+ "Exporting writable files is usually an error. Consider more restrictive permissions.\n" . "$here\n" . $stat_real);
}
}
}
}
+# check for uses of S_<PERMS> that could be octal for readability
+ if ($line =~ /\b$mode_perms_string_search\b/) {
+ my $val = "";
+ my $oval = "";
+ my $to = 0;
+ my $curpos = 0;
+ my $lastpos = 0;
+ while ($line =~ /\b(($mode_perms_string_search)\b(?:\s*\|\s*)?\s*)/g) {
+ $curpos = pos($line);
+ my $match = $2;
+ my $omatch = $1;
+ last if ($lastpos > 0 && ($curpos - length($omatch) != $lastpos));
+ $lastpos = $curpos;
+ $to |= $mode_permission_string_types{$match};
+ $val .= '\s*\|\s*' if ($val ne "");
+ $val .= $match;
+ $oval .= $omatch;
+ }
+ $oval =~ s/^\s*\|\s*//;
+ $oval =~ s/\s*\|\s*$//;
+ my $octal = sprintf("%04o", $to);
+ if (WARN("SYMBOLIC_PERMS",
+ "Symbolic permissions '$oval' are not preferred. Consider using octal permissions '$octal'.\n" . $herecurr) &&
+ $fix) {
+ $fixed[$fixlinenr] =~ s/$val/$octal/;
+ }
+ }
+
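
To illustrate the new SYMBOLIC_PERMS check (the foo attribute and its show/store helpers are hypothetical), checkpatch now suggests replacing the symbolic form with its octal equivalent:

	/* warned about: symbolic permissions */
	static DEVICE_ATTR(foo, S_IRUGO | S_IWUSR, foo_show, foo_store);

	/* preferred: 0444 | 0200 == 0644 */
	static DEVICE_ATTR(foo, 0644, foo_show, foo_store);
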
# validate content of MODULE_LICENSE against list from include/linux/module.h
if ($line =~ /\bMODULE_LICENSE\s*\(\s*($String)\s*\)/) {
my $extracted_string = get_quoted_string($line, $rawline);
--- /dev/null
+acpi_dock_ops
+address_space_operations
+backlight_ops
+block_device_operations
+clk_ops
+comedi_lrange
+component_ops
+dentry_operations
+dev_pm_ops
+dma_map_ops
+driver_info
+drm_connector_funcs
+drm_encoder_funcs
+drm_encoder_helper_funcs
+ethtool_ops
+extent_io_ops
+file_lock_operations
+file_operations
+hv_ops
+ide_dma_ops
+ide_port_ops
+inode_operations
+intel_dvo_dev_ops
+irq_domain_ops
+item_operations
+iwl_cfg
+iwl_ops
+kgdb_arch
+kgdb_io
+kset_uevent_ops
+lock_manager_operations
+machine_desc
+microcode_ops
+mlxsw_reg_info
+mtrr_ops
+neigh_ops
+net_device_ops
+nlmsvc_binding
+nvkm_device_chip
+of_device_id
+pci_raw_ops
+pipe_buf_operations
+platform_hibernation_ops
+platform_suspend_ops
+proto_ops
+regmap_access_table
+rpc_pipe_ops
+rtc_class_ops
+sd_desc
+seq_operations
+sirfsoc_padmux
+snd_ac97_build_ops
+snd_soc_component_driver
+soc_pcmcia_socket_ops
+stacktrace_ops
+sysfs_ops
+tty_operations
+uart_ops
+usb_mon_operations
+v4l2_ctrl_ops
+v4l2_ioctl_ops
+vm_operations_struct
+wacom_features
+wd_ops
-I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL,ACPI_EXPORT_SYMBOL \
-I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \
-I static,const \
- --extra=+f --c-kinds=+px --langmap=c:+.h "${regex[@]}"
+ --extra=+fq --c-kinds=+px --fields=+iaS --langmap=c:+.h \
+ "${regex[@]}"
setup_regex exuberant kconfig
all_kconfigs | xargs $1 -a \
spin_unlock_irqrestore(&sst->spinlock, flags);
/* continue to send any remaining messages... */
- queue_kthread_work(&ipc->kworker, &ipc->kwork);
+ kthread_queue_work(&ipc->kworker, &ipc->kwork);
return IRQ_HANDLED;
}
*
*/
-#include <linux/kconfig.h>
#include <linux/stddef.h>
#include <linux/acpi.h>
list_add_tail(&msg->list, &ipc->tx_list);
spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
- queue_kthread_work(&ipc->kworker, &ipc->kwork);
+ kthread_queue_work(&ipc->kworker, &ipc->kwork);
if (wait)
return tx_wait_done(ipc, msg, rx_data);
return -ENOMEM;
/* start the IPC message thread */
- init_kthread_worker(&ipc->kworker);
+ kthread_init_worker(&ipc->kworker);
ipc->tx_thread = kthread_run(kthread_worker_fn,
&ipc->kworker, "%s",
dev_name(ipc->dev));
return ret;
}
- init_kthread_work(&ipc->kwork, ipc_tx_msgs);
+ kthread_init_work(&ipc->kwork, ipc_tx_msgs);
return 0;
}
EXPORT_SYMBOL_GPL(sst_ipc_init);
spin_unlock_irqrestore(&sst->spinlock, flags);
/* continue to send any remaining messages... */
- queue_kthread_work(&ipc->kworker, &ipc->kwork);
+ kthread_queue_work(&ipc->kworker, &ipc->kwork);
return IRQ_HANDLED;
}
skl_ipc_int_enable(dsp);
/* continue to send any remaining messages... */
- queue_kthread_work(&ipc->kworker, &ipc->kwork);
+ kthread_queue_work(&ipc->kworker, &ipc->kwork);
return IRQ_HANDLED;
}
-#include <linux/kconfig.h>
#include <linux/bug.h>
void check(void)
LDFLAGS += -lpthread -lurcu
TARGETS = main
OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \
- regression1.o regression2.o regression3.o multiorder.o
+ regression1.o regression2.o regression3.o multiorder.o \
+ iteration_check.o
targets: $(TARGETS)
--- /dev/null
+/*
+ * iteration_check.c: test races having to do with radix tree iteration
+ * Copyright (c) 2016 Intel Corporation
+ * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/radix-tree.h>
+#include <pthread.h>
+#include "test.h"
+
+#define NUM_THREADS 4
+#define TAG 0
+static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_t threads[NUM_THREADS];
+RADIX_TREE(tree, GFP_KERNEL);
+bool test_complete;
+
+/* relentlessly fill the tree with tagged entries */
+static void *add_entries_fn(void *arg)
+{
+ int pgoff;
+
+ while (!test_complete) {
+ for (pgoff = 0; pgoff < 100; pgoff++) {
+ pthread_mutex_lock(&tree_lock);
+ if (item_insert(&tree, pgoff) == 0)
+ item_tag_set(&tree, pgoff, TAG);
+ pthread_mutex_unlock(&tree_lock);
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Iterate over the tagged entries, doing a radix_tree_iter_retry() as we find
+ * things that have been removed and randomly resetting our iteration to the
+ * next chunk with radix_tree_iter_next(). Both radix_tree_iter_retry() and
+ * radix_tree_iter_next() cause radix_tree_next_slot() to be called with a
+ * NULL 'slot' variable.
+ */
+static void *tagged_iteration_fn(void *arg)
+{
+ struct radix_tree_iter iter;
+ void **slot;
+
+ while (!test_complete) {
+ rcu_read_lock();
+ radix_tree_for_each_tagged(slot, &tree, &iter, 0, TAG) {
+ void *entry;
+ int i;
+
+ /* busy wait to let removals happen */
+ for (i = 0; i < 1000000; i++)
+ ;
+
+ entry = radix_tree_deref_slot(slot);
+ if (unlikely(!entry))
+ continue;
+
+ if (radix_tree_deref_retry(entry)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+
+ if (rand() % 50 == 0)
+ slot = radix_tree_iter_next(&iter);
+ }
+ rcu_read_unlock();
+ }
+
+ return NULL;
+}
+
+/*
+ * Iterate over the entries, doing a radix_tree_iter_retry() as we find things
+ * that have been removed and randomly resetting our iteration to the next
+ * chunk with radix_tree_iter_next(). Both radix_tree_iter_retry() and
+ * radix_tree_iter_next() cause radix_tree_next_slot() to be called with a
+ * NULL 'slot' variable.
+ */
+static void *untagged_iteration_fn(void *arg)
+{
+ struct radix_tree_iter iter;
+ void **slot;
+
+ while (!test_complete) {
+ rcu_read_lock();
+ radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+ void *entry;
+ int i;
+
+ /* busy wait to let removals happen */
+ for (i = 0; i < 1000000; i++)
+ ;
+
+ entry = radix_tree_deref_slot(slot);
+ if (unlikely(!entry))
+ continue;
+
+ if (radix_tree_deref_retry(entry)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+
+ if (rand() % 50 == 0)
+ slot = radix_tree_iter_next(&iter);
+ }
+ rcu_read_unlock();
+ }
+
+ return NULL;
+}
+
+/*
+ * Randomly remove entries to help induce radix_tree_iter_retry() calls in the
+ * two iteration functions.
+ */
+static void *remove_entries_fn(void *arg)
+{
+ while (!test_complete) {
+ int pgoff;
+
+ pgoff = rand() % 100;
+
+ pthread_mutex_lock(&tree_lock);
+ item_delete(&tree, pgoff);
+ pthread_mutex_unlock(&tree_lock);
+ }
+
+ return NULL;
+}
+
+/* This is a unit test for a bug found by the syzkaller tester */
+void iteration_test(void)
+{
+ int i;
+
+ printf("Running iteration tests for 10 seconds\n");
+
+ srand(time(0));
+ test_complete = false;
+
+ if (pthread_create(&threads[0], NULL, tagged_iteration_fn, NULL)) {
+ perror("pthread_create");
+ exit(1);
+ }
+ if (pthread_create(&threads[1], NULL, untagged_iteration_fn, NULL)) {
+ perror("pthread_create");
+ exit(1);
+ }
+ if (pthread_create(&threads[2], NULL, add_entries_fn, NULL)) {
+ perror("pthread_create");
+ exit(1);
+ }
+ if (pthread_create(&threads[3], NULL, remove_entries_fn, NULL)) {
+ perror("pthread_create");
+ exit(1);
+ }
+
+ sleep(10);
+ test_complete = true;
+
+ for (i = 0; i < NUM_THREADS; i++) {
+ if (pthread_join(threads[i], NULL)) {
+ perror("pthread_join");
+ exit(1);
+ }
+ }
+
+ item_kill_tree(&tree);
+}
regression1_test();
regression2_test();
regression3_test();
+ iteration_test();
single_thread_tests(long_run);
sleep(1);
#include "regression.h"
static RADIX_TREE(mt_tree, GFP_KERNEL);
-static pthread_mutex_t mt_lock;
+static pthread_mutex_t mt_lock = PTHREAD_MUTEX_INITIALIZER;
struct page {
pthread_mutex_t lock;
void tag_check(void);
void multiorder_checks(void);
+void iteration_test(void);
struct item *
item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);