git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'WIP.sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 3 Mar 2017 18:16:38 +0000 (10:16 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 3 Mar 2017 18:16:38 +0000 (10:16 -0800)
Pull sched.h split-up from Ingo Molnar:
 "The point of these changes is to significantly reduce the
  <linux/sched.h> header footprint, to speed up the kernel build and to
  have a cleaner header structure.

  After these changes the new <linux/sched.h>'s typical preprocessed
  size goes down from a previous ~0.68 MB (~22K lines) to ~0.45 MB (~15K
  lines), which is around 40% faster to build on typical configs.

  Not much changed from the last version (-v2) posted three weeks ago: I
  eliminated quirks, backmerged fixes plus I rebased it to an upstream
  SHA1 from yesterday that includes most changes queued up in -next plus
  all sched.h changes that were pending from Andrew.

  I've re-tested the series both on x86 and on cross-arch defconfigs,
  and did a bisectability test at a number of random points.

  I tried to test as many build configurations as possible, but some
  build breakage is probably still left - but it should be mostly
  limited to architectures that have no cross-compiler binaries
  available on kernel.org, and non-default configurations"

* 'WIP.sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (146 commits)
  sched/headers: Clean up <linux/sched.h>
  sched/headers: Remove #ifdefs from <linux/sched.h>
  sched/headers: Remove the <linux/topology.h> include from <linux/sched.h>
  sched/headers, hrtimer: Remove the <linux/wait.h> include from <linux/hrtimer.h>
  sched/headers, x86/apic: Remove the <linux/pm.h> header inclusion from <asm/apic.h>
  sched/headers, timers: Remove the <linux/sysctl.h> include from <linux/timer.h>
  sched/headers: Remove <linux/magic.h> from <linux/sched/task_stack.h>
  sched/headers: Remove <linux/sched.h> from <linux/sched/init.h>
  sched/core: Remove unused prefetch_stack()
  sched/headers: Remove <linux/rculist.h> from <linux/sched.h>
  sched/headers: Remove the 'init_pid_ns' prototype from <linux/sched.h>
  sched/headers: Remove <linux/signal.h> from <linux/sched.h>
  sched/headers: Remove <linux/rwsem.h> from <linux/sched.h>
  sched/headers: Remove the runqueue_is_locked() prototype
  sched/headers: Remove <linux/sched.h> from <linux/sched/hotplug.h>
  sched/headers: Remove <linux/sched.h> from <linux/sched/debug.h>
  sched/headers: Remove <linux/sched.h> from <linux/sched/nohz.h>
  sched/headers: Remove <linux/sched.h> from <linux/sched/stat.h>
  sched/headers: Remove the <linux/gfp.h> include from <linux/sched.h>
  sched/headers: Remove <linux/rtmutex.h> from <linux/sched.h>
  ...

24 files changed:
1  2 
drivers/block/drbd/drbd_main.c
drivers/cpufreq/intel_pstate.c
drivers/cpuidle/governors/menu.c
drivers/target/iscsi/cxgbit/cxgbit_target.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/vhost/vhost.c
drivers/virtio/virtio_balloon.c
fs/afs/rxrpc.c
fs/aio.c
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c
fs/btrfs/free-space-cache.c
fs/cifs/connect.c
fs/ncpfs/sock.c
fs/read_write.c
fs/splice.c
include/target/target_core_base.h
kernel/power/hibernate.c
mm/nommu.c
net/dns_resolver/dns_query.c
security/selinux/hooks.c

index 37000c6bb7f40edf3d7ec59bf7d49c2a42fa6bed,c7d530a95e53063d10b36c7a557e49d40f87bcc5..92c60cbd04ee8ce24d7e986dc3414ad57d637841
@@@ -52,6 -52,7 +52,7 @@@
  #define __KERNEL_SYSCALLS__
  #include <linux/unistd.h>
  #include <linux/vmalloc.h>
+ #include <linux/sched/signal.h>
  
  #include <linux/drbd_limits.h>
  #include "drbd_int.h"
@@@ -1846,7 -1847,7 +1847,7 @@@ int drbd_send_out_of_sync(struct drbd_p
  int drbd_send(struct drbd_connection *connection, struct socket *sock,
              void *buf, size_t size, unsigned msg_flags)
  {
 -      struct kvec iov;
 +      struct kvec iov = {.iov_base = buf, .iov_len = size};
        struct msghdr msg;
        int rv, sent = 0;
  
  
        /* THINK  if (signal_pending) return ... ? */
  
 -      iov.iov_base = buf;
 -      iov.iov_len  = size;
 -
        msg.msg_name       = NULL;
        msg.msg_namelen    = 0;
        msg.msg_control    = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags      = msg_flags | MSG_NOSIGNAL;
  
 +      iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
 +
        if (sock == connection->data.socket) {
                rcu_read_lock();
                connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
                drbd_update_congested(connection);
        }
        do {
 -              rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
 +              rv = sock_sendmsg(sock, &msg);
                if (rv == -EAGAIN) {
                        if (we_should_drop_the_connection(connection, sock))
                                break;
                if (rv < 0)
                        break;
                sent += rv;
 -              iov.iov_base += rv;
 -              iov.iov_len  -= rv;
        } while (sent < size);
  
        if (sock == connection->data.socket)
index 108826d9c40f72196d7a08c5caebca3457cbe0f9,8f6d29302aac509a5e43e92f3614798acb350f6b..b1fbaa30ae0415c330b9b1069e17900b99a48868
@@@ -19,7 -19,7 +19,7 @@@
  #include <linux/hrtimer.h>
  #include <linux/tick.h>
  #include <linux/slab.h>
- #include <linux/sched.h>
+ #include <linux/sched/cpufreq.h>
  #include <linux/list.h>
  #include <linux/cpu.h>
  #include <linux/cpufreq.h>
  
  #define INTEL_CPUFREQ_TRANSITION_LATENCY      20000
  
 -#define ATOM_RATIOS           0x66a
 -#define ATOM_VIDS             0x66b
 -#define ATOM_TURBO_RATIOS     0x66c
 -#define ATOM_TURBO_VIDS               0x66d
 -
  #ifdef CONFIG_ACPI
  #include <acpi/processor.h>
  #include <acpi/cppc_acpi.h>
@@@ -359,25 -364,37 +359,25 @@@ static bool driver_registered __read_mo
  static bool acpi_ppc;
  #endif
  
 -static struct perf_limits performance_limits = {
 -      .no_turbo = 0,
 -      .turbo_disabled = 0,
 -      .max_perf_pct = 100,
 -      .max_perf = int_ext_tofp(1),
 -      .min_perf_pct = 100,
 -      .min_perf = int_ext_tofp(1),
 -      .max_policy_pct = 100,
 -      .max_sysfs_pct = 100,
 -      .min_policy_pct = 0,
 -      .min_sysfs_pct = 0,
 -};
 +static struct perf_limits performance_limits;
 +static struct perf_limits powersave_limits;
 +static struct perf_limits *limits;
  
 -static struct perf_limits powersave_limits = {
 -      .no_turbo = 0,
 -      .turbo_disabled = 0,
 -      .max_perf_pct = 100,
 -      .max_perf = int_ext_tofp(1),
 -      .min_perf_pct = 0,
 -      .min_perf = 0,
 -      .max_policy_pct = 100,
 -      .max_sysfs_pct = 100,
 -      .min_policy_pct = 0,
 -      .min_sysfs_pct = 0,
 -};
 +static void intel_pstate_init_limits(struct perf_limits *limits)
 +{
 +      memset(limits, 0, sizeof(*limits));
 +      limits->max_perf_pct = 100;
 +      limits->max_perf = int_ext_tofp(1);
 +      limits->max_policy_pct = 100;
 +      limits->max_sysfs_pct = 100;
 +}
  
 -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
 -static struct perf_limits *limits = &performance_limits;
 -#else
 -static struct perf_limits *limits = &powersave_limits;
 -#endif
 +static void intel_pstate_set_performance_limits(struct perf_limits *limits)
 +{
 +      intel_pstate_init_limits(limits);
 +      limits->min_perf_pct = 100;
 +      limits->min_perf = int_ext_tofp(1);
 +}
  
  static DEFINE_MUTEX(intel_pstate_driver_lock);
  static DEFINE_MUTEX(intel_pstate_limits_lock);
@@@ -1350,7 -1367,7 +1350,7 @@@ static int atom_get_min_pstate(void
  {
        u64 value;
  
 -      rdmsrl(ATOM_RATIOS, value);
 +      rdmsrl(MSR_ATOM_CORE_RATIOS, value);
        return (value >> 8) & 0x7F;
  }
  
@@@ -1358,7 -1375,7 +1358,7 @@@ static int atom_get_max_pstate(void
  {
        u64 value;
  
 -      rdmsrl(ATOM_RATIOS, value);
 +      rdmsrl(MSR_ATOM_CORE_RATIOS, value);
        return (value >> 16) & 0x7F;
  }
  
@@@ -1366,7 -1383,7 +1366,7 @@@ static int atom_get_turbo_pstate(void
  {
        u64 value;
  
 -      rdmsrl(ATOM_TURBO_RATIOS, value);
 +      rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
        return value & 0x7F;
  }
  
@@@ -1428,7 -1445,7 +1428,7 @@@ static void atom_get_vid(struct cpudat
  {
        u64 value;
  
 -      rdmsrl(ATOM_VIDS, value);
 +      rdmsrl(MSR_ATOM_CORE_VIDS, value);
        cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
        cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
        cpudata->vid.ratio = div_fp(
                int_tofp(cpudata->pstate.max_pstate -
                        cpudata->pstate.min_pstate));
  
 -      rdmsrl(ATOM_TURBO_VIDS, value);
 +      rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
        cpudata->vid.turbo = value & 0x7f;
  }
  
@@@ -2067,6 -2084,20 +2067,6 @@@ static void intel_pstate_clear_update_u
        synchronize_sched();
  }
  
 -static void intel_pstate_set_performance_limits(struct perf_limits *limits)
 -{
 -      limits->no_turbo = 0;
 -      limits->turbo_disabled = 0;
 -      limits->max_perf_pct = 100;
 -      limits->max_perf = int_ext_tofp(1);
 -      limits->min_perf_pct = 100;
 -      limits->min_perf = int_ext_tofp(1);
 -      limits->max_policy_pct = 100;
 -      limits->max_sysfs_pct = 100;
 -      limits->min_policy_pct = 0;
 -      limits->min_sysfs_pct = 0;
 -}
 -
  static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
                                            struct perf_limits *limits)
  {
@@@ -2435,11 -2466,6 +2435,11 @@@ static int intel_pstate_register_driver
  {
        int ret;
  
 +      intel_pstate_init_limits(&powersave_limits);
 +      intel_pstate_set_performance_limits(&performance_limits);
 +      limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ?
 +                      &performance_limits : &powersave_limits;
 +
        ret = cpufreq_register_driver(intel_pstate_driver);
        if (ret) {
                intel_pstate_driver_cleanup();
index 6d6f46e79d94958b83920da3dec2ec47301e2f78,6d8a4026a9036a2548a0883f6443b9354fb076ad..b2330fd69e3464bbb5713a6b5dceadeba3421f0d
@@@ -18,6 -18,8 +18,8 @@@
  #include <linux/hrtimer.h>
  #include <linux/tick.h>
  #include <linux/sched.h>
+ #include <linux/sched/loadavg.h>
+ #include <linux/sched/stat.h>
  #include <linux/math64.h>
  #include <linux/cpu.h>
  
@@@ -287,7 -289,7 +289,7 @@@ static int menu_select(struct cpuidle_d
        unsigned int interactivity_req;
        unsigned int expected_interval;
        unsigned long nr_iowaiters, cpu_load;
 -      int resume_latency = dev_pm_qos_read_value(device);
 +      int resume_latency = dev_pm_qos_raw_read_value(device);
  
        if (data->needs_update) {
                menu_update(drv, dev);
index 2714e5901d1845cfe2583e8cf21f09e193f3ef3b,0c3e8fce3695e788d116eeaa4e9b1ccef12db12b..bdcc8b4c522a2fc73920384da94530827651fb1e
@@@ -8,6 -8,8 +8,8 @@@
  
  #include <linux/workqueue.h>
  #include <linux/kthread.h>
+ #include <linux/sched/signal.h>
  #include <asm/unaligned.h>
  #include <net/tcp.h>
  #include <target/target_core_base.h>
@@@ -162,14 -164,12 +164,14 @@@ cxgbit_tx_data_wr(struct cxgbit_sock *c
                  u32 len, u32 credits, u32 compl)
  {
        struct fw_ofld_tx_data_wr *req;
 +      const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
        u32 submode = cxgbit_skcb_submode(skb);
        u32 wr_ulp_mode = 0;
        u32 hdr_size = sizeof(*req);
        u32 opcode = FW_OFLD_TX_DATA_WR;
        u32 immlen = 0;
 -      u32 force = TX_FORCE_V(!submode);
 +      u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
 +                  T6_TX_FORCE_F;
  
        if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
                opcode = FW_ISCSI_TX_DATA_WR;
@@@ -245,7 -245,7 +247,7 @@@ void cxgbit_push_tx_frames(struct cxgbi
                }
                __skb_unlink(skb, &csk->txq);
                set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
 -              skb->csum = credits_needed + flowclen16;
 +              skb->csum = (__force __wsum)(credits_needed + flowclen16);
                csk->wr_cred -= credits_needed;
                csk->wr_una_cred += credits_needed;
  
@@@ -653,6 -653,26 +655,6 @@@ static int cxgbit_set_iso_npdu(struct c
        u32 max_npdu, max_iso_npdu;
  
        if (conn->login->leading_connection) {
 -              param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
 -                                                conn->param_list);
 -              if (!param) {
 -                      pr_err("param not found key %s\n", DATASEQUENCEINORDER);
 -                      return -1;
 -              }
 -
 -              if (strcmp(param->value, YES))
 -                      return 0;
 -
 -              param = iscsi_find_param_from_key(DATAPDUINORDER,
 -                                                conn->param_list);
 -              if (!param) {
 -                      pr_err("param not found key %s\n", DATAPDUINORDER);
 -                      return -1;
 -              }
 -
 -              if (strcmp(param->value, YES))
 -                      return 0;
 -
                param = iscsi_find_param_from_key(MAXBURSTLENGTH,
                                                  conn->param_list);
                if (!param) {
                if (kstrtou32(param->value, 0, &mbl) < 0)
                        return -1;
        } else {
 -              if (!conn->sess->sess_ops->DataSequenceInOrder)
 -                      return 0;
 -              if (!conn->sess->sess_ops->DataPDUInOrder)
 -                      return 0;
 -
                mbl = conn->sess->sess_ops->MaxBurstLength;
        }
  
        return 0;
  }
  
 +/*
 + * cxgbit_seq_pdu_inorder()
 + * @csk: pointer to cxgbit socket structure
 + *
 + * This function checks whether data sequence and data
 + * pdu are in order.
 + *
 + * Return: returns -1 on error, 0 if data sequence and
 + * data pdu are in order, 1 if data sequence or data pdu
 + * is not in order.
 + */
 +static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk)
 +{
 +      struct iscsi_conn *conn = csk->conn;
 +      struct iscsi_param *param;
 +
 +      if (conn->login->leading_connection) {
 +              param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
 +                                                conn->param_list);
 +              if (!param) {
 +                      pr_err("param not found key %s\n", DATASEQUENCEINORDER);
 +                      return -1;
 +              }
 +
 +              if (strcmp(param->value, YES))
 +                      return 1;
 +
 +              param = iscsi_find_param_from_key(DATAPDUINORDER,
 +                                                conn->param_list);
 +              if (!param) {
 +                      pr_err("param not found key %s\n", DATAPDUINORDER);
 +                      return -1;
 +              }
 +
 +              if (strcmp(param->value, YES))
 +                      return 1;
 +
 +      } else {
 +              if (!conn->sess->sess_ops->DataSequenceInOrder)
 +                      return 1;
 +              if (!conn->sess->sess_ops->DataPDUInOrder)
 +                      return 1;
 +      }
 +
 +      return 0;
 +}
 +
  static int cxgbit_set_params(struct iscsi_conn *conn)
  {
        struct cxgbit_sock *csk = conn->context;
        }
  
        if (!erl) {
 +              int ret;
 +
 +              ret = cxgbit_seq_pdu_inorder(csk);
 +              if (ret < 0) {
 +                      return -1;
 +              } else if (ret > 0) {
 +                      if (is_t5(cdev->lldi.adapter_type))
 +                              goto enable_ddp;
 +                      else
 +                              goto enable_digest;
 +              }
 +
                if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
                        if (cxgbit_set_iso_npdu(csk))
                                return -1;
                }
  
 +enable_ddp:
                if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
                        if (cxgbit_setup_conn_pgidx(csk,
                                                    ppm->tformat.pgsz_idx_dflt))
                }
        }
  
 +enable_digest:
        if (cxgbit_set_digest(csk))
                return -1;
  
@@@ -1021,36 -985,11 +1023,36 @@@ static int cxgbit_handle_iscsi_dataout(
        int rc, sg_nents, sg_off;
        bool dcrc_err = false;
  
 -      rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
 -      if (rc < 0)
 -              return rc;
 -      else if (!cmd)
 -              return 0;
 +      if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
 +              u32 offset = be32_to_cpu(hdr->offset);
 +              u32 ddp_data_len;
 +              u32 payload_length = ntoh24(hdr->dlength);
 +              bool success = false;
 +
 +              cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
 +              if (!cmd)
 +                      return 0;
 +
 +              ddp_data_len = offset - cmd->write_data_done;
 +              atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets);
 +
 +              cmd->write_data_done = offset;
 +              cmd->next_burst_len = ddp_data_len;
 +              cmd->data_sn = be32_to_cpu(hdr->datasn);
 +
 +              rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
 +                                              cmd, payload_length, &success);
 +              if (rc < 0)
 +                      return rc;
 +              else if (!success)
 +                      return 0;
 +      } else {
 +              rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
 +              if (rc < 0)
 +                      return rc;
 +              else if (!cmd)
 +                      return 0;
 +      }
  
        if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
                pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
@@@ -1414,9 -1353,6 +1416,9 @@@ static void cxgbit_lro_hskb_reset(struc
        for (i = 0; i < ssi->nr_frags; i++)
                put_page(skb_frag_page(&ssi->frags[i]));
        ssi->nr_frags = 0;
 +      skb->data_len = 0;
 +      skb->truesize -= skb->len;
 +      skb->len = 0;
  }
  
  static void
@@@ -1430,42 -1366,39 +1432,42 @@@ cxgbit_lro_skb_merge(struct cxgbit_soc
        unsigned int len = 0;
  
        if (pdu_cb->flags & PDUCBF_RX_HDR) {
 -              hpdu_cb->flags = pdu_cb->flags;
 +              u8 hfrag_idx = hssi->nr_frags;
 +
 +              hpdu_cb->flags |= pdu_cb->flags;
                hpdu_cb->seq = pdu_cb->seq;
                hpdu_cb->hdr = pdu_cb->hdr;
                hpdu_cb->hlen = pdu_cb->hlen;
  
 -              memcpy(&hssi->frags[0], &ssi->frags[pdu_cb->hfrag_idx],
 +              memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx],
                       sizeof(skb_frag_t));
  
 -              get_page(skb_frag_page(&hssi->frags[0]));
 -              hssi->nr_frags = 1;
 -              hpdu_cb->frags = 1;
 -              hpdu_cb->hfrag_idx = 0;
 +              get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
 +              hssi->nr_frags++;
 +              hpdu_cb->frags++;
 +              hpdu_cb->hfrag_idx = hfrag_idx;
  
 -              len = hssi->frags[0].size;
 -              hskb->len = len;
 -              hskb->data_len = len;
 -              hskb->truesize = len;
 +              len = hssi->frags[hfrag_idx].size;
 +              hskb->len += len;
 +              hskb->data_len += len;
 +              hskb->truesize += len;
        }
  
        if (pdu_cb->flags & PDUCBF_RX_DATA) {
 -              u8 hfrag_idx = 1, i;
 +              u8 dfrag_idx = hssi->nr_frags, i;
  
                hpdu_cb->flags |= pdu_cb->flags;
 +              hpdu_cb->dfrag_idx = dfrag_idx;
  
                len = 0;
 -              for (i = 0; i < pdu_cb->nr_dfrags; hfrag_idx++, i++) {
 -                      memcpy(&hssi->frags[hfrag_idx],
 +              for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) {
 +                      memcpy(&hssi->frags[dfrag_idx],
                               &ssi->frags[pdu_cb->dfrag_idx + i],
                               sizeof(skb_frag_t));
  
 -                      get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
 +                      get_page(skb_frag_page(&hssi->frags[dfrag_idx]));
  
 -                      len += hssi->frags[hfrag_idx].size;
 +                      len += hssi->frags[dfrag_idx].size;
  
                        hssi->nr_frags++;
                        hpdu_cb->frags++;
                hpdu_cb->dlen = pdu_cb->dlen;
                hpdu_cb->doffset = hpdu_cb->hlen;
                hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
 -              hpdu_cb->dfrag_idx = 1;
                hskb->len += len;
                hskb->data_len += len;
                hskb->truesize += len;
@@@ -1558,15 -1492,10 +1560,15 @@@ static int cxgbit_rx_lro_skb(struct cxg
  
  static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
  {
 +      struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
        int ret = -1;
  
 -      if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO))
 -              ret = cxgbit_rx_lro_skb(csk, skb);
 +      if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) {
 +              if (is_t5(lldi->adapter_type))
 +                      ret = cxgbit_rx_lro_skb(csk, skb);
 +              else
 +                      ret = cxgbit_process_lro_skb(csk, skb);
 +      }
  
        __kfree_skb(skb);
        return ret;
index 2285988c209b00ba1f845c8c7afff52ca1fb9825,fa1d578d56bdabd1abc0f9de7e81167c9c942f55..a91802432f2f47d1b163ba9f8e2da90dabe28e62
@@@ -24,6 -24,7 +24,7 @@@
  #include <linux/vmalloc.h>
  #include <linux/idr.h>
  #include <linux/delay.h>
+ #include <linux/sched/signal.h>
  #include <asm/unaligned.h>
  #include <net/ipv6.h>
  #include <scsi/scsi_proto.h>
@@@ -1431,17 -1432,36 +1432,17 @@@ static void iscsit_do_crypto_hash_buf
  }
  
  int
 -iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
 -                        struct iscsi_cmd **out_cmd)
 +__iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
 +                         struct iscsi_cmd *cmd, u32 payload_length,
 +                         bool *success)
  {
 -      struct iscsi_data *hdr = (struct iscsi_data *)buf;
 -      struct iscsi_cmd *cmd = NULL;
 +      struct iscsi_data *hdr = buf;
        struct se_cmd *se_cmd;
 -      u32 payload_length = ntoh24(hdr->dlength);
        int rc;
  
 -      if (!payload_length) {
 -              pr_warn("DataOUT payload is ZERO, ignoring.\n");
 -              return 0;
 -      }
 -
        /* iSCSI write */
        atomic_long_add(payload_length, &conn->sess->rx_data_octets);
  
 -      if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
 -              pr_err("DataSegmentLength: %u is greater than"
 -                      " MaxXmitDataSegmentLength: %u\n", payload_length,
 -                      conn->conn_ops->MaxXmitDataSegmentLength);
 -              return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
 -                                       buf);
 -      }
 -
 -      cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
 -                      payload_length);
 -      if (!cmd)
 -              return 0;
 -
        pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
                " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
                hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
                }
        }
        /*
 -       * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and
 +       * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
         * within-command recovery checks before receiving the payload.
         */
        rc = iscsit_check_pre_dataout(cmd, buf);
                return 0;
        else if (rc == DATAOUT_CANNOT_RECOVER)
                return -1;
 -
 -      *out_cmd = cmd;
 +      *success = true;
        return 0;
  }
 +EXPORT_SYMBOL(__iscsit_check_dataout_hdr);
 +
 +int
 +iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
 +                       struct iscsi_cmd **out_cmd)
 +{
 +      struct iscsi_data *hdr = buf;
 +      struct iscsi_cmd *cmd;
 +      u32 payload_length = ntoh24(hdr->dlength);
 +      int rc;
 +      bool success = false;
 +
 +      if (!payload_length) {
 +              pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n");
 +              return 0;
 +      }
 +
 +      if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
 +              pr_err_ratelimited("DataSegmentLength: %u is greater than"
 +                      " MaxXmitDataSegmentLength: %u\n", payload_length,
 +                      conn->conn_ops->MaxXmitDataSegmentLength);
 +              return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
 +      }
 +
 +      cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length);
 +      if (!cmd)
 +              return 0;
 +
 +      rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success);
 +
 +      if (success)
 +              *out_cmd = cmd;
 +
 +      return rc;
 +}
  EXPORT_SYMBOL(iscsit_check_dataout_hdr);
  
  static int
        return ret;
  }
  
 +static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf)
 +{
 +      switch (iscsi_tmf) {
 +      case ISCSI_TM_FUNC_ABORT_TASK:
 +              return TMR_ABORT_TASK;
 +      case ISCSI_TM_FUNC_ABORT_TASK_SET:
 +              return TMR_ABORT_TASK_SET;
 +      case ISCSI_TM_FUNC_CLEAR_ACA:
 +              return TMR_CLEAR_ACA;
 +      case ISCSI_TM_FUNC_CLEAR_TASK_SET:
 +              return TMR_CLEAR_TASK_SET;
 +      case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
 +              return TMR_LUN_RESET;
 +      case ISCSI_TM_FUNC_TARGET_WARM_RESET:
 +              return TMR_TARGET_WARM_RESET;
 +      case ISCSI_TM_FUNC_TARGET_COLD_RESET:
 +              return TMR_TARGET_COLD_RESET;
 +      default:
 +              return TMR_UNKNOWN;
 +      }
 +}
 +
  int
  iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                           unsigned char *buf)
        struct iscsi_tm *hdr;
        int out_of_order_cmdsn = 0, ret;
        bool sess_ref = false;
 -      u8 function;
 +      u8 function, tcm_function = TMR_UNKNOWN;
  
        hdr                     = (struct iscsi_tm *) buf;
        hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
         * LIO-Target $FABRIC_MOD
         */
        if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
 -
 -              u8 tcm_function;
 -              int ret;
 -
                transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
                                      conn->sess->se_sess, 0, DMA_NONE,
                                      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
  
                target_get_sess_cmd(&cmd->se_cmd, true);
                sess_ref = true;
 -
 -              switch (function) {
 -              case ISCSI_TM_FUNC_ABORT_TASK:
 -                      tcm_function = TMR_ABORT_TASK;
 -                      break;
 -              case ISCSI_TM_FUNC_ABORT_TASK_SET:
 -                      tcm_function = TMR_ABORT_TASK_SET;
 -                      break;
 -              case ISCSI_TM_FUNC_CLEAR_ACA:
 -                      tcm_function = TMR_CLEAR_ACA;
 -                      break;
 -              case ISCSI_TM_FUNC_CLEAR_TASK_SET:
 -                      tcm_function = TMR_CLEAR_TASK_SET;
 -                      break;
 -              case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
 -                      tcm_function = TMR_LUN_RESET;
 -                      break;
 -              case ISCSI_TM_FUNC_TARGET_WARM_RESET:
 -                      tcm_function = TMR_TARGET_WARM_RESET;
 -                      break;
 -              case ISCSI_TM_FUNC_TARGET_COLD_RESET:
 -                      tcm_function = TMR_TARGET_COLD_RESET;
 -                      break;
 -              default:
 +              tcm_function = iscsit_convert_tmf(function);
 +              if (tcm_function == TMR_UNKNOWN) {
                        pr_err("Unknown iSCSI TMR Function:"
                               " 0x%02x\n", function);
                        return iscsit_add_reject_cmd(cmd,
                                ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
                }
 -
 -              ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
 -                                       tcm_function, GFP_KERNEL);
 -              if (ret < 0)
 -                      return iscsit_add_reject_cmd(cmd,
 +      }
 +      ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
 +                               GFP_KERNEL);
 +      if (ret < 0)
 +              return iscsit_add_reject_cmd(cmd,
                                ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
  
 -              cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
 -      }
 +      cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
  
        cmd->iscsi_opcode       = ISCSI_OP_SCSI_TMFUNC;
        cmd->i_state            = ISTATE_SEND_TASKMGTRSP;
@@@ -4146,7 -4137,7 +4147,7 @@@ int iscsit_close_connection
        /*
         * During Connection recovery drop unacknowledged out of order
         * commands for this connection, and prepare the other commands
 -       * for realligence.
 +       * for reallegiance.
         *
         * During normal operation clear the out of order commands (but
         * do not free the struct iscsi_ooo_cmdsn's) and release all
         */
        if (atomic_read(&conn->connection_recovery)) {
                iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
 -              iscsit_prepare_cmds_for_realligance(conn);
 +              iscsit_prepare_cmds_for_reallegiance(conn);
        } else {
                iscsit_clear_ooo_cmdsns_for_conn(conn);
                iscsit_release_commands_from_conn(conn);
index a8bcbc43b047c80764de949e1b20591c46f8568c,a4d5e6749932b21ee2588aea814aef7828337357..9a96e17bf7cd5f7448c880ffafcaa123730ebe71
@@@ -17,6 -17,8 +17,8 @@@
   * GNU General Public License for more details.
   ******************************************************************************/
  
+ #include <linux/sched/signal.h>
  #include <scsi/iscsi_proto.h>
  #include <target/target_core_base.h>
  #include <target/target_core_fabric.h>
@@@ -44,8 -46,10 +46,8 @@@ void iscsit_set_dataout_sequence_values
         */
        if (cmd->unsolicited_data) {
                cmd->seq_start_offset = cmd->write_data_done;
 -              cmd->seq_end_offset = (cmd->write_data_done +
 -                      ((cmd->se_cmd.data_length >
 -                        conn->sess->sess_ops->FirstBurstLength) ?
 -                       conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
 +              cmd->seq_end_offset = min(cmd->se_cmd.data_length,
 +                                      conn->sess->sess_ops->FirstBurstLength);
                return;
        }
  
index 746b97f8e4f74fc25d2d497c45dfadd0920b8164,b03cc03423c16b891ccc60b9897701a928391901..ad8f3011bdc2f1ba901395fbd40abe8353cc753f
@@@ -20,6 -20,7 +20,7 @@@
  #include <linux/module.h>
  #include <linux/string.h>
  #include <linux/kthread.h>
+ #include <linux/sched/signal.h>
  #include <linux/idr.h>
  #include <linux/tcp.h>        /* TCP_NODELAY */
  #include <net/ipv6.h>         /* ipv6_addr_v4mapped() */
@@@ -223,7 -224,7 +224,7 @@@ int iscsi_check_for_session_reinstateme
                return 0;
  
        pr_debug("%s iSCSI Session SID %u is still active for %s,"
 -              " preforming session reinstatement.\n", (sessiontype) ?
 +              " performing session reinstatement.\n", (sessiontype) ?
                "Discovery" : "Normal", sess->sid,
                sess->sess_ops->InitiatorName);
  
index 5269e9ef031ca09c7638862e5d4f293e0355be87,29eb09e0cd15370fc59774cebb7450bac690a476..7ccc9c1cbfd1a664fb4c37a5dd71f305e735f4bb
@@@ -19,6 -19,7 +19,7 @@@
  #include <linux/ctype.h>
  #include <linux/kthread.h>
  #include <linux/slab.h>
+ #include <linux/sched/signal.h>
  #include <net/sock.h>
  #include <scsi/iscsi_proto.h>
  #include <target/target_core_base.h>
@@@ -1249,16 -1250,16 +1250,16 @@@ int iscsi_target_start_negotiation
  {
        int ret;
  
 -       if (conn->sock) {
 -               struct sock *sk = conn->sock->sk;
 +      if (conn->sock) {
 +              struct sock *sk = conn->sock->sk;
  
 -               write_lock_bh(&sk->sk_callback_lock);
 -               set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
 -               write_unlock_bh(&sk->sk_callback_lock);
 -       }
 +              write_lock_bh(&sk->sk_callback_lock);
 +              set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
 +              write_unlock_bh(&sk->sk_callback_lock);
 +      }
  
 -       ret = iscsi_target_do_login(conn, login);
 -       if (ret < 0) {
 +      ret = iscsi_target_do_login(conn, login);
 +      if (ret < 0) {
                cancel_delayed_work_sync(&conn->login_work);
                cancel_delayed_work_sync(&conn->login_cleanup_work);
                iscsi_target_restore_sock_callbacks(conn);
diff --combined drivers/vhost/vhost.c
index 9469364eefd73ef59fdfde46019ac6fdd1ce5538,dcbe2e29bf1704cdb8a20b3ccbe30fc9a4e26ac0..f0ba362d4c101aa970a12943d1e69944d54058b7
@@@ -27,6 -27,8 +27,8 @@@
  #include <linux/cgroup.h>
  #include <linux/module.h>
  #include <linux/sort.h>
+ #include <linux/sched/mm.h>
+ #include <linux/sched/signal.h>
  #include <linux/interval_tree_generic.h>
  
  #include "vhost.h"
@@@ -282,22 -284,6 +284,22 @@@ void vhost_poll_queue(struct vhost_pol
  }
  EXPORT_SYMBOL_GPL(vhost_poll_queue);
  
 +static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
 +{
 +      int j;
 +
 +      for (j = 0; j < VHOST_NUM_ADDRS; j++)
 +              vq->meta_iotlb[j] = NULL;
 +}
 +
 +static void vhost_vq_meta_reset(struct vhost_dev *d)
 +{
 +      int i;
 +
 +      for (i = 0; i < d->nvqs; ++i)
 +              __vhost_vq_meta_reset(d->vqs[i]);
 +}
 +
  static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
  {
        vq->busyloop_timeout = 0;
        vq->umem = NULL;
        vq->iotlb = NULL;
 +      __vhost_vq_meta_reset(vq);
  }
  
  static int vhost_worker(void *data)
@@@ -708,18 -693,6 +710,18 @@@ static int vq_memory_access_ok(void __u
        return 1;
  }
  
 +static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
 +                                             u64 addr, unsigned int size,
 +                                             int type)
 +{
 +      const struct vhost_umem_node *node = vq->meta_iotlb[type];
 +
 +      if (!node)
 +              return NULL;
 +
 +      return (void *)(uintptr_t)(node->userspace_addr + addr - node->start);
 +}
 +
  /* Can we switch to this memory table? */
  /* Caller should have device mutex but not vq mutex */
  static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
@@@ -762,14 -735,8 +764,14 @@@ static int vhost_copy_to_user(struct vh
                 * could be access through iotlb. So -EAGAIN should
                 * not happen in this case.
                 */
 -              /* TODO: more fast path */
                struct iov_iter t;
 +              void __user *uaddr = vhost_vq_meta_fetch(vq,
 +                                   (u64)(uintptr_t)to, size,
 +                                   VHOST_ADDR_DESC);
 +
 +              if (uaddr)
 +                      return __copy_to_user(uaddr, from, size);
 +
                ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
                                     ARRAY_SIZE(vq->iotlb_iov),
                                     VHOST_ACCESS_WO);
@@@ -797,14 -764,8 +799,14 @@@ static int vhost_copy_from_user(struct 
                 * could be access through iotlb. So -EAGAIN should
                 * not happen in this case.
                 */
 -              /* TODO: more fast path */
 +              void __user *uaddr = vhost_vq_meta_fetch(vq,
 +                                   (u64)(uintptr_t)from, size,
 +                                   VHOST_ADDR_DESC);
                struct iov_iter f;
 +
 +              if (uaddr)
 +                      return __copy_from_user(to, uaddr, size);
 +
                ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
                                     ARRAY_SIZE(vq->iotlb_iov),
                                     VHOST_ACCESS_RO);
@@@ -824,12 -785,17 +826,12 @@@ out
        return ret;
  }
  
 -static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 -                                   void __user *addr, unsigned size)
 +static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
 +                                        void __user *addr, unsigned int size,
 +                                        int type)
  {
        int ret;
  
 -      /* This function should be called after iotlb
 -       * prefetch, which means we're sure that vq
 -       * could be access through iotlb. So -EAGAIN should
 -       * not happen in this case.
 -       */
 -      /* TODO: more fast path */
        ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
                             ARRAY_SIZE(vq->iotlb_iov),
                             VHOST_ACCESS_RO);
        return vq->iotlb_iov[0].iov_base;
  }
  
 -#define vhost_put_user(vq, x, ptr) \
 +/* This function should be called after iotlb
 + * prefetch, which means we're sure that vq
 + * could be access through iotlb. So -EAGAIN should
 + * not happen in this case.
 + */
 +static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 +                                          void *addr, unsigned int size,
 +                                          int type)
 +{
 +      void __user *uaddr = vhost_vq_meta_fetch(vq,
 +                           (u64)(uintptr_t)addr, size, type);
 +      if (uaddr)
 +              return uaddr;
 +
 +      return __vhost_get_user_slow(vq, addr, size, type);
 +}
 +
 +#define vhost_put_user(vq, x, ptr)            \
  ({ \
        int ret = -EFAULT; \
        if (!vq->iotlb) { \
                ret = __put_user(x, ptr); \
        } else { \
                __typeof__(ptr) to = \
 -                      (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
 +                      (__typeof__(ptr)) __vhost_get_user(vq, ptr,     \
 +                                        sizeof(*ptr), VHOST_ADDR_USED); \
                if (to != NULL) \
                        ret = __put_user(x, to); \
                else \
        ret; \
  })
  
 -#define vhost_get_user(vq, x, ptr) \
 +#define vhost_get_user(vq, x, ptr, type)              \
  ({ \
        int ret; \
        if (!vq->iotlb) { \
                ret = __get_user(x, ptr); \
        } else { \
                __typeof__(ptr) from = \
 -                      (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
 +                      (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
 +                                                         sizeof(*ptr), \
 +                                                         type); \
                if (from != NULL) \
                        ret = __get_user(x, from); \
                else \
        ret; \
  })
  
 +#define vhost_get_avail(vq, x, ptr) \
 +      vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
 +
 +#define vhost_get_used(vq, x, ptr) \
 +      vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
 +
  static void vhost_dev_lock_vqs(struct vhost_dev *d)
  {
        int i = 0;
@@@ -1013,7 -953,6 +1015,7 @@@ static int vhost_process_iotlb_msg(stru
                        ret = -EFAULT;
                        break;
                }
 +              vhost_vq_meta_reset(dev);
                if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
                                         msg->iova + msg->size - 1,
                                         msg->uaddr, msg->perm)) {
                vhost_iotlb_notify_vq(dev, msg);
                break;
        case VHOST_IOTLB_INVALIDATE:
 +              vhost_vq_meta_reset(dev);
                vhost_del_umem_range(dev->iotlb, msg->iova,
                                     msg->iova + msg->size - 1);
                break;
@@@ -1167,26 -1105,12 +1169,26 @@@ static int vq_access_ok(struct vhost_vi
                        sizeof *used + num * sizeof *used->ring + s);
  }
  
 +static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
 +                               const struct vhost_umem_node *node,
 +                               int type)
 +{
 +      int access = (type == VHOST_ADDR_USED) ?
 +                   VHOST_ACCESS_WO : VHOST_ACCESS_RO;
 +
 +      if (likely(node->perm & access))
 +              vq->meta_iotlb[type] = node;
 +}
 +
  static int iotlb_access_ok(struct vhost_virtqueue *vq,
 -                         int access, u64 addr, u64 len)
 +                         int access, u64 addr, u64 len, int type)
  {
        const struct vhost_umem_node *node;
        struct vhost_umem *umem = vq->iotlb;
 -      u64 s = 0, size;
 +      u64 s = 0, size, orig_addr = addr;
 +
 +      if (vhost_vq_meta_fetch(vq, addr, len, type))
 +              return true;
  
        while (len > s) {
                node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
                }
  
                size = node->size - addr + node->start;
 +
 +              if (orig_addr == addr && size >= len)
 +                      vhost_vq_meta_update(vq, node, type);
 +
                s += size;
                addr += size;
        }
@@@ -1223,15 -1143,13 +1225,15 @@@ int vq_iotlb_prefetch(struct vhost_virt
                return 1;
  
        return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
 -                             num * sizeof *vq->desc) &&
 +                             num * sizeof(*vq->desc), VHOST_ADDR_DESC) &&
               iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
                               sizeof *vq->avail +
 -                             num * sizeof *vq->avail->ring + s) &&
 +                             num * sizeof(*vq->avail->ring) + s,
 +                             VHOST_ADDR_AVAIL) &&
               iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
                               sizeof *vq->used +
 -                             num * sizeof *vq->used->ring + s);
 +                             num * sizeof(*vq->used->ring) + s,
 +                             VHOST_ADDR_USED);
  }
  EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
  
@@@ -1812,7 -1730,7 +1814,7 @@@ int vhost_vq_init_access(struct vhost_v
                r = -EFAULT;
                goto err;
        }
 -      r = vhost_get_user(vq, last_used_idx, &vq->used->idx);
 +      r = vhost_get_used(vq, last_used_idx, &vq->used->idx);
        if (r) {
                vq_err(vq, "Can't access used idx at %p\n",
                       &vq->used->idx);
@@@ -2014,36 -1932,29 +2016,36 @@@ int vhost_get_vq_desc(struct vhost_virt
  
        /* Check it isn't doing very strange things with descriptor numbers. */
        last_avail_idx = vq->last_avail_idx;
 -      if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx))) {
 -              vq_err(vq, "Failed to access avail idx at %p\n",
 -                     &vq->avail->idx);
 -              return -EFAULT;
 -      }
 -      vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
  
 -      if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
 -              vq_err(vq, "Guest moved used index from %u to %u",
 -                     last_avail_idx, vq->avail_idx);
 -              return -EFAULT;
 -      }
 +      if (vq->avail_idx == vq->last_avail_idx) {
 +              if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) {
 +                      vq_err(vq, "Failed to access avail idx at %p\n",
 +                              &vq->avail->idx);
 +                      return -EFAULT;
 +              }
 +              vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
  
 -      /* If there's nothing new since last we looked, return invalid. */
 -      if (vq->avail_idx == last_avail_idx)
 -              return vq->num;
 +              if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
 +                      vq_err(vq, "Guest moved used index from %u to %u",
 +                              last_avail_idx, vq->avail_idx);
 +                      return -EFAULT;
 +              }
 +
 +              /* If there's nothing new since last we looked, return
 +               * invalid.
 +               */
 +              if (vq->avail_idx == last_avail_idx)
 +                      return vq->num;
  
 -      /* Only get avail ring entries after they have been exposed by guest. */
 -      smp_rmb();
 +              /* Only get avail ring entries after they have been
 +               * exposed by guest.
 +               */
 +              smp_rmb();
 +      }
  
        /* Grab the next descriptor number they're advertising, and increment
         * the index we've seen. */
 -      if (unlikely(vhost_get_user(vq, ring_head,
 +      if (unlikely(vhost_get_avail(vq, ring_head,
                     &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
                vq_err(vq, "Failed to read head: idx %d address %p\n",
                       last_avail_idx,
@@@ -2259,7 -2170,7 +2261,7 @@@ static bool vhost_notify(struct vhost_d
                 * with the barrier that the Guest executes when enabling
                 * interrupts. */
                smp_mb();
 -              if (vhost_get_user(vq, flags, &vq->avail->flags)) {
 +              if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
                        vq_err(vq, "Failed to get flags");
                        return true;
                }
         * interrupts. */
        smp_mb();
  
 -      if (vhost_get_user(vq, event, vhost_used_event(vq))) {
 +      if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
                vq_err(vq, "Failed to get used event idx");
                return true;
        }
@@@ -2333,7 -2244,7 +2335,7 @@@ bool vhost_vq_avail_empty(struct vhost_
        if (vq->avail_idx != vq->last_avail_idx)
                return false;
  
 -      r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
 +      r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
        if (unlikely(r))
                return false;
        vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
@@@ -2369,7 -2280,7 +2371,7 @@@ bool vhost_enable_notify(struct vhost_d
        /* They could have slipped one in as we were doing that: make
         * sure it's written, then check again. */
        smp_mb();
 -      r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
 +      r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
        if (r) {
                vq_err(vq, "Failed to check avail idx at %p: %d\n",
                       &vq->avail->idx, r);
index a2a4386d9836106c81b6bb908ddaf5229226eb08,a610061fabf6edcd48f245de6bc700605df27a71..4e1191508228cd86f6c3ee8174f4320c89e14686
@@@ -31,6 -31,7 +31,7 @@@
  #include <linux/wait.h>
  #include <linux/mm.h>
  #include <linux/mount.h>
+ #include <linux/magic.h>
  
  /*
   * Balloon device works in 4K page units.  So each page is pointed to by
@@@ -413,8 -414,7 +414,8 @@@ static int init_vqs(struct virtio_ballo
         * optionally stat.
         */
        nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
 -      err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names);
 +      err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names,
 +                      NULL);
        if (err)
                return err;
  
diff --combined fs/afs/rxrpc.c
index f3c1b40eb11fbab7744c173043de0f08ba4c98d7,4f6b00efc27c2ddf71894dae00509c9cf21e7c19..419ef05dcb5ec7149a3a0b5de657c75bbc6eabb4
@@@ -10,6 -10,8 +10,8 @@@
   */
  
  #include <linux/slab.h>
+ #include <linux/sched/signal.h>
  #include <net/sock.h>
  #include <net/af_rxrpc.h>
  #include <rxrpc/packet.h>
@@@ -260,7 -262,8 +262,7 @@@ void afs_flat_call_destructor(struct af
  /*
   * attach the data from a bunch of pages on an inode to a call
   */
 -static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
 -                        struct kvec *iov)
 +static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
  {
        struct page *pages[8];
        unsigned count, n, loop, offset, to;
  
                loop = 0;
                do {
 +                      struct bio_vec bvec = {.bv_page = pages[loop],
 +                                             .bv_offset = offset};
                        msg->msg_flags = 0;
                        to = PAGE_SIZE;
                        if (first + loop >= last)
                                to = call->last_to;
                        else
                                msg->msg_flags = MSG_MORE;
 -                      iov->iov_base = kmap(pages[loop]) + offset;
 -                      iov->iov_len = to - offset;
 +                      bvec.bv_len = to - offset;
                        offset = 0;
  
                        _debug("- range %u-%u%s",
                               offset, to, msg->msg_flags ? " [more]" : "");
 -                      iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC,
 -                                    iov, 1, to - offset);
 +                      iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC,
 +                                    &bvec, 1, to - offset);
  
                        /* have to change the state *before* sending the last
                         * packet as RxRPC might give us the reply before it
                                call->state = AFS_CALL_AWAIT_REPLY;
                        ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
                                                     msg, to - offset);
 -                      kunmap(pages[loop]);
                        if (ret < 0)
                                break;
                } while (++loop < count);
@@@ -390,7 -393,7 +392,7 @@@ int afs_make_call(struct in_addr *addr
                goto error_do_abort;
  
        if (call->send_pages) {
 -              ret = afs_send_pages(call, &msg, iov);
 +              ret = afs_send_pages(call, &msg);
                if (ret < 0)
                        goto error_do_abort;
        }
diff --combined fs/aio.c
index 7ec41aee7098f090fc82a3f024acfd9141122085,0bb108476de2828fd2888314d9c755ecf457860d..f52d925ee2599df6b3e0d71b0a332f20bb97d1d5
+++ b/fs/aio.c
@@@ -20,7 -20,7 +20,7 @@@
  #include <linux/backing-dev.h>
  #include <linux/uio.h>
  
- #include <linux/sched.h>
+ #include <linux/sched/signal.h>
  #include <linux/fs.h>
  #include <linux/file.h>
  #include <linux/mm.h>
@@@ -1495,7 -1495,7 +1495,7 @@@ static ssize_t aio_read(struct kiocb *r
                return ret;
        ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
        if (!ret)
 -              ret = aio_ret(req, file->f_op->read_iter(req, &iter));
 +              ret = aio_ret(req, call_read_iter(file, req, &iter));
        kfree(iovec);
        return ret;
  }
@@@ -1520,7 -1520,7 +1520,7 @@@ static ssize_t aio_write(struct kiocb *
        if (!ret) {
                req->ki_flags |= IOCB_WRITE;
                file_start_write(file);
 -              ret = aio_ret(req, file->f_op->write_iter(req, &iter));
 +              ret = aio_ret(req, call_write_iter(file, req, &iter));
                /*
                 * We release freeze protection in aio_complete().  Fool lockdep
                 * by telling it the lock got released so that it doesn't
diff --combined fs/btrfs/ctree.h
index 00e3518a26ef9caa1a0f379f55d3714ec21be1c5,a8812d95359dc87fc2cf30e52a09e43425ff03db..29b7fc28c607232987cc3b28fbe9a92e0f766df7
@@@ -20,6 -20,7 +20,7 @@@
  #define __BTRFS_CTREE__
  
  #include <linux/mm.h>
+ #include <linux/sched/signal.h>
  #include <linux/highmem.h>
  #include <linux/fs.h>
  #include <linux/rwsem.h>
@@@ -2687,7 -2688,7 +2688,7 @@@ enum btrfs_flush_state 
  };
  
  int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len);
 -int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes);
 +int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes);
  void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
  void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
                                            u64 len);
@@@ -2695,16 -2696,16 +2696,16 @@@ void btrfs_trans_release_metadata(struc
                                  struct btrfs_fs_info *fs_info);
  void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
  int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
 -                                struct inode *inode);
 -void btrfs_orphan_release_metadata(struct inode *inode);
 +                                struct btrfs_inode *inode);
 +void btrfs_orphan_release_metadata(struct btrfs_inode *inode);
  int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
                                     struct btrfs_block_rsv *rsv,
                                     int nitems,
                                     u64 *qgroup_reserved, bool use_global_rsv);
  void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
                                      struct btrfs_block_rsv *rsv);
 -int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
 -void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
 +int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
 +void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes);
  int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len);
  void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len);
  void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
@@@ -2982,7 -2983,7 +2983,7 @@@ int btrfs_check_dir_item_collision(stru
                          const char *name, int name_len);
  int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, const char *name,
 -                        int name_len, struct inode *dir,
 +                        int name_len, struct btrfs_inode *dir,
                          struct btrfs_key *location, u8 type, u64 index);
  struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
@@@ -3081,7 -3082,7 +3082,7 @@@ int btrfs_csum_one_bio(struct inode *in
                       u64 file_start, int contig);
  int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
                             struct list_head *list, int search_commit);
 -void btrfs_extent_item_to_extent_map(struct inode *inode,
 +void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
                                     const struct btrfs_path *path,
                                     struct btrfs_file_extent_item *fi,
                                     const bool new_inline,
@@@ -3100,9 -3101,9 +3101,9 @@@ struct btrfs_delalloc_work *btrfs_alloc
                                                    int delay_iput);
  void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work);
  
 -struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
 -                                         size_t pg_offset, u64 start, u64 len,
 -                                         int create);
 +struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
 +              struct page *page, size_t pg_offset, u64 start,
 +              u64 len, int create);
  noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
                              u64 *orig_start, u64 *orig_block_len,
                              u64 *ram_bytes);
@@@ -3123,13 -3124,13 +3124,13 @@@ static inline void btrfs_force_ra(struc
  }
  
  struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
 -int btrfs_set_inode_index(struct inode *dir, u64 *index);
 +int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
  int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root,
                       struct btrfs_inode *dir, struct btrfs_inode *inode,
                       const char *name, int name_len);
  int btrfs_add_link(struct btrfs_trans_handle *trans,
 -                 struct inode *parent_inode, struct inode *inode,
 +                 struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
                   const char *name, int name_len, int add_backref, u64 index);
  int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root,
@@@ -3166,16 -3167,15 +3167,16 @@@ void btrfs_destroy_cachep(void)
  long btrfs_ioctl_trans_end(struct file *file);
  struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                         struct btrfs_root *root, int *was_new);
 -struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 -                                  size_t pg_offset, u64 start, u64 end,
 -                                  int create);
 +struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
 +              struct page *page, size_t pg_offset,
 +              u64 start, u64 end, int create);
  int btrfs_update_inode(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct inode *inode);
  int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, struct inode *inode);
 -int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
 +int btrfs_orphan_add(struct btrfs_trans_handle *trans,
 +              struct btrfs_inode *inode);
  int btrfs_orphan_cleanup(struct btrfs_root *root);
  void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root);
@@@ -3216,11 -3216,11 +3217,11 @@@ ssize_t btrfs_dedupe_file_range(struct 
  int btrfs_auto_defrag_init(void);
  void btrfs_auto_defrag_exit(void);
  int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 -                         struct inode *inode);
 +                         struct btrfs_inode *inode);
  int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
  void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
  int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
 -void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 +void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
                             int skip_pinned);
  extern const struct file_operations btrfs_file_operations;
  int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
@@@ -3234,7 -3234,7 +3235,7 @@@ int btrfs_drop_extents(struct btrfs_tra
                       struct btrfs_root *root, struct inode *inode, u64 start,
                       u64 end, int drop_cache);
  int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 -                            struct inode *inode, u64 start, u64 end);
 +                            struct btrfs_inode *inode, u64 start, u64 end);
  int btrfs_release_file(struct inode *inode, struct file *file);
  int btrfs_dirty_pages(struct inode *inode, struct page **pages,
                      size_t num_pages, loff_t pos, size_t write_bytes,
diff --combined fs/btrfs/extent-tree.c
index 60794658ffd880d32f8921ba1286fa3e717c3be6,dad395b0b9fc8e9636febac0e7ebe49c0f581191..be5477676cc829e4efe89349fc9b7df540fd0dff
@@@ -16,6 -16,7 +16,7 @@@
   * Boston, MA 021110-1307, USA.
   */
  #include <linux/sched.h>
+ #include <linux/sched/signal.h>
  #include <linux/pagemap.h>
  #include <linux/writeback.h>
  #include <linux/blkdev.h>
@@@ -4135,10 -4136,10 +4136,10 @@@ static u64 btrfs_space_info_used(struc
                (may_use_included ? s_info->bytes_may_use : 0);
  }
  
 -int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
 +int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
  {
        struct btrfs_space_info *data_sinfo;
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
 +      struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 used;
        int ret = 0;
@@@ -4281,7 -4282,7 +4282,7 @@@ int btrfs_check_data_free_space(struct 
              round_down(start, fs_info->sectorsize);
        start = round_down(start, fs_info->sectorsize);
  
 -      ret = btrfs_alloc_data_chunk_ondemand(inode, len);
 +      ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len);
        if (ret < 0)
                return ret;
  
@@@ -5742,10 -5743,10 +5743,10 @@@ void btrfs_trans_release_chunk_metadata
  
  /* Can only return 0 or -ENOSPC */
  int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
 -                                struct inode *inode)
 +                                struct btrfs_inode *inode)
  {
 -      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 +      struct btrfs_root *root = inode->root;
        /*
         * We always use trans->block_rsv here as we will have reserved space
         * for our orphan when starting the transaction, using get_block_rsv()
         */
        u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
  
 -      trace_btrfs_space_reservation(fs_info, "orphan",
 -                                    btrfs_ino(BTRFS_I(inode)), num_bytes, 1);
 +      trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode), 
 +                      num_bytes, 1);
        return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
  }
  
 -void btrfs_orphan_release_metadata(struct inode *inode)
 +void btrfs_orphan_release_metadata(struct btrfs_inode *inode)
  {
 -      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 +      struct btrfs_root *root = inode->root;
        u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
  
 -      trace_btrfs_space_reservation(fs_info, "orphan",
 -                                    btrfs_ino(BTRFS_I(inode)), num_bytes, 0);
 +      trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode),
 +                      num_bytes, 0);
        btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes);
  }
  
@@@ -5846,8 -5847,7 +5847,8 @@@ void btrfs_subvolume_release_metadata(s
   * reserved extents that need to be freed.  This must be called with
   * BTRFS_I(inode)->lock held.
   */
 -static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
 +static unsigned drop_outstanding_extent(struct btrfs_inode *inode,
 +              u64 num_bytes)
  {
        unsigned drop_inode_space = 0;
        unsigned dropped_extents = 0;
  
        num_extents = count_max_extents(num_bytes);
        ASSERT(num_extents);
 -      ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
 -      BTRFS_I(inode)->outstanding_extents -= num_extents;
 +      ASSERT(inode->outstanding_extents >= num_extents);
 +      inode->outstanding_extents -= num_extents;
  
 -      if (BTRFS_I(inode)->outstanding_extents == 0 &&
 +      if (inode->outstanding_extents == 0 &&
            test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
 -                             &BTRFS_I(inode)->runtime_flags))
 +                             &inode->runtime_flags))
                drop_inode_space = 1;
  
        /*
         * If we have more or the same amount of outstanding extents than we have
         * reserved then we need to leave the reserved extents count alone.
         */
 -      if (BTRFS_I(inode)->outstanding_extents >=
 -          BTRFS_I(inode)->reserved_extents)
 +      if (inode->outstanding_extents >= inode->reserved_extents)
                return drop_inode_space;
  
 -      dropped_extents = BTRFS_I(inode)->reserved_extents -
 -              BTRFS_I(inode)->outstanding_extents;
 -      BTRFS_I(inode)->reserved_extents -= dropped_extents;
 +      dropped_extents = inode->reserved_extents - inode->outstanding_extents;
 +      inode->reserved_extents -= dropped_extents;
        return dropped_extents + drop_inode_space;
  }
  
   *
   * This must be called with BTRFS_I(inode)->lock held.
   */
 -static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
 +static u64 calc_csum_metadata_size(struct btrfs_inode *inode, u64 num_bytes,
                                   int reserve)
  {
 -      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
        u64 old_csums, num_csums;
  
 -      if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
 -          BTRFS_I(inode)->csum_bytes == 0)
 +      if (inode->flags & BTRFS_INODE_NODATASUM && inode->csum_bytes == 0)
                return 0;
  
 -      old_csums = btrfs_csum_bytes_to_leaves(fs_info,
 -                                             BTRFS_I(inode)->csum_bytes);
 +      old_csums = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes);
        if (reserve)
 -              BTRFS_I(inode)->csum_bytes += num_bytes;
 +              inode->csum_bytes += num_bytes;
        else
 -              BTRFS_I(inode)->csum_bytes -= num_bytes;
 -      num_csums = btrfs_csum_bytes_to_leaves(fs_info,
 -                                             BTRFS_I(inode)->csum_bytes);
 +              inode->csum_bytes -= num_bytes;
 +      num_csums = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes);
  
        /* No change, no need to reserve more */
        if (old_csums == num_csums)
        return btrfs_calc_trans_metadata_size(fs_info, old_csums - num_csums);
  }
  
 -int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 +int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
  {
 -      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 +      struct btrfs_root *root = inode->root;
        struct btrfs_block_rsv *block_rsv = &fs_info->delalloc_block_rsv;
        u64 to_reserve = 0;
        u64 csum_bytes;
                schedule_timeout(1);
  
        if (delalloc_lock)
 -              mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
 +              mutex_lock(&inode->delalloc_mutex);
  
        num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
  
 -      spin_lock(&BTRFS_I(inode)->lock);
 +      spin_lock(&inode->lock);
        nr_extents = count_max_extents(num_bytes);
 -      BTRFS_I(inode)->outstanding_extents += nr_extents;
 +      inode->outstanding_extents += nr_extents;
  
        nr_extents = 0;
 -      if (BTRFS_I(inode)->outstanding_extents >
 -          BTRFS_I(inode)->reserved_extents)
 -              nr_extents += BTRFS_I(inode)->outstanding_extents -
 -                      BTRFS_I(inode)->reserved_extents;
 +      if (inode->outstanding_extents > inode->reserved_extents)
 +              nr_extents += inode->outstanding_extents -
 +                      inode->reserved_extents;
  
        /* We always want to reserve a slot for updating the inode. */
        to_reserve = btrfs_calc_trans_metadata_size(fs_info, nr_extents + 1);
        to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
 -      csum_bytes = BTRFS_I(inode)->csum_bytes;
 -      spin_unlock(&BTRFS_I(inode)->lock);
 +      csum_bytes = inode->csum_bytes;
 +      spin_unlock(&inode->lock);
  
        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
                ret = btrfs_qgroup_reserve_meta(root,
                goto out_fail;
        }
  
 -      spin_lock(&BTRFS_I(inode)->lock);
 +      spin_lock(&inode->lock);
        if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
 -                           &BTRFS_I(inode)->runtime_flags)) {
 +                           &inode->runtime_flags)) {
                to_reserve -= btrfs_calc_trans_metadata_size(fs_info, 1);
                release_extra = true;
        }
 -      BTRFS_I(inode)->reserved_extents += nr_extents;
 -      spin_unlock(&BTRFS_I(inode)->lock);
 +      inode->reserved_extents += nr_extents;
 +      spin_unlock(&inode->lock);
  
        if (delalloc_lock)
 -              mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
 +              mutex_unlock(&inode->delalloc_mutex);
  
        if (to_reserve)
                trace_btrfs_space_reservation(fs_info, "delalloc",
 -                                    btrfs_ino(BTRFS_I(inode)), to_reserve, 1);
 +                                            btrfs_ino(inode), to_reserve, 1);
        if (release_extra)
                btrfs_block_rsv_release(fs_info, block_rsv,
                                btrfs_calc_trans_metadata_size(fs_info, 1));
        return 0;
  
  out_fail:
 -      spin_lock(&BTRFS_I(inode)->lock);
 +      spin_lock(&inode->lock);
        dropped = drop_outstanding_extent(inode, num_bytes);
        /*
         * If the inodes csum_bytes is the same as the original
         * csum_bytes then we know we haven't raced with any free()ers
         * so we can just reduce our inodes csum bytes and carry on.
         */
 -      if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
 +      if (inode->csum_bytes == csum_bytes) {
                calc_csum_metadata_size(inode, num_bytes, 0);
        } else {
 -              u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
 +              u64 orig_csum_bytes = inode->csum_bytes;
                u64 bytes;
  
                /*
                 * number of bytes that were freed while we were trying our
                 * reservation.
                 */
 -              bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
 -              BTRFS_I(inode)->csum_bytes = csum_bytes;
 +              bytes = csum_bytes - inode->csum_bytes;
 +              inode->csum_bytes = csum_bytes;
                to_free = calc_csum_metadata_size(inode, bytes, 0);
  
  
                 * been making this reservation and our ->csum_bytes were not
                 * artificially inflated.
                 */
 -              BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
 +              inode->csum_bytes = csum_bytes - num_bytes;
                bytes = csum_bytes - orig_csum_bytes;
                bytes = calc_csum_metadata_size(inode, bytes, 0);
  
                 * need to do anything, the other free-ers did the correct
                 * thing.
                 */
 -              BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
 +              inode->csum_bytes = orig_csum_bytes - num_bytes;
                if (bytes > to_free)
                        to_free = bytes - to_free;
                else
                        to_free = 0;
        }
 -      spin_unlock(&BTRFS_I(inode)->lock);
 +      spin_unlock(&inode->lock);
        if (dropped)
                to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
  
        if (to_free) {
                btrfs_block_rsv_release(fs_info, block_rsv, to_free);
                trace_btrfs_space_reservation(fs_info, "delalloc",
 -                                    btrfs_ino(BTRFS_I(inode)), to_free, 0);
 +                                            btrfs_ino(inode), to_free, 0);
        }
        if (delalloc_lock)
 -              mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
 +              mutex_unlock(&inode->delalloc_mutex);
        return ret;
  }
  
   * once we complete IO for a given set of bytes to release their metadata
   * reservations.
   */
 -void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 +void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes)
  {
 -      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
        u64 to_free = 0;
        unsigned dropped;
  
        num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
 -      spin_lock(&BTRFS_I(inode)->lock);
 +      spin_lock(&inode->lock);
        dropped = drop_outstanding_extent(inode, num_bytes);
  
        if (num_bytes)
                to_free = calc_csum_metadata_size(inode, num_bytes, 0);
 -      spin_unlock(&BTRFS_I(inode)->lock);
 +      spin_unlock(&inode->lock);
        if (dropped > 0)
                to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
  
        if (btrfs_is_testing(fs_info))
                return;
  
 -      trace_btrfs_space_reservation(fs_info, "delalloc",
 -                                    btrfs_ino(BTRFS_I(inode)), to_free, 0);
 +      trace_btrfs_space_reservation(fs_info, "delalloc", btrfs_ino(inode),
 +                                    to_free, 0);
  
        btrfs_block_rsv_release(fs_info, &fs_info->delalloc_block_rsv, to_free);
  }
@@@ -6136,7 -6142,7 +6137,7 @@@ int btrfs_delalloc_reserve_space(struc
        ret = btrfs_check_data_free_space(inode, start, len);
        if (ret < 0)
                return ret;
 -      ret = btrfs_delalloc_reserve_metadata(inode, len);
 +      ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len);
        if (ret < 0)
                btrfs_free_reserved_data_space(inode, start, len);
        return ret;
   */
  void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
  {
 -      btrfs_delalloc_release_metadata(inode, len);
 +      btrfs_delalloc_release_metadata(BTRFS_I(inode), len);
        btrfs_free_reserved_data_space(inode, start, len);
  }
  
@@@ -9735,11 -9741,6 +9736,11 @@@ void btrfs_put_block_group_cache(struc
        }
  }
  
 +/*
 + * Must be called only after stopping all workers, since we could have block
 + * group caching kthreads running, and therefore they could race with us if we
 + * freed the block groups before stopping them.
 + */
  int btrfs_free_block_groups(struct btrfs_fs_info *info)
  {
        struct btrfs_block_group_cache *block_group;
                list_del(&block_group->list);
                up_write(&block_group->space_info->groups_sem);
  
 -              if (block_group->cached == BTRFS_CACHE_STARTED)
 -                      wait_block_group_cache_done(block_group);
 -
                /*
                 * We haven't cached this block group, which means we could
                 * possibly have excluded extents on this block group.
                        free_excluded_extents(info, block_group);
  
                btrfs_remove_free_space_cache(block_group);
 +              ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
                ASSERT(list_empty(&block_group->dirty_list));
                ASSERT(list_empty(&block_group->io_list));
                ASSERT(list_empty(&block_group->bg_list));
@@@ -10340,7 -10343,7 +10341,7 @@@ int btrfs_remove_block_group(struct btr
        mutex_unlock(&trans->transaction->cache_write_mutex);
  
        if (!IS_ERR(inode)) {
 -              ret = btrfs_orphan_add(trans, inode);
 +              ret = btrfs_orphan_add(trans, BTRFS_I(inode));
                if (ret) {
                        btrfs_add_delayed_iput(inode);
                        goto out;
index 7dcf0b100dcd9dca11e2f42d92c6ec9ec0886376,493a654b60127b69960abf59fc5d2f72392fb86e..da6841efac26b1be3509ad3e410c34e72b253a65
@@@ -18,6 -18,7 +18,7 @@@
  
  #include <linux/pagemap.h>
  #include <linux/sched.h>
+ #include <linux/sched/signal.h>
  #include <linux/slab.h>
  #include <linux/math64.h>
  #include <linux/ratelimit.h>
@@@ -260,7 -261,7 +261,7 @@@ int btrfs_truncate_free_space_cache(str
                btrfs_free_path(path);
        }
  
 -      btrfs_i_size_write(inode, 0);
 +      btrfs_i_size_write(BTRFS_I(inode), 0);
        truncate_pagecache(inode, 0);
  
        /*
@@@ -3545,8 -3546,7 +3546,8 @@@ int btrfs_write_out_ino_cache(struct bt
  
        if (ret) {
                if (release_metadata)
 -                      btrfs_delalloc_release_metadata(inode, inode->i_size);
 +                      btrfs_delalloc_release_metadata(BTRFS_I(inode),
 +                                      inode->i_size);
  #ifdef DEBUG
                btrfs_err(fs_info,
                          "failed to write free ino cache for root %llu",
diff --combined fs/cifs/connect.c
index 8a3ecef30d3ce5b8e4b3a325ea147b457749a616,9bf25be05636f194ba3a50eebaa9ec00d6c1378e..3aa457f8321444168091a000942d42930ff62911
@@@ -21,6 -21,7 +21,7 @@@
  #include <linux/fs.h>
  #include <linux/net.h>
  #include <linux/string.h>
+ #include <linux/sched/signal.h>
  #include <linux/list.h>
  #include <linux/wait.h>
  #include <linux/slab.h>
@@@ -2455,7 -2456,7 +2456,7 @@@ cifs_set_cifscreds(struct smb_vol *vol
        }
  
        down_read(&key->sem);
 -      upayload = user_key_payload(key);
 +      upayload = user_key_payload_locked(key);
        if (IS_ERR_OR_NULL(upayload)) {
                rc = upayload ? PTR_ERR(upayload) : -EINVAL;
                goto out_key_put;
diff --combined fs/ncpfs/sock.c
index 4bfeae289b00dbc798f7f725893cfdc981a3fb3c,bdea177aa405dfb3558a7e995ae8d6cc0254a759..98b6db0ed63e0323477be82768f9c3c08a06de5d
@@@ -16,6 -16,7 +16,7 @@@
  #include <linux/fcntl.h>
  #include <linux/stat.h>
  #include <linux/string.h>
+ #include <linux/sched/signal.h>
  #include <linux/uaccess.h>
  #include <linux/in.h>
  #include <linux/net.h>
@@@ -40,12 -41,19 +41,12 @@@ static int _recv(struct socket *sock, v
        return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
  }
  
 -static inline int do_send(struct socket *sock, struct kvec *vec, int count,
 -                        int len, unsigned flags)
 -{
 -      struct msghdr msg = { .msg_flags = flags };
 -      return kernel_sendmsg(sock, &msg, vec, count, len);
 -}
 -
  static int _send(struct socket *sock, const void *buff, int len)
  {
 -      struct kvec vec;
 -      vec.iov_base = (void *) buff;
 -      vec.iov_len = len;
 -      return do_send(sock, &vec, 1, len, 0);
 +      struct msghdr msg = { .msg_flags = 0 };
 +      struct kvec vec = {.iov_base = (void *)buff, .iov_len = len};
 +      iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &vec, 1, len);
 +      return sock_sendmsg(sock, &msg);
  }
  
  struct ncp_request_reply {
@@@ -56,7 -64,9 +57,7 @@@
        size_t datalen;
        int result;
        enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
 -      struct kvec* tx_ciov;
 -      size_t tx_totallen;
 -      size_t tx_iovlen;
 +      struct iov_iter from;
        struct kvec tx_iov[3];
        u_int16_t tx_type;
        u_int32_t sign[6];
@@@ -196,22 -206,28 +197,22 @@@ static inline void __ncptcp_abort(struc
  
  static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
  {
 -      struct kvec vec[3];
 -      /* sock_sendmsg updates iov pointers for us :-( */
 -      memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0]));
 -      return do_send(sock, vec, req->tx_iovlen,
 -                     req->tx_totallen, MSG_DONTWAIT);
 +      struct msghdr msg = { .msg_iter = req->from, .msg_flags = MSG_DONTWAIT };
 +      return sock_sendmsg(sock, &msg);
  }
  
  static void __ncptcp_try_send(struct ncp_server *server)
  {
        struct ncp_request_reply *rq;
 -      struct kvec *iov;
 -      struct kvec iovc[3];
 +      struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
        int result;
  
        rq = server->tx.creq;
        if (!rq)
                return;
  
 -      /* sock_sendmsg updates iov pointers for us :-( */
 -      memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
 -      result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
 -                       rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);
 +      msg.msg_iter = rq->from;
 +      result = sock_sendmsg(server->ncp_sock, &msg);
  
        if (result == -EAGAIN)
                return;
                __ncp_abort_request(server, rq, result);
                return;
        }
 -      if (result >= rq->tx_totallen) {
 +      if (!msg_data_left(&msg)) {
                server->rcv.creq = rq;
                server->tx.creq = NULL;
                return;
        }
 -      rq->tx_totallen -= result;
 -      iov = rq->tx_ciov;
 -      while (iov->iov_len <= result) {
 -              result -= iov->iov_len;
 -              iov++;
 -              rq->tx_iovlen--;
 -      }
 -      iov->iov_base += result;
 -      iov->iov_len -= result;
 -      rq->tx_ciov = iov;
 +      rq->from = msg.msg_iter;
  }
  
  static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
        
  static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
  {
 -      size_t signlen;
 -      struct ncp_request_header* h;
 +      size_t signlen, len = req->tx_iov[1].iov_len;
 +      struct ncp_request_header *h = req->tx_iov[1].iov_base;
        
 -      req->tx_ciov = req->tx_iov + 1;
 -
 -      h = req->tx_iov[1].iov_base;
        ncp_init_header(server, req, h);
 -      signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, 
 -                      req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
 -                      cpu_to_le32(req->tx_totallen), req->sign);
 +      signlen = sign_packet(server,
 +                      req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, 
 +                      len - sizeof(struct ncp_request_header) + 1,
 +                      cpu_to_le32(len), req->sign);
        if (signlen) {
 -              req->tx_ciov[1].iov_base = req->sign;
 -              req->tx_ciov[1].iov_len = signlen;
 -              req->tx_iovlen += 1;
 -              req->tx_totallen += signlen;
 +              /* NCP over UDP appends signature */
 +              req->tx_iov[2].iov_base = req->sign;
 +              req->tx_iov[2].iov_len = signlen;
        }
 +      iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
 +                      req->tx_iov + 1, signlen ? 2 : 1, len + signlen);
        server->rcv.creq = req;
        server->timeout_last = server->m.time_out;
        server->timeout_retries = server->m.retry_count;
  
  static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
  {
 -      size_t signlen;
 -      struct ncp_request_header* h;
 +      size_t signlen, len = req->tx_iov[1].iov_len;
 +      struct ncp_request_header *h = req->tx_iov[1].iov_base;
  
 -      req->tx_ciov = req->tx_iov;
 -      h = req->tx_iov[1].iov_base;
        ncp_init_header(server, req, h);
        signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
 -                      req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
 -                      cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16;
 +                      len - sizeof(struct ncp_request_header) + 1,
 +                      cpu_to_be32(len + 24), req->sign + 4) + 16;
  
        req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
 -      req->sign[1] = htonl(req->tx_totallen + signlen);
 +      req->sign[1] = htonl(len + signlen);
        req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
        req->sign[3] = htonl(req->datalen + 8);
 +      /* NCP over TCP prepends signature */
        req->tx_iov[0].iov_base = req->sign;
        req->tx_iov[0].iov_len = signlen;
 -      req->tx_iovlen += 1;
 -      req->tx_totallen += signlen;
 +      iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
 +                      req->tx_iov, 2, len + signlen);
  
        server->tx.creq = req;
        __ncptcp_try_send(server);
@@@ -338,17 -365,18 +339,17 @@@ static void __ncp_next_request(struct n
  static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
  {
        if (server->info_sock) {
 -              struct kvec iov[2];
 -              __be32 hdr[2];
 -      
 -              hdr[0] = cpu_to_be32(len + 8);
 -              hdr[1] = cpu_to_be32(id);
 -      
 -              iov[0].iov_base = hdr;
 -              iov[0].iov_len = 8;
 -              iov[1].iov_base = (void *) data;
 -              iov[1].iov_len = len;
 +              struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
 +              __be32 hdr[2] = {cpu_to_be32(len + 8), cpu_to_be32(id)};
 +              struct kvec iov[2] = {
 +                      {.iov_base = hdr, .iov_len = 8},
 +                      {.iov_base = (void *)data, .iov_len = len},
 +              };
 +
 +              iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE,
 +                              iov, 2, len + 8);
  
 -              do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL);
 +              sock_sendmsg(server->info_sock, &msg);
        }
  }
  
@@@ -684,6 -712,8 +685,6 @@@ static int do_ncp_rpc_call(struct ncp_s
        req->datalen = max_reply_size;
        req->tx_iov[1].iov_base = server->packet;
        req->tx_iov[1].iov_len = size;
 -      req->tx_iovlen = 1;
 -      req->tx_totallen = size;
        req->tx_type = *(u_int16_t*)server->packet;
  
        result = ncp_add_request(server, req);
diff --combined fs/read_write.c
index f2ed9fdc98fdea5fdc681a611581970d1f433c82,dc60075653a84b9f45299db074719fe56861c1c7..c4f88afbc67f49ae9e451e06a81c45c37f8171f5
@@@ -4,8 -4,9 +4,9 @@@
   *  Copyright (C) 1991, 1992  Linus Torvalds
   */
  
- #include <linux/slab.h> 
+ #include <linux/slab.h>
  #include <linux/stat.h>
+ #include <linux/sched/xacct.h>
  #include <linux/fcntl.h>
  #include <linux/file.h>
  #include <linux/uio.h>
@@@ -23,6 -24,9 +24,6 @@@
  #include <linux/uaccess.h>
  #include <asm/unistd.h>
  
 -typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
 -typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *);
 -
  const struct file_operations generic_ro_fops = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
@@@ -367,7 -371,7 +368,7 @@@ ssize_t vfs_iter_read(struct file *file
        kiocb.ki_pos = *ppos;
  
        iter->type |= READ;
 -      ret = file->f_op->read_iter(&kiocb, iter);
 +      ret = call_read_iter(file, &kiocb, iter);
        BUG_ON(ret == -EIOCBQUEUED);
        if (ret > 0)
                *ppos = kiocb.ki_pos;
@@@ -387,7 -391,7 +388,7 @@@ ssize_t vfs_iter_write(struct file *fil
        kiocb.ki_pos = *ppos;
  
        iter->type |= WRITE;
 -      ret = file->f_op->write_iter(&kiocb, iter);
 +      ret = call_write_iter(file, &kiocb, iter);
        BUG_ON(ret == -EIOCBQUEUED);
        if (ret > 0)
                *ppos = kiocb.ki_pos;
@@@ -436,7 -440,7 +437,7 @@@ static ssize_t new_sync_read(struct fil
        kiocb.ki_pos = *ppos;
        iov_iter_init(&iter, READ, &iov, 1, len);
  
 -      ret = filp->f_op->read_iter(&kiocb, &iter);
 +      ret = call_read_iter(filp, &kiocb, &iter);
        BUG_ON(ret == -EIOCBQUEUED);
        *ppos = kiocb.ki_pos;
        return ret;
@@@ -493,7 -497,7 +494,7 @@@ static ssize_t new_sync_write(struct fi
        kiocb.ki_pos = *ppos;
        iov_iter_init(&iter, WRITE, &iov, 1, len);
  
 -      ret = filp->f_op->write_iter(&kiocb, &iter);
 +      ret = call_write_iter(filp, &kiocb, &iter);
        BUG_ON(ret == -EIOCBQUEUED);
        if (ret > 0)
                *ppos = kiocb.ki_pos;
@@@ -672,7 -676,7 +673,7 @@@ unsigned long iov_shorten(struct iovec 
  EXPORT_SYMBOL(iov_shorten);
  
  static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
 -              loff_t *ppos, iter_fn_t fn, int flags)
 +              loff_t *ppos, int type, int flags)
  {
        struct kiocb kiocb;
        ssize_t ret;
                kiocb.ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
        kiocb.ki_pos = *ppos;
  
 -      ret = fn(&kiocb, iter);
 +      if (type == READ)
 +              ret = call_read_iter(filp, &kiocb, iter);
 +      else
 +              ret = call_write_iter(filp, &kiocb, iter);
        BUG_ON(ret == -EIOCBQUEUED);
        *ppos = kiocb.ki_pos;
        return ret;
  
  /* Do it by hand, with file-ops */
  static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
 -              loff_t *ppos, io_fn_t fn, int flags)
 +              loff_t *ppos, int type, int flags)
  {
        ssize_t ret = 0;
  
                struct iovec iovec = iov_iter_iovec(iter);
                ssize_t nr;
  
 -              nr = fn(filp, iovec.iov_base, iovec.iov_len, ppos);
 +              if (type == READ) {
 +                      nr = filp->f_op->read(filp, iovec.iov_base,
 +                                            iovec.iov_len, ppos);
 +              } else {
 +                      nr = filp->f_op->write(filp, iovec.iov_base,
 +                                             iovec.iov_len, ppos);
 +              }
  
                if (nr < 0) {
                        if (!ret)
@@@ -840,32 -835,50 +841,32 @@@ out
        return ret;
  }
  
 -static ssize_t do_readv_writev(int type, struct file *file,
 -                             const struct iovec __user * uvector,
 -                             unsigned long nr_segs, loff_t *pos,
 -                             int flags)
 +static ssize_t __do_readv_writev(int type, struct file *file,
 +                               struct iov_iter *iter, loff_t *pos, int flags)
  {
        size_t tot_len;
 -      struct iovec iovstack[UIO_FASTIOV];
 -      struct iovec *iov = iovstack;
 -      struct iov_iter iter;
 -      ssize_t ret;
 -      io_fn_t fn;
 -      iter_fn_t iter_fn;
 -
 -      ret = import_iovec(type, uvector, nr_segs,
 -                         ARRAY_SIZE(iovstack), &iov, &iter);
 -      if (ret < 0)
 -              return ret;
 +      ssize_t ret = 0;
  
 -      tot_len = iov_iter_count(&iter);
 +      tot_len = iov_iter_count(iter);
        if (!tot_len)
                goto out;
        ret = rw_verify_area(type, file, pos, tot_len);
        if (ret < 0)
                goto out;
  
 -      if (type == READ) {
 -              fn = file->f_op->read;
 -              iter_fn = file->f_op->read_iter;
 -      } else {
 -              fn = (io_fn_t)file->f_op->write;
 -              iter_fn = file->f_op->write_iter;
 +      if (type != READ)
                file_start_write(file);
 -      }
  
 -      if (iter_fn)
 -              ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags);
 +      if ((type == READ && file->f_op->read_iter) ||
 +          (type == WRITE && file->f_op->write_iter))
 +              ret = do_iter_readv_writev(file, iter, pos, type, flags);
        else
 -              ret = do_loop_readv_writev(file, &iter, pos, fn, flags);
 +              ret = do_loop_readv_writev(file, iter, pos, type, flags);
  
        if (type != READ)
                file_end_write(file);
  
  out:
 -      kfree(iov);
        if ((ret + (type == READ)) > 0) {
                if (type == READ)
                        fsnotify_access(file);
        return ret;
  }
  
 +static ssize_t do_readv_writev(int type, struct file *file,
 +                             const struct iovec __user *uvector,
 +                             unsigned long nr_segs, loff_t *pos,
 +                             int flags)
 +{
 +      struct iovec iovstack[UIO_FASTIOV];
 +      struct iovec *iov = iovstack;
 +      struct iov_iter iter;
 +      ssize_t ret;
 +
 +      ret = import_iovec(type, uvector, nr_segs,
 +                         ARRAY_SIZE(iovstack), &iov, &iter);
 +      if (ret < 0)
 +              return ret;
 +
 +      ret = __do_readv_writev(type, file, &iter, pos, flags);
 +      kfree(iov);
 +
 +      return ret;
 +}
 +
  ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
                  unsigned long vlen, loff_t *pos, int flags)
  {
@@@ -1073,19 -1065,51 +1074,19 @@@ static ssize_t compat_do_readv_writev(i
                               unsigned long nr_segs, loff_t *pos,
                               int flags)
  {
 -      compat_ssize_t tot_len;
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;
        ssize_t ret;
 -      io_fn_t fn;
 -      iter_fn_t iter_fn;
  
        ret = compat_import_iovec(type, uvector, nr_segs,
                                  UIO_FASTIOV, &iov, &iter);
        if (ret < 0)
                return ret;
  
 -      tot_len = iov_iter_count(&iter);
 -      if (!tot_len)
 -              goto out;
 -      ret = rw_verify_area(type, file, pos, tot_len);
 -      if (ret < 0)
 -              goto out;
 -
 -      if (type == READ) {
 -              fn = file->f_op->read;
 -              iter_fn = file->f_op->read_iter;
 -      } else {
 -              fn = (io_fn_t)file->f_op->write;
 -              iter_fn = file->f_op->write_iter;
 -              file_start_write(file);
 -      }
 -
 -      if (iter_fn)
 -              ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags);
 -      else
 -              ret = do_loop_readv_writev(file, &iter, pos, fn, flags);
 -
 -      if (type != READ)
 -              file_end_write(file);
 -
 -out:
 +      ret = __do_readv_writev(type, file, &iter, pos, flags);
        kfree(iov);
 -      if ((ret + (type == READ)) > 0) {
 -              if (type == READ)
 -                      fsnotify_access(file);
 -              else
 -                      fsnotify_modify(file);
 -      }
 +
        return ret;
  }
  
@@@ -1495,11 -1519,6 +1496,11 @@@ ssize_t vfs_copy_file_range(struct fil
        if (flags != 0)
                return -EINVAL;
  
 +      if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
 +              return -EISDIR;
 +      if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
 +              return -EINVAL;
 +
        ret = rw_verify_area(READ, file_in, &pos_in, len);
        if (unlikely(ret))
                return ret;
        if (len == 0)
                return 0;
  
 -      sb_start_write(inode_out->i_sb);
 +      file_start_write(file_out);
  
        /*
         * Try cloning first, this is supported by more file systems, and
@@@ -1556,7 -1575,7 +1557,7 @@@ done
        inc_syscr(current);
        inc_syscw(current);
  
 -      sb_end_write(inode_out->i_sb);
 +      file_end_write(file_out);
  
        return ret;
  }
diff --combined fs/splice.c
index eaafa3d8869aefa1967d3eaa6608e9f54adf1b0b,e49336555739efd807b6c9be3fdae3c221d22b94..006ba50f4ece671f48367b644641ab58b04b65d8
@@@ -33,6 -33,8 +33,8 @@@
  #include <linux/gfp.h>
  #include <linux/socket.h>
  #include <linux/compat.h>
+ #include <linux/sched/signal.h>
  #include "internal.h"
  
  /*
@@@ -307,7 -309,7 +309,7 @@@ ssize_t generic_file_splice_read(struc
        idx = to.idx;
        init_sync_kiocb(&kiocb, in);
        kiocb.ki_pos = *ppos;
 -      ret = in->f_op->read_iter(&kiocb, &to);
 +      ret = call_read_iter(in, &kiocb, &to);
        if (ret > 0) {
                *ppos = kiocb.ki_pos;
                file_accessed(in);
index 72166412989423b475dc7eba1445e67bac31343d,774c29b57e82fd1b10897eac0091275998368a26..37c274e61acceee74d792a240b8f3695f0d78085
@@@ -4,7 -4,9 +4,9 @@@
  #include <linux/configfs.h>      /* struct config_group */
  #include <linux/dma-direction.h> /* enum dma_data_direction */
  #include <linux/percpu_ida.h>    /* struct percpu_ida */
+ #include <linux/percpu-refcount.h>
  #include <linux/semaphore.h>     /* struct semaphore */
+ #include <linux/completion.h>
  
  #define TARGET_CORE_VERSION           "v5.0"
  
@@@ -197,7 -199,6 +199,7 @@@ enum tcm_tmreq_table 
        TMR_LUN_RESET           = 5,
        TMR_TARGET_WARM_RESET   = 6,
        TMR_TARGET_COLD_RESET   = 7,
 +      TMR_UNKNOWN             = 0xff,
  };
  
  /* fabric independent task management response values */
@@@ -398,6 -399,7 +400,6 @@@ struct se_tmr_req 
        void                    *fabric_tmr_ptr;
        struct se_cmd           *task_cmd;
        struct se_device        *tmr_dev;
 -      struct se_lun           *tmr_lun;
        struct list_head        tmr_list;
  };
  
@@@ -488,6 -490,8 +490,6 @@@ struct se_cmd 
  #define CMD_T_COMPLETE                (1 << 2)
  #define CMD_T_SENT            (1 << 4)
  #define CMD_T_STOP            (1 << 5)
 -#define CMD_T_DEV_ACTIVE      (1 << 7)
 -#define CMD_T_BUSY            (1 << 9)
  #define CMD_T_TAS             (1 << 10)
  #define CMD_T_FABRIC_STOP     (1 << 11)
        spinlock_t              t_state_lock;
@@@ -730,7 -734,6 +732,7 @@@ struct se_lun 
        struct config_group     lun_group;
        struct se_port_stat_grps port_stat_grps;
        struct completion       lun_ref_comp;
 +      struct completion       lun_shutdown_comp;
        struct percpu_ref       lun_ref;
        struct list_head        lun_dev_link;
        struct hlist_node       link;
@@@ -766,8 -769,6 +768,8 @@@ struct se_device 
        u32                     dev_index;
        u64                     creation_time;
        atomic_long_t           num_resets;
 +      atomic_long_t           aborts_complete;
 +      atomic_long_t           aborts_no_task;
        atomic_long_t           num_cmds;
        atomic_long_t           read_bytes;
        atomic_long_t           write_bytes;
diff --combined kernel/power/hibernate.c
index 9e1cba069385eed17c1103bcf1be290be040e8a8,b8be5c803cdda568b4f6ff31aacb9731e7461db0..a8b978c35a6a9392c3d4721e12f68c9794ac620b
@@@ -10,8 -10,6 +10,8 @@@
   * This file is released under the GPLv2.
   */
  
 +#define pr_fmt(fmt) "PM: " fmt
 +
  #include <linux/export.h>
  #include <linux/suspend.h>
  #include <linux/syscalls.h>
@@@ -23,6 -21,7 +23,7 @@@
  #include <linux/fs.h>
  #include <linux/mount.h>
  #include <linux/pm.h>
+ #include <linux/nmi.h>
  #include <linux/console.h>
  #include <linux/cpu.h>
  #include <linux/freezer.h>
@@@ -106,7 -105,7 +107,7 @@@ EXPORT_SYMBOL(system_entering_hibernati
  #ifdef CONFIG_PM_DEBUG
  static void hibernation_debug_sleep(void)
  {
 -      printk(KERN_INFO "hibernation debug: Waiting for 5 seconds.\n");
 +      pr_info("hibernation debug: Waiting for 5 seconds.\n");
        mdelay(5000);
  }
  
@@@ -252,9 -251,10 +253,9 @@@ void swsusp_show_speed(ktime_t start, k
                centisecs = 1;  /* avoid div-by-zero */
        k = nr_pages * (PAGE_SIZE / 1024);
        kps = (k * 100) / centisecs;
 -      printk(KERN_INFO "PM: %s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n",
 -                      msg, k,
 -                      centisecs / 100, centisecs % 100,
 -                      kps / 1000, (kps % 1000) / 10);
 +      pr_info("%s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n",
 +              msg, k, centisecs / 100, centisecs % 100, kps / 1000,
 +              (kps % 1000) / 10);
  }
  
  /**
@@@ -272,7 -272,8 +273,7 @@@ static int create_image(int platform_mo
  
        error = dpm_suspend_end(PMSG_FREEZE);
        if (error) {
 -              printk(KERN_ERR "PM: Some devices failed to power down, "
 -                      "aborting hibernation\n");
 +              pr_err("Some devices failed to power down, aborting hibernation\n");
                return error;
        }
  
  
        error = syscore_suspend();
        if (error) {
 -              printk(KERN_ERR "PM: Some system devices failed to power down, "
 -                      "aborting hibernation\n");
 +              pr_err("Some system devices failed to power down, aborting hibernation\n");
                goto Enable_irqs;
        }
  
        restore_processor_state();
        trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
        if (error)
 -              printk(KERN_ERR "PM: Error %d creating hibernation image\n",
 -                      error);
 +              pr_err("Error %d creating hibernation image\n", error);
 +
        if (!in_suspend) {
                events_check_enabled = false;
                clear_free_pages();
@@@ -431,7 -433,8 +432,7 @@@ static int resume_target_kernel(bool pl
  
        error = dpm_suspend_end(PMSG_QUIESCE);
        if (error) {
 -              printk(KERN_ERR "PM: Some devices failed to power down, "
 -                      "aborting resume\n");
 +              pr_err("Some devices failed to power down, aborting resume\n");
                return error;
        }
  
@@@ -606,22 -609,6 +607,22 @@@ static void power_down(void
  {
  #ifdef CONFIG_SUSPEND
        int error;
 +
 +      if (hibernation_mode == HIBERNATION_SUSPEND) {
 +              error = suspend_devices_and_enter(PM_SUSPEND_MEM);
 +              if (error) {
 +                      hibernation_mode = hibernation_ops ?
 +                                              HIBERNATION_PLATFORM :
 +                                              HIBERNATION_SHUTDOWN;
 +              } else {
 +                      /* Restore swap signature. */
 +                      error = swsusp_unmark();
 +                      if (error)
 +                              pr_err("Swap will be unusable! Try swapon -a.\n");
 +
 +                      return;
 +              }
 +      }
  #endif
  
        switch (hibernation_mode) {
                if (pm_power_off)
                        kernel_power_off();
                break;
 -#ifdef CONFIG_SUSPEND
 -      case HIBERNATION_SUSPEND:
 -              error = suspend_devices_and_enter(PM_SUSPEND_MEM);
 -              if (error) {
 -                      if (hibernation_ops)
 -                              hibernation_mode = HIBERNATION_PLATFORM;
 -                      else
 -                              hibernation_mode = HIBERNATION_SHUTDOWN;
 -                      power_down();
 -              }
 -              /*
 -               * Restore swap signature.
 -               */
 -              error = swsusp_unmark();
 -              if (error)
 -                      printk(KERN_ERR "PM: Swap will be unusable! "
 -                                      "Try swapon -a.\n");
 -              return;
 -#endif
        }
        kernel_halt();
        /*
         * Valid image is on the disk, if we continue we risk serious data
         * corruption after resume.
         */
 -      printk(KERN_CRIT "PM: Please power down manually\n");
 +      pr_crit("Power down manually\n");
        while (1)
                cpu_relax();
  }
@@@ -650,7 -656,7 +651,7 @@@ static int load_image_and_restore(void
        int error;
        unsigned int flags;
  
 -      pr_debug("PM: Loading hibernation image.\n");
 +      pr_debug("Loading hibernation image.\n");
  
        lock_device_hotplug();
        error = create_basic_memory_bitmaps();
        if (!error)
                hibernation_restore(flags & SF_PLATFORM_MODE);
  
 -      printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
 +      pr_err("Failed to load hibernation image, recovering.\n");
        swsusp_free();
        free_basic_memory_bitmaps();
   Unlock:
@@@ -680,7 -686,7 +681,7 @@@ int hibernate(void
        bool snapshot_test = false;
  
        if (!hibernation_available()) {
 -              pr_debug("PM: Hibernation not available.\n");
 +              pr_debug("Hibernation not available.\n");
                return -EPERM;
        }
  
                goto Exit;
        }
  
 -      printk(KERN_INFO "PM: Syncing filesystems ... ");
 +      pr_info("Syncing filesystems ... \n");
        sys_sync();
 -      printk("done.\n");
 +      pr_info("done.\n");
  
        error = freeze_processes();
        if (error)
                else
                        flags |= SF_CRC32_MODE;
  
 -              pr_debug("PM: writing image.\n");
 +              pr_debug("Writing image.\n");
                error = swsusp_write(flags);
                swsusp_free();
                if (!error) {
                in_suspend = 0;
                pm_restore_gfp_mask();
        } else {
 -              pr_debug("PM: Image restored successfully.\n");
 +              pr_debug("Image restored successfully.\n");
        }
  
   Free_bitmaps:
   Thaw:
        unlock_device_hotplug();
        if (snapshot_test) {
 -              pr_debug("PM: Checking hibernation image\n");
 +              pr_debug("Checking hibernation image\n");
                error = swsusp_check();
                if (!error)
                        error = load_image_and_restore();
@@@ -810,10 -816,10 +811,10 @@@ static int software_resume(void
                goto Unlock;
        }
  
 -      pr_debug("PM: Checking hibernation image partition %s\n", resume_file);
 +      pr_debug("Checking hibernation image partition %s\n", resume_file);
  
        if (resume_delay) {
 -              printk(KERN_INFO "Waiting %dsec before reading resume device...\n",
 +              pr_info("Waiting %dsec before reading resume device ...\n",
                        resume_delay);
                ssleep(resume_delay);
        }
        }
  
   Check_image:
 -      pr_debug("PM: Hibernation image partition %d:%d present\n",
 +      pr_debug("Hibernation image partition %d:%d present\n",
                MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
  
 -      pr_debug("PM: Looking for hibernation image.\n");
 +      pr_debug("Looking for hibernation image.\n");
        error = swsusp_check();
        if (error)
                goto Unlock;
                goto Close_Finish;
        }
  
 -      pr_debug("PM: Preparing processes for restore.\n");
 +      pr_debug("Preparing processes for restore.\n");
        error = freeze_processes();
        if (error)
                goto Close_Finish;
        /* For success case, the suspend path will release the lock */
   Unlock:
        mutex_unlock(&pm_mutex);
 -      pr_debug("PM: Hibernation image not present or could not be loaded.\n");
 +      pr_debug("Hibernation image not present or could not be loaded.\n");
        return error;
   Close_Finish:
        swsusp_close(FMODE_READ);
@@@ -1011,7 -1017,7 +1012,7 @@@ static ssize_t disk_store(struct kobjec
                error = -EINVAL;
  
        if (!error)
 -              pr_debug("PM: Hibernation mode set to '%s'\n",
 +              pr_debug("Hibernation mode set to '%s'\n",
                         hibernation_modes[mode]);
        unlock_system_sleep();
        return error ? error : n;
@@@ -1047,7 -1053,7 +1048,7 @@@ static ssize_t resume_store(struct kobj
        lock_system_sleep();
        swsusp_resume_device = res;
        unlock_system_sleep();
 -      printk(KERN_INFO "PM: Starting manual resume from disk\n");
 +      pr_info("Starting manual resume from disk\n");
        noresume = 0;
        software_resume();
        return n;
diff --combined mm/nommu.c
index 5bbef9cb89eb397533503ca2bdb6f8c4932a9a50,79abed514a4be9d8e37cd214895c61e10c8c2ac6..2d131b97a85169eb11716874b552dc4cef5b4115
@@@ -17,6 -17,7 +17,7 @@@
  
  #include <linux/export.h>
  #include <linux/mm.h>
+ #include <linux/sched/mm.h>
  #include <linux/vmacache.h>
  #include <linux/mman.h>
  #include <linux/swap.h>
@@@ -757,7 -758,7 +758,7 @@@ static void delete_vma_from_mm(struct v
        mm->map_count--;
        for (i = 0; i < VMACACHE_SIZE; i++) {
                /* if the vma is cached, invalidate the entire cache */
-               if (curr->vmacache[i] == vma) {
+               if (curr->vmacache.vmas[i] == vma) {
                        vmacache_invalidate(mm);
                        break;
                }
@@@ -1084,7 -1085,7 +1085,7 @@@ static int do_mmap_shared_file(struct v
  {
        int ret;
  
 -      ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
 +      ret = call_mmap(vma->vm_file, vma);
        if (ret == 0) {
                vma->vm_region->vm_top = vma->vm_region->vm_end;
                return 0;
@@@ -1115,7 -1116,7 +1116,7 @@@ static int do_mmap_private(struct vm_ar
         * - VM_MAYSHARE will be set if it may attempt to share
         */
        if (capabilities & NOMMU_MAP_DIRECT) {
 -              ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
 +              ret = call_mmap(vma->vm_file, vma);
                if (ret == 0) {
                        /* shouldn't return success if we're not sharing */
                        BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
index d502c94b1a82bb463bf1a224e69d2d187dfa53fc,ab9fec00a788e4ab22520a0c645ca14cba1c01a9..af781010753b0f0b53c89934ba79600aea5e2060
  
  #include <linux/module.h>
  #include <linux/slab.h>
+ #include <linux/cred.h>
  #include <linux/dns_resolver.h>
  #include <linux/err.h>
  #include <keys/dns_resolver-type.h>
  #include <keys/user-type.h>
  
@@@ -70,7 -72,7 +72,7 @@@ int dns_query(const char *type, const c
              const char *options, char **_result, time64_t *_expiry)
  {
        struct key *rkey;
 -      const struct user_key_payload *upayload;
 +      struct user_key_payload *upayload;
        const struct cred *saved_cred;
        size_t typelen, desclen;
        char *desc, *cp;
        if (ret)
                goto put;
  
 -      upayload = user_key_payload(rkey);
 +      upayload = user_key_payload_locked(rkey);
        len = upayload->datalen;
  
        ret = -ENOMEM;
diff --combined security/selinux/hooks.c
index 0a4b4b040e0ab0e2ea954744b4d3d023481896d1,57ff536961446e5633c22201bee9216f512f1815..0c2ac318aa7fb8bc11830e7c8c10fe6730f49c46
@@@ -28,7 -28,8 +28,8 @@@
  #include <linux/kernel.h>
  #include <linux/tracehook.h>
  #include <linux/errno.h>
- #include <linux/sched.h>
+ #include <linux/sched/signal.h>
+ #include <linux/sched/task.h>
  #include <linux/lsm_hooks.h>
  #include <linux/xattr.h>
  #include <linux/capability.h>
@@@ -480,13 -481,12 +481,13 @@@ static int selinux_is_sblabel_mnt(struc
                sbsec->behavior == SECURITY_FS_USE_NATIVE ||
                /* Special handling. Genfs but also in-core setxattr handler */
                !strcmp(sb->s_type->name, "sysfs") ||
 -              !strcmp(sb->s_type->name, "cgroup") ||
 -              !strcmp(sb->s_type->name, "cgroup2") ||
                !strcmp(sb->s_type->name, "pstore") ||
                !strcmp(sb->s_type->name, "debugfs") ||
                !strcmp(sb->s_type->name, "tracefs") ||
 -              !strcmp(sb->s_type->name, "rootfs");
 +              !strcmp(sb->s_type->name, "rootfs") ||
 +              (selinux_policycap_cgroupseclabel &&
 +               (!strcmp(sb->s_type->name, "cgroup") ||
 +                !strcmp(sb->s_type->name, "cgroup2")));
  }
  
  static int sb_finish_set_opts(struct super_block *sb)