Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
Andreas Herrmann <aherrman@de.ibm.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
Andrew Morton <akpm@linux-foundation.org>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andy Adamson <andros@citi.umich.edu>
"qcom,kpss-acc-v1"
"qcom,kpss-acc-v2"
"rockchip,rk3066-smp"
+ "ste,dbx500-smp"
- cpu-release-addr
Usage: required for systems that have an "enable-method"
described below.
Optional property:
-- link : Should be a phandle to another switch's DSA port.
+- link : Should be a list of phandles to other switches' DSA ports.
This property is only used when switches are being
- chained/cascaded together.
+ chained/cascaded together. This port is used as the outgoing port
+ towards each linked port, which can be more than one hop away.
- phy-handle : Phandle to a PHY on an external MDIO bus, not the
switch internal one. See
label = "cpu";
};
- switch0uplink: port@6 {
+ switch0port6: port@6 {
reg = <6>;
label = "dsa";
- link = <&switch1uplink>;
+ link = <&switch1port0
+ &switch2port0>;
};
};
reg = <17 1>; /* MDIO address 17, switch 1 in tree */
mii-bus = <&mii_bus1>;
- switch1uplink: port@0 {
+ switch1port0: port@0 {
reg = <0>;
label = "dsa";
- link = <&switch0uplink>;
+ link = <&switch0port6>;
+ };
+ switch1port1: port@1 {
+ reg = <1>;
+ label = "dsa";
+ link = <&switch2port1>;
+ };
+ };
+
+ switch@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <18 2>; /* MDIO address 18, switch 2 in tree */
+ mii-bus = <&mii_bus1>;
+
+ switch2port0: port@0 {
+ reg = <0>;
+ label = "dsa";
+ link = <&switch1port1
+ &switch0port6>;
};
};
};
--- /dev/null
+
+Netdev private dataroom for 6lowpan interfaces:
+
+All 6LoWPAN-capable net devices, i.e. all interfaces with ARPHRD_6LOWPAN,
+must have "struct lowpan_priv" placed at the beginning of netdev_priv.
+
+The priv_size of each interface should be calculated as:
+
+	dev->priv_size = LOWPAN_PRIV_SIZE(LL_6LOWPAN_PRIV_DATA);
+
+where LL_6LOWPAN_PRIV_DATA is the size of the link-layer 6LoWPAN private
+data struct. To access the LL_6LOWPAN_PRIV_DATA structure you can cast:
+
+	lowpan_priv(dev)->priv;
+
+to your LL_6LOWPAN_PRIV_DATA structure.
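+
+For example, a hypothetical link layer "foobar" (the foobar names are
+placeholders) could declare its private data and set priv_size as:
+
+	struct lowpan_priv_foobar {
+		int bar;
+	};
+
+	dev->priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_priv_foobar));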
+
+Before registering the lowpan netdev interface you must run:
+
+ lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);
+
+where LOWPAN_LLTYPE_FOOBAR is a define for your 6LoWPAN link-layer type from
+enum lowpan_lltypes.
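+
+A minimal registration sequence is then (a sketch, error handling omitted):
+
+	lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);
+	err = register_netdev(dev);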
+
+As an example, you can define a helper to access your private data:
+
+static inline struct lowpan_priv_foobar *
+lowpan_foobar_priv(struct net_device *dev)
+{
+	return (struct lowpan_priv_foobar *)lowpan_priv(dev)->priv;
+}
+
+switch (dev->type) {
+case ARPHRD_6LOWPAN:
+	lpriv = lowpan_priv(dev);
+	/* do great stuff which is ARPHRD_6LOWPAN related */
+	switch (lpriv->lltype) {
+ case LOWPAN_LLTYPE_FOOBAR:
+		/* do FOOBAR 6LoWPAN handling here */
+ lowpan_foobar_priv(dev)->bar = foo;
+ break;
+ ...
+ }
+ break;
+...
+}
+
+In the generic 6lowpan branch ("net/6lowpan") you can omit the check on
+ARPHRD_6LOWPAN, because these functions are guaranteed to be called only
+by ARPHRD_6LOWPAN interfaces.
Allows you to write a number, which can be used as required.
Default value is 0.
+xfrm4_gc_thresh - INTEGER
+ The threshold at which we will start garbage collecting for IPv4
+ destination cache entries. At twice this value the system will
+ refuse new allocations.
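+
+	For example, to raise the IPv4 threshold at runtime (a sketch; the
+	proc path is assumed from the sysctl name):
+
+		# echo 32768 > /proc/sys/net/ipv4/xfrm4_gc_thresh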
+
Alexey Kuznetsov.
kuznet@ms2.inr.ac.ru
otherwise the minimal space between responses in milliseconds.
Default: 1000
+xfrm6_gc_thresh - INTEGER
+ The threshold at which we will start garbage collecting for IPv6
+ destination cache entries. At twice this value the system will
+ refuse new allocations.
+
IPv6 Update by:
Pekka Savola <pekkas@netcore.fi>
S: Maintained
F: net/6lowpan/
F: include/net/6lowpan.h
+F: Documentation/networking/6lowpan.txt
6PACK NETWORK DRIVER FOR AX.25
M: Andreas Koensgen <ajk@comnets.uni-bremen.de>
F: drivers/gpu/drm/rockchip/
F: Documentation/devicetree/bindings/video/rockchip*
+DRM DRIVERS FOR STI
+M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M: Vincent Abriou <vincent.abriou@st.com>
+L: dri-devel@lists.freedesktop.org
+T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+S: Maintained
+F: drivers/gpu/drm/sti
+F: Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
VERSION = 4
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*
ranges = <0 0x2000 0x2000>;
scm_conf: scm_conf@0 {
- compatible = "syscon";
+ compatible = "syscon", "simple-bus";
reg = <0x0 0x1400>;
#address-cells = <1>;
#size-cells = <1>;
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
<&clks IMX6QDL_CLK_LVDS1_GATE>,
<&clks IMX6QDL_CLK_PCIE_REF_125M>;
<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
};
};
+
+ mdio: mdio@24200f00 {
+ compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x24200f00 0x100>;
+ status = "disabled";
+ clocks = <&clkcpgmac>;
+ clock-names = "fck";
+ bus_freq = <2500000>;
+ };
/include/ "k2e-netcp.dtsi"
};
};
-
-&mdio {
- reg = <0x24200f00 0x100>;
-};
#gpio-cells = <2>;
gpio,syscon-dev = <&devctrl 0x25c>;
};
+
+ mdio: mdio@02090300 {
+ compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x02090300 0x100>;
+ status = "disabled";
+ clocks = <&clkcpgmac>;
+ clock-names = "fck";
+ bus_freq = <2500000>;
+ };
/include/ "k2hk-netcp.dtsi"
};
};
};
soc {
-
/include/ "k2l-clocks.dtsi"
uart2: serial@02348400 {
#gpio-cells = <2>;
gpio,syscon-dev = <&devctrl 0x24c>;
};
+
+ mdio: mdio@26200f00 {
+ compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x26200f00 0x100>;
+ status = "disabled";
+ clocks = <&clkcpgmac>;
+ clock-names = "fck";
+ bus_freq = <2500000>;
+ };
/include/ "k2l-netcp.dtsi"
};
};
/* Pin muxed. Enabled and configured by Bootloader */
status = "disabled";
};
-
-&mdio {
- reg = <0x26200f00 0x100>;
-};
1 0 0x21000A00 0x00000100>;
};
- mdio: mdio@02090300 {
- compatible = "ti,keystone_mdio", "ti,davinci_mdio";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x02090300 0x100>;
- status = "disabled";
- clocks = <&clkpa>;
- clock-names = "fck";
- bus_freq = <2500000>;
- };
-
kirq0: keystone_irq@26202a0 {
compatible = "ti,keystone-irq";
interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
};
scm_conf: scm_conf@270 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x270 0x240>;
#address-cells = <1>;
#size-cells = <1>;
};
omap4_padconf_global: omap4_padconf_global@5a0 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x5a0 0x170>;
#address-cells = <1>;
#size-cells = <1>;
};
omap5_padconf_global: omap5_padconf_global@5a0 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x5a0 0xec>;
#address-cells = <1>;
#size-cells = <1>;
#include "skeleton.dtsi"
/ {
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ enable-method = "ste,dbx500-smp";
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+ };
+ CPU0: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0x300>;
+ };
+ CPU1: cpu@301 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0x301>;
+ };
+ };
+
soc {
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;
ranges;
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu-map {
- cluster0 {
- core0 {
- cpu = <&CPU0>;
- };
- core1 {
- cpu = <&CPU1>;
- };
- };
- };
- CPU0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- reg = <0>;
- };
- CPU1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- reg = <1>;
- };
- };
-
ptm@801ae000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
reg = <0x801ae000 0x1000>;
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
+ENDPROC(ret_fast_syscall)
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
sub lr, r4, r5 @ mmu has been enabled
add r3, r7, lr
ldrd r4, [r3, #0] @ get secondary_data.pgdir
+ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
+ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
+ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
badr lr, __enable_mmu @ return address
mov r13, r12 @ __secondary_switched address
*/
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xtime_coarse;
struct timespec64 *wtm = &tk->wall_to_monotonic;
if (!cntvct_ok) {
vdso_write_begin(vdso_data);
- xtime_coarse = __current_kernel_time();
vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
- vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
- vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift);
vdso_data->wtm_clock_sec = wtm->tv_sec;
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
pd->base = of_iomap(np, 0);
if (!pd->base) {
pr_warn("%s: failed to map memory\n", __func__);
- kfree(pd->pd.name);
+ kfree_const(pd->pd.name);
kfree(pd);
- of_node_put(np);
continue;
}
VDSO_LDFLAGS += -nostdlib -shared
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds
SAVE_STATIC
move s0, t2
move a0, sp
- daddiu a1, v0, __NR_64_Linux
+ move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
SAVE_STATIC
move s0, t2
move a0, sp
- daddiu a1, v0, __NR_N32_Linux
+ move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
*/
andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
movl RIP(%rsp), %ecx /* User %eip */
+ movq RAX(%rsp), %rax
RESTORE_RSI_RDI
xorl %edx, %edx /* Do not leak kernel information */
xorq %r8, %r8
1: setbe %al /* 1 if error, 0 if not */
movzbl %al, %edi /* zero-extend that into %edi */
call __audit_syscall_exit
- movq RAX(%rsp), %rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
RESTORE_RSI_RDI_RDX
movl RIP(%rsp), %ecx
movl EFLAGS(%rsp), %r11d
+ movq RAX(%rsp), %rax
xorq %r10, %r10
xorq %r9, %r9
xorq %r8, %r8
if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
cpuc->shared_regs = allocate_shared_regs(cpu);
if (!cpuc->shared_regs)
- return NOTIFY_BAD;
+ goto err;
}
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
if (!cpuc->constraint_list)
- return NOTIFY_BAD;
+ goto err_shared_regs;
cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
- if (!cpuc->excl_cntrs) {
- kfree(cpuc->constraint_list);
- kfree(cpuc->shared_regs);
- return NOTIFY_BAD;
- }
+ if (!cpuc->excl_cntrs)
+ goto err_constraint_list;
+
cpuc->excl_thread_id = 0;
}
return NOTIFY_OK;
+
+err_constraint_list:
+ kfree(cpuc->constraint_list);
+ cpuc->constraint_list = NULL;
+
+err_shared_regs:
+ kfree(cpuc->shared_regs);
+ cpuc->shared_regs = NULL;
+
+err:
+ return NOTIFY_BAD;
}
static void intel_pmu_cpu_starting(int cpu)
cpumask_set_cpu(cpu, &cqm_cpumask);
}
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
{
struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu);
unsigned int cpu = (unsigned long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- intel_cqm_cpu_prepare(cpu);
- break;
case CPU_DOWN_PREPARE:
intel_cqm_cpu_exit(cpu);
break;
case CPU_STARTING:
+ intel_cqm_cpu_starting(cpu);
cqm_pick_event_reader(cpu);
break;
}
goto out;
for_each_online_cpu(i) {
- intel_cqm_cpu_prepare(i);
+ intel_cqm_cpu_starting(i);
cqm_pick_event_reader(i);
}
struct desc_struct *desc;
unsigned long base;
- seg &= ~7UL;
+ seg >>= 3;
mutex_lock(&child->mm->context.lock);
if (unlikely(!child->mm->context.ldt ||
- (seg >> 3) >= child->mm->context.ldt->size))
+ seg >= child->mm->context.ldt->size))
addr = -1L; /* bogus selector, access would fault */
else {
desc = &child->mm->context.ldt->entries[seg];
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
- kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+ adjust_tsc_offset_guest(vcpu, adj);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
static void process_smi(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs, ds;
+ struct desc_ptr dt;
char buf[512];
u32 cr0;
kvm_x86_ops->set_cr4(vcpu, 0);
+ /* Undocumented: IDT limit is set to zero on entry to SMM. */
+ dt.address = dt.size = 0;
+ kvm_x86_ops->set_idt(vcpu, &dt);
+
__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
#include <asm/uaccess.h>
#include <asm/traps.h>
-#include <asm/desc.h>
#include <asm/user.h>
#include <asm/fpu/internal.h>
math_abort(FPU_info, SIGILL);
}
- code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+ code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
if (SEG_D_SIZE(code_descriptor)) {
/* The above test may be wrong, the book is not clear */
/* Segmented 32 bit protected mode */
#include <linux/kernel.h>
#include <linux/mm.h>
-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+ static struct desc_struct zero_desc;
+ struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ seg >>= 3;
+	mutex_lock(&current->mm->context.lock);
+ if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+ ret = current->mm->context.ldt->entries[seg];
+	mutex_unlock(&current->mm->context.lock);
+#endif
+ return ret;
+}
+
#define SEG_D_SIZE(x) ((x).b & (3 << 21))
#define SEG_G_BIT(x) ((x).b & (1 << 23))
#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)
#include <linux/stddef.h>
#include <asm/uaccess.h>
-#include <asm/desc.h>
#include "fpu_system.h"
#include "exception.h"
addr->selector = PM_REG_(segment);
}
- descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+ descriptor = FPU_get_ldt_descriptor(addr->selector);
base_address = SEG_BASE_ADDR(descriptor);
address = base_address + offset;
limit = base_address
select PARAVIRT_CLOCK
select XEN_HAVE_PVMMU
depends on X86_64 || (X86_32 && X86_PAE)
- depends on X86_TSC
+ depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port. Enabling this will allow the
kernel to boot in a paravirtualized environment under the
config XEN_DOM0
def_bool y
depends on XEN && PCI_XEN && SWIOTLB_XEN
- depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+ depends on X86_IO_APIC && ACPI && PCI
config XEN_PVHVM
def_bool y
* Description:
* Enables a low level driver to set a hard upper limit,
* max_hw_sectors, on the size of requests. max_hw_sectors is set by
- * the device driver based upon the combined capabilities of I/O
- * controller and storage device.
+ * the device driver based upon the capabilities of the I/O
+ * controller.
*
* max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
- struct scatterlist *assoc1;
- struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
unsigned int cryptlen = req->cryptlen;
struct page *dstp;
cryptlen += ivsize;
}
- if (sg_is_last(assoc))
- return -EINVAL;
-
- assoc1 = assoc + 1;
- if (sg_is_last(assoc1))
- return -EINVAL;
-
- assoc2 = assoc + 2;
- if (!sg_is_last(assoc2))
+ if (assoc->length < 12)
return -EINVAL;
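+	/* Assumed assoc layout, inferred from the offsets below: SPI at
+	 * offset 0, seq_hi at offset 4, seq_lo at offset 8. SPI and
+	 * seq_lo are hashed up front; seq_hi is hashed as the trailer.
+	 */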
sg_init_table(hsg, 2);
- sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
- sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+ sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+ sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
sg_init_table(tsg, 1);
- sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+ sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
areq_ctx->cryptlen = cryptlen;
- areq_ctx->headlen = assoc->length + assoc2->length;
- areq_ctx->trailen = assoc1->length;
+ areq_ctx->headlen = 8;
+ areq_ctx->trailen = 4;
areq_ctx->sg = dst;
areq_ctx->complete = authenc_esn_geniv_ahash_done;
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
- struct scatterlist *assoc1;
- struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
struct page *srcp;
u8 *vsrc;
cryptlen += ivsize;
}
- if (sg_is_last(assoc))
- return -EINVAL;
-
- assoc1 = assoc + 1;
- if (sg_is_last(assoc1))
- return -EINVAL;
-
- assoc2 = assoc + 2;
- if (!sg_is_last(assoc2))
+ if (assoc->length < 12)
return -EINVAL;
sg_init_table(hsg, 2);
- sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
- sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+ sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+ sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
sg_init_table(tsg, 1);
- sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+ sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
areq_ctx->cryptlen = cryptlen;
- areq_ctx->headlen = assoc->length + assoc2->length;
- areq_ctx->trailen = assoc1->length;
+ areq_ctx->headlen = 8;
+ areq_ctx->trailen = 4;
areq_ctx->sg = src;
areq_ctx->complete = authenc_esn_verify_ahash_done;
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <acpi/video.h>
ACPI_MODULE_NAME("video");
static bool backlight_notifier_registered;
static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;
static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
{ },
};
+/* This uses a workqueue to avoid various locking ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+ if (acpi_video_get_backlight_type() != acpi_backlight_video)
+ acpi_video_unregister_backlight();
+}
+
static int acpi_video_backlight_notify(struct notifier_block *nb,
unsigned long val, void *bd)
{
/* A raw bl registering may change video -> native */
if (backlight->props.type == BACKLIGHT_RAW &&
- val == BACKLIGHT_REGISTERED &&
- acpi_video_get_backlight_type() != acpi_backlight_video)
- acpi_video_unregister_backlight();
+ val == BACKLIGHT_REGISTERED)
+ schedule_work(&backlight_notify_work);
return NOTIFY_OK;
}
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
+ INIT_WORK(&backlight_notify_work,
+ acpi_video_backlight_notify_work);
backlight_nb.notifier_call = acpi_video_backlight_notify;
backlight_nb.priority = 0;
if (backlight_register_notifier(&backlight_nb) == 0)
* Other architectures (e.g., ARM) either do not support big endian, or
* else leave I/O in little endian mode.
*/
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(addr);
else
return readl_relaxed(addr);
static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
{
/* See brcm_sata_readreg() comments */
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(val, addr);
else
writel_relaxed(val, addr);
priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
}
+#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
brcm_sata_phys_enable(priv);
return ahci_platform_resume(dev);
}
+#endif
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
* RETURNS:
* Block address read from @tf.
*/
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
u64 block = 0;
- if (!dev || tf->flags & ATA_TFLAG_LBA) {
+ if (tf->flags & ATA_TFLAG_LBA) {
if (tf->flags & ATA_TFLAG_LBA48) {
block |= (u64)tf->hob_lbah << 40;
block |= (u64)tf->hob_lbam << 32;
return 0;
}
-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
- unsigned int err_mask;
-
- if (!ata_id_has_sense_reporting(dev->id))
- return;
-
- if (ata_id_sense_reporting_enabled(dev->id))
- return;
-
- err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
- if (err_mask) {
- ata_dev_dbg(dev,
- "failed to enable Sense Data Reporting, Emask 0x%x\n",
- err_mask);
- }
-}
-
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
dev->devslp_timing[i] = sata_setting[j];
}
}
- ata_dev_config_sense_reporting(dev);
+
dev->cdb_len = 16;
}
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
- if (ata_id_has_ncq_autosense(dev->id))
- tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
return 0;
}
return err_mask;
}
-/**
- * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- * @dfl_sense_key: default sense key to use
- *
- * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- * SENSE. This function is EH helper.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * encoded sense data on success, 0 on failure or if sense data
- * is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
- struct scsi_cmnd *cmd)
-{
- struct ata_device *dev = qc->dev;
- struct ata_taskfile tf;
- unsigned int err_mask;
-
- if (!cmd)
- return 0;
-
- DPRINTK("ATA request sense\n");
- ata_dev_warn(dev, "request sense\n");
- if (!ata_id_sense_reporting_enabled(dev->id)) {
- ata_dev_warn(qc->dev, "sense data reporting disabled\n");
- return 0;
- }
- ata_tf_init(dev, &tf);
-
- tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
- tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
- tf.command = ATA_CMD_REQ_SENSE_DATA;
- tf.protocol = ATA_PROT_NODATA;
-
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
- /*
- * ACS-4 states:
- * The device may set the SENSE DATA AVAILABLE bit to one in the
- * STATUS field and clear the ERROR bit to zero in the STATUS field
- * to indicate that the command returned completion without an error
- * and the sense data described in table 306 is available.
- *
- * IOW the 'ATA_SENSE' bit might not be set even though valid
- * sense data is available.
- * So check for both.
- */
- if ((tf.command & ATA_SENSE) ||
- tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
- ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
- tf.lbah, tf.lbam, tf.lbal);
- } else {
- ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
- tf.command, err_mask);
- }
- return err_mask;
-}
-
/**
* atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
* @dev: device to perform REQUEST_SENSE to
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
- if (qc->result_tf.auxiliary) {
- char sense_key, asc, ascq;
-
- sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
- asc = (qc->result_tf.auxiliary >> 8) & 0xff;
- ascq = qc->result_tf.auxiliary & 0xff;
- ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
- sense_key, asc, ascq);
- ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
- ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- }
-
ehc->i.err_mask &= ~AC_ERR_DEV;
}
return ATA_EH_RESET;
}
- /*
- * Sense data reporting does not work if the
- * device fault bit is set.
- */
- if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
- !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
- tmp = ata_eh_request_sense(qc, qc->scsicmd);
- if (tmp)
- qc->err_mask |= tmp;
- else
- ata_scsi_set_sense_information(qc->scsicmd, tf);
- } else {
- ata_dev_warn(qc->dev, "sense data available but port frozen\n");
- }
- }
-
- /* Set by NCQ autosense or request sense above */
- if (qc->flags & ATA_QCFLAG_SENSE_VALID)
- return 0;
-
if (stat & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
else
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
- ATA_SENSE | ATA_ERR)) {
+ ATA_ERR)) {
if (res->command & ATA_BUSY)
ata_dev_err(qc->dev, "status: { Busy }\n");
else
- ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+ ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
res->command & ATA_DRDY ? "DRDY " : "",
res->command & ATA_DF ? "DF " : "",
res->command & ATA_DRQ ? "DRQ " : "",
- res->command & ATA_SENSE ? "SENSE " : "",
res->command & ATA_ERR ? "ERR " : "");
}
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
- if (!cmd)
- return;
-
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
}
-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf)
-{
- u64 information;
-
- if (!cmd)
- return;
-
- information = ata_tf_read_block(tf, NULL);
- scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
((cdb[2] & 0x20) || need_sense)) {
ata_gen_passthru_sense(qc);
} else {
- if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
- cmd->result = SAM_STAT_CHECK_CONDITION;
- } else if (!need_sense) {
+ if (!need_sense) {
cmd->result = SAM_STAT_GOOD;
} else {
/* TODO: decide which descriptor format to use
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
- struct ata_device *dev);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
readl(mmio + PDC_SDRAM_CONTROL);
/* Turn on for ECC */
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- PDC_DIMM_SPD_TYPE, &spd0);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_TYPE, &spd0)) {
+ pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+ PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+ return 1;
+ }
if (spd0 == 0x02) {
data |= (0x01 << 16);
writel(data, mmio + PDC_SDRAM_CONTROL);
	/* ECC initialization. */
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- PDC_DIMM_SPD_TYPE, &spd0);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_TYPE, &spd0)) {
+ pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+ PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+ return 1;
+ }
if (spd0 == 0x02) {
void *buf;
VPRINTK("Start ECC initialization\n");
EXPORT_SYMBOL_GPL(device_dma_is_coherent);
/**
- * device_get_phy_mode - Get phy mode for given device_node
+ * device_get_phy_mode - Get phy mode for given device
* @dev: Pointer to the given device
*
* The function gets phy interface string from property 'phy-mode' or
{
int ret = device_property_read_u8_array(dev, name, addr, alen);
- if (ret == 0 && is_valid_ether_addr(addr))
+ if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
return addr;
return NULL;
}
/**
- * Search the device tree for the best MAC address to use. 'mac-address' is
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev: Pointer to the device
+ * @addr: Address of buffer to store the MAC in
+ * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
+ *
+ * Search the firmware node for the best MAC address to use. 'mac-address' is
 * checked first, because that is supposed to contain the "most recent" MAC
* address. If that isn't set, then 'local-mac-address' is checked next,
* because that is the default address. If that isn't set, then the obsolete
* MAC address.
*
* All-zero MAC addresses are rejected, because those could be properties that
- * exist in the device tree, but were not set by U-Boot. For example, the
- * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
- * addresses. Some older U-Boots only initialized 'local-mac-address'. In
- * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
- * but is all zeros.
+ * exist in the firmware tables, but were not updated by the firmware. For
+ * example, the DTS could define 'mac-address' and 'local-mac-address', with
+ * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'.
+ * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
+ * exists but is all zeros.
*/
void *device_get_mac_address(struct device *dev, char *addr, int alen)
{
config BCMA_DRIVER_GPIO
bool "BCMA GPIO driver"
depends on BCMA && GPIOLIB
- select IRQ_DOMAIN if BCMA_HOST_SOC
+ select GPIOLIB_IRQCHIP if BCMA_HOST_SOC
help
Driver to provide access to the GPIO pins of the bcma bus.
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus);
/* scan.c */
void bcma_detect_chip(struct bcma_bus *bus);
* Licensed under the GNU/GPL. See COPYING for details.
*/
-#include <linux/gpio.h>
-#include <linux/irq.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
}
#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
-static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
-{
- struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
-
- if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
- return irq_find_mapping(cc->irq_domain, gpio);
- else
- return -EINVAL;
-}
static void bcma_gpio_irq_unmask(struct irq_data *d)
{
- struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
int gpio = irqd_to_hwirq(d);
u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));
static void bcma_gpio_irq_mask(struct irq_data *d)
{
- struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
int gpio = irqd_to_hwirq(d);
bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
{
struct bcma_drv_cc *cc = dev_id;
+ struct gpio_chip *gc = &cc->gpio;
u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN);
u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ);
u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL);
if (!irqs)
return IRQ_NONE;
- for_each_set_bit(gpio, &irqs, cc->gpio.ngpio)
- generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio));
+ for_each_set_bit(gpio, &irqs, gc->ngpio)
+ generic_handle_irq(irq_find_mapping(gc->irqdomain, gpio));
bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
return IRQ_HANDLED;
}
-static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
{
struct gpio_chip *chip = &cc->gpio;
- int gpio, hwirq, err;
+ int hwirq, err;
if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
return 0;
- cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
- &irq_domain_simple_ops, cc);
- if (!cc->irq_domain) {
- err = -ENODEV;
- goto err_irq_domain;
- }
- for (gpio = 0; gpio < chip->ngpio; gpio++) {
- int irq = irq_create_mapping(cc->irq_domain, gpio);
-
- irq_set_chip_data(irq, cc);
- irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip,
- handle_simple_irq);
- }
-
hwirq = bcma_core_irq(cc->core, 0);
err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
cc);
if (err)
- goto err_req_irq;
+ return err;
bcma_chipco_gpio_intmask(cc, ~0, 0);
bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
- return 0;
-
-err_req_irq:
- for (gpio = 0; gpio < chip->ngpio; gpio++) {
- int irq = irq_find_mapping(cc->irq_domain, gpio);
-
- irq_dispose_mapping(irq);
+ err = gpiochip_irqchip_add(chip,
+ &bcma_gpio_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
+ if (err) {
+ free_irq(hwirq, cc);
+ return err;
}
- irq_domain_remove(cc->irq_domain);
-err_irq_domain:
- return err;
+
+ return 0;
}
-static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
{
- struct gpio_chip *chip = &cc->gpio;
- int gpio;
-
if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
return;
bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
free_irq(bcma_core_irq(cc->core, 0), cc);
- for (gpio = 0; gpio < chip->ngpio; gpio++) {
- int irq = irq_find_mapping(cc->irq_domain, gpio);
-
- irq_dispose_mapping(irq);
- }
- irq_domain_remove(cc->irq_domain);
}
#else
-static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
{
return 0;
}
-static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
{
}
#endif
chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
-#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
- chip->to_irq = bcma_gpio_to_irq;
-#endif
+ chip->owner = THIS_MODULE;
+ chip->dev = bcma_bus_get_host_dev(bus);
#if IS_BUILTIN(CONFIG_OF)
if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
chip->of_node = cc->core->dev.of_node;
else
chip->base = -1;
- err = bcma_gpio_irq_domain_init(cc);
+ err = gpiochip_add(chip);
if (err)
return err;
- err = gpiochip_add(chip);
+ err = bcma_gpio_irq_init(cc);
if (err) {
- bcma_gpio_irq_domain_exit(cc);
+ gpiochip_remove(chip);
return err;
}
int bcma_gpio_unregister(struct bcma_drv_cc *cc)
{
- bcma_gpio_irq_domain_exit(cc);
+ bcma_gpio_irq_exit(cc);
gpiochip_remove(&cc->gpio);
return 0;
}
#include "bcma_private.h"
#include <linux/module.h>
+#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
+#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
}
}
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
+{
+ switch (bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
+ if (bus->host_pci)
+ return &bus->host_pci->dev;
+ else
+ return NULL;
+ case BCMA_HOSTTYPE_SOC:
+ if (bus->host_pdev)
+ return &bus->host_pdev->dev;
+ else
+ return NULL;
+ case BCMA_HOSTTYPE_SDIO:
+ if (bus->host_sdio)
+ return &bus->host_sdio->dev;
+ else
+ return NULL;
+ }
+ return NULL;
+}
+
void bcma_init_bus(struct bcma_bus *bus)
{
mutex_lock(&bcma_buses_mutex);
{
int err;
struct bcma_device *core;
+ struct device *dev;
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
bcma_core_pci_early_init(&bus->drv_pci[0]);
}
+ dev = bcma_bus_get_host_dev(bus);
/* TODO: remove check for IS_BUILTIN(CONFIG_BCMA) check when
* of_default_bus_match_table is exported or in some other way
* accessible. This is just a temporary workaround.
*/
- if (IS_BUILTIN(CONFIG_BCMA) && bus->host_pdev) {
- struct device *dev = &bus->host_pdev->dev;
-
+ if (IS_BUILTIN(CONFIG_BCMA) && dev) {
of_platform_populate(dev->of_node, of_default_bus_match_table,
NULL, dev);
}
kfree(meta);
}
-static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
+static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
size_t num_pages;
- char pool_name[8];
struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
goto out_error;
}
- snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
return -EINVAL;
disksize = PAGE_ALIGN(disksize);
- meta = zram_meta_alloc(zram->disk->first_minor, disksize);
+ meta = zram_meta_alloc(zram->disk->disk_name, disksize);
if (!meta)
return -ENOMEM;
tristate
select FW_LOADER
+config BT_QCA
+ tristate
+ select FW_LOADER
+
config BT_HCIBTUSB
tristate "HCI USB driver"
depends on USB
Say Y here to compile support for Broadcom protocol.
+config BT_HCIUART_QCA
+ bool "Qualcomm Atheros protocol support"
+ depends on BT_HCIUART
+ select BT_HCIUART_H4
+ select BT_QCA
+ help
+	  The Qualcomm Atheros protocol supports the HCI In-Band Sleep
+	  feature over the serial port interface (H4) between the controller
+	  and the host. This protocol is required for UART clock control
+	  on QCA Bluetooth devices.
+
+ Say Y here to compile support for QCA protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
obj-$(CONFIG_BT_WILINK) += btwilink.o
obj-$(CONFIG_BT_BCM) += btbcm.o
obj-$(CONFIG_BT_RTL) += btrtl.o
+obj-$(CONFIG_BT_QCA) += btqca.o
btmrvl-y := btmrvl_main.o
btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o
hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o
hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o
+hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
hci_uart-objs := $(hci_uart-y)
ccflags-y += -D__CHECK_ENDIAN__
}
}
- sdio_release_host(card->func);
-
/*
* winner or not, with this test the FW synchronizes when the
* module can continue its initialization
return -ETIMEDOUT;
}
+ sdio_release_host(card->func);
+
return 0;
done:
--- /dev/null
+/*
+ * Bluetooth support for Qualcomm Atheros chips
+ *
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btqca.h"
+
+#define VERSION "0.1"
+
+static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
+{
+ struct sk_buff *skb;
+ struct edl_event_hdr *edl;
+ struct rome_version *ver;
+ char cmd;
+ int err = 0;
+
+ BT_DBG("%s: ROME Patch Version Request", hdev->name);
+
+ cmd = EDL_PATCH_VER_REQ_CMD;
+ skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+ &cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name,
+ err);
+ return err;
+ }
+
+ if (skb->len != sizeof(*edl) + sizeof(*ver)) {
+ BT_ERR("%s: Version size mismatch len %d", hdev->name,
+ skb->len);
+ err = -EILSEQ;
+ goto out;
+ }
+
+ edl = (struct edl_event_hdr *)(skb->data);
+ if (!edl || !edl->data) {
+ BT_ERR("%s: TLV with no header or no data", hdev->name);
+ err = -EILSEQ;
+ goto out;
+ }
+
+ if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+ edl->rtype != EDL_APP_VER_RES_EVT) {
+ BT_ERR("%s: Wrong packet received %d %d", hdev->name,
+ edl->cresp, edl->rtype);
+ err = -EIO;
+ goto out;
+ }
+
+ ver = (struct rome_version *)(edl->data);
+
+ BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
+ BT_DBG("%s: Patch :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
+ BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
+ BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
+
+	/* The ROME chipset version is a combination of the SoC id and
+	 * the ROM version: the upper 2 bytes are taken from the SoC id
+	 * and the lower 2 bytes from the ROM version.
+	 */
+ *rome_version = (le32_to_cpu(ver->soc_id) << 16) |
+ (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+
+out:
+ kfree_skb(skb);
+
+ return err;
+}
+
+static int rome_reset(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ int err;
+
+ BT_DBG("%s: ROME HCI_RESET", hdev->name);
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: Reset failed (%d)", hdev->name, err);
+ return err;
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static void rome_tlv_check_data(struct rome_config *config,
+ const struct firmware *fw)
+{
+ const u8 *data;
+ u32 type_len;
+ u16 tag_id, tag_len;
+ int idx, length;
+ struct tlv_type_hdr *tlv;
+ struct tlv_type_patch *tlv_patch;
+ struct tlv_type_nvm *tlv_nvm;
+
+ tlv = (struct tlv_type_hdr *)fw->data;
+
+ type_len = le32_to_cpu(tlv->type_len);
+ length = (type_len >> 8) & 0x00ffffff;
+
+ BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
+ BT_DBG("Length\t\t : %d bytes", length);
+
+ switch (config->type) {
+ case TLV_TYPE_PATCH:
+ tlv_patch = (struct tlv_type_patch *)tlv->data;
+ BT_DBG("Total Length\t\t : %d bytes",
+ le32_to_cpu(tlv_patch->total_size));
+ BT_DBG("Patch Data Length\t : %d bytes",
+ le32_to_cpu(tlv_patch->data_length));
+ BT_DBG("Signing Format Version : 0x%x",
+ tlv_patch->format_version);
+ BT_DBG("Signature Algorithm\t : 0x%x",
+ tlv_patch->signature);
+ BT_DBG("Reserved\t\t : 0x%x",
+ le16_to_cpu(tlv_patch->reserved1));
+ BT_DBG("Product ID\t\t : 0x%04x",
+ le16_to_cpu(tlv_patch->product_id));
+ BT_DBG("Rom Build Version\t : 0x%04x",
+ le16_to_cpu(tlv_patch->rom_build));
+ BT_DBG("Patch Version\t\t : 0x%04x",
+ le16_to_cpu(tlv_patch->patch_version));
+ BT_DBG("Reserved\t\t : 0x%x",
+ le16_to_cpu(tlv_patch->reserved2));
+ BT_DBG("Patch Entry Address\t : 0x%x",
+ le32_to_cpu(tlv_patch->entry));
+ break;
+
+ case TLV_TYPE_NVM:
+ idx = 0;
+ data = tlv->data;
+ while (idx < length) {
+ tlv_nvm = (struct tlv_type_nvm *)(data + idx);
+
+ tag_id = le16_to_cpu(tlv_nvm->tag_id);
+ tag_len = le16_to_cpu(tlv_nvm->tag_len);
+
+ /* Update NVM tags as needed */
+ switch (tag_id) {
+ case EDL_TAG_ID_HCI:
+ /* HCI transport layer parameters
+ * enabling software inband sleep
+ * onto controller side.
+ */
+ tlv_nvm->data[0] |= 0x80;
+
+ /* UART Baud Rate */
+ tlv_nvm->data[2] = config->user_baud_rate;
+
+ break;
+
+ case EDL_TAG_ID_DEEP_SLEEP:
+ /* Sleep enable mask
+				 * enabling the deep sleep feature on the controller.
+ */
+ tlv_nvm->data[0] |= 0x01;
+
+ break;
+ }
+
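+			/* Advance past the NVM tag header (tag_id,
+			 * tag_len and 8 reserved bytes) plus the tag
+			 * data itself.
+			 */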
+ idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
+ }
+ break;
+
+ default:
+ BT_ERR("Unknown TLV type %d", config->type);
+ break;
+ }
+}
+
+static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size,
+ const u8 *data)
+{
+ struct sk_buff *skb;
+ struct edl_event_hdr *edl;
+ struct tlv_seg_resp *tlv_resp;
+ u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
+ int err = 0;
+
+ BT_DBG("%s: Download segment #%d size %d", hdev->name, idx, seg_size);
+
+ cmd[0] = EDL_PATCH_TLV_REQ_CMD;
+ cmd[1] = seg_size;
+ memcpy(cmd + 2, data, seg_size);
+
+ skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
+ HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err);
+ return err;
+ }
+
+ if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
+ BT_ERR("%s: TLV response size mismatch", hdev->name);
+ err = -EILSEQ;
+ goto out;
+ }
+
+ edl = (struct edl_event_hdr *)(skb->data);
+ if (!edl || !edl->data) {
+ BT_ERR("%s: TLV with no header or no data", hdev->name);
+ err = -EILSEQ;
+ goto out;
+ }
+
+ tlv_resp = (struct tlv_seg_resp *)(edl->data);
+
+ if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+ edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
+ BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)",
+ hdev->name, edl->cresp, edl->rtype, tlv_resp->result);
+ err = -EIO;
+ }
+
+out:
+ kfree_skb(skb);
+
+ return err;
+}
+
+static int rome_tlv_download_request(struct hci_dev *hdev,
+ const struct firmware *fw)
+{
+ const u8 *buffer, *data;
+ int total_segment, remain_size;
+ int ret, i;
+
+ if (!fw || !fw->data)
+ return -EINVAL;
+
+ total_segment = fw->size / MAX_SIZE_PER_TLV_SEGMENT;
+ remain_size = fw->size % MAX_SIZE_PER_TLV_SEGMENT;
+
+ BT_DBG("%s: Total segment num %d remain size %d total size %zu",
+ hdev->name, total_segment, remain_size, fw->size);
+
+ data = fw->data;
+ for (i = 0; i < total_segment; i++) {
+ buffer = data + i * MAX_SIZE_PER_TLV_SEGMENT;
+ ret = rome_tlv_send_segment(hdev, i, MAX_SIZE_PER_TLV_SEGMENT,
+ buffer);
+ if (ret < 0)
+ return -EIO;
+ }
+
+ if (remain_size) {
+ buffer = data + total_segment * MAX_SIZE_PER_TLV_SEGMENT;
+ ret = rome_tlv_send_segment(hdev, total_segment, remain_size,
+ buffer);
+ if (ret < 0)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rome_download_firmware(struct hci_dev *hdev,
+ struct rome_config *config)
+{
+ const struct firmware *fw;
+ int ret;
+
+ BT_INFO("%s: ROME Downloading %s", hdev->name, config->fwname);
+
+ ret = request_firmware(&fw, config->fwname, &hdev->dev);
+ if (ret) {
+ BT_ERR("%s: Failed to request file: %s (%d)", hdev->name,
+ config->fwname, ret);
+ return ret;
+ }
+
+ rome_tlv_check_data(config, fw);
+
+ ret = rome_tlv_download_request(hdev, fw);
+ if (ret) {
+ BT_ERR("%s: Failed to download file: %s (%d)", hdev->name,
+ config->fwname, ret);
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ u8 cmd[9];
+ int err;
+
+ cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD;
+ cmd[1] = 0x02; /* TAG ID */
+ cmd[2] = sizeof(bdaddr_t); /* size */
+ memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
+ skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
+ HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: Change address command failed (%d)",
+ hdev->name, err);
+ return err;
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+{
+ u32 rome_ver = 0;
+ struct rome_config config;
+ int err;
+
+ BT_DBG("%s: ROME setup on UART", hdev->name);
+
+ config.user_baud_rate = baudrate;
+
+ /* Get ROME version information */
+ err = rome_patch_ver_req(hdev, &rome_ver);
+ if (err < 0 || rome_ver == 0) {
+ BT_ERR("%s: Failed to get version 0x%x", hdev->name, err);
+ return err;
+ }
+
+ BT_INFO("%s: ROME controller version 0x%08x", hdev->name, rome_ver);
+
+ /* Download rampatch file */
+ config.type = TLV_TYPE_PATCH;
+ snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin",
+ rome_ver);
+ err = rome_download_firmware(hdev, &config);
+ if (err < 0) {
+ BT_ERR("%s: Failed to download patch (%d)", hdev->name, err);
+ return err;
+ }
+
+ /* Download NVM configuration */
+ config.type = TLV_TYPE_NVM;
+ snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
+ rome_ver);
+ err = rome_download_firmware(hdev, &config);
+ if (err < 0) {
+ BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err);
+ return err;
+ }
+
+ /* Perform HCI reset */
+ err = rome_reset(hdev);
+ if (err < 0) {
+ BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err);
+ return err;
+ }
+
+ BT_INFO("%s: ROME setup on UART is completed", hdev->name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca_uart_setup_rome);
+
+MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * Bluetooth support for Qualcomm Atheros ROME chips
+ *
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define EDL_PATCH_CMD_OPCODE (0xFC00)
+#define EDL_NVM_ACCESS_OPCODE (0xFC0B)
+#define EDL_PATCH_CMD_LEN (1)
+#define EDL_PATCH_VER_REQ_CMD (0x19)
+#define EDL_PATCH_TLV_REQ_CMD (0x1E)
+#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
+#define MAX_SIZE_PER_TLV_SEGMENT (243)
+
+#define EDL_CMD_REQ_RES_EVT (0x00)
+#define EDL_PATCH_VER_RES_EVT (0x19)
+#define EDL_APP_VER_RES_EVT (0x02)
+#define EDL_TVL_DNLD_RES_EVT (0x04)
+#define EDL_CMD_EXE_STATUS_EVT (0x00)
+#define EDL_SET_BAUDRATE_RSP_EVT (0x92)
+#define EDL_NVM_ACCESS_CODE_EVT (0x0B)
+
+#define EDL_TAG_ID_HCI (17)
+#define EDL_TAG_ID_DEEP_SLEEP (27)
+
+enum qca_bardrate {
+ QCA_BAUDRATE_115200 = 0,
+ QCA_BAUDRATE_57600,
+ QCA_BAUDRATE_38400,
+ QCA_BAUDRATE_19200,
+ QCA_BAUDRATE_9600,
+ QCA_BAUDRATE_230400,
+ QCA_BAUDRATE_250000,
+ QCA_BAUDRATE_460800,
+ QCA_BAUDRATE_500000,
+ QCA_BAUDRATE_720000,
+ QCA_BAUDRATE_921600,
+ QCA_BAUDRATE_1000000,
+ QCA_BAUDRATE_1250000,
+ QCA_BAUDRATE_2000000,
+ QCA_BAUDRATE_3000000,
+ QCA_BAUDRATE_4000000,
+ QCA_BAUDRATE_1600000,
+ QCA_BAUDRATE_3200000,
+ QCA_BAUDRATE_3500000,
+ QCA_BAUDRATE_AUTO = 0xFE,
+ QCA_BAUDRATE_RESERVED
+};
+
+enum rome_tlv_type {
+ TLV_TYPE_PATCH = 1,
+ TLV_TYPE_NVM
+};
+
+struct rome_config {
+ u8 type;
+ char fwname[64];
+ uint8_t user_baud_rate;
+};
+
+struct edl_event_hdr {
+ __u8 cresp;
+ __u8 rtype;
+ __u8 data[0];
+} __packed;
+
+struct rome_version {
+ __le32 product_id;
+ __le16 patch_ver;
+ __le16 rome_ver;
+ __le32 soc_id;
+} __packed;
+
+struct tlv_seg_resp {
+ __u8 result;
+} __packed;
+
+struct tlv_type_patch {
+ __le32 total_size;
+ __le32 data_length;
+ __u8 format_version;
+ __u8 signature;
+ __le16 reserved1;
+ __le16 product_id;
+ __le16 rom_build;
+ __le16 patch_version;
+ __le16 reserved2;
+ __le32 entry;
+} __packed;
+
+struct tlv_type_nvm {
+ __le16 tag_id;
+ __le16 tag_len;
+ __le32 reserve1;
+ __le32 reserve2;
+ __u8 data[0];
+} __packed;
+
+struct tlv_type_hdr {
+ __le32 type_len;
+ __u8 data[0];
+} __packed;
+
+#if IS_ENABLED(CONFIG_BT_QCA)
+
+int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate);
+
+#else
+
+static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
+ /* Silicon Wave based devices */
+ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
+
{ } /* Terminating entry */
};
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/tty.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include "btbcm.h"
#include "hci_uart.h"
+struct bcm_device {
+ struct list_head list;
+
+ struct platform_device *pdev;
+
+ const char *name;
+ struct gpio_desc *device_wakeup;
+ struct gpio_desc *shutdown;
+
+ struct clk *clk;
+ bool clk_enabled;
+
+ u32 init_speed;
+
+#ifdef CONFIG_PM_SLEEP
+ struct hci_uart *hu;
+ bool is_suspended; /* suspend/resume flag */
+#endif
+};
+
struct bcm_data {
- struct sk_buff *rx_skb;
- struct sk_buff_head txq;
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+
+ struct bcm_device *dev;
};
+/* List of BCM BT UART devices */
+static DEFINE_SPINLOCK(bcm_device_list_lock);
+static LIST_HEAD(bcm_device_list);
+
static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
{
struct hci_dev *hdev = hu->hdev;
return 0;
}
+/* bcm_device_exists should be protected by bcm_device_list_lock */
+static bool bcm_device_exists(struct bcm_device *device)
+{
+ struct list_head *p;
+
+ list_for_each(p, &bcm_device_list) {
+ struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+ if (device == dev)
+ return true;
+ }
+
+ return false;
+}
+
+static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
+{
+ if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
+ clk_enable(dev->clk);
+
+ gpiod_set_value_cansleep(dev->shutdown, powered);
+ gpiod_set_value_cansleep(dev->device_wakeup, powered);
+
+ if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
+ clk_disable(dev->clk);
+
+ dev->clk_enabled = powered;
+
+ return 0;
+}
+
static int bcm_open(struct hci_uart *hu)
{
struct bcm_data *bcm;
+ struct list_head *p;
BT_DBG("hu %p", hu);
skb_queue_head_init(&bcm->txq);
hu->priv = bcm;
+
+ spin_lock(&bcm_device_list_lock);
+ list_for_each(p, &bcm_device_list) {
+ struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+ /* Retrieve saved bcm_device based on parent of the
+ * platform device (saved during device probe) and
+ * parent of tty device used by hci_uart
+ */
+ if (hu->tty->dev->parent == dev->pdev->dev.parent) {
+ bcm->dev = dev;
+ hu->init_speed = dev->init_speed;
+#ifdef CONFIG_PM_SLEEP
+ dev->hu = hu;
+#endif
+ break;
+ }
+ }
+
+ if (bcm->dev)
+ bcm_gpio_set_power(bcm->dev, true);
+
+ spin_unlock(&bcm_device_list_lock);
+
return 0;
}
BT_DBG("hu %p", hu);
+ /* Protect bcm->dev against removal of the device or driver */
+ spin_lock(&bcm_device_list_lock);
+ if (bcm_device_exists(bcm->dev)) {
+ bcm_gpio_set_power(bcm->dev, false);
+#ifdef CONFIG_PM_SLEEP
+ bcm->dev->hu = NULL;
+#endif
+ }
+ spin_unlock(&bcm_device_list_lock);
+
skb_queue_purge(&bcm->txq);
kfree_skb(bcm->rx_skb);
kfree(bcm);
return skb_dequeue(&bcm->txq);
}
+#ifdef CONFIG_PM_SLEEP
+/* Platform suspend callback */
+static int bcm_suspend(struct device *dev)
+{
+ struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+ BT_DBG("suspend (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+ if (!bdev->is_suspended) {
+ hci_uart_set_flow_control(bdev->hu, true);
+
+ /* Once this callback returns, driver suspends BT via GPIO */
+ bdev->is_suspended = true;
+ }
+
+ /* Suspend the device */
+ if (bdev->device_wakeup) {
+ gpiod_set_value(bdev->device_wakeup, false);
+ BT_DBG("suspend, delaying 15 ms");
+ mdelay(15);
+ }
+
+ return 0;
+}
+
+/* Platform resume callback */
+static int bcm_resume(struct device *dev)
+{
+ struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+ BT_DBG("resume (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+ if (bdev->device_wakeup) {
+ gpiod_set_value(bdev->device_wakeup, true);
+ BT_DBG("resume, delaying 15 ms");
+ mdelay(15);
+ }
+
+ /* When this callback executes, the device has woken up already */
+ if (bdev->is_suspended) {
+ bdev->is_suspended = false;
+
+ hci_uart_set_flow_control(bdev->hu, false);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false };
+static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
+ { "device-wakeup-gpios", &device_wakeup_gpios, 1 },
+ { "shutdown-gpios", &shutdown_gpios, 1 },
+ { },
+};
+
+#ifdef CONFIG_ACPI
+static int bcm_resource(struct acpi_resource *ares, void *data)
+{
+ struct bcm_device *dev = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ struct acpi_resource_uart_serialbus *sb;
+
+ sb = &ares->data.uart_serial_bus;
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
+ dev->init_speed = sb->default_baud_rate;
+ }
+
+ /* Always tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+ struct platform_device *pdev = dev->pdev;
+ const struct acpi_device_id *id;
+ struct acpi_device *adev;
+ LIST_HEAD(resources);
+ int ret;
+
+ id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ /* Retrieve GPIO data */
+ dev->name = dev_name(&pdev->dev);
+ ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
+ acpi_bcm_default_gpios);
+ if (ret)
+ return ret;
+
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+
+ dev->device_wakeup = devm_gpiod_get_optional(&pdev->dev,
+ "device-wakeup",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(dev->device_wakeup))
+ return PTR_ERR(dev->device_wakeup);
+
+ dev->shutdown = devm_gpiod_get_optional(&pdev->dev, "shutdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(dev->shutdown))
+ return PTR_ERR(dev->shutdown);
+
+ /* Make sure at least one of the GPIOs is defined and that
+ * a name is specified for this instance
+ */
+ if ((!dev->device_wakeup && !dev->shutdown) || !dev->name) {
+ dev_err(&pdev->dev, "invalid platform data\n");
+ return -EINVAL;
+ }
+
+ /* Retrieve UART ACPI info */
+ adev = ACPI_COMPANION(&dev->pdev->dev);
+ if (!adev)
+ return 0;
+
+ acpi_dev_get_resources(adev, &resources, bcm_resource, dev);
+
+ return 0;
+}
+#else
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+static int bcm_probe(struct platform_device *pdev)
+{
+ struct bcm_device *dev;
+ struct acpi_device_id *pdata = pdev->dev.platform_data;
+ int ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->pdev = pdev;
+
+ if (ACPI_HANDLE(&pdev->dev)) {
+ ret = bcm_acpi_probe(dev);
+ if (ret)
+ return ret;
+ } else if (pdata) {
+ dev->name = pdata->id;
+ } else {
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ dev_info(&pdev->dev, "%s device registered.\n", dev->name);
+
+ /* Place this instance on the device list */
+ spin_lock(&bcm_device_list_lock);
+ list_add_tail(&dev->list, &bcm_device_list);
+ spin_unlock(&bcm_device_list_lock);
+
+ bcm_gpio_set_power(dev, false);
+
+ return 0;
+}
+
+static int bcm_remove(struct platform_device *pdev)
+{
+ struct bcm_device *dev = platform_get_drvdata(pdev);
+
+ spin_lock(&bcm_device_list_lock);
+ list_del(&dev->list);
+ spin_unlock(&bcm_device_list_lock);
+
+ acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
+ dev_info(&pdev->dev, "%s device unregistered.\n", dev->name);
+
+ return 0;
+}
+
static const struct hci_uart_proto bcm_proto = {
.id = HCI_UART_BCM,
.name = "BCM",
.dequeue = bcm_dequeue,
};
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id bcm_acpi_match[] = {
+ { "BCM2E39", 0 },
+ { "BCM2E67", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
+#endif
+
+/* Platform suspend and resume callbacks */
+static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume);
+
+static struct platform_driver bcm_driver = {
+ .probe = bcm_probe,
+ .remove = bcm_remove,
+ .driver = {
+ .name = "hci_bcm",
+ .acpi_match_table = ACPI_PTR(bcm_acpi_match),
+ .pm = &bcm_pm_ops,
+ },
+};
+
int __init bcm_init(void)
{
+ platform_driver_register(&bcm_driver);
+
return hci_uart_register_proto(&bcm_proto);
}
int __exit bcm_deinit(void)
{
+ platform_driver_unregister(&bcm_driver);
+
return hci_uart_unregister_proto(&bcm_proto);
}
#ifdef CONFIG_BT_HCIUART_BCM
bcm_init();
#endif
+#ifdef CONFIG_BT_HCIUART_QCA
+ qca_init();
+#endif
return 0;
}
#ifdef CONFIG_BT_HCIUART_BCM
bcm_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_QCA
+ qca_deinit();
+#endif
/* Release tty registration of line discipline */
err = tty_unregister_ldisc(N_HCI);
--- /dev/null
+/*
+ * Bluetooth Software UART Qualcomm protocol
+ *
+ * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
+ * protocol extension to H4.
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
+ *
+ * Acknowledgements:
+ * This file is based on hci_ll.c, which was...
+ * Written by Ohad Ben-Cohen <ohad@bencohen.org>
+ * which was in turn based on hci_h4.c, which was written
+ * by Maxim Krasnyansky and Marcel Holtmann.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+#include "btqca.h"
+
+/* HCI_IBS protocol messages */
+#define HCI_IBS_SLEEP_IND 0xFE
+#define HCI_IBS_WAKE_IND 0xFD
+#define HCI_IBS_WAKE_ACK 0xFC
+#define HCI_MAX_IBS_SIZE 10
+
+/* Controller states */
+#define STATE_IN_BAND_SLEEP_ENABLED 1
+
+#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
+#define IBS_TX_IDLE_TIMEOUT_MS 2000
+#define BAUDRATE_SETTLE_TIMEOUT_MS 300
+
+/* HCI_IBS transmit side sleep protocol states */
+enum tx_ibs_states {
+ HCI_IBS_TX_ASLEEP,
+ HCI_IBS_TX_WAKING,
+ HCI_IBS_TX_AWAKE,
+};
+
+/* HCI_IBS receive side sleep protocol states */
+enum rx_states {
+ HCI_IBS_RX_ASLEEP,
+ HCI_IBS_RX_AWAKE,
+};
+
+/* HCI_IBS transmit and receive side clock state vote */
+enum hci_ibs_clock_state_vote {
+ HCI_IBS_VOTE_STATS_UPDATE,
+ HCI_IBS_TX_VOTE_CLOCK_ON,
+ HCI_IBS_TX_VOTE_CLOCK_OFF,
+ HCI_IBS_RX_VOTE_CLOCK_ON,
+ HCI_IBS_RX_VOTE_CLOCK_OFF,
+};
+
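As an orientation aid, the transmit-side sleep protocol implemented by
the handlers in this file can be summarized as follows (a summary of the
code below, not part of the patch):

/*
 * TX side, informally:
 *
 *   ASLEEP --(host has data; send WAKE_IND)--> WAKING
 *   WAKING --(WAKE_ACK received)-------------> AWAKE
 *   AWAKE  --(tx idle timeout; SLEEP_IND)----> ASLEEP
 *
 * WAKE_IND is retransmitted every IBS_WAKE_RETRANS_TIMEOUT_MS until the
 * ACK arrives; outgoing frames wait on tx_wait_q in the meantime.
 */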
+struct qca_data {
+ struct hci_uart *hu;
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+ struct sk_buff_head tx_wait_q; /* HCI_IBS wait queue */
+ spinlock_t hci_ibs_lock; /* HCI_IBS state lock */
+ u8 tx_ibs_state; /* HCI_IBS transmit side power state*/
+ u8 rx_ibs_state; /* HCI_IBS receive side power state */
+ u32 tx_vote; /* Clock must be on for TX */
+ u32 rx_vote; /* Clock must be on for RX */
+ struct timer_list tx_idle_timer;
+ u32 tx_idle_delay;
+ struct timer_list wake_retrans_timer;
+ u32 wake_retrans;
+ struct workqueue_struct *workqueue;
+ struct work_struct ws_awake_rx;
+ struct work_struct ws_awake_device;
+ struct work_struct ws_rx_vote_off;
+ struct work_struct ws_tx_vote_off;
+ unsigned long flags;
+
+ /* For debugging purposes */
+ u64 ibs_sent_wacks;
+ u64 ibs_sent_slps;
+ u64 ibs_sent_wakes;
+ u64 ibs_recv_wacks;
+ u64 ibs_recv_slps;
+ u64 ibs_recv_wakes;
+ u64 vote_last_jif;
+ u32 vote_on_ms;
+ u32 vote_off_ms;
+ u64 tx_votes_on;
+ u64 rx_votes_on;
+ u64 tx_votes_off;
+ u64 rx_votes_off;
+ u64 votes_on;
+ u64 votes_off;
+};
+
+static void __serial_clock_on(struct tty_struct *tty)
+{
+ /* TODO: Some chipsets require the UART clock to be enabled on the
+ * client side to save power, or other manual work may be needed.
+ * Put code to control the UART clock here if required.
+ */
+}
+
+static void __serial_clock_off(struct tty_struct *tty)
+{
+ /* TODO: Some chipsets require the UART clock to be disabled on the
+ * client side to save power, or other manual work may be needed.
+ * Put code to turn the UART clock off here if required.
+ */
+}
+
+/* serial_clock_vote needs to be called with the ibs lock held */
+static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
+{
+ struct qca_data *qca = hu->priv;
+ unsigned int diff;
+
+ bool old_vote = (qca->tx_vote | qca->rx_vote);
+ bool new_vote;
+
+ switch (vote) {
+ case HCI_IBS_VOTE_STATS_UPDATE:
+ diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
+
+ if (old_vote)
+ qca->vote_off_ms += diff;
+ else
+ qca->vote_on_ms += diff;
+ return;
+
+ case HCI_IBS_TX_VOTE_CLOCK_ON:
+ qca->tx_vote = true;
+ qca->tx_votes_on++;
+ new_vote = true;
+ break;
+
+ case HCI_IBS_RX_VOTE_CLOCK_ON:
+ qca->rx_vote = true;
+ qca->rx_votes_on++;
+ new_vote = true;
+ break;
+
+ case HCI_IBS_TX_VOTE_CLOCK_OFF:
+ qca->tx_vote = false;
+ qca->tx_votes_off++;
+ new_vote = qca->rx_vote | qca->tx_vote;
+ break;
+
+ case HCI_IBS_RX_VOTE_CLOCK_OFF:
+ qca->rx_vote = false;
+ qca->rx_votes_off++;
+ new_vote = qca->rx_vote | qca->tx_vote;
+ break;
+
+ default:
+ BT_ERR("Voting irregularity");
+ return;
+ }
+
+ if (new_vote != old_vote) {
+ if (new_vote)
+ __serial_clock_on(hu->tty);
+ else
+ __serial_clock_off(hu->tty);
+
+ BT_DBG("Vote serial clock %s(%s)", new_vote? "true" : "false",
+ vote? "true" : "false");
+
+ diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
+
+ if (new_vote) {
+ qca->votes_on++;
+ qca->vote_off_ms += diff;
+ } else {
+ qca->votes_off++;
+ qca->vote_on_ms += diff;
+ }
+ qca->vote_last_jif = jiffies;
+ }
+}
+
+/* Builds and queues an HCI_IBS command packet; the caller kicks off
+ * the actual transmission. These are very simple packets with only
+ * 1 cmd byte.
+ */
+static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
+{
+ int err = 0;
+ struct sk_buff *skb = NULL;
+ struct qca_data *qca = hu->priv;
+
+ BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);
+
+ skb = bt_skb_alloc(1, GFP_ATOMIC);
+ if (!skb) {
+ BT_ERR("Failed to allocate memory for HCI_IBS packet");
+ return -ENOMEM;
+ }
+
+ /* Assign HCI_IBS type */
+ *skb_put(skb, 1) = cmd;
+
+ skb_queue_tail(&qca->txq, skb);
+
+ return err;
+}
+
+static void qca_wq_awake_device(struct work_struct *work)
+{
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ws_awake_device);
+ struct hci_uart *hu = qca->hu;
+ unsigned long retrans_delay;
+
+ BT_DBG("hu %p wq awake device", hu);
+
+ /* Vote for serial clock */
+ serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
+
+ spin_lock(&qca->hci_ibs_lock);
+
+ /* Send wake indication to device */
+ if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
+ BT_ERR("Failed to send WAKE to device");
+
+ qca->ibs_sent_wakes++;
+
+ /* Start retransmit timer */
+ retrans_delay = msecs_to_jiffies(qca->wake_retrans);
+ mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+
+ spin_unlock(&qca->hci_ibs_lock);
+
+ /* Actually send the packets */
+ hci_uart_tx_wakeup(hu);
+}
+
+static void qca_wq_awake_rx(struct work_struct *work)
+{
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ws_awake_rx);
+ struct hci_uart *hu = qca->hu;
+
+ BT_DBG("hu %p wq awake rx", hu);
+
+ serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
+
+ spin_lock(&qca->hci_ibs_lock);
+ qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
+
+ /* Always acknowledge the device wake-up; sending an IBS
+ * message doesn't count as TX ON.
+ */
+ if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
+ BT_ERR("Failed to acknowledge device wake up");
+
+ qca->ibs_sent_wacks++;
+
+ spin_unlock(&qca->hci_ibs_lock);
+
+ /* Actually send the packets */
+ hci_uart_tx_wakeup(hu);
+}
+
+static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
+{
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ws_rx_vote_off);
+ struct hci_uart *hu = qca->hu;
+
+ BT_DBG("hu %p rx clock vote off", hu);
+
+ serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
+}
+
+static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
+{
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ws_tx_vote_off);
+ struct hci_uart *hu = qca->hu;
+
+ BT_DBG("hu %p tx clock vote off", hu);
+
+ /* Run HCI tx handling unlocked */
+ hci_uart_tx_wakeup(hu);
+
+ /* Now that the message is queued to the tty driver, vote for the
+ * tty clocks off. It is up to the tty driver to delay the clocks
+ * going off until tx is done.
+ */
+ serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
+}
+
+static void hci_ibs_tx_idle_timeout(unsigned long arg)
+{
+ struct hci_uart *hu = (struct hci_uart *)arg;
+ struct qca_data *qca = hu->priv;
+ unsigned long flags;
+
+ BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
+
+ spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+ flags, SINGLE_DEPTH_NESTING);
+
+ switch (qca->tx_ibs_state) {
+ case HCI_IBS_TX_AWAKE:
+ /* TX_IDLE, go to SLEEP */
+ if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
+ BT_ERR("Failed to send SLEEP to device");
+ break;
+ }
+ qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+ qca->ibs_sent_slps++;
+ queue_work(qca->workqueue, &qca->ws_tx_vote_off);
+ break;
+
+ case HCI_IBS_TX_ASLEEP:
+ case HCI_IBS_TX_WAKING:
+ /* Fall through */
+
+ default:
+ BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+}
+
+static void hci_ibs_wake_retrans_timeout(unsigned long arg)
+{
+ struct hci_uart *hu = (struct hci_uart *)arg;
+ struct qca_data *qca = hu->priv;
+ unsigned long flags, retrans_delay;
+ unsigned long retransmit = 0;
+
+ BT_DBG("hu %p wake retransmit timeout in %d state",
+ hu, qca->tx_ibs_state);
+
+ spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+ flags, SINGLE_DEPTH_NESTING);
+
+ switch (qca->tx_ibs_state) {
+ case HCI_IBS_TX_WAKING:
+ /* No WAKE_ACK, retransmit WAKE */
+ retransmit = 1;
+ if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
+ BT_ERR("Failed to acknowledge device wake up");
+ break;
+ }
+ qca->ibs_sent_wakes++;
+ retrans_delay = msecs_to_jiffies(qca->wake_retrans);
+ mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+ break;
+
+ case HCI_IBS_TX_ASLEEP:
+ case HCI_IBS_TX_AWAKE:
+ /* Fall through */
+
+ default:
+ BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+ if (retransmit)
+ hci_uart_tx_wakeup(hu);
+}
+
+/* Initialize protocol */
+static int qca_open(struct hci_uart *hu)
+{
+ struct qca_data *qca;
+
+ BT_DBG("hu %p qca_open", hu);
+
+ qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
+ if (!qca)
+ return -ENOMEM;
+
+ skb_queue_head_init(&qca->txq);
+ skb_queue_head_init(&qca->tx_wait_q);
+ spin_lock_init(&qca->hci_ibs_lock);
+ qca->workqueue = create_singlethread_workqueue("qca_wq");
+ if (!qca->workqueue) {
+ BT_ERR("QCA Workqueue not initialized properly");
+ kfree(qca);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
+ INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
+ INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
+ INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
+
+ qca->hu = hu;
+
+ /* Assume we start with both sides asleep -- extra wakes OK */
+ qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+ qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+
+ /* clocks actually on, but we start votes off */
+ qca->tx_vote = false;
+ qca->rx_vote = false;
+ qca->flags = 0;
+
+ qca->ibs_sent_wacks = 0;
+ qca->ibs_sent_slps = 0;
+ qca->ibs_sent_wakes = 0;
+ qca->ibs_recv_wacks = 0;
+ qca->ibs_recv_slps = 0;
+ qca->ibs_recv_wakes = 0;
+ qca->vote_last_jif = jiffies;
+ qca->vote_on_ms = 0;
+ qca->vote_off_ms = 0;
+ qca->votes_on = 0;
+ qca->votes_off = 0;
+ qca->tx_votes_on = 0;
+ qca->tx_votes_off = 0;
+ qca->rx_votes_on = 0;
+ qca->rx_votes_off = 0;
+
+ hu->priv = qca;
+
+ init_timer(&qca->wake_retrans_timer);
+ qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
+ qca->wake_retrans_timer.data = (u_long)hu;
+ qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
+
+ init_timer(&qca->tx_idle_timer);
+ qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
+ qca->tx_idle_timer.data = (u_long)hu;
+ qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+
+ BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
+ qca->tx_idle_delay, qca->wake_retrans);
+
+ return 0;
+}
+
+static void qca_debugfs_init(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ struct dentry *ibs_dir;
+ umode_t mode;
+
+ if (!hdev->debugfs)
+ return;
+
+ ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
+
+ /* read only */
+ mode = S_IRUGO;
+ debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
+ debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
+ debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
+ &qca->ibs_sent_slps);
+ debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
+ &qca->ibs_sent_wakes);
+ debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
+ &qca->ibs_sent_wacks);
+ debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
+ &qca->ibs_recv_slps);
+ debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
+ &qca->ibs_recv_wakes);
+ debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
+ &qca->ibs_recv_wacks);
+ debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
+ debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
+ debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
+ debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
+ debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
+ debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
+ debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
+ debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
+ debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
+ debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);
+
+ /* read/write */
+ mode = S_IRUGO | S_IWUSR;
+ debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
+ debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
+ &qca->tx_idle_delay);
+}
+
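Assuming the standard Bluetooth debugfs layout (hdev->debugfs hangs off
the "bluetooth" root), the files registered above end up at paths such as:

/* /sys/kernel/debug/bluetooth/hci0/ibs/ibs_sent_wakes  (0444, counter)
 * /sys/kernel/debug/bluetooth/hci0/ibs/wake_retrans    (0644, tunable)
 */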
+/* Flush protocol data */
+static int qca_flush(struct hci_uart *hu)
+{
+ struct qca_data *qca = hu->priv;
+
+ BT_DBG("hu %p qca flush", hu);
+
+ skb_queue_purge(&qca->tx_wait_q);
+ skb_queue_purge(&qca->txq);
+
+ return 0;
+}
+
+/* Close protocol */
+static int qca_close(struct hci_uart *hu)
+{
+ struct qca_data *qca = hu->priv;
+
+ BT_DBG("hu %p qca close", hu);
+
+ serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
+
+ skb_queue_purge(&qca->tx_wait_q);
+ skb_queue_purge(&qca->txq);
+ del_timer(&qca->tx_idle_timer);
+ del_timer(&qca->wake_retrans_timer);
+ destroy_workqueue(qca->workqueue);
+ qca->hu = NULL;
+
+ kfree_skb(qca->rx_skb);
+
+ hu->priv = NULL;
+
+ kfree(qca);
+
+ return 0;
+}
+
+/* Called upon a wake-up-indication from the device.
+ */
+static void device_want_to_wakeup(struct hci_uart *hu)
+{
+ unsigned long flags;
+ struct qca_data *qca = hu->priv;
+
+ BT_DBG("hu %p want to wake up", hu);
+
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+ qca->ibs_recv_wakes++;
+
+ switch (qca->rx_ibs_state) {
+ case HCI_IBS_RX_ASLEEP:
+ /* Make sure the clock is on - we may have turned it off since
+ * receiving the wake-up indication; waking the rx clock is
+ * handled on the workqueue.
+ */
+ queue_work(qca->workqueue, &qca->ws_awake_rx);
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+ return;
+
+ case HCI_IBS_RX_AWAKE:
+ /* Always acknowledge the device wake-up; sending an IBS
+ * message doesn't count as TX ON.
+ */
+ if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
+ BT_ERR("Failed to acknowledge device wake up");
+ break;
+ }
+ qca->ibs_sent_wacks++;
+ break;
+
+ default:
+ /* Any other state is illegal */
+ BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
+ qca->rx_ibs_state);
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+ /* Actually send the packets */
+ hci_uart_tx_wakeup(hu);
+}
+
+/* Called upon a sleep-indication from the device.
+ */
+static void device_want_to_sleep(struct hci_uart *hu)
+{
+ unsigned long flags;
+ struct qca_data *qca = hu->priv;
+
+ BT_DBG("hu %p want to sleep", hu);
+
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+ qca->ibs_recv_slps++;
+
+ switch (qca->rx_ibs_state) {
+ case HCI_IBS_RX_AWAKE:
+ /* Update state */
+ qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+ /* Vote off rx clock under workqueue */
+ queue_work(qca->workqueue, &qca->ws_rx_vote_off);
+ break;
+
+ case HCI_IBS_RX_ASLEEP:
+ /* Fall through */
+
+ default:
+ /* Any other state is illegal */
+ BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
+ qca->rx_ibs_state);
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+}
+
+/* Called upon wake-up-acknowledgement from the device
+ */
+static void device_woke_up(struct hci_uart *hu)
+{
+ unsigned long flags, idle_delay;
+ struct qca_data *qca = hu->priv;
+ struct sk_buff *skb = NULL;
+
+ BT_DBG("hu %p woke up", hu);
+
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+ qca->ibs_recv_wacks++;
+
+ switch (qca->tx_ibs_state) {
+ case HCI_IBS_TX_AWAKE:
+ /* Expect one if we send 2 WAKEs */
+ BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
+ qca->tx_ibs_state);
+ break;
+
+ case HCI_IBS_TX_WAKING:
+ /* Send pending packets */
+ while ((skb = skb_dequeue(&qca->tx_wait_q)))
+ skb_queue_tail(&qca->txq, skb);
+
+ /* Switch timers and change state to HCI_IBS_TX_AWAKE */
+ del_timer(&qca->wake_retrans_timer);
+ idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
+ mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+ qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
+ break;
+
+ case HCI_IBS_TX_ASLEEP:
+ /* Fall through */
+
+ default:
+ BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
+ qca->tx_ibs_state);
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+ /* Actually send the packets */
+ hci_uart_tx_wakeup(hu);
+}
+
+/* Enqueue frame for transmission (padding, crc, etc); may be called from
+ * two simultaneous tasklets.
+ */
+static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ unsigned long flags = 0, idle_delay;
+ struct qca_data *qca = hu->priv;
+
+ BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
+ qca->tx_ibs_state);
+
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+ /* Don't go to sleep in the middle of a patch download, or when
+ * Out-Of-Band (GPIO-controlled) sleep is selected.
+ */
+ if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
+ skb_queue_tail(&qca->txq, skb);
+ return 0;
+ }
+
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+ /* Act according to current state */
+ switch (qca->tx_ibs_state) {
+ case HCI_IBS_TX_AWAKE:
+ BT_DBG("Device awake, sending normally");
+ skb_queue_tail(&qca->txq, skb);
+ idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
+ mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+ break;
+
+ case HCI_IBS_TX_ASLEEP:
+ BT_DBG("Device asleep, waking up and queueing packet");
+ /* Save packet for later */
+ skb_queue_tail(&qca->tx_wait_q, skb);
+
+ qca->tx_ibs_state = HCI_IBS_TX_WAKING;
+ /* Schedule a work queue to wake up device */
+ queue_work(qca->workqueue, &qca->ws_awake_device);
+ break;
+
+ case HCI_IBS_TX_WAKING:
+ BT_DBG("Device waking up, queueing packet");
+ /* Transient state; just keep packet for later */
+ skb_queue_tail(&qca->tx_wait_q, skb);
+ break;
+
+ default:
+ BT_ERR("Illegal tx state: %d (losing packet)",
+ qca->tx_ibs_state);
+ kfree_skb(skb);
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+ return 0;
+}
+
+static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
+ BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);
+
+ device_want_to_sleep(hu);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
+ BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);
+
+ device_want_to_wakeup(hu);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
+ BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);
+
+ device_woke_up(hu);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+#define QCA_IBS_SLEEP_IND_EVENT \
+ .type = HCI_IBS_SLEEP_IND, \
+ .hlen = 0, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = HCI_MAX_IBS_SIZE
+
+#define QCA_IBS_WAKE_IND_EVENT \
+ .type = HCI_IBS_WAKE_IND, \
+ .hlen = 0, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = HCI_MAX_IBS_SIZE
+
+#define QCA_IBS_WAKE_ACK_EVENT \
+ .type = HCI_IBS_WAKE_ACK, \
+ .hlen = 0, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = HCI_MAX_IBS_SIZE
+
+static const struct h4_recv_pkt qca_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+ { QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
+ { QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack },
+ { QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
+};
+
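Because the QCA_IBS_*_EVENT descriptors above set .hlen, .loff and .lsize
to zero, h4_recv_buf() treats each indicator as a complete one-byte frame
and dispatches it straight to its .recv handler. A further single-byte
type would slot in the same way (0xFB is a made-up opcode, illustration
only):

#define QCA_IBS_FOO_EVENT \
	.type = 0xFB, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE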
+static int qca_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct qca_data *qca = hu->priv;
+
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
+ qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+ qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
+ if (IS_ERR(qca->rx_skb)) {
+ int err = PTR_ERR(qca->rx_skb);
+ BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+ qca->rx_skb = NULL;
+ return err;
+ }
+
+ return count;
+}
+
+static struct sk_buff *qca_dequeue(struct hci_uart *hu)
+{
+ struct qca_data *qca = hu->priv;
+
+ return skb_dequeue(&qca->txq);
+}
+
+static uint8_t qca_get_baudrate_value(int speed)
+{
+ switch (speed) {
+ case 9600:
+ return QCA_BAUDRATE_9600;
+ case 19200:
+ return QCA_BAUDRATE_19200;
+ case 38400:
+ return QCA_BAUDRATE_38400;
+ case 57600:
+ return QCA_BAUDRATE_57600;
+ case 115200:
+ return QCA_BAUDRATE_115200;
+ case 230400:
+ return QCA_BAUDRATE_230400;
+ case 460800:
+ return QCA_BAUDRATE_460800;
+ case 500000:
+ return QCA_BAUDRATE_500000;
+ case 921600:
+ return QCA_BAUDRATE_921600;
+ case 1000000:
+ return QCA_BAUDRATE_1000000;
+ case 2000000:
+ return QCA_BAUDRATE_2000000;
+ case 3000000:
+ return QCA_BAUDRATE_3000000;
+ case 3500000:
+ return QCA_BAUDRATE_3500000;
+ default:
+ return QCA_BAUDRATE_115200;
+ }
+}
+
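Unlisted speeds deliberately degrade to the safe default; for example
(illustration only):

	qca_get_baudrate_value(3000000);	/* QCA_BAUDRATE_3000000 */
	qca_get_baudrate_value(1234567);	/* QCA_BAUDRATE_115200, default */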
+static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ struct sk_buff *skb;
+ u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
+
+ if (baudrate > QCA_BAUDRATE_3000000)
+ return -EINVAL;
+
+ cmd[4] = baudrate;
+
+ skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
+ if (!skb) {
+ BT_ERR("Failed to allocate memory for baudrate packet");
+ return -ENOMEM;
+ }
+
+ /* Assign commands to change baudrate and packet type. */
+ memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
+ bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+
+ skb_queue_tail(&qca->txq, skb);
+ hci_uart_tx_wakeup(hu);
+
+ /* Wait 300 ms for the controller to switch to the new baudrate.
+ * The controller replies once it has processed this HCI command,
+ * after which the host can talk to it at the new rate.
+ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ return 0;
+}
+
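One reading of the command buffer above, assuming standard H4/HCI command
framing (the skb is queued directly onto txq, bypassing qca_enqueue(), so
the H4 packet-type byte travels in cmd[0] itself):

/* cmd[0]    = 0x01        H4 packet indicator: HCI command
 * cmd[1..2] = 0x48 0xfc   opcode 0xfc48 (vendor-specific), little endian
 * cmd[3]    = 0x01        parameter total length
 * cmd[4]    = baudrate    one of the QCA_BAUDRATE_* codes
 */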
+static int qca_setup(struct hci_uart *hu)
+{
+ struct hci_dev *hdev = hu->hdev;
+ struct qca_data *qca = hu->priv;
+ unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+ int ret;
+
+ BT_INFO("%s: ROME setup", hdev->name);
+
+ /* Patch downloading has to be done without IBS mode */
+ clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+
+ /* Setup initial baudrate */
+ speed = 0;
+ if (hu->init_speed)
+ speed = hu->init_speed;
+ else if (hu->proto->init_speed)
+ speed = hu->proto->init_speed;
+
+ if (speed)
+ hci_uart_set_baudrate(hu, speed);
+
+ /* Setup user speed if needed */
+ speed = 0;
+ if (hu->oper_speed)
+ speed = hu->oper_speed;
+ else if (hu->proto->oper_speed)
+ speed = hu->proto->oper_speed;
+
+ if (speed) {
+ qca_baudrate = qca_get_baudrate_value(speed);
+
+ BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
+ ret = qca_set_baudrate(hdev, qca_baudrate);
+ if (ret) {
+ BT_ERR("%s: Failed to change the baud rate (%d)",
+ hdev->name, ret);
+ return ret;
+ }
+ hci_uart_set_baudrate(hu, speed);
+ }
+
+ /* Setup patch / NVM configurations */
+ ret = qca_uart_setup_rome(hdev, qca_baudrate);
+ if (!ret) {
+ set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+ qca_debugfs_init(hdev);
+ }
+
+ /* Setup bdaddr */
+ hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+ return ret;
+}
+
+static struct hci_uart_proto qca_proto = {
+ .id = HCI_UART_QCA,
+ .name = "QCA",
+ .init_speed = 115200,
+ .oper_speed = 3000000,
+ .open = qca_open,
+ .close = qca_close,
+ .flush = qca_flush,
+ .setup = qca_setup,
+ .recv = qca_recv,
+ .enqueue = qca_enqueue,
+ .dequeue = qca_dequeue,
+};
+
+int __init qca_init(void)
+{
+ return hci_uart_register_proto(&qca_proto);
+}
+
+int __exit qca_deinit(void)
+{
+ return hci_uart_unregister_proto(&qca_proto);
+}
#define HCIUARTGETFLAGS _IOR('U', 204, int)
/* UART protocols */
-#define HCI_UART_MAX_PROTO 8
+#define HCI_UART_MAX_PROTO 9
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
#define HCI_UART_ATH3K 5
#define HCI_UART_INTEL 6
#define HCI_UART_BCM 7
+#define HCI_UART_QCA 8
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
int bcm_init(void);
int bcm_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_QCA
+int qca_init(void);
+int qca_deinit(void);
+#endif
PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
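With the corrected selector, clock-enable bits above 31 now map to the
CKENB register and the rest to CKENA; an illustration with made-up bit
values:

/* hypothetical: CKEN_FOO == 33 -> CKEN_AB(FOO) selects &CKENB
 *               CKEN_BAR ==  5 -> CKEN_AB(BAR) selects &CKENA
 */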
#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
div_hp, bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
+ if (!ch->cs_enabled)
+ return;
+
sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
+ if (!ch->cs_enabled)
+ return;
+
pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}
ret = exynos5250_cpufreq_init(exynos_info);
} else {
pr_err("%s: Unknown SoC type\n", __func__);
- return -ENODEV;
+ ret = -ENODEV;
}
if (ret)
if (exynos_info->set_freq == NULL) {
dev_err(&pdev->dev, "No set_freq function (ERR)\n");
+ ret = -EINVAL;
goto err_vdd_arm;
}
arm_regulator = regulator_get(NULL, "vdd_arm");
if (IS_ERR(arm_regulator)) {
dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
+ ret = -EINVAL;
goto err_vdd_arm;
}
regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
- return -EINVAL;
+ return ret;
}
static struct platform_driver exynos_cpufreq_platdrv = {
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_fin, *desc;
dma_addr_t ptr = ctx->sh_desc_fin_dma;
- int sec4_sg_bytes;
+ int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
- sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+ sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
struct nx_sg *out_sg;
u64 to_process = 0, leftover, total;
unsigned long irq_flags;
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
}
do {
- /*
- * to_process: the SHA256_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - to_process;
- to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len,
max_sg_len);
rc = -EINVAL;
goto out;
}
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+ /* to_process: SHA256_BLOCK_SIZE-aligned chunk to be
+ * processed in this iteration. This value is restricted
+ * by the sg list limits and the number of sgs we already
+ * used for leftover data (see above).
+ * In the ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+ * but because the data may not be aligned, we need to
+ * account for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
struct nx_sg *out_sg;
u64 to_process, leftover = 0, total;
unsigned long irq_flags;
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
}
do {
- /*
- * to_process: the SHA512_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - leftover;
- to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
- leftover = total - to_process;
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len, max_sg_len);
rc = -EINVAL;
goto out;
}
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+ /* to_process: SHA512_BLOCK_SIZE-aligned chunk to be
+ * processed in this iteration. This value is restricted
+ * by the sg list limits and the number of sgs we already
+ * used for leftover data (see above).
+ * In the ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+ * but because the data may not be aligned, we need to
+ * account for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
goto out;
}
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
if (IS_ERR(ch))
return NULL;
+
+ dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
+ ch->device->privatecnt++;
+
return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
from an EDID retrieval */
if (port->connector) {
mutex_lock(&mgr->destroy_connector_lock);
- list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
+ list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
+ return;
}
drm_dp_port_teardown_pdt(port, port->pdt);
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
- struct drm_connector *connector;
+ struct drm_dp_mst_port *port;
/*
* Not a regular list traverse as we have to drop the destroy
*/
for (;;) {
mutex_lock(&mgr->destroy_connector_lock);
- connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
- if (!connector) {
+ port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
+ if (!port) {
mutex_unlock(&mgr->destroy_connector_lock);
break;
}
- list_del(&connector->destroy_list);
+ list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
- mgr->cbs->destroy_connector(mgr, connector);
+ mgr->cbs->destroy_connector(mgr, port->connector);
+
+ drm_dp_port_teardown_pdt(port, port->pdt);
+
+ if (!port->input && port->vcpi.vcpi > 0)
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ kfree(port);
}
}
spin_lock_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
- pm_runtime_set_active(dev);
pm_runtime_enable(dev);
ret = exynos_drm_ippdrv_register(ippdrv);
gsc_write(cfg, GSC_IN_CON);
- ctx->rotation = cfg &
- (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
*swap = ctx->rotation;
return 0;
gsc_write(cfg, GSC_IN_CON);
- ctx->rotation = cfg &
- (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
*swap = ctx->rotation;
return 0;
{
struct hdmi_context *hdata = ctx_from_connector(connector);
struct edid *edid;
+ int ret;
if (!hdata->ddc_adpt)
return -ENODEV;
drm_mode_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+
+ return ret;
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
/* handling VSYNC */
if (val & MXR_INT_STATUS_VSYNC) {
+ /* the vsync interrupt uses different bits for read and clear */
+ val |= MXR_INT_CLEAR_VSYNC;
+ val &= ~MXR_INT_STATUS_VSYNC;
+
/* interlace scan need to check shadow register */
if (ctx->interlace) {
base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
out:
/* clear interrupts */
- if (~val & MXR_INT_EN_VSYNC) {
- /* vsync interrupt use different bit for read and clear */
- val &= ~MXR_INT_EN_VSYNC;
- val |= MXR_INT_CLEAR_VSYNC;
- }
mixer_reg_write(res, MXR_INT_STATUS, val);
spin_unlock(&res->reg_slock);
}
/* enable vsync interrupt */
- mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
- MXR_INT_EN_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
return 0;
}
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
+ if (!mixer_ctx->powered) {
+ mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+ return;
+ }
+
/* disable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+ if (ctx->int_en & MXR_INT_EN_VSYNC)
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
mixer_win_reset(ctx);
}
struct drm_atomic_state *state,
bool async)
{
- int ret;
- int i;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int ret, i;
if (async) {
DRM_DEBUG_KMS("i915 does not yet support async commit\n");
return ret;
/* Point of no return */
-
- /*
- * FIXME: The proper sequence here will eventually be:
- *
- * drm_atomic_helper_swap_state(dev, state)
- * drm_atomic_helper_commit_modeset_disables(dev, state);
- * drm_atomic_helper_commit_planes(dev, state);
- * drm_atomic_helper_commit_modeset_enables(dev, state);
- * drm_atomic_helper_wait_for_vblanks(dev, state);
- * drm_atomic_helper_cleanup_planes(dev, state);
- * drm_atomic_state_free(state);
- *
- * once we have full atomic modeset. For now, just manually update
- * plane states to avoid clobbering good states with dummy states
- * while nuclear pageflipping.
- */
- for (i = 0; i < dev->mode_config.num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
-
- if (!plane)
- continue;
-
- plane->state->state = state;
- swap(state->plane_states[i], plane->state);
- plane->state->state = NULL;
- }
+ drm_atomic_helper_swap_state(dev, state);
/* swap crtc_scaler_state */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc) {
- continue;
- }
-
- to_intel_crtc(crtc)->config->scaler_state =
- to_intel_crtc_state(state->crtc_states[i])->scaler_state;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
if (INTEL_INFO(dev)->gen >= 9)
skl_detach_scalers(to_intel_crtc(crtc));
+
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
}
- drm_atomic_helper_commit_planes(dev, state);
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
goto encoder_retry;
}
- pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
+ /* Dithering does not seem to pass bits through correctly when it
+ * should, so only enable it on 6bpc panels. */
+ pipe_config->dither = pipe_config->pipe_bpp == 6*3;
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
modeset_update_crtc_power_domains(state);
- drm_atomic_helper_commit_planes(dev, state);
-
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!needs_modeset(crtc->state) || !crtc->state->enable)
+ if (!needs_modeset(crtc->state) || !crtc->state->enable) {
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
continue;
+ }
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
- intel_crtc_enable_planes(crtc);
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
}
/* FIXME: add subpixel order */
return 0;
}
-static bool primary_plane_visible(struct drm_crtc *crtc)
-{
- struct intel_plane_state *plane_state =
- to_intel_plane_state(crtc->primary->state);
-
- return plane_state->visible;
-}
-
static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_atomic_state *state = NULL;
struct intel_crtc_state *pipe_config;
- bool primary_plane_was_visible;
int ret;
BUG_ON(!set);
intel_update_pipe_size(to_intel_crtc(set->crtc));
- primary_plane_was_visible = primary_plane_visible(set->crtc);
-
ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
- if (ret == 0 &&
- pipe_config->base.enable &&
- pipe_config->base.planes_changed &&
- !needs_modeset(&pipe_config->base)) {
- struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
-
- /*
- * We need to make sure the primary plane is re-enabled if it
- * has previously been turned off.
- */
- if (ret == 0 && !primary_plane_was_visible &&
- primary_plane_visible(set->crtc)) {
- WARN_ON(!intel_crtc->active);
- intel_post_enable_primary(set->crtc);
- }
-
- /*
- * In the fastboot case this may be our only check of the
- * state after boot. It would be better to only do it on
- * the first update, but we don't have a nice way of doing that
- * (and really, set_config isn't used much for high freq page
- * flipping, so increasing its cost here shouldn't be a big
- * deal).
- */
- if (i915.fastboot && ret == 0)
- intel_modeset_check_state(set->crtc->dev);
- }
-
if (ret) {
DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
set->crtc->base.id, ret);
*/
if (IS_BROADWELL(dev))
intel_crtc->atomic.wait_vblank = true;
+
+ if (crtc_state)
+ intel_crtc->atomic.post_enable_primary = true;
}
/*
if (!state->visible || !fb)
intel_crtc->atomic.disable_ips = true;
+ if (!state->visible && old_state->visible &&
+ crtc_state && !needs_modeset(&crtc_state->base))
+ intel_crtc->atomic.pre_disable_primary = true;
+
intel_crtc->atomic.fb_bits |=
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
struct intel_plane_state *plane_state;
memset(crtc->config, 0, sizeof(*crtc->config));
+ crtc->config->base.crtc = &crtc->base;
crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
return 0;
}
-static int
-gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
-{
- struct nvkm_object *obj = (void *)chan;
- struct gk104_fifo_priv *priv = (void *)obj->engine;
-
- nv_wr32(priv, 0x002634, chan->base.chid);
- if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
- nv_error(priv, "channel %d [%s] kick timeout\n",
- chan->base.chid, nvkm_client_name(chan));
- return -EBUSY;
- }
-
- return 0;
-}
-
static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
struct nvkm_object *object)
{
struct nvkm_bar *bar = nvkm_bar(parent);
+ struct gk104_fifo_priv *priv = (void *)parent->engine;
struct gk104_fifo_base *base = (void *)parent->parent;
struct gk104_fifo_chan *chan = (void *)parent;
u32 addr;
- int ret;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW : return 0;
return -EINVAL;
}
- ret = gk104_fifo_chan_kick(chan);
- if (ret && suspend)
- return ret;
+ nv_wr32(priv, 0x002634, chan->base.chid);
+ if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+ nv_error(priv, "channel %d [%s] kick timeout\n",
+ chan->base.chid, nvkm_client_name(chan));
+ if (suspend)
+ return -EBUSY;
+ }
if (addr) {
nv_wo32(base, addr + 0x00, 0x00000000);
gk104_fifo_runlist_update(priv, chan->engine);
}
- gk104_fifo_chan_kick(chan);
nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
return nvkm_fifo_channel_fini(&chan->base, suspend);
}
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
true, NULL);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_validate_buffers(dev_priv, sw_context);
if (unlikely(ret != 0))
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_fifo_commit(dev_priv, command_size);
+ mutex_unlock(&dev_priv->binding_mutex);
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resource_list_unreserve(&sw_context->resource_list, false);
- mutex_unlock(&dev_priv->binding_mutex);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
printk(KERN_ERR MOD
"Unexpected cqe_status 0x%x for QPID=0x%0x\n",
CQE_STATUS(&cqe), CQE_QPID(&cqe));
- ret = -EINVAL;
+ wc->status = IB_WC_FATAL_ERR;
}
}
out:
{
int i;
+ if (!gpmc_base)
+ return;
+
gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
{
int i;
+ if (!gpmc_base)
+ return;
+
gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
config VIRTIO_NET
tristate "Virtio network driver"
depends on VIRTIO
- select AVERAGE
---help---
This is the virtual network driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
/* Initialize the device options */
- bond_dev->tx_queue_len = 0;
bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
- bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT;
+ bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
/* don't acquire bond device's netif_tx_lock when transmitting */
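The same conversion recurs in the caif and dummy hunks below: instead of
zeroing tx_queue_len, queueless virtual devices now advertise the fact
through the IFF_NO_QUEUE priv flag. A minimal sketch of the pattern in a
setup callback (foo_setup and the driver are hypothetical):

static void foo_setup(struct net_device *dev)
{
	/* no real transmit queue, so skip qdisc queueing entirely */
	dev->priv_flags |= IFF_NO_QUEUE;
}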
dev->type = ARPHRD_CAIF;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->destructor = free_netdev;
dev->netdev_ops = &cfhsi_netdevops;
for (i = 0; i < CFHSI_PRIO_LAST; ++i)
dev->type = ARPHRD_CAIF;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = CAIF_MAX_MTU;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->destructor = free_netdev;
skb_queue_head_init(&serdev->head);
serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
dev->netdev_ops = &cfspi_ops;
dev->type = ARPHRD_CAIF;
dev->flags = IFF_NOARP | IFF_POINTOPOINT;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->mtu = SPI_MAX_PAYLOAD_SIZE;
dev->destructor = free_netdev;
skb_queue_head_init(&cfspi->qhead);
if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
reg |= FLEXCAN_CTRL_SMP;
- netdev_info(dev, "writing ctrl=0x%08x\n", reg);
+ netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
flexcan_write(reg, &regs->ctrl);
/* print chip status */
struct can_bittiming_const bt_const;
unsigned int channel; /* channel number */
- /* This lock prevents a race condition between xmit and recieve. */
+ /* This lock prevents a race condition between xmit and receive. */
spinlock_t tx_ctx_lock;
struct gs_tx_context tx_context[GS_MAX_TX_URBS];
}
}
-static void gs_usb_recieve_bulk_callback(struct urb *urb)
+static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *usbcan = urb->context;
struct gs_can *dev;
usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
hf,
sizeof(struct gs_host_frame),
- gs_usb_recieve_bulk_callback,
+ gs_usb_receive_bulk_callback,
usbcan
);
GSUSB_ENDPOINT_IN),
buf,
sizeof(struct gs_host_frame),
- gs_usb_recieve_bulk_callback,
+ gs_usb_receive_bulk_callback,
parent);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
* full duplex.
*/
reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
- if (dsa_is_cpu_port(ds, port) ||
- ds->dsa_port_mask & (1 << port)) {
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
reg |= PORT_PCS_CTRL_FORCE_LINK |
PORT_PCS_CTRL_LINK_UP |
PORT_PCS_CTRL_DUPLEX_FULL |
reg |= PORT_CONTROL_EGRESS_ADD_TAG;
}
}
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
- if (ds->dsa_port_mask & (1 << port))
+ if (dsa_is_dsa_port(ds, port)) {
+ if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+ reg |= PORT_CONTROL_DSA_TAG;
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ }
+
if (port == dsa_upstream_port(ds))
reg |= PORT_CONTROL_FORWARD_UNKNOWN |
PORT_CONTROL_FORWARD_UNKNOWN_MC;
reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
}
- reg |= PORT_CONTROL_2_8021Q_SECURE;
+ reg |= PORT_CONTROL_2_8021Q_FALLBACK;
if (reg) {
ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
dev->destructor = free_netdev;
/* Fill in device structure with ethernet-generic values. */
- dev->tx_queue_len = 0;
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
eth_hw_addr_random(dev);
return ret;
}
-static int xgene_get_mac_address(struct device *dev,
- unsigned char *addr)
-{
- int ret;
-
- ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
- if (ret)
- ret = device_property_read_u8_array(dev, "mac-address",
- addr, 6);
- if (ret)
- return -ENODEV;
-
- return ETH_ALEN;
-}
-
-static int xgene_get_phy_mode(struct device *dev)
-{
- int i, ret;
- char *modestr;
-
- ret = device_property_read_string(dev, "phy-connection-type",
- (const char **)&modestr);
- if (ret)
- ret = device_property_read_string(dev, "phy-mode",
- (const char **)&modestr);
- if (ret)
- return -ENODEV;
-
- for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
- if (!strcasecmp(modestr, phy_modes(i)))
- return i;
- }
- return -ENODEV;
-}
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
if (ret)
return ret;
- if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
+ if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
- pdata->phy_mode = xgene_get_phy_mode(dev);
+ pdata->phy_mode = device_get_phy_mode(dev);
if (pdata->phy_mode < 0) {
dev_err(dev, "Unable to get phy-connection-type\n");
return pdata->phy_mode;
Virtualization support in the 578xx and 57712 products. This
allows for virtual function acceleration in virtual environments.
+config BNX2X_VXLAN
+ bool "Virtual eXtensible Local Area Network support"
+ default n
+ depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m)
+ ---help---
+ This enables hardware offload support for the VXLAN protocol over the
+ NetXtremeII series adapters.
+ Say Y here if you want to enable hardware offload support for
+ Virtual eXtensible Local Area Network (VXLAN) in the driver.
+
config BGMAC
tristate "BCMA bus GBit core support"
depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_SP_RTNL_TX_STOP,
BNX2X_SP_RTNL_GET_DRV_VERSION,
+ BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+ BNX2X_SP_RTNL_DEL_VXLAN_PORT,
};
enum bnx2x_iov_flag {
(IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \
IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp))
+/* Determines whether BW configuration arrives in 100Mb units or in
+ * percentages from actual physical link speed.
+ */
+#define IS_MF_PERCENT_BW(bp) (IS_MF_SI(bp) || IS_MF_UFP(bp) || IS_MF_BD(bp))
#define SET_FLAG(value, mask, flag) \
do {\
/* Calculate the current MAX line speed limit for the MF
* devices
*/
- if (IS_MF_SI(bp))
+ if (IS_MF_PERCENT_BW(bp))
line_speed = (line_speed * maxCfg) / 100;
else { /* SD mode */
u16 vn_max_rate = maxCfg * 100;
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
+ start_params->vxlan_dst_port = bp->vxlan_dst_port;
+
start_params->inner_rss = 1;
if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
else {
u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
- if (IS_MF_SI(bp)) {
+ if (IS_MF_PERCENT_BW(bp)) {
/* maxCfg in percents of linkspeed */
vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
} else /* SD modes */
}
}
+#ifdef CONFIG_BNX2X_VXLAN
+static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port)
+{
+ struct bnx2x_func_switch_update_params *switch_update_params;
+ struct bnx2x_func_state_params func_params = {NULL};
+ int rc;
+
+ switch_update_params = &func_params.params.switch_update;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+ /* Function parameters */
+ __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+ &switch_update_params->changes);
+ switch_update_params->vxlan_dst_port = port;
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc)
+ BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n",
+ port, rc);
+ return rc;
+}
+
+static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
+{
+ if (!netif_running(bp->dev))
+ return;
+
+ if (bp->vxlan_dst_port || !IS_PF(bp)) {
+ DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
+ return;
+ }
+
+ bp->vxlan_dst_port = port;
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
+}
+
+static void bnx2x_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u16 t_port = ntohs(port);
+
+ __bnx2x_add_vxlan_port(bp, t_port);
+}
+
+static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
+{
+ if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) {
+ DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
+ return;
+ }
+
+ if (netif_running(bp->dev)) {
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
+ } else {
+ bp->vxlan_dst_port = 0;
+ netdev_info(bp->dev, "Deleted vxlan dest port %d", port);
+ }
+}
+
+static void bnx2x_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u16 t_port = ntohs(port);
+
+ __bnx2x_del_vxlan_port(bp, t_port);
+}
+#endif
+
static int bnx2x_close(struct net_device *dev);
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
static void bnx2x_sp_rtnl_task(struct work_struct *work)
{
struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+#ifdef CONFIG_BNX2X_VXLAN
+ u16 port;
+#endif
rtnl_lock();
&bp->sp_rtnl_state))
bnx2x_update_mng_version(bp);
+#ifdef CONFIG_BNX2X_VXLAN
+ port = bp->vxlan_dst_port;
+ if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+ &bp->sp_rtnl_state)) {
+ if (!bnx2x_vxlan_port_update(bp, port))
+ netdev_info(bp->dev, "Added vxlan dest port %d", port);
+ else
+ bp->vxlan_dst_port = 0;
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT,
+ &bp->sp_rtnl_state)) {
+ if (!bnx2x_vxlan_port_update(bp, 0)) {
+ netdev_info(bp->dev,
+ "Deleted vxlan dest port %d", port);
+ bp->vxlan_dst_port = 0;
+ vxlan_get_rx_port(bp->dev);
+ }
+ }
+#endif
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
rc = bnx2x_nic_load(bp, LOAD_OPEN);
if (rc)
return rc;
+
+#ifdef CONFIG_BNX2X_VXLAN
+ if (IS_PF(bp))
+ vxlan_get_rx_port(dev);
+#endif
+
return 0;
}
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
+#ifdef CONFIG_BNX2X_VXLAN
+ .ndo_add_vxlan_port = bnx2x_add_vxlan_port,
+ .ndo_del_vxlan_port = bnx2x_del_vxlan_port,
+#endif
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
static ssize_t mps_trc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- int i, j, enable, ret;
+ int i, enable, ret;
u32 *data, *mask;
struct trace_params tp;
const struct inode *ino;
unsigned int trcidx;
char *s, *p, *word, *end;
struct adapter *adap;
+ u32 j;
ino = file_inode(file);
trcidx = (uintptr_t)ino->i_private & 3;
if (!strncmp(word, "qid=", 4)) {
end = (char *)word + 4;
- ret = kstrtoul(end, 10, (unsigned long *)&j);
+ ret = kstrtouint(end, 10, &j);
if (ret)
goto out;
if (!adap->trace_rss) {
}
if (!strncmp(word, "snaplen=", 8)) {
end = (char *)word + 8;
- ret = kstrtoul(end, 10, (unsigned long *)&j);
+ ret = kstrtouint(end, 10, &j);
if (ret || j > 9600) {
inval: count = -EINVAL;
goto out;
}
if (!strncmp(word, "minlen=", 7)) {
end = (char *)word + 7;
- ret = kstrtoul(end, 10, (unsigned long *)&j);
+ ret = kstrtouint(end, 10, &j);
if (ret || j > TFMINPKTSIZE_M)
goto inval;
tp.min_len = j;
}
if (*word == '@') {
end = (char *)word + 1;
- ret = kstrtoul(end, 10, (unsigned long *)&j);
+ ret = kstrtouint(end, 10, &j);
if (*end && *end != '\n')
goto inval;
if (j & 7) /* doesn't start at multiple of 8 */
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.83"
+#define DRV_VERSION "2.3.0.12"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
struct vnic_gen_stats gen_stats;
};
+static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
+{
+ struct enic *enic = vdev->priv;
+
+ return enic->netdev;
+}
+
+/* wrapper macros for kernel logging
+ * A variable named vdev, of type struct vnic_dev *, must be in scope in the
+ * block where these macros are used.
+ */
+#define vdev_info(args...) dev_info(&vdev->pdev->dev, args)
+#define vdev_warn(args...) dev_warn(&vdev->pdev->dev, args)
+#define vdev_err(args...) dev_err(&vdev->pdev->dev, args)
+
+#define vdev_netinfo(args...) netdev_info(vnic_get_netdev(vdev), args)
+#define vdev_netwarn(args...) netdev_warn(vnic_get_netdev(vdev), args)
+#define vdev_neterr(args...) netdev_err(vnic_get_netdev(vdev), args)
+
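A usage sketch for these wrappers; example_hook_res() is illustrative, while vnic_dev_get_res() and RES_TYPE_CQ are the driver's own:

static int example_hook_res(struct vnic_dev *vdev, unsigned int index)
{
	void __iomem *ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);

	if (!ctrl) {
		/* vdev is in scope here, so vdev_err() expands cleanly */
		vdev_err("Failed to hook CQ[%d] resource\n", index);
		return -EINVAL;
	}
	return 0;
}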
static inline struct device *enic_get_dev(struct enic *enic)
{
return &(enic->pdev->dev);
goto err_out_iounmap;
}
+ err = vnic_devcmd_init(enic->vdev);
+
+ if (err)
+ goto err_out_vnic_unregister;
+
#ifdef CONFIG_PCI_IOV
/* Get number of subvnics */
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
pci_disable_sriov(pdev);
enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
}
-err_out_vnic_unregister:
#endif
+err_out_vnic_unregister:
vnic_dev_unregister(enic->vdev);
err_out_iounmap:
enic_iounmap(enic);
#include "vnic_dev.h"
#include "vnic_cq.h"
+#include "enic.h"
void vnic_cq_free(struct vnic_cq *cq)
{
cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
if (!cq->ctrl) {
- pr_err("Failed to hook CQ[%d] resource\n", index);
+ vdev_err("Failed to hook CQ[%d] resource\n", index);
return -EINVAL;
}
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
+#include "vnic_wq.h"
#include "vnic_stats.h"
-
-enum vnic_proxy_type {
- PROXY_NONE,
- PROXY_BY_BDF,
- PROXY_BY_INDEX,
-};
-
-struct vnic_res {
- void __iomem *vaddr;
- dma_addr_t bus_addr;
- unsigned int count;
-};
-
-struct vnic_intr_coal_timer_info {
- u32 mul;
- u32 div;
- u32 max_usec;
-};
-
-struct vnic_dev {
- void *priv;
- struct pci_dev *pdev;
- struct vnic_res res[RES_TYPE_MAX];
- enum vnic_dev_intr_mode intr_mode;
- struct vnic_devcmd __iomem *devcmd;
- struct vnic_devcmd_notify *notify;
- struct vnic_devcmd_notify notify_copy;
- dma_addr_t notify_pa;
- u32 notify_sz;
- dma_addr_t linkstatus_pa;
- struct vnic_stats *stats;
- dma_addr_t stats_pa;
- struct vnic_devcmd_fw_info *fw_info;
- dma_addr_t fw_info_pa;
- enum vnic_proxy_type proxy;
- u32 proxy_index;
- u64 args[VNIC_DEVCMD_NARGS];
- struct vnic_intr_coal_timer_info intr_coal_timer_info;
-};
+#include "enic.h"
#define VNIC_MAX_RES_HDR_SIZE \
(sizeof(struct vnic_resource_header) + \
return -EINVAL;
if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
- pr_err("vNIC BAR0 res hdr length error\n");
+ vdev_err("vNIC BAR0 res hdr length error\n");
return -EINVAL;
}
rh = bar->vaddr;
mrh = bar->vaddr;
if (!rh) {
- pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+ vdev_err("vNIC BAR0 res hdr not mem-mapped\n");
return -EINVAL;
}
(ioread32(&rh->version) != VNIC_RES_VERSION)) {
if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
(ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
- pr_err("vNIC BAR0 res magic/version error "
- "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
- VNIC_RES_MAGIC, VNIC_RES_VERSION,
- MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
- ioread32(&rh->magic), ioread32(&rh->version));
+ vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
+ VNIC_RES_MAGIC, VNIC_RES_VERSION,
+ MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
+ ioread32(&rh->magic), ioread32(&rh->version));
return -EINVAL;
}
}
/* each count is stride bytes long */
len = count * VNIC_RES_STRIDE;
if (len + bar_offset > bar[bar_num].len) {
- pr_err("vNIC BAR0 resource %d "
- "out-of-bounds, offset 0x%x + "
- "size 0x%x > bar len 0x%lx\n",
- type, bar_offset,
- len,
- bar[bar_num].len);
+ vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
+ type, bar_offset, len,
+ bar[bar_num].len);
return -EINVAL;
}
break;
case RES_TYPE_INTR_PBA_LEGACY:
case RES_TYPE_DEVCMD:
+ case RES_TYPE_DEVCMD2:
len = count;
break;
default:
&ring->base_addr_unaligned);
if (!ring->descs_unaligned) {
- pr_err("Failed to allocate ring (size=%d), aborting\n",
- (int)ring->size);
+ vdev_err("Failed to allocate ring (size=%d), aborting\n",
+ (int)ring->size);
return -ENOMEM;
}
return -ENODEV;
}
if (status & STAT_BUSY) {
- pr_err("Busy devcmd %d\n", _CMD_N(cmd));
+ vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd));
return -EBUSY;
}
return -err;
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
- pr_err("Error %d devcmd %d\n",
- err, _CMD_N(cmd));
+ vdev_neterr("Error %d devcmd %d\n",
+ err, _CMD_N(cmd));
return -err;
}
}
}
- pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
+ vdev_neterr("Timedout devcmd %d\n", _CMD_N(cmd));
return -ETIMEDOUT;
}
+static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait)
+{
+ struct devcmd2_controller *dc2c = vdev->devcmd2;
+ struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+ unsigned int i;
+ int delay, err;
+ u32 fetch_index, posted, new_posted;
+
+ posted = ioread32(&dc2c->wq_ctrl->posted_index);
+ fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
+
+ if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF)
+ return -ENODEV;
+
+ new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+
+ if (new_posted == fetch_index) {
+ vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
+ _CMD_N(cmd), fetch_index, posted);
+ return -EBUSY;
+ }
+ dc2c->cmd_ring[posted].cmd = cmd;
+ dc2c->cmd_ring[posted].flags = 0;
+
+ if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+ dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
+ if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ dc2c->cmd_ring[posted].args[i] = vdev->args[i];
+
+ /* A write memory barrier prevents compiler and/or CPU reordering,
+ * so the descriptor cannot be posted before it is fully initialized.
+ * Otherwise, hardware could read stale descriptor fields.
+ */
+ wmb();
+ iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
+
+ if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+ return 0;
+
+ for (delay = 0; delay < wait; delay++) {
+ if (result->color == dc2c->color) {
+ dc2c->next_result++;
+ if (dc2c->next_result == dc2c->result_size) {
+ dc2c->next_result = 0;
+ dc2c->color = dc2c->color ? 0 : 1;
+ }
+ if (result->error) {
+ err = result->error;
+ if (err != ERR_ECMDUNKNOWN ||
+ cmd != CMD_CAPABILITY)
+ vdev_neterr("Error %d devcmd %d\n",
+ err, _CMD_N(cmd));
+ return -err;
+ }
+ if (_CMD_DIR(cmd) & _CMD_DIR_READ)
+ for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
+ vdev->args[i] = result->results[i];
+
+ return 0;
+ }
+ udelay(100);
+ }
+
+ vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd));
+
+ return -ETIMEDOUT;
+}
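Completion of a devcmd2 result is detected via the color bit: firmware stamps each result with the current lap's color, and the driver flips the color it expects every time next_result wraps, so stale entries from the previous lap are never mistaken for fresh ones. A condensed sketch of that polling step (dc2c fields as in struct devcmd2_controller, added to vnic_wq.h later in this series):

/* sketch only: mirrors the wait loop in _vnic_dev_cmd2() above */
static bool devcmd2_result_ready(struct devcmd2_controller *dc2c)
{
	struct devcmd2_result *result = dc2c->result + dc2c->next_result;

	if (result->color != dc2c->color)	/* still the previous lap */
		return false;

	if (++dc2c->next_result == dc2c->result_size) {	/* wrapped: flip */
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}
	return true;
}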
+
+static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+{
+ vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+ if (!vdev->devcmd)
+ return -ENODEV;
+ vdev->devcmd_rtn = _vnic_dev_cmd;
+
+ return 0;
+}
+
+static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+{
+ int err;
+ unsigned int fetch_index;
+
+ if (vdev->devcmd2)
+ return 0;
+
+ vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
+ if (!vdev->devcmd2)
+ return -ENOMEM;
+
+ vdev->devcmd2->color = 1;
+ vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
+ err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
+ DEVCMD2_DESC_SIZE);
+ if (err)
+ goto err_free_devcmd2;
+
+ fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ vdev_err("Fatal error in devcmd2 init - hardware surprise removal");
+
+ return -ENODEV;
+ }
+
+ enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
+ 0);
+ vnic_wq_enable(&vdev->devcmd2->wq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+ DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+ if (err)
+ goto err_free_wq;
+
+ vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
+ vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
+ vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
+ vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
+ VNIC_PADDR_TARGET;
+ vdev->args[1] = DEVCMD2_RING_SIZE;
+
+ err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
+ if (err)
+ goto err_free_desc_ring;
+
+ vdev->devcmd_rtn = _vnic_dev_cmd2;
+
+ return 0;
+
+err_free_desc_ring:
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+err_free_wq:
+ vnic_wq_disable(&vdev->devcmd2->wq);
+ vnic_wq_free(&vdev->devcmd2->wq);
+err_free_devcmd2:
+ kfree(vdev->devcmd2);
+ vdev->devcmd2 = NULL;
+
+ return err;
+}
+
+static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+{
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+ vnic_wq_disable(&vdev->devcmd2->wq);
+ vnic_wq_free(&vdev->devcmd2->wq);
+ kfree(vdev->devcmd2);
+}
+
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
vdev->args[2] = *a0;
vdev->args[3] = *a1;
- err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
+ err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
if (err)
return err;
err = (int)vdev->args[1];
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
- pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
+ vdev_neterr("Error %d proxy devcmd %d\n", err,
+ _CMD_N(cmd));
return err;
}
vdev->args[0] = *a0;
vdev->args[1] = *a1;
- err = _vnic_dev_cmd(vdev, cmd, wait);
+ err = vdev->devcmd_rtn(vdev, cmd, wait);
*a0 = vdev->args[0];
*a1 = vdev->args[1];
err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
if (err)
- pr_err("Can't set packet filter\n");
+ vdev_neterr("Can't set packet filter\n");
return err;
}
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
- pr_err("Can't add addr [%pM], %d\n", addr, err);
+ vdev_neterr("Can't add addr [%pM], %d\n", addr, err);
return err;
}
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
- pr_err("Can't del addr [%pM], %d\n", addr, err);
+ vdev_neterr("Can't del addr [%pM], %d\n", addr, err);
return err;
}
dma_addr_t notify_pa;
if (vdev->notify || vdev->notify_pa) {
- pr_err("notify block %p still allocated", vdev->notify);
+ vdev_neterr("notify block %p still allocated", vdev->notify);
return -EINVAL;
}
memset(vdev->args, 0, sizeof(vdev->args));
if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
- err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
+ err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
else
err = ERR_ECMDUNKNOWN;
*/
if ((err == ERR_ECMDUNKNOWN) ||
(!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
- pr_warn("Using default conversion factor for interrupt coalesce timer\n");
+ vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n");
vnic_dev_intr_coal_timer_info_default(vdev);
return 0;
}
pci_free_consistent(vdev->pdev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
+ if (vdev->devcmd2)
+ vnic_dev_deinit_devcmd2(vdev);
+
kfree(vdev);
}
}
if (vnic_dev_discover_res(vdev, bar, num_bars))
goto err_out;
- vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
- if (!vdev->devcmd)
- goto err_out;
-
return vdev;
err_out:
}
EXPORT_SYMBOL(vnic_dev_get_pdev);
+int vnic_devcmd_init(struct vnic_dev *vdev)
+{
+ void __iomem *res;
+ int err;
+
+ res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+ if (res) {
+ err = vnic_dev_init_devcmd2(vdev);
+ if (err)
+ vdev_warn("DEVCMD2 init failed: %d, Using DEVCMD1",
+ err);
+ else
+ return 0;
+ } else {
+ vdev_warn("DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
+ }
+ err = vnic_dev_init_devcmd1(vdev);
+ if (err)
+ vdev_err("DEVCMD1 initialization failed: %d", err);
+
+ return err;
+}
+
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
u64 a0, a1 = len;
unsigned int desc_avail;
};
-struct vnic_dev;
+enum vnic_proxy_type {
+ PROXY_NONE,
+ PROXY_BY_BDF,
+ PROXY_BY_INDEX,
+};
+
+struct vnic_res {
+ void __iomem *vaddr;
+ dma_addr_t bus_addr;
+ unsigned int count;
+};
+
+struct vnic_intr_coal_timer_info {
+ u32 mul;
+ u32 div;
+ u32 max_usec;
+};
+
+struct vnic_dev {
+ void *priv;
+ struct pci_dev *pdev;
+ struct vnic_res res[RES_TYPE_MAX];
+ enum vnic_dev_intr_mode intr_mode;
+ struct vnic_devcmd __iomem *devcmd;
+ struct vnic_devcmd_notify *notify;
+ struct vnic_devcmd_notify notify_copy;
+ dma_addr_t notify_pa;
+ u32 notify_sz;
+ dma_addr_t linkstatus_pa;
+ struct vnic_stats *stats;
+ dma_addr_t stats_pa;
+ struct vnic_devcmd_fw_info *fw_info;
+ dma_addr_t fw_info_pa;
+ enum vnic_proxy_type proxy;
+ u32 proxy_index;
+ u64 args[VNIC_DEVCMD_NARGS];
+ struct vnic_intr_coal_timer_info intr_coal_timer_info;
+ struct devcmd2_controller *devcmd2;
+ int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait);
+};
+
struct vnic_stats;
void *vnic_dev_priv(struct vnic_dev *vdev);
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter *data);
+int vnic_devcmd_init(struct vnic_dev *vdev);
#endif /* _VNIC_DEV_H_ */
*/
CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+ /* Initialization for the devcmd2 interface.
+ * in: (u64) a0 = host result buffer physical address
+ * in: (u16) a1 = number of entries in result buffer
+ */
+ CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57),
+
/* Add a filter.
* in: (u64) a0= filter address
* (u32) a1= size of filter
u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
};
+#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS
+struct vnic_devcmd2 {
+ u16 pad;
+ u16 flags;
+ u32 cmd;
+ u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+ u64 results[VNIC_DEVCMD2_NRESULTS];
+ u32 pad;
+ u16 completed_index;
+ u8 error;
+ u8 color;
+};
+
+#define DEVCMD2_RING_SIZE 32
+#define DEVCMD2_DESC_SIZE 128
+
#endif /* _VNIC_DEVCMD_H_ */
#include "vnic_dev.h"
#include "vnic_intr.h"
+#include "enic.h"
void vnic_intr_free(struct vnic_intr *intr)
{
intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
if (!intr->ctrl) {
- pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
+ vdev_err("Failed to hook INTR[%d].ctrl resource\n", index);
return -EINVAL;
}
RES_TYPE_RSVD7,
RES_TYPE_DEVCMD, /* Device command region */
RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
+ RES_TYPE_SUBVNIC, /* subvnic resource type */
+ RES_TYPE_MQ_WQ, /* MQ Work queues */
+ RES_TYPE_MQ_RQ, /* MQ Receive queues */
+ RES_TYPE_MQ_CQ, /* MQ Completion queues */
+ RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */
+ RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */
+ RES_TYPE_DEVCMD2, /* Device control region */
RES_TYPE_MAX, /* Count of resource types */
};
#include "vnic_dev.h"
#include "vnic_rq.h"
+#include "enic.h"
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
if (!rq->ctrl) {
- pr_err("Failed to hook RQ[%d] resource\n", index);
+ vdev_err("Failed to hook RQ[%d] resource\n", index);
return -EINVAL;
}
int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
+ struct vnic_dev *vdev = rq->vdev;
iowrite32(0, &rq->ctrl->enable);
udelay(10);
}
- pr_err("Failed to disable RQ[%d]\n", rq->index);
+ vdev_neterr("Failed to disable RQ[%d]\n", rq->index);
return -ETIMEDOUT;
}
#include "vnic_dev.h"
#include "vnic_wq.h"
+#include "enic.h"
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
if (!wq->ctrl) {
- pr_err("Failed to hook WQ[%d] resource\n", index);
+ vdev_err("Failed to hook WQ[%d] resource\n", index);
return -EINVAL;
}
return 0;
}
-static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
- unsigned int fetch_index, unsigned int posted_index,
- unsigned int error_interrupt_enable,
- unsigned int error_interrupt_offset)
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ wq->index = 0;
+ wq->vdev = vdev;
+
+ wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+ if (!wq->ctrl)
+ return -EINVAL;
+ vnic_wq_disable(wq);
+ err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+
+ return err;
+}
+
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
{
u64 paddr;
unsigned int count = wq->ring.desc_count;
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
- vnic_wq_init_start(wq, cq_index, 0, 0,
+ enic_wq_init_start(wq, cq_index, 0, 0,
error_interrupt_enable,
error_interrupt_offset);
}
int vnic_wq_disable(struct vnic_wq *wq)
{
unsigned int wait;
+ struct vnic_dev *vdev = wq->vdev;
iowrite32(0, &wq->ctrl->enable);
udelay(10);
}
- pr_err("Failed to disable WQ[%d]\n", wq->index);
+ vdev_neterr("Failed to disable WQ[%d]\n", wq->index);
return -ETIMEDOUT;
}
unsigned int pkts_outstanding;
};
+struct devcmd2_controller {
+ struct vnic_wq_ctrl __iomem *wq_ctrl;
+ struct vnic_devcmd2 *cmd_ring;
+ struct devcmd2_result *result;
+ u16 next_result;
+ u16 result_size;
+ int color;
+ struct vnic_dev_ring results_ring;
+ struct vnic_wq wq;
+};
+
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
/* how many does SW own? */
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size);
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
#endif /* _VNIC_WQ_H_ */
struct device *dev = &adapter->pdev->dev;
int status;
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->vxlan_port != port)
if (!priv->tx_packet_sent || tx_ctrl.ct)
return;
+ /* Ack Tx ctrl register */
+ nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);
+
/* Check Tx transmit error */
if (unlikely(tx_ctrl.et)) {
ndev->stats.tx_errors++;
ndev->stats.tx_bytes += tx_ctrl.nt;
}
- if (priv->tx_skb) {
- dev_kfree_skb(priv->tx_skb);
- priv->tx_skb = NULL;
- }
-
+ dev_kfree_skb(priv->tx_skb);
priv->tx_packet_sent = false;
if (netif_queue_stopped(ndev))
{
struct net_device *ndev = napi->dev;
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_buf_int_enable buf_int_enable;
u32 work_done;
- buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
- buf_int_enable.tx_done = NPS_ENET_ENABLE;
nps_enet_tx_handler(ndev);
work_done = nps_enet_rx_handler(ndev);
if (work_done < budget) {
+ struct nps_enet_buf_int_enable buf_int_enable;
+
napi_complete(napi);
+ buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
+ buf_int_enable.tx_done = NPS_ENET_ENABLE;
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
buf_int_enable.value);
}
{
struct net_device *ndev = dev_instance;
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_buf_int_cause buf_int_cause;
+ struct nps_enet_rx_ctl rx_ctrl;
+ struct nps_enet_tx_ctl tx_ctrl;
- buf_int_cause.value =
- nps_enet_reg_get(priv, NPS_ENET_REG_BUF_INT_CAUSE);
+ rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+ tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
- if (buf_int_cause.tx_done || buf_int_cause.rx_rdy)
+ if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr)
if (likely(napi_schedule_prep(&priv->napi))) {
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
__napi_schedule(&priv->napi);
/* Discard Packets bigger than max frame length */
max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
- if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
+ if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH)
ge_mac_cfg_3->max_len = max_frame_length;
- nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
- ge_mac_cfg_3->value);
- }
/* Enable interrupts */
buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE;
ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE;
ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR;
+ ge_mac_cfg_3->cf_drop = NPS_ENET_ENABLE;
/* Enable Rx and Tx */
ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE;
ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE;
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
+ ge_mac_cfg_3->value);
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
ge_mac_cfg_0.value);
}
/* This driver handles one frame at a time */
netif_stop_queue(ndev);
- nps_enet_send_frame(ndev, skb);
-
priv->tx_skb = skb;
+ nps_enet_send_frame(ndev, skb);
+
return NETDEV_TX_OK;
}
#define NPS_ENET_REG_RX_CTL 0x810
#define NPS_ENET_REG_RX_BUF 0x818
#define NPS_ENET_REG_BUF_INT_ENABLE 0x8C0
-#define NPS_ENET_REG_BUF_INT_CAUSE 0x8C4
#define NPS_ENET_REG_GE_MAC_CFG_0 0x1000
#define NPS_ENET_REG_GE_MAC_CFG_1 0x1004
#define NPS_ENET_REG_GE_MAC_CFG_2 0x1008
};
};
-/* Interrupt cause for data buffer events register */
-struct nps_enet_buf_int_cause {
- union {
- /* tx_done: Interrupt in the case when current frame was
- * read from TX buffer.
- * rx_rdy: Interrupt in the case when new frame is ready
- * in RX buffer.
- */
- struct {
- u32
- __reserved:30,
- tx_done:1,
- rx_rdy:1;
- };
-
- u32 value;
- };
-};
-
/* Gbps Eth MAC Configuration 0 register */
struct nps_enet_ge_mac_cfg_0 {
union {
/* Start Rx/Tx DMA and enable the interrupts */
gfar_start(priv);
+ /* force link state update after mac reset */
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
phy_start(priv->phydev);
enable_napi(priv);
dma_addr = pci_map_single(nic->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE);
/* If we can't map the skb, have the upper layer try later */
- if (pci_dma_mapping_error(nic->pdev, dma_addr))
+ if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
return -ENOMEM;
+ }
/*
* Use the last 4 bytes of the SKB payload packet as the CRC, used for
nic->params.cbs.max * sizeof(struct cb),
sizeof(u32),
0);
+ if (!nic->cbs_pool) {
+ netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
+ err = -ENOMEM;
+ goto err_out_pool;
+ }
netif_info(nic, probe, nic->netdev,
"addr 0x%llx, irq %d, MAC addr %pM\n",
(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
return 0;
+err_out_pool:
+ unregister_netdev(netdev);
err_out_free:
e100_free(nic);
err_out_iounmap:
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
-
- writel(0, rx_ring->head);
- if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_rdt_wa(rx_ring, 0);
- else
- writel(0, rx_ring->tail);
}
static void e1000e_downshift_workaround(struct work_struct *work)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
-
- writel(0, tx_ring->head);
- if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_tdt_wa(tx_ring, 0);
- else
- writel(0, tx_ring->tail);
}
/**
tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
+ writel(0, tx_ring->head);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_tdt_wa(tx_ring, 0);
+ else
+ writel(0, tx_ring->tail);
+
/* Set the Tx Interrupt Delay register */
ew32(TIDV, adapter->tx_int_delay);
/* Tx irq moderation */
rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
+ writel(0, rx_ring->head);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(rx_ring, 0);
+ else
+ writel(0, rx_ring->tail);
+
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
if (adapter->netdev->features & NETIF_F_RXCSUM)
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1111_I_PHY_ID:
else
phy->ops.get_cable_length = igb_get_cable_length_m88;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
- /* Check if this PHY is confgured for media swap. */
+ /* Check if this PHY is configured for media swap. */
if (phy->id == M88E1112_E_PHY_ID) {
u16 data;
hw->mac.ops.check_for_link =
igb_check_for_link_media_swap;
}
+ if (phy->id == M88E1512_E_PHY_ID) {
+ ret_val = igb_initialize_M88E1512_phy(hw);
+ if (ret_val)
+ goto out;
+ }
break;
case IGP03E1000_E_PHY_ID:
phy->type = e1000_phy_igp_3;
**/
static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
{
+ struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
/* This isn't a true "hard" reset, but is the only reset
goto out;
ret_val = igb_phy_sw_reset(hw);
+ if (ret_val)
+ goto out;
+ if (phy->id == M88E1512_E_PHY_ID)
+ ret_val = igb_initialize_M88E1512_phy(hw);
out:
return ret_val;
}
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I210_I_PHY_ID:
ret_val = igb_copper_link_setup_m88_gen2(hw);
break;
u16 phy_data;
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1543_E_PHY_ID))
+ ((phy->id != M88E1543_E_PHY_ID) &&
+ (phy->id != M88E1512_E_PHY_ID)))
goto out;
if (!hw->dev_spec._82575.eee_disable) {
/* Check if EEE is supported on this device. */
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1543_E_PHY_ID))
+ ((phy->id != M88E1543_E_PHY_ID) &&
+ (phy->id != M88E1512_E_PHY_ID)))
goto out;
ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
#define E1000_M88E1112_PAGE_ADDR 0x16
#define E1000_M88E1112_STATUS 0x01
+#define E1000_M88E1512_CFG_REG_1 0x0010
+#define E1000_M88E1512_CFG_REG_2 0x0011
+#define E1000_M88E1512_CFG_REG_3 0x0007
+#define E1000_M88E1512_MODE 0x0014
/* PCI Express Control */
#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
#define M88_VENDOR 0x0141
#define I210_I_PHY_ID 0x01410C00
#define M88E1543_E_PHY_ID 0x01410EA0
+#define M88E1512_E_PHY_ID 0x01410DD0
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
switch (hw->phy.id) {
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I210_I_PHY_ID:
reset_dsp = false;
break;
reset_dsp = false;
break;
}
- if (!reset_dsp)
+ if (!reset_dsp) {
hw_dbg("Link taking longer than expected.\n");
- else {
+ } else {
/* We didn't get link.
* Reset the DSP and cross our fingers.
*/
if (hw->phy.type != e1000_phy_m88 ||
hw->phy.id == I347AT4_E_PHY_ID ||
hw->phy.id == M88E1112_E_PHY_ID ||
+ hw->phy.id == M88E1543_E_PHY_ID ||
+ hw->phy.id == M88E1512_E_PHY_ID ||
hw->phy.id == I210_I_PHY_ID)
goto out;
phy->cable_length = phy_data / (is_cm ? 100 : 1);
break;
case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
return 0;
}
+/**
+ * igb_initialize_M88E1512_phy - Initialize M88E1512 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the Marvell 1512 PHY to work correctly with Avoton.
+ **/
+s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+
+ /* Switch to PHY page 0xFF. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0xFB. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0x12. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+ if (ret_val)
+ goto out;
+
+ /* Change mode to SGMII-to-Copper */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_phy_sw_reset(hw);
+ if (ret_val) {
+ hw_dbg("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ /* give the PHY time to settle after the soft reset */
+ usleep_range(1000, 2000);
+out:
+ return ret_val;
+}
+
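The long run of write_reg calls above is a vendor-specified page/register sequence: each write to E1000_M88E1543_PAGE_ADDR selects a Marvell PHY page, and the following write lands on that page. A hypothetical condensation of one step (igb_m88_paged_write() is illustrative, not an igb API):

static s32 igb_m88_paged_write(struct e1000_hw *hw, u16 page, u32 offset,
			       u16 data)
{
	s32 ret_val;

	/* select the target page first, then write the register on it */
	ret_val = hw->phy.ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, page);
	if (ret_val)
		return ret_val;
	return hw->phy.ops.write_reg(hw, offset, data);
}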
/**
* igb_power_up_phy_copper - Restore copper link in case of PHY power down
* @hw: pointer to the HW structure
void igb_power_up_phy_copper(struct e1000_hw *hw);
void igb_power_down_phy_copper(struct e1000_hw *hw);
s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
+s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw);
s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */
#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */
#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */
+#define E1000_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */
#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */
#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */
struct sk_buff *skb);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
{
struct igb_adapter *adapter = netdev_priv(netdev);
unsigned int count = ch->combined_count;
+ unsigned int max_combined = 0;
/* Verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
return -EINVAL;
/* Verify the number of channels doesn't exceed hw limits */
- if (count > igb_max_channels(adapter))
+ max_combined = igb_max_channels(adapter);
+ if (count > max_combined)
return -EINVAL;
if (count != adapter->rss_queues) {
adapter->rss_queues = count;
+ igb_set_flag_queue_pairs(adapter, max_combined);
/* Hardware has to reinitialize queues and interrupts to
* match the new configuration.
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
+static int igb_disable_sriov(struct pci_dev *dev);
+static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif
#ifdef CONFIG_PM
/* allocate q_vector and rings */
q_vector = adapter->q_vector[v_idx];
- if (!q_vector)
+ if (!q_vector) {
q_vector = kzalloc(size, GFP_KERNEL);
- else
+ } else if (size > ksize(q_vector)) {
+ kfree_rcu(q_vector, rcu);
+ q_vector = kzalloc(size, GFP_KERNEL);
+ } else {
memset(q_vector, 0, size);
+ }
if (!q_vector)
return -ENOMEM;
if (hw->flash_address)
iounmap(hw->flash_address);
err_sw_init:
+ kfree(adapter->shadow_vfta);
igb_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_PCI_IOV
+ igb_disable_sriov(pdev);
+#endif
pci_iounmap(pdev, hw->hw_addr);
err_ioremap:
free_netdev(netdev);
*/
igb_release_hw_control(adapter);
- unregister_netdev(netdev);
-
- igb_clear_interrupt_scheme(adapter);
-
#ifdef CONFIG_PCI_IOV
igb_disable_sriov(pdev);
#endif
+ unregister_netdev(netdev);
+
+ igb_clear_interrupt_scheme(adapter);
+
pci_iounmap(pdev, hw->hw_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
return;
pci_sriov_set_totalvfs(pdev, 7);
- igb_pci_enable_sriov(pdev, max_vfs);
+ igb_enable_sriov(pdev, max_vfs);
#endif /* CONFIG_PCI_IOV */
}
adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+ igb_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
+ const u32 max_rss_queues)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
/* Determine if we need to pair queues. */
switch (hw->mac.type) {
case e1000_82575:
}
#endif /* CONFIG_PCI_IOV */
+ igb_probe_vfs(adapter);
+
igb_init_queue_configuration(adapter);
/* Setup and initialize a copy of the hw vlan table array */
return -ENOMEM;
}
- igb_probe_vfs(adapter);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
igb_irq_disable(adapter);
if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ rtnl_unlock();
return -ENOMEM;
}
igb_init_queue_configuration(adapter);
if (igb_init_interrupt_scheme(adapter, true)) {
+ rtnl_unlock();
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
wr32(E1000_CTRL_EXT, ctrl_ext);
}
-static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
+static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq)
{
static const u32 aux0_sel_sdp[IGB_N_SDP] = {
AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
};
+ static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = {
+ TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0,
+ TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0,
+ };
+ static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = {
+ TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
+ TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
+ };
static const u32 ts_sdp_sel_clr[IGB_N_SDP] = {
TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
tssdp &= ~AUX1_TS_SDP_EN;
tssdp &= ~ts_sdp_sel_clr[pin];
- if (chan == 1)
- tssdp |= ts_sdp_sel_tt1[pin];
- else
- tssdp |= ts_sdp_sel_tt0[pin];
-
+ if (freq) {
+ if (chan == 1)
+ tssdp |= ts_sdp_sel_fc1[pin];
+ else
+ tssdp |= ts_sdp_sel_fc0[pin];
+ } else {
+ if (chan == 1)
+ tssdp |= ts_sdp_sel_tt1[pin];
+ else
+ tssdp |= ts_sdp_sel_tt0[pin];
+ }
tssdp |= ts_sdp_en[pin];
wr32(E1000_TSSDP, tssdp);
struct igb_adapter *igb =
container_of(ptp, struct igb_adapter, ptp_caps);
struct e1000_hw *hw = &igb->hw;
- u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
+ u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
unsigned long flags;
struct timespec ts;
- int pin = -1;
+ int use_freq = 0, pin = -1;
s64 ns;
switch (rq->type) {
ts.tv_nsec = rq->perout.period.nsec;
ns = timespec_to_ns(&ts);
ns = ns >> 1;
- if (on && ns < 500000LL) {
- /* 2k interrupts per second is an awful lot. */
- return -EINVAL;
+ if (on && ns <= 70000000LL) {
+ if (ns < 8LL)
+ return -EINVAL;
+ use_freq = 1;
}
ts = ns_to_timespec(ns);
if (rq->perout.index == 1) {
- tsauxc_mask = TSAUXC_EN_TT1;
- tsim_mask = TSINTR_TT1;
+ if (use_freq) {
+ tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1;
+ tsim_mask = 0;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TT1;
+ tsim_mask = TSINTR_TT1;
+ }
trgttiml = E1000_TRGTTIML1;
trgttimh = E1000_TRGTTIMH1;
+ freqout = E1000_FREQOUT1;
} else {
- tsauxc_mask = TSAUXC_EN_TT0;
- tsim_mask = TSINTR_TT0;
+ if (use_freq) {
+ tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0;
+ tsim_mask = 0;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TT0;
+ tsim_mask = TSINTR_TT0;
+ }
trgttiml = E1000_TRGTTIML0;
trgttimh = E1000_TRGTTIMH0;
+ freqout = E1000_FREQOUT0;
}
spin_lock_irqsave(&igb->tmreg_lock, flags);
tsauxc = rd32(E1000_TSAUXC);
tsim = rd32(E1000_TSIM);
+ if (rq->perout.index == 1) {
+ tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1);
+ tsim &= ~TSINTR_TT1;
+ } else {
+ tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0);
+ tsim &= ~TSINTR_TT0;
+ }
if (on) {
int i = rq->perout.index;
-
- igb_pin_perout(igb, i, pin);
+ igb_pin_perout(igb, i, pin, use_freq);
igb->perout[i].start.tv_sec = rq->perout.start.sec;
igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
igb->perout[i].period.tv_sec = ts.tv_sec;
igb->perout[i].period.tv_nsec = ts.tv_nsec;
wr32(trgttimh, rq->perout.start.sec);
wr32(trgttiml, rq->perout.start.nsec);
+ if (use_freq)
+ wr32(freqout, ns);
tsauxc |= tsauxc_mask;
tsim |= tsim_mask;
- } else {
- tsauxc &= ~tsauxc_mask;
- tsim &= ~tsim_mask;
}
wr32(E1000_TSAUXC, tsauxc);
wr32(E1000_TSIM, tsim);
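In frequency-output mode the FREQOUT registers generate the waveform with no interrupts at all, for half-periods between 8 ns and 70 ms. From userspace the feature is reached through the standard PTP character device; a hedged sketch follows (the /dev/ptp0 path and pin index 0 are assumptions, the ioctls are standard PTP UAPI):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int request_perout(void)
{
	struct ptp_pin_desc pin;
	struct ptp_perout_request req;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&pin, 0, sizeof(pin));
	pin.index = 0;
	pin.func = PTP_PF_PEROUT;	/* route perout channel 0 to pin 0 */
	pin.chan = 0;
	if (ioctl(fd, PTP_PIN_SETFUNC, &pin))
		return -1;

	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.period.sec = 0;
	req.period.nsec = 1000;		/* 1 us period -> ~1 MHz output;
					 * 500 ns half-period takes the new
					 * interrupt-free frequency path
					 */
	return ioctl(fd, PTP_PEROUT_REQUEST, &req);
}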
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
skb_put(skb, hlen);
}
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
struct pci_dev *pdev;
- /* determine whether to use the the parent device
- */
+ /* determine whether to use the parent device */
if (ixgbe_pcie_from_parent(&adapter->hw))
pdev = adapter->pdev->bus->parent->self;
else
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
-#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C))
-#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
-#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00))
-#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00))
-#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520))
-#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00))
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
+#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
+#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
#define MLX5E_MAX_NUM_TC 8
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
-#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
+#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
+#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
+#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_BF_BUDGET 16
"lro_bytes",
"rx_csum_good",
"rx_csum_none",
+ "rx_csum_sw",
"tx_csum_offload",
"tx_queue_stopped",
"tx_queue_wake",
u64 lro_bytes;
u64 rx_csum_good;
u64 rx_csum_none;
+ u64 rx_csum_sw;
u64 tx_csum_offload;
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 rx_wqe_err;
-#define NUM_VPORT_COUNTERS 31
+#define NUM_VPORT_COUNTERS 32
};
static const char pport_strings[][ETH_GSTRING_LEN] = {
static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
"packets",
"csum_none",
+ "csum_sw",
"lro_packets",
"lro_bytes",
"wqe_err"
struct mlx5e_rq_stats {
u64 packets;
u64 csum_none;
+ u64 csum_sw;
u64 lro_packets;
u64 lro_bytes;
u64 wqe_err;
-#define NUM_RQ_STATS 5
+#define NUM_RQ_STATS 6
};
static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
u16 tx_cq_moderation_usec;
u16 tx_cq_moderation_pkts;
u16 min_rx_wqes;
- u16 rx_hash_log_tbl_sz;
bool lro_en;
u32 lro_wqe_sz;
- u8 rss_hfunc;
u16 tx_max_inline;
+ u8 rss_hfunc;
+ u8 toeplitz_hash_key[40];
+ u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
};
enum {
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
u32 link_modes;
u32 speed;
u32 eth_proto_cap, eth_proto_admin;
- u8 port_status;
+ enum mlx5_port_status ps;
int err;
speed = ethtool_cmd_speed(cmd);
if (link_modes == eth_proto_admin)
goto out;
- err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
- if (err) {
- netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
- __func__, err);
- goto out;
- }
-
- err = mlx5_query_port_status(mdev, &port_status);
- if (err)
- goto out;
-
- if (port_status == MLX5_PORT_DOWN)
- return 0;
+ mlx5_query_port_admin_status(mdev, &ps);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+ mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
- err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
- if (err)
- goto out;
- err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
out:
return err;
}
+static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return sizeof(priv->params.toeplitz_hash_key);
+}
+
+static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return MLX5E_INDIR_RQT_SIZE;
+}
+
static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (indir)
+ memcpy(indir, priv->params.indirection_rqt,
+ sizeof(priv->params.indirection_rqt));
+
+ if (key)
+ memcpy(key, priv->params.toeplitz_hash_key,
+ sizeof(priv->params.toeplitz_hash_key));
+
if (hfunc)
*hfunc = priv->params.rss_hfunc;
const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ bool close_open;
int err = 0;
- if (hfunc == ETH_RSS_HASH_NO_CHANGE)
- return 0;
-
- if ((hfunc != ETH_RSS_HASH_XOR) &&
+ if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
+ (hfunc != ETH_RSS_HASH_XOR) &&
(hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
mutex_lock(&priv->state_lock);
- priv->params.rss_hfunc = hfunc;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- mlx5e_close_locked(dev);
- err = mlx5e_open_locked(dev);
+ if (indir) {
+ memcpy(priv->params.indirection_rqt, indir,
+ sizeof(priv->params.indirection_rqt));
+ mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
}
+ close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
+ test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (close_open)
+ mlx5e_close_locked(dev);
+
+ if (key)
+ memcpy(priv->params.toeplitz_hash_key, key,
+ sizeof(priv->params.toeplitz_hash_key));
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+ priv->params.rss_hfunc = hfunc;
+
+ if (close_open)
+ err = mlx5e_open_locked(priv->netdev);
+
mutex_unlock(&priv->state_lock);
return err;
}
+static int mlx5e_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = priv->params.num_channels;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
static int mlx5e_get_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
void *data)
return err;
}
+static void mlx5e_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
+ &pauseparam->tx_pause);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
+ __func__, err);
+ }
+}
+
+static int mlx5e_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ if (pauseparam->autoneg)
+ return -EINVAL;
+
+ err = mlx5_set_port_pause(mdev,
+ pauseparam->rx_pause ? 1 : 0,
+ pauseparam->tx_pause ? 1 : 0);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
+ __func__, err);
+ }
+
+ return err;
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.set_coalesce = mlx5e_set_coalesce,
.get_settings = mlx5e_get_settings,
.set_settings = mlx5e_set_settings,
+ .get_rxfh_key_size = mlx5e_get_rxfh_key_size,
+ .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
+ .get_rxnfc = mlx5e_get_rxnfc,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
+ .get_pauseparam = mlx5e_get_pauseparam,
+ .set_pauseparam = mlx5e_set_pauseparam,
};
s->lro_packets = 0;
s->lro_bytes = 0;
s->rx_csum_none = 0;
+ s->rx_csum_sw = 0;
s->rx_wqe_err = 0;
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
s->lro_packets += rq_stats->lro_packets;
s->lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
+ s->rx_csum_sw += rq_stats->csum_sw;
s->rx_wqe_err += rq_stats->wqe_err;
for (j = 0; j < priv->params.num_tc; j++) {
/* Update calculated offload counters */
s->tx_csum_offload = s->tx_packets - tx_offload_none;
- s->rx_csum_good = s->rx_packets - s->rx_csum_none;
+ s->rx_csum_good = s->rx_packets - s->rx_csum_none -
+ s->rx_csum_sw;
mlx5e_update_pport_counters(priv);
free_out:
return inv;
}
+static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+ int ix = i;
+
+ if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
+
+ ix = priv->params.indirection_rqt[ix];
+ ix = ix % priv->params.num_channels;
+ MLX5_SET(rqtc, rqtc, rq_num[i],
+ test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[ix]->rq.rqn :
+ priv->drop_rq.rqn);
+ }
+}
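For the XOR hash function the indirection-table index is bit-reversed before the lookup, presumably to spread adjacent hash values more evenly across channels. mlx5e_bits_invert() itself is not shown in this series; a sketch of what it is assumed to do:

/* assumption: mirror the low `size` bits of v, so with size = 7
 * index 0b0000001 maps to 0b1000000, 0b0000011 to 0b1100000, etc.
 */
static u32 bits_invert_sketch(u32 v, int size)
{
	u32 ret = 0;
	int i;

	for (i = 0; i < size; i++)
		if (v & BIT(i))
			ret |= BIT(size - i - 1);
	return ret;
}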
+
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
enum mlx5e_rqt_ix rqt_ix)
{
- int i;
- int log_sz;
switch (rqt_ix) {
case MLX5E_INDIRECTION_RQT:
- log_sz = priv->params.rx_hash_log_tbl_sz;
- for (i = 0; i < (1 << log_sz); i++) {
- int ix = i;
-
- if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
- ix = mlx5e_bits_invert(i, log_sz);
-
- ix = ix % priv->params.num_channels;
- MLX5_SET(rqtc, rqtc, rq_num[i],
- test_bit(MLX5E_STATE_OPENED, &priv->state) ?
- priv->channel[ix]->rq.rqn :
- priv->drop_rq.rqn);
- }
+ mlx5e_fill_indir_rqt_rqns(priv, rqtc);
break;
u32 *in;
void *rqtc;
int inlen;
- int log_sz;
int sz;
int err;
- log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
- priv->params.rx_hash_log_tbl_sz;
- sz = 1 << log_sz;
+ sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
return err;
}
-static int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 *in;
void *rqtc;
int inlen;
- int log_sz;
int sz;
int err;
- log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
- priv->params.rx_hash_log_tbl_sz;
- sz = 1 << log_sz;
+ sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
MLX5_CAP_ETH(priv->mdev,
- lro_timer_supported_periods[3]));
+ lro_timer_supported_periods[2]));
}
static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
rx_hash_toeplitz_key);
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- netdev_rss_key_fill(rss_key, len);
+ memcpy(rss_key, priv->params.toeplitz_hash_key, len);
}
break;
}
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
- int num_comp_vectors)
+ int num_channels)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int i;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
priv->params.min_rx_wqes =
MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
- priv->params.rx_hash_log_tbl_sz =
- (order_base_2(num_comp_vectors) >
- MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
- order_base_2(num_comp_vectors) :
- MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
priv->params.num_tc = 1;
priv->params.default_vlan_prio = 0;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
- priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
+ netdev_rss_key_fill(priv->params.toeplitz_hash_key,
+ sizeof(priv->params.toeplitz_hash_key));
+
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+ priv->params.indirection_rqt[i] = i % num_channels;
+
priv->params.lro_wqe_sz =
MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
priv->mdev = mdev;
priv->netdev = netdev;
- priv->params.num_channels = num_comp_vectors;
+ priv->params.num_channels = num_channels;
priv->default_vlan_prio = priv->params.default_vlan_prio;
spin_lock_init(&priv->async_events_spinlock);
{
struct net_device *netdev;
struct mlx5e_priv *priv;
- int ncv = mdev->priv.eq_table.num_comp_vectors;
+ int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors,
+ MLX5E_MAX_NUM_CHANNELS);
int err;
if (mlx5e_check_required_hca_cap(mdev))
return NULL;
- netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
}
- mlx5e_build_netdev_priv(mdev, netdev, ncv);
+ mlx5e_build_netdev_priv(mdev, netdev, nch);
mlx5e_build_netdev(netdev);
netif_carrier_off(netdev);
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
sizeof(struct iphdr));
ipv6 = NULL;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
} else {
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
sizeof(struct ipv6hdr));
ipv4 = NULL;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
}
if (get_cqe_lro_tcppsh(cqe))
skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}
+static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+{
+ __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+
+ return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+}
+
+static inline void mlx5e_handle_csum(struct net_device *netdev,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq,
+ struct sk_buff *skb)
+{
+ if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+ goto csum_none;
+
+ if (likely(cqe->hds_ip_ext & CQE_L4_OK)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if (is_first_ethertype_ip(skb)) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ rq->stats.csum_sw++;
+ } else {
+ goto csum_none;
+ }
+
+ return;
+
+csum_none:
+ skb->ip_summed = CHECKSUM_NONE;
+ rq->stats.csum_none++;
+}
+
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
struct sk_buff *skb)
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe);
- skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
rq->stats.lro_packets++;
rq->stats.lro_bytes += cqe_bcnt;
}
- if (likely(netdev->features & NETIF_F_RXCSUM) &&
- (cqe->hds_ip_ext & CQE_L2_OK) &&
- (cqe->hds_ip_ext & CQE_L3_OK) &&
- (cqe->hds_ip_ext & CQE_L4_OK)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- skb->ip_summed = CHECKSUM_NONE;
- rq->stats.csum_none++;
- }
+ mlx5e_handle_csum(netdev, cqe, rq, skb);
skb->protocol = eth_type_trans(skb, netdev);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status)
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status)
{
u32 in[MLX5_ST_SZ_DW(paos_reg)];
u32 out[MLX5_ST_SZ_DW(paos_reg)];
memset(in, 0, sizeof(in));
+ MLX5_SET(paos_reg, in, local_port, 1);
MLX5_SET(paos_reg, in, admin_status, status);
MLX5_SET(paos_reg, in, ase, 1);
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
+EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status)
{
u32 in[MLX5_ST_SZ_DW(paos_reg)];
u32 out[MLX5_ST_SZ_DW(paos_reg)];
memset(in, 0, sizeof(in));
+ MLX5_SET(paos_reg, in, local_port, 1);
+
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 0);
if (err)
return err;
- *status = MLX5_GET(paos_reg, out, oper_status);
+ *status = MLX5_GET(paos_reg, out, admin_status);
return err;
}
+EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
int *max_mtu, int *oper_mtu, u8 port)
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+ MLX5_SET(pfcc_reg, in, pptx, tx_pause);
+ MLX5_SET(pfcc_reg, in, pprx, rx_pause);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
+
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 0);
+ if (err)
+ return err;
+
+ if (rx_pause)
+ *rx_pause = MLX5_GET(pfcc_reg, out, pprx);
+
+ if (tx_pause)
+ *tx_pause = MLX5_GET(pfcc_reg, out, pptx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
- err = dma_mapping_error(adapter->dev,
- sg_dma_address(&tx_ctl->sg));
- if (err) {
+ if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+ err = -ENOMEM;
sg_dma_address(&tx_ctl->sg) = 0;
goto err;
}
static int smsc911x_probe_config(struct smsc911x_platform_config *config,
struct device *dev)
{
+ int phy_interface;
u32 width = 0;
- if (!dev)
- return -ENODEV;
+ phy_interface = device_get_phy_mode(dev);
+ if (phy_interface < 0)
+ return phy_interface;
- config->phy_interface = device_get_phy_mode(dev);
+ config->phy_interface = phy_interface;
device_get_mac_address(dev, config->mac, ETH_ALEN);
SET_NETDEV_DEVTYPE(dev, &geneve_type);
- dev->tx_queue_len = 0;
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
netif_keep_dst(dev);
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
}
static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
struct hv_device *dev = net_device_ctx->device_ctx;
struct netvsc_device *nvdev = hv_get_drvdata(dev);
struct netvsc_device_info device_info;
- const u32 num_chn = nvdev->num_chn;
- const u32 max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
+ u32 num_chn;
+ u32 max_chn;
int ret = 0;
bool recovering = false;
if (!nvdev || nvdev->destroy)
return -ENODEV;
+ num_chn = nvdev->num_chn;
+ max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
+
if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
pr_info("vRSS unsupported before NVSP Version 5\n");
return -EINVAL;
struct at86rf230_state_change irq;
- bool tx_aret;
unsigned long cal_timeout;
- s8 max_frame_retries;
bool is_tx;
bool is_tx_from_off;
u8 tx_retry;
enable_irq(ctx->irq);
- ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret);
+ ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
}
static void
{
if (lp->is_tx) {
lp->is_tx = 0;
-
- if (lp->tx_aret)
- at86rf230_async_state_change(lp, &lp->irq,
- STATE_FORCE_TX_ON,
- at86rf230_tx_trac_status,
- true);
- else
- at86rf230_async_state_change(lp, &lp->irq,
- STATE_RX_AACK_ON,
- at86rf230_tx_complete,
- true);
+ at86rf230_async_state_change(lp, &lp->irq,
+ STATE_FORCE_TX_ON,
+ at86rf230_tx_trac_status,
+ true);
} else {
at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
at86rf230_rx_trac_check, true);
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- /* In ARET mode we need to go into STATE_TX_ARET_ON after we
- * are in STATE_TX_ON. The pfad differs here, so we change
- * the complete handler.
- */
- if (lp->tx_aret) {
- if (lp->is_tx_from_off) {
- lp->is_tx_from_off = false;
- at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
- at86rf230_write_frame,
- false);
- } else {
- at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
- at86rf230_xmit_tx_on,
- false);
- }
+	/* check if we are coming from the off state */
+ if (lp->is_tx_from_off) {
+ lp->is_tx_from_off = false;
+ at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
+ at86rf230_write_frame,
+ false);
} else {
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
- at86rf230_write_frame, false);
+ at86rf230_xmit_tx_on,
+ false);
}
}
at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
{
struct at86rf230_local *lp = hw->priv;
- int rc = 0;
-
- lp->tx_aret = retries >= 0;
- lp->max_frame_retries = retries;
- if (retries >= 0)
- rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
-
- return rc;
+ return at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
}
static int
if (!spi_pdata)
return -ENOENT;
*pdata = *spi_pdata;
+ priv->fifo_pin = pdata->fifo;
return 0;
}
ether_setup(dev);
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
dev->netdev_ops = &ipvlan_netdev_ops;
dev->destructor = free_netdev;
dev->header_ops = &ipvlan_header_ops;
dev->ethtool_ops = &ipvlan_ethtool_ops;
- dev->tx_queue_len = 0;
}
static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
dev->mtu = 64 * 1024;
dev->hard_header_len = ETH_HLEN; /* 14 */
dev->addr_len = ETH_ALEN; /* 6 */
- dev->tx_queue_len = 0;
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
netif_keep_dst(dev);
dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
static void nlmon_setup(struct net_device *dev)
{
dev->type = ARPHRD_NETLINK;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &nlmon_ops;
dev->ethtool_ops = &nlmon_ethtool_ops;
bool needs_aneg = false, do_suspend = false;
enum phy_state old_state;
int err = 0;
+ int old_link;
mutex_lock(&phydev->lock);
phydev->adjust_link(phydev->attached_dev);
break;
case PHY_RUNNING:
- /* Only register a CHANGE if we are
- * polling or ignoring interrupts
+ /* Only register a CHANGE if we are polling or ignoring
+	 * interrupts and the link changed since the last check.
*/
- if (!phy_interrupt_is_valid(phydev))
- phydev->state = PHY_CHANGELINK;
+ if (!phy_interrupt_is_valid(phydev)) {
+ old_link = phydev->link;
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (old_link != phydev->link)
+ phydev->state = PHY_CHANGELINK;
+ }
break;
case PHY_CHANGELINK:
err = phy_read_status(phydev);
}
/*
- * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
- * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner
- * does send the pulses within this interval, the PHY will remained powered
- * down.
- *
- * This workaround will manually toggle the PHY on/off upon calls to read_status
- * in order to generate link test pulses if the link is down. If a link partner
- * is present, it will respond to the pulses, which will cause the ENERGYON bit
- * to be set and will cause the EDPD mode to be exited.
+ * The LAN87xx can rarely fail to set the ENERGYON bit when an Ethernet cable
+ * is plugged in while the PHY is in Energy Detect Power-Down mode, which makes
+ * cable plug-in detection unreliable.
+ * This workaround disables Energy Detect Power-Down mode and waits for a
+ * response to link pulses to detect the presence of a plugged Ethernet cable.
+ * Energy Detect Power-Down mode is re-enabled at the end of the procedure to
+ * save approximately 220 mW of power if the cable is unplugged.
*/
static int lan87xx_read_status(struct phy_device *phydev)
{
int err = genphy_read_status(phydev);
+ int i;
if (!phydev->link) {
/* Disable EDPD to wake up PHY */
if (rc < 0)
return rc;
- /* Sleep 64 ms to allow ~5 link test pulses to be sent */
- msleep(64);
+ /* Wait max 640 ms to detect energy */
+ for (i = 0; i < 64; i++) {
+ /* Sleep to allow link test pulses to be sent */
+ msleep(10);
+ rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+ if (rc & MII_LAN83C185_ENERGYON)
+ break;
+ }
/* Re-enable EDPD */
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
/* basic functions */
.config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
+ .read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+ struct file *file, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
-static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
file->private_data = NULL;
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
+ rtnl_lock();
if (file == ppp->owner)
- ppp_shutdown_interface(ppp);
+ unregister_netdevice(ppp->dev);
+ rtnl_unlock();
}
if (atomic_dec_and_test(&pf->refcnt)) {
switch (pf->kind) {
mutex_lock(&ppp_mutex);
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
+ rtnl_lock();
if (file == ppp->owner)
- ppp_shutdown_interface(ppp);
+ unregister_netdevice(ppp->dev);
+ rtnl_unlock();
}
if (atomic_long_read(&file->f_count) < 2) {
ppp_release(NULL, file);
/* Create a new ppp unit */
if (get_user(unit, p))
break;
- ppp = ppp_create_interface(net, unit, &err);
+ ppp = ppp_create_interface(net, unit, file, &err);
if (!ppp)
break;
file->private_data = &ppp->file;
- ppp->owner = file;
err = -EFAULT;
if (put_user(ppp->file.index, p))
break;
static __net_exit void ppp_exit_net(struct net *net)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
+ struct ppp *ppp;
+ LIST_HEAD(list);
+ int id;
+
+ rtnl_lock();
+ idr_for_each_entry(&pn->units_idr, ppp, id)
+ unregister_netdevice_queue(ppp->dev, &list);
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
idr_destroy(&pn->units_idr);
}
return 0;
}
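+/* ndo_uninit: runs under rtnl during unregister_netdevice(). Marks the unit
+ * as closing, releases its index in units_idr, detaches the owning file and
+ * wakes any reader sleeping on the unit.
+ */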
+static void ppp_dev_uninit(struct net_device *dev)
+{
+ struct ppp *ppp = netdev_priv(dev);
+ struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+ ppp_lock(ppp);
+ ppp->closing = 1;
+ ppp_unlock(ppp);
+
+ mutex_lock(&pn->all_ppp_mutex);
+ unit_put(&pn->units_idr, ppp->file.index);
+ mutex_unlock(&pn->all_ppp_mutex);
+
+ ppp->owner = NULL;
+
+ ppp->file.dead = 1;
+ wake_up_interruptible(&ppp->file.rwait);
+}
+
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
+ .ndo_uninit = ppp_dev_uninit,
.ndo_start_xmit = ppp_start_xmit,
.ndo_do_ioctl = ppp_net_ioctl,
.ndo_get_stats64 = ppp_get_stats64,
* or if there is already a unit with the requested number.
* unit == -1 means allocate a new number.
*/
-static struct ppp *
-ppp_create_interface(struct net *net, int unit, int *retp)
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+ struct file *file, int *retp)
{
struct ppp *ppp;
struct ppp_net *pn;
ppp->mru = PPP_MRU;
init_ppp_file(&ppp->file, INTERFACE);
ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
+ ppp->owner = file;
for (i = 0; i < NUM_NP; ++i)
ppp->npmode[i] = NPMODE_PASS;
INIT_LIST_HEAD(&ppp->channels);
init_waitqueue_head(&pf->rwait);
}
-/*
- * Take down a ppp interface unit - called when the owning file
- * (the one that created the unit) is closed or detached.
- */
-static void ppp_shutdown_interface(struct ppp *ppp)
-{
- struct ppp_net *pn;
-
- pn = ppp_pernet(ppp->ppp_net);
- mutex_lock(&pn->all_ppp_mutex);
-
- /* This will call dev_close() for us. */
- ppp_lock(ppp);
- if (!ppp->closing) {
- ppp->closing = 1;
- ppp_unlock(ppp);
- unregister_netdev(ppp->dev);
- unit_put(&pn->units_idr, ppp->file.index);
- } else
- ppp_unlock(ppp);
-
- ppp->file.dead = 1;
- ppp->owner = NULL;
- wake_up_interruptible(&ppp->file.rwait);
-
- mutex_unlock(&pn->all_ppp_mutex);
-}
-
/*
* Free the memory used by a ppp unit. This is only called once
* there are no channels connected to the unit and no file structs
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;
dev->destructor = team_destructor;
- dev->tx_queue_len = 0;
dev->flags |= IFF_MULTICAST;
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+ dev->priv_flags |= IFF_NO_QUEUE;
/*
* Indicate we support unicast address filtering. That way core won't
{QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
/* 4. Gobi 1000 devices */
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128
-/* Weight used for the RX packet size EWMA. The average packet size is used to
- * determine the packet buffer size when refilling RX rings. As the entire RX
- * ring may be refilled at once, the weight is chosen so that the EWMA will be
- * insensitive to short-term, transient changes in packet size.
+/* RX packet size EWMA. The average packet size is used to determine the packet
+ * buffer size when refilling RX rings. As the entire RX ring may be refilled
+ * at once, the weight is chosen so that the EWMA will be insensitive to short-
+ * term, transient changes in packet size.
*/
-#define RECEIVE_AVG_WEIGHT 64
+DECLARE_EWMA(pkt_len, 1, 64)
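+/* DECLARE_EWMA(pkt_len, 1, 64) generates struct ewma_pkt_len and the
+ * ewma_pkt_len_init/_add/_read helpers used below, replacing the old
+ * runtime-configured struct ewma with the same factor/weight (1, 64)
+ * baked in at compile time.
+ */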
/* Minimum alignment for mergeable packet buffers. */
#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
struct page *pages;
/* Average packet length for mergeable receive buffers. */
- struct ewma mrg_avg_pkt_len;
+ struct ewma_pkt_len mrg_avg_pkt_len;
/* Page frag for packet buffer allocation. */
struct page_frag alloc_frag;
}
}
- ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
return head_skb;
err_skb:
return err;
}
-static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
+static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
{
const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
unsigned int len;
- len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
+ len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
}
napi_hash_add(&vi->rq[i].napi);
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
- ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
+ ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
}
{
struct virtnet_info *vi = netdev_priv(queue->dev);
unsigned int queue_index = get_netdev_rx_queue_index(queue);
- struct ewma *avg;
+ struct ewma_pkt_len *avg;
BUG_ON(queue_index >= vi->max_queue_pairs);
avg = &vi->rq[queue_index].mrg_avg_pkt_len;
return false;
}
+static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
+{
+ vrf_dev->stats.tx_errors++;
+ kfree_skb(skb);
+}
+
/* note: already called with rcu_read_lock */
static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
{
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
struct net_device *dev)
{
- return 0;
+ vrf_tx_error(dev, skb);
+ return NET_XMIT_DROP;
}
static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
out:
return ret;
err:
- vrf_dev->stats.tx_errors++;
- kfree_skb(skb);
+ vrf_tx_error(vrf_dev, skb);
goto out;
}
case htons(ETH_P_IPV6):
return vrf_process_v6_outbound(skb, dev);
default:
+ vrf_tx_error(dev, skb);
return NET_XMIT_DROP;
}
}
{
struct dst_entry *dst = (struct dst_entry *)vrf->rth;
- if (dst)
- dst_destroy(dst);
+ dst_destroy(dst);
vrf->rth = NULL;
}
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
rth->rt_uncached_list = NULL;
- rth->rt_lwtstate = NULL;
}
return rth;
/* inverse of __vrf_insert_slave */
static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave)
{
- dev_put(slave->dev);
list_del(&slave->list);
- queue->num_slaves--;
}
static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave)
{
- dev_hold(slave->dev);
list_add(&slave->list, &queue->all_slaves);
- queue->num_slaves++;
}
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
- struct slave *duplicate_slave;
struct net_vrf *vrf = netdev_priv(dev);
struct slave_queue *queue = &vrf->queue;
int ret = -ENOMEM;
goto out_fail;
slave->dev = port_dev;
-
vrf_ptr->ifindex = dev->ifindex;
vrf_ptr->tb_id = vrf->tb_id;
- duplicate_slave = __vrf_find_slave_dev(queue, port_dev);
- if (duplicate_slave) {
- ret = -EBUSY;
- goto out_fail;
- }
-
- __vrf_insert_slave(queue, slave);
-
/* register the packet handler for slave ports */
ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
if (ret) {
netdev_err(port_dev,
"Device %s failed to register rx_handler\n",
port_dev->name);
- goto out_remove;
+ goto out_fail;
}
ret = netdev_master_upper_dev_link(port_dev, dev);
goto out_unregister;
port_dev->flags |= IFF_SLAVE;
-
+ __vrf_insert_slave(queue, slave);
rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
cycle_netdev(port_dev);
out_unregister:
netdev_rx_handler_unregister(port_dev);
-out_remove:
- __vrf_remove_slave(queue, slave);
out_fail:
kfree(vrf_ptr);
kfree(slave);
static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
- if (!netif_is_vrf(dev) || netif_is_vrf(port_dev) ||
- vrf_is_slave(port_dev))
+ if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev))
return -EINVAL;
return do_vrf_add_slave(dev, port_dev);
static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
- if (!netif_is_vrf(dev))
- return -EINVAL;
-
return do_vrf_del_slave(dev, port_dev);
}
list_for_each_entry_safe(slave, next, head, list)
vrf_del_slave(dev, slave->dev);
- if (dev->dstats)
- free_percpu(dev->dstats);
+ free_percpu(dev->dstats);
dev->dstats = NULL;
}
if (!vrf_ptr || netif_is_vrf(dev))
goto out;
- vrf_dev = __dev_get_by_index(dev_net(dev), vrf_ptr->ifindex);
- if (vrf_dev)
- vrf_del_slave(vrf_dev, dev);
+ vrf_dev = netdev_master_upper_dev_get(dev);
+ vrf_del_slave(vrf_dev, dev);
}
out:
return NOTIFY_DONE;
vrf_dst_ops.kmem_cachep =
kmem_cache_create("vrf_ip_dst_cache",
sizeof(struct rtable), 0,
- SLAB_HWCACHE_ALIGN | SLAB_PANIC,
+ SLAB_HWCACHE_ALIGN,
NULL);
if (!vrf_dst_ops.kmem_cachep)
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
if (inet_sk(vs->sock->sk)->inet_sport == port &&
- inet_sk(vs->sock->sk)->sk.sk_family == family &&
+ vxlan_get_sk_family(vs) == family &&
vs->flags == flags)
return vs;
}
u32 data, struct gro_remcsum *grc,
bool nopartial)
{
- size_t start, offset, plen;
+ size_t start, offset;
if (skb->remcsum_offload)
- return NULL;
+ return vh;
if (!NAPI_GRO_CB(skb)->csum_valid)
return NULL;
offsetof(struct udphdr, check) :
offsetof(struct tcphdr, check));
- plen = hdrlen + offset + sizeof(u16);
-
- /* Pull checksum that will be written */
- if (skb_gro_header_hard(skb, off + plen)) {
- vh = skb_gro_header_slow(skb, off + plen, off);
- if (!vh)
- return NULL;
- }
-
- skb_gro_remcsum_process(skb, (void *)vh + hdrlen,
- start, offset, grc, nopartial);
+ vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
+ start, offset, grc, nopartial);
skb->remcsum_offload = 1;
goto out;
}
- skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
flags = ntohl(vh->vx_flags);
goto out;
}
+ skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+
flush = 0;
for (p = *head; p; p = p->next) {
struct net_device *dev;
struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
- sa_family_t sa_family = sk->sk_family;
+ sa_family_t sa_family = vxlan_get_sk_family(vs);
__be16 port = inet_sk(sk)->inet_sport;
int err;
struct net_device *dev;
struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
- sa_family_t sa_family = sk->sk_family;
+ sa_family_t sa_family = vxlan_get_sk_family(vs);
__be16 port = inet_sk(sk)->inet_sport;
rcu_read_lock();
{
size_t start, offset, plen;
+ if (skb->remcsum_offload)
+ return vh;
+
start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
offset = start + ((data & VXLAN_RCO_UDP) ?
offsetof(struct udphdr, check) :
stats->rx_bytes += skb->len;
u64_stats_update_end(&stats->syncp);
- netif_rx(skb);
+ gro_cells_receive(&vxlan->gro_cells, skb);
return;
drop:
}
if (vxlan_collect_metadata(vs)) {
- const struct iphdr *iph = ip_hdr(skb);
-
tun_dst = metadata_dst_alloc(sizeof(*md), GFP_ATOMIC);
if (!tun_dst)
goto drop;
info = &tun_dst->u.tun_info;
- info->key.ipv4_src = iph->saddr;
- info->key.ipv4_dst = iph->daddr;
- info->key.ipv4_tos = iph->tos;
- info->key.ipv4_ttl = iph->ttl;
+ if (vxlan_get_sk_family(vs) == AF_INET) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ info->key.u.ipv4.src = iph->saddr;
+ info->key.u.ipv4.dst = iph->daddr;
+ info->key.tos = iph->tos;
+ info->key.ttl = iph->ttl;
+ } else {
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ info->key.u.ipv6.src = ip6h->saddr;
+ info->key.u.ipv6.dst = ip6h->daddr;
+ info->key.tos = ipv6_get_dsfield(ip6h);
+ info->key.ttl = ip6h->hop_limit;
+ }
+
info->key.tp_src = udp_hdr(skb)->source;
info->key.tp_dst = udp_hdr(skb)->dest;
struct ip_tunnel_info *info;
struct vxlan_dev *vxlan = netdev_priv(dev);
struct sock *sk = vxlan->vn_sock->sock->sk;
+ unsigned short family = vxlan_get_sk_family(vxlan->vn_sock);
struct rtable *rt = NULL;
const struct iphdr *old_iph;
struct flowi4 fl4;
int err;
u32 flags = vxlan->flags;
- /* FIXME: Support IPv6 */
- info = skb_tunnel_info(skb, AF_INET);
+ info = skb_tunnel_info(skb);
if (rdst) {
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
vni = be64_to_cpu(info->key.tun_id);
- remote_ip.sin.sin_family = AF_INET;
- remote_ip.sin.sin_addr.s_addr = info->key.ipv4_dst;
+ remote_ip.sa.sa_family = family;
+ if (family == AF_INET)
+ remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
+ else
+ remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
dst = &remote_ip;
}
src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
vxlan->cfg.port_max, true);
+ if (info) {
+ if (info->key.tun_flags & TUNNEL_CSUM)
+ flags |= VXLAN_F_UDP_CSUM;
+ else
+ flags &= ~VXLAN_F_UDP_CSUM;
+
+ ttl = info->key.ttl;
+ tos = info->key.tos;
+
+ if (info->options_len)
+ md = ip_tunnel_info_opts(info, sizeof(*md));
+ } else {
+ md->gbp = skb->mark;
+ }
+
if (dst->sa.sa_family == AF_INET) {
- if (info) {
- if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
- df = htons(IP_DF);
- if (info->key.tun_flags & TUNNEL_CSUM)
- flags |= VXLAN_F_UDP_CSUM;
- else
- flags &= ~VXLAN_F_UDP_CSUM;
-
- ttl = info->key.ipv4_ttl;
- tos = info->key.ipv4_tos;
-
- if (info->options_len)
- md = ip_tunnel_info_opts(info, sizeof(*md));
- } else {
- md->gbp = skb->mark;
- }
+ if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
+ df = htons(IP_DF);
memset(&fl4, 0, sizeof(fl4));
fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
} else {
struct dst_entry *ndst;
struct flowi6 fl6;
- u32 flags;
+ u32 rt6i_flags;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
}
/* Bypass encapsulation if the destination is local */
- flags = ((struct rt6_info *)ndst)->rt6i_flags;
- if (flags & RTF_LOCAL &&
- !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
+ rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
+ if (rt6i_flags & RTF_LOCAL &&
+ !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
struct vxlan_dev *dst_vxlan;
dst_release(ndst);
}
ttl = ttl ? : ip6_dst_hoplimit(ndst);
- md->gbp = skb->mark;
-
err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
0, ttl, src_port, dst_port, htonl(vni << 8), md,
!net_eq(vxlan->net, dev_net(vxlan->dev)),
- vxlan->flags);
+ flags);
#endif
}
struct vxlan_rdst *rdst, *fdst = NULL;
struct vxlan_fdb *f;
- /* FIXME: Support IPv6 */
- info = skb_tunnel_info(skb, AF_INET);
+ info = skb_tunnel_info(skb);
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
for (i = 0; i < PORT_HASH_SIZE; ++i) {
hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
port = inet_sk(vs->sock->sk)->inet_sport;
- sa_family = vs->sock->sk->sk_family;
+ sa_family = vxlan_get_sk_family(vs);
dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
port);
}
dev->destructor = free_netdev;
SET_NETDEV_DEVTYPE(dev, &vxlan_type);
- dev->tx_queue_len = 0;
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
netif_keep_dst(dev);
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
vxlan->dev = dev;
+ gro_cells_init(&vxlan->gro_cells, dev);
+
for (h = 0; h < FDB_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
hlist_del_rcu(&vxlan->hlist);
spin_unlock(&vn->sock_lock);
+ gro_cells_destroy(&vxlan->gro_cells);
list_del(&vxlan->next);
unregister_netdevice_queue(dev, head);
}
/* If vxlan->dev is in the same netns, it has already been added
* to the list by the previous loop.
*/
- if (!net_eq(dev_net(vxlan->dev), net))
+ if (!net_eq(dev_net(vxlan->dev), net)) {
+ gro_cells_destroy(&vxlan->gro_cells);
unregister_netdevice_queue(vxlan->dev, &list);
+ }
}
unregister_netdevice_many(&list);
}
dev->netdev_ops = &pvc_ops;
dev->mtu = HDLC_MAX_MTU;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->ml_priv = pvc;
if (register_netdevice(dev) != 0) {
#include "wmi-ops.h"
unsigned int ath10k_debug_mask;
+static unsigned int ath10k_cryptmode_param;
static bool uart_print;
static bool skip_otp;
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
module_param(uart_print, bool, 0644);
module_param(skip_otp, bool, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
+MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
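+/* e.g. "modprobe ath10k_core cryptmode=1" selects the software crypto
+ * engine; this requires firmware that advertises raw mode support.
+ */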
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
return -EINVAL;
}
+ ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
+ switch (ath10k_cryptmode_param) {
+ case ATH10K_CRYPT_MODE_HW:
+ clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+ clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+ break;
+ case ATH10K_CRYPT_MODE_SW:
+ if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+ ar->fw_features)) {
+ ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
+ return -EINVAL;
+ }
+
+ set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+ set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+ break;
+ default:
+ ath10k_info(ar, "invalid cryptmode: %d\n",
+ ath10k_cryptmode_param);
+ return -EINVAL;
+ }
+
+ ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
+ ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
+
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
+
+ /* Workaround:
+ *
+ * Firmware A-MSDU aggregation breaks with RAW Tx encap mode
+ * and causes enormous performance issues (malformed frames,
+ * etc).
+ *
+ * Disabling A-MSDU makes RAW mode stable with heavy traffic
+ * albeit a bit slower compared to regular operation.
+ */
+ ar->htt.max_num_amsdu = 1;
+ }
+
/* Backwards compatibility for firmwares without
* ATH10K_FW_IE_WMI_OP_VERSION.
*/
if (!ar->workqueue)
goto err_free_mac;
+ ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq");
+ if (!ar->workqueue_aux)
+ goto err_free_wq;
+
mutex_init(&ar->conf_mutex);
spin_lock_init(&ar->data_lock);
ret = ath10k_debug_create(ar);
if (ret)
- goto err_free_wq;
+ goto err_free_aux_wq;
return ar;
+err_free_aux_wq:
+ destroy_workqueue(ar->workqueue_aux);
err_free_wq:
destroy_workqueue(ar->workqueue);
flush_workqueue(ar->workqueue);
destroy_workqueue(ar->workqueue);
+ flush_workqueue(ar->workqueue_aux);
+ destroy_workqueue(ar->workqueue_aux);
+
ath10k_debug_destroy(ar);
ath10k_mac_destroy(ar);
}
u8 tid;
u16 freq;
bool is_offchan;
+ bool nohwcrypt;
struct ath10k_htt_txbuf *txbuf;
u32 txbuf_paddr;
} __packed htt;
const struct wmi_ops *ops;
u32 num_mem_chunks;
+ u32 rx_decap_mode;
struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
};
} u;
bool use_cts_prot;
+ bool nohwcrypt;
int num_legacy_stations;
int txpower;
struct wmi_wmm_params_all_arg wmm_params;
u32 reg_addr;
u32 nf_cal_period;
- u8 htt_max_amsdu;
- u8 htt_max_ampdu;
-
struct ath10k_fw_crash_data *fw_crash_data;
};
ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
/* Don't trust error code from otp.bin */
- ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+ ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7,
/* Some firmware revisions pad 4th hw address to 4 byte boundary making
* it 8 bytes long in Native Wifi Rx decap.
*/
- ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+ ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8,
/* Firmware supports bypassing PLL setting on init. */
ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
+	/* Raw mode support. If supported, FW supports receiving and transmitting
+ * frames in raw mode.
+ */
+ ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
* waiters should immediately cancel instead of waiting for a time out.
*/
ATH10K_FLAG_CRASH_FLUSH,
+
+ /* Use Raw mode instead of native WiFi Tx/Rx encap mode.
+ * Raw mode supports both hardware and software crypto. Native WiFi only
+ * supports hardware crypto.
+ */
+ ATH10K_FLAG_RAW_MODE,
+
+ /* Disable HW crypto engine */
+ ATH10K_FLAG_HW_CRYPTO_DISABLED,
};
enum ath10k_cal_mode {
ATH10K_CAL_MODE_DT,
};
+enum ath10k_crypt_mode {
+ /* Only use hardware crypto engine */
+ ATH10K_CRYPT_MODE_HW,
+ /* Only use software crypto engine */
+ ATH10K_CRYPT_MODE_SW,
+};
+
static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
{
switch (mode) {
struct completion vdev_setup_done;
struct workqueue_struct *workqueue;
+ /* Auxiliary workqueue */
+ struct workqueue_struct *workqueue_aux;
/* prevents concurrent FW reconfiguration */
struct mutex conf_mutex;
int num_active_peers;
int num_tids;
+ struct work_struct svc_rdy_work;
+ struct sk_buff *svc_rdy_skb;
+
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
struct completion offchan_tx_completed;
void ath10k_print_driver_info(struct ath10k *ar)
{
- char fw_features[128];
+ char fw_features[128] = {};
ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
- ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d features %s\n",
+ ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
ar->htt.op_version,
ath10k_cal_mode_str(ar->cal_mode),
ar->max_num_stations,
+ test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
+ !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags),
fw_features);
ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
config_enabled(CONFIG_ATH10K_DEBUG),
mutex_lock(&ar->conf_mutex);
- if (ar->debug.htt_max_amsdu)
- amsdu = ar->debug.htt_max_amsdu;
-
- if (ar->debug.htt_max_ampdu)
- ampdu = ar->debug.htt_max_ampdu;
-
+ amsdu = ar->htt.max_num_amsdu;
+ ampdu = ar->htt.max_num_ampdu;
mutex_unlock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
goto out;
res = count;
- ar->debug.htt_max_amsdu = amsdu;
- ar->debug.htt_max_ampdu = ampdu;
+ ar->htt.max_num_amsdu = amsdu;
+ ar->htt.max_num_ampdu = ampdu;
out:
mutex_unlock(&ar->conf_mutex);
if (ar->debug.htt_stats_mask != 0)
cancel_delayed_work(&ar->debug.htt_stats_dwork);
- ar->debug.htt_max_amsdu = 0;
- ar->debug.htt_max_ampdu = 0;
-
ath10k_wmi_pdev_pktlog_disable(ar);
}
}
status = ath10k_htt_verify_version(htt);
- if (status)
+ if (status) {
+ ath10k_warn(ar, "failed to verify htt version: %d\n",
+ status);
return status;
+ }
status = ath10k_htt_send_frag_desc_bank_cfg(htt);
if (status)
return status;
- return ath10k_htt_send_rx_ring_cfg_ll(htt);
+ status = ath10k_htt_send_rx_ring_cfg_ll(htt);
+ if (status) {
+ ath10k_warn(ar, "failed to setup rx ring: %d\n",
+ status);
+ return status;
+ }
+
+ status = ath10k_htt_h2t_aggr_cfg_msg(htt,
+ htt->max_num_ampdu,
+ htt->max_num_amsdu);
+ if (status) {
+ ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
}
* around the mask + shift defs.
*/
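+/* Two addressing layouts share this descriptor: the legacy double-word
+ * {paddr, len} pair, and a triple-word form that splits the physical
+ * address into low/high parts with a 16-bit length, used together with
+ * continuous fragment descriptors on newer targets.
+ */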
struct htt_data_tx_desc_frag {
- __le32 paddr;
- __le32 len;
+ union {
+ struct double_word_addr {
+ __le32 paddr;
+ __le32 len;
+ } __packed dword_addr;
+ struct triple_word_addr {
+ __le32 paddr_lo;
+ __le16 paddr_hi;
+ __le16 len_16;
+ } __packed tword_addr;
+ } __packed;
} __packed;
struct htt_msdu_ext_desc {
- __le32 tso_flag[4];
+ __le32 tso_flag[3];
+ __le16 ip_identification;
+ u8 flags;
+ u8 reserved;
struct htt_data_tx_desc_frag frags[6];
};
+#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4)
+
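+/* Convenience mask used by the TX path to enable all L3/L4 checksum
+ * engines at once when the stack requests offload (CHECKSUM_PARTIAL).
+ */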
+#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
+
enum htt_data_tx_desc_flags0 {
HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
} __packed;
#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
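+/* QCA99X0 extends the management TX descriptor with a rate word; the union
+ * keeps the original layout for other chips.
+ */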
+struct htt_mgmt_tx_desc_qca99x0 {
+ __le32 rate;
+} __packed;
struct htt_mgmt_tx_desc {
u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
__le32 len;
__le32 vdev_id;
u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+ union {
+ struct htt_mgmt_tx_desc_qca99x0 qca99x0;
+ } __packed;
} __packed;
enum htt_mgmt_tx_status {
u8 target_version_minor;
struct completion target_version_received;
enum ath10k_fw_htt_op_version op_version;
+ u8 max_num_amsdu;
+ u8 max_num_ampdu;
const enum htt_t2h_msg_type *t2h_msg_types;
u32 t2h_msg_types_max;
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
+/* These values are the defaults in most firmware revisions and apparently
+ * represent a performance sweet spot.
+ */
+#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
+#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
+
int ath10k_htt_connect(struct ath10k_htt *htt);
int ath10k_htt_init(struct ath10k *ar);
int ath10k_htt_setup(struct ath10k_htt *htt);
msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
- msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
+ msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
msdu_chained = rx_desc->frag_info.ring2_more_count;
msdu_chaining = 1;
}
- last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
+ last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
__cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
return NULL;
- if (!(rxd->msdu_end.info0 &
+ if (!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
return NULL;
bool is_last;
rxd = (void *)msdu->data - sizeof(*rxd);
- is_first = !!(rxd->msdu_end.info0 &
+ is_first = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.info0 &
+ is_last = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Delivered decapped frame:
skb_trim(msdu, msdu->len - FCS_LEN);
/* In most cases this will be true for sniffed frames. It makes sense
- * to deliver them as-is without stripping the crypto param. This would
- * also make sense for software based decryption (which is not
- * implemented in ath10k).
+ * to deliver them as-is without stripping the crypto param. This is
+	 * necessary for software-based decryption.
*
* If there's no error then the frame is decrypted. At least that is
* the case for frames that come in via fragmented rx indication.
rxd = (void *)msdu->data - sizeof(*rxd);
hdr = (void *)rxd->rx_hdr_status;
- is_first = !!(rxd->msdu_end.info0 &
+ is_first = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.info0 &
+ is_last = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
is_amsdu = !(is_first && is_last);
*/
rxd = (void *)msdu->data - sizeof(*rxd);
- decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
switch (decap) {
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
- info = __le32_to_cpu(rxd->msdu_start.info1);
+ info = __le32_to_cpu(rxd->msdu_start.common.info1);
is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
first = skb_peek(amsdu);
rxd = (void *)first->data - sizeof(*rxd);
- decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
if (!chained)
__le16 msdu_id;
int i;
- lockdep_assert_held(&htt->tx_lock);
-
switch (status) {
case HTT_DATA_TX_STATUS_NO_ACK:
tx_done.no_ack = true;
__skb_queue_tail(amsdu, msdu);
rxd = (void *)msdu->data - sizeof(*rxd);
- if (rxd->msdu_end.info0 &
+ if (rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
break;
}
msdu = skb_peek_tail(amsdu);
rxd = (void *)msdu->data - sizeof(*rxd);
- if (!(rxd->msdu_end.info0 &
+ if (!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
skb_queue_splice_init(amsdu, list);
return -EAGAIN;
break;
}
- spin_lock_bh(&htt->tx_lock);
ath10k_txrx_tx_unref(htt, &tx_done);
- spin_unlock_bh(&htt->tx_lock);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
- spin_lock_bh(&htt->tx_lock);
- __skb_queue_tail(&htt->tx_compl_q, skb);
- spin_unlock_bh(&htt->tx_lock);
+ skb_queue_tail(&htt->tx_compl_q, skb);
tasklet_schedule(&htt->txrx_compl_task);
return;
case HTT_T2H_MSG_TYPE_SEC_IND: {
break;
case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
break;
+ case HTT_T2H_MSG_TYPE_AGGR_CONF:
+ break;
case HTT_T2H_MSG_TYPE_EN_STATS:
case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
struct htt_resp *resp;
struct sk_buff *skb;
- spin_lock_bh(&htt->tx_lock);
- while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
+ while ((skb = skb_dequeue(&htt->tx_compl_q))) {
ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
dev_kfree_skb_any(skb);
}
- spin_unlock_bh(&htt->tx_lock);
spin_lock_bh(&htt->rx_ring.lock);
while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
lockdep_assert_held(&htt->tx_lock);
- ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
+ ret = idr_alloc(&htt->pending_tx, skb, 0,
+ htt->max_num_pending_tx, GFP_ATOMIC);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
tx_done.discard = 1;
tx_done.msdu_id = msdu_id;
- spin_lock_bh(&htt->tx_lock);
ath10k_txrx_tx_unref(htt, &tx_done);
- spin_unlock_bh(&htt->tx_lock);
return 0;
}
cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
__cpu_to_le32(htt->frag_desc.paddr);
+ cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
__cpu_to_le16(htt->max_num_pending_tx - 1);
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ spin_unlock_bh(&htt->tx_lock);
if (res < 0) {
- spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
- spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(ar, len);
if (!txdesc) {
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
+ memset(cmd, 0, len);
+
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
u16 msdu_id, flags1 = 0;
dma_addr_t paddr = 0;
u32 frags_paddr = 0;
+ struct htt_msdu_ext_desc *ext_desc = NULL;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ spin_unlock_bh(&htt->tx_lock);
if (res < 0) {
- spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
- spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control))
+ ieee80211_has_protected(hdr->frame_control)) {
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ } else if (!skb_cb->htt.nohwcrypt &&
+ skb_cb->txmode == ATH10K_HW_TXRX_RAW) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
/* pass through */
case ATH10K_HW_TXRX_ETHERNET:
- frags = skb_cb->htt.txbuf->frags;
-
- frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
- frags[0].len = __cpu_to_le32(msdu->len);
- frags[1].paddr = 0;
- frags[1].len = 0;
-
+ if (ar->hw_params.continuous_frag_desc) {
+ memset(&htt->frag_desc.vaddr[msdu_id], 0,
+ sizeof(struct htt_msdu_ext_desc));
+ frags = (struct htt_data_tx_desc_frag *)
+ &htt->frag_desc.vaddr[msdu_id].frags;
+ ext_desc = &htt->frag_desc.vaddr[msdu_id];
+ frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi = 0;
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+ frags_paddr = htt->frag_desc.paddr +
+ (sizeof(struct htt_msdu_ext_desc) * msdu_id);
+ } else {
+ frags = skb_cb->htt.txbuf->frags;
+ frags[0].dword_addr.paddr =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
+ frags[1].dword_addr.paddr = 0;
+ frags[1].dword_addr.len = 0;
+
+ frags_paddr = skb_cb->htt.txbuf_paddr;
+ }
flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
-
- frags_paddr = skb_cb->htt.txbuf_paddr;
break;
case ATH10K_HW_TXRX_MGMT:
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
prefetch_len);
skb_cb->htt.txbuf->htc_hdr.flags = 0;
+ if (skb_cb->htt.nohwcrypt)
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
if (!skb_cb->is_protected)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
- if (msdu->ip_summed == CHECKSUM_PARTIAL) {
+ if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ if (ar->hw_params.continuous_frag_desc)
+ ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
}
/* Prevent firmware from sending up tx inspection requests. There's
#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
/* Known peculiarities:
- * - current FW doesn't support raw rx mode (last tested v599)
- * - current FW dumps upon raw tx mode (last tested v599)
* - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
* - raw have FCS, nwifi doesn't
* - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
* param, llc/snap) are aligned to 4byte boundaries each */
enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
+
+ /* Native Wifi decap mode is used to align IP frames to 4-byte
+ * boundaries and avoid a very expensive re-alignment in mac80211.
+ */
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
ATH10K_HW_TXRX_ETHERNET = 2,
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
-/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
- * avoid a very expensive re-alignment in mac80211. */
-#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
-
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
-#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
(TARGET_10_4_NUM_VDEVS))
#define TARGET_10_4_ACTIVE_PEERS 0
-/* TODO: increase qcache max client limit to 512 after
- * testing with 512 client.
- */
-#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 256
+#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512
#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0
return -EOPNOTSUPP;
}
+	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
if (cmd == DISABLE_KEY) {
arg.key_cipher = WMI_CIPHER_NONE;
arg.key_data = NULL;
reinit_completion(&ar->install_key_done);
+ if (arvif->nohwcrypt)
+ return 1;
+
ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
if (ret)
return ret;
ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
addr, flags);
- if (ret)
+ if (ret < 0)
return ret;
flags = 0;
ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
addr, flags);
- if (ret)
+ if (ret < 0)
return ret;
spin_lock_bh(&ar->data_lock);
/* key flags are not required to delete the key */
ret = ath10k_install_key(arvif, peer->keys[i],
DISABLE_KEY, addr, flags);
- if (ret && first_errno == 0)
+ if (ret < 0 && first_errno == 0)
first_errno = ret;
- if (ret)
+ if (ret < 0)
ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
i, ret);
break;
/* key flags are not required to delete the key */
ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
- if (ret && first_errno == 0)
+ if (ret < 0 && first_errno == 0)
first_errno = ret;
if (ret)
static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
enum wmi_peer_type peer_type)
{
+ struct ath10k_vif *arvif;
+ int num_peers = 0;
int ret;
lockdep_assert_held(&ar->conf_mutex);
- if (ar->num_peers >= ar->max_num_peers)
+ num_peers = ar->num_peers;
+
+ /* Each vdev consumes a peer entry as well */
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ num_peers++;
+
+ if (num_peers >= ar->max_num_peers)
return -ENOBUFS;
ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
-static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
-{
- struct ath10k *ar = arvif->ar;
- u32 vdev_param;
-
- if (value != 0xFFFFFFFF)
- value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
- ATH10K_FRAGMT_THRESHOLD_MIN,
- ATH10K_FRAGMT_THRESHOLD_MAX);
-
- vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
- return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
-}
-
static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
{
int ret;
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
struct cfg80211_chan_def *chandef = NULL;
- struct ieee80211_channel *channel = chandef->chan;
+ struct ieee80211_channel *channel = NULL;
struct wmi_vdev_start_request_arg arg = {};
int ret = 0;
u32 param;
u32 value;
+ if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
+ return 0;
+
if (!(ar->vht_cap_info &
(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
* Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
* NativeWifi txmode - it selects AP key instead of peer key. It seems
* to work with Ethernet txmode so use it.
+ *
+ * FIXME: Check if raw mode works with TDLS.
*/
if (ieee80211_is_data_present(fc) && sta && sta->tdls)
return ATH10K_HW_TXRX_ETHERNET;
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ return ATH10K_HW_TXRX_RAW;
+
return ATH10K_HW_TXRX_NATIVE_WIFI;
}
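+/* Decide whether a frame should go through the hardware crypto engine.
+ * Injected frames flagged "don't encrypt" and vifs configured for software
+ * crypto (nohwcrypt) bypass it.
+ */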
+static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
+				     struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
+			 IEEE80211_TX_CTL_INJECTED;
+
+	if ((info->flags & mask) == mask)
+		return false;
+
+	if (vif)
+		return !ath10k_vif_to_arvif(vif)->nohwcrypt;
+
+	return true;
+}
+
/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
* Control in the header.
*/
int vdev_id;
int ret;
unsigned long time_left;
+ bool tmp_peer_created = false;
/* FW requirement: We must create a peer before FW will send out
* an offchannel frame. Otherwise the frame will be stuck and
if (ret)
ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
+ tmp_peer_created = (ret == 0);
}
spin_lock_bh(&ar->data_lock);
ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
skb);
- if (!peer) {
+ if (!peer && tmp_peer_created) {
ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
ATH10K_SKB_CB(skb)->htt.is_offchan = false;
ATH10K_SKB_CB(skb)->htt.freq = 0;
ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
+ ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb);
ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
ath10k_tx_h_8023(skb);
break;
case ATH10K_HW_TXRX_RAW:
- /* FIXME: Packet injection isn't implemented. It should be
- * doable with firmware 10.2 on qca988x.
- */
- WARN_ON_ONCE(1);
- ieee80211_free_txskb(hw, skb);
- return;
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ WARN_ON_ONCE(1);
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
}
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
return 1;
}
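+/* Build the vdev TXBF configuration (SU/MU beamformer/beamformee bits plus
+ * STS and sounding-dimension counts) from the advertised VHT capabilities.
+ * Only takes effect on firmware that expects TXBF setup before association.
+ */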
+static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
+{
+ u32 value = 0;
+ struct ath10k *ar = arvif->ar;
+
+ if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
+ return 0;
+
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
+ value |= SM((ar->num_rf_chains - 1), WMI_TXBF_STS_CAP_OFFSET);
+
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
+ value |= SM((ar->num_rf_chains - 1), WMI_BF_SOUND_DIM_OFFSET);
+
+ if (!value)
+ return 0;
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+ value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
+ WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+ value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
+ WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ ar->wmi.vdev_param->txbf, value);
+}
+
/*
* TODO:
* Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
sizeof(arvif->bitrate_mask.control[i].vht_mcs));
}
+ if (ar->num_peers >= ar->max_num_peers) {
+ ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
+ return -ENOBUFS;
+ }
+
if (ar->free_vdev_map == 0) {
ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
ret = -EBUSY;
goto err;
}
}
+ if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
+ arvif->nohwcrypt = true;
+
+ if (arvif->nohwcrypt &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
+		ret = -EINVAL;
+		goto err;
+ }
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
}
}
- ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+ ret = ath10k_mac_set_txbf_conf(arvif);
if (ret) {
- ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
- ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
+ ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
if (ret) {
- ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
return 1;
+ if (arvif->nohwcrypt)
+ return 1;
+
if (key->keyidx > WMI_MAX_KEY_INDEX)
return -ENOSPC;
ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
+ WARN_ON(ret > 0);
ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
goto exit;
ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
if (ret) {
+ WARN_ON(ret > 0);
ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
peer_addr, flags);
- if (ret2)
+ if (ret2) {
+ WARN_ON(ret2 > 0);
ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret2);
+ }
goto exit;
}
}
return ret;
}
+static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	/* Even though there's a WMI enum for the fragmentation threshold, no
+	 * known firmware actually implements it. Moreover it is not possible
+	 * to leave frame fragmentation to mac80211 because firmware clears the
+	 * "more fragments" bit in frame control, making it impossible for
+	 * remote devices to reassemble frames.
+ *
+ * Hence implement a dummy callback just to say fragmentation isn't
+ * supported. This effectively prevents mac80211 from doing frame
+ * fragmentation in software.
+ */
+ return -EOPNOTSUPP;
+}
+
static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
.remain_on_channel = ath10k_remain_on_channel,
.cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
.set_rts_threshold = ath10k_set_rts_threshold,
+ .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
.flush = ath10k_flush,
.tx_last_beacon = ath10k_tx_last_beacon,
.set_antenna = ath10k_set_antenna,
ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(ar->hw, AP_LINK_PS);
ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
- ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
goto err_free;
}
- ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
/* Init ath dfs pattern detector */
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
+ { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
{0}
};
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+ { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
};
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
- int i;
u32 val;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
val |= 1;
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
- for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
- RTC_STATE_COLD_RESET_MASK)
- break;
- msleep(1);
- }
+	/* After writing SOC_GLOBAL_RESET to put the device into reset and then
+	 * pulling it back out, PCIe may not be stable for immediate register
+	 * access and can cause bus errors; add a delay before any PCIe access
+	 * to avoid this issue.
+	 */
+ msleep(20);
/* Pull Target, including PCIe, out of RESET. */
val &= ~1;
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
- for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
- RTC_STATE_COLD_RESET_MASK))
- break;
- msleep(1);
- }
+ msleep(20);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
+#define RX_MSDU_START_INFO2_DA_IDX_MASK 0x000007ff
+#define RX_MSDU_START_INFO2_DA_IDX_LSB 0
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB 16
+#define RX_MSDU_START_INFO2_DA_BCAST_MCAST BIT(11)
+
/* The decapped header (rx_hdr_status) contains the following:
* a) 802.11 header
* [padding to 4 bytes]
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
-struct rx_msdu_start {
+struct rx_msdu_start_common {
__le32 info0; /* %RX_MSDU_START_INFO0_ */
__le32 flow_id_crc;
__le32 info1; /* %RX_MSDU_START_INFO1_ */
} __packed;
+struct rx_msdu_start_qca99x0 {
+ __le32 info2; /* %RX_MSDU_START_INFO2_ */
+} __packed;
+
+struct rx_msdu_start {
+ struct rx_msdu_start_common common;
+ union {
+ struct rx_msdu_start_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
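
These _MASK/_LSB pairs are meant for ath10k's usual MS() bitfield accessor; a small illustrative helper, assuming the MS() definition from ath10k's hw.h (the function name below is hypothetical):

/* assuming: #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) */
static u32 example_rxd_da_idx(struct rx_msdu_start *ms)
{
	u32 info2 = __le32_to_cpu(ms->qca99x0.info2);

	return MS(info2, RX_MSDU_START_INFO2_DA_IDX);
}
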
/*
* msdu_length
* MSDU length in bytes after decapsulation. This field is
#define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30)
#define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31)
-struct rx_msdu_end {
+struct rx_msdu_end_common {
__le16 ip_hdr_cksum;
__le16 tcp_hdr_cksum;
u8 key_id_octet;
__le32 info0;
} __packed;
+#define RX_MSDU_END_INFO1_TCP_FLAG_MASK 0x000001ff
+#define RX_MSDU_END_INFO1_TCP_FLAG_LSB 0
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK 0x00001c00
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB 10
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK 0xffff0000
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB 16
+#define RX_MSDU_END_INFO1_IRO_ELIGIBLE BIT(9)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET_MASK 0x0000003f
+#define RX_MSDU_END_INFO2_DA_OFFSET_LSB 0
+#define RX_MSDU_END_INFO2_SA_OFFSET_MASK 0x00000fc0
+#define RX_MSDU_END_INFO2_SA_OFFSET_LSB 6
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK 0x0003f000
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB 12
+
+struct rx_msdu_end_qca99x0 {
+ __le32 ipv6_crc;
+ __le32 tcp_seq_no;
+ __le32 tcp_ack_no;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+struct rx_msdu_end {
+ struct rx_msdu_end_common common;
+ union {
+ struct rx_msdu_end_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
/*
* ip_hdr_chksum
* This can include the IP header checksum or the pseudo header
#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
-#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
+#define RX_PPDU_END_INFO1_PEER_IDX_MASK 0x1ffc
+#define RX_PPDU_END_INFO1_PEER_IDX_LSB 2
+#define RX_PPDU_END_INFO1_BB_DATA BIT(0)
+#define RX_PPDU_END_INFO1_PEER_IDX_VALID BIT(1)
+#define RX_PPDU_END_INFO1_PPDU_DONE BIT(15)
struct rx_ppdu_end_common {
__le32 evm_p0;
__le32 evm_p15;
__le32 tsf_timestamp;
__le32 wb_timestamp;
+} __packed;
+
+struct rx_ppdu_end_qca988x {
u8 locationing_timestamp;
u8 phy_err_code;
__le16 flags; /* %RX_PPDU_END_FLAGS_ */
__le32 info0; /* %RX_PPDU_END_INFO0_ */
-} __packed;
-
-struct rx_ppdu_end_qca988x {
__le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
struct rx_ppdu_end_qca6174 {
+ u8 locationing_timestamp;
+ u8 phy_err_code;
+ __le16 flags; /* %RX_PPDU_END_FLAGS_ */
+ __le32 info0; /* %RX_PPDU_END_INFO0_ */
__le32 rtt; /* %RX_PPDU_END_RTT_ */
__le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
+#define RX_PKT_END_INFO0_RX_SUCCESS BIT(0)
+#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX BIT(3)
+#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP BIT(4)
+#define RX_PKT_END_INFO0_ERR_OFDM_RESTART BIT(5)
+#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP BIT(6)
+#define RX_PKT_END_INFO0_ERR_CCK_RESTART BIT(7)
+
+#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK 0x0001ffff
+#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB 0
+#define RX_LOCATION_INFO_FAC_STATUS_MASK 0x000c0000
+#define RX_LOCATION_INFO_FAC_STATUS_LSB 18
+#define RX_LOCATION_INFO_PKT_BW_MASK 0x00700000
+#define RX_LOCATION_INFO_PKT_BW_LSB 20
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB 23
+#define RX_LOCATION_INFO_CIR_STATUS BIT(17)
+#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE BIT(25)
+#define RX_LOCATION_INFO_RTT_TX_DATA_START_X BIT(26)
+#define RX_LOCATION_INFO_HW_IFFT_MODE BIT(30)
+#define RX_LOCATION_INFO_RX_LOCATION_VALID BIT(31)
+
+struct rx_pkt_end {
+ __le32 info0; /* %RX_PKT_END_INFO0_ */
+ __le32 phy_timestamp_1;
+ __le32 phy_timestamp_2;
+ __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+} __packed;
+
+enum rx_phy_ppdu_end_info0 {
+ RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2),
+ RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3),
+ RX_PHY_PPDU_END_INFO0_ERR_RX_NAP = BIT(4),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING = BIT(5),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY = BIT(6),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE = BIT(7),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH = BIT(8),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART = BIT(9),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE = BIT(10),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER = BIT(12),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING = BIT(13),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC = BIT(14),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE = BIT(15),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH = BIT(16),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART = BIT(17),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE = BIT(18),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP = BIT(19),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_CRC = BIT(20),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH = BIT(21),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_RATE = BIT(22),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF = BIT(23),
+ RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24),
+ RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD = BIT(25),
+ RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN = BIT(26),
+ RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW = BIT(27),
+ RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28),
+ RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC = BIT(29),
+ RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA = BIT(30),
+ RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG = BIT(31),
+};
+
+enum rx_phy_ppdu_end_info1 {
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP = BIT(0),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM = BIT(1),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM = BIT(2),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0 = BIT(3),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63 = BIT(5),
+ RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER = BIT(6),
+ RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP = BIT(7),
+ RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT = BIT(8),
+ RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK = BIT(9),
+ RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION = BIT(10),
+ RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK = BIT(11),
+ RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX = BIT(12),
+ RX_PHY_PPDU_END_INFO1_ERR_RX_CBF = BIT(13),
+};
+
+struct rx_phy_ppdu_end {
+ __le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */
+ __le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RX_TIMING_OFFSET_MASK 0x00000fff
+#define RX_PPDU_END_RX_TIMING_OFFSET_LSB 0
+
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK 0x00ffffff
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB 0
+#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK BIT(24)
+#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID BIT(25)
+#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID BIT(26)
+#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27)
+#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL BIT(28)
+#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC BIT(29)
+#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE BIT(30)
+
+struct rx_ppdu_end_qca99x0 {
+ struct rx_pkt_end rx_pkt_end;
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+ __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
struct rx_ppdu_end {
struct rx_ppdu_end_common common;
union {
struct rx_ppdu_end_qca988x qca988x;
struct rx_ppdu_end_qca6174 qca6174;
+ struct rx_ppdu_end_qca99x0 qca99x0;
} __packed;
} __packed;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
- lockdep_assert_held(&htt->tx_lock);
-
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
tx_done->msdu_id, !!tx_done->discard,
return;
}
+ spin_lock_bh(&htt->tx_lock);
msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
if (!msdu) {
ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
tx_done->msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
return;
}
+ ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
+ __ath10k_htt_tx_dec_pending(htt);
+ if (htt->num_pending_tx == 0)
+ wake_up(&htt->empty_tx_wq);
+ spin_unlock_bh(&htt->tx_lock);
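+ /* msdu_id and pending-tx accounting are settled above, so the
+ * completion below runs without htt->tx_lock held
+ */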
skb_cb = ATH10K_SKB_CB(msdu);
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
- goto exit;
+ return;
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
-
-exit:
- ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
- __ath10k_htt_tx_dec_pending(htt);
- if (htt->num_pending_tx == 0)
- wake_up(&htt->empty_tx_wq);
}
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
struct wmi_roam_ev_arg *arg);
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg);
+ enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}
+static inline enum wmi_txbf_conf
+ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
+{
+ if (!ar->wmi.ops->get_txbf_conf_scheme)
+ return WMI_TXBF_CONF_UNSUPPORTED;
+
+ return ar->wmi.ops->get_txbf_conf_scheme(ar);
+}
+
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
break;
case WMI_TLV_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
- break;
+ return;
case WMI_TLV_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
return skb;
}
+static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
+{
+ return WMI_TXBF_CONF_AFTER_ASSOC;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
u32 param_value)
cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
- cfg->rx_decap_mode = __cpu_to_le32(1);
+ cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
cfg->scan_max_pending_reqs = __cpu_to_le32(4);
cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+ .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
return 0;
}
+static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
+{
+ return WMI_TXBF_CONF_BEFORE_ASSOC;
+}
+
void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_swba_ev_arg arg = {};
fftr, fftr_len,
tsf);
if (res < 0) {
- ath10k_warn(ar, "failed to process fft report: %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
res);
return;
}
ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
pool_size,
&paddr,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!ar->wmi.mem_chunks[idx].vaddr) {
ath10k_warn(ar, "failed to allocate memory chunk\n");
return -ENOMEM;
return 0;
}
-void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
{
+ struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
+ struct sk_buff *skb = ar->svc_rdy_skb;
struct wmi_svc_rdy_ev_arg arg = {};
u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
int ret;
+ if (!skb) {
+ ath10k_warn(ar, "invalid service ready event skb\n");
+ return;
+ }
+
ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
__le32_to_cpu(arg.eeprom_rd),
__le32_to_cpu(arg.num_mem_reqs));
+ dev_kfree_skb(skb);
+ ar->svc_rdy_skb = NULL;
complete(&ar->wmi.service_ready);
}
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+{
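+ /* defer to the auxiliary workqueue: parsing the event may sleep,
+ * e.g. in the GFP_KERNEL mem chunk allocation above
+ */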
+ ar->svc_rdy_skb = skb;
+ queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
+}
+
static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_rdy_ev_arg *arg)
{
break;
case WMI_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
- break;
+ return;
case WMI_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
break;
case WMI_10X_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
- break;
+ return;
case WMI_10X_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
break;
case WMI_10_2_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
- break;
+ return;
case WMI_10_2_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
break;
case WMI_10_4_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
- break;
+ return;
case WMI_10_4_SCAN_EVENTID:
ath10k_wmi_event_scan(ar, skb);
break;
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
- config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
-
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
- config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
-
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
- config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
+ INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
+
return 0;
}
{
int i;
+ cancel_work_sync(&ar->svc_rdy_work);
+
+ if (ar->svc_rdy_skb)
+ dev_kfree_skb(ar->svc_rdy_skb);
+
/* free the host memory chunks requested by firmware */
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
dma_free_coherent(ar->dev,
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0
+#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00
+
/* slot time long */
#define WMI_VDEV_SLOT_TIME_LONG 0x1
/* slot time short */
u32 pref_offchan_bw;
};
+enum wmi_txbf_conf {
+ WMI_TXBF_CONF_UNSUPPORTED,
+ WMI_TXBF_CONF_BEFORE_ASSOC,
+ WMI_TXBF_CONF_AFTER_ASSOC,
+};
+
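A hedged sketch of how a caller can branch on the scheme via the ath10k_wmi_get_txbf_conf_scheme() wrapper from the earlier hunk (the function below is illustrative, not a real ath10k symbol):

static void example_txbf_setup(struct ath10k *ar, bool assoc_done)
{
	enum wmi_txbf_conf scheme = ath10k_wmi_get_txbf_conf_scheme(ar);

	if (scheme == WMI_TXBF_CONF_UNSUPPORTED)
		return; /* nothing to configure */

	if ((scheme == WMI_TXBF_CONF_BEFORE_ASSOC && !assoc_done) ||
	    (scheme == WMI_TXBF_CONF_AFTER_ASSOC && assoc_done)) {
		/* program the WMI_VDEV_PARAM_TXBF_* bits here */
	}
}
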
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
exit:
+ if (ret) {
+ switch (ar->state) {
+ case ATH10K_STATE_ON:
+ ar->state = ATH10K_STATE_RESTARTING;
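+ /* a return value of 1 tells mac80211 to restart the hw */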
+ ret = 1;
+ break;
+ case ATH10K_STATE_OFF:
+ case ATH10K_STATE_RESTARTING:
+ case ATH10K_STATE_RESTARTED:
+ case ATH10K_STATE_UTF:
+ case ATH10K_STATE_WEDGED:
+ ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
+ ar->state);
+ ret = -EIO;
+ break;
+ }
+ }
+
mutex_unlock(&ar->conf_mutex);
- return ret ? 1 : 0;
+ return ret;
}
int ath10k_wow_init(struct ath10k *ar)
select MAC80211_LEDS
select LEDS_CLASS
select NEW_LEDS
- select AVERAGE
select ATH5K_AHB if ATH25
select ATH5K_PCI if !ATH25
---help---
ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
bool ofdm_trigger)
{
- int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+ int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)",
ofdm_trigger ? "ODFM" : "CCK");
static void
ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
- int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+ int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity");
#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */
#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */
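+/* DECLARE_EWMA(name, factor, weight) generates struct ewma_beacon_rssi
+ * and the ewma_beacon_rssi_{init,read,add}() helpers used below,
+ * replacing the generic struct ewma set up with ewma_init(..., 1024, 8).
+ */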
+DECLARE_EWMA(beacon_rssi, 1024, 8)
+
/* Driver state associated with an instance of a device */
struct ath5k_hw {
struct ath_common common;
struct ath5k_nfcal_hist ah_nfcal_hist;
/* average beacon RSSI in our BSS (used by ANI) */
- struct ewma ah_beacon_rssi_avg;
+ struct ewma_beacon_rssi ah_beacon_rssi_avg;
/* noise floor from last periodic calibration */
s32 ah_noise_floor;
trace_ath5k_rx(ah, skb);
if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
- ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
+ ewma_beacon_rssi_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
/* check beacons in IBSS mode */
if (ah->opmode == NL80211_IFTYPE_ADHOC)
ah->ah_cal_next_short = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
- ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
+ ewma_beacon_rssi_init(&ah->ah_beacon_rssi_avg);
/* clear survey data and cycle counters */
memset(&ah->survey, 0, sizeof(ah->survey));
st->mib_intr);
len += snprintf(buf + len, sizeof(buf) - len,
"beacon RSSI average:\t%d\n",
- (int)ewma_read(&ah->ah_beacon_rssi_avg));
+ (int)ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg));
#define CC_PRINT(_struct, _field) \
_struct._field, \
};
/*
- * credit distibution code that is passed into the distrbution function,
+ * credit distribution code that is passed into the distribution function,
* there are mandatory and optional codes that must be handled
*/
enum htc_credit_dist_reason {
struct sk_buff_head complete_q;
};
-struct ath_atx_ac {
- struct ath_txq *txq;
- struct list_head list;
- struct list_head tid_q;
- bool clear_ps_filter;
- bool sched;
-};
-
struct ath_frame_info {
struct ath_buf *bf;
u16 framelen;
struct sk_buff_head buf_q;
struct sk_buff_head retry_q;
struct ath_node *an;
- struct ath_atx_ac *ac;
+ struct ath_txq *txq;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
u16 seq_start;
u16 seq_next;
int baw_tail; /* next unused tx buffer slot */
s8 bar_index;
- bool sched;
bool active;
+ bool clear_ps_filter;
};
struct ath_node {
struct ieee80211_sta *sta; /* station struct we're part of */
struct ieee80211_vif *vif; /* interface with which we're associated */
struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
- struct ath_atx_ac ac[IEEE80211_NUM_ACS];
u16 maxampdu;
u8 mpdudensity;
ATH_OFFCHANNEL_ROC_DONE,
};
+enum ath_roc_complete_reason {
+ ATH_ROC_COMPLETE_EXPIRE,
+ ATH_ROC_COMPLETE_ABORT,
+ ATH_ROC_COMPLETE_CANCEL,
+};
+
struct ath_offchannel {
struct ath_chanctx chan;
struct timer_list timer;
void ath_chanctx_set_next(struct ath_softc *sc, bool force);
void ath_offchannel_next(struct ath_softc *sc);
void ath_scan_complete(struct ath_softc *sc, bool abort);
-void ath_roc_complete(struct ath_softc *sc, bool abort);
+void ath_roc_complete(struct ath_softc *sc,
+ enum ath_roc_complete_reason reason);
struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc);
#else
}
}
-void ath_roc_complete(struct ath_softc *sc, bool abort)
+void ath_roc_complete(struct ath_softc *sc, enum ath_roc_complete_reason reason)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- if (abort)
+ sc->offchannel.roc_vif = NULL;
+ sc->offchannel.roc_chan = NULL;
+
+ switch (reason) {
+ case ATH_ROC_COMPLETE_ABORT:
ath_dbg(common, CHAN_CTX, "RoC aborted\n");
- else
+ ieee80211_remain_on_channel_expired(sc->hw);
+ break;
+ case ATH_ROC_COMPLETE_EXPIRE:
ath_dbg(common, CHAN_CTX, "RoC expired\n");
+ ieee80211_remain_on_channel_expired(sc->hw);
+ break;
+ case ATH_ROC_COMPLETE_CANCEL:
+ ath_dbg(common, CHAN_CTX, "RoC canceled\n");
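+ /* no "expired" notification here: mac80211 initiated this cancel */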
+ break;
+ }
- sc->offchannel.roc_vif = NULL;
- sc->offchannel.roc_chan = NULL;
- ieee80211_remain_on_channel_expired(sc->hw);
ath_offchannel_next(sc);
ath9k_ps_restore(sc);
}
case ATH_OFFCHANNEL_ROC_START:
case ATH_OFFCHANNEL_ROC_WAIT:
sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE;
- ath_roc_complete(sc, false);
+ ath_roc_complete(sc, ATH_ROC_COMPLETE_EXPIRE);
break;
default:
break;
struct ath_node *an = file->private_data;
struct ath_softc *sc = an->sc;
struct ath_atx_tid *tid;
- struct ath_atx_ac *ac;
struct ath_txq *txq;
u32 len = 0, size = 4096;
char *buf;
size_t retval;
- int tidno, acno;
+ int tidno;
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
an->mpdudensity);
- len += scnprintf(buf + len, size - len,
- "%2s%7s\n", "AC", "SCHED");
-
- for (acno = 0, ac = &an->ac[acno];
- acno < IEEE80211_NUM_ACS; acno++, ac++) {
- txq = ac->txq;
- ath_txq_lock(sc, txq);
- len += scnprintf(buf + len, size - len,
- "%2d%7d\n",
- acno, ac->sched);
- ath_txq_unlock(sc, txq);
- }
-
len += scnprintf(buf + len, size - len,
"\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
"TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- txq = tid->ac->txq;
+ txq = tid->txq;
ath_txq_lock(sc, txq);
if (tid->active) {
len += scnprintf(buf + len, size - len,
tid->baw_head,
tid->baw_tail,
tid->bar_index,
- tid->sched);
+ !list_empty(&tid->list));
}
ath_txq_unlock(sc, txq);
}
static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
{
- int time_left;
+ unsigned long time_left;
if (atomic_read(&priv->htc->tgt_ready) > 0) {
atomic_dec(&priv->htc->tgt_ready);
{
struct sk_buff *skb;
struct htc_config_pipe_msg *cp_msg;
- int ret, time_left;
+ int ret;
+ unsigned long time_left;
skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
if (!skb) {
{
struct sk_buff *skb;
struct htc_comp_msg *comp_msg;
- int ret = 0, time_left;
+ int ret = 0;
+ unsigned long time_left;
skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
if (!skb) {
struct sk_buff *skb;
struct htc_endpoint *endpoint;
struct htc_conn_svc_msg *conn_msg;
- int ret, time_left;
+ int ret;
+ unsigned long time_left;
/* Find an available endpoint */
endpoint = get_next_avail_ep(target->endpoint);
{ AR_SREV_VERSION_9550, "9550" },
{ AR_SREV_VERSION_9565, "9565" },
{ AR_SREV_VERSION_9531, "9531" },
+ { AR_SREV_VERSION_9561, "9561" },
};
/* For devices with external radios */
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) },
{ .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
};
static const struct ieee80211_iface_combination if_comb_multi[] = {
{
.limits = if_limits_multi,
.n_limits = ARRAY_SIZE(if_limits_multi),
- .max_interfaces = 2,
+ .max_interfaces = 3,
.num_different_channels = 2,
.beacon_int_infra_match = true,
},
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, RX_INCLUDES_FCS);
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
if (ath9k_ps_enable)
ieee80211_hw_set(hw, SUPPORTS_PS);
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_WDS);
+ if (ath9k_is_chanctx_enabled())
+ hw->wiphy->interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
+
hw->wiphy->iface_combinations = if_comb;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
}
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_tx_control txctl;
- int time_left;
+ unsigned long time_left;
memset(&txctl, 0, sizeof(txctl));
txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];
u64 multicast)
{
struct ath_softc *sc = hw->priv;
+ struct ath_chanctx *ctx;
u32 rfilt;
changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
spin_lock_bh(&sc->chan_lock);
- sc->cur_chan->rxfilter = *total_flags;
+ ath_for_each_chanctx(sc, ctx)
+ ctx->rxfilter = *total_flags;
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+ sc->offchannel.chan.rxfilter = *total_flags;
+#endif
spin_unlock_bh(&sc->chan_lock);
ath9k_ps_wakeup(sc);
del_timer_sync(&sc->offchannel.timer);
if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
- ath_roc_complete(sc, true);
+ ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT);
}
if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
if (sc->offchannel.roc_vif) {
if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
- ath_roc_complete(sc, true);
+ ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL);
}
mutex_unlock(&sc->mutex);
sizeof(struct wmi_cmd_hdr);
struct sk_buff *skb;
u8 *data;
- int time_left, ret = 0;
+ unsigned long time_left;
+ int ret = 0;
if (ah->ah_flags & AH_UNPLUGGED)
return 0;
static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid)
{
- struct ath_atx_ac *ac = tid->ac;
struct list_head *list;
struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
struct ath_chanctx *ctx = avp->chanctx;
if (!ctx)
return;
- if (tid->sched)
- return;
-
- tid->sched = true;
- list_add_tail(&tid->list, &ac->tid_q);
-
- if (ac->sched)
- return;
-
- ac->sched = true;
-
list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
- list_add_tail(&ac->list, list);
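+ /* an empty tid->list now stands in for the removed sched flag */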
+ if (list_empty(&tid->list))
+ list_add_tail(&tid->list, list);
}
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
static void
ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
{
- struct ath_txq *txq = tid->ac->txq;
+ struct ath_txq *txq = tid->txq;
struct ieee80211_tx_info *tx_info;
struct sk_buff *skb, *tskb;
struct ath_buf *bf;
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
- struct ath_txq *txq = tid->ac->txq;
+ struct ath_txq *txq = tid->txq;
struct sk_buff *skb;
struct ath_buf *bf;
struct list_head bf_head;
ath_tx_queue_tid(sc, txq, tid);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
- tid->ac->clear_ps_filter = true;
+ tid->clear_ps_filter = true;
}
}
struct ieee80211_tx_rate *rates;
u32 max_4ms_framelen, frmlen;
u16 aggr_limit, bt_aggr_limit, legacy = 0;
- int q = tid->ac->txq->mac80211_qnum;
+ int q = tid->txq->mac80211_qnum;
int i;
skb = bf->bf_mpdu;
if (list_empty(&bf_q))
return false;
- if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
- tid->ac->clear_ps_filter = false;
+ if (tid->clear_ps_filter || tid->an->no_ps_filter) {
+ tid->clear_ps_filter = false;
tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
}
an = (struct ath_node *)sta->drv_priv;
txtid = ATH_AN_2_TID(an, tid);
- txq = txtid->ac->txq;
+ txq = txtid->txq;
ath_txq_lock(sc, txq);
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
- struct ath_txq *txq = txtid->ac->txq;
+ struct ath_txq *txq = txtid->txq;
ath_txq_lock(sc, txq);
txtid->active = false;
struct ath_node *an)
{
struct ath_atx_tid *tid;
- struct ath_atx_ac *ac;
struct ath_txq *txq;
bool buffered;
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- ac = tid->ac;
- txq = ac->txq;
+ txq = tid->txq;
ath_txq_lock(sc, txq);
- if (!tid->sched) {
+ if (list_empty(&tid->list)) {
ath_txq_unlock(sc, txq);
continue;
}
buffered = ath_tid_has_buffered(tid);
- tid->sched = false;
- list_del(&tid->list);
-
- if (ac->sched) {
- ac->sched = false;
- list_del(&ac->list);
- }
+ list_del_init(&tid->list);
ath_txq_unlock(sc, txq);
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
struct ath_atx_tid *tid;
- struct ath_atx_ac *ac;
struct ath_txq *txq;
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- ac = tid->ac;
- txq = ac->txq;
+ txq = tid->txq;
ath_txq_lock(sc, txq);
- ac->clear_ps_filter = true;
+ tid->clear_ps_filter = true;
if (ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(sc, txq, tid);
an = (struct ath_node *)sta->drv_priv;
tid = ATH_AN_2_TID(an, tidno);
- txq = tid->ac->txq;
+ txq = tid->txq;
ath_txq_lock(sc, txq);
tid = ATH_AN_2_TID(an, i);
- ath_txq_lock(sc, tid->ac->txq);
+ ath_txq_lock(sc, tid->txq);
while (nframes > 0) {
bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
if (!bf)
if (an->sta && !ath_tid_has_buffered(tid))
ieee80211_sta_set_buffered(an->sta, i, false);
}
- ath_txq_unlock_complete(sc, tid->ac->txq);
+ ath_txq_unlock_complete(sc, tid->txq);
}
if (list_empty(&bf_q))
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_atx_ac *ac, *last_ac;
struct ath_atx_tid *tid, *last_tid;
- struct list_head *ac_list;
+ struct list_head *tid_list;
bool sent = false;
if (txq->mac80211_qnum < 0)
return;
spin_lock_bh(&sc->chan_lock);
- ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+ tid_list = &sc->cur_chan->acq[txq->mac80211_qnum];
- if (list_empty(ac_list)) {
+ if (list_empty(tid_list)) {
spin_unlock_bh(&sc->chan_lock);
return;
}
rcu_read_lock();
- last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
- while (!list_empty(ac_list)) {
+ last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list);
+ while (!list_empty(tid_list)) {
bool stop = false;
if (sc->cur_chan->stopped)
break;
- ac = list_first_entry(ac_list, struct ath_atx_ac, list);
- last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
- list_del(&ac->list);
- ac->sched = false;
-
- while (!list_empty(&ac->tid_q)) {
-
- tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
- list);
- list_del(&tid->list);
- tid->sched = false;
+ tid = list_first_entry(tid_list, struct ath_atx_tid, list);
+ list_del_init(&tid->list);
- if (ath_tx_sched_aggr(sc, txq, tid, &stop))
- sent = true;
-
- /*
- * add tid to round-robin queue if more frames
- * are pending for the tid
- */
- if (ath_tid_has_buffered(tid))
- ath_tx_queue_tid(sc, txq, tid);
+ if (ath_tx_sched_aggr(sc, txq, tid, &stop))
+ sent = true;
- if (stop || tid == last_tid)
- break;
- }
-
- if (!list_empty(&ac->tid_q) && !ac->sched) {
- ac->sched = true;
- list_add_tail(&ac->list, ac_list);
- }
+ /*
+ * add tid to round-robin queue if more frames
+ * are pending for the tid
+ */
+ if (ath_tid_has_buffered(tid))
+ ath_tx_queue_tid(sc, txq, tid);
if (stop)
break;
- if (ac == last_ac) {
+ if (tid == last_tid) {
if (!sent)
break;
sent = false;
- last_ac = list_entry(ac_list->prev,
- struct ath_atx_ac, list);
+ last_tid = list_entry(tid_list->prev,
+ struct ath_atx_tid, list);
}
}
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
} else if (txctl->an && queue) {
- WARN_ON(tid->ac->txq != txctl->txq);
+ WARN_ON(tid->txq != txctl->txq);
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
- tid->ac->clear_ps_filter = true;
+ tid->clear_ps_filter = true;
/*
* Add this frame to software queue for scheduling later
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
struct ath_atx_tid *tid;
- struct ath_atx_ac *ac;
int tidno, acno;
for (tidno = 0, tid = &an->tid[tidno];
tid->seq_start = tid->seq_next = 0;
tid->baw_size = WME_MAX_BA;
tid->baw_head = tid->baw_tail = 0;
- tid->sched = false;
tid->active = false;
+ tid->clear_ps_filter = true;
__skb_queue_head_init(&tid->buf_q);
__skb_queue_head_init(&tid->retry_q);
+ INIT_LIST_HEAD(&tid->list);
acno = TID_TO_WME_AC(tidno);
- tid->ac = &an->ac[acno];
- }
-
- for (acno = 0, ac = &an->ac[acno];
- acno < IEEE80211_NUM_ACS; acno++, ac++) {
- ac->sched = false;
- ac->clear_ps_filter = true;
- ac->txq = sc->tx.txq_map[acno];
- INIT_LIST_HEAD(&ac->tid_q);
+ tid->txq = sc->tx.txq_map[acno];
}
}
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
- struct ath_atx_ac *ac;
struct ath_atx_tid *tid;
struct ath_txq *txq;
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- ac = tid->ac;
- txq = ac->txq;
+ txq = tid->txq;
ath_txq_lock(sc, txq);
- if (tid->sched) {
- list_del(&tid->list);
- tid->sched = false;
- }
-
- if (ac->sched) {
- list_del(&ac->list);
- tid->ac->sched = false;
- }
+ if (!list_empty(&tid->list))
+ list_del_init(&tid->list);
ath_tid_drain(sc, txq, tid);
tid->active = false;
return "P2P-CLIENT";
case NL80211_IFTYPE_P2P_GO:
return "P2P-GO";
+ case NL80211_IFTYPE_OCB:
+ return "OCB";
default:
return "UNKNOWN";
}
wil6210-y += rx_reorder.o
wil6210-y += ioctl.o
wil6210-y += fw.o
+wil6210-y += pm.o
wil6210-y += pmc.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
wil6210-y += wil_platform.o
--- /dev/null
+/* Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file contains the definitions for the boot loader
+ * for the Qualcomm "Sparrow" 60 Gigabit wireless solution.
+ */
+#ifndef BOOT_LOADER_EXPORT_H_
+#define BOOT_LOADER_EXPORT_H_
+
+struct bl_dedicated_registers_v1 {
+ __le32 boot_loader_ready; /* 0x880A3C the driver polls
+ * this dword until the BL
+ * sets it to 1 (initial
+ * value is 0)
+ */
+ __le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */
+ __le16 rf_type; /* 0x880A44 connected RF ID */
+ __le16 rf_status; /* 0x880A46 RF status,
+ * 0 is OK else error
+ */
+ __le32 baseband_type; /* 0x880A48 board type ID */
+ u8 mac_address[6]; /* 0x880A4c BL mac address */
+ u8 bl_version_major; /* 0x880A52 BL ver. major */
+ u8 bl_version_minor; /* 0x880A53 BL ver. minor */
+ __le16 bl_version_subminor; /* 0x880A54 BL ver. subminor */
+ __le16 bl_version_build; /* 0x880A56 BL ver. build */
+ /* valid only for version 2 and above */
+ __le32 bl_assert_code; /* 0x880A58 BL Assert code */
+ __le32 bl_assert_blink; /* 0x880A5C BL Assert Branch */
+ __le32 bl_reserved[22]; /* 0x880A60 - 0x880AB4 */
+ __le32 bl_magic_number; /* 0x880AB8 BL Magic number */
+} __packed;
+
+/* the following struct is the version 0 layout of the same registers */
+
+struct bl_dedicated_registers_v0 {
+ __le32 boot_loader_ready; /* 0x880A3C the driver polls
+ * this dword until the BL
+ * sets it to 1 (initial
+ * value is 0)
+ */
+#define BL_READY (1) /* ready indication */
+ __le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */
+ __le32 rf_type; /* 0x880A44 connected RF ID */
+ __le32 baseband_type; /* 0x880A48 board type ID */
+ u8 mac_address[6]; /* 0x880A4c BL mac address */
+} __packed;
+
+#endif /* BOOT_LOADER_EXPORT_H_ */
else
wil_dbg_misc(wil, "Scan has no IE's\n");
- rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len,
- request->ie);
- if (rc) {
- wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc);
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+ if (rc)
goto out;
- }
rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
* ies in FW.
*/
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
- if (rc) {
- wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+ if (rc)
goto out;
- }
/* WMI_CONNECT_CMD */
memset(&conn, 0, sizeof(conn));
{
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
- int rc = 0;
if (bcon->probe_resp_len <= hlen)
return 0;
+ /* always use IEs from the full probe frame; they have more info,
+ * notably RSN
+ */
+ bcon->proberesp_ies = f->u.probe_resp.variable;
+ bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
if (!bcon->assocresp_ies) {
- bcon->assocresp_ies = f->u.probe_resp.variable;
- bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
- rc = 1;
+ bcon->assocresp_ies = bcon->proberesp_ies;
+ bcon->assocresp_ies_len = bcon->proberesp_ies_len;
}
- return rc;
+ return 1;
}
/* internal functions for device reset and starting AP */
static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
- size_t probe_ies_len, const u8 *probe_ies,
- size_t assoc_ies_len, const u8 *assoc_ies)
-
+ struct cfg80211_beacon_data *bcon)
{
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- /* FW do not form regular beacon, so bcon IE's are not set
- * For the DMG bcon, when it will be supported, bcon IE's will
- * be reused; add something like:
- * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
- * bcon->beacon_ies);
- */
- rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, probe_ies_len, probe_ies);
- if (rc) {
- wil_err(wil, "set_ie(PROBE_RESP) failed\n");
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+ bcon->proberesp_ies);
+ if (rc)
return rc;
- }
- rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, assoc_ies_len, assoc_ies);
- if (rc) {
- wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
+ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+ bcon->assocresp_ies);
+#if 0 /* to use beacon IE's, remove this #if 0 */
+ if (rc)
return rc;
- }
- return 0;
+ rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail);
+#endif
+
+ return rc;
}
static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
const u8 *ssid, size_t ssid_len, u32 privacy,
int bi, u8 chan,
- size_t probe_ies_len, const u8 *probe_ies,
- size_t assoc_ies_len, const u8 *assoc_ies,
+ struct cfg80211_beacon_data *bcon,
u8 hidden_ssid)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
if (rc)
goto out;
- rc = _wil_cfg80211_set_ies(wiphy, probe_ies_len, probe_ies,
- assoc_ies_len, assoc_ies);
+ rc = _wil_cfg80211_set_ies(wiphy, bcon);
if (rc)
goto out;
struct cfg80211_beacon_data *bcon)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
- size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
- const u8 *pr_ies = NULL;
- size_t pr_ies_len = 0;
int rc;
u32 privacy = 0;
wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_bcon_data(bcon);
- if (bcon->probe_resp_len > hlen) {
- pr_ies = f->u.probe_resp.variable;
- pr_ies_len = bcon->probe_resp_len - hlen;
- }
-
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
wil_print_bcon_data(bcon);
}
- if (pr_ies && cfg80211_find_ie(WLAN_EID_RSN, pr_ies, pr_ies_len))
+ if (bcon->proberesp_ies &&
+ cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies,
+ bcon->proberesp_ies_len))
privacy = 1;
/* in case privacy has changed, need to restart the AP */
rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
wdev->ssid_len, privacy,
wdev->beacon_interval,
- wil->channel, pr_ies_len, pr_ies,
- bcon->assocresp_ies_len,
- bcon->assocresp_ies,
+ wil->channel, bcon,
wil->hidden_ssid);
} else {
- rc = _wil_cfg80211_set_ies(wiphy, pr_ies_len, pr_ies,
- bcon->assocresp_ies_len,
- bcon->assocresp_ies);
+ rc = _wil_cfg80211_set_ies(wiphy, bcon);
}
return rc;
struct ieee80211_channel *channel = info->chandef.chan;
struct cfg80211_beacon_data *bcon = &info->beacon;
struct cfg80211_crypto_settings *crypto = &info->crypto;
- struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
- size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
- const u8 *pr_ies = NULL;
- size_t pr_ies_len = 0;
u8 hidden_ssid;
wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_bcon_data(bcon);
wil_print_crypto(wil, crypto);
- if (bcon->probe_resp_len > hlen) {
- pr_ies = f->u.probe_resp.variable;
- pr_ies_len = bcon->probe_resp_len - hlen;
- }
-
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
wil_print_bcon_data(bcon);
rc = _wil_cfg80211_start_ap(wiphy, ndev,
info->ssid, info->ssid_len, info->privacy,
info->beacon_interval, channel->hw_value,
- pr_ies_len, pr_ies,
- bcon->assocresp_ies_len,
- bcon->assocresp_ies,
- hidden_ssid);
+ bcon, hidden_ssid);
return rc;
}
seq_printf(s, " swhead = %d\n", vring->swhead);
seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
if (x) {
- v = ioread32(x);
+ v = readl(x);
seq_printf(s, "0x%08x = %d\n", v, v);
} else {
seq_puts(s, "???\n");
static int wil_debugfs_iomem_x32_set(void *data, u64 val)
{
- iowrite32(val, (void __iomem *)data);
+ writel(val, (void __iomem *)data);
wmb(); /* make sure write propagated to HW */
return 0;
static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
{
- *val = ioread32((void __iomem *)data);
+ *val = readl((void __iomem *)data);
return 0;
}
}
DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
- wil_debugfs_ulong_set, "%llu\n");
+ wil_debugfs_ulong_set, "0x%llx\n");
static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
struct dentry *parent,
void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
if (a)
- seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+ seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
else
seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
{
int i;
u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
+ unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout,
r->head_seq_num);
else
seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
}
- seq_printf(s, "] last drop 0x%03x\n", r->ssn_last_drop);
+ seq_printf(s,
+ "] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n",
+ r->total, drop_dup + drop_old, drop_dup, drop_old,
+ r->ssn_last_drop);
}
static int wil_sta_debugfs_show(struct seq_file *s, void *data)
wil_dbg_misc(wil, "%s()\n", __func__);
- tx_itr_en = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
+ tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
- tx_itr_val =
- ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
+ tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
- rx_itr_en = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
+ rx_itr_en = wil_r(wil, RGF_DMA_ITR_RX_CNT_CTL);
if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
- rx_itr_val =
- ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
+ rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
cp->tx_coalesce_usecs = tx_itr_val;
cp->rx_coalesce_usecs = rx_itr_val;
MODULE_FIRMWARE(WIL_FW_NAME);
MODULE_FIRMWARE(WIL_FW2_NAME);
-/* target operations */
-/* register read */
-#define R(a) ioread32(wil->csr + HOSTADDR(a))
-/* register write. wmb() to make sure it is completed */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-/* register set = read, OR, write */
-#define S(a, v) W(a, R(a) | v)
-/* register clear = read, AND with inverted, write */
-#define C(a, v) W(a, R(a) & ~v)
-
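
The per-file macros removed here (and again further below) are superseded by driver-wide accessors used throughout the remaining wil6210 hunks; a minimal sketch of their likely shape, modeled directly on the old R/W/S/C definitions (the real header may differ in detail):

static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
{
	return readl(wil->csr + HOSTADDR(reg));
}

static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
{
	writel(val, wil->csr + HOSTADDR(reg));
}

/* register set: read, OR, write */
static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val)
{
	wil_w(wil, reg, wil_r(wil, reg) | val);
}

/* register clear: read, AND with inverted, write */
static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
{
	wil_w(wil, reg, wil_r(wil, reg) & ~val);
}
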
static
void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
size_t count)
FW_ADDR_CHECK(dst, block[i].addr, "address");
- x = ioread32(dst);
+ x = readl(dst);
y = (x & m) | (v & ~m);
wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x "
"(old 0x%08x val 0x%08x mask 0x%08x)\n",
le32_to_cpu(block[i].addr), y, x, v, m);
- iowrite32(y, dst);
+ writel(y, dst);
wmb(); /* finish before processing next record */
}
{
unsigned delay = 0;
- iowrite32(a, gwa_addr);
- iowrite32(gw_cmd, gwa_cmd);
+ writel(a, gwa_addr);
+ writel(gw_cmd, gwa_cmd);
wmb(); /* finish before activate gw */
- iowrite32(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
+ writel(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
do {
udelay(1); /* typical time is few usec */
if (delay++ > 100) {
wil_err_fw(wil, "gw timeout\n");
return -EINVAL;
}
- } while (ioread32(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
+ } while (readl(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
return 0;
}
wil_dbg_fw(wil, " gw write[%3d] [0x%08x] <== 0x%08x\n",
i, a, v);
- iowrite32(v, gwa_val);
+ writel(v, gwa_val);
rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
if (rc)
return rc;
sizeof(v), false);
for (k = 0; k < ARRAY_SIZE(block->value); k++)
- iowrite32(v[k], gwa_val[k]);
+ writel(v[k], gwa_val[k]);
rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
if (rc)
return rc;
static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
- iowrite32(x, addr);
+ writel(x, addr);
}
#endif /* defined(CONFIG_WIL6210_ISR_COR) */
static inline u32 wil_ioread32_and_clear(void __iomem *addr)
{
- u32 x = ioread32(addr);
+ u32 x = readl(addr);
wil_icr_clear(x, addr);
static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
{
- iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, IMS));
+ wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
{
- iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, IMS));
+ wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
{
- iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
- HOSTADDR(RGF_DMA_EP_MISC_ICR) +
- offsetof(struct RGF_ICR, IMS));
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
- iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
- HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+ wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
clear_bit(wil_status_irqen, wil->status);
}
void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
{
- iowrite32(WIL6210_IMC_TX, wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, IMC));
+ wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_TX);
}
void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
- iowrite32(WIL6210_IMC_RX, wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, IMC));
+ wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_RX);
}
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
{
- iowrite32(WIL6210_IMC_MISC, wil->csr +
- HOSTADDR(RGF_DMA_EP_MISC_ICR) +
- offsetof(struct RGF_ICR, IMC));
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_MISC);
}
static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
set_bit(wil_status_irqen, wil->status);
- iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
- HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+ wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK);
}
void wil_mask_irq(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
- iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, ICC));
- iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, ICC));
- iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
- offsetof(struct RGF_ICR, ICC));
+ wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
wil6210_unmask_irq_pseudo(wil);
wil6210_unmask_irq_tx(wil);
wil6210_unmask_irq_misc(wil);
}
-/* target write operation */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
return;
/* Disable and clear tx counter before (re)configuration */
- W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
- W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
+ wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
+ wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
wil->tx_max_burst_duration);
/* Configure TX max burst duration timer to use usec units */
- W(RGF_DMA_ITR_TX_CNT_CTL,
- BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
+ wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL,
+ BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
/* Disable and clear tx idle counter before (re)configuration */
- W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
- W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
+ wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
+ wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
wil->tx_interframe_timeout);
/* Configure TX idle (interframe timeout) timer to use usec units */
- W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
- BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
+ wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
+ BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
/* Disable and clear rx counter before (re)configuration */
- W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
- W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
+ wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
+ wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
wil->rx_max_burst_duration);
/* Configure RX max burst duration timer to use usec units */
- W(RGF_DMA_ITR_RX_CNT_CTL,
- BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
+ wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL,
+ BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
/* Disable and clear rx idle counter before (re)configuration */
- W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
- W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
+ wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
+ wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
wil->rx_interframe_timeout);
/* Configure RX idle (interframe timeout) timer to use usec units */
- W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
- BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
+ wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
+ BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
}
-#undef W
-
static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
u32 icr_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
- u32 imv_rx = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, IMV));
+ u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
+ offsetof(struct RGF_ICR, IMV));
u32 icm_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICM));
u32 icr_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
- u32 imv_tx = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, IMV));
+ u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+ offsetof(struct RGF_ICR, IMV));
u32 icm_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICM));
u32 icr_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
- u32 imv_misc = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_EP_MISC_ICR) +
- offsetof(struct RGF_ICR, IMV));
+ u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
+ offsetof(struct RGF_ICR, IMV));
wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
"Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
"Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
{
irqreturn_t rc = IRQ_HANDLED;
struct wil6210_priv *wil = cookie;
- u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+ u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE);
/**
* pseudo_cause is Clear-On-Read, no need to ACK
return rc;
}
-static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
-{
- int rc;
- /*
- * IRQ's are in the following order:
- * - Tx
- * - Rx
- * - Misc
- */
-
- rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
- WIL_NAME"_tx", wil);
- if (rc)
- return rc;
-
- rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
- WIL_NAME"_rx", wil);
- if (rc)
- goto free0;
-
- rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
- wil6210_irq_misc_thread,
- IRQF_SHARED, WIL_NAME"_misc", wil);
- if (rc)
- goto free1;
-
- return 0;
- /* error branch */
-free1:
- free_irq(irq + 1, wil);
-free0:
- free_irq(irq, wil);
-
- return rc;
-}
-
/* can't use wil_ioread32_and_clear because ICC value is not set yet */
static inline void wil_clear32(void __iomem *addr)
{
- u32 x = ioread32(addr);
+ u32 x = readl(addr);
- iowrite32(x, addr);
+ writel(x, addr);
}
void wil6210_clear_irq(struct wil6210_priv *wil)
wmb(); /* make sure write completed */
}
-int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
{
int rc;
- wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi);
+ wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx");
- if (wil->n_msi == 3)
- rc = wil6210_request_3msi(wil, irq);
- else
- rc = request_threaded_irq(irq, wil6210_hardirq,
- wil6210_thread_irq,
- wil->n_msi ? 0 : IRQF_SHARED,
- WIL_NAME, wil);
+ rc = request_threaded_irq(irq, wil6210_hardirq,
+ wil6210_thread_irq,
+ use_msi ? 0 : IRQF_SHARED,
+ WIL_NAME, wil);
return rc;
}
wil_mask_irq(wil);
free_irq(irq, wil);
- if (wil->n_msi == 3) {
- free_irq(irq + 1, wil);
- free_irq(irq + 2, wil);
- }
}
/* operation */
switch (io.op & wil_mmio_op_mask) {
case wil_mmio_read:
- io.val = ioread32(a);
+ io.val = readl(a);
need_copy = true;
break;
case wil_mmio_write:
- iowrite32(io.val, a);
+ writel(io.val, a);
wmb(); /* make sure write propagated to HW */
break;
default:
#include "wil6210.h"
#include "txrx.h"
#include "wmi.h"
+#include "boot_loader.h"
#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
clear_bit(wil_status_fwready, wil->status);
wil_err(wil, "Scan timeout detected, start fw error recovery\n");
- wil->recovery_state = fw_recovery_pending;
- schedule_work(&wil->fw_error_worker);
+ wil_fw_error_recovery(wil);
}
static int wil_wait_for_recovery(struct wil6210_priv *wil)
destroy_workqueue(wil->wmi_wq);
}
-/* target operations */
-/* register read */
-#define R(a) ioread32(wil->csr + HOSTADDR(a))
-/* register write. wmb() to make sure it is completed */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-/* register set = read, OR, write */
-#define S(a, v) W(a, R(a) | v)
-/* register clear = read, AND with inverted, write */
-#define C(a, v) W(a, R(a) & ~v)
-
static inline void wil_halt_cpu(struct wil6210_priv *wil)
{
- W(RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
- W(RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
+ wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
+ wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
}
static inline void wil_release_cpu(struct wil6210_priv *wil)
{
/* Start CPU */
- W(RGF_USER_USER_CPU_0, 1);
+ wil_w(wil, RGF_USER_USER_CPU_0, 1);
}
static int wil_target_reset(struct wil6210_priv *wil)
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
/* Clear MAC link up */
- S(RGF_HP_CTRL, BIT(15));
- S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
- S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
+ wil_s(wil, RGF_HP_CTRL, BIT(15));
+ wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
+ wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
wil_halt_cpu(wil);
/* clear all boot loader "ready" bits */
- W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
+ wil_w(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0);
/* Clear Fw Download notification */
- C(RGF_USER_USAGE_6, BIT(0));
+ wil_c(wil, RGF_USER_USAGE_6, BIT(0));
- S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+ wil_s(wil, RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
/* XTAL stabilization should take about 3ms */
usleep_range(5000, 7000);
- x = R(RGF_CAF_PLL_LOCK_STATUS);
+ x = wil_r(wil, RGF_CAF_PLL_LOCK_STATUS);
if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
wil_err(wil, "Xtal stabilization timeout\n"
"RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
return -ETIME;
}
/* switch 10k to XTAL */
- C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+ wil_c(wil, RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
/* 40 MHz */
- C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
- W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
- W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
+ wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
+ wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
- W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
- W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
+ wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
+ wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
+ /* reset A2 PCIE AHB */
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
/* wait until device ready. typical time is 20..80 msec */
do {
msleep(RST_DELAY);
- x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
+ x = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready));
if (x1 != x) {
wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
x1 = x;
x);
return -ETIME;
}
- } while (x != BIT_BL_READY);
+ } while (x != BL_READY);
- C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
/* enable fix for HW bug related to the SA/DA swap in AP Rx */
- S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
- BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+ wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
+ BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
return 0;
static int wil_get_bl_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
- struct RGF_BL bl;
-
- wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
- le32_to_cpus(&bl.ready);
- le32_to_cpus(&bl.version);
- le32_to_cpus(&bl.rf_type);
- le32_to_cpus(&bl.baseband_type);
+ union {
+ struct bl_dedicated_registers_v0 bl0;
+ struct bl_dedicated_registers_v1 bl1;
+ } bl;
+ u32 bl_ver;
+ u8 *mac;
+ u16 rf_status;
+
+ wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL),
+ sizeof(bl));
+ bl_ver = le32_to_cpu(bl.bl0.boot_loader_struct_version);
+ mac = bl.bl0.mac_address;
+
+ if (bl_ver == 0) {
+ le32_to_cpus(&bl.bl0.rf_type);
+ le32_to_cpus(&bl.bl0.baseband_type);
+ rf_status = 0; /* actually, unknown */
+ wil_info(wil,
+ "Boot Loader struct v%d: MAC = %pM RF = 0x%08x bband = 0x%08x\n",
+ bl_ver, mac,
+ bl.bl0.rf_type, bl.bl0.baseband_type);
+ wil_info(wil, "Boot Loader build unknown for struct v0\n");
+ } else {
+ le16_to_cpus(&bl.bl1.rf_type);
+ rf_status = le16_to_cpu(bl.bl1.rf_status);
+ le32_to_cpus(&bl.bl1.baseband_type);
+ le16_to_cpus(&bl.bl1.bl_version_subminor);
+ le16_to_cpus(&bl.bl1.bl_version_build);
+ wil_info(wil,
+ "Boot Loader struct v%d: MAC = %pM RF = 0x%04x (status 0x%04x) bband = 0x%08x\n",
+ bl_ver, mac,
+ bl.bl1.rf_type, rf_status,
+ bl.bl1.baseband_type);
+ wil_info(wil, "Boot Loader build %d.%d.%d.%d\n",
+ bl.bl1.bl_version_major, bl.bl1.bl_version_minor,
+ bl.bl1.bl_version_subminor, bl.bl1.bl_version_build);
+ }
- if (!is_valid_ether_addr(bl.mac_address)) {
- wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
+ if (!is_valid_ether_addr(mac)) {
+ wil_err(wil, "BL: Invalid MAC %pM\n", mac);
return -EINVAL;
}
- ether_addr_copy(ndev->perm_addr, bl.mac_address);
+ ether_addr_copy(ndev->perm_addr, mac);
if (!is_valid_ether_addr(ndev->dev_addr))
- ether_addr_copy(ndev->dev_addr, bl.mac_address);
- wil_info(wil,
- "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
- bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
+ ether_addr_copy(ndev->dev_addr, mac);
+
+	if (rf_status) { /* bad RF cable? */
+ wil_err(wil, "RF communication error 0x%04x",
+ rf_status);
+ return -EAGAIN;
+ }
return 0;
}
+static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
+{
+ u32 bl_assert_code, bl_assert_blink, bl_magic_number;
+ u32 bl_ver = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_struct_version));
+
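+	/* the assert code/blink/magic registers read below are only
+	 * meaningful for boot loader struct v2 and later, hence the
+	 * version gate
+	 */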
+ if (bl_ver < 2)
+ return;
+
+ bl_assert_code = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_assert_code));
+ bl_assert_blink = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_assert_blink));
+ bl_magic_number = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_magic_number));
+
+ if (is_err) {
+ wil_err(wil,
+ "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+ bl_assert_code, bl_assert_blink, bl_magic_number);
+ } else {
+ wil_dbg_misc(wil,
+ "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+ bl_assert_code, bl_assert_blink, bl_magic_number);
+ }
+}
+
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{
ulong to = msecs_to_jiffies(1000);
wil_dbg_misc(wil, "%s()\n", __func__);
- if (wil->hw_version == HW_VER_UNKNOWN)
- return -ENODEV;
-
WARN_ON(!mutex_is_locked(&wil->mutex));
WARN_ON(test_bit(wil_status_napi_en, wil->status));
return 0;
}
+ if (wil->hw_version == HW_VER_UNKNOWN)
+ return -ENODEV;
+
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil_bcast_fini(wil);
flush_workqueue(wil->wq_service);
flush_workqueue(wil->wmi_wq);
+ wil_bl_crash_info(wil, false);
rc = wil_target_reset(wil);
wil_rx_fini(wil);
- if (rc)
+ if (rc) {
+ wil_bl_crash_info(wil, true);
return rc;
+ }
rc = wil_get_bl_info(wil);
+ if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
+ rc = 0;
if (rc)
return rc;
return rc;
/* Mark FW as loaded from host */
- S(RGF_USER_USAGE_6, 1);
+ wil_s(wil, RGF_USER_USAGE_6, 1);
/* clear any interrupts which on-card-firmware
* may have set
wil6210_clear_irq(wil);
/* CAF_ICR - clear and mask */
/* it is W1C, clear by writing back same value */
- S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
- W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+ wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
+ wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
wil_release_cpu(wil);
}
return rc;
}
-#undef R
-#undef W
-#undef S
-#undef C
-
void wil_fw_error_recovery(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "starting fw error recovery\n");
wil_set_ethtoolops(ndev);
ndev->ieee80211_ptr = wdev;
ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
- NETIF_F_SG | NETIF_F_GRO;
+ NETIF_F_SG | NETIF_F_GRO |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_RXHASH;
+
ndev->features |= ndev->hw_features;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
#include "wil6210.h"
-static int use_msi = 1;
-module_param(use_msi, int, S_IRUGO);
-MODULE_PARM_DESC(use_msi,
- " Use MSI interrupt: "
- "0 - don't, 1 - (default) - single, or 3");
+static bool use_msi = true;
+module_param(use_msi, bool, S_IRUGO);
+MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
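+/* e.g. loading with "use_msi=0" (module option) falls back to legacy INTx */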
static
void wil_set_capabilities(struct wil6210_priv *wil)
{
- u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID));
+ u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
bitmap_zero(wil->hw_capabilities, hw_capability_last);
void wil_disable_irq(struct wil6210_priv *wil)
{
- int irq = wil->pdev->irq;
-
- disable_irq(irq);
- if (wil->n_msi == 3) {
- disable_irq(irq + 1);
- disable_irq(irq + 2);
- }
+ disable_irq(wil->pdev->irq);
}
void wil_enable_irq(struct wil6210_priv *wil)
{
- int irq = wil->pdev->irq;
-
- enable_irq(irq);
- if (wil->n_msi == 3) {
- enable_irq(irq + 1);
- enable_irq(irq + 2);
- }
+ enable_irq(wil->pdev->irq);
}
/* Bus ops */
* and only MSI should be used
*/
int msi_only = pdev->msi_enabled;
+ bool _use_msi = use_msi;
wil_dbg_misc(wil, "%s()\n", __func__);
pci_set_master(pdev);
- /*
- * how many MSI interrupts to request?
- */
- switch (use_msi) {
- case 3:
- case 1:
- wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
- break;
- case 0:
- wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
- break;
- default:
- wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
- use_msi = 1;
- }
-
- if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
- wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
- use_msi = 1;
- }
+ wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
- if (use_msi == 1 && pci_enable_msi(pdev)) {
+ if (use_msi && pci_enable_msi(pdev)) {
wil_err(wil, "pci_enable_msi failed, use INTx\n");
- use_msi = 0;
+ _use_msi = false;
}
- wil->n_msi = use_msi;
-
- if ((wil->n_msi == 0) && msi_only) {
+ if (!_use_msi && msi_only) {
wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
rc = -ENODEV;
goto stop_master;
}
- rc = wil6210_init_irq(wil, pdev->irq);
+ rc = wil6210_init_irq(wil, pdev->irq, _use_msi);
if (rc)
goto stop_master;
};
MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
+#ifdef CONFIG_PM
+
+static int wil6210_suspend(struct device *dev, bool is_runtime)
+{
+ int rc = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+ wil_dbg_pm(wil, "%s(%s)\n", __func__,
+ is_runtime ? "runtime" : "system");
+
+ rc = wil_can_suspend(wil, is_runtime);
+ if (rc)
+ goto out;
+
+ rc = wil_suspend(wil, is_runtime);
+ if (rc)
+ goto out;
+
+	/* TODO: how do I bring the card into a low power state? */
+
+ /* disable bus mastering */
+ pci_clear_master(pdev);
+ /* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
+
+out:
+ return rc;
+}
+
+static int wil6210_resume(struct device *dev, bool is_runtime)
+{
+ int rc = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+ wil_dbg_pm(wil, "%s(%s)\n", __func__,
+ is_runtime ? "runtime" : "system");
+
+ /* allow master */
+ pci_set_master(pdev);
+
+ rc = wil_resume(wil, is_runtime);
+ if (rc)
+ pci_clear_master(pdev);
+
+ return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int wil6210_pm_suspend(struct device *dev)
+{
+ return wil6210_suspend(dev, false);
+}
+
+static int wil6210_pm_resume(struct device *dev)
+{
+ return wil6210_resume(dev, false);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#endif /* CONFIG_PM */
+
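+/* SET_SYSTEM_SLEEP_PM_OPS() expands to nothing unless CONFIG_PM_SLEEP is
+ * set, matching the #ifdef around the callbacks above
+ */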
+static const struct dev_pm_ops wil6210_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+};
+
static struct pci_driver wil6210_driver = {
.probe = wil_pcie_probe,
.remove = wil_pcie_remove,
.id_table = wil6210_pcie_ids,
.name = WIL_NAME,
+ .driver = {
+ .pm = &wil6210_pm_ops,
+ },
};
static int __init wil6210_driver_init(void)
--- /dev/null
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+ int rc = 0;
+ struct wireless_dev *wdev = wil->wdev;
+
+ wil_dbg_pm(wil, "%s(%s)\n", __func__,
+ is_runtime ? "runtime" : "system");
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ break;
+ /* AP-like interface - can't suspend */
+ default:
+ wil_dbg_pm(wil, "AP-like interface\n");
+ rc = -EBUSY;
+ break;
+ }
+
+ wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
+ is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
+
+ return rc;
+}
+
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+ int rc = 0;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_pm(wil, "%s(%s)\n", __func__,
+ is_runtime ? "runtime" : "system");
+
+	/* if netif is up, hardware is alive; shut it down */
+ if (ndev->flags & IFF_UP) {
+ rc = wil_down(wil);
+ if (rc) {
+ wil_err(wil, "wil_down : %d\n", rc);
+ goto out;
+ }
+ }
+
+ if (wil->platform_ops.suspend)
+ rc = wil->platform_ops.suspend(wil->platform_handle);
+
+out:
+ wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+ is_runtime ? "runtime" : "system", rc);
+ return rc;
+}
+
+int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+{
+ int rc = 0;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_pm(wil, "%s(%s)\n", __func__,
+ is_runtime ? "runtime" : "system");
+
+ if (wil->platform_ops.resume) {
+ rc = wil->platform_ops.resume(wil->platform_handle);
+ if (rc) {
+ wil_err(wil, "platform_ops.resume : %d\n", rc);
+ goto out;
+ }
+ }
+
+	/* if netif is up, bring hardware up.
+	 * During open(), IFF_UP is set after the actual device method
+	 * invocation. This prevents a recursive call to wil_up()
+	 */
+ if (ndev->flags & IFF_UP)
+ rc = wil_up(wil);
+
+out:
+ wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+ is_runtime ? "runtime" : "system", rc);
+ return rc;
+}
goto out;
}
+ r->total++;
hseq = r->head_seq_num;
/** Due to the race between WMI events, where BACK establishment
/* frame with out of date sequence number */
if (seq_less(seq, r->head_seq_num)) {
r->ssn_last_drop = seq;
+ r->drop_old++;
+ wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
+ seq, r->head_seq_num);
dev_kfree_skb(skb);
goto out;
}
/* check if we already stored this frame */
if (r->reorder_buf[index]) {
+ r->drop_dup++;
+ wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
dev_kfree_skb(skb);
goto out;
}
break;
}
}
- iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+ wil_w(wil, v->hwtail, v->swtail);
return rc;
}
[GRO_DROP] = "GRO_DROP",
};
+ if (ndev->features & NETIF_F_RXHASH)
+		/* fake an L4 hash to ensure it won't be re-calculated later;
+		 * set the hash to any non-zero value to activate the rps
+		 * mechanism, the core will be chosen according
+		 * to the user-level rps configuration.
+		 */
+ skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
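+	/* note: rps itself is enabled from user space, typically by
+	 * writing a cpu mask to /sys/class/net/<dev>/queues/rx-0/rps_cpus
+	 */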
+
skb_orphan(skb);
if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
- d->mac.d[2] |= ((nr_frags + 1) <<
- MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+ d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
-static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
- struct vring_tx_desc *d,
- struct sk_buff *skb)
+/**
+ * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+ * @skb is used to obtain the protocol and header lengths.
+ * @tso_desc_type is the TSO descriptor type: 0 - header, 1 - first data,
+ * 2 - middle, 3 - last descriptor.
+ */
+
+static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
+ struct sk_buff *skb,
+ int tso_desc_type, bool is_ipv4,
+ int tcp_hdr_len, int skb_net_hdr_len)
{
+ d->dma.b11 = ETH_HLEN; /* MAC header length */
+ d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
+
+ d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
+ /* L4 header len: TCP header length */
+ d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+
+ /* Setup TSO: bit and desc type */
+ d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
+ (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
+ d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
+
+ d->dma.ip_length = skb_net_hdr_len;
+ /* Enable TCP/UDP checksum */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
+ /* Calculate pseudo-header */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
+}
+
+/**
+ * Sets the descriptor @d up for csum. The corresponding
+ * @skb is used to obtain the protocol and header lengths.
+ * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
+ * Note: if d == NULL, the function only returns the protocol result.
+ *
+ * It is very similar to wil_tx_desc_offload_setup_tso above; the code is
+ * kept separate ("if unrolling") to optimize the critical path.
+ */
+
+static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
+				     struct sk_buff *skb)
+{
int protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
}
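+/* judging by the bit names: EOP ends the packet's descriptor chain, while
+ * MARK_WB and DMA_IT request status write-back and a completion interrupt
+ * for this descriptor
+ */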
+static inline void wil_tx_last_desc(struct vring_tx_desc *d)
+{
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
+ BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
+ BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+}
+
+static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
+{
+ d->dma.d0 |= wil_tso_type_lst <<
+ DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
+}
+
+static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ /* point to descriptors in shared memory */
+ volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
+ *_first_desc = NULL;
+
+ /* pointers to shadow descriptors */
+ struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
+ *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
+ *first_desc = &first_desc_mem;
+
+ /* pointer to shadow descriptors' context */
+ struct wil_ctx *hdr_ctx, *first_ctx = NULL;
+
+ int descs_used = 0; /* total number of used descriptors */
+	int sg_desc_cnt = 0; /* number of descriptors for current mss */
+
+ u32 swhead = vring->swhead;
+ int used, avail = wil_vring_avail_tx(vring);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int min_desc_required = nr_frags + 1;
+ int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
+ int f, len, hdrlen, headlen;
+ int vring_index = vring - wil->vring_tx;
+ struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ uint i = swhead;
+ dma_addr_t pa;
+ const skb_frag_t *frag = NULL;
+ int rem_data = mss;
+ int lenmss;
+	bool hdr_compensation_need = true;
+ int desc_tso_type = wil_tso_type_first;
+ bool is_ipv4;
+ int tcp_hdr_len;
+ int skb_net_hdr_len;
+ int gso_type;
+
+ wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
+ __func__, skb->len, vring_index);
+
+ if (unlikely(!txdata->enabled))
+ return -EINVAL;
+
+	/* A typical 4K page holds 3-4 payloads; we assume each fragment
+	 * is a full payload, that's how min_desc_required has been
+	 * calculated. In reality we might need more or fewer descriptors;
+	 * this is the initial check only.
+	 */
+ if (unlikely(avail < min_desc_required)) {
+ wil_err_ratelimited(wil,
+ "TSO: Tx ring[%2d] full. No space for %d fragments\n",
+ vring_index, min_desc_required);
+ return -ENOMEM;
+ }
+
+	/* Header Length = MAC header len + IP header len + TCP header len */
+ hdrlen = ETH_HLEN +
+ (int)skb_network_header_len(skb) +
+ tcp_hdrlen(skb);
+
+ gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
+ switch (gso_type) {
+ case SKB_GSO_TCPV4:
+ /* TCP v4, zero out the IP length and IPv4 checksum fields
+ * as required by the offloading doc
+ */
+ ip_hdr(skb)->tot_len = 0;
+ ip_hdr(skb)->check = 0;
+ is_ipv4 = true;
+ break;
+ case SKB_GSO_TCPV6:
+ /* TCP v6, zero out the payload length */
+ ipv6_hdr(skb)->payload_len = 0;
+ is_ipv4 = false;
+ break;
+ default:
+ /* other than TCPv4 or TCPv6 types are not supported for TSO.
+ * It is also illegal for both to be set simultaneously
+ */
+ return -EINVAL;
+ }
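+	/* the length/checksum fields zeroed above are recomputed by the HW
+	 * for every segment it generates
+	 */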
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return -EINVAL;
+
+ /* tcp header length and skb network header length are fixed for all
+	 * packet's descriptors - read them once here
+ */
+ tcp_hdr_len = tcp_hdrlen(skb);
+ skb_net_hdr_len = skb_network_header_len(skb);
+
+ _hdr_desc = &vring->va[i].tx;
+
+ pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ wil_err(wil, "TSO: Skb head DMA map error\n");
+ goto err_exit;
+ }
+
+ wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
+ wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
+ tcp_hdr_len, skb_net_hdr_len);
+ wil_tx_last_desc(hdr_desc);
+
+ vring->ctx[i].mapped_as = wil_mapped_as_single;
+ hdr_ctx = &vring->ctx[i];
+
+ descs_used++;
+ headlen = skb_headlen(skb) - hdrlen;
+
+ for (f = headlen ? -1 : 0; f < nr_frags; f++) {
+ if (headlen) {
+ len = headlen;
+ wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
+ len);
+ } else {
+ frag = &skb_shinfo(skb)->frags[f];
+ len = frag->size;
+ wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
+ }
+
+ while (len) {
+ wil_dbg_txrx(wil,
+ "TSO: len %d, rem_data %d, descs_used %d\n",
+ len, rem_data, descs_used);
+
+ if (descs_used == avail) {
+ wil_err(wil, "TSO: ring overflow\n");
+ goto dma_error;
+ }
+
+ lenmss = min_t(int, rem_data, len);
+ i = (swhead + descs_used) % vring->size;
+ wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
+
+ if (!headlen) {
+ pa = skb_frag_dma_map(dev, frag,
+ frag->size - len, lenmss,
+ DMA_TO_DEVICE);
+ vring->ctx[i].mapped_as = wil_mapped_as_page;
+ } else {
+ pa = dma_map_single(dev,
+ skb->data +
+ skb_headlen(skb) - headlen,
+ lenmss,
+ DMA_TO_DEVICE);
+ vring->ctx[i].mapped_as = wil_mapped_as_single;
+ headlen -= lenmss;
+ }
+
+ if (unlikely(dma_mapping_error(dev, pa)))
+ goto dma_error;
+
+ _desc = &vring->va[i].tx;
+
+ if (!_first_desc) {
+ _first_desc = _desc;
+ first_ctx = &vring->ctx[i];
+ d = first_desc;
+ } else {
+ d = &desc_mem;
+ }
+
+ wil_tx_desc_map(d, pa, lenmss, vring_index);
+ wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
+ is_ipv4, tcp_hdr_len,
+ skb_net_hdr_len);
+
+ /* use tso_type_first only once */
+ desc_tso_type = wil_tso_type_mid;
+
+ descs_used++; /* desc used so far */
+ sg_desc_cnt++; /* desc used for this segment */
+ len -= lenmss;
+ rem_data -= lenmss;
+
+ wil_dbg_txrx(wil,
+				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d\n",
+ len, rem_data, descs_used, sg_desc_cnt);
+
+			/* Close the segment if we reached the mss size
+			 * or the last frag
+			 */
+ if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
+ if (hdr_compensation_need) {
+					/* first segment includes the hdr
+					 * desc for release
+ */
+ hdr_ctx->nr_frags = sg_desc_cnt;
+ wil_tx_desc_set_nr_frags(first_desc,
+ sg_desc_cnt +
+ 1);
+ hdr_compensation_need = false;
+ } else {
+ wil_tx_desc_set_nr_frags(first_desc,
+ sg_desc_cnt);
+ }
+ first_ctx->nr_frags = sg_desc_cnt - 1;
+
+ wil_tx_last_desc(d);
+
+ /* first descriptor may also be the last
+ * for this mss - make sure not to copy
+ * it twice
+ */
+ if (first_desc != d)
+ *_first_desc = *first_desc;
+
+			/* last descriptor will be copied at the end
+			 * of this TSO processing
+			 */
+ if (f < nr_frags - 1 || len > 0)
+ *_desc = *d;
+
+ rem_data = mss;
+ _first_desc = NULL;
+ sg_desc_cnt = 0;
+ } else if (first_desc != d) /* update mid descriptor */
+ *_desc = *d;
+ }
+ }
+
+ /* first descriptor may also be the last.
+	 * In this case the d pointer is invalid
+ */
+ if (_first_desc == _desc)
+ d = first_desc;
+
+ /* Last data descriptor */
+ wil_set_tx_desc_last_tso(d);
+ *_desc = *d;
+
+	/* Fill the total number of descriptors in first desc (hdr) */
+ wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
+ *_hdr_desc = *hdr_desc;
+
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ vring->ctx[i].skb = skb_get(skb);
+
+ /* performance monitoring */
+ used = wil_vring_used_tx(vring);
+ if (wil_val_in_range(vring_idle_trsh,
+ used, used + descs_used)) {
+ txdata->idle += get_cycles() - txdata->last_idle;
+ wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
+ vring_index, used, used + descs_used);
+ }
+
+ /* advance swhead */
+	wil_vring_advance_head(vring, descs_used);
+	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ wil_w(wil, vring->hwtail, vring->swhead);
+ return 0;
+
+dma_error:
+ wil_err(wil, "TSO: DMA map page error\n");
+ while (descs_used > 0) {
+ struct wil_ctx *ctx;
+
+		/* unwind from the last used descriptor back to swhead */
+		i = (swhead + descs_used - 1) % vring->size;
+ d = (struct vring_tx_desc *)&vring->va[i].tx;
+ _desc = &vring->va[i].tx;
+ *d = *_desc;
+ _desc->dma.status = TX_DMA_STATUS_DU;
+ ctx = &vring->ctx[i];
+ wil_txdesc_unmap(dev, d, ctx);
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
+ memset(ctx, 0, sizeof(*ctx));
+ descs_used--;
+ }
+
+err_exit:
+ return -EINVAL;
+}
+
static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
struct sk_buff *skb)
{
bool mcast = (vring_index == wil->bcast_vring);
uint len = skb_headlen(skb);
- wil_dbg_txrx(wil, "%s()\n", __func__);
+ wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
+ __func__, skb->len, vring_index);
if (unlikely(!txdata->enabled))
return -EINVAL;
d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
}
/* Process TCP/UDP checksum offloading */
- if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
+ if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
vring_index);
goto dma_error;
}
vring->ctx[i].nr_frags = nr_frags;
- wil_tx_desc_set_nr_frags(d, nr_frags);
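+	/* "+ 1" accounts for the skb head descriptor, since
+	 * wil_tx_desc_set_nr_frags() no longer adds it internally
+	 */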
+ wil_tx_desc_set_nr_frags(d, nr_frags + 1);
/* middle segments */
for (; f < nr_frags; f++) {
* if it succeeded for 1-st descriptor,
* it will succeed here too
*/
- wil_tx_desc_offload_cksum_set(wil, d, skb);
+ wil_tx_desc_offload_setup(d, skb);
}
/* for the last seg only */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
vring->swhead);
trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
- iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ wil_w(wil, vring->hwtail, vring->swhead);
return 0;
dma_error:
int rc;
spin_lock(&txdata->lock);
- rc = __wil_tx_vring(wil, vring, skb);
+
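+	/* GSO frames take the TSO path; everything else the regular one */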
+ rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
+ (wil, vring, skb);
+
spin_unlock(&txdata->lock);
+
return rc;
}
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
/**
* For the fragmented skb, HW will set DU bit only for the
- * last fragment. look for it
+	 * last fragment. Look for it.
+	 * In TSO the first DU will include the hdr desc
*/
int lf = (vring->swtail + ctx->nr_frags) % vring->size;
/* TODO: check we are not past head */
__le16 length;
} __packed;
+/* TSO type used in dma descriptor d0 bits 11-12 */
+enum {
+ wil_tso_type_hdr = 0,
+ wil_tso_type_first = 1,
+ wil_tso_type_mid = 2,
+ wil_tso_type_lst = 3,
+};
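+/* A TSO frame is emitted as one hdr descriptor followed by data descriptors
+ * typed first/mid per segment; the very last data descriptor is re-marked
+ * lst (see wil_set_tx_desc_last_tso() in txrx.c)
+ */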
+
/* Rx descriptor - MAC part
* [dword 0]
* bit 0.. 3 : tid:4 The QoS (b3-0) TID Field
u32 IMC; /* Mask Clear, write 1 to clear */
} __packed;
-struct RGF_BL {
- u32 ready; /* 0x880A3C bit [0] */
-#define BIT_BL_READY BIT(0)
- u32 version; /* 0x880A40 version of the BL struct */
- u32 rf_type; /* 0x880A44 ID of the connected RF */
- u32 baseband_type; /* 0x880A48 ID of the baseband */
- u8 mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
- u8 pad[2];
-} __packed;
-
/* registers - FW addresses */
#define RGF_USER_USAGE_1 (0x880004)
#define RGF_USER_USAGE_6 (0x880018)
};
/* popular locations */
-#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
-#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
- offsetof(struct RGF_ICR, ICS))
+#define RGF_MBOX RGF_USER_USER_SCRATCH_PAD
+#define HOST_MBOX HOSTADDR(RGF_MBOX)
#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
/* ISR register bits */
* @ssn: Starting Sequence Number expected to be aggregated.
* @buf_size: buffer size for incoming A-MPDUs
* @timeout: reset timer value (in TUs).
+ * @ssn_last_drop: SSN of the last dropped frame
+ * @total: total number of processed incoming frames
+ * @drop_dup: duplicate frames dropped for this reorder buffer
+ * @drop_old: old frames dropped for this reorder buffer
* @dialog_token: dialog token for aggregation session
- * @rcu_head: RCU head used for freeing this struct
- *
- * This structure's lifetime is managed by RCU, assignments to
- * the array holding it must hold the aggregation mutex.
- *
+ * @first_time: true the first time this buffer is used
*/
struct wil_tid_ampdu_rx {
struct sk_buff **reorder_buf;
u16 buf_size;
u16 timeout;
u16 ssn_last_drop;
+ unsigned long long total; /* frames processed */
+ unsigned long long drop_dup;
+ unsigned long long drop_old;
u8 dialog_token;
	bool first_time; /* is it the first time this buffer is used? */
};
struct wil6210_priv {
struct pci_dev *pdev;
- int n_msi;
struct wireless_dev *wdev;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
+#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg)
+
+/* target operations */
+/* register read */
+static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
+{
+ return readl(wil->csr + HOSTADDR(reg));
+}
+
+/* register write. wmb() to make sure it is completed */
+static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+ writel(val, wil->csr + HOSTADDR(reg));
+ wmb(); /* wait for write to propagate to the HW */
+}
+
+/* register set = read, OR, write */
+static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+ wil_w(wil, reg, wil_r(wil, reg) | val);
+}
+
+/* register clear = read, AND with inverted, write */
+static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+ wil_w(wil, reg, wil_r(wil, reg) & ~val);
+}
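+/* illustrative composition of the accessors above:
+ *
+ *	wil_s(wil, RGF_USER_USAGE_6, BIT(0));	- set bit 0
+ *	wil_c(wil, RGF_USER_USAGE_6, BIT(0));	- clear it again
+ *
+ * both are read-modify-write sequences and hence not atomic with respect
+ * to concurrent writers
+ */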
#if defined(CONFIG_DYNAMIC_DEBUG)
#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
void wil_back_tx_flush(struct wil6210_priv *wil);
void wil6210_clear_irq(struct wil6210_priv *wil);
-int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
void wil_mask_irq(struct wil6210_priv *wil);
void wil_unmask_irq(struct wil6210_priv *wil);
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
int wil_request_firmware(struct wil6210_priv *wil, const char *name);
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+
#endif /* __WIL6210_H__ */
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include "linux/device.h"
+#include <linux/device.h>
#include "wil_platform.h"
int __init wil_platform_modinit(void)
wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
/* wait till FW finish with previous command */
for (retry = 5; retry > 0; retry--) {
- r->tail = ioread32(wil->csr + HOST_MBOX +
- offsetof(struct wil6210_mbox_ctl, tx.tail));
+ r->tail = wil_r(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.tail));
if (next_head != r->tail)
break;
msleep(20);
wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
/* mark entry as full */
- iowrite32(1, wil->csr + HOSTADDR(r->head) +
- offsetof(struct wil6210_mbox_ring_desc, sync));
+ wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1);
/* advance next ptr */
- iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
- offsetof(struct wil6210_mbox_ctl, tx.head));
+ wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head),
+ r->head = next_head);
trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
/* interrupt to FW */
- iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+ wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
+ SW_INT_MBOX);
return 0;
}
struct wiphy *wiphy = wil_to_wiphy(wil);
struct ieee80211_mgmt *rx_mgmt_frame =
(struct ieee80211_mgmt *)data->payload;
- int ch_no = data->info.channel+1;
- u32 freq = ieee80211_channel_to_frequency(ch_no,
- IEEE80211_BAND_60GHZ);
- struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
- s32 signal = data->info.sqi;
- __le16 fc = rx_mgmt_frame->frame_control;
- u32 d_len = le32_to_cpu(data->info.len);
- u16 d_status = le16_to_cpu(data->info.status);
-
- wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
+ int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload);
+ int ch_no;
+ u32 freq;
+ struct ieee80211_channel *channel;
+ s32 signal;
+ __le16 fc;
+ u32 d_len;
+ u16 d_status;
+
+ if (flen < 0) {
+ wil_err(wil, "MGMT Rx: short event, len %d\n", len);
+ return;
+ }
+
+ d_len = le32_to_cpu(data->info.len);
+ if (d_len != flen) {
+ wil_err(wil,
+ "MGMT Rx: length mismatch, d_len %d should be %d\n",
+ d_len, flen);
+ return;
+ }
+
+ ch_no = data->info.channel + 1;
+ freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ);
+ channel = ieee80211_get_channel(wiphy, freq);
+ signal = data->info.sqi;
+ d_status = le16_to_cpu(data->info.status);
+ fc = rx_mgmt_frame->frame_control;
+
+ wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n",
data->info.channel, data->info.mcs, data->info.snr,
data->info.sqi);
wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
data->info.qid, data->info.mid, data->info.cid);
+ wil_hex_dump_wmi("MGMT Rx ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+ d_len, true);
if (!channel) {
wil_err(wil, "Frame on unsupported channel\n");
}
}
+static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_tx_mgmt_packet_event *data = d;
+ struct ieee80211_mgmt *mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int flen = len - offsetof(struct wmi_tx_mgmt_packet_event, payload);
+
+ wil_hex_dump_wmi("MGMT Tx ", DUMP_PREFIX_OFFSET, 16, 1, mgmt_frame,
+ flen, true);
+}
+
static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
void *d, int len)
{
{WMI_READY_EVENTID, wmi_evt_ready},
{WMI_FW_READY_EVENTID, wmi_evt_fw_ready},
{WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
+ {WMI_TX_MGMT_PACKET_EVENTID, wmi_evt_tx_mgmt},
{WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
{WMI_CONNECT_EVENTID, wmi_evt_connect},
{WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
u16 len;
bool q;
- r->head = ioread32(wil->csr + HOST_MBOX +
- offsetof(struct wil6210_mbox_ctl, rx.head));
+ r->head = wil_r(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.head));
if (r->tail == r->head)
break;
cmd = (void *)&evt->event.wmi;
wil_memcpy_fromio_32(cmd, src, len);
/* mark entry as empty */
- iowrite32(0, wil->csr + HOSTADDR(r->tail) +
- offsetof(struct wil6210_mbox_ring_desc, sync));
+ wil_w(wil, r->tail +
+ offsetof(struct wil6210_mbox_ring_desc, sync), 0);
/* indicate */
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
(len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
/* advance tail */
r->tail = r->base + ((r->tail - r->base +
sizeof(struct wil6210_mbox_ring_desc)) % r->size);
- iowrite32(r->tail, wil->csr + HOST_MBOX +
- offsetof(struct wil6210_mbox_ctl, rx.tail));
+ wil_w(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
/* add to the pending list */
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
{
+ static const char *const names[] = {
+ [WMI_FRAME_BEACON] = "BEACON",
+ [WMI_FRAME_PROBE_REQ] = "PROBE_REQ",
+		[WMI_FRAME_PROBE_RESP] = "PROBE_RESP",
+		[WMI_FRAME_ASSOC_REQ] = "ASSOC_REQ",
+		[WMI_FRAME_ASSOC_RESP] = "ASSOC_RESP",
+ };
int rc;
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
+ if (!cmd) {
+ rc = -ENOMEM;
+ goto out;
+ }
if (!ie)
ie_len = 0;
memcpy(cmd->ie_info, ie, ie_len);
rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
kfree(cmd);
+out:
+ if (rc) {
+ const char *name = type < ARRAY_SIZE(names) ?
+ names[type] : "??";
+ wil_err(wil, "set_ie(%d %s) failed : %d\n", type, name, rc);
+ }
return rc;
}
int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
{
+ int rc;
+ u16 reason_code;
struct wmi_disconnect_sta_cmd cmd = {
.disconnect_reason = cpu_to_le16(reason),
};
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_disconnect_event evt;
+ } __packed reply;
ether_addr_copy(cmd.dst_mac, mac);
wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
- return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
+ rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd),
+ WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000);
+ /* failure to disconnect in reasonable time treated as FW error */
+ if (rc) {
+ wil_fw_error_recovery(wil);
+ return rc;
+ }
+
+ /* call event handler manually after processing wmi_call,
+ * to avoid deadlock - disconnect event handler acquires wil->mutex
+ * while it is already held here
+ */
+ reason_code = le16_to_cpu(reply.evt.protocol_reason_status);
+
+ wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
+ reply.evt.bssid, reason_code,
+ reply.evt.disconnect_reason);
+
+ wil->sinfo_gen++;
+ wil6210_disconnect(wil, reply.evt.bssid, reason_code, true);
+
+ return 0;
}
int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
/* search for handler */
if (!wmi_evt_call_handler(wil, id, evt_data,
len - sizeof(*wmi))) {
- wil_err(wil, "Unhandled event 0x%04x\n", id);
+ wil_info(wil, "Unhandled event 0x%04x\n", id);
}
} else {
wil_err(wil, "Unknown event type\n");
}
}
-static void atomic_orr(int val, atomic_t *v)
-{
- int old_val;
-
- old_val = atomic_read(v);
- while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
- old_val = atomic_read(v);
-}
-
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
struct brcmf_core *buscore;
if (val) {
brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
bus->sdcnt.f1regdata++;
- atomic_orr(val, &bus->intstatus);
+ atomic_or(val, &bus->intstatus);
}
return ret;
/* Keep still-pending events for next scheduling */
if (intstatus)
- atomic_orr(intstatus, &bus->intstatus);
+ atomic_or(intstatus, &bus->intstatus);
brcmf_sdio_clrintr(bus);
switch(type) {
case HOSTAP_INTERFACE_AP:
- dev->tx_queue_len = 0; /* use main radio device queue */
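+		/* IFF_NO_QUEUE is the explicit replacement for the old
+		 * "tx_queue_len = 0" idiom: it tells the core the device
+		 * needs no qdisc of its own
+		 */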
+ dev->priv_flags |= IFF_NO_QUEUE; /* use main radio device queue */
dev->netdev_ops = &hostap_mgmt_netdev_ops;
dev->type = ARPHRD_IEEE80211;
dev->header_ops = &hostap_80211_ops;
dev->netdev_ops = &hostap_master_ops;
break;
default:
- dev->tx_queue_len = 0; /* use main radio device queue */
+ dev->priv_flags |= IFF_NO_QUEUE; /* use main radio device queue */
dev->netdev_ops = &hostap_netdev_ops;
}
void iwl_down(struct iwl_priv *priv);
void iwl_cancel_deferred_work(struct iwl_priv *priv);
void iwlagn_prepare_restart(struct iwl_priv *priv);
-int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb);
bool iwl_check_for_ct_kill(struct iwl_priv *priv);
struct ieee80211_sta *sta, u16 tid);
int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
static inline u32 iwl_tx_status_to_mac80211(u32 status)
{
/* bt coex */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_link_quality_cmd *lq, u8 flags, bool init);
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct ieee80211_sta *sta);
pos += scnprintf(buf + pos, buf_size - pos,
"NVM version: 0x%x\n", nvm_ver);
for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
- pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
- buf_size - pos, 0);
- pos += strlen(buf + pos);
- if (buf_size - pos > 0)
- buf[pos++] = '\n';
+ pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+ ofs, ptr + ofs);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
/* ieee device used by generic ieee processing code */
struct ieee80211_hw *hw;
+ struct napi_struct *napi;
+
struct list_head calib_results;
struct workqueue_struct *workqueue;
enum ieee80211_band band;
u8 valid_contexts;
- int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+ void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb);
struct iwl_notif_wait_data notif_wait;
return need_update;
}
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
/* bt coex disabled */
- return 0;
+ return;
}
IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
/* FIXME: based on notification, adjust the prio_boost */
priv->bt_ci_compliance = coex->bt_ci_compliance;
- return 0;
}
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
}
}
+ ret = iwl_trans_start_hw(priv->trans);
+ if (ret) {
+ IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+ goto error;
+ }
+
ret = iwl_run_init_ucode(priv);
if (ret) {
IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
goto error;
}
+ ret = iwl_trans_start_hw(priv->trans);
+ if (ret) {
+ IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+ goto error;
+ }
+
ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
if (ret) {
IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
u32 error_id;
} err_info;
struct iwl_notification_wait status_wait;
- static const u8 status_cmd[] = {
+ static const u16 status_cmd[] = {
REPLY_WOWLAN_GET_STATUS,
};
struct iwlagn_wowlan_status status_data = {};
return false;
}
-static void iwl_napi_add(struct iwl_op_mode *op_mode,
- struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight)
-{
- struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-
- ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
-}
-
static const struct iwl_op_mode_ops iwl_dvm_ops = {
.start = iwl_op_mode_dvm_start,
.stop = iwl_op_mode_dvm_stop,
.cmd_queue_full = iwl_cmd_queue_full,
.nic_config = iwl_nic_config,
.wimax_active = iwl_wimax_active,
- .napi_add = iwl_napi_add,
};
/*****************************************************************************
/*
* Try to switch to new modulation mode from legacy
*/
-static int rs_move_legacy_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- int index)
+static void rs_move_legacy_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ int index)
{
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
}
search_tbl->lq_type = LQ_NONE;
- return 0;
+ return;
out:
lq_sta->search_better_tbl = 1;
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
- return 0;
-
}
/*
* Try to switch to new modulation mode from SISO
*/
-static int rs_move_siso_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
+static void rs_move_siso_to_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int index)
{
u8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
break;
}
search_tbl->lq_type = LQ_NONE;
- return 0;
+ return;
out:
lq_sta->search_better_tbl = 1;
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
-
- return 0;
}
/*
* Try to switch to new modulation mode from MIMO2
*/
-static int rs_move_mimo2_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
+static void rs_move_mimo2_to_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int index)
{
s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
break;
}
search_tbl->lq_type = LQ_NONE;
- return 0;
+ return;
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
- return 0;
-
}
/*
* Try to switch to new modulation mode from MIMO3
*/
-static int rs_move_mimo3_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
+static void rs_move_mimo3_to_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int index)
{
s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
break;
}
search_tbl->lq_type = LQ_NONE;
- return 0;
+ return;
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
-
- return 0;
-
}
/*
*
******************************************************************************/
-static int iwlagn_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_error(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_error_resp *err_resp = (void *)pkt->data;
err_resp->cmd_id,
le16_to_cpu(err_resp->bad_cmd_seq_num),
le32_to_cpu(err_resp->error_info));
- return 0;
}
-static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_csa_notification *csa = (void *)pkt->data;
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- return 0;
+ return;
if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
rxon->channel = csa->channel;
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, false);
}
- return 0;
}
-static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_spectrum_notification *report = (void *)pkt->data;
if (!report->state) {
IWL_DEBUG_11H(priv,
"Spectrum Measure Notification: Start\n");
- return 0;
+ return;
}
memcpy(&priv->measure_report, report, sizeof(*report));
priv->measurement_status |= MEASUREMENT_READY;
- return 0;
}
-static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
- return 0;
}
-static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 __maybe_unused len = iwl_rx_packet_len(pkt);
IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
"notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
- return 0;
}
-static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
#endif
priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
- return 0;
}
/**
}
#endif
-static int iwlagn_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
unsigned long stamp = jiffies;
const int reg_recalib_period = 60;
len, sizeof(struct iwl_bt_notif_statistics),
sizeof(struct iwl_notif_statistics));
spin_unlock(&priv->statistics.lock);
- return 0;
+ return;
}
change = common->temperature != priv->statistics.common.temperature ||
priv->lib->temperature(priv);
spin_unlock(&priv->statistics.lock);
-
- return 0;
}
-static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_notif_statistics *stats = (void *)pkt->data;
#endif
IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
}
- iwlagn_rx_statistics(priv, rxb, cmd);
- return 0;
+
+ iwlagn_rx_statistics(priv, rxb);
}
/* Handle notification from uCode that card's power state is changing
* due to software, hardware, or critical temperature RFKILL */
-static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_card_state_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
test_bit(STATUS_RF_KILL_HW, &priv->status)))
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
test_bit(STATUS_RF_KILL_HW, &priv->status));
- return 0;
}
-static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (!test_bit(STATUS_SCANNING, &priv->status))
iwl_init_sensitivity(priv);
}
- return 0;
}
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
* This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
priv->ampdu_ref++;
memcpy(&priv->last_phy_res, pkt->data,
sizeof(struct iwl_rx_phy_res));
- return 0;
}
/*
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx(priv->hw, skb);
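+	/* deliver in NAPI context so mac80211 can feed the frame to GRO */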
+ ieee80211_rx_napi(priv->hw, skb, priv->napi);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
}
/* Called for REPLY_RX_MPDU_CMD */
-static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct ieee80211_hdr *header;
struct ieee80211_rx_status rx_status = {};
if (!priv->last_phy_res_valid) {
IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return 0;
+ return;
}
phy_res = &priv->last_phy_res;
amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
phy_res->cfg_phy_cnt);
- return 0;
+ return;
}
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
le32_to_cpu(rx_pkt_status));
- return 0;
+ return;
}
/* This will be used in several places later */
iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
- return 0;
}
-static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_wipan_noa_data *new_data, *old_data;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (old_data)
kfree_rcu(old_data, rcu_head);
-
- return 0;
}
/**
*/
void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
- int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+ void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
handlers = priv->rx_handlers;
iwlagn_bt_rx_handler_setup(priv);
}
-int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- int err = 0;
/*
* Do the notification wait before RX handlers so
* rx_handlers table. See iwl_setup_rx_handlers() */
if (priv->rx_handlers[pkt->hdr.cmd]) {
priv->rx_handlers_stats[pkt->hdr.cmd]++;
- err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
+ priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
} else {
/* No handling needed */
IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
iwl_dvm_get_cmd_string(pkt->hdr.cmd),
pkt->hdr.cmd);
}
- return err;
}
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
__le32 old_filter = send->filter_flags;
u8 old_dev_type = send->dev_type;
int ret;
- static const u8 deactivate_cmd[] = {
+ static const u16 deactivate_cmd[] = {
REPLY_WIPAN_DEACTIVATION_COMPLETE
};
}
/* Service response to REPLY_SCAN_CMD (0x80) */
-static int iwl_rx_reply_scan(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwl_rx_reply_scan(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
- return 0;
}
/* Service SCAN_START_NOTIFICATION (0x82) */
-static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scanstart_notification *notif = (void *)pkt->data;
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
notif->status, notif->beacon_timer);
-
- return 0;
}
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
le32_to_cpu(notif->statistics[0]),
le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
- return 0;
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;
queue_work(priv->workqueue,
&priv->bt_traffic_change_work);
}
- return 0;
}
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
return 0;
}
-static int iwl_process_add_sta_resp(struct iwl_priv *priv,
- struct iwl_addsta_cmd *addsta,
- struct iwl_rx_packet *pkt)
+static void iwl_process_add_sta_resp(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
{
struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
- u8 sta_id = addsta->sta.sta_id;
- int ret = -EIO;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
- return ret;
- }
-
- IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
- sta_id);
+ IWL_DEBUG_INFO(priv, "Processing response for adding station\n");
spin_lock_bh(&priv->sta_lock);
switch (add_sta_resp->status) {
case ADD_STA_SUCCESS_MSK:
IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- ret = iwl_sta_ucode_activate(priv, sta_id);
break;
case ADD_STA_NO_ROOM_IN_TABLE:
- IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
- sta_id);
+ IWL_ERR(priv, "Adding station failed, no room in table.\n");
break;
case ADD_STA_NO_BLOCK_ACK_RESOURCE:
- IWL_ERR(priv, "Adding station %d failed, no block ack "
- "resource.\n", sta_id);
+ IWL_ERR(priv,
+ "Adding station failed, no block ack resource.\n");
break;
case ADD_STA_MODIFY_NON_EXIST_STA:
- IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
- sta_id);
+ IWL_ERR(priv, "Attempting to modify non-existing station\n");
break;
default:
IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
break;
}
- IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- sta_id, priv->stations[sta_id].sta.sta.addr);
-
- /*
- * XXX: The MAC address in the command buffer is often changed from
- * the original sent to the device. That is, the MAC address
- * written to the command buffer often is not the same MAC address
- * read from the command buffer when the command returns. This
- * issue has not yet been resolved and this debugging is left to
- * observe the problem.
- */
- IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- addsta->sta.addr);
spin_unlock_bh(&priv->sta_lock);
-
- return ret;
}
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- if (!cmd)
- return 0;
-
- return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
+ iwl_process_add_sta_resp(priv, pkt);
}
int iwl_send_add_sta(struct iwl_priv *priv,
.len = { sizeof(*sta), },
};
u8 sta_id __maybe_unused = sta->sta.sta_id;
+ struct iwl_rx_packet *pkt;
+ struct iwl_add_sta_resp *add_sta_resp;
IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
if (ret || (flags & CMD_ASYNC))
return ret;
- /*else the command was successfully sent in SYNC mode, need to free
- * the reply page */
- iwl_free_resp(&cmd);
+ pkt = cmd.resp_pkt;
+ add_sta_resp = (void *)pkt->data;
- if (cmd.handler_status)
- IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
- cmd.handler_status);
+ /* debug messages are printed in the handler */
+ if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) {
+ spin_lock_bh(&priv->sta_lock);
+ ret = iwl_sta_ucode_activate(priv, sta_id);
+ spin_unlock_bh(&priv->sta_lock);
+ } else {
+ ret = -EIO;
+ }
- return cmd.handler_status;
+ iwl_free_resp(&cmd);
+
+ return ret;
}
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct iwl_rx_packet *pkt;
int ret;
struct iwl_rem_sta_cmd rm_sta_cmd;
+ struct iwl_rem_sta_resp *rem_sta_resp;
struct iwl_host_cmd cmd = {
.id = REPLY_REMOVE_STA,
return ret;
pkt = cmd.resp_pkt;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- }
+ rem_sta_resp = (void *)pkt->data;
- if (!ret) {
- struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data;
- switch (rem_sta_resp->status) {
- case REM_STA_SUCCESS_MSK:
- if (!temporary) {
- spin_lock_bh(&priv->sta_lock);
- iwl_sta_ucode_deactivate(priv, sta_id);
- spin_unlock_bh(&priv->sta_lock);
- }
- IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
- break;
- default:
- ret = -EIO;
- IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
- break;
+ switch (rem_sta_resp->status) {
+ case REM_STA_SUCCESS_MSK:
+ if (!temporary) {
+ spin_lock_bh(&priv->sta_lock);
+ iwl_sta_ucode_deactivate(priv, sta_id);
+ spin_unlock_bh(&priv->sta_lock);
}
+ IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+ break;
}
+
iwl_free_resp(&cmd);
return ret;
}
}
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
skb = __skb_dequeue(&skbs);
ieee80211_tx_status(priv->hw, skb);
}
-
- return 0;
}
/**
* Handles block-acknowledge notification from device, which reports success
* of frames sent via aggregation.
*/
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
if (scd_flow >= priv->cfg->base_params->num_of_queues) {
IWL_ERR(priv,
"BUG_ON scd_flow is bigger than number of queues\n");
- return 0;
+ return;
}
sta_id = ba_resp->sta_id;
if (unlikely(ba_resp->bitmap))
IWL_ERR(priv, "Received BA when not expected\n");
spin_unlock_bh(&priv->sta_lock);
- return 0;
+ return;
}
if (unlikely(scd_flow != agg->txq_id)) {
"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
scd_flow, sta_id, tid, agg->txq_id);
spin_unlock_bh(&priv->sta_lock);
- return 0;
+ return;
}
__skb_queue_head_init(&reclaimed_skbs);
skb = __skb_dequeue(&reclaimed_skbs);
ieee80211_tx_status(priv->hw, skb);
}
-
- return 0;
}
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
const struct fw_img *fw;
int ret;
enum iwl_ucode_type old_type;
- static const u8 alive_cmd[] = { REPLY_ALIVE };
+ static const u16 alive_cmd[] = { REPLY_ALIVE };
fw = iwl_get_ucode_image(priv, ucode_type);
if (WARN_ON(!fw))
int iwl_run_init_ucode(struct iwl_priv *priv)
{
struct iwl_notification_wait calib_wait;
- static const u8 calib_complete[] = {
+ static const u16 calib_complete[] = {
CALIBRATION_RES_NOTIFICATION,
CALIBRATION_COMPLETE_NOTIFICATION
};
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 15
+#define IWL7260_UCODE_API_MAX 16
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 12
#define IWL3165_UCODE_API_OK 13
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 10
+#define IWL7260_UCODE_API_MIN 12
#define IWL3165_UCODE_API_MIN 13
/* NVM versions */
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 15
+#define IWL8000_UCODE_API_MAX 16
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 12
/* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 10
+#define IWL8000_UCODE_API_MIN 12
/* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d
#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B"
#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
-/* Max SDIO RX aggregation size of the ADDBA request/response */
-#define MAX_RX_AGG_SIZE_8260_SDIO 28
+/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
+#define MAX_RX_AGG_SIZE_8260_SDIO 21
+#define MAX_TX_AGG_SIZE_8260_SDIO 40
/* Max A-MPDU exponent for HT and VHT */
#define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
.d0i3 = true, \
+ .features = NETIF_F_RXCSUM, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL8260_DCCM_OFFSET, \
.dccm_len = IWL8260_DCCM_LEN, \
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+ .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+ .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
.bt_shared_single_ant = true,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
* mode set
* @d0i3: device uses d0i3 instead of d3
* @nvm_hw_section_num: the ID of the HW NVM section
+ * @features: hw features, any combination of feature_whitelist
* @pwr_tx_backoffs: translation table between power limits and backoffs
* @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
* @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
bool no_power_up_nic_in_init;
const char *default_nvm_file_B_step;
const char *default_nvm_file_C_step;
+ netdev_features_t features;
unsigned int max_rx_agg_size;
bool disable_dummy_notification;
unsigned int max_tx_agg_size;
#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
+#define CSR_INT_BIT_PAGING (1 << 24) /* SDIO PAGING */
#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
CSR_INT_BIT_HW_ERR | \
CSR_INT_BIT_FH_TX | \
CSR_INT_BIT_SW_ERR | \
+ CSR_INT_BIT_PAGING | \
CSR_INT_BIT_RF_KILL | \
CSR_INT_BIT_SW_RX | \
CSR_INT_BIT_WAKEUP | \
/* DRAM INT TABLE */
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
+#define CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
/*
TRACE_EVENT(iwlwifi_dev_tx_data,
TP_PROTO(const struct device *dev,
struct sk_buff *skb,
- void *data, size_t data_len),
- TP_ARGS(dev, skb, data, data_len),
+ u8 hdr_len, size_t data_len),
+ TP_ARGS(dev, skb, hdr_len, data_len),
TP_STRUCT__entry(
DEV_ENTRY
TP_fast_assign(
DEV_ASSIGN;
if (iwl_trace_data(skb))
- memcpy(__get_dynamic_array(data), data, data_len);
+ skb_copy_bits(skb, hdr_len,
+ __get_dynamic_array(data), data_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
);
TRACE_EVENT(iwlwifi_dev_hcmd,
TP_PROTO(const struct device *dev,
struct iwl_host_cmd *cmd, u16 total_size,
- struct iwl_cmd_header *hdr),
+ struct iwl_cmd_header_wide *hdr),
TP_ARGS(dev, cmd, total_size, hdr),
TP_STRUCT__entry(
DEV_ENTRY
__field(u32, flags)
),
TP_fast_assign(
- int i, offset = sizeof(*hdr);
+ int i, offset = sizeof(struct iwl_cmd_header);
+
+ if (hdr->group_id)
+ offset = sizeof(struct iwl_cmd_header_wide);
DEV_ASSIGN;
__entry->flags = cmd->flags;
- memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
+ memcpy(__get_dynamic_array(hcmd), hdr, offset);
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
if (!cmd->len[i])
offset += cmd->len[i];
}
),
- TP_printk("[%s] hcmd %#.2x (%ssync)",
- __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
+ TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)",
+ __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1],
+ ((u8 *)__get_dynamic_array(hcmd))[0],
__entry->flags & CMD_ASYNC ? "a" : "")
);
return 0;
}
+static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
+ const u32 len)
+{
+ struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
+ struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
+
+ if (len < sizeof(*fw_capa))
+ return -EINVAL;
+
+ capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
+ capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
+ capa->max_ap_cache_per_scan =
+ le32_to_cpu(fw_capa->max_ap_cache_per_scan);
+ capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
+ capa->max_scan_reporting_threshold =
+ le32_to_cpu(fw_capa->max_scan_reporting_threshold);
+ capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
+ capa->max_significant_change_aps =
+ le32_to_cpu(fw_capa->max_significant_change_aps);
+ capa->max_bssid_history_entries =
+ le32_to_cpu(fw_capa->max_bssid_history_entries);
+ return 0;
+}
+
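For illustration only, here is a user-space sketch of the same pattern the TLV parser above follows: validate the payload length first, then convert each little-endian field to host order. The struct, field names and TLV bytes are hypothetical, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* host-order mirror of a hypothetical two-field little-endian TLV payload */
struct capa {
	uint32_t max_scan_cache_size;
	uint32_t max_scan_buckets;
};

/* read a 32-bit little-endian value from a byte buffer */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* reject short TLVs first, as the driver does, then convert each field */
static int store_capa(struct capa *out, const uint8_t *data, size_t len)
{
	if (len < 8)
		return -1;
	out->max_scan_cache_size = get_le32(data);
	out->max_scan_buckets = get_le32(data + 4);
	return 0;
}

int main(void)
{
	const uint8_t tlv[8] = { 0x00, 0x40, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00 };
	struct capa c;

	if (store_capa(&c, tlv, sizeof(tlv)))
		return 1;
	printf("cache=%u buckets=%u\n", c.max_scan_cache_size, c.max_scan_buckets);
	return 0;
}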
/*
* Gets uCode section from tlv.
*/
size_t len = ucode_raw->size;
const u8 *data;
u32 tlv_len;
+ u32 usniffer_img;
enum iwl_ucode_tlv_type tlv_type;
const u8 *tlv_data;
char buildstr[25];
- u32 build;
+ u32 build, paging_mem_size;
int num_of_cpus;
bool usniffer_images = false;
bool usniffer_req = false;
+ bool gscan_capa = false;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
IWL_UCODE_REGULAR_USNIFFER,
tlv_len);
break;
+ case IWL_UCODE_TLV_PAGING:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+
+ IWL_DEBUG_FW(drv,
+ "Paging: paging enabled (size = %u bytes)\n",
+ paging_mem_size);
+
+ if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
+ IWL_ERR(drv,
+ "Paging: driver supports up to %lu bytes for paging image\n",
+ MAX_PAGING_IMAGE_SIZE);
+ return -EINVAL;
+ }
+
+ if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
+ IWL_ERR(drv,
+ "Paging: image isn't multiple %lu\n",
+ FW_PAGING_SIZE);
+ return -EINVAL;
+ }
+
+ drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
+ paging_mem_size;
+ usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
+ drv->fw.img[usniffer_img].paging_mem_size =
+ paging_mem_size;
+ break;
case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
drv->fw.sdio_adma_addr =
le32_to_cpup((__le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_FW_GSCAN_CAPA:
+ if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len))
+ goto invalid_tlv_len;
+ gscan_capa = true;
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
return -EINVAL;
}
+ /*
+ * If ucode advertises that it supports GSCAN but GSCAN
+ * capabilities TLV is not present, warn and continue without GSCAN.
+ */
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+ WARN(!gscan_capa,
+ "GSCAN is supported but capabilities TLV is unavailable\n"))
+ __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
+ capa->_capa);
+
return 0;
invalid_tlv_len:
struct ieee80211_channel *chan = &data->channels[0];
int n = 0, idx = 0;
- while (chan->band != band && idx < n_channels)
+ while (idx < n_channels && chan->band != band)
chan = &data->channels[++idx];
sband->channels = &data->channels[idx];
- while (chan->band == band && idx < n_channels) {
+ while (idx < n_channels && chan->band == band) {
chan = &data->channels[++idx];
n++;
}
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
* @IWL_FW_ERROR_DUMP_MEM: chunk of memory
* @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
* Structured as &struct iwl_fw_error_dump_trigger_desc.
+ * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
+ * &struct iwl_fw_error_dump_rb
*/
enum iwl_fw_error_dump_type {
/* 0 is deprecated */
IWL_FW_ERROR_DUMP_FH_REGS = 8,
IWL_FW_ERROR_DUMP_MEM = 9,
IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
+ IWL_FW_ERROR_DUMP_RB = 11,
IWL_FW_ERROR_DUMP_MAX,
};
u8 data[];
};
+/**
+ * struct iwl_fw_error_dump_rb - content of a Receive Buffer
+ * @index: the index of the Receive Buffer in the Rx queue
+ * @rxq: the RB's Rx queue
+ * @reserved: reserved
+ * @data: the content of the Receive Buffer
+ */
+struct iwl_fw_error_dump_rb {
+ __le32 index;
+ __le32 rxq;
+ __le32 reserved;
+ u8 data[];
+};
+
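Since @data is a flexible array member, one such dump chunk occupies sizeof(struct iwl_fw_error_dump_rb) plus the length of the copied RB. A minimal user-space sketch of that allocation arithmetic (the rb_len value is hypothetical):

#include <stdint.h>
#include <stdlib.h>

struct fw_error_dump_rb {	/* mirrors the layout documented above */
	uint32_t index;
	uint32_t rxq;
	uint32_t reserved;
	uint8_t data[];		/* flexible array: the copied RB contents */
};

int main(void)
{
	size_t rb_len = 4096;	/* hypothetical receive-buffer length */
	/* one chunk = fixed header + rb_len bytes of RB payload */
	struct fw_error_dump_rb *rb = calloc(1, sizeof(*rb) + rb_len);

	if (!rb)
		return 1;
	rb->index = 3;		/* e.g. the third RB in queue 0 */
	rb->rxq = 0;
	free(rb);
	return 0;
}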
/**
* iwl_fw_error_next_data - advance fw error dump data pointer
* @data: previous data block
IWL_UCODE_TLV_API_CHANGES_SET = 29,
IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
+ IWL_UCODE_TLV_PAGING = 32,
IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35,
IWL_UCODE_TLV_FW_VERSION = 36,
IWL_UCODE_TLV_FW_DBG_DEST = 38,
IWL_UCODE_TLV_FW_DBG_CONF = 39,
IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
+ IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
};
struct iwl_ucode_tlv {
* @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
* @IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
* @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
- * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
- * regardless of the band or the number of the probes. FW will calculate
- * the actual dwell time.
+ * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
* @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
* through the dedicated host command.
* @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
IWL_UCODE_TLV_API_HDC_PHASE_0 = (__force iwl_ucode_tlv_api_t)10,
IWL_UCODE_TLV_API_TX_POWER_DEV = (__force iwl_ucode_tlv_api_t)11,
- IWL_UCODE_TLV_API_BASIC_DWELL = (__force iwl_ucode_tlv_api_t)13,
+ IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14,
IWL_UCODE_TLV_API_SCD_CFG = (__force iwl_ucode_tlv_api_t)15,
IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = (__force iwl_ucode_tlv_api_t)16,
IWL_UCODE_TLV_API_ASYNC_DTM = (__force iwl_ucode_tlv_api_t)17,
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
* @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
* @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
* @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
* @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
* tx power value into TPC Report action frame and Link Measurement Report
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
* @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
+ * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
* @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
* IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
* is supported.
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
+ IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5,
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
+ IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28,
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
+ IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
};
/* The default calibrate table size if not specified by firmware file */
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
-#define IWL_UCODE_SECTION_MAX 12
+#define IWL_UCODE_SECTION_MAX 16
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
+#define PAGING_SEPARATOR_SECTION 0xAAAABBBB
/* uCode version contains 4 values: Major/Minor/API/Serial */
#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
PRPH_ASSIGN,
PRPH_SETBIT,
PRPH_CLEARBIT,
+
+ INDIRECT_ASSIGN,
+ INDIRECT_SETBIT,
+ INDIRECT_CLEARBIT,
+
+ PRPH_BLOCKBIT,
};
/**
*
* @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
* @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
+ * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when the trigger occurs, collect
+ * monitor data only
*/
enum iwl_fw_dbg_trigger_mode {
IWL_FW_DBG_TRIGGER_START = BIT(0),
IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+ IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
};
/**
struct iwl_fw_dbg_conf_hcmd hcmd;
} __packed;
+/**
+ * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: maximum possible report threshold, in percent.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ * change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ * hold.
+ */
+struct iwl_fw_gscan_capabilities {
+ __le32 max_scan_cache_size;
+ __le32 max_scan_buckets;
+ __le32 max_ap_cache_per_scan;
+ __le32 max_rssi_sample_size;
+ __le32 max_scan_reporting_threshold;
+ __le32 max_hotlist_aps;
+ __le32 max_significant_change_aps;
+ __le32 max_bssid_history_entries;
+} __packed;
+
#endif /* __iwl_fw_file_h__ */
struct fw_img {
struct fw_desc sec[IWL_UCODE_SECTION_MAX];
bool is_dual_cpus;
+ u32 paging_mem_size;
};
struct iwl_sf_region {
u32 size;
};
+/*
+ * Block paging calculations
+ */
+#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
+#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */
+#define PAGE_PER_GROUP_2_EXP_SIZE 3
+/* 8 pages per group */
+#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE)
+/* don't change, only a 32KB block size is supported */
+#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
+/* 32K == 2^15 */
+#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
+
+/*
+ * Image paging calculations
+ */
+#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
+/* 2^5 == 32 blocks per image */
+#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE)
+/* maximum image size 1024KB */
+#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE)
+
+/* Virtual address signature */
+#define PAGING_ADDR_SIG 0xAA000000
+
+#define PAGING_CMD_IS_SECURED BIT(9)
+#define PAGING_CMD_IS_ENABLED BIT(8)
+#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0
+#define PAGING_TLV_SECURE_MASK 1
+
+/**
+ * struct iwl_fw_paging - FW paging descriptor
+ * @fw_paging_phys: physical (DMA) address of the paging block
+ * @fw_paging_block: pointer to the allocated block
+ * @fw_paging_size: size of the paging block, in bytes
+ */
+struct iwl_fw_paging {
+ dma_addr_t fw_paging_phys;
+ struct page *fw_paging_block;
+ u32 fw_paging_size;
+};
+
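As a worked check of the constants above (a stand-alone sketch, not driver code): a page is 4 KiB, a block groups 8 pages (32 KiB), and an image may span at most 32 blocks (1 MiB), which is exactly what the two validity tests in the IWL_UCODE_TLV_PAGING parser enforce. The paging_mem_size value below is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define PAGE_2_EXP_SIZE		12
#define FW_PAGING_SIZE		(1u << PAGE_2_EXP_SIZE)		/* 4 KiB */
#define PAGE_PER_GROUP_2_EXP_SIZE 3
#define NUM_OF_PAGE_PER_GROUP	(1u << PAGE_PER_GROUP_2_EXP_SIZE)
#define PAGING_BLOCK_SIZE	(NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE) /* 32 KiB */
#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
#define NUM_OF_BLOCK_PER_IMAGE	(1u << BLOCK_PER_IMAGE_2_EXP_SIZE)
#define MAX_PAGING_IMAGE_SIZE	(NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE) /* 1 MiB */

int main(void)
{
	uint32_t paging_mem_size = 24 * FW_PAGING_SIZE;	/* hypothetical TLV value */

	/* the same two validity checks applied when the TLV is parsed:
	 * not larger than the maximum image, and page-aligned */
	if (paging_mem_size > MAX_PAGING_IMAGE_SIZE ||
	    (paging_mem_size & (FW_PAGING_SIZE - 1)))
		return 1;

	printf("paging image: %u bytes -> %u block(s) of %u (max %u blocks)\n",
	       paging_mem_size,
	       (paging_mem_size + PAGING_BLOCK_SIZE - 1) / PAGING_BLOCK_SIZE,
	       PAGING_BLOCK_SIZE, NUM_OF_BLOCK_PER_IMAGE);
	return 0;
}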
/**
* struct iwl_fw_cscheme_list - a cipher scheme list
* @size: a number of entries
struct iwl_fw_cipher_scheme cs[];
} __packed;
+/**
+ * struct iwl_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold. in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ * change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ * hold.
+ */
+struct iwl_gscan_capabilities {
+ u32 max_scan_cache_size;
+ u32 max_scan_buckets;
+ u32 max_ap_cache_per_scan;
+ u32 max_rssi_sample_size;
+ u32 max_scan_reporting_threshold;
+ u32 max_hotlist_aps;
+ u32 max_significant_change_aps;
+ u32 max_bssid_history_entries;
+};
+
/**
* struct iwl_fw - variables associated with the firmware
*
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num;
+ struct iwl_gscan_capabilities gscan_capa;
};
static inline const char *get_fw_dbg_mode_string(int mode)
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
continue;
for (i = 0; i < w->n_cmds; i++) {
- if (w->cmds[i] == pkt->hdr.cmd) {
+ if (w->cmds[i] ==
+ WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
found = true;
break;
}
void
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
struct iwl_notification_wait *wait_entry,
- const u8 *cmds, int n_cmds,
+ const u16 *cmds, int n_cmds,
bool (*fn)(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data),
void *fn_data)
wait_entry->fn = fn;
wait_entry->fn_data = fn_data;
wait_entry->n_cmds = n_cmds;
- memcpy(wait_entry->cmds, cmds, n_cmds);
+ memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16));
wait_entry->triggered = false;
wait_entry->aborted = false;
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
struct iwl_rx_packet *pkt, void *data);
void *fn_data;
- u8 cmds[MAX_NOTIF_CMDS];
+ u16 cmds[MAX_NOTIF_CMDS];
u8 n_cmds;
bool triggered, aborted;
};
void __acquires(wait_entry)
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
struct iwl_notification_wait *wait_entry,
- const u8 *cmds, int n_cmds,
+ const u16 *cmds, int n_cmds,
bool (*fn)(struct iwl_notif_wait_data *notif_data,
struct iwl_rx_packet *pkt, void *data),
void *fn_data);
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself.
* Can't sleep.
- * @napi_add: NAPI initialization. The transport is fully responsible for NAPI,
- * but the higher layers need to know about it (in particular mac80211 to
- * to able to call the right NAPI RX functions); this function is needed
- * to eventually call netif_napi_add() with higher layer involvement.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
const struct iwl_fw *fw,
struct dentry *dbgfs_dir);
void (*stop)(struct iwl_op_mode *op_mode);
- int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
- void (*napi_add)(struct iwl_op_mode *op_mode,
- struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight);
+ void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
op_mode->ops->stop(op_mode);
}
-static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
{
- return op_mode->ops->rx(op_mode, rxb, cmd);
+ return op_mode->ops->rx(op_mode, napi, rxb);
}
static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
return op_mode->ops->exit_d0i3(op_mode);
}
-static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
- struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight)
-{
- if (!op_mode->ops->napi_add)
- return;
- op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
-}
-
#endif /* __iwl_op_mode_h__ */
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
#define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0)
+#define SCD_GP_CTRL_AUTO_ACTIVE_MODE BIT(18)
/* Context Data */
#define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600)
/*********************** END TX SCHEDULER *************************************/
+/* tcp checksum offload */
+#define RX_EN_CSUM (0x00a00d88)
+
/* Oscillator clock */
#define OSC_CLK (0xa04068)
#define OSC_CLK_FORCE_CONTROL (0x8)
#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
#define RSA_ENABLE 0xA24B08
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
+#define SB_CPU_1_STATUS 0xA01E30
+#define SB_CPU_2_STATUS 0xA01E34
/* FW chicken bits */
#define LMPM_CHICK 0xA01FF8
LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
};
+/* FW paging pass notification */
+#define LMPM_PAGE_PASS_NOTIF 0xA03824
+enum {
+ LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
+};
+
#endif /* __iwl_prph_h__ */
#define INDEX_TO_SEQ(i) ((i) & 0xff)
#define SEQ_RX_FRAME cpu_to_le16(0x8000)
+/*
+ * These functions retrieve specific information from the id field in
+ * the iwl_host_cmd struct, which contains the command id, the group id
+ * and the version of the command, and also build such an id from its
+ * component parts.
+ */
+static inline u8 iwl_cmd_opcode(u32 cmdid)
+{
+ return cmdid & 0xFF;
+}
+
+static inline u8 iwl_cmd_groupid(u32 cmdid)
+{
+ return ((cmdid & 0xFF00) >> 8);
+}
+
+static inline u8 iwl_cmd_version(u32 cmdid)
+{
+ return ((cmdid & 0xFF0000) >> 16);
+}
+
+static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
+{
+ return opcode + (groupid << 8) + (version << 16);
+}
+
+/* make u16 wide id out of u8 group and opcode */
+#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
+
+/* due to the conversion, this group is special; new groups
+ * should be defined in the appropriate fw-api header files
+ */
+#define IWL_ALWAYS_LONG_GROUP 1
+
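A small stand-alone sketch of how this encoding round-trips (the helpers re-implement the ones above for a user-space demo; the opcode, group and version values are made up for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* re-implementations of the inline helpers above */
static uint8_t cmd_opcode(uint32_t cmdid)  { return cmdid & 0xFF; }
static uint8_t cmd_groupid(uint32_t cmdid) { return (cmdid & 0xFF00) >> 8; }
static uint8_t cmd_version(uint32_t cmdid) { return (cmdid & 0xFF0000) >> 16; }

static uint32_t cmd_id(uint8_t opcode, uint8_t groupid, uint8_t version)
{
	return opcode + (groupid << 8) + (version << 16);
}

#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))

int main(void)
{
	uint32_t id = cmd_id(0x84, 0x01, 0x02);	/* opcode, group, version */

	assert(cmd_opcode(id) == 0x84);
	assert(cmd_groupid(id) == 0x01);
	assert(cmd_version(id) == 0x02);
	/* the low 16 bits are exactly the wide id used for RX matching */
	assert((uint16_t)id == WIDE_ID(0x01, 0x84));
	printf("id=0x%06x wide=0x%04x\n", (unsigned)id, (unsigned)(uint16_t)id);
	return 0;
}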
/**
* struct iwl_cmd_header
*
*/
struct iwl_cmd_header {
u8 cmd; /* Command ID: REPLY_RXON, etc. */
- u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
+ u8 group_id;
/*
* The driver sets up the sequence number to values of its choosing.
* uCode does not use this value, but passes it back to the driver
__le16 sequence;
} __packed;
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
-
+/**
+ * struct iwl_cmd_header_wide
+ *
+ * This header format appears at the beginning of each command sent from the
+ * driver, and of each response/notification received from uCode.
+ * This is the wide version, which carries more information about the command,
+ * such as its length, version and type.
+ */
+struct iwl_cmd_header_wide {
+ u8 cmd;
+ u8 group_id;
+ __le16 sequence;
+ __le16 length;
+ u8 reserved;
+ u8 version;
+} __packed;
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID 0x55550000
* @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
* @CMD_WAKE_UP_TRANS: The command response should wake up the trans
* (i.e. mark it as non-idle).
+ * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
+ * check that we leave enough room for the TBs bitmap which needs 20 bits.
*/
enum CMD_MODE {
CMD_ASYNC = BIT(0),
CMD_SEND_IN_IDLE = BIT(4),
CMD_MAKE_TRANS_IDLE = BIT(5),
CMD_WAKE_UP_TRANS = BIT(6),
+
+ CMD_TB_BITMAP_POS = 11,
};
#define DEF_CMD_PAYLOAD_SIZE 320
* aren't fully copied and use other TFD space.
*/
struct iwl_device_cmd {
- struct iwl_cmd_header hdr; /* uCode API */
- u8 payload[DEF_CMD_PAYLOAD_SIZE];
+ union {
+ struct {
+ struct iwl_cmd_header hdr; /* uCode API */
+ u8 payload[DEF_CMD_PAYLOAD_SIZE];
+ };
+ struct {
+ struct iwl_cmd_header_wide hdr_wide;
+ u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
+ sizeof(struct iwl_cmd_header_wide) +
+ sizeof(struct iwl_cmd_header)];
+ };
+ };
} __packed;
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
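The union keeps the overall command buffer size unchanged: the wide arm shrinks its payload by the difference between the two header sizes, so both arms span sizeof(struct iwl_cmd_header) + DEF_CMD_PAYLOAD_SIZE bytes and TFD_MAX_PAYLOAD_SIZE is unaffected. A stand-alone C11 sketch of that invariant, with the struct layouts transcribed from above (natural alignment already matches the packed layout here, so packing is omitted):

#include <stdint.h>

#define DEF_CMD_PAYLOAD_SIZE 320

struct hdr {				/* narrow header: 4 bytes */
	uint8_t cmd, group_id;
	uint16_t sequence;
};

struct hdr_wide {			/* wide header: 8 bytes */
	uint8_t cmd, group_id;
	uint16_t sequence, length;
	uint8_t reserved, version;
};

struct device_cmd {
	union {
		struct {
			struct hdr hdr;
			uint8_t payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct hdr_wide hdr_wide;
			uint8_t payload_wide[DEF_CMD_PAYLOAD_SIZE -
					     sizeof(struct hdr_wide) +
					     sizeof(struct hdr)];
		};
	};
};

/* both union arms span the same number of bytes, so the total command
 * buffer (and thus TFD_MAX_PAYLOAD_SIZE) does not change */
_Static_assert(sizeof(struct device_cmd) ==
	       sizeof(struct hdr) + DEF_CMD_PAYLOAD_SIZE,
	       "wide header must not grow the command buffer");

int main(void) { return 0; }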
* @resp_pkt: response packet, if %CMD_WANT_SKB was set
* @_rx_page_order: (internally used to free response packet)
* @_rx_page_addr: (internally used to free response packet)
- * @handler_status: return value of the handler of the command
- * (put in setup_rx_handlers) - valid for SYNC mode only
* @flags: can be CMD_*
* @len: array of the lengths of the chunks in data
* @dataflags: IWL_HCMD_DFL_*
- * @id: id of the host command
+ * @id: command id of the host command, for wide commands encoding the
+ * version and group as well
*/
struct iwl_host_cmd {
const void *data[IWL_MAX_CMD_TBS_PER_TFD];
struct iwl_rx_packet *resp_pkt;
unsigned long _rx_page_addr;
u32 _rx_page_order;
- int handler_status;
u32 flags;
+ u32 id;
u16 len[IWL_MAX_CMD_TBS_PER_TFD];
u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
- u8 id;
};
static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
* @bc_table_dword: set to true if the BC table expects the byte count to be
* in DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: firmware supports wide host command header
* @command_names: array of command names, must be 256 entries
* (one for each command); for debugging only
* @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
bool rx_buf_size_8k;
bool bc_table_dword;
bool scd_set_active;
+ bool wide_cmd_header;
const char *const *command_names;
u32 sdio_adma_addr;
u32 value);
void (*ref)(struct iwl_trans *trans);
void (*unref)(struct iwl_trans *trans);
- void (*suspend)(struct iwl_trans *trans);
+ int (*suspend)(struct iwl_trans *trans);
void (*resume)(struct iwl_trans *trans);
- struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
+ struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
+ struct iwl_fw_dbg_trigger_tlv
+ *trigger);
};
/**
* @cfg - pointer to the configuration
* @status: a bit-mask of transport status flags
* @dev - pointer to struct device * that represents the device
+ * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
+ * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @paging_req_addr: The location where the FW will upload / download the pages
+ * from. The address is set by the opmode
+ * @paging_db: Pointer to the opmode paging data base, the pointer is set by
+ * the opmode.
+ * @paging_download_buf: Buffer used for copying all of the pages before
+ * downloading them to the FW. The buffer is allocated in the opmode
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
unsigned long status;
struct device *dev;
+ u32 max_skb_frags;
u32 hw_rev;
u32 hw_id;
char hw_id_str[52];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u8 dbg_dest_reg_num;
+ /*
+ * Paging parameters - All of the parameters should be set by the
+ * opmode when paging is enabled
+ */
+ u32 paging_req_addr;
+ struct iwl_fw_paging *paging_db;
+ void *paging_download_buf;
+
enum iwl_d0i3_mode d0i3_mode;
bool wowlan_d0i3;
static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
{
might_sleep();
- trans->ops->d3_suspend(trans, test);
+ if (trans->ops->d3_suspend)
+ trans->ops->d3_suspend(trans, test);
}
static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
bool test)
{
might_sleep();
+ if (!trans->ops->d3_resume)
+ return 0;
+
return trans->ops->d3_resume(trans, status, test);
}
trans->ops->unref(trans);
}
-static inline void iwl_trans_suspend(struct iwl_trans *trans)
+static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
- if (trans->ops->suspend)
- trans->ops->suspend(trans);
+ if (!trans->ops->suspend)
+ return 0;
+
+ return trans->ops->suspend(trans);
}
static inline void iwl_trans_resume(struct iwl_trans *trans)
}
static inline struct iwl_trans_dump_data *
-iwl_trans_dump_data(struct iwl_trans *trans)
+iwl_trans_dump_data(struct iwl_trans *trans,
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
if (!trans->ops->dump_data)
return NULL;
- return trans->ops->dump_data(trans);
+ return trans->ops->dump_data(trans, trigger);
}
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwlmvm-y += tof.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
}
}
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+ iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
+ return;
+ }
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
iwl_mvm_bt_coex_notif_handle(mvm);
-
- /*
- * This is an async handler for a notification, returning anything other
- * than 0 doesn't make sense even if HCMD failed.
- */
- return 0;
}
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_bt_coex_notif_handle(mvm);
}
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 ant_isolation = le32_to_cpup((void *)pkt->data);
u8 __maybe_unused lower_bound, upper_bound;
u8 lut;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+ iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
+ return;
+ }
if (!iwl_mvm_bt_is_plcr_supported(mvm))
- return 0;
+ return;
lockdep_assert_held(&mvm->mutex);
/* Ignore updates if we are in force mode */
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
- return 0;
+ return;
if (ant_isolation == mvm->last_ant_isol)
- return 0;
+ return;
for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
mvm->last_ant_isol = ant_isolation;
if (mvm->last_corun_lut == lut)
- return 0;
+ return;
mvm->last_corun_lut = lut;
memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20,
sizeof(cmd.corun_lut40));
- return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
- sizeof(cmd), &cmd);
+ if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
+ sizeof(cmd), &cmd))
+ IWL_ERR(mvm,
+ "failed to send BT_COEX_UPDATE_CORUN_LUT command\n");
}
IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
}
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
iwl_mvm_bt_coex_notif_handle(mvm);
-
- /*
- * This is an async handler for a notification, returning anything other
- * than 0 doesn't make sense even if HCMD failed.
- */
- return 0;
}
static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
iwl_mvm_bt_coex_notif_handle(mvm);
}
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 ant_isolation = le32_to_cpup((void *)pkt->data);
u8 __maybe_unused lower_bound, upper_bound;
- int ret;
u8 lut;
struct iwl_bt_coex_cmd_old *bt_cmd;
};
if (!iwl_mvm_bt_is_plcr_supported(mvm))
- return 0;
+ return;
lockdep_assert_held(&mvm->mutex);
/* Ignore updates if we are in force mode */
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
- return 0;
+ return;
if (ant_isolation == mvm->last_ant_isol)
- return 0;
+ return;
for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
mvm->last_ant_isol = ant_isolation;
if (mvm->last_corun_lut == lut)
- return 0;
+ return;
mvm->last_corun_lut = lut;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
- return 0;
+ return;
cmd.data[0] = bt_cmd;
bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
sizeof(bt_cmd->bt4_corun_lut40));
- ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (iwl_mvm_send_cmd(mvm, &cmd))
+ IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
kfree(bt_cmd);
- return ret;
}
#define IWL_MVM_QUOTA_THRESHOLD 4
#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
#define IWL_MVM_RS_DISABLE_P2P_MIMO 0
+#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
{
struct iwl_notification_wait wait_d3;
- static const u8 d3_notif[] = { D3_CONFIG_CMD };
+ static const u16 d3_notif[] = { D3_CONFIG_CMD };
int ret;
iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ ret = iwl_trans_suspend(mvm->trans);
+ if (ret)
+ return ret;
- iwl_trans_suspend(mvm->trans);
mvm->trans->wowlan_d0i3 = wowlan->any;
if (mvm->trans->wowlan_d0i3) {
/* 'any' trigger means d0i3 usage */
if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
- int ret = iwl_mvm_enter_d0i3_sync(mvm);
+ ret = iwl_mvm_enter_d0i3_sync(mvm);
if (ret)
return ret;
mutex_lock(&mvm->d0i3_suspend_mutex);
__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+ iwl_trans_d3_suspend(mvm->trans, false);
+
return 0;
}
return 1;
}
-int iwl_mvm_resume(struct ieee80211_hw *hw)
+static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ iwl_trans_resume(mvm->trans);
+
+ return __iwl_mvm_resume(mvm, false);
+}
+
+static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
+{
+ bool exit_now;
+ enum iwl_d3_status d3_status;
+
+ iwl_trans_d3_resume(mvm->trans, &d3_status, false);
+
+ /*
+ * make sure to clear D0I3_DEFER_WAKEUP before
+ * calling iwl_trans_resume(), which might wait
+ * for d0i3 exit completion.
+ */
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+ exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+ &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ if (exit_now) {
+ IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
+ _iwl_mvm_exit_d0i3(mvm);
+ }
iwl_trans_resume(mvm->trans);
- if (mvm->hw->wiphy->wowlan_config->any) {
- /* 'any' trigger means d0i3 usage */
- if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
- int ret = iwl_mvm_exit_d0i3(hw->priv);
+ if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
+ int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
- if (ret)
- return ret;
- /*
- * d0i3 exit will be deferred until reconfig_complete.
- * make sure there we are out of d0i3.
- */
- }
- return 0;
+ if (ret)
+ return ret;
+ /*
+ * d0i3 exit will be deferred until reconfig_complete.
+ * Make sure that by then we are out of d0i3.
+ */
}
+ return 0;
+}
- return __iwl_mvm_resume(mvm, false);
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* 'any' trigger means d0i3 was used */
+ if (hw->wiphy->wowlan_config->any)
+ return iwl_mvm_resume_d0i3(mvm);
+ else
+ return iwl_mvm_resume_d3(mvm);
}
void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
*
*****************************************************************************/
#include "mvm.h"
+#include "fw-api-tof.h"
#include "debugfs.h"
static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static inline char *iwl_dbgfs_is_match(char *name, char *buf)
+{
+ int len = strlen(name);
+
+ return !strncmp(name, buf, len) ? buf + len : NULL;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
+ char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ int value, ret = -EINVAL;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("tof_disabled=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.tof_cfg.tof_disabled = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.tof_cfg.one_sided_disabled = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("is_debug_mode=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.tof_cfg.is_debug_mode = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("is_buf=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.tof_cfg.is_buf_required = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ ret = iwl_mvm_tof_config_cmd(mvm);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_tof_config_cmd *cmd;
+
+ cmd = &mvm->tof_data.tof_cfg;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
+ cmd->tof_disabled);
+ pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
+ cmd->one_sided_disabled);
+ pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
+ cmd->is_debug_mode);
+ pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
+ cmd->is_buf_required);
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
+ char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ int value, ret = 0;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("burst_period=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (!ret)
+ mvm->tof_data.responder_cfg.burst_period =
+ cpu_to_le16(value);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.min_delta_ftm = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("burst_duration=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.burst_duration = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.num_of_burst_exp = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("abort_responder=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.abort_responder = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("get_ch_est=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.get_ch_est = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.recv_sta_req_params = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("channel_num=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.channel_num = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("bandwidth=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.bandwidth = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("rate=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.rate = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("bssid=", buf);
+ if (data) {
+ u8 *mac = mvm->tof_data.responder_cfg.bssid;
+
+ if (!mac_pton(data, mac)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
+ cpu_to_le16(value);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("toa_offset=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.toa_offset =
+ cpu_to_le16(value);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ctrl_ch_position=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.ctrl_ch_position = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.ftm_per_burst = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("asap_mode=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.asap_mode = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ ret = iwl_mvm_tof_responder_cmd(mvm, vif);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_tof_responder_config_cmd *cmd;
+
+ cmd = &mvm->tof_data.responder_cfg;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
+ le16_to_cpu(cmd->burst_period));
+ pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
+ cmd->burst_duration);
+ pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
+ cmd->bandwidth);
+ pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
+ cmd->channel_num);
+ pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
+ cmd->ctrl_ch_position);
+ pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
+ cmd->bssid);
+ pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
+ cmd->min_delta_ftm);
+ pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
+ cmd->num_of_burst_exp);
+ pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
+ pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
+ cmd->abort_responder);
+ pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
+ cmd->get_ch_est);
+ pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
+ cmd->recv_sta_req_params);
+ pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
+ cmd->ftm_per_burst);
+ pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
+ cmd->ftm_resp_ts_avail);
+ pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
+ cmd->asap_mode);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "tsf_timer_offset_msecs = %d\n",
+ le16_to_cpu(cmd->tsf_timer_offset_msecs));
+ pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
+ le16_to_cpu(cmd->toa_offset));
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ int value, ret = 0;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("request_id=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.request_id = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("initiator=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.initiator = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.one_sided_los_disable = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("req_timeout=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.req_timeout = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("report_policy=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.report_policy = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("macaddr_random=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.macaddr_random = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("num_of_ap=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.num_of_ap = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("macaddr_template=", buf);
+ if (data) {
+ u8 mac[ETH_ALEN];
+
+ if (!mac_pton(data, mac)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+ }
+
+ data = iwl_dbgfs_is_match("macaddr_mask=", buf);
+ if (data) {
+ u8 mac[ETH_ALEN];
+
+ if (!mac_pton(data, mac)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+ }
+
+ data = iwl_dbgfs_is_match("ap=", buf);
+ if (data) {
+ struct iwl_tof_range_req_ap_entry ap;
+ int size = sizeof(struct iwl_tof_range_req_ap_entry);
+ u16 burst_period;
+ u8 *mac = ap.bssid;
+ unsigned int i;
+
+ if (sscanf(data, "%u %hhd %hhx %hhx"
+ "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
+ "%hhx %hhx %hx"
+ "%hhx %hhx %x"
+ "%hhx %hhx %hhx %hhx",
+ &i, &ap.channel_num, &ap.bandwidth,
+ &ap.ctrl_ch_position,
+ mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
+ &ap.measure_type, &ap.num_of_bursts,
+ &burst_period,
+ &ap.samples_per_burst, &ap.retries_per_sample,
+ &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
+ &ap.enable_dyn_ack, &ap.rssi) != 20) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (i >= IWL_MVM_TOF_MAX_APS) {
+ IWL_ERR(mvm, "Invalid AP index %d\n", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ap.burst_period = cpu_to_le16(burst_period);
+
+ memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_range_request=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret ?: count;
+}
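+
+/*
+ * Each write handler above tokenizes its "key=value" input with
+ * iwl_dbgfs_is_match(), which is not part of this hunk. A minimal
+ * sketch of such a helper -- assuming it returns a pointer just past
+ * the matched prefix, or NULL when the prefix does not match -- could
+ * look like:
+ *
+ *	static inline char *iwl_dbgfs_is_match(char *name, char *buf)
+ *	{
+ *		int len = strlen(name);
+ *
+ *		return !strncmp(name, buf, len) ? buf + len : NULL;
+ *	}
+ */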
+
+static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[512];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_tof_range_req_cmd *cmd;
+ int i;
+
+ cmd = &mvm->tof_data.range_req;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
+ cmd->request_id);
+ pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
+ cmd->initiator);
+ pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
+ cmd->one_sided_los_disable);
+ pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
+ cmd->req_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
+ cmd->report_policy);
+ pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
+ cmd->macaddr_random);
+ pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
+ cmd->macaddr_template);
+ pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
+ cmd->macaddr_mask);
+ pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
+ cmd->num_of_ap);
+ for (i = 0; i < cmd->num_of_ap; i++) {
+ struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ap %.2d: channel_num=%hhx bw=%hhx"
+ " control=%hhx bssid=%pM type=%hhx"
+ " num_of_bursts=%hhx burst_period=%hx ftm=%hhx"
+ " retries=%hhx tsf_delta=%x location_req=%hhx"
+ " asap=%hhx enable=%hhx rssi=%hhx\n",
+ i, ap->channel_num, ap->bandwidth,
+ ap->ctrl_ch_position, ap->bssid,
+ ap->measure_type, ap->num_of_bursts,
+ le16_to_cpu(ap->burst_period), ap->samples_per_burst,
+ ap->retries_per_sample, le32_to_cpu(ap->tsf_delta),
+ ap->location_req, ap->asap_mode,
+ ap->enable_dyn_ack, ap->rssi);
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
+ char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 value;
+ int ret = 0;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
+ cpu_to_le16(value);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.min_delta_ftm = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
+ value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
+ value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
+ value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_tof_range_req_ext_cmd *cmd;
+
+ cmd = &mvm->tof_data.range_req_ext;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "tsf_timer_offset_msec = %hx\n",
+ le16_to_cpu(cmd->tsf_timer_offset_msec));
+ pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n",
+ cmd->min_delta_ftm);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ftm_format_and_bw20M = %hhx\n",
+ cmd->ftm_format_and_bw20M);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ftm_format_and_bw40M = %hhx\n",
+ cmd->ftm_format_and_bw40M);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ftm_format_and_bw80M = %hhx\n",
+ cmd->ftm_format_and_bw80M);
+
+ mutex_unlock(&mvm->mutex);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
+ char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 value;
+ int ret = 0;
+ int abort_id;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("abort_id=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.last_abort_id = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_range_abort=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ abort_id = mvm->tof_data.last_abort_id;
+ ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[32];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ int last_abort_id;
+
+ mutex_lock(&mvm->mutex);
+ last_abort_id = mvm->tof_data.last_abort_id;
+ mutex_unlock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
+ last_abort_id);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char *buf;
+ int pos = 0;
+ const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
+ struct iwl_tof_range_rsp_ntfy *cmd;
+ int i, ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ cmd = &mvm->tof_data.range_resp;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
+ cmd->request_id);
+ pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
+ cmd->request_status);
+ pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
+ cmd->last_in_batch);
+ pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
+ cmd->num_of_aps);
+ for (i = 0; i < cmd->num_of_aps; i++) {
+ struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ap %.2d: bssid=%pM status=%hhx bw=%hhx"
+ " rtt=%x rtt_var=%x rtt_spread=%x"
+ " rssi=%hhx rssi_spread=%hhx"
+ " range=%x range_var=%x"
+ " time_stamp=%x\n",
+ i, ap->bssid, ap->measure_status,
+ ap->measure_bw,
+ le32_to_cpu(ap->rtt), le32_to_cpu(ap->rtt_variance),
+ le32_to_cpu(ap->rtt_spread), ap->rssi, ap->rssi_spread,
+ le32_to_cpu(ap->range), le32_to_cpu(ap->range_variance),
+ le32_to_cpu(ap->timestamp));
+ }
+ mutex_unlock(&mvm->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
size_t count, loff_t *ppos)
{
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
+MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
+ !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
+ if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
+ mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
+ S_IRUSR);
+ }
+
/*
* Create symlink for convenience pointing to interface specific
* debugfs entries for the driver. For example, under
if (ret)
return ret;
- iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
+ iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL);
iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
if (ptr) {
for (ofs = 0; ofs < len; ofs += 16) {
pos += scnprintf(buf + pos, bufsz - pos,
- "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
- bufsz - pos, false);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
+ "0x%.4x %16ph\n", ofs, ptr + ofs);
}
} else {
pos += scnprintf(buf + pos, bufsz - pos,
#define IWL_BF_TEMP_FAST_FILTER_MIN 0
#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
-#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20
#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
u8 ssid[IEEE80211_MAX_SSID_LEN];
} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
-/* How many statistics are gathered for each channel */
-#define SCAN_RESULTS_STATISTICS 1
-
-/**
- * enum iwl_scan_complete_status - status codes for scan complete notifications
- * @SCAN_COMP_STATUS_OK: scan completed successfully
- * @SCAN_COMP_STATUS_ABORT: scan was aborted by user
- * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
- * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
- * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
- * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
- * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
- * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
- * @SCAN_COMP_STATUS_ERR_COEX: medium was lost ot WiMax
- * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
- * (not an error!)
- * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
- * asked for
- * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
-*/
-enum iwl_scan_complete_status {
- SCAN_COMP_STATUS_OK = 0x1,
- SCAN_COMP_STATUS_ABORT = 0x2,
- SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
- SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
- SCAN_COMP_STATUS_ERR_PROBE = 0x5,
- SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
- SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
- SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
- SCAN_COMP_STATUS_ERR_COEX = 0x9,
- SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
- SCAN_COMP_STATUS_ITERATION_END = 0x0B,
- SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
-};
-
/* scan offload */
#define IWL_SCAN_MAX_BLACKLIST_LEN 64
#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
SCAN_CLIENT_ASSET_TRACKING = BIT(2),
};
-/**
- * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
- * @scan_flags: see enum iwl_scan_flags
- * @channel_count: channels in channel list
- * @quiet_time: dwell time, in milliseconds, on quiet channel
- * @quiet_plcp_th: quiet channel num of packets threshold
- * @good_CRC_th: passive to active promotion threshold
- * @rx_chain: RXON rx chain.
- * @max_out_time: max TUs to be out of associated channel
- * @suspend_time: pause scan this TUs when returning to service channel
- * @flags: RXON flags
- * @filter_flags: RXONfilter
- * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
- * @direct_scan: list of SSIDs for directed active scan
- * @scan_type: see enum iwl_scan_type.
- * @rep_count: repetition count for each scheduled scan iteration.
- */
-struct iwl_scan_offload_cmd {
- __le16 len;
- u8 scan_flags;
- u8 channel_count;
- __le16 quiet_time;
- __le16 quiet_plcp_th;
- __le16 good_CRC_th;
- __le16 rx_chain;
- __le32 max_out_time;
- __le32 suspend_time;
- /* RX_ON_FLAGS_API_S_VER_1 */
- __le32 flags;
- __le32 filter_flags;
- struct iwl_tx_cmd tx_cmd[2];
- /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
- struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
- __le32 scan_type;
- __le32 rep_count;
-} __packed;
-
-enum iwl_scan_offload_channel_flags {
- IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE = BIT(0),
- IWL_SCAN_OFFLOAD_CHANNEL_NARROW = BIT(22),
- IWL_SCAN_OFFLOAD_CHANNEL_FULL = BIT(24),
- IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL = BIT(25),
-};
-
-/* channel configuration for struct iwl_scan_offload_cfg. Each channels needs:
- * __le32 type: bitmap; bits 1-20 are for directed scan to i'th ssid and
- * see enum iwl_scan_offload_channel_flags.
- * __le16 channel_number: channel number 1-13 etc.
- * __le16 iter_count: repetition count for the channel.
- * __le32 iter_interval: interval between two iterations on one channel.
- * u8 active_dwell.
- * u8 passive_dwell.
- */
-#define IWL_SCAN_CHAN_SIZE 14
-
-/**
- * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
- * @scan_cmd: scan command fixed part
- * @data: scan channel configuration and probe request frames
- */
-struct iwl_scan_offload_cfg {
- struct iwl_scan_offload_cmd scan_cmd;
- u8 data[0];
-} __packed;
-
/**
* iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
* @ssid: MAC address to filter out
IWL_SCAN_EBS_INACTIVE,
};
-/**
- * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
- * @last_schedule_line: last schedule line executed (fast or regular)
- * @last_schedule_iteration: last scan iteration executed before scan abort
- * @status: enum iwl_scan_offload_compleate_status
- * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
- */
-struct iwl_scan_offload_complete {
- u8 last_schedule_line;
- u8 last_schedule_iteration;
- u8 status;
- u8 ebs_status;
-} __packed;
-
-/**
- * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
- * @ssid_bitmap: SSIDs indexes found in this iteration
- * @client_bitmap: clients that are active and wait for this notification
- */
-struct iwl_sched_scan_results {
- __le16 ssid_bitmap;
- u8 client_bitmap;
- u8 reserved;
-};
-
-/* Unified LMAC scan API */
-
-#define IWL_MVM_BASIC_PASSIVE_DWELL 110
-
/**
* iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
* @tx_flags: combination of TX_CMD_FLG_*
/* UMAC Scan API */
-/**
- * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands
- * @size: size of the command (not including header)
- * @reserved0: for future use and alignment
- * @ver: API version number
- */
-struct iwl_mvm_umac_cmd_hdr {
- __le16 size;
- u8 reserved0;
- u8 ver;
-} __packed;
-
/* The maximum of either of these cannot exceed 8, because we use an
* 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
*/
/**
* struct iwl_scan_config
- * @hdr: umac command header
* @flags: enum scan_config_flags
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
* @channel_array: default supported channels
*/
struct iwl_scan_config {
- struct iwl_mvm_umac_cmd_hdr hdr;
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
/**
* struct iwl_scan_req_umac
- * @hdr: umac command header
* @flags: &enum iwl_umac_scan_flags
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* &struct iwl_scan_req_umac_tail
*/
struct iwl_scan_req_umac {
- struct iwl_mvm_umac_cmd_hdr hdr;
__le32 flags;
__le32 uid;
__le32 ooc_priority;
/**
* struct iwl_umac_scan_abort
- * @hdr: umac command header
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @flags: reserved
*/
struct iwl_umac_scan_abort {
- struct iwl_mvm_umac_cmd_hdr hdr;
__le32 uid;
__le32 flags;
} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
* ( MGMT_MCAST_KEY = 0x1f )
* @ctrl_flags: %iwl_sta_key_flag
* @IGTK:
- * @K1: IGTK master key
- * @K2: IGTK sub key
+ * @K1: unused
+ * @K2: unused
* @sta_id: station ID that support IGTK
* @key_id:
* @receive_seq_cnt: initial RSC/PN needed for replay check
--- /dev/null
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __fw_api_tof_h__
+#define __fw_api_tof_h__
+
+#include "fw-api.h"
+
+/* ToF sub-group command IDs */
+enum iwl_mvm_tof_sub_grp_ids {
+ TOF_RANGE_REQ_CMD = 0x1,
+ TOF_CONFIG_CMD = 0x2,
+ TOF_RANGE_ABORT_CMD = 0x3,
+ TOF_RANGE_REQ_EXT_CMD = 0x4,
+ TOF_RESPONDER_CONFIG_CMD = 0x5,
+ TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
+ TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
+ TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
+ TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
+ TOF_RANGE_RESPONSE_NOTIF = 0xFE,
+ TOF_MCSI_DEBUG_NOTIF = 0xFB,
+};
+
+/**
+ * struct iwl_tof_config_cmd - ToF configuration
+ * @tof_disabled: 0 - enabled, 1 - disabled
+ * @one_sided_disabled: 0 - enabled, 1 - disabled
+ * @is_debug_mode: 1 - debug mode, 0 - otherwise
+ * @is_buf_required: 1 - channel estimation buffer required, 0 - otherwise
+ */
+struct iwl_tof_config_cmd {
+ __le32 sub_grp_cmd_id;
+ u8 tof_disabled;
+ u8 one_sided_disabled;
+ u8 is_debug_mode;
+ u8 is_buf_required;
+} __packed;
+
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @burst_period: future use: (currently hard coded in the LMAC)
+ * The interval between two sequential bursts.
+ * @min_delta_ftm: future use: (currently hard coded in the LMAC)
+ * The minimum delay between two sequential FTM Responses
+ * in the same burst.
+ * @burst_duration: future use: (currently hard coded in the LMAC)
+ * The total time for all FTMs handshake in the same burst.
+ * Affect the time events duration in the LMAC.
+ * @num_of_burst_exp: future use: (currently hard coded in the LMAC)
+ * The number of bursts for the current ToF request. Affect
+ * the number of events allocations in the current iteration.
+ * @get_ch_est: for xVT only, NA for driver
+ * @abort_responder: when set to '1' - Responder will terminate its activity
+ * (all other fields in the command are ignored)
+ * @recv_sta_req_params: 1 - Responder will ignore the other Responder's
+ * params and use the recommended Initiator params.
+ * 0 - otherwise
+ * @channel_num: current AP Channel
+ * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rate: current AP rate
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency.
+ * 40MHz 0 below center, 1 above center
+ * 80MHz bits [0..1]: 0 the near 20MHz to the center,
+ * 1 the far 20MHz to the center
+ * bit[2] as above 40MHz
+ * @ftm_per_burst: FTMs per Burst
+ * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
+ * '1' - we measure over the Initial FTM Response
+ * @asap_mode: ASAP / Non ASAP mode for the current WLS station
+ * @sta_id: index of the AP STA when in AP mode
+ * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
+ * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @bssid: Current AP BSSID
+ */
+struct iwl_tof_responder_config_cmd {
+ __le32 sub_grp_cmd_id;
+ __le16 burst_period;
+ u8 min_delta_ftm;
+ u8 burst_duration;
+ u8 num_of_burst_exp;
+ u8 get_ch_est;
+ u8 abort_responder;
+ u8 recv_sta_req_params;
+ u8 channel_num;
+ u8 bandwidth;
+ u8 rate;
+ u8 ctrl_ch_position;
+ u8 ftm_per_burst;
+ u8 ftm_resp_ts_avail;
+ u8 asap_mode;
+ u8 sta_id;
+ __le16 tsf_timer_offset_msecs;
+ __le16 toa_offset;
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+/**
+ * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
+ * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
+ * @min_delta_ftm: Minimal time between two consecutive measurements,
+ * in units of 100us. 0 means no preference by station
+ * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
+ * value to be sent to the AP
+ * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
+ * value to be sent to the AP
+ * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
+ * value to be sent to the AP
+ */
+struct iwl_tof_range_req_ext_cmd {
+ __le32 sub_grp_cmd_id;
+ __le16 tsf_timer_offset_msec;
+ __le16 reserved;
+ u8 min_delta_ftm;
+ u8 ftm_format_and_bw20M;
+ u8 ftm_format_and_bw40M;
+ u8 ftm_format_and_bw80M;
+} __packed;
+
+#define IWL_MVM_TOF_MAX_APS 21
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @channel_num: Current AP Channel
+ * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @tsf_delta_direction: TSF relative to the subject AP
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency.
+ * 40MHz 0 below center, 1 above center
+ * 80MHz bits [0..1]: 0 the near 20MHz to the center,
+ * 1 the far 20MHz to the center
+ * bit[2] as above 40MHz
+ * @bssid: AP's bss id
+ * @measure_type: Measurement type: 0 - two sided, 1 - one sided
+ * @num_of_bursts: Recommended value to be sent to the AP. Base-2 exponent
+ * of the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity in units of 100ms. Ignored if num_of_bursts = 0
+ * @samples_per_burst: 2-sided: the number of FTM pairs in a single burst (1-31)
+ * 1-sided: how many rts/cts pairs should be used per burst.
+ * @retries_per_sample: Max number of retries that the LMAC should send
+ * in case of no replies by the AP.
+ * @tsf_delta: TSF Delta in units of microseconds.
+ * The difference between the AP TSF and the device local clock.
+ * @location_req: Location Request Bit[0] LCI should be sent in the FTMR
+ * Bit[1] Civic should be sent in the FTMR
+ * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
+ * @enable_dyn_ack: Enable Dynamic ACK BW.
+ * 0 - Initiator interacts with a regular AP
+ * 1 - Initiator interacts with a Responder machine: it needs to send
+ * its Acks with HT 40MHz / 80MHz, since the Responder should
+ * use them for its channel estimation measurement (this flag will be
+ * set when we configure the opposite machine to be a Responder).
+ * @rssi: Last received value
+ * legal values: -128-0 (0x7f). Values above 0x0 indicate an invalid value.
+ */
+struct iwl_tof_range_req_ap_entry {
+ u8 channel_num;
+ u8 bandwidth;
+ u8 tsf_delta_direction;
+ u8 ctrl_ch_position;
+ u8 bssid[ETH_ALEN];
+ u8 measure_type;
+ u8 num_of_bursts;
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 retries_per_sample;
+ __le32 tsf_delta;
+ u8 location_req;
+ u8 asap_mode;
+ u8 enable_dyn_ack;
+ s8 rssi;
+} __packed;
+
+/**
+ * enum iwl_tof_response_mode
+ * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
+ * possible (not supported for this release)
+ * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
+ * timeout expiration
+ * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
+ * earlier of: measurements completion / timeout
+ * expiration.
+ */
+enum iwl_tof_response_mode {
+ IWL_MVM_TOF_RESPOSE_ASAP = 1,
+ IWL_MVM_TOF_RESPOSE_TIMEOUT,
+ IWL_MVM_TOF_RESPOSE_COMPLETE,
+};
+
+/**
+ * struct iwl_tof_range_req_cmd - start measurement cmd
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @initiator: 0 - NW initiated, 1 - Client initiated
+ * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
+ * '1' - run ML-Algo for ToF only
+ * @req_timeout: Requested timeout of the response in units of 100ms.
+ * This is equivalent to the session time configured to the
+ * LMAC in Initiator Request
+ * @report_policy: Partially supported in this release: the range report
+ * will be uploaded as a batch when ready or when the session is
+ * done (successfully / partially).
+ * One of enum iwl_tof_response_mode.
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
+ * '1' Use MAC Address randomization according to the below
+ * @macaddr_template: MAC address template; bits cleared in macaddr_mask
+ * are taken from this template
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ */
+struct iwl_tof_range_req_cmd {
+ __le32 sub_grp_cmd_id;
+ u8 request_id;
+ u8 initiator;
+ u8 one_sided_los_disable;
+ u8 req_timeout;
+ u8 report_policy;
+ u8 los_det_disable;
+ u8 num_of_ap;
+ u8 macaddr_random;
+ u8 macaddr_template[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
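+
+/*
+ * Illustration of the template/mask semantics documented above: bits
+ * that are clear in macaddr_mask come from macaddr_template, bits that
+ * are set are randomized. The actual randomization is performed by the
+ * UMAC; this sketch only restates the per-byte combination:
+ *
+ *	for (i = 0; i < ETH_ALEN; i++)
+ *		addr[i] = (template[i] & ~mask[i]) | (random[i] & mask[i]);
+ */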
+
+/**
+ * struct iwl_tof_gen_resp_cmd - generic ToF response
+ */
+struct iwl_tof_gen_resp_cmd {
+ __le32 sub_grp_cmd_id;
+ u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @measure_status: current APs measurement status
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ * current AP [nSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [nsec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @range: Measured range [cm]
+ * @range_variance: Measured range variance [cm]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ __le16 reserved;
+ __le32 range;
+ __le32 range_variance;
+ __le32 timestamp;
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ntfy - ToF range response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @request_status: status of current measurement session
+ * @last_in_batch: report policy (when not all responses are uploaded at once)
+ * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ */
+struct iwl_tof_range_rsp_ntfy {
+ u8 request_id;
+ u8 request_status;
+ u8 last_in_batch;
+ u8 num_of_aps;
+ struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
+/**
+ * struct iwl_tof_mcsi_notif - used for debug
+ * @token: token ID for the current session
+ * @role: '0' - initiator, '1' - responder
+ * @initiator_bssid: initiator machine
+ * @responder_bssid: responder machine
+ * @mcsi_buffer: debug data
+ */
+struct iwl_tof_mcsi_notif {
+ u8 token;
+ u8 role;
+ __le16 reserved;
+ u8 initiator_bssid[ETH_ALEN];
+ u8 responder_bssid[ETH_ALEN];
+ u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+} __packed;
+
+/**
+ * struct iwl_tof_neighbor_report - neighbor report notification
+ * @bssid: BSSID of the AP which sent the report
+ * @request_token: same token as the corresponding request
+ * @status:
+ * @report_ie_len: the length of the response frame starting from the Element ID
+ * @data: the IEs
+ */
+struct iwl_tof_neighbor_report {
+ u8 bssid[ETH_ALEN];
+ u8 request_token;
+ u8 status;
+ __le16 report_ie_len;
+ u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_abort_cmd
+ * @request_id: corresponds to a range request
+ */
+struct iwl_tof_range_abort_cmd {
+ __le32 sub_grp_cmd_id;
+ u8 request_id;
+ u8 reserved[3];
+} __packed;
+
+#endif
TX_CMD_FLG_HCCA_CHUNK = BIT(31)
}; /* TX_FLAGS_BITS_API_S_VER_1 */
+/**
+ * enum iwl_tx_pm_timeouts - pm timeout values in TX command
+ * @PM_FRAME_NONE: no need to suspend sleep mode
+ * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU
+ * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec
+ */
+enum iwl_tx_pm_timeouts {
+ PM_FRAME_NONE = 0,
+ PM_FRAME_MGMT = 2,
+ PM_FRAME_ASSOC = 3,
+};
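+
+/*
+ * For illustration, the TX path would typically pick one of these
+ * values based on the frame type. A sketch (the exact selection logic
+ * lives in the TX command setup code and may differ):
+ *
+ *	if (ieee80211_is_mgmt(fc))
+ *		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
+ *	else
+ *		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
+ */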
+
/*
* TX command security control
*/
#include "fw-api-coex.h"
#include "fw-api-scan.h"
#include "fw-api-stats.h"
+#include "fw-api-tof.h"
/* Tx queue numbers */
enum {
ADD_STA = 0x18,
REMOVE_STA = 0x19,
+ /* paging get item */
+ FW_GET_ITEM_CMD = 0x1a,
+
/* TX */
TX_CMD = 0x1c,
TXPATH_FLUSH = 0x1e,
LQ_CMD = 0x4e,
+ /* paging block to FW cpu2 */
+ FW_PAGING_BLOCK_CMD = 0x4f,
+
/* Scan offload */
SCAN_OFFLOAD_REQUEST_CMD = 0x51,
SCAN_OFFLOAD_ABORT_CMD = 0x52,
CALIB_RES_NOTIF_PHY_DB = 0x6b,
/* PHY_DB_CMD = 0x6c, */
+ /* ToF - 802.11mc FTM */
+ TOF_CMD = 0x10,
+ TOF_NOTIFICATION = 0x11,
+
/* Power - legacy power table command */
POWER_TABLE_CMD = 0x77,
PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
u8 data[];
} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
+#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */
+
+/**
+ * struct iwl_fw_paging_cmd - paging layout
+ *
+ * (FW_PAGING_BLOCK_CMD = 0x4f)
+ *
+ * Send to FW the paging layout in the driver.
+ *
+ * @flags: various flags for the command
+ * @block_size: the block size as a power-of-2 exponent
+ * @block_num: number of blocks specified in the command.
+ * @device_phy_addr: virtual addresses from device side
+ */
+struct iwl_fw_paging_cmd {
+ __le32 flags;
+ __le32 block_size;
+ __le32 block_num;
+ __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+
+/*
+ * FW item IDs
+ *
+ * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload /
+ * download
+ */
+enum iwl_fw_item_id {
+ IWL_FW_ITEM_ID_PAGING = 3,
+};
+
+/*
+ * struct iwl_fw_get_item_cmd - get an item from the fw
+ */
+struct iwl_fw_get_item_cmd {
+ __le32 item_id;
+} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
+
+struct iwl_fw_get_item_resp {
+ __le32 item_id;
+ __le32 item_byte_cnt;
+ __le32 item_val;
+} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
+
/**
* struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
* @offset: offset in bytes into the section
__le16 frame_time;
} __packed;
+/*
+ * TCP offload Rx assist info
+ *
+ * bits 0:3 - reserved
+ * bits 4:7 - MIC CRC length
+ * bits 8:12 - MAC header length
+ * bit 13 - Padding indication
+ * bit 14 - A-AMSDU indication
+ * bit 15 - Offload enabled
+ */
+enum iwl_csum_rx_assist_info {
+ CSUM_RXA_RESERVED_MASK = 0x000f,
+ CSUM_RXA_MICSIZE_MASK = 0x00f0,
+ CSUM_RXA_HEADERLEN_MASK = 0x1f00,
+ CSUM_RXA_PADD = BIT(13),
+ CSUM_RXA_AMSDU = BIT(14),
+ CSUM_RXA_ENA = BIT(15)
+};
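+
+/*
+ * Sketch of decoding the assist word per the bit layout above (the
+ * variable names here are illustrative, not part of the driver):
+ *
+ *	u16 assist = le16_to_cpu(res_start->assist);
+ *	u8 mac_hdr_len = (assist & CSUM_RXA_HEADERLEN_MASK) >> 8;
+ *	u8 mic_crc_len = (assist & CSUM_RXA_MICSIZE_MASK) >> 4;
+ *	bool csum_done = assist & CSUM_RXA_ENA;
+ */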
+
+/**
+ * struct iwl_rx_mpdu_res_start - phy info
+ * @assist: see enum iwl_csum_rx_assist_info above
+ */
struct iwl_rx_mpdu_res_start {
__le16 byte_count;
- __le16 reserved;
-} __packed;
+ __le16 assist;
+} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
/**
* enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
* @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
* @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
* @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
+ * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
* @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
* @RX_MPDU_RES_STATUS_STA_ID_MSK:
* @RX_MPDU_RES_STATUS_RRF_KILL:
RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
+ RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16),
+ RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000),
RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000),
RX_MPDU_RES_STATUS_RRF_KILL = BIT(29),
sizeof(tx_ant_cmd), &tx_ant_cmd);
}
+static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+{
+ int i;
+
+ if (!mvm->fw_paging_db[0].fw_paging_block)
+ return;
+
+ for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
+ if (!mvm->fw_paging_db[i].fw_paging_block) {
+ IWL_DEBUG_FW(mvm,
+ "Paging: block %d already freed, continue to next page\n",
+ i);
+
+ continue;
+ }
+
+ __free_pages(mvm->fw_paging_db[i].fw_paging_block,
+ get_order(mvm->fw_paging_db[i].fw_paging_size));
+ }
+ kfree(mvm->trans->paging_download_buf);
+ memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
+}
+
+static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
+{
+ int sec_idx, idx;
+ u32 offset = 0;
+
+ /*
+ * Find where the paging image starts:
+ * if CPU2 exists and is in paging format, the image looks like:
+ * CPU1 sections (2 or more)
+ * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1 and
+ * CPU2 sections
+ * CPU2 sections (not paged)
+ * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2
+ * sections from the CPU2 paging section
+ * CPU2 paging CSS
+ * CPU2 paging image (including instruction and data)
+ */
+ for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+ if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
+ sec_idx++;
+ break;
+ }
+ }
+
+ if (sec_idx >= IWL_UCODE_SECTION_MAX) {
+ IWL_ERR(mvm, "driver didn't find paging image\n");
+ iwl_free_fw_paging(mvm);
+ return -EINVAL;
+ }
+
+ /* copy the CSS block to the dram */
+ IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
+ sec_idx);
+
+ memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
+ image->sec[sec_idx].data,
+ mvm->fw_paging_db[0].fw_paging_size);
+
+ IWL_DEBUG_FW(mvm,
+ "Paging: copied %d CSS bytes to first block\n",
+ mvm->fw_paging_db[0].fw_paging_size);
+
+ sec_idx++;
+
+ /*
+ * Copy the paging blocks to DRAM.
+ * The loop index starts from 1 since the CSS block was already
+ * copied to DRAM and occupies index 0.
+ * The loop stops at num_of_paging_blk since the last block may
+ * not be full.
+ */
+ for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
+ memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+ image->sec[sec_idx].data + offset,
+ mvm->fw_paging_db[idx].fw_paging_size);
+
+ IWL_DEBUG_FW(mvm,
+ "Paging: copied %d paging bytes to block %d\n",
+ mvm->fw_paging_db[idx].fw_paging_size,
+ idx);
+
+ offset += mvm->fw_paging_db[idx].fw_paging_size;
+ }
+
+ /* copy the last paging block */
+ if (mvm->num_of_pages_in_last_blk > 0) {
+ memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+ image->sec[sec_idx].data + offset,
+ FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
+
+ IWL_DEBUG_FW(mvm,
+ "Paging: copied %d pages in the last block %d\n",
+ mvm->num_of_pages_in_last_blk, idx);
+ }
+
+ return 0;
+}
+
+static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
+ const struct fw_img *image)
+{
+ struct page *block;
+ dma_addr_t phys = 0;
+ int blk_idx = 0;
+ int order, num_of_pages;
+ int dma_enabled;
+
+ if (mvm->fw_paging_db[0].fw_paging_block)
+ return 0;
+
+ dma_enabled = is_device_dma_capable(mvm->trans->dev);
+
+ /* ensure PAGING_BLOCK_SIZE is a power of 2 with exponent BLOCK_2_EXP_SIZE */
+ BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
+
+ num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
+ mvm->num_of_paging_blk = ((num_of_pages - 1) /
+ NUM_OF_PAGE_PER_GROUP) + 1;
+
+ mvm->num_of_pages_in_last_blk =
+ num_of_pages -
+ NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
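+
+ /*
+ * Worked example of the math above (assuming FW_PAGING_SIZE = 4KB
+ * and NUM_OF_PAGE_PER_GROUP = 8, per the block comments below):
+ * a 1MB paging image gives num_of_pages = 256,
+ * num_of_paging_blk = ((256 - 1) / 8) + 1 = 32, and
+ * num_of_pages_in_last_blk = 256 - 8 * 31 = 8.
+ */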
+
+ IWL_DEBUG_FW(mvm,
+ "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
+ mvm->num_of_paging_blk,
+ mvm->num_of_pages_in_last_blk);
+
+ /* allocate block of 4Kbytes for paging CSS */
+ order = get_order(FW_PAGING_SIZE);
+ block = alloc_pages(GFP_KERNEL, order);
+ if (!block) {
+ /* free all the previous pages since we failed */
+ iwl_free_fw_paging(mvm);
+ return -ENOMEM;
+ }
+
+ mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+ mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
+
+ if (dma_enabled) {
+ phys = dma_map_page(mvm->trans->dev, block, 0,
+ PAGE_SIZE << order, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mvm->trans->dev, phys)) {
+ /*
+ * free the previous pages and the current one since
+ * we failed to map_page.
+ */
+ iwl_free_fw_paging(mvm);
+ return -ENOMEM;
+ }
+ mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+ } else {
+ mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
+ blk_idx << BLOCK_2_EXP_SIZE;
+ }
+
+ IWL_DEBUG_FW(mvm,
+ "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+ order);
+
+ /*
+ * Allocate blocks in DRAM.
+ * The CSS is allocated in fw_paging_db[0], so the loop starts
+ * from index 1.
+ */
+ for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+ /* allocate block of PAGING_BLOCK_SIZE (32K) */
+ order = get_order(PAGING_BLOCK_SIZE);
+ block = alloc_pages(GFP_KERNEL, order);
+ if (!block) {
+ /* free all the previous pages since we failed */
+ iwl_free_fw_paging(mvm);
+ return -ENOMEM;
+ }
+
+ mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+ mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+
+ if (dma_enabled) {
+ phys = dma_map_page(mvm->trans->dev, block, 0,
+ PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mvm->trans->dev, phys)) {
+ /*
+ * free the previous pages and the current one
+ * since we failed to map_page.
+ */
+ iwl_free_fw_paging(mvm);
+ return -ENOMEM;
+ }
+ mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+ } else {
+ mvm->fw_paging_db[blk_idx].fw_paging_phys =
+ PAGING_ADDR_SIG |
+ blk_idx << BLOCK_2_EXP_SIZE;
+ }
+
+ IWL_DEBUG_FW(mvm,
+ "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+ order);
+ }
+
+ return 0;
+}
+
+static int iwl_save_fw_paging(struct iwl_mvm *mvm,
+ const struct fw_img *fw)
+{
+ int ret;
+
+ ret = iwl_alloc_fw_paging_mem(mvm, fw);
+ if (ret)
+ return ret;
+
+ return iwl_fill_paging_mem(mvm, fw);
+}
+
+/* send paging cmd to FW in case CPU2 has paging image */
+static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
+{
+ int blk_idx;
+ __le32 dev_phy_addr;
+ struct iwl_fw_paging_cmd fw_paging_cmd = {
+ .flags =
+ cpu_to_le32(PAGING_CMD_IS_SECURED |
+ PAGING_CMD_IS_ENABLED |
+ (mvm->num_of_pages_in_last_blk <<
+ PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
+ .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
+ .block_num = cpu_to_le32(mvm->num_of_paging_blk),
+ };
+
+ /* loop for all paging blocks + CSS block */
+ for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+ dev_phy_addr =
+ cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
+ PAGE_2_EXP_SIZE);
+ fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(fw_paging_cmd), &fw_paging_cmd);
+}
+
+/*
+ * Fetch the paging item from the FW (FW_GET_ITEM_CMD); used when CPU2
+ * has a paging image and the device is not DMA capable.
+ */
+static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
+{
+ int ret;
+ struct iwl_fw_get_item_cmd fw_get_item_cmd = {
+ .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
+ };
+
+ struct iwl_fw_get_item_resp *item_resp;
+ struct iwl_host_cmd cmd = {
+ .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &fw_get_item_cmd, },
+ };
+
+ cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
+ if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
+ IWL_ERR(mvm,
+ "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
+ le32_to_cpu(item_resp->item_id));
+ ret = -EIO;
+ goto exit;
+ }
+
+ mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+ GFP_KERNEL);
+ if (!mvm->trans->paging_download_buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
+ mvm->trans->paging_db = mvm->fw_paging_db;
+ IWL_DEBUG_FW(mvm,
+ "Paging: got paging request address (paging_req_addr 0x%08x)\n",
+ mvm->trans->paging_req_addr);
+
+exit:
+ iwl_free_resp(&cmd);
+
+ return ret;
+}
+
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
const struct fw_img *fw;
int ret, i;
enum iwl_ucode_type old_type = mvm->cur_ucode;
- static const u8 alive_cmd[] = { MVM_ALIVE };
+ static const u16 alive_cmd[] = { MVM_ALIVE };
struct iwl_sf_region st_fwrd_space;
if (ucode_type == IWL_UCODE_REGULAR &&
ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
MVM_UCODE_ALIVE_TIMEOUT);
if (ret) {
+ if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ IWL_ERR(mvm,
+ "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+ iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
+ iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
mvm->cur_ucode = old_type;
return ret;
}
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+ /*
+ * Configure and operate the fw paging mechanism.
+ * The driver configures the paging flow only once; the CPU2
+ * paging image is included in the IWL_UCODE_INIT image.
+ */
+ if (fw->paging_mem_size) {
+ /*
+ * When DMA is not enabled, the driver needs to copy / write
+ * the downloaded / uploaded page to / from the smem.
+ * This gets the location of the place where the pages are
+ * stored.
+ */
+ if (!is_device_dma_capable(mvm->trans->dev)) {
+ ret = iwl_trans_get_paging_item(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "failed to get FW paging item\n");
+ return ret;
+ }
+ }
+
+ ret = iwl_save_fw_paging(mvm, fw);
+ if (ret) {
+ IWL_ERR(mvm, "failed to save the FW paging image\n");
+ return ret;
+ }
+
+ ret = iwl_send_paging_cmd(mvm, fw);
+ if (ret) {
+ IWL_ERR(mvm, "failed to send the paging cmd\n");
+ iwl_free_fw_paging(mvm);
+ return ret;
+ }
+ }
+
/*
* Note: all the queues are enabled as part of the interface
* initialization, but in firmware restart scenarios they
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
struct iwl_notification_wait calib_wait;
- static const u8 init_complete[] = {
+ static const u16 init_complete[] = {
INIT_COMPLETE_NOTIF,
CALIB_RES_NOTIF_PHY_DB
};
return;
pkt = cmd.resp_pkt;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n",
- pkt->hdr.flags);
- goto exit;
- }
-
mem_cfg = (void *)pkt->data;
mvm->shared_mem_cfg.shared_mem_addr =
le32_to_cpu(mem_cfg->page_buff_size);
IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
-exit:
iwl_free_resp(&cmd);
}
int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
struct iwl_mvm_dump_desc *desc,
- unsigned int delay)
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
+ unsigned int delay = 0;
+
+ if (trigger)
+ delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+
if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
return -EBUSY;
le32_to_cpu(desc->trig_desc.type));
mvm->fw_dump_desc = desc;
+ mvm->fw_dump_trig = trigger;
queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
}
int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
- const char *str, size_t len, unsigned int delay)
+ const char *str, size_t len,
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
struct iwl_mvm_dump_desc *desc;
desc->trig_desc.type = cpu_to_le32(trig);
memcpy(desc->trig_desc.data, str, len);
- return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
+ return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
}
int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...)
{
- unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
u16 occurrences = le16_to_cpu(trigger->occurrences);
int ret, len = 0;
char buf[64];
len = strlen(buf) + 1;
}
- ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf,
- len, delay);
+ ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
+ trigger);
+
if (ret)
return ret;
goto error;
}
- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10)
- iwl_mvm_get_shared_mem_conf(mvm);
+ iwl_mvm_get_shared_mem_conf(mvm);
ret = iwl_mvm_sf_update(mvm, NULL, false);
if (ret)
goto error;
}
+ if (iwl_mvm_is_csum_supported(mvm) &&
+ mvm->cfg->features & NETIF_F_RXCSUM)
+ iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
+
/* allow FW/transport low power modes if not during restart */
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
return ret;
}
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
(flags & SW_CARD_DISABLED) ? "Kill" : "On",
(flags & CT_KILL_CARD_DISABLED) ?
"Reached" : "Not reached");
-
- return 0;
}
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
le32_to_cpu(mfuart_notif->external_ver),
le32_to_cpu(mfuart_notif->status),
le32_to_cpu(mfuart_notif->duration));
- return 0;
}
}
}
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
}
}
-
- return 0;
}
static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
}
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_beacon_loss_iterator,
mb);
- return 0;
}
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+ ieee80211_hw_set(hw, TDLS_WIDER_BW);
}
if (fw_has_capa(&mvm->fw->ucode_capa,
hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
}
+ hw->netdev_features |= mvm->cfg->features;
+ if (!iwl_mvm_is_csum_supported(mvm))
+ hw->netdev_features &= ~NETIF_F_RXCSUM;
+
ret = ieee80211_register_hw(mvm->hw);
if (ret)
iwl_mvm_leds_exit(mvm);
u32 file_len, fifo_data_len = 0;
u32 smem_len = mvm->cfg->smem_len;
u32 sram2_len = mvm->cfg->dccm2_len;
+ bool monitor_dump_only = false;
lockdep_assert_held(&mvm->mutex);
+ if (mvm->fw_dump_trig &&
+ mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
+ monitor_dump_only = true;
+
fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
if (!fw_error_dump)
return;
fifo_data_len +
sizeof(*dump_info);
+ /* Make room for the SMEM, if it exists */
+ if (smem_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
+
+ /* Make room for the secondary SRAM, if it exists */
+ if (sram2_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
+
+ /* If we only want a monitor dump, reset the file length */
+ if (monitor_dump_only) {
+ file_len = sizeof(*dump_file) + sizeof(*dump_data) +
+ sizeof(*dump_info);
+ }
+
/*
* In 8000 HW family B-step include the ICCM (which resides separately)
*/
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
mvm->fw_dump_desc->len;
- /* Make room for the SMEM, if it exists */
- if (smem_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
- /* Make room for the secondary SRAM, if it exists */
- if (sram2_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
dump_file = vzalloc(file_len);
if (!dump_file) {
kfree(fw_error_dump);
dump_data = iwl_fw_error_next_data(dump_data);
}
+ /* In case we only want a monitor dump, skip to dumping transport data */
+ if (monitor_dump_only)
+ goto dump_trans_data;
+
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
dump_mem->data, IWL8260_ICCM_LEN);
}
- fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
+dump_trans_data:
+ fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
+ mvm->fw_dump_trig);
fw_error_dump->op_mode_len = file_len;
if (fw_error_dump->trans_ptr)
file_len += fw_error_dump->trans_ptr->len;
dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
+ mvm->fw_dump_trig = NULL;
clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
}
static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
{
- bool exit_now;
-
if (!iwl_mvm_is_d0i3_supported(mvm))
return;
- mutex_lock(&mvm->d0i3_suspend_mutex);
- __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
- exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
- &mvm->d0i3_suspend_flags);
- mutex_unlock(&mvm->d0i3_suspend_mutex);
-
- if (exit_now) {
- IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
- _iwl_mvm_exit_d0i3(mvm);
- }
-
if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
if (!wait_event_timeout(mvm->d0i3_exit_waitq,
!test_bit(IWL_MVM_STATUS_IN_D0I3,
goto out_unlock;
}
+ mvmvif->features |= hw->netdev_features;
+
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
goto out_release;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- /* fall-through */
- case WLAN_CIPHER_SUITE_CCMP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ break;
case WLAN_CIPHER_SUITE_AES_CMAC:
WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
break;
int res, time_reg = DEVICE_SYSTEM_TIME_REG;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
- static const u8 time_event_response[] = { HOT_SPOT_CMD };
+ static const u16 time_event_response[] = { HOT_SPOT_CMD };
struct iwl_notification_wait wait_time_event;
struct iwl_hs20_roc_req aux_roc_req = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
#include "sta.h"
#include "fw-api.h"
#include "constants.h"
+#include "tof.h"
#define IWL_INVALID_MAC80211_QUEUE 0xff
#define IWL_MVM_MAX_ADDRESSES 5
* be up'ed after the INIT fw asserted. This is useful to be able to use
* proprietary tools over testmode to debug the INIT fw.
* @tfd_q_hang_detect: enabled the detection of hung transmit queues
- * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
- * Save)-2(default), LP(Low Power)-3
+ * @power_scheme: one of enum iwl_power_scheme
*/
struct iwl_mvm_mod_params {
bool init_dbg;
* # of received beacons accumulated over FW restart, and the current
* average signal of beacons retrieved from the firmware
* @csa_failed: CSA failed to schedule time event, report an error later
+ * @features: hw features active for this vif
*/
struct iwl_mvm_vif {
struct iwl_mvm *mvm;
/* Indicates that CSA countdown may be started */
bool csa_countdown;
bool csa_failed;
+
+ /* TCP Checksum Offload */
+ netdev_features_t features;
};
static inline struct iwl_mvm_vif *
/* NVM sections */
struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
+ /* Paging section */
+ struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
+ u16 num_of_paging_blk;
+ u16 num_of_pages_in_last_blk;
+
/* EEPROM MAC addresses */
struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
* can hold 16 keys at most. Reflect this fact.
*/
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+ u8 fw_key_deleted[STA_KEY_MAX_NUM];
/* references taken by the driver and spinlock protecting them */
spinlock_t refs_lock;
u8 fw_dbg_conf;
struct delayed_work fw_dump_wk;
struct iwl_mvm_dump_desc *fw_dump_desc;
+ struct iwl_fw_dbg_trigger_tlv *fw_dump_trig;
#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
u32 ciphers[6];
+ struct iwl_mvm_tof_data tof_data;
};
/* Extract MVM priv from op_mode and _hw */
IWL_MVM_BT_COEX_RRC;
}
+static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
+}
+
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
struct iwl_host_cmd *cmd);
-int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
u32 flags, u16 len, const void *data);
int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
struct iwl_host_cmd *cmd,
u32 *status);
-int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
u16 len, const void *data,
u32 *status);
int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info, u8 sta_id);
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag);
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc);
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
+static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+}
+
static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
{
flush_work(&mvm->async_handlers_wk);
/* Statistics */
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt);
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
* FW notifications / CMD responses handlers
* Convention: iwl_mvm_rx_<NAME OF THE CMD>
*/
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* MVM PHY */
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
/* Scheduled scan */
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies,
int type);
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* UMAC scan */
int iwl_mvm_config_scan(struct iwl_mvm *mvm);
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
char *buf, int bufsz);
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
#ifdef CONFIG_IWLWIFI_LEDS
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
/* BT Coex */
int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data);
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data);
u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
enum ieee80211_band band);
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
enum iwl_mcc_source src_id);
int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
const char *alpha2,
enum iwl_mcc_source src_id,
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
- const char *str, size_t len, unsigned int delay);
+ const char *str, size_t len,
+ struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
struct iwl_mvm_dump_desc *desc,
- unsigned int delay);
+ struct iwl_fw_dbg_trigger_tlv *trigger);
void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trigger,
return ret;
pkt = cmd.resp_pkt;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- goto exit;
- }
/* Extract NVM response */
nvm_resp = (void *)pkt->data;
return ERR_PTR(ret);
pkt = cmd.resp_pkt;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- goto exit;
- }
/* Extract MCC response */
mcc_resp = (void *)pkt->data;
return retval;
}
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
lockdep_assert_held(&mvm->mutex);
if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
- return 0;
+ return;
mcc[0] = notif->mcc >> 8;
mcc[1] = notif->mcc & 0xff;
mcc, src);
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
if (IS_ERR_OR_NULL(regd))
- return 0;
+ return;
regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
kfree(regd);
-
- return 0;
}
}
struct iwl_rx_handlers {
- u8 cmd_id;
+ u16 cmd_id;
bool async;
- int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+ void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
#define RX_HANDLER(_cmd_id, _fn, _async) \
{ .cmd_id = _cmd_id , .fn = _fn , .async = _async }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async) \
+ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }
/*
* Handlers for fw notifications
* called from a worker with mvm->mutex held.
*/
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
- RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false),
RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
true),
RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
+ RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
};
#undef RX_HANDLER
+#undef RX_HANDLER_GRP
#define CMD(x) [x] = #x
static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(PHY_CONFIGURATION_CMD),
CMD(CALIB_RES_NOTIF_PHY_DB),
CMD(SET_CALIB_DEFAULT_CMD),
+ CMD(FW_PAGING_BLOCK_CMD),
CMD(ADD_STA_KEY),
CMD(ADD_STA),
+ CMD(FW_GET_ITEM_CMD),
CMD(REMOVE_STA),
CMD(LQ_CMD),
CMD(SCAN_OFFLOAD_CONFIG_CMD),
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
+ trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_WIDE_CMD_HDR);
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
trans_cfg.bc_table_dword = true;
/* rpm starts with a taken ref. only set the appropriate bit here. */
mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1;
+ iwl_mvm_tof_init(mvm);
+
return op_mode;
out_unregister:
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
+ iwl_mvm_tof_clean(mvm);
+
ieee80211_free_hw(mvm->hw);
}
struct iwl_async_handler_entry {
struct list_head list;
struct iwl_rx_cmd_buffer rxb;
- int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+ void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
spin_unlock_bh(&mvm->async_handlers_lock);
list_for_each_entry_safe(entry, tmp, &local_list, list) {
- if (entry->fn(mvm, &entry->rxb, NULL))
- IWL_WARN(mvm,
- "returned value from ASYNC handlers are ignored\n");
+ entry->fn(mvm, &entry->rxb);
iwl_free_rxb(&entry->rxb);
list_del(&entry->list);
kfree(entry);
if (!cmds_trig->cmds[i].cmd_id)
break;
- if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd)
+ if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
+ cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
continue;
iwl_mvm_fw_dbg_collect_trig(mvm, trig,
- "CMD 0x%02x received",
- pkt->hdr.cmd);
+ "CMD 0x%02x.%02x received",
+ pkt->hdr.group_id, pkt->hdr.cmd);
break;
}
}
-static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u8 i;
+ if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
+ iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+ return;
+ }
+
iwl_mvm_rx_check_trigger(mvm, pkt);
/*
const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
struct iwl_async_handler_entry *entry;
- if (rx_h->cmd_id != pkt->hdr.cmd)
+ if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
continue;
- if (!rx_h->async)
- return rx_h->fn(mvm, rxb, cmd);
+ if (!rx_h->async) {
+ rx_h->fn(mvm, rxb);
+ return;
+ }
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
/* we can't do much... */
if (!entry)
- return 0;
+ return;
entry->rxb._page = rxb_steal_page(rxb);
entry->rxb._offset = rxb->_offset;
schedule_work(&mvm->async_handlers_wk);
break;
}
-
- return 0;
}
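
The dispatch rework above keys RX handlers on a 16-bit wide command id
(group plus opcode) instead of the old 8-bit command byte. A standalone
sketch of the packing, with hypothetical helper names (the driver's
WIDE_ID() macro does the equivalent, group in the high byte):

	#include <stdint.h>
	#include <stdio.h>

	/* pack group into the high byte, opcode into the low byte */
	static uint16_t wide_id(uint8_t grp, uint8_t opcode)
	{
		return (uint16_t)((grp << 8) | opcode);
	}

	int main(void)
	{
		/* e.g. group 0x01, opcode 0x2a matches as 0x012a */
		printf("0x%04x\n", (unsigned)wide_id(0x01, 0x2a));
		return 0;
	}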
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
* can't recover this since we're already half suspended.
*/
if (!mvm->restart_fw && fw_error) {
- iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0);
+ iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
+ NULL);
} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
&mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
- /* make sure we have no running tx while configuring the qos */
set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
- synchronize_net();
/*
* iwl_mvm_ref_sync takes a reference before checking the flag.
mvm->d0i3_offloading = false;
}
+ /* make sure we have no running tx while configuring the seqno */
+ synchronize_net();
+
iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
sizeof(wowlan_config_cmd),
iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
}
-static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
- struct ieee80211_vif *vif)
+struct iwl_mvm_wakeup_reason_iter_data {
+ struct iwl_mvm *mvm;
+ u32 wakeup_reasons;
+};
+
+static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- struct iwl_mvm *mvm = data;
+ struct iwl_mvm_wakeup_reason_iter_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
- mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
- iwl_mvm_connection_loss(mvm, vif, "D0i3");
+ data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) {
+ if (data->wakeup_reasons &
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
+ iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
+ else
+ ieee80211_beacon_loss(vif);
+ }
}
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
};
struct iwl_wowlan_status *status;
int ret;
- u32 disconnection_reasons, wakeup_reasons;
+ u32 handled_reasons, wakeup_reasons;
__le16 *qos_seq = NULL;
mutex_lock(&mvm->mutex);
IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
- disconnection_reasons =
- IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
- IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
- if (wakeup_reasons & disconnection_reasons)
+ handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+ if (wakeup_reasons & handled_reasons) {
+ struct iwl_mvm_wakeup_reason_iter_data data = {
+ .mvm = mvm,
+ .wakeup_reasons = wakeup_reasons,
+ };
+
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d0i3_disconnect_iter, mvm);
+ iwl_mvm_d0i3_wakeup_reason_iter, &data);
+ }
out:
iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
return _iwl_mvm_exit_d0i3(mvm);
}
-static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
- struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight)
-{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
- ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
-}
-
static const struct iwl_op_mode_ops iwl_mvm_ops = {
.start = iwl_op_mode_mvm_start,
.stop = iwl_op_mode_mvm_stop,
.nic_config = iwl_mvm_nic_config,
.enter_d0i3 = iwl_mvm_enter_d0i3,
.exit_d0i3 = iwl_mvm_exit_d0i3,
- .napi_add = iwl_mvm_napi_add,
};
static
void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_beacon_filter_cmd *cmd)
+ struct iwl_beacon_filter_cmd *cmd,
+ bool d0i3)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (vif->bss_conf.cqm_rssi_thold) {
+ if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
cmd->bf_energy_delta =
cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
/* fw uses an absolute value for this */
return true;
}
-static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi)
-{
- int numerator;
- int dtim_interval = dtimper * bi;
-
- if (WARN_ON(!dtim_interval))
- return 0;
-
- if (dtimper == 1) {
- if (bi > 100)
- numerator = 408;
- else
- numerator = 510;
- } else if (dtimper < 10) {
- numerator = 612;
- } else {
- return 0;
- }
- return max(1, (numerator / dtim_interval));
-}
-
static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
{
struct ieee80211_chanctx_conf *chanctx_conf;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
- if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
- !mvmvif->pm_enabled)
+ if (!vif->bss_conf.ps || !mvmvif->pm_enabled ||
+ (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p))
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
if (!radar_detect && (dtimper < 10) &&
(iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
- cmd->skip_dtim_periods =
- iwl_mvm_power_get_skip_over_dtim(dtimper, bi);
- if (cmd->skip_dtim_periods)
- cmd->flags |=
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ cmd->skip_dtim_periods = 3;
}
if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
ETH_ALEN);
}
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
-
- return 0;
}
struct iwl_power_vifs {
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+ iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
if (!d0i3)
iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
- if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
+ if (IWL_MVM_RS_DISABLE_P2P_MIMO &&
+ iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
return false;
if (mvm->nvm_data->sku_cap_mimo_disabled)
u8 rate_idx;
};
-static const struct rs_init_rate_info rs_init_rates_24ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = {
{ -60, IWL_RATE_54M_INDEX },
{ -64, IWL_RATE_48M_INDEX },
{ -68, IWL_RATE_36M_INDEX },
{ S8_MIN, IWL_RATE_1M_INDEX },
};
-static const struct rs_init_rate_info rs_init_rates_5ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = {
{ -60, IWL_RATE_54M_INDEX },
{ -64, IWL_RATE_48M_INDEX },
{ -72, IWL_RATE_36M_INDEX },
{ S8_MIN, IWL_RATE_6M_INDEX },
};
+static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
+ { -60, IWL_RATE_MCS_7_INDEX },
+ { -64, IWL_RATE_MCS_6_INDEX },
+ { -68, IWL_RATE_MCS_5_INDEX },
+ { -72, IWL_RATE_MCS_4_INDEX },
+ { -80, IWL_RATE_MCS_3_INDEX },
+ { -84, IWL_RATE_MCS_2_INDEX },
+ { -85, IWL_RATE_MCS_1_INDEX },
+ { S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
+ { -60, IWL_RATE_MCS_8_INDEX },
+ { -64, IWL_RATE_MCS_7_INDEX },
+ { -68, IWL_RATE_MCS_6_INDEX },
+ { -72, IWL_RATE_MCS_5_INDEX },
+ { -80, IWL_RATE_MCS_4_INDEX },
+ { -84, IWL_RATE_MCS_3_INDEX },
+ { -85, IWL_RATE_MCS_2_INDEX },
+ { -87, IWL_RATE_MCS_1_INDEX },
+ { S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = {
+ { -60, IWL_RATE_MCS_9_INDEX },
+ { -64, IWL_RATE_MCS_8_INDEX },
+ { -68, IWL_RATE_MCS_7_INDEX },
+ { -72, IWL_RATE_MCS_6_INDEX },
+ { -80, IWL_RATE_MCS_5_INDEX },
+ { -84, IWL_RATE_MCS_4_INDEX },
+ { -85, IWL_RATE_MCS_3_INDEX },
+ { -87, IWL_RATE_MCS_2_INDEX },
+ { -88, IWL_RATE_MCS_1_INDEX },
+ { S8_MIN, IWL_RATE_MCS_0_INDEX },
+};
+
+/* Init the optimal rate based on STA caps.
+ * This, combined with RSSI, is used to report the last tx rate
+ * to userspace when we haven't transmitted enough frames.
+ */
+static void rs_init_optimal_rate(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct rs_rate *rate = &lq_sta->optimal_rate;
+
+ if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID)
+ rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+ else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
+ rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+ else if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate->type = LQ_LEGACY_A;
+ else
+ rate->type = LQ_LEGACY_G;
+
+ rate->bw = rs_bw_from_sta_bw(sta);
+ rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL);
+
+ /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */
+
+ if (is_mimo(rate)) {
+ lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate;
+ } else if (is_siso(rate)) {
+ lq_sta->optimal_rate_mask = lq_sta->active_siso_rate;
+ } else {
+ lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
+
+ if (lq_sta->band == IEEE80211_BAND_5GHZ) {
+ lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
+ lq_sta->optimal_nentries =
+ ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
+ } else {
+ lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy;
+ lq_sta->optimal_nentries =
+ ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
+ }
+ }
+
+ if (is_vht(rate)) {
+ if (rate->bw == RATE_MCS_CHAN_WIDTH_20) {
+ lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz;
+ lq_sta->optimal_nentries =
+ ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
+ } else {
+ lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz;
+ lq_sta->optimal_nentries =
+ ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
+ }
+ } else if (is_ht(rate)) {
+ lq_sta->optimal_rates = rs_optimal_rates_ht;
+ lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht);
+ }
+}
+
+/* Compute the optimal rate index based on RSSI */
+static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct rs_rate *rate = &lq_sta->optimal_rate;
+ int i;
+
+ rate->index = find_first_bit(&lq_sta->optimal_rate_mask,
+ BITS_PER_LONG);
+
+ for (i = 0; i < lq_sta->optimal_nentries; i++) {
+ int rate_idx = lq_sta->optimal_rates[i].rate_idx;
+
+ if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) &&
+ (BIT(rate_idx) & lq_sta->optimal_rate_mask)) {
+ rate->index = rate_idx;
+ break;
+ }
+ }
+
+ rs_dump_rate(mvm, rate, "OPTIMAL RATE");
+ return rate;
+}
+
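
The lookup above walks the threshold table from the best rate down and
takes the first entry whose RSSI floor is met and whose rate index is
allowed by the mask. A minimal standalone sketch of that walk, with
hypothetical table values (not driver code):

	#include <stdio.h>

	struct rssi_to_rate {
		int rssi;	/* minimum RSSI (dBm) for this rate */
		int rate_idx;
	};

	/* best rate first, shaped like rs_optimal_rates_5ghz_legacy */
	static const struct rssi_to_rate tbl[] = {
		{ -60, 7 }, { -64, 6 }, { -72, 5 }, { -128, 0 },
	};

	static int lookup(int last_rssi, unsigned long allowed)
	{
		int i;

		for (i = 0; i < 4; i++)
			if (last_rssi >= tbl[i].rssi &&
			    (allowed & (1UL << tbl[i].rate_idx)))
				return tbl[i].rate_idx;
		return 0;
	}

	int main(void)
	{
		/* -66 dBm first clears the -72 floor -> rate index 5 */
		printf("rate idx %d\n", lookup(-66, ~0UL));
		return 0;
	}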
/* Choose an initial legacy rate and antenna to use based on the RSSI
* of last Rx
*/
if (band == IEEE80211_BAND_5GHZ) {
rate->type = LQ_LEGACY_A;
- initial_rates = rs_init_rates_5ghz;
- nentries = ARRAY_SIZE(rs_init_rates_5ghz);
+ initial_rates = rs_optimal_rates_5ghz_legacy;
+ nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
} else {
rate->type = LQ_LEGACY_G;
- initial_rates = rs_init_rates_24ghz;
- nentries = ARRAY_SIZE(rs_init_rates_24ghz);
+ initial_rates = rs_optimal_rates_24ghz_legacy;
+ nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
}
if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) {
struct iwl_lq_sta *lq_sta,
struct ieee80211_rx_status *rx_status)
{
+ int i;
+
lq_sta->pers.chains = rx_status->chains;
lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2];
+ lq_sta->pers.last_rssi = S8_MIN;
+
+ for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
+ if (!(lq_sta->pers.chains & BIT(i)))
+ continue;
+
+ if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi)
+ lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i];
+ }
}
/**
rate = &tbl->rate;
rs_get_initial_rate(mvm, lq_sta, band, rate);
+ rs_init_optimal_rate(mvm, sta, lq_sta);
WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
if (rate->ant == ANT_A)
struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = mvm_sta;
+ struct rs_rate *optimal_rate;
+ u32 last_ucode_rate;
if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
/* if vif isn't initialized mvm doesn't know about
iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
info->band, &info->control.rates[0]);
-
info->control.rates[0].count = 1;
+
+ /* Report the optimal rate based on RSSI and STA caps if we haven't
+ * converged yet (too little traffic) or are still exploring other
+ * modulations
+ */
+ if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) {
+ optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
+ last_ucode_rate = ucode_rate_from_rs_rate(mvm,
+ optimal_rate);
+ iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
+ &txrc->reported_rate);
+ }
}
static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
#endif
lq_sta->pers.chains = 0;
memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+ lq_sta->pers.last_rssi = S8_MIN;
return &sta_priv->lq_sta;
}
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
u8 max_siso_rate_idx;
u8 max_mimo2_rate_idx;
+ /* Optimal rate based on RSSI and STA caps.
+ * Used only to reflect link speed to userspace.
+ */
+ struct rs_rate optimal_rate;
+ unsigned long optimal_rate_mask;
+ const struct rs_init_rate_info *optimal_rates;
+ int optimal_nentries;
+
u8 missed_rate_counter;
struct iwl_lq_cmd lq;
#endif
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
+ s8 last_rssi;
struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
struct iwl_mvm *drv;
} pers;
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
+#include <linux/skbuff.h>
#include "iwl-trans.h"
#include "mvm.h"
#include "fw-api.h"
* Copies the phy information in mvm->last_phy_info, it will be used when the
* actual data will come from the fw in the next packet.
*/
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
spin_unlock(&mvm->drv_stats_lock);
}
#endif
-
- return 0;
}
/*
* Adds the rxb to a new skb and give it to mac80211
*/
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
struct sk_buff *skb,
struct ieee80211_hdr *hdr, u16 len,
u32 ampdu_status, u8 crypt_len,
fraglen, rxb->truesize);
}
- ieee80211_rx(mvm->hw, skb);
+ ieee80211_rx_napi(mvm->hw, skb, napi);
}
/*
return 0;
}
+static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ u32 status)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ if (mvmvif->features & NETIF_F_RXCSUM &&
+ status & RX_MPDU_RES_STATUS_CSUM_DONE &&
+ status & RX_MPDU_RES_STATUS_CSUM_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
/*
* iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
*
* Handles the actual data of the Rx packet from the fw
*/
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct ieee80211_hdr *hdr;
struct ieee80211_rx_status *rx_status;
skb = alloc_skb(128, GFP_ATOMIC);
if (!skb) {
IWL_ERR(mvm, "alloc_skb failed\n");
- return 0;
+ return;
}
rx_status = IEEE80211_SKB_RXCB(skb);
IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
rx_pkt_status);
kfree_skb(skb);
- return 0;
+ return;
}
if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
phy_info->cfg_phy_cnt);
kfree_skb(skb);
- return 0;
+ return;
}
/*
}
}
+ if (sta && ieee80211_is_data(hdr->frame_control))
+ iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
+
rcu_read_unlock();
/* set the preamble flag if appropriate */
iwl_mvm_update_frame_stats(mvm, rate_n_flags,
rx_status->flag & RX_FLAG_AMPDU_DETAILS);
#endif
- iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
crypt_len, rxb);
- return 0;
}
static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
iwl_rx_packet_payload_len(pkt));
}
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
- return 0;
}
#define IWL_DENSE_EBS_SCAN_RATIO 5
#define IWL_SPARSE_EBS_SCAN_RATIO 1
-struct iwl_mvm_scan_params {
- u32 max_out_time;
+enum iwl_mvm_scan_type {
+ IWL_SCAN_TYPE_UNASSOC,
+ IWL_SCAN_TYPE_WILD,
+ IWL_SCAN_TYPE_MILD,
+ IWL_SCAN_TYPE_FRAGMENTED,
+};
+
+enum iwl_mvm_traffic_load {
+ IWL_MVM_TRAFFIC_LOW,
+ IWL_MVM_TRAFFIC_MEDIUM,
+ IWL_MVM_TRAFFIC_HIGH,
+};
+
+struct iwl_mvm_scan_timing_params {
+ u32 dwell_active;
+ u32 dwell_passive;
+ u32 dwell_fragmented;
u32 suspend_time;
- bool passive_fragmented;
+ u32 max_out_time;
+};
+
+static struct iwl_mvm_scan_timing_params scan_timing[] = {
+ [IWL_SCAN_TYPE_UNASSOC] = {
+ .dwell_active = 10,
+ .dwell_passive = 110,
+ .dwell_fragmented = 44,
+ .suspend_time = 0,
+ .max_out_time = 0,
+ },
+ [IWL_SCAN_TYPE_WILD] = {
+ .dwell_active = 10,
+ .dwell_passive = 110,
+ .dwell_fragmented = 44,
+ .suspend_time = 30,
+ .max_out_time = 120,
+ },
+ [IWL_SCAN_TYPE_MILD] = {
+ .dwell_active = 10,
+ .dwell_passive = 110,
+ .dwell_fragmented = 44,
+ .suspend_time = 120,
+ .max_out_time = 120,
+ },
+ [IWL_SCAN_TYPE_FRAGMENTED] = {
+ .dwell_active = 10,
+ .dwell_passive = 110,
+ .dwell_fragmented = 44,
+ .suspend_time = 95,
+ .max_out_time = 44,
+ },
+};
+
+struct iwl_mvm_scan_params {
+ enum iwl_mvm_scan_type type;
u32 n_channels;
u16 delay;
int n_ssids;
int n_match_sets;
struct iwl_scan_probe_req preq;
struct cfg80211_match_set *match_sets;
- struct _dwell {
- u16 passive;
- u16 active;
- u16 fragmented;
- } dwell[IEEE80211_NUM_BANDS];
- struct {
- u8 iterations;
- u8 full_scan_mul; /* not used for UMAC */
- } schedule[2];
+ u8 iterations[2];
};
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
}
-/*
- * If req->n_ssids > 0, it means we should do an active scan.
- * In case of active scan w/o directed scan, we receive a zero-length SSID
- * just to notify that this scan is active and not passive.
- * In order to notify the FW of the number of SSIDs we wish to scan (including
- * the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first). If the first SSID is
- * already included in the probe template, so we need to set only
- * req->n_ssids - 1 bits in addition to the first bit.
- */
-static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
- enum ieee80211_band band, int n_ssids)
-{
- if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
- return 10;
- if (band == IEEE80211_BAND_2GHZ)
- return 20 + 3 * (n_ssids + 1);
- return 10 + 2 * (n_ssids + 1);
-}
-
-static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
- enum ieee80211_band band)
-{
- if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
- return 110;
- return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
-}
-
static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
*global_cnt += 1;
}
-static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct iwl_mvm_scan_params *params)
+static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
+{
+ return IWL_MVM_TRAFFIC_LOW;
+}
+
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params)
{
int global_cnt = 0;
- enum ieee80211_band band;
- u8 frag_passive_dwell = 0;
+ enum iwl_mvm_traffic_load load;
+ bool low_latency;
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_condition_iterator,
&global_cnt);
if (!global_cnt)
- goto not_bound;
-
- params->suspend_time = 30;
- params->max_out_time = 120;
-
- if (iwl_mvm_low_latency(mvm)) {
- if (fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
-
- params->suspend_time = 105;
- /*
- * If there is more than one active interface make
- * passive scan more fragmented.
- */
- frag_passive_dwell = 40;
- params->max_out_time = frag_passive_dwell;
- } else {
- params->suspend_time = 120;
- params->max_out_time = 120;
- }
- }
+ return IWL_SCAN_TYPE_UNASSOC;
- if (frag_passive_dwell &&
- fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
- /*
- * P2P device scan should not be fragmented to avoid negative
- * impact on P2P device discovery. Configure max_out_time to be
- * equal to dwell time on passive channel. Take a longest
- * possible value, one that corresponds to 2GHz band
- */
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- u32 passive_dwell =
- iwl_mvm_get_passive_dwell(mvm,
- IEEE80211_BAND_2GHZ);
- params->max_out_time = passive_dwell;
- } else {
- params->passive_fragmented = true;
- }
- }
-
- if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
- (params->max_out_time > 200))
- params->max_out_time = 200;
-
-not_bound:
+ load = iwl_mvm_get_traffic_load(mvm);
+ low_latency = iwl_mvm_low_latency(mvm);
- for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
- if (params->passive_fragmented)
- params->dwell[band].fragmented = frag_passive_dwell;
+ if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
+ return IWL_SCAN_TYPE_FRAGMENTED;
- params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
- band);
- params->dwell[band].active =
- iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
- }
+ if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
+ return IWL_SCAN_TYPE_MILD;
- IWL_DEBUG_SCAN(mvm,
- "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
- params->max_out_time, params->suspend_time,
- params->passive_fragmented);
- IWL_DEBUG_SCAN(mvm,
- "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
- params->dwell[IEEE80211_BAND_2GHZ].passive,
- params->dwell[IEEE80211_BAND_2GHZ].active,
- params->dwell[IEEE80211_BAND_2GHZ].fragmented);
- IWL_DEBUG_SCAN(mvm,
- "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
- params->dwell[IEEE80211_BAND_5GHZ].passive,
- params->dwell[IEEE80211_BAND_5GHZ].active,
- params->dwell[IEEE80211_BAND_5GHZ].fragmented);
+ return IWL_SCAN_TYPE_WILD;
}
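
For reference, the selection implemented above reduces to the following
decision table (derived directly from the code):

	no bound interface                           -> IWL_SCAN_TYPE_UNASSOC
	(high load or low latency), not a P2P
	    device, and FW supports fragmented scan  -> IWL_SCAN_TYPE_FRAGMENTED
	medium-or-higher load, or low latency        -> IWL_SCAN_TYPE_MILD
	otherwise                                    -> IWL_SCAN_TYPE_WILD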
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
return buf;
}
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
iwl_mvm_dump_channel_list(notif->results,
notif->scanned_channels, buf,
sizeof(buf)));
- return 0;
}
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
ieee80211_sched_scan_results(mvm->hw);
-
- return 0;
}
static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
}
}
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
- IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+ IWL_DEBUG_SCAN(mvm,
+ "Scheduled scan %s, EBS status %s, Last line %d, Last iteration %d, Time after last iteration %d\n",
aborted ? "aborted" : "completed",
- iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status),
+ scan_notif->last_schedule_line,
+ scan_notif->last_schedule_iteration,
+ __le32_to_cpu(scan_notif->time_after_last_iter));
mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
- IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
+ IWL_DEBUG_SCAN(mvm,
+ "Scheduled scan %s, EBS status %s, Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
aborted ? "aborted" : "completed",
- iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status),
+ scan_notif->last_schedule_line,
+ scan_notif->last_schedule_iteration,
+ __le32_to_cpu(scan_notif->time_after_last_iter));
mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
ieee80211_sched_scan_stopped(mvm->hw);
mvm->last_ebs_successful =
scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
-
- return 0;
}
static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
struct iwl_scan_req_lmac *cmd,
struct iwl_mvm_scan_params *params)
{
- cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
- cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
- if (params->passive_fragmented)
- cmd->fragmented_dwell =
- params->dwell[IEEE80211_BAND_2GHZ].fragmented;
- cmd->max_out_time = cpu_to_le32(params->max_out_time);
- cmd->suspend_time = cpu_to_le32(params->suspend_time);
+ cmd->active_dwell = scan_timing[params->type].dwell_active;
+ cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+ cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+ cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+ cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
}
static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
{
- return params->schedule[0].iterations + params->schedule[1].iterations;
+ return params->iterations[0] + params->iterations[1];
}
static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
- if (params->passive_fragmented)
+ if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm))
ssid_bitmap <<= 1;
cmd->schedule[0].delay = cpu_to_le16(params->interval);
- cmd->schedule[0].iterations = params->schedule[0].iterations;
- cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
+ cmd->schedule[0].iterations = params->iterations[0];
+ cmd->schedule[0].full_scan_mul = 1;
cmd->schedule[1].delay = cpu_to_le16(params->interval);
- cmd->schedule[1].iterations = params->schedule[1].iterations;
- cmd->schedule[1].full_scan_mul = params->schedule[1].iterations;
+ cmd->schedule[1].iterations = params->iterations[1];
+ cmd->schedule[1].full_scan_mul = 1;
if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
cmd->channel_opt[0].flags =
int num_channels =
mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
- int ret, i, j = 0, cmd_size, data_size;
+ int ret, i, j = 0, cmd_size;
struct iwl_host_cmd cmd = {
- .id = SCAN_CFG_CMD,
+ .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
};
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
if (!scan_config)
return -ENOMEM;
- data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
- scan_config->hdr.size = cpu_to_le16(data_size);
scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
SCAN_CONFIG_FLAG_SET_TX_CHAINS |
struct iwl_scan_req_umac *cmd,
struct iwl_mvm_scan_params *params)
{
- cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
- cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
- if (params->passive_fragmented)
- cmd->fragmented_dwell =
- params->dwell[IEEE80211_BAND_2GHZ].fragmented;
- cmd->max_out_time = cpu_to_le32(params->max_out_time);
- cmd->suspend_time = cpu_to_le32(params->suspend_time);
+ cmd->active_dwell = scan_timing[params->type].dwell_active;
+ cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+ cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+ cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+ cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
cmd->scan_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
- if (params->passive_fragmented)
+ if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm))
return uid;
memset(cmd, 0, ksize(cmd));
- cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
- sizeof(struct iwl_mvm_umac_cmd_hdr));
iwl_mvm_scan_umac_dwell(mvm, cmd, params);
params.n_match_sets = 0;
params.match_sets = NULL;
- params.schedule[0].iterations = 1;
- params.schedule[0].full_scan_mul = 0;
- params.schedule[1].iterations = 0;
- params.schedule[1].full_scan_mul = 0;
+ params.iterations[0] = 1;
+ params.iterations[1] = 0;
- iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+ params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- hcmd.id = SCAN_REQ_UMAC;
+ hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
ret = iwl_mvm_scan_umac(mvm, vif, &params,
IWL_MVM_SCAN_REGULAR);
} else {
params.n_match_sets = req->n_match_sets;
params.match_sets = req->match_sets;
- params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
- params.schedule[0].full_scan_mul = 1;
- params.schedule[1].iterations = 0xff;
- params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+ params.iterations[0] = 0;
+ params.iterations[1] = 0xff;
+
+ params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
if (req->interval > U16_MAX) {
IWL_DEBUG_SCAN(mvm,
params.delay = req->delay;
}
- iwl_mvm_scan_calc_dwell(mvm, vif, &params);
-
ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
if (ret)
return ret;
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- hcmd.id = SCAN_REQ_UMAC;
+ hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
} else {
hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
return ret;
}
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
- return 0;
+ return;
/* if the scan is already stopping, we don't need to notify mac80211 */
if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
}
mvm->scan_status &= ~mvm->scan_uid_status[uid];
-
IWL_DEBUG_SCAN(mvm,
- "Scan completed, uid %u type %u, status %s, EBS status %s\n",
+ "Scan completed, uid %u type %u, status %s, EBS status %s, Last line %d, Last iteration %d, Time from last iteration %d\n",
uid, mvm->scan_uid_status[uid],
notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted",
- iwl_mvm_ebs_status_str(notif->ebs_status));
+ iwl_mvm_ebs_status_str(notif->ebs_status),
+ notif->last_schedule, notif->last_iter,
+ __le32_to_cpu(notif->time_from_last_iter));
if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
mvm->last_ebs_successful = false;
mvm->scan_uid_status[uid] = 0;
-
- return 0;
}
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
iwl_mvm_dump_channel_list(notif->results,
notif->scanned_channels, buf,
sizeof(buf)));
- return 0;
}
static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
{
- struct iwl_umac_scan_abort cmd = {
- .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
- sizeof(struct iwl_mvm_umac_cmd_hdr)),
- };
+ struct iwl_umac_scan_abort cmd = {};
int uid, ret;
lockdep_assert_held(&mvm->mutex);
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ iwl_cmd_id(SCAN_ABORT_UMAC,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
{
struct iwl_notification_wait wait_scan_done;
- static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+ static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
SCAN_OFFLOAD_COMPLETE, };
int ret;
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
- int i;
+ int i, max = -1, max_offs = -1;
lockdep_assert_held(&mvm->mutex);
- i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);
+ /* Pick the unused key offset with the highest 'deleted'
+ * counter. Every time a key is deleted, all the counters
+ * are incremented and the one that was just deleted is
+ * reset to zero. Thus, the highest counter is the one
+ * that was deleted longest ago. Pick that one.
+ */
+ for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+ if (test_bit(i, mvm->fw_key_table))
+ continue;
+ if (mvm->fw_key_deleted[i] > max) {
+ max = mvm->fw_key_deleted[i];
+ max_offs = i;
+ }
+ }
- if (i == STA_KEY_MAX_NUM)
+ if (max_offs < 0)
return STA_KEY_IDX_INVALID;
- __set_bit(i, mvm->fw_key_table);
+ __set_bit(max_offs, mvm->fw_key_table);
- return i;
+ return max_offs;
}
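
A standalone illustration of the "deleted longest ago" pick implemented
above, with hypothetical counter values (not driver code):

	#include <stdio.h>

	#define KEYS 4

	static int pick_offset(const int used[KEYS], const int deleted[KEYS])
	{
		int i, max = -1, max_offs = -1;

		for (i = 0; i < KEYS; i++) {
			if (used[i])
				continue;
			if (deleted[i] > max) {
				max = deleted[i];
				max_offs = i;
			}
		}
		return max_offs;	/* -1: no free offset */
	}

	int main(void)
	{
		int used[KEYS]    = { 0, 1, 0, 0 };
		/* each deletion bumps every counter and zeroes the freed
		 * slot, so the largest counter is the oldest free slot */
		int deleted[KEYS] = { 2, 0, 1, 3 };

		printf("picked offset %d\n", pick_offset(used, deleted)); /* 3 */
		return 0;
	}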
static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
const u8 *pn;
memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
- ieee80211_aes_cmac_calculate_k1_k2(keyconf,
- igtk_cmd.K1, igtk_cmd.K2);
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
pn = seq.aes_cmac.pn;
igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
{
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
u8 sta_id;
- int ret;
+ int ret, i;
lockdep_assert_held(&mvm->mutex);
return -ENOENT;
}
+ /* track which key was deleted last */
+ for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+ if (mvm->fw_key_deleted[i] < U8_MAX)
+ mvm->fw_key_deleted[i]++;
+ }
+ mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
+
if (sta_id == IWL_MVM_STATION_COUNT) {
IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
return 0;
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
u32 sta_id = le32_to_cpu(notif->sta_id);
if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
- return 0;
+ return;
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (!IS_ERR_OR_NULL(sta))
ieee80211_sta_eosp(sta);
rcu_read_unlock();
-
- return 0;
}
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u32 iv32,
u16 *phase1key);
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return;
pkt = cmd.resp_pkt;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
- pkt->hdr.flags);
- goto exit;
- }
- if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
- goto exit;
+ WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
/* we don't really care about the response at this point */
-exit:
iwl_free_resp(&cmd);
}
mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
}
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
/* can fail sometimes */
if (!le32_to_cpu(notif->status)) {
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
- goto out;
+ return;
}
if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
- goto out;
+ return;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
/* the station may not be here, but if it is, it must be a TDLS peer */
if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
- goto out;
+ return;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
vif = mvmsta->vif;
msecs_to_jiffies(delay));
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
-
-out:
- return 0;
}
static int
cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
info = IEEE80211_SKB_CB(skb);
- if (info->control.hw_key)
- iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);
+ hdr = (void *)skb->data;
+ if (info->control.hw_key) {
+ if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
+ rcu_read_unlock();
+ ret = -EINVAL;
+ goto out;
+ }
+ iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
+ }
iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
mvmsta->sta_id);
- hdr = (void *)skb->data;
iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
hdr->frame_control);
rcu_read_unlock();
/*
* The Rx handler for time event notifications
*/
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_time_event_notif *notif = (void *)pkt->data;
}
unlock:
spin_unlock_bh(&mvm->time_event_lock);
-
- return 0;
}
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
struct iwl_mvm_time_event_data *te_data,
struct iwl_time_event_cmd *te_cmd)
{
- static const u8 time_event_response[] = { TIME_EVENT_CMD };
+ static const u16 time_event_response[] = { TIME_EVENT_CMD };
struct iwl_notification_wait wait_time_event;
int ret;
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
+ const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
struct iwl_notification_wait wait_te_notif;
struct iwl_time_event_cmd time_cmd = {};
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
- time_cmd.apply_time =
- cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
+ time_cmd.apply_time = cpu_to_le32(0);
time_cmd.max_frags = TE_V2_FRAG_NONE;
time_cmd.max_delay = cpu_to_le32(max_delay);
/*
* iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
*/
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/**
* iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
--- /dev/null
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "fw-api-tof.h"
+
+#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return;
+
+ memset(tof_data, 0, sizeof(*tof_data));
+
+ tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (IWL_MVM_TOF_IS_RESPONDER) {
+ tof_data->responder_cfg.sub_grp_cmd_id =
+ cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
+ tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
+ }
+#endif
+
+ tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
+ tof_data->range_req.req_timeout = 1;
+ tof_data->range_req.initiator = 1;
+ tof_data->range_req.report_policy = 3;
+
+ tof_data->range_req_ext.sub_grp_cmd_id =
+ cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
+
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return;
+
+ memset(tof_data, 0, sizeof(*tof_data));
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+static void iwl_tof_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ bool *enabled = _data;
+
+ /* non bss vif exists */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ *enabled = false;
+}
+
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
+{
+ struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
+ bool enabled;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_tof_iterator, &enabled);
+ if (!enabled) {
+ IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
+ return -EINVAL;
+ }
+
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(*cmd), cmd);
+}
+
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
+{
+ struct iwl_tof_range_abort_cmd cmd = {
+ .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
+ .request_id = id,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ if (id != mvm->tof_data.active_range_request) {
+ IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
+ id, mvm->tof_data.active_range_request);
+ return -EINVAL;
+ }
+
+ /* after abort is sent there's no active request anymore */
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ if (vif->p2p || vif->type != NL80211_IFTYPE_AP) {
+ IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
+ return -EIO;
+ }
+
+ cmd->sta_id = mvmvif->bcast_sta.sta_id;
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(*cmd), cmd);
+}
+#endif
+
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_host_cmd cmd = {
+ .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .len = { sizeof(mvm->tof_data.range_req), },
+ /* no copy because of the command size */
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ IWL_ERR(mvm, "Cannot send range request, not STA mode\n");
+ return -EIO;
+ }
+
+ /* nesting of range requests is not supported in FW */
+ if (mvm->tof_data.active_range_request !=
+ IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
+ IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
+ mvm->tof_data.active_range_request);
+ return -EIO;
+ }
+
+ mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
+
+ cmd.data[0] = &mvm->tof_data.range_req;
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
+ return -EIO;
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(mvm->tof_data.range_req_ext),
+ &mvm->tof_data.range_req_ext);
+}
+
+static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
+{
+ struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
+
+ if (resp->request_id != mvm->tof_data.active_range_request) {
+ IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
+ resp->request_id, mvm->tof_data.active_range_request);
+ return -EIO;
+ }
+
+ memcpy(&mvm->tof_data.range_resp, resp,
+ sizeof(struct iwl_tof_range_rsp_ntfy));
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+ return 0;
+}
+
+static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
+{
+ struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
+
+ IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
+ return 0;
+}
+
+static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
+{
+ struct iwl_tof_neighbor_report *report =
+ (struct iwl_tof_neighbor_report *)data;
+
+ IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
+ report->bssid, report->request_token, report->status);
+ return 0;
+}
+
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
+ case TOF_RANGE_RESPONSE_NOTIF:
+ iwl_mvm_tof_range_resp(mvm, resp->data);
+ break;
+ case TOF_MCSI_DEBUG_NOTIF:
+ iwl_mvm_tof_mcsi_notif(mvm, resp->data);
+ break;
+ case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
+ iwl_mvm_tof_nb_report_notif(mvm, resp->data);
+ break;
+ default:
+ IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
+ le32_to_cpu(resp->sub_grp_cmd_id));
+ break;
+ }
+}
--- /dev/null
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __tof_h__
+#define __tof_h__
+
+#include "fw-api-tof.h"
+
+struct iwl_mvm_tof_data {
+ struct iwl_tof_config_cmd tof_cfg;
+ struct iwl_tof_range_req_cmd range_req;
+ struct iwl_tof_range_req_ext_cmd range_req_ext;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct iwl_tof_responder_config_cmd responder_cfg;
+#endif
+ struct iwl_tof_range_rsp_ntfy range_resp;
+ u8 last_abort_id;
+ u16 active_range_request;
+};
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm);
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+#endif
+#endif /* __tof_h__ */
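
For reference, a minimal sketch of how an op-mode caller might drive this
API (illustrative only, not part of the patch; the helper name is made up
and error unwinding is kept to the bare minimum):

	/* issue a ToF range request under mvm->mutex */
	static int example_start_ranging(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
	{
		int ret;

		mutex_lock(&mvm->mutex);
		ret = iwl_mvm_tof_config_cmd(mvm);	/* enable ToF first */
		if (!ret)
			ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
		mutex_unlock(&mvm->mutex);
		return ret;
	}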
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
return true;
}
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
int temp;
/* the notification is handled synchronously in ctkill, so skip here */
if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
- return 0;
+ return;
temp = iwl_mvm_temp_notif_parse(mvm, pkt);
if (temp < 0)
- return 0;
+ return;
iwl_mvm_tt_temp_changed(mvm, temp);
-
- return 0;
}
static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
int iwl_mvm_get_temp(struct iwl_mvm *mvm)
{
struct iwl_notification_wait wait_temp_notif;
- static const u8 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
+ static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
int ret, temp;
lockdep_assert_held(&mvm->mutex);
if (ieee80211_is_mgmt(fc)) {
if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->pm_frame_timeout = cpu_to_le16(3);
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
+ else if (ieee80211_is_action(fc))
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
else
- tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
/* The spec allows Action frames in A-MPDU, we don't support
* it
*/
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
- tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
} else {
- tx_cmd->pm_frame_timeout = 0;
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
}
if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
/*
* Sets the fields in the Tx cmd that are crypto related
*/
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag)
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag,
+ int hdrlen)
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
+ u8 *crypto_hdr = skb_frag->data + hdrlen;
+ u64 pn;
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
+ pn = atomic64_inc_return(&keyconf->tx_pn);
+ crypto_hdr[0] = pn;
+ crypto_hdr[2] = 0;
+ crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+ crypto_hdr[1] = pn >> 8;
+ crypto_hdr[4] = pn >> 16;
+ crypto_hdr[5] = pn >> 24;
+ crypto_hdr[6] = pn >> 32;
+ crypto_hdr[7] = pn >> 40;
break;
case WLAN_CIPHER_SUITE_TKIP:
*/
static struct iwl_device_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct ieee80211_sta *sta, u8 sta_id)
+ int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
if (info->control.hw_key)
- iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb);
+ iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
u8 sta_id;
+ int hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
return -1;
IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
/*
- * If the interface on which frame is sent is the P2P_DEVICE
+ * If the interface on which the frame is sent is the P2P_DEVICE
* or an AP/GO interface use the broadcast station associated
- * with it; otherwise use the AUX station.
+ * with it; otherwise if the interface is a managed interface
+ * use the AP station associated with it for multicast traffic
+ * (this is not possible for unicast packets as a TDLS discovery
+ * response is sent without a station entry); otherwise use the
+ * AUX station.
*/
- if (info->control.vif &&
- (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
- info->control.vif->type == NL80211_IFTYPE_AP)) {
+ sta_id = mvm->aux_sta.sta_id;
+ if (info->control.vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info->control.vif);
- sta_id = mvmvif->bcast_sta.sta_id;
- } else {
- sta_id = mvm->aux_sta.sta_id;
+
+ if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+ info->control.vif->type == NL80211_IFTYPE_AP)
+ sta_id = mvmvif->bcast_sta.sta_id;
+ else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+ is_multicast_ether_addr(hdr->addr1)) {
+ u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+
+ if (ap_sta_id != IWL_MVM_STATION_COUNT)
+ sta_id = ap_sta_id;
+ }
}
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
- dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id);
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
if (!dev_cmd)
return -1;
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+ memcpy(tx_cmd->hdr, hdr, hdrlen);
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
u8 tid = IWL_MAX_TID_COUNT;
u8 txq_id = info->hw_queue;
bool is_data_qos = false, is_ampdu = false;
+ int hdrlen;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
fc = hdr->frame_control;
+ hdrlen = ieee80211_hdrlen(fc);
if (WARN_ON_ONCE(!mvmsta))
return -1;
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
return -1;
- dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
if (!dev_cmd)
goto drop;
}
/* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc));
+ memcpy(tx_cmd->hdr, hdr, hdrlen);
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
rcu_read_unlock();
}
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
iwl_mvm_rx_tx_cmd_single(mvm, pkt);
else
iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
-
- return 0;
}
static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
(void *)(uintptr_t)tid_data->rate_n_flags;
}
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
tid >= IWL_MAX_TID_COUNT,
"sta_id %d tid %d", sta_id, tid))
- return 0;
+ return;
rcu_read_lock();
/* Reclaiming frames for a station that has been deleted ? */
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
- return 0;
+ return;
}
mvmsta = iwl_mvm_sta_from_mac80211(sta);
"invalid BA notification: Q %d, tid %d, flow %d\n",
tid_data->txq_id, tid, scd_flow);
rcu_read_unlock();
- return 0;
+ return;
}
spin_lock_bh(&mvmsta->lock);
skb = __skb_dequeue(&reclaimed_skbs);
ieee80211_tx_status(mvm->hw, skb);
}
-
- return 0;
}
/*
return ret;
}
-int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
u32 flags, u16 len, const void *data)
{
struct iwl_host_cmd cmd = {
goto out_free_resp;
}
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- ret = -EIO;
- goto out_free_resp;
- }
-
resp_len = iwl_rx_packet_payload_len(pkt);
if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
ret = -EIO;
/*
* We assume that the caller set the status to the success value
*/
-int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len,
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
const void *data, u32 *status)
{
struct iwl_host_cmd cmd = {
return fw_rate_idx_to_plcp[rate_idx];
}
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_error_resp *err_resp = (void *)pkt->data;
le32_to_cpu(err_resp->error_service));
IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
le64_to_cpu(err_resp->timestamp));
- return 0;
}
/*
{
struct pci_dev *pdev = to_pci_dev(device);
struct iwl_trans *trans = pci_get_drvdata(pdev);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
/* Before you put code here, think about WoWLAN. You cannot check here
return 0;
/*
- * On suspend, ict is disabled, and the interrupt mask
- * gets cleared. Reconfigure them both in case of d0i3
- * image. Otherwise, only enable rfkill interrupt (in
- * order to keep track of the rfkill status)
+ * Enable rfkill interrupt (in order to keep track of
+ * the rfkill status)
*/
- if (trans->wowlan_d0i3) {
- iwl_pcie_reset_ict(trans);
- iwl_enable_interrupts(trans);
- } else {
- iwl_enable_rfkill_int(trans);
- }
+ iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
+
+ mutex_lock(&trans_pcie->mutex);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ mutex_unlock(&trans_pcie->mutex);
return 0;
}
#include "iwl-io.h"
#include "iwl-op-mode.h"
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
+
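+/* Worked example, quoting IWL_NUM_OF_TBS = 20 from iwl-fh.h at the time
+ * of this patch (the value is stated here for illustration only):
+ * IWL_PCIE_MAX_FRAGS = 20 - 3 = 17 TBs remain available for frags.
+ */
+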
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
+#define RX_LOW_WATERMARK 8
+
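+/* Worked example for the single RX queue configured above:
+ * RX_POOL_SIZE = (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES
+ *             = (8 - 2) * 1 = 6
+ * i.e. the allocator keeps six spare RBDs - the worst-case shortfall when
+ * a request was posted with only two empty RBDs and the queue has not yet
+ * returned the other six.
+ */
+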
struct iwl_host_cmd;
/* This file includes the declarations that are internal to the
* struct iwl_rxq - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @pool:
- * @queue:
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handed to the allocator to use for allocation
* @write_actual:
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
* @need_update: flag to indicate we need to update read/write index
* @rb_stts: driver's pointer to receive buffer status
* @rb_stts_dma: bus address of receive buffer status
* @lock:
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
struct iwl_rxq {
__le32 *bd;
dma_addr_t bd_dma;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
u32 read;
u32 write;
u32 free_count;
+ u32 used_count;
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
+ struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+ struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @pool: initial pool of allocator
+ * @req_pending: number of requests the allocator has not processed yet
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handed to
+ * the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ * of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+ struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
+ atomic_t req_pending;
+ atomic_t req_ready;
+ struct list_head rbd_allocated;
+ struct list_head rbd_empty;
+ spinlock_t lock;
+ struct workqueue_struct *alloc_wq;
+ struct work_struct rx_alloc;
};
struct iwl_dma_ptr {
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
- * @rx_replenish: work that will be called when buffers need to be allocated
+ * @rba: allocator for RX replenishing
* @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
* @rx_buf_size_8k: 8 kB RX buffer size
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: true when ucode supports wide command header format
* @rx_page_order: page order for receive buffer size
* @reg_lock: protect hw register access
+ * @mutex: to protect stop_device / start_fw / start_hw
* @cmd_in_flight: true when we have a host command in flight
* @fw_mon_phys: physical address of the buffer for the firmware monitor
* @fw_mon_page: points to the first page of the buffer for the firmware monitor
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
- struct work_struct rx_replenish;
+ struct iwl_rb_allocator rba;
struct iwl_trans *trans;
struct iwl_drv *drv;
dma_addr_t ict_tbl_dma;
int ict_index;
bool use_ict;
+ bool is_down;
struct isr_statistics isr_stats;
spinlock_t irq_lock;
+ struct mutex mutex;
u32 inta_mask;
u32 scd_base_addr;
struct iwl_dma_ptr scd_bc_tbls;
bool rx_buf_size_8k;
bool bc_table_dword;
bool scd_set_active;
+ bool wide_cmd_header;
u32 rx_page_order;
const char *const *command_names;
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
- struct iwl_rx_cmd_buffer *rxb, int handler_status);
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
* resets the Rx queue buffers with new memory.
*
* The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ * When the interrupt handler is called, the request is processed.
+ * The page is either stolen - transferred to the upper layer
+ * or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ * count, detaches the RBD and transfers it to the queue used list.
+ * When there are two used RBDs - they are transferred to the allocator empty
+ * list. Work is then scheduled for the allocator to start allocating
+ * eight buffers.
+ * When another six RBDs have been used - they too are transferred to the
+ * allocator empty list, and the driver tries to claim the pre-allocated
+ * buffers and add them to iwl->rxq->rx_free. If the claim fails - it keeps
+ * trying until the buffers are ready.
+ * When there are 8+ buffers in the free list - either from allocation or
+ * from 8 reused unstolen pages - restock is called to update the FW and
+ * the indexes.
+ * + In order to make sure the allocator always has RBDs to use for allocation
+ * the allocator has an initial pool of num_queues*(8-2) RBDs - the maximum
+ * number of RBDs that can be missing per allocation request (a request is
+ * posted with 2 empty RBDs, and there is no guarantee when the other 6
+ * RBDs are supplied).
+ * The queues supply the recycling of the rest of the RBDs.
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
- * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
* the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
* If there were enough free buffers and RX_STALLED is set it is cleared.
*
*
* iwl_rxq_alloc() Allocates rx_free
* iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_pcie_rxq_restock
+ * iwl_pcie_rxq_restock.
+ * Used only during initialization.
* iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
* queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_pcie_rx_replenish
+ * the WRITE index.
+ * iwl_pcie_rx_allocator() Background work for allocating pages.
*
* -- enable interrupts --
* ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
* READ INDEX, detaching the SKB from the pool.
* Moves the packet buffer from queue to rx_used.
+ * Posts and claims requests to the allocator.
* Calls iwl_pcie_rxq_restock to refill any empty
* slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
* ...
*
*/
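
/* A compact restatement of the two trigger points described above (a
 * sketch only: post_allocation_request() and try_claim_allocated_buffers()
 * are placeholder names; the real logic lives in iwl_pcie_rx_reuse_rbd()
 * and iwl_pcie_rx_handle()):
 *
 *	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC)
 *		post_allocation_request();		at 2, 10, 18, ...
 *	if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
 *		try_claim_allocated_buffers();		8 buffers at a time
 */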
rxq->free_count--;
}
spin_unlock(&rxq->lock);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- schedule_work(&trans_pcie->rx_replenish);
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
}
}
+/*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
+ gfp_t priority)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct page *page;
+ gfp_t gfp_mask = priority;
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (trans_pcie->rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+ trans_pcie->rx_page_order);
+ /* Issue an error if the hardware has consumed more than half
+ * of its free buffer list and we don't have enough
+ * pre-allocated buffers.
+ */
+ if (rxq->free_count <= RX_LOW_WATERMARK &&
+ iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
+ net_ratelimit())
+ IWL_CRIT(trans,
+ "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
+ rxq->free_count);
+ return NULL;
+ }
+ return page;
+}
+
/*
* iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
*
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
- gfp_t gfp_mask = priority;
while (1) {
spin_lock(&rxq->lock);
}
spin_unlock(&rxq->lock);
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (trans_pcie->rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
/* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(trans, "alloc_pages failed, "
- "order: %d\n",
- trans_pcie->rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(trans, "Failed to alloc_pages with %s."
- "Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ?
- "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
+ page = iwl_pcie_rx_alloc_page(trans, priority);
+ if (!page)
return;
- }
spin_lock(&rxq->lock);
lockdep_assert_held(&rxq->lock);
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ for (i = 0; i < RX_QUEUE_SIZE; i++) {
if (!rxq->pool[i].page)
continue;
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
* When moving to rx_free a page is allocated for the slot.
*
* Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * This is called only during initialization
*/
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
- iwl_pcie_rxq_alloc_rbs(trans, gfp);
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
iwl_pcie_rxq_restock(trans);
}
-static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates for each received request 8 pages
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ struct list_head local_empty;
+ int pending = atomic_xchg(&rba->req_pending, 0);
+
+ IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+
+ /* If we were scheduled - there is at least one request */
+ spin_lock(&rba->lock);
+ /* swap out the rba->rbd_empty to a local list */
+ list_replace_init(&rba->rbd_empty, &local_empty);
+ spin_unlock(&rba->lock);
+
+ while (pending) {
+ int i;
+ struct list_head local_allocated;
+
+ INIT_LIST_HEAD(&local_allocated);
+
+ for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+
+ /* List should never be empty - each reused RBD is
+ * returned to the list, and initial pool covers any
+ * possible gap between the time the page is allocated
+ * and the time the RBD is added.
+ */
+ BUG_ON(list_empty(&local_empty));
+ /* Get the first rxb from the rbd list */
+ rxb = list_first_entry(&local_empty,
+ struct iwl_rx_mem_buffer, list);
+ BUG_ON(rxb->page);
+
+ /* Alloc a new receive buffer */
+ page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
+ if (!page)
+ continue;
+ rxb->page = page;
+
+ /* Get physical address of the RB */
+ rxb->page_dma = dma_map_page(trans->dev, page, 0,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ rxb->page = NULL;
+ __free_pages(page, trans_pcie->rx_page_order);
+ continue;
+ }
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ /* move the allocated entry to the out list */
+ list_move(&rxb->list, &local_allocated);
+ i++;
+ }
+
+ pending--;
+ if (!pending) {
+ pending = atomic_xchg(&rba->req_pending, 0);
+ IWL_DEBUG_RX(trans,
+ "Pending allocation requests = %d\n",
+ pending);
+ }
+
+ spin_lock(&rba->lock);
+ /* add the allocated rbds to the allocator allocated list */
+ list_splice_tail(&local_allocated, &rba->rbd_allocated);
+ /* get more empty RBDs for current pending requests */
+ list_splice_tail_init(&rba->rbd_empty, &local_empty);
+ spin_unlock(&rba->lock);
+
+ atomic_inc(&rba->req_ready);
+ }
+
+ spin_lock(&rba->lock);
+ /* return unused rbds to the allocator empty list */
+ list_splice_tail(&local_empty, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+}
+
+/*
+ * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ *
+ * Called by the queue when it has posted an allocation request and
+ * has freed 8 RBDs in order to restock itself.
+ */
+static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+ struct iwl_rx_mem_buffer
+ *out[RX_CLAIM_REQ_ALLOC])
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i;
+
+ /*
+ * atomic_dec_if_positive returns req_ready - 1 for any scenario.
+ * If req_ready is 0 atomic_dec_if_positive will return -1 and this
+ * function will return -ENOMEM, as there are no ready requests.
+ * atomic_dec_if_positive will perform the *actual* decrement only if
+ * req_ready > 0, i.e. - there are ready requests and the function
+ * hands one request to the caller.
+ */
+ if (atomic_dec_if_positive(&rba->req_ready) < 0)
+ return -ENOMEM;
+
+ spin_lock(&rba->lock);
+ for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+ /* Get next free Rx buffer, remove it from free list */
+ out[i] = list_first_entry(&rba->rbd_allocated,
+ struct iwl_rx_mem_buffer, list);
+ list_del(&out[i]->list);
+ }
+ spin_unlock(&rba->lock);
+
+ return 0;
+}
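+
+/* For reference, the atomic_dec_if_positive() contract relied on above,
+ * shown with illustrative values (generic kernel API, not driver code):
+ *
+ *	atomic_t v = ATOMIC_INIT(0);
+ *	atomic_dec_if_positive(&v);	returns -1, v stays 0
+ *	atomic_set(&v, 2);
+ *	atomic_dec_if_positive(&v);	returns  1, v becomes 1
+ *	atomic_dec_if_positive(&v);	returns  0, v becomes 0
+ */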
+
+static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+{
+ struct iwl_rb_allocator *rba_p =
+ container_of(data, struct iwl_rb_allocator, rx_alloc);
struct iwl_trans_pcie *trans_pcie =
- container_of(data, struct iwl_trans_pcie, rx_replenish);
+ container_of(rba_p, struct iwl_trans_pcie, rba);
- iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+ iwl_pcie_rx_allocator(trans_pcie->trans);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct device *dev = trans->dev;
memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
spin_lock_init(&rxq->lock);
+ spin_lock_init(&rba->lock);
if (WARN_ON(rxq->bd || rxq->rb_stts))
return -EINVAL;
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
rxq->free_count = 0;
+ rxq->used_count = 0;
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
list_add(&rxq->pool[i].list, &rxq->rx_used);
}
+static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
+{
+ int i;
+
+ lockdep_assert_held(&rba->lock);
+
+ INIT_LIST_HEAD(&rba->rbd_allocated);
+ INIT_LIST_HEAD(&rba->rbd_empty);
+
+ for (i = 0; i < RX_POOL_SIZE; i++)
+ list_add(&rba->pool[i].list, &rba->rbd_empty);
+}
+
+static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i;
+
+ lockdep_assert_held(&rba->lock);
+
+ for (i = 0; i < RX_POOL_SIZE; i++) {
+ if (!rba->pool[i].page)
+ continue;
+ dma_unmap_page(trans->dev, rba->pool[i].page_dma,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
+ rba->pool[i].page = NULL;
+ }
+}
+
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, err;
if (!rxq->bd) {
if (err)
return err;
}
+ if (!rba->alloc_wq)
+ rba->alloc_wq = alloc_workqueue("rb_allocator",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+
+ spin_lock(&rba->lock);
+ atomic_set(&rba->req_pending, 0);
+ atomic_set(&rba->req_ready, 0);
+ /* free all first - we might be reconfigured for a different size */
+ iwl_pcie_rx_free_rba(trans);
+ iwl_pcie_rx_init_rba(rba);
+ spin_unlock(&rba->lock);
spin_lock(&rxq->lock);
- INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
-
/* free all first - we might be reconfigured for a different size */
iwl_pcie_rxq_free_rbs(trans);
iwl_pcie_rx_init_rxb_lists(rxq);
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+ iwl_pcie_rx_replenish(trans);
iwl_pcie_rx_hw_init(trans, rxq);
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
/*if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
return;
}
- cancel_work_sync(&trans_pcie->rx_replenish);
+ cancel_work_sync(&rba->rx_alloc);
+ if (rba->alloc_wq) {
+ destroy_workqueue(rba->alloc_wq);
+ rba->alloc_wq = NULL;
+ }
+
+ spin_lock(&rba->lock);
+ iwl_pcie_rx_free_rba(trans);
+ spin_unlock(&rba->lock);
spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
rxq->rb_stts = NULL;
}
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when a RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rxq *rxq, bool emergency)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+ /* Move the RBD to the used list, will be moved to allocator in batches
+ * before claiming or posting a request */
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ if (unlikely(emergency))
+ return;
+
+ /* Count the allocator owned RBDs */
+ rxq->used_count++;
+
+ /* If we have RX_POST_REQ_ALLOC new released rx buffers -
+ * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
+ * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
+ * but we still need to post another request afterwards.
+ */
+ if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+ /* Move the 2 RBDs to the allocator ownership.
+ * Allocator has another 6 from pool for the request completion */
+ spin_lock(&rba->lock);
+ list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+
+ atomic_inc(&rba->req_pending);
+ queue_work(rba->alloc_wq, &rba->rx_alloc);
+ }
+}
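+
+/* A short worked trace of used_count for one queue (illustrative values):
+ *
+ *	used_count == 1: RBD parked on rxq->rx_used, nothing else happens
+ *	used_count == 2: 2 % 8 == 2, the two RBDs move to rba->rbd_empty,
+ *		req_pending is bumped and the allocator work is queued
+ *	used_count == 8: handled in iwl_pcie_rx_handle() - the remaining
+ *		six RBDs are handed over and the eight pre-allocated
+ *		buffers are claimed back, dropping used_count to 0
+ */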
+
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
- struct iwl_rx_mem_buffer *rxb)
+ struct iwl_rx_mem_buffer *rxb,
+ bool emergency)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
struct iwl_rx_packet *pkt;
- struct iwl_device_cmd *cmd;
u16 sequence;
bool reclaim;
- int index, cmd_index, err, len;
+ int index, cmd_index, len;
struct iwl_rx_cmd_buffer rxcb = {
._offset = offset,
._rx_page_order = trans_pcie->rx_page_order,
index = SEQ_TO_INDEX(sequence);
cmd_index = get_cmd_index(&txq->q, index);
- if (reclaim)
- cmd = txq->entries[cmd_index].cmd;
- else
- cmd = NULL;
-
- err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+ iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);
if (reclaim) {
kzfree(txq->entries[cmd_index].free_buf);
* iwl_trans_send_cmd()
* as we reclaim the driver command queue */
if (!rxcb._page_stolen)
- iwl_pcie_hcmd_complete(trans, &rxcb, err);
+ iwl_pcie_hcmd_complete(trans, &rxcb);
else
IWL_WARN(trans, "Claim null rxb?\n");
}
*/
__free_pages(rxb->page, trans_pcie->rx_page_order);
rxb->page = NULL;
- list_add_tail(&rxb->list, &rxq->rx_used);
+ iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
} else {
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
} else
- list_add_tail(&rxb->list, &rxq->rx_used);
+ iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}
/*
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- u32 r, i;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty;
+ u32 r, i, j, count = 0;
+ bool emergency = false;
restart:
spin_lock(&rxq->lock);
if (i == r)
IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
- /* calculate total frames need to be restock after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
-
while (i != r) {
struct iwl_rx_mem_buffer *rxb;
+ if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
+ emergency = true;
+
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
r, i, rxb);
- iwl_pcie_rx_handle_rb(trans, rxb);
+ iwl_pcie_rx_handle_rb(trans, rxb, emergency);
i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
- * restock the Rx queue so ucode wont assert. */
- if (fill_rx) {
+
+ /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+ * try to claim the pre-allocated buffers from the allocator */
+ if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
+
+ if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
+ !emergency) {
+ /* Add the remaining 6 empty RBDs
+ * for allocator use
+ */
+ spin_lock(&rba->lock);
+ list_splice_tail_init(&rxq->rx_used,
+ &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+ }
+
+ /* If not ready - continue, will try to reclaim later.
+ * No need to reschedule work - allocator exits only on
+ * success */
+ if (!iwl_pcie_rx_allocator_get(trans, out)) {
+ /* If success - then RX_CLAIM_REQ_ALLOC
+ * buffers were retrieved and should be added
+ * to free list */
+ rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+ for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
+ list_add_tail(&out[j]->list,
+ &rxq->rx_free);
+ rxq->free_count++;
+ }
+ }
+ }
+ if (emergency) {
count++;
- if (count >= 8) {
- rxq->read = i;
- spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+ if (count == 8) {
count = 0;
- goto restart;
+ if (rxq->used_count < RX_QUEUE_SIZE / 3)
+ emergency = false;
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+ spin_lock(&rxq->lock);
}
}
+ /* handle restock for three cases, can be all of them at once:
+ * - we just pulled buffers from the allocator
+ * - we have 8+ unstolen pages accumulated
+ * - we are in emergency and allocated buffers
+ */
+ if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
+ rxq->read = i;
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rxq_restock(trans);
+ goto restart;
+ }
}
/* Backtrack one entry */
rxq->read = i;
spin_unlock(&rxq->lock);
- if (fill_rx)
- iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
- else
- iwl_pcie_rxq_restock(trans);
+ /*
+ * handle a case where in emergency there are some unallocated RBDs.
+ * those RBDs are in the used list, but are not tracked by the queue's
+ * used_count which counts allocator owned RBDs.
+ * unallocated emergency RBDs must be allocated on exit, otherwise
+ * when called again the function may not be in emergency mode and
+ * they will be handed to the allocator with no tracking in the RBD
+ * allocator counters, which will lead to them never being claimed back
+ * by the queue.
+ * by allocating them here, they are now in the queue free list, and
+ * will be restocked by the next call of iwl_pcie_rxq_restock.
+ */
+ if (unlikely(emergency && count))
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
if (trans_pcie->napi.poll)
napi_gro_flush(&trans_pcie->napi, false);
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
if (trans->cfg->internal_wimax_coex &&
iwl_trans_fw_error(trans);
local_bh_enable();
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
+ del_timer(&trans_pcie->txq[i].stuck_timer);
+
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
wake_up(&trans_pcie->wait_command_queue);
}
isr_stats->rfkill++;
+ mutex_lock(&trans_pcie->mutex);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
set_bit(STATUS_RFKILL, &trans->status);
if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
- val |= CSR_DRAM_INT_TBL_ENABLE;
- val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+ val |= CSR_DRAM_INT_TBL_ENABLE |
+ CSR_DRAM_INIT_TBL_WRAP_CHECK |
+ CSR_DRAM_INIT_TBL_WRITE_POINTER;
IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
last_read_idx = i;
+ /*
+ * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
+ * CPU1 to CPU2.
+ * PAGING_SEPARATOR_SECTION delimiter - separate between
+ * CPU2 non paged to CPU2 paging sec.
+ */
if (!image->sec[i].data ||
- image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+ image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
IWL_DEBUG_FW(trans,
"Break since Data not valid or Empty section, sec = %d\n",
i);
for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
last_read_idx = i;
+ /*
+ * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
+ * CPU1 to CPU2.
+ * PAGING_SEPARATOR_SECTION delimiter - separate between
+ * CPU2 non paged to CPU2 paging sec.
+ */
if (!image->sec[i].data ||
- image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+ image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
IWL_DEBUG_FW(trans,
"Break since Data not valid or Empty section, sec = %d\n",
i);
case PRPH_CLEARBIT:
iwl_clear_bits_prph(trans, addr, BIT(val));
break;
+ case PRPH_BLOCKBIT:
+ if (iwl_read_prph(trans, addr) & BIT(val)) {
+ IWL_ERR(trans,
+ "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
+ val, addr);
+ goto monitor;
+ }
+ break;
default:
IWL_ERR(trans, "FW debug - unknown OP %d\n",
dest->reg_ops[i].op);
}
}
+monitor:
if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
trans_pcie->fw_mon_phys >> dest->base_shift);
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill)
{
- int ret;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
+ int ret;
+
+ mutex_lock(&trans_pcie->mutex);
+
+ /* Someone called stop_device, don't try to start_fw */
+ if (trans_pcie->is_down) {
+ IWL_WARN(trans,
+ "Can't start_fw since the HW hasn't been started\n");
+ ret = -EIO;
+ goto out;
+ }
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- return -EIO;
+ ret = -EIO;
+ goto out;
}
iwl_enable_rfkill_int(trans);
else
clear_bit(STATUS_RFKILL, &trans->status);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
- if (hw_rfkill && !run_in_rfkill)
- return -ERFKILL;
+ if (hw_rfkill && !run_in_rfkill) {
+ ret = -ERFKILL;
+ goto out;
+ }
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
ret = iwl_pcie_nic_init(trans);
if (ret) {
IWL_ERR(trans, "Unable to init nic\n");
- return ret;
+ goto out;
}
/* make sure rfkill handshake bits are cleared */
/* Load the given image to the HW */
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
- return iwl_pcie_load_given_ucode_8000(trans, fw);
+ ret = iwl_pcie_load_given_ucode_8000(trans, fw);
else
- return iwl_pcie_load_given_ucode(trans, fw);
+ ret = iwl_pcie_load_given_ucode(trans, fw);
+
+out:
+ mutex_unlock(&trans_pcie->mutex);
+ return ret;
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
iwl_pcie_tx_start(trans, scd_addr);
}
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill, was_hw_rfkill;
+ lockdep_assert_held(&trans_pcie->mutex);
+
+ if (trans_pcie->is_down)
+ return;
+
+ trans_pcie->is_down = true;
+
was_hw_rfkill = iwl_is_rfkill_set(trans);
/* tell the device to stop sending interrupts */
iwl_pcie_prepare_card_hw(trans);
}
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ mutex_lock(&trans_pcie->mutex);
+ _iwl_trans_pcie_stop_device(trans, low_power);
+ mutex_unlock(&trans_pcie->mutex);
+}
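+
+/* The locking shape introduced here, in isolation (a generic sketch with
+ * made-up names, not driver code): the public entry point serializes on
+ * the mutex, while the underscore-prefixed variant only asserts it, so
+ * paths that already hold the mutex - e.g. the rfkill flow below - can
+ * call it directly:
+ *
+ *	static void do_stop(struct foo *f)
+ *	{
+ *		mutex_lock(&f->mutex);
+ *		_do_stop(f);		calls lockdep_assert_held() inside
+ *		mutex_unlock(&f->mutex);
+ *	}
+ */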
+
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
+ struct iwl_trans_pcie __maybe_unused *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->mutex);
+
if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
- iwl_trans_pcie_stop_device(trans, true);
+ _iwl_trans_pcie_stop_device(trans, true);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans->wowlan_d0i3) {
+ /* Enable persistence mode to avoid reset */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+ }
+
iwl_disable_interrupts(trans);
/*
iwl_pcie_disable_ict(trans);
+ synchronize_irq(trans_pcie->pci_dev->irq);
+
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
- /*
- * reset TX queues -- some of their registers reset during S3
- * so if we don't reset everything here the D3 image would try
- * to execute some invalid memory upon resume
- */
- iwl_trans_pcie_tx_reset(trans);
+ if (!trans->wowlan_d0i3) {
+ /*
+ * reset TX queues -- some of their registers reset during S3
+ * so if we don't reset everything here the D3 image would try
+ * to execute some invalid memory upon resume
+ */
+ iwl_trans_pcie_tx_reset(trans);
+ }
iwl_pcie_set_pwr(trans, true);
}
iwl_pcie_set_pwr(trans, false);
- iwl_trans_pcie_tx_reset(trans);
+ if (trans->wowlan_d0i3) {
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ } else {
+ iwl_trans_pcie_tx_reset(trans);
- ret = iwl_pcie_rx_init(trans);
- if (ret) {
- IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
- return ret;
+ ret = iwl_pcie_rx_init(trans);
+ if (ret) {
+ IWL_ERR(trans,
+ "Failed to resume the device (RX reset)\n");
+ return ret;
+ }
}
val = iwl_read32(trans, CSR_RESET);
return 0;
}
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
int err;
+ lockdep_assert_held(&trans_pcie->mutex);
+
err = iwl_pcie_prepare_card_hw(trans);
if (err) {
IWL_ERR(trans, "Error while preparing HW: %d\n", err);
/* From now on, the op_mode will be kept updated about RF kill state */
iwl_enable_rfkill_int(trans);
+ /* Set is_down to false here so that... */
+ trans_pcie->is_down = false;
+
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
set_bit(STATUS_RFKILL, &trans->status);
else
clear_bit(STATUS_RFKILL, &trans->status);
+ /* ... rfkill can call stop_device and set it false if needed */
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
return 0;
}
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ mutex_lock(&trans_pcie->mutex);
+ ret = _iwl_trans_pcie_start_hw(trans, low_power);
+ mutex_unlock(&trans_pcie->mutex);
+
+ return ret;
+}
+
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ mutex_lock(&trans_pcie->mutex);
+
/* disable interrupts - don't enable HW RF kill interrupt */
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_disable_ict(trans);
+
+ mutex_unlock(&trans_pcie->mutex);
+
+ synchronize_irq(trans_pcie->pci_dev->irq);
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
else
trans_pcie->rx_page_order = get_order(4 * 1024);
+ trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
trans_pcie->command_names = trans_cfg->command_names;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
* As this function may be called again in some corner cases don't
* do anything if NAPI was already initialized.
*/
- if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+ if (!trans_pcie->napi.poll) {
init_dummy_netdev(&trans_pcie->napi_dev);
- iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
- &trans_pcie->napi_dev,
- iwl_pcie_dummy_napi_poll, 64);
+ netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
+ iwl_pcie_dummy_napi_poll, 64);
}
}
return prph_len;
}
+static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data,
+ int allocated_rb_nums)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ u32 i, r, j, rb_len = 0;
+
+ spin_lock(&rxq->lock);
+
+ r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+
+ for (i = rxq->read, j = 0;
+ i != r && j < allocated_rb_nums;
+ i = (i + 1) & RX_QUEUE_MASK, j++) {
+ struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
+ struct iwl_fw_error_dump_rb *rb;
+
+ dma_unmap_page(trans->dev, rxb->page_dma, max_len,
+ DMA_FROM_DEVICE);
+
+ rb_len += sizeof(**data) + sizeof(*rb) + max_len;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
+ (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
+ rb = (void *)(*data)->data;
+ rb->index = cpu_to_le32(i);
+ memcpy(rb->data, page_address(rxb->page), max_len);
+ /* remap the page so it can be freed or reused as usual later */
+ rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
+ max_len,
+ DMA_FROM_DEVICE);
+
+ *data = iwl_fw_error_next_data(*data);
+ }
+
+ spin_unlock(&rxq->lock);
+
+ return rb_len;
+}
#define IWL_CSR_TO_DUMP (0x250)
static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
return monitor_len;
}
-static
-struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
+static u32
+iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data,
+ u32 monitor_len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 len = 0;
+
+ if ((trans_pcie->fw_mon_page &&
+ trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+ trans->dbg_dest_tlv) {
+ struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+ u32 base, write_ptr, wrap_cnt;
+
+ /* If there was a dest TLV - use the values from there */
+ if (trans->dbg_dest_tlv) {
+ write_ptr =
+ le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+ wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ } else {
+ base = MON_BUFF_BASE_ADDR;
+ write_ptr = MON_BUFF_WRPTR;
+ wrap_cnt = MON_BUFF_CYCLE_CNT;
+ }
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+ fw_mon_data = (void *)(*data)->data;
+ fw_mon_data->fw_mon_wr_ptr =
+ cpu_to_le32(iwl_read_prph(trans, write_ptr));
+ fw_mon_data->fw_mon_cycle_cnt =
+ cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+ fw_mon_data->fw_mon_base_ptr =
+ cpu_to_le32(iwl_read_prph(trans, base));
+
+ len += sizeof(**data) + sizeof(*fw_mon_data);
+ if (trans_pcie->fw_mon_page) {
+ /*
+ * The firmware is now asserted, it won't write anything
+ * to the buffer. CPU can take ownership to fetch the
+ * data. The buffer will be handed back to the device
+ * before the firmware will be restarted.
+ */
+ dma_sync_single_for_cpu(trans->dev,
+ trans_pcie->fw_mon_phys,
+ trans_pcie->fw_mon_size,
+ DMA_FROM_DEVICE);
+ memcpy(fw_mon_data->data,
+ page_address(trans_pcie->fw_mon_page),
+ trans_pcie->fw_mon_size);
+
+ monitor_len = trans_pcie->fw_mon_size;
+ } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+ /*
+ * Update pointers to reflect actual values after
+ * shifting
+ */
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg_dest_tlv->base_shift;
+ iwl_trans_read_mem(trans, base, fw_mon_data->data,
+ monitor_len / sizeof(u32));
+ } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+ monitor_len =
+ iwl_trans_pci_dump_marbh_monitor(trans,
+ fw_mon_data,
+ monitor_len);
+ } else {
+ /* Didn't match anything - output no monitor data */
+ monitor_len = 0;
+ }
+
+ len += monitor_len;
+ (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
+ }
+
+ return len;
+}
+
+static struct iwl_trans_dump_data
+*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
- u32 len;
+ u32 len, num_rbs;
u32 monitor_len;
int i, ptr;
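+ /* dump RX buffers only when the firmware actually hit an error */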
+ bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
/* transport dump header */
len = sizeof(*dump_data);
len += sizeof(*data) +
cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
- /* CSR registers */
- len += sizeof(*data) + IWL_CSR_TO_DUMP;
-
- /* PRPH registers */
- for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
- /* The range includes both boundaries */
- int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
- iwl_prph_dump_addr[i].start + 4;
-
- len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
- num_bytes_in_chunk;
- }
-
- /* FH registers */
- len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
-
/* FW monitor */
if (trans_pcie->fw_mon_page) {
len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
monitor_len = 0;
}
+ if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+ dump_data = vzalloc(len);
+ if (!dump_data)
+ return NULL;
+
+ data = (void *)dump_data->data;
+ len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+ dump_data->len = len;
+
+ return dump_data;
+ }
+
+ /* CSR registers */
+ len += sizeof(*data) + IWL_CSR_TO_DUMP;
+
+ /* PRPH registers */
+ for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+ /* The range includes both boundaries */
+ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+ iwl_prph_dump_addr[i].start + 4;
+
+ len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
+ num_bytes_in_chunk;
+ }
+
+ /* FH registers */
+ len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+
+ if (dump_rbs) {
+ /* RBs */
+ num_rbs = le16_to_cpu(ACCESS_ONCE(
+ trans_pcie->rxq.rb_stts->closed_rb_num))
+ & 0x0FFF;
+ num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
+ len += num_rbs * (sizeof(*data) +
+ sizeof(struct iwl_fw_error_dump_rb) +
+ (PAGE_SIZE << trans_pcie->rx_page_order));
+ }
+
dump_data = vzalloc(len);
if (!dump_data)
return NULL;
len += iwl_trans_pcie_dump_prph(trans, &data);
len += iwl_trans_pcie_dump_csr(trans, &data);
len += iwl_trans_pcie_fh_regs_dump(trans, &data);
- /* data is already pointing to the next section */
+ if (dump_rbs)
+ len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
- if ((trans_pcie->fw_mon_page &&
- trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
- trans->dbg_dest_tlv) {
- struct iwl_fw_error_dump_fw_mon *fw_mon_data;
- u32 base, write_ptr, wrap_cnt;
-
- /* If there was a dest TLV - use the values from there */
- if (trans->dbg_dest_tlv) {
- write_ptr =
- le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
- wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
- base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
- } else {
- base = MON_BUFF_BASE_ADDR;
- write_ptr = MON_BUFF_WRPTR;
- wrap_cnt = MON_BUFF_CYCLE_CNT;
- }
-
- data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
- fw_mon_data = (void *)data->data;
- fw_mon_data->fw_mon_wr_ptr =
- cpu_to_le32(iwl_read_prph(trans, write_ptr));
- fw_mon_data->fw_mon_cycle_cnt =
- cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
- fw_mon_data->fw_mon_base_ptr =
- cpu_to_le32(iwl_read_prph(trans, base));
-
- len += sizeof(*data) + sizeof(*fw_mon_data);
- if (trans_pcie->fw_mon_page) {
- /*
- * The firmware is now asserted, it won't write anything
- * to the buffer. CPU can take ownership to fetch the
- * data. The buffer will be handed back to the device
- * before the firmware will be restarted.
- */
- dma_sync_single_for_cpu(trans->dev,
- trans_pcie->fw_mon_phys,
- trans_pcie->fw_mon_size,
- DMA_FROM_DEVICE);
- memcpy(fw_mon_data->data,
- page_address(trans_pcie->fw_mon_page),
- trans_pcie->fw_mon_size);
-
- monitor_len = trans_pcie->fw_mon_size;
- } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
- /*
- * Update pointers to reflect actual values after
- * shifting
- */
- base = iwl_read_prph(trans, base) <<
- trans->dbg_dest_tlv->base_shift;
- iwl_trans_read_mem(trans, base, fw_mon_data->data,
- monitor_len / sizeof(u32));
- } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
- monitor_len =
- iwl_trans_pci_dump_marbh_monitor(trans,
- fw_mon_data,
- monitor_len);
- } else {
- /* Didn't match anything - output no monitor data */
- monitor_len = 0;
- }
-
- len += monitor_len;
- data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
- }
+ len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
dump_data->len = len;
if (!trans)
return ERR_PTR(-ENOMEM);
+ trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
+
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
spin_lock_init(&trans_pcie->ref_lock);
+ mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
ret = pci_enable_device(pdev);
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
sta_id = tx_cmd->sta_id;
sec_ctl = tx_cmd->sec_ctl;
if (trans_pcie->bc_table_dword)
len = DIV_ROUND_UP(len, 4);
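+ /* check the (possibly dword-scaled) length before writing the table entry */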
+ if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+ return;
+
bc_ent = cpu_to_le16(len | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
/* first TB is never freed - it's the scratchbuf data */
- for (i = 1; i < num_tbs; i++)
- dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
- iwl_pcie_tfd_tb_get_len(tfd, i),
- DMA_TO_DEVICE);
-
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+ dma_unmap_page(trans->dev,
+ iwl_pcie_tfd_tb_get_addr(tfd, i),
+ iwl_pcie_tfd_tb_get_len(tfd, i),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ iwl_pcie_tfd_tb_get_addr(tfd, i),
+ iwl_pcie_tfd_tb_get_len(tfd, i),
+ DMA_TO_DEVICE);
+ }
tfd->num_tbs = 0;
}
iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
- return 0;
+ return num_tbs;
}
static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
}
}
+ iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
if (trans->cfg->base_params->num_of_queues > 20)
iwl_set_bits_prph(trans, SCD_GP_CTRL,
SCD_GP_CTRL_ENABLE_31_QUEUES);
int idx;
u16 copy_size, cmd_size, scratch_size;
bool had_nocopy = false;
+ u8 group_id = iwl_cmd_groupid(cmd->id);
int i, ret;
u32 cmd_pos;
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
- copy_size = sizeof(out_cmd->hdr);
- cmd_size = sizeof(out_cmd->hdr);
+ if (WARN(!trans_pcie->wide_cmd_header &&
+ group_id > IWL_ALWAYS_LONG_GROUP,
+ "unsupported wide command %#x\n", cmd->id))
+ return -EINVAL;
+
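+ /* wide commands use an extended header carrying a group id and version */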
+ if (group_id != 0) {
+ copy_size = sizeof(struct iwl_cmd_header_wide);
+ cmd_size = sizeof(struct iwl_cmd_header_wide);
+ } else {
+ copy_size = sizeof(struct iwl_cmd_header);
+ cmd_size = sizeof(struct iwl_cmd_header);
+ }
/* need one for the header if the first is NOCOPY */
BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
out_meta->source = cmd;
/* set up the header */
-
- out_cmd->hdr.cmd = cmd->id;
- out_cmd->hdr.flags = 0;
- out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ if (group_id != 0) {
+ out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+ out_cmd->hdr_wide.group_id = group_id;
+ out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+ out_cmd->hdr_wide.length =
+ cpu_to_le16(cmd_size -
+ sizeof(struct iwl_cmd_header_wide));
+ out_cmd->hdr_wide.reserved = 0;
+ out_cmd->hdr_wide.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ INDEX_TO_SEQ(q->write_ptr));
+
+ cmd_pos = sizeof(struct iwl_cmd_header_wide);
+ copy_size = sizeof(struct iwl_cmd_header_wide);
+ } else {
+ out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
+ out_cmd->hdr.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ INDEX_TO_SEQ(q->write_ptr));
+ out_cmd->hdr.group_id = 0;
+
+ cmd_pos = sizeof(struct iwl_cmd_header);
+ copy_size = sizeof(struct iwl_cmd_header);
+ }
/* and copy the data that needs to be copied */
- cmd_pos = offsetof(struct iwl_device_cmd, payload);
- copy_size = sizeof(out_cmd->hdr);
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
int copy;
}
IWL_DEBUG_HC(trans,
- "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+ "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
- out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+ group_id, out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence),
cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
/* start the TFD with the scratchbuf */
iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
}
+ BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
+ sizeof(out_meta->flags) * BITS_PER_BYTE);
out_meta->flags = cmd->flags;
if (WARN_ON_ONCE(txq->entries[idx].free_buf))
kzfree(txq->entries[idx].free_buf);
txq->entries[idx].free_buf = dup_buf;
- trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
+ trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
/* start timer if queue currently empty */
if (q->read_ptr == q->write_ptr && txq->wd_timeout)
/*
* iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
* @rxb: Rx buffer to reclaim
- * @handler_status: return value of the handler of the command
- * (put in setup_rx_handlers)
*
* If an Rx buffer has an async callback associated with it, the callback
* will be executed. The attached skb (if present) will only be freed
* if the callback returns 1
*/
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
- struct iwl_rx_cmd_buffer *rxb, int handler_status)
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
meta->source->resp_pkt = pkt;
meta->source->_rx_page_addr = (unsigned long)page_address(p);
meta->source->_rx_page_order = trans_pcie->rx_page_order;
- meta->source->handler_status = handler_status;
}
iwl_pcie_cmdq_reclaim(trans, txq_id, index);
struct iwl_device_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr;
struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq;
void *tb1_addr;
u16 len, tb1_len, tb2_len;
bool wait_write_ptr;
- __le16 fc = hdr->frame_control;
- u8 hdr_len = ieee80211_hdrlen(fc);
+ __le16 fc;
+ u8 hdr_len;
u16 wifi_seq;
+ int i;
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
"TX on unused queue %d\n", txq_id))
return -EINVAL;
+ if (skb_is_nonlinear(skb) &&
+ skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+ __skb_linearize(skb))
+ return -ENOMEM;
+
+ /* mac80211 always puts the full header into the SKB's head,
+ * so there's no need to check if it's readable there
+ */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+ hdr_len = ieee80211_hdrlen(fc);
+
spin_lock(&txq->lock);
/* In AGG mode, the index in the ring must correspond to the WiFi
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_meta = &txq->entries[q->write_ptr].meta;
+ out_meta->flags = 0;
/*
* The second TB (tb1) points to the remainder of the TX command
/*
* Set up TFD's third entry to point directly to remainder
- * of skb, if any (802.11 null frames have no payload).
+ * of skb's head, if any
*/
- tb2_len = skb->len - hdr_len;
+ tb2_len = skb_headlen(skb) - hdr_len;
if (tb2_len > 0) {
dma_addr_t tb2_phys = dma_map_single(trans->dev,
skb->data + hdr_len,
iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
}
+ /* set up the remaining entries to point to the data */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t tb_phys;
+ int tb_idx;
+
+ if (!skb_frag_size(frag))
+ continue;
+
+ tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ iwl_pcie_tfd_unmap(trans, out_meta,
+ &txq->tfds[q->write_ptr]);
+ goto out_err;
+ }
+ tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+ skb_frag_size(frag), false);
+
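+ /* mark this TB as page-mapped so teardown uses dma_unmap_page() */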
+ out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+ }
+
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
&dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
skb->data + hdr_len, tb2_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb,
- skb->data + hdr_len, tb2_len);
+ hdr_len, skb->len - hdr_len);
wait_write_ptr = ieee80211_has_morefrags(fc);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, TDLS_WIDER_BW);
if (rctbl)
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
dev->netdev_ops = &hwsim_netdev_ops;
dev->destructor = free_netdev;
ether_setup(dev);
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->type = ARPHRD_IEEE80211_RADIOTAP;
eth_zero_addr(dev->dev_addr);
dev->dev_addr[0] = 0x12;
goto failure;
rc = netlink_register_notifier(&hwsim_netlink_notifier);
- if (rc)
+ if (rc) {
+ genl_unregister_family(&hwsim_genl_family);
goto failure;
+ }
return 0;
if (!skb)
return;
- ieee80211_rx_ni(dev->hw, skb);
+ spin_lock(&dev->mac_lock);
+ ieee80211_rx(dev->hw, skb);
+ spin_unlock(&dev->mac_lock);
}
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
skb = q->e[q->start].skb;
trace_mt_tx_dma_done(dev, skb);
- mt7601u_tx_status(dev, skb);
+ __skb_queue_tail(&dev->tx_skb_done, skb);
+ tasklet_schedule(&dev->tx_tasklet);
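+ /* the actual status report now happens in mt7601u_tx_tasklet() */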
if (q->used == q->entries - q->entries / 8)
ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
q->start = (q->start + 1) % q->entries;
q->used--;
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
- if (urb->status)
- goto out;
+static void mt7601u_tx_tasklet(unsigned long data)
+{
+ struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+ struct sk_buff_head skbs;
+ unsigned long flags;
+
+ __skb_queue_head_init(&skbs);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
queue_delayed_work(dev->stat_wq, &dev->stat_work,
msecs_to_jiffies(10));
-out:
+
+ skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+
+ mt7601u_tx_status(dev, skb);
+ }
}
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
{
int ret = -ENOMEM;
+ tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
ret = mt7601u_alloc_tx(dev);
mt7601u_free_rx(dev);
mt7601u_free_tx(dev);
+
+ tasklet_kill(&dev->tx_tasklet);
}
spin_lock_init(&dev->tx_lock);
spin_lock_init(&dev->rx_lock);
spin_lock_init(&dev->lock);
+ spin_lock_init(&dev->mac_lock);
spin_lock_init(&dev->con_mon_lock);
atomic_set(&dev->avg_ampdu_len, 1);
+ skb_queue_head_init(&dev->tx_skb_done);
dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
if (!dev->stat_wq) {
}
mt76_mac_fill_tx_status(dev, &info, stat);
+
+ spin_lock_bh(&dev->mac_lock);
ieee80211_tx_status_noskb(dev->hw, sta, &info);
+ spin_unlock_bh(&dev->mac_lock);
+
rcu_read_unlock();
}
/**
* struct mt7601u_dev - adapter structure
* @lock: protects @wcid->tx_rate.
+ * @mac_lock: locks out mac80211's tx status and rx paths.
* @tx_lock: protects @tx_q and changes of MT7601U_STATE_*_STATS
- flags in @state.
+ * flags in @state.
* @rx_lock: protects @rx_q.
* @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
* @mutex: ensures exclusive access from mac80211 callbacks.
- * @vendor_req_mutex: ensures atomicity of vendor requests.
+ * @vendor_req_mutex: protects @vend_buf, ensures atomicity of split writes.
* @reg_atomic_mutex: ensures atomicity of indirect register accesses
* (accesses to RF and BBP).
* @hw_atomic_mutex: ensures exclusive access to HW during critical
struct mt76_wcid __rcu *wcid[N_WCIDS];
spinlock_t lock;
+ spinlock_t mac_lock;
const u16 *beacon_offsets;
struct mt7601u_eeprom_params *ee;
struct mutex vendor_req_mutex;
+ void *vend_buf;
+
struct mutex reg_atomic_mutex;
struct mutex hw_atomic_mutex;
/* TX */
spinlock_t tx_lock;
+ struct tasklet_struct tx_tasklet;
struct mt7601u_tx_queue *tx_q;
+ struct sk_buff_head tx_skb_done;
atomic_t avg_ampdu_len;
ieee80211_tx_info_clear_status(info);
info->status.rates[0].idx = -1;
info->flags |= IEEE80211_TX_STAT_ACK;
+
+ spin_lock(&dev->mac_lock);
ieee80211_tx_status(dev->hw, skb);
+ spin_unlock(&dev->mac_lock);
}
static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
complete(cmpl);
}
-static int
-__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
- const u8 direction, const u16 val, const u16 offset,
- void *buf, const size_t buflen)
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen)
{
int i, ret;
struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
buf, buflen, ret);
+ if (ret == -ENODEV)
+ set_bit(MT7601U_STATE_REMOVED, &dev->state);
if (ret >= 0 || ret == -ENODEV)
return ret;
return ret;
}
-int
-mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
- const u8 direction, const u16 val, const u16 offset,
- void *buf, const size_t buflen)
-{
- int ret;
-
- mutex_lock(&dev->vendor_req_mutex);
-
- ret = __mt7601u_vendor_request(dev, req, direction, val, offset,
- buf, buflen);
- if (ret == -ENODEV)
- set_bit(MT7601U_STATE_REMOVED, &dev->state);
-
- mutex_unlock(&dev->vendor_req_mutex);
-
- return ret;
-}
-
void mt7601u_vendor_reset(struct mt7601u_dev *dev)
{
mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
{
int ret;
- __le32 reg;
- u32 val;
+ u32 val = ~0;
WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+ mutex_lock(&dev->vendor_req_mutex);
+
ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
- 0, offset, &reg, sizeof(reg));
- val = le32_to_cpu(reg);
- if (ret > 0 && ret != sizeof(reg)) {
+ 0, offset, dev->vend_buf, MT_VEND_BUF);
+ if (ret == MT_VEND_BUF)
+ val = get_unaligned_le32(dev->vend_buf);
+ else if (ret > 0)
dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
ret, offset);
- val = ~0;
- }
+
+ mutex_unlock(&dev->vendor_req_mutex);
trace_reg_read(dev, offset, val);
return val;
{
int ret;
+ mutex_lock(&dev->vendor_req_mutex);
+
ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
val & 0xffff, offset, NULL, 0);
- if (ret)
- return ret;
- return mt7601u_vendor_request(dev, req, USB_DIR_OUT,
- val >> 16, offset + 2, NULL, 0);
+ if (!ret)
+ ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+ val >> 16, offset + 2, NULL, 0);
+
+ mutex_unlock(&dev->vendor_req_mutex);
+
+ return ret;
}
void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
usb_set_intfdata(usb_intf, dev);
+ dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL);
+ if (!dev->vend_buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
ret = mt7601u_assign_pipes(usb_intf, dev);
if (ret)
goto err;
#define MT_VEND_DEV_MODE_RESET 1
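+/* size of the heap-allocated scratch buffer used for vendor register reads */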
+#define MT_VEND_BUF sizeof(__le32)
+
enum mt_vendor_req {
MT_VEND_DEV_MODE = 1,
MT_VEND_WRITE = 2,
mwifiex.
config MWIFIEX_SDIO
- tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897"
+ tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997"
depends on MWIFIEX && MMC
select FW_LOADER
select WANT_DEV_COREDUMP
---help---
This adds support for wireless adapters based on Marvell
- 8786/8787/8797/8887/8897 chipsets with SDIO interface.
+ 8786/8787/8797/8887/8897/8997 chipsets with SDIO interface.
If you choose to build it as a module, it will be called
mwifiex_sdio.
config MWIFIEX_PCIE
- tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
+ tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897/8997"
depends on MWIFIEX && PCI
select FW_LOADER
select WANT_DEV_COREDUMP
---help---
This adds support for wireless adapters based on Marvell
- 8766/8897 chipsets with PCIe interface.
+ 8766/8897/8997 chipsets with PCIe interface.
If you choose to build it as a module, it will be called
mwifiex_pcie.
config MWIFIEX_USB
- tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897"
+ tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997"
depends on MWIFIEX && USB
select FW_LOADER
---help---
This adds support for wireless adapters based on Marvell
- 8797/8897 chipset with USB interface.
+ 8797/8897/8997 chipset with USB interface.
If you choose to build it as a module, it will be called
mwifiex_usb.
mwifiex_dbg(adapter, ERROR,
"DNLD_CMD: FW in reset state, ignore cmd %#x\n",
cmd_code);
- if (cmd_node->wait_q_enabled)
- mwifiex_complete_cmd(adapter, cmd_node);
mwifiex_recycle_cmd_node(adapter, cmd_node);
queue_work(adapter->workqueue, &adapter->main_work);
return -1;
adapter->is_cmd_timedout = 0;
resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
- if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
- mwifiex_dbg(adapter, ERROR,
- "CMD_RESP: %#x been canceled\n",
- le16_to_cpu(resp->command));
- mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
- adapter->curr_cmd = NULL;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
- return -1;
- }
-
if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
/* Copy original response back to response buffer */
struct mwifiex_ds_misc_cmd *hostcmd;
if (cmd_node->wait_q_enabled) {
adapter->cmd_wait_q.status = -ETIMEDOUT;
- wake_up_interruptible(&adapter->cmd_wait_q.wait);
mwifiex_cancel_pending_ioctl(adapter);
}
}
- if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
mwifiex_init_fw_complete(adapter);
+ return;
+ }
if (adapter->if_ops.device_dump)
adapter->if_ops.device_dump(adapter);
adapter->curr_cmd->wait_q_enabled = false;
adapter->cmd_wait_q.status = -1;
mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ /* don't recycle the node; the caller is probably still waiting for a response */
}
/* Cancel all pending command */
spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
- if (cmd_node->wait_q_enabled) {
+ if (cmd_node->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
- mwifiex_complete_cmd(adapter, cmd_node);
- cmd_node->wait_q_enabled = false;
- }
mwifiex_recycle_cmd_node(adapter, cmd_node);
spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
}
(adapter->curr_cmd->wait_q_enabled)) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
cmd_node = adapter->curr_cmd;
- cmd_node->wait_q_enabled = false;
- cmd_node->cmd_flag |= CMD_F_CANCELED;
- mwifiex_recycle_cmd_node(adapter, cmd_node);
- mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ /* Setting curr_cmd to NULL is quite dangerous, because
+ * mwifiex_process_cmdresp checks curr_cmd to be != NULL
+ * at the beginning and then relies on it and dereferences
+ * it at will.
+ * This probably works since mwifiex_cmd_timeout_func
+ * is the only caller of this function and no responses
+ * arrive at that point.
+ */
adapter->curr_cmd = NULL;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+
+ mwifiex_recycle_cmd_node(adapter, cmd_node);
}
/* Cancel all pending scan command */
}
}
}
- adapter->cmd_wait_q.status = -1;
}
/*
#define CMD_F_HOSTCMD (1 << 0)
-#define CMD_F_CANCELED (1 << 1)
#define HostCmd_CMD_ID_MASK 0x0fff
enum mwifiex_chan_scan_mode_bitmasks {
MWIFIEX_PASSIVE_SCAN = BIT(0),
MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
+ MWIFIEX_HIDDEN_SSID_REPORT = BIT(4),
};
struct mwifiex_chan_scan_param_set {
adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM;
adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
-
+ adapter->active_scan_triggered = false;
setup_timer(&adapter->wakeup_timer, wakeup_timer_fn,
(unsigned long)adapter);
}
}
}
- if (adapter->if_ops.init_fw_port) {
- if (adapter->if_ops.init_fw_port(adapter))
- return -1;
- }
-
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
struct mwifiex_11h_intf_state state_11h;
struct mwifiex_ds_mem_rw mem_rw;
struct sk_buff_head bypass_txq;
+ struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
};
u8 coex_tx_win_size;
u8 coex_rx_win_size;
bool drcs_enabled;
+ u8 active_scan_triggered;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
{
PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- .driver_data = (unsigned long) &mwifiex_pcie8766,
+ .driver_data = (unsigned long)&mwifiex_pcie8766,
},
{
PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- .driver_data = (unsigned long) &mwifiex_pcie8897,
+ .driver_data = (unsigned long)&mwifiex_pcie8897,
+ },
+ {
+ PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ .driver_data = (unsigned long)&mwifiex_pcie8997,
},
{},
};
card->txbd_rdptr++;
break;
case PCIE_DEVICE_ID_MARVELL_88W8897:
+ case PCIE_DEVICE_ID_MARVELL_88W8997:
card->txbd_rdptr += reg->ring_tx_start_ptr;
break;
}
card->txbd_wrptr++;
break;
case PCIE_DEVICE_ID_MARVELL_88W8897:
+ case PCIE_DEVICE_ID_MARVELL_88W8997:
card->txbd_wrptr += reg->ring_tx_start_ptr;
break;
}
if (!card->evt_buf_list[rdptr]) {
skb_push(skb, INTF_HEADER_LEN);
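+ /* pad the event skb to its full size and zero it before DMA mapping */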
+ skb_put(skb, MAX_EVENT_SIZE - skb->len);
+ memset(skb->data, 0, MAX_EVENT_SIZE);
if (mwifiex_map_pci_memory(adapter, skb,
MAX_EVENT_SIZE,
PCI_DMA_FROMDEVICE))
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME);
MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(PCIE8997_DEFAULT_FW_NAME);
#define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
#define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
+#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcie8997_uapsta.bin"
#define PCIE_VENDOR_ID_MARVELL (0x11ab)
#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30)
#define PCIE_DEVICE_ID_MARVELL_88W8897 (0x2b38)
+#define PCIE_DEVICE_ID_MARVELL_88W8997 (0x2b42)
/* Constants for Buffer Descriptor (BD) rings */
#define MWIFIEX_MAX_TXRX_BD 0x20
.sleep_cookie = 0,
.fw_dump_ctrl = 0xcf4,
.fw_dump_start = 0xcf8,
- .fw_dump_end = 0xcff
+ .fw_dump_end = 0xcff,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
+ .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+ .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+ .cmd_size = PCIE_SCRATCH_2_REG,
+ .fw_status = PCIE_SCRATCH_3_REG,
+ .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+ .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+ .tx_rdptr = 0xC1A4,
+ .tx_wrptr = 0xC1A8,
+ .rx_rdptr = 0xC1A8,
+ .rx_wrptr = 0xC1A4,
+ .evt_rdptr = PCIE_SCRATCH_10_REG,
+ .evt_wrptr = PCIE_SCRATCH_11_REG,
+ .drv_rdy = PCIE_SCRATCH_12_REG,
+ .tx_start_ptr = 16,
+ .tx_mask = 0x0FFF0000,
+ .tx_wrap_mask = 0x01FF0000,
+ .rx_mask = 0x00000FFF,
+ .rx_wrap_mask = 0x000001FF,
+ .tx_rollover_ind = BIT(28),
+ .rx_rollover_ind = BIT(12),
+ .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+ .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+ .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+ .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+ .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+ .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+ .pfu_enabled = 1,
+ .sleep_cookie = 0,
};
struct mwifiex_pcie_device {
.can_ext_scan = true,
};
+static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
+ .firmware = PCIE8997_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_8997,
+ .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .can_dump_fw = false,
+ .can_ext_scan = true,
+};
+
struct mwifiex_evt_buf_desc {
u64 paddr;
u16 len;
return 1;
break;
case PCIE_DEVICE_ID_MARVELL_88W8897:
+ case PCIE_DEVICE_ID_MARVELL_88W8997:
if (((card->txbd_wrptr & reg->tx_mask) !=
(card->txbd_rdptr & reg->tx_mask)) ||
((card->txbd_wrptr & reg->tx_rollover_ind) ==
if (ch->flags & IEEE80211_CHAN_NO_IR)
scan_chan_list[chan_idx].chan_scan_mode_bitmap
- |= MWIFIEX_PASSIVE_SCAN;
+ |= (MWIFIEX_PASSIVE_SCAN |
+ MWIFIEX_HIDDEN_SSID_REPORT);
else
scan_chan_list[chan_idx].chan_scan_mode_bitmap
&= ~MWIFIEX_PASSIVE_SCAN;
if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
(scan_chan_list +
chan_idx)->chan_scan_mode_bitmap
- |= MWIFIEX_PASSIVE_SCAN;
+ |= (MWIFIEX_PASSIVE_SCAN |
+ MWIFIEX_HIDDEN_SSID_REPORT);
else
(scan_chan_list +
chan_idx)->chan_scan_mode_bitmap
return ret;
}
+/* This function checks whether the SSID string is zero-length or all zeroes */
+static bool mwifiex_is_hidden_ssid(struct cfg80211_ssid *ssid)
+{
+ int idx;
+
+ for (idx = 0; idx < ssid->ssid_len; idx++) {
+ if (ssid->ssid[idx])
+ return false;
+ }
+
+ return true;
+}
+
+/* This function checks whether a hidden SSID was found on passive scan
+ * channels and saves those channels for an SSID-specific active scan
+ */
+static int mwifiex_save_hidden_ssid_channels(struct mwifiex_private *priv,
+ struct cfg80211_bss *bss)
+{
+ struct mwifiex_bssdescriptor *bss_desc;
+ int ret;
+ int chid;
+
+ /* Allocate and fill new bss descriptor */
+ bss_desc = kzalloc(sizeof(*bss_desc), GFP_KERNEL);
+ if (!bss_desc)
+ return -ENOMEM;
+
+ ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
+ if (ret)
+ goto done;
+
+ if (mwifiex_is_hidden_ssid(&bss_desc->ssid)) {
+ mwifiex_dbg(priv->adapter, INFO, "found hidden SSID\n");
+ for (chid = 0 ; chid < MWIFIEX_USER_SCAN_CHAN_MAX; chid++) {
+ if (priv->hidden_chan[chid].chan_number ==
+ bss->channel->hw_value)
+ break;
+
+ if (!priv->hidden_chan[chid].chan_number) {
+ priv->hidden_chan[chid].chan_number =
+ bss->channel->hw_value;
+ priv->hidden_chan[chid].radio_type =
+ bss->channel->band;
+ priv->hidden_chan[chid].scan_type =
+ MWIFIEX_SCAN_TYPE_ACTIVE;
+ break;
+ }
+ }
+ }
+
+done:
+ kfree(bss_desc);
+ return 0;
+}
+
static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
struct cfg80211_bss *bss)
{
.mac_address, ETH_ALEN))
mwifiex_update_curr_bss_params(priv, bss);
cfg80211_put_bss(priv->wdev.wiphy, bss);
+
+ if ((chan->flags & IEEE80211_CHAN_RADAR) ||
+ (chan->flags & IEEE80211_CHAN_NO_IR)) {
+ mwifiex_dbg(adapter, INFO,
+ "radar or passive channel %d\n",
+ channel);
+ mwifiex_save_hidden_ssid_channels(priv, bss);
+ }
}
} else {
mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n");
}
}
+/* This function checks whether any hidden SSID was found on passive scan
+ * channels and performs an SSID-specific active scan on those channels
+ */
+static int
+mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
+{
+ int ret;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 id = 0;
+ struct mwifiex_user_scan_cfg *user_scan_cfg;
+
+ if (adapter->active_scan_triggered) {
+ adapter->active_scan_triggered = false;
+ return 0;
+ }
+
+ if (!priv->hidden_chan[0].chan_number) {
+ mwifiex_dbg(adapter, INFO, "No BSS with hidden SSID found on DFS channels\n");
+ return 0;
+ }
+ user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
+
+ if (!user_scan_cfg)
+ return -ENOMEM;
+
+ memset(user_scan_cfg, 0, sizeof(*user_scan_cfg));
+
+ for (id = 0; id < MWIFIEX_USER_SCAN_CHAN_MAX; id++) {
+ if (!priv->hidden_chan[id].chan_number)
+ break;
+ memcpy(&user_scan_cfg->chan_list[id],
+ &priv->hidden_chan[id],
+ sizeof(struct mwifiex_user_scan_chan));
+ }
+
+ adapter->active_scan_triggered = true;
+ user_scan_cfg->num_ssids = priv->scan_request->n_ssids;
+ user_scan_cfg->ssid_list = priv->scan_request->ssids;
+
+ ret = mwifiex_scan_networks(priv, user_scan_cfg);
+ kfree(user_scan_cfg);
+
+ memset(&priv->hidden_chan, 0, sizeof(priv->hidden_chan));
+
+ if (ret) {
+ dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ mwifiex_active_scan_req_for_passive_chan(priv);
+
if (!adapter->ext_scan)
mwifiex_complete_scan(priv);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
- if (priv->scan_request) {
- mwifiex_dbg(adapter, INFO,
- "info: aborting scan\n");
- cfg80211_scan_done(priv->scan_request, 1);
- priv->scan_request = NULL;
- } else {
- priv->scan_aborting = false;
- mwifiex_dbg(adapter, INFO,
- "info: scan already aborted\n");
+ if (!adapter->active_scan_triggered) {
+ if (priv->scan_request) {
+ mwifiex_dbg(adapter, INFO,
+ "info: aborting scan\n");
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ } else {
+ priv->scan_aborting = false;
+ mwifiex_dbg(adapter, INFO,
+ "info: scan already aborted\n");
+ }
}
} else {
/* Get scan command from scan_pending_q and put to
static struct semaphore add_remove_card_sem;
+static struct memory_type_mapping generic_mem_type_map[] = {
+ {"DUMP", NULL, 0, 0xDD},
+};
+
static struct memory_type_mapping mem_type_mapping_tbl[] = {
{"ITCM", NULL, 0, 0xF0},
{"DTCM", NULL, 0, 0xF1},
return -ENOMEM;
card->func = func;
+ card->device_id = id;
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
card->can_dump_fw = data->can_dump_fw;
+ card->fw_dump_enh = data->fw_dump_enh;
card->can_auto_tdls = data->can_auto_tdls;
card->can_ext_scan = data->can_ext_scan;
}
#define SDIO_DEVICE_ID_MARVELL_8887 (0x9135)
/* Device ID for SD8801 */
#define SDIO_DEVICE_ID_MARVELL_8801 (0x9139)
+/* Device ID for SD8997 */
+#define SDIO_DEVICE_ID_MARVELL_8997 (0x9141)
/* WLAN IDs */
.driver_data = (unsigned long)&mwifiex_sdio_sd8887},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
+ .driver_data = (unsigned long)&mwifiex_sdio_sd8997},
{},
};
if (!fwbuf)
return -ENOMEM;
+ sdio_claim_host(card->func);
+
/* Perform firmware data transfer */
do {
/* The host polls for the DN_LD_CARD_RDY and CARD_IO_READY
offset += txlen;
} while (true);
+ sdio_release_host(card->func);
+
mwifiex_dbg(adapter, MSG,
"info: FW download over, size %d bytes\n", offset);
adapter->dev = &func->dev;
strcpy(adapter->fw_name, card->firmware);
- adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
- adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+ if (card->fw_dump_enh) {
+ adapter->mem_type_mapping_tbl = generic_mem_type_map;
+ adapter->num_mem_types = 1;
+ } else {
+ adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
+ adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+ }
return 0;
}
port, card->mp_data_port_mask);
}
+static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
+{
+ struct sdio_func *func = card->func;
+ const struct sdio_device_id *device_id = card->device_id;
+
+ /* TODO mmc_hw_reset does not require destroying and re-probing the
+ * whole adapter. Hence there was no need for this Rube Goldberg
+ * design to reload the fw from an external workqueue. If we don't
+ * destroy the adapter we could reload the fw from
+ * mwifiex_main_work_queue directly.
+ * The real difficulty with fw reset is to restore all the user
+ * settings applied through ioctl. By destroying and recreating the
+ * adapter, we take the easy way out, since we rely on user space to
+ * restore them. We assume that user space will treat the new
+ * incarnation of the adapter (interfaces) as if it had just been
+ * discovered and initialize it from scratch.
+ */
+
+ mwifiex_sdio_remove(func);
+
+ /* power cycle the adapter */
+ sdio_claim_host(func);
+ mmc_hw_reset(func->card->host);
+ sdio_release_host(func);
+
+ mwifiex_sdio_probe(func, device_id);
+}
+
static struct mwifiex_adapter *save_adapter;
static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
- struct mmc_host *target = card->func->card->host;
-
- /* The actual reset operation must be run outside of driver thread.
- * This is because mmc_remove_host() will cause the device to be
- * instantly destroyed, and the driver then needs to end its thread,
- * leading to a deadlock.
- *
- * We run it in a totally independent workqueue.
- */
- mwifiex_dbg(adapter, WARN, "Resetting card...\n");
- mmc_remove_host(target);
- /* 200ms delay is based on experiment with sdhci controller */
- mdelay(200);
- target->rescan_entered = 0; /* rescan non-removable cards */
- mmc_add_host(target);
+ /* TODO the card pointer is unprotected. If the adapter is removed
+ * physically, the sdio core might trigger mwifiex_sdio_remove before
+ * this workqueue is run, which will destroy the adapter struct. When
+ * this workqueue eventually executes, it will dereference an invalid
+ * adapter pointer.
+ */
+ mwifiex_recreate_adapter(card);
}
/* This function reads/writes firmware */
int ret, tries;
u8 ctrl_data = 0;
- sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
- &ret);
+ sdio_writeb(card->func, card->reg->fw_dump_host_ready,
+ card->reg->fw_dump_ctrl, &ret);
if (ret) {
mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n");
return RDWR_STATUS_FAILURE;
break;
if (doneflag && ctrl_data == doneflag)
return RDWR_STATUS_DONE;
- if (ctrl_data != FW_DUMP_HOST_READY) {
+ if (ctrl_data != card->reg->fw_dump_host_ready) {
mwifiex_dbg(adapter, WARN,
- "The ctrl reg was changed, re-try again!\n");
- sdio_writeb(card->func, FW_DUMP_HOST_READY,
+ "The ctrl reg was changed, re-try again\n");
+ sdio_writeb(card->func, card->reg->fw_dump_host_ready,
card->reg->fw_dump_ctrl, &ret);
if (ret) {
mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
}
usleep_range(100, 200);
}
- if (ctrl_data == FW_DUMP_HOST_READY) {
+ if (ctrl_data == card->reg->fw_dump_host_ready) {
mwifiex_dbg(adapter, ERROR,
"Fail to pull ctrl_data\n");
return RDWR_STATUS_FAILURE;
sdio_release_host(card->func);
}
+static void mwifiex_sdio_generic_fw_dump(struct mwifiex_adapter *adapter)
+{
+ struct sdio_mmc_card *card = adapter->card;
+ struct memory_type_mapping *entry = &generic_mem_type_map[0];
+ unsigned int reg, reg_start, reg_end;
+ u8 start_flag = 0, done_flag = 0;
+ u8 *dbg_ptr, *end_ptr;
+ enum rdwr_status stat;
+ int ret = -1, tries;
+
+ if (!card->fw_dump_enh)
+ return;
+
+ if (entry->mem_ptr) {
+ vfree(entry->mem_ptr);
+ entry->mem_ptr = NULL;
+ }
+ entry->mem_size = 0;
+
+ mwifiex_pm_wakeup_card(adapter);
+ sdio_claim_host(card->func);
+
+ mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
+
+ stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+ if (stat == RDWR_STATUS_FAILURE)
+ goto done;
+
+ reg_start = card->reg->fw_dump_start;
+ reg_end = card->reg->fw_dump_end;
+ for (reg = reg_start; reg <= reg_end; reg++) {
+ for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
+ start_flag = sdio_readb(card->func, reg, &ret);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR,
+ "SDIO read err\n");
+ goto done;
+ }
+ if (start_flag == 0)
+ break;
+ if (tries == MAX_POLL_TRIES - 1) {
+ mwifiex_dbg(adapter, ERROR,
+ "FW not ready to dump\n");
+ ret = -1;
+ goto done;
+ }
+ }
+ usleep_range(100, 200);
+ }
+
+ entry->mem_ptr = vmalloc(0xf0000 + 1);
+ if (!entry->mem_ptr) {
+ ret = -1;
+ goto done;
+ }
+ dbg_ptr = entry->mem_ptr;
+ entry->mem_size = 0xf0000;
+ end_ptr = dbg_ptr + entry->mem_size;
+
+ done_flag = entry->done_flag;
+ mwifiex_dbg(adapter, DUMP,
+ "Start %s output, please wait...\n", entry->mem_name);
+
+ while (true) {
+ stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+ if (stat == RDWR_STATUS_FAILURE)
+ goto done;
+ for (reg = reg_start; reg <= reg_end; reg++) {
+ *dbg_ptr = sdio_readb(card->func, reg, &ret);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR,
+ "SDIO read err\n");
+ goto done;
+ }
+ dbg_ptr++;
+ if (dbg_ptr >= end_ptr) {
+ u8 *tmp_ptr;
+
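+ /* dump buffer is full: grow it by 16 KB (0x4000) and continue */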
+ tmp_ptr = vmalloc(entry->mem_size + 0x4000 + 1);
+ if (!tmp_ptr)
+ goto done;
+
+ memcpy(tmp_ptr, entry->mem_ptr,
+ entry->mem_size);
+ vfree(entry->mem_ptr);
+ entry->mem_ptr = tmp_ptr;
+ tmp_ptr = NULL;
+ dbg_ptr = entry->mem_ptr + entry->mem_size;
+ entry->mem_size += 0x4000;
+ end_ptr = entry->mem_ptr + entry->mem_size;
+ }
+ }
+ if (stat == RDWR_STATUS_DONE) {
+ entry->mem_size = dbg_ptr - entry->mem_ptr;
+ mwifiex_dbg(adapter, DUMP, "dump %s done size=0x%x\n",
+ entry->mem_name, entry->mem_size);
+ ret = 0;
+ break;
+ }
+ }
+ mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
+
+done:
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "firmware dump failed\n");
+ if (entry->mem_ptr) {
+ vfree(entry->mem_ptr);
+ entry->mem_ptr = NULL;
+ }
+ entry->mem_size = 0;
+ }
+ sdio_release_host(card->func);
+}
+
static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
{
+ struct sdio_mmc_card *card = adapter->card;
+
mwifiex_drv_info_dump(adapter);
- mwifiex_sdio_fw_dump(adapter);
+ if (card->fw_dump_enh)
+ mwifiex_sdio_generic_fw_dump(adapter);
+ else
+ mwifiex_sdio_fw_dump(adapter);
mwifiex_upload_device_dump(adapter);
}
MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
+#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
#define BLOCK_MODE 1
#define BYTE_MODE 0
u8 cmd_cfg_1;
u8 cmd_cfg_2;
u8 cmd_cfg_3;
+ u8 fw_dump_host_ready;
u8 fw_dump_ctrl;
u8 fw_dump_start;
u8 fw_dump_end;
bool supports_sdio_new_mode;
bool has_control_mask;
bool can_dump_fw;
+ bool fw_dump_enh;
bool can_auto_tdls;
bool can_ext_scan;
struct mwifiex_sdio_mpa_tx mpa_tx;
struct mwifiex_sdio_mpa_rx mpa_rx;
+
+ /* needed for card reset */
+ const struct sdio_device_id *device_id;
};
struct mwifiex_sdio_device {
bool supports_sdio_new_mode;
bool has_control_mask;
bool can_dump_fw;
+ bool fw_dump_enh;
bool can_auto_tdls;
bool can_ext_scan;
};
.cmd_cfg_1 = 0xb9,
.cmd_cfg_2 = 0xba,
.cmd_cfg_3 = 0xbb,
+ .fw_dump_host_ready = 0xee,
.fw_dump_ctrl = 0xe2,
.fw_dump_start = 0xe3,
.fw_dump_end = 0xea,
0x59, 0x5c, 0x5d},
};
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0xF8,
+ .base_1_reg = 0xF9,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0xE8,
+ .status_reg_1 = 0xE9,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_ctrl = 0xf0,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0xe8,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+ 0x60, 0x61, 0x62, 0x64,
+ 0x65, 0x66, 0x68, 0x69,
+ 0x6a},
+};
+
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
.start_rd_port = 0,
.start_wr_port = 0,
.can_ext_scan = true,
};
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+ .firmware = SD8997_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8997,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
.firmware = SD8887_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd8887,
case ACT_TDLS_DELETE:
if (reason) {
if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
- mwifiex_dbg(priv->adapter, ERROR,
+ mwifiex_dbg(priv->adapter, MSG,
"TDLS link delete for %pM failed: reason %d\n",
cmd_tdls_oper->peer_mac, reason);
else
if (status <= 0) {
if (status == 0)
status = -ETIMEDOUT;
- mwifiex_dbg(adapter, ERROR,
- "cmd_wait_q terminated: %d\n", status);
+ mwifiex_dbg(adapter, ERROR, "cmd_wait_q terminated: %d\n",
+ status);
mwifiex_cancel_all_pending_cmd(adapter);
return status;
}
pos = (void *)skb_put(skb, 4);
*pos++ = WLAN_EID_AID;
*pos++ = 2;
- *pos++ = le16_to_cpu(assoc_rsp->a_id);
+ memcpy(pos, &assoc_rsp->a_id, sizeof(assoc_rsp->a_id));
return;
}
mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilities:",
event->data, event->len);
+ skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE);
+
while ((evt_len >= sizeof(tlv_hdr->header))) {
tlv_hdr = (struct mwifiex_ie_types_data *)curr;
tlv_len = le16_to_cpu(tlv_hdr->header.len);
{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
USB_CLASS_VENDOR_SPEC,
USB_SUBCLASS_VENDOR_SPEC, 0xff)},
+ /* 8997 */
+ {USB_DEVICE(USB8XXX_VID, USB8997_PID_1)},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC, 0xff)},
{ } /* Terminating entry */
};
case USB8797_PID_1:
case USB8801_PID_1:
case USB8897_PID_1:
+ case USB8997_PID_1:
card->usb_boot_state = USB8XXX_FW_DNLD;
break;
case USB8766_PID_2:
case USB8797_PID_2:
case USB8801_PID_2:
case USB8897_PID_2:
+ case USB8997_PID_2:
card->usb_boot_state = USB8XXX_FW_READY;
break;
default:
adapter->dev = &card->udev->dev;
switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
+ case USB8997_PID_1:
+ case USB8997_PID_2:
+ adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME);
+ adapter->ext_scan = true;
+ break;
case USB8897_PID_1:
case USB8897_PID_2:
adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
/* Allocate memory for transmit */
fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL);
- if (!fwdata)
+ if (!fwdata) {
+ ret = -ENOMEM;
goto fw_exit;
+ }
/* Allocate memory for receive */
recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(USB8997_DEFAULT_FW_NAME);
#define USB8897_PID_2 0x2046
#define USB8801_PID_1 0x2049
#define USB8801_PID_2 0x204a
+#define USB8997_PID_1 0x204d
+#define USB8997_PID_2 0x204e
#define USB8XXX_FW_DNLD 1
#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
#define USB8801_DEFAULT_FW_NAME "mrvl/usb8801_uapsta.bin"
#define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin"
+#define USB8997_DEFAULT_FW_NAME "mrvl/usb8997_uapsta.bin"
#define FW_DNLD_TX_BUF_SIZE 620
#define FW_DNLD_RX_BUF_SIZE 2048
int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter)
{
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_READY)
+ if (adapter->if_ops.init_fw_port)
+ adapter->if_ops.init_fw_port(adapter);
+
adapter->init_wait_q_woken = true;
wake_up_interruptible(&adapter->init_wait_q);
return 0;
int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node)
{
- mwifiex_dbg(adapter, CMD,
- "cmd completed: status=%d\n",
+ WARN_ON(!cmd_node->wait_q_enabled);
+ mwifiex_dbg(adapter, CMD, "cmd completed: status=%d\n",
adapter->cmd_wait_q.status);
- *(cmd_node->condition) = true;
-
- if (adapter->cmd_wait_q.status == -ETIMEDOUT)
- mwifiex_dbg(adapter, ERROR, "cmd timeout\n");
- else
- wake_up_interruptible(&adapter->cmd_wait_q.wait);
+ *cmd_node->condition = true;
+ wake_up_interruptible(&adapter->cmd_wait_q.wait);
return 0;
}
memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
rcu_read_unlock();
- }
- if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
- !priv->ap_fw) {
- rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
- if (rc)
- goto out;
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (!priv->ap_fw) {
+ rc = mwl8k_cmd_set_rate(hw, vif,
+ ap_legacy_rates,
+ ap_mcs_rates);
+ if (rc)
+ goto out;
- rc = mwl8k_cmd_use_fixed_rate_sta(hw);
- if (rc)
- goto out;
- } else {
- if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
- priv->ap_fw) {
- int idx;
- int rate;
+ rc = mwl8k_cmd_use_fixed_rate_sta(hw);
+ if (rc)
+ goto out;
+ } else {
+ int idx;
+ int rate;
- /* Use AP firmware specific rate command.
- */
- idx = ffs(vif->bss_conf.basic_rates);
- if (idx)
- idx--;
+ /* Use AP firmware specific rate command.
+ */
+ idx = ffs(vif->bss_conf.basic_rates);
+ if (idx)
+ idx--;
- if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
- rate = mwl8k_rates_24[idx].hw_value;
- else
- rate = mwl8k_rates_50[idx].hw_value;
+ if (hw->conf.chandef.chan->band ==
+ IEEE80211_BAND_2GHZ)
+ rate = mwl8k_rates_24[idx].hw_value;
+ else
+ rate = mwl8k_rates_50[idx].hw_value;
- mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+ mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+ }
}
}
struct orinoco_rx_data *rx_data, *temp;
struct orinoco_scan_data *sd, *sdtemp;
- wiphy_unregister(wiphy);
-
/* If the tasklet is scheduled when we call tasklet_kill it
* will run one final time. However the tasklet will only
* drain priv->rx_list if the hw is still available. */
orinoco_cs_release(link);
+ wiphy_unregister(priv_to_wiphy(priv));
free_orinocodev(priv);
} /* orinoco_cs_detach */
err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto fail;
+ goto fail_wiphy;
}
pci_set_drvdata(pdev, priv);
return 0;
+ fail_wiphy:
+ wiphy_unregister(priv_to_wiphy(priv));
fail:
free_irq(pdev->irq, priv);
iowrite16(0, card->bridge_io + 10);
orinoco_if_del(priv);
+ wiphy_unregister(priv_to_wiphy(priv));
free_irq(pdev->irq, priv);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto fail;
+ goto fail_wiphy;
}
pci_set_drvdata(pdev, priv);
return 0;
+ fail_wiphy:
+ wiphy_unregister(priv_to_wiphy(priv));
fail:
free_irq(pdev->irq, priv);
struct orinoco_private *priv = pci_get_drvdata(pdev);
orinoco_if_del(priv);
+ wiphy_unregister(priv_to_wiphy(priv));
free_irq(pdev->irq, priv);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto fail;
+ goto fail_wiphy;
}
pci_set_drvdata(pdev, priv);
return 0;
+ fail_wiphy:
+ wiphy_unregister(priv_to_wiphy(priv));
fail:
free_irq(pdev->irq, priv);
struct orinoco_pci_card *card = priv->card;
orinoco_if_del(priv);
+ wiphy_unregister(priv_to_wiphy(priv));
free_irq(pdev->irq, priv);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
if (upriv->dev) {
struct orinoco_private *priv = ndev_priv(upriv->dev);
orinoco_if_del(priv);
+ wiphy_unregister(priv_to_wiphy(upriv));
free_orinocodev(priv);
}
}
if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) {
upriv->dev = NULL;
err("%s: orinoco_if_add() failed", __func__);
+ wiphy_unregister(priv_to_wiphy(priv));
goto error;
}
upriv->dev = priv->ndev;
config RT2X00_LIB
tristate
- select AVERAGE
config RT2X00_LIB_FIRMWARE
bool
#define CSR_REG_BASE 0x0400
#define CSR_REG_SIZE 0x0100
#define EEPROM_BASE 0x0000
-#define EEPROM_SIZE 0x006a
+#define EEPROM_SIZE 0x006e
#define BBP_BASE 0x0000
#define BBP_SIZE 0x0060
#define RF_BASE 0x0004
int tx_failed;
};
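+/* generates struct ewma_rssi and the ewma_rssi_init/add/read helpers */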
+DECLARE_EWMA(rssi, 1024, 8)
+
/*
* Antenna settings about the currently active link.
*/
* Similar to the avg_rssi in the link_qual structure
* this value is updated by using the walking average.
*/
- struct ewma rssi_ant;
+ struct ewma_rssi rssi_ant;
};
/*
/*
* Currently active average RSSI value
*/
- struct ewma avg_rssi;
+ struct ewma_rssi avg_rssi;
/*
* Work structure for scheduling periodic link tuning.
*/
#define DEFAULT_RSSI -128
-/* Constants for EWMA calculations. */
-#define RT2X00_EWMA_FACTOR 1024
-#define RT2X00_EWMA_WEIGHT 8
-
-static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
+static inline int rt2x00link_get_avg_rssi(struct ewma_rssi *ewma)
{
unsigned long avg;
- avg = ewma_read(ewma);
+ avg = ewma_rssi_read(ewma);
if (avg)
return -avg;
static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
{
- ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
- RT2X00_EWMA_WEIGHT);
+ ewma_rssi_init(&rt2x00dev->link.ant.rssi_ant);
}
static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
/*
* Update global RSSI
*/
- ewma_add(&link->avg_rssi, -rxdesc->rssi);
+ ewma_rssi_add(&link->avg_rssi, -rxdesc->rssi);
/*
* Update antenna RSSI
*/
- ewma_add(&ant->rssi_ant, -rxdesc->rssi);
+ ewma_rssi_add(&ant->rssi_ant, -rxdesc->rssi);
}
void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
*/
rt2x00dev->link.count = 0;
memset(qual, 0, sizeof(*qual));
- ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
- RT2X00_EWMA_WEIGHT);
+ ewma_rssi_init(&rt2x00dev->link.avg_rssi);
/*
* Restore the VGC level as stored in the registers,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl92c_firmware_header *pfwheader;
+ struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
int err;
if (!rtlhal->pfirmware)
return 1;
- pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+ pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
"Firmware Version(%d), Signature(%#x), Size(%d)\n",
pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtl92c_firmware_header));
+ (int)sizeof(struct rtlwifi_firmware_header));
- pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
- fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+ pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+ fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
}
if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
#define FW_8192C_POLLING_TIMEOUT_COUNT 3000
#define IS_FW_HEADER_EXIST(_pfwhdr) \
- ((_pfwhdr->signature&0xFFFF) == 0x88E1)
+ ((le16_to_cpu(_pfwhdr->signature) & 0xFFFF) == 0x88E1)
#define USE_OLD_WOWLAN_DEBUG_FW 0
#define H2C_88E_RSVDPAGE_LOC_LEN 5
#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
#define FW_PWR_STATE_RF_OFF 0
-struct rtl92c_firmware_header {
- u16 signature;
- u8 category;
- u8 function;
- u16 version;
- u8 subversion;
- u8 rsvd1;
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- u16 ramcodesize;
- u16 rsvd2;
- u32 svnindex;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
-};
-
enum rtl8188e_h2c_cmd {
H2C_88E_RSVDPAGE = 0,
H2C_88E_JOINBSSRPT = 1,
#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1)
#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1)
+#define BT_MASK 0x00ffffff
#define RTLPRIV (struct rtl_priv *)
#define GET_UNDECORATED_AVERAGE_RSSI(_priv) \
struct dig_t *digtable = &rtlpriv->dm_digtable;
u32 isbt;
- /* modify DIG lower bound, deal with abnorally large false alarm */
+ /* modify DIG lower bound, deal with abnormally large false alarm */
if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
digtable->large_fa_hit++;
if (digtable->forbidden_igi < digtable->cur_igvalue) {
return false;
bt_state = rtl_read_byte(rtlpriv, 0x4fd);
- bt_tx = rtl_read_dword(rtlpriv, 0x488);
- bt_tx = bt_tx & 0x00ffffff;
- bt_pri = rtl_read_dword(rtlpriv, 0x48c);
- bt_pri = bt_pri & 0x00ffffff;
+ bt_tx = rtl_read_dword(rtlpriv, 0x488) & BT_MASK;
+ bt_pri = rtl_read_dword(rtlpriv, 0x48c) & BT_MASK;
polling = rtl_read_dword(rtlpriv, 0x490);
- if (bt_tx == 0xffffffff && bt_pri == 0xffffffff &&
+ if (bt_tx == BT_MASK && bt_pri == BT_MASK &&
polling == 0xffffffff && bt_state == 0xff)
return false;
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl92c_firmware_header *pfwheader;
+ struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
int err;
if (!rtlhal->pfirmware)
return 1;
- pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+ pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
pfwdata = (u8 *)rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
if (IS_FW_HEADER_EXIST(pfwheader)) {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
"Firmware Version(%d), Signature(%#x),Size(%d)\n",
pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtl92c_firmware_header));
+ (int)sizeof(struct rtlwifi_firmware_header));
- rtlhal->fw_version = pfwheader->version;
+ rtlhal->fw_version = le16_to_cpu(pfwheader->version);
rtlhal->fw_subversion = pfwheader->subversion;
- pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
- fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+ pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+ fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
}
_rtl92c_enable_fw_download(hw, true);
((GET_CVID_CUT_VERSION(version) == \
CHIP_VENDOR_UMC_B_CUT) ? true : false) : false)
-struct rtl92c_firmware_header {
- __le16 signature;
- u8 category;
- u8 function;
- __le16 version;
- u8 subversion;
- u8 rsvd1;
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- __le16 ramcodeSize;
- __le16 rsvd2;
- __le32 svnindex;
- __le32 rsvd3;
- __le32 rsvd4;
- __le32 rsvd5;
-};
-
#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
{
- u16 value16;
-
+ u16 value16;
+ u32 value32;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
- RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
- RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
- rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+ value32 = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
+ RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
+ RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&value32));
/* Accept all multicast address */
rtl_write_dword(rtlpriv, REG_MAR, 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_MAR + 4, 0xFFFFFFFF);
/* Accept all management frames */
value16 = 0xFFFF;
- rtl92c_set_mgt_filter(hw, value16);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MGT_FILTER,
+ (u8 *)(&value16));
/* Reject all control frame - default value is 0 */
- rtl92c_set_ctrl_filter(hw, 0x0);
+ value16 = 0x0;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CTRL_FILTER,
+ (u8 *)(&value16));
/* Accept all data frames */
value16 = 0xFFFF;
- rtl92c_set_data_filter(hw, value16);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DATA_FILTER,
+ (u8 *)(&value16));
}
static void _rtl92cu_init_beacon_parameters(struct ieee80211_hw *hw)
}
}
-static void _update_mac_setting(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
- mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR);
- mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
- mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
- mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
-}
-
int rtl92cu_hw_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
}
_rtl92cu_hw_configure(hw);
_InitPABias(hw);
- _update_mac_setting(hw);
rtl92c_dm_init(hw);
exit:
local_irq_restore(flags);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
enum wireless_mode wirelessmode = mac->mode;
u8 idx = 0;
u4b_ac_param);
break;
default:
- RT_ASSERT(false,
- "SetHwReg8185(): invalid aci: %d !\n",
+ RT_ASSERT(false, "invalid aci: %d !\n",
e_aci);
break;
}
- if (rtlusb->acm_method != EACMWAY2_SW)
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_ACM_CTRL, &e_aci);
- break;
- }
- case HW_VAR_ACM_CTRL:{
- u8 e_aci = *val;
- union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
- (&(mac->ac[0].aifs));
- u8 acm = p_aci_aifsn->f.acm;
- u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
-
- acm_ctrl =
- acm_ctrl | ((rtlusb->acm_method == 2) ? 0x0 : 0x1);
- if (acm) {
- switch (e_aci) {
- case AC0_BE:
- acm_ctrl |= AcmHw_BeqEn;
- break;
- case AC2_VI:
- acm_ctrl |= AcmHw_ViqEn;
- break;
- case AC3_VO:
- acm_ctrl |= AcmHw_VoqEn;
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
- break;
- }
- } else {
- switch (e_aci) {
- case AC0_BE:
- acm_ctrl &= (~AcmHw_BeqEn);
- break;
- case AC2_VI:
- acm_ctrl &= (~AcmHw_ViqEn);
- break;
- case AC3_VO:
- acm_ctrl &= (~AcmHw_VoqEn);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
- break;
- }
- }
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
- rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break;
}
case HW_VAR_RCR:{
}
case HW_VAR_MGT_FILTER:
rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val);
+ mac->rx_mgt_filter = *(u16 *)val;
break;
case HW_VAR_CTRL_FILTER:
rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val);
+ mac->rx_ctrl_filter = *(u16 *)val;
break;
case HW_VAR_DATA_FILTER:
rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
+ mac->rx_data_filter = *(u16 *)val;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u32 u4b_ac_param;
rtl92c_dm_init_edca_turbo(hw);
- u4b_ac_param = (u32) mac->ac[aci].aifs;
- u4b_ac_param |=
- ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
- AC_PARAM_ECW_MIN_OFFSET;
- u4b_ac_param |=
- ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
- AC_PARAM_ECW_MAX_OFFSET;
- u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
- AC_PARAM_TXOP_OFFSET;
- RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD, "queue:%x, ac_param:%x\n",
- aci, u4b_ac_param);
- switch (aci) {
- case AC1_BK:
- rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
- break;
- case AC0_BE:
- rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
- break;
- case AC2_VI:
- rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
- break;
- case AC3_VO:
- rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
- break;
- default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
- break;
- }
-}
-
-/*-------------------------------------------------------------------------
- * HW MAC Address
- *-------------------------------------------------------------------------*/
-void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
-{
- u32 i;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- for (i = 0 ; i < ETH_ALEN ; i++)
- rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));
-
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- "MAC Address: %02X-%02X-%02X-%02X-%02X-%02X\n",
- rtl_read_byte(rtlpriv, REG_MACID),
- rtl_read_byte(rtlpriv, REG_MACID+1),
- rtl_read_byte(rtlpriv, REG_MACID+2),
- rtl_read_byte(rtlpriv, REG_MACID+3),
- rtl_read_byte(rtlpriv, REG_MACID+4),
- rtl_read_byte(rtlpriv, REG_MACID+5));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, (u8 *)&aci);
}
void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
}
-u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
-}
-
-void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
-}
-
-u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
-}
-
-void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
-}
-
-u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- return rtl_read_word(rtlpriv, REG_RXFLTMAP2);
-}
-
-void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
-}
/*==============================================================*/
static u8 _rtl92c_query_rxpwrpercentage(char antpower)
/*---------------------------------------------------------------
* Hardware init functions
*---------------------------------------------------------------*/
-void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr);
void rtl92c_init_interrupt(struct ieee80211_hw *hw);
void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size);
void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
-/* For filter */
-u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw);
-void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter);
-u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw);
-void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter);
-u16 rtl92c_get_data_filter(struct ieee80211_hw *hw);
-void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
-
-
u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
struct rx_fwinfo_92c {
"dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
de_digtable->recover_cnt, de_digtable->rx_gain_min);
- /* deal with abnorally large false alarm */
+ /* deal with abnormally large false alarm */
if (falsealm_cnt->cnt_all > 10000) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG(): Abnormally false alarm case\n");
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
-struct rtl92d_firmware_header {
- u16 signature;
- u8 category;
- u8 function;
- u16 version;
- u8 subversion;
- u8 rsvd1;
-
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- u16 ramcodeSize;
- u16 rsvd2;
-
- u32 svnindex;
- u32 rsvd3;
-
- u32 rsvd4;
- u32 rsvd5;
-};
-
int rtl92d_download_fw(struct ieee80211_hw *hw);
void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl92c_firmware_header *pfwheader;
+ struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
int err;
if (!rtlhal->pfirmware)
return 1;
- pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
- rtlhal->fw_version = pfwheader->version;
+ pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+ rtlhal->fw_version = le16_to_cpu(pfwheader->version);
rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = (u8 *)rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
"Firmware Version(%d), Signature(%#x),Size(%d)\n",
pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtl92c_firmware_header));
+ (int)sizeof(struct rtlwifi_firmware_header));
- pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
- fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+ pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+ fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
} else {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
"Firmware no Header, Signature(%#x)\n",
#define FW_8192C_POLLING_TIMEOUT_COUNT 3000
#define IS_FW_HEADER_EXIST(_pfwhdr) \
- ((_pfwhdr->signature&0xFFF0) == 0x92E0)
+ ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x92E0)
#define USE_OLD_WOWLAN_DEBUG_FW 0
#define H2C_92E_RSVDPAGE_LOC_LEN 5
#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
#define FW_PWR_STATE_RF_OFF 0
-struct rtl92c_firmware_header {
- u16 signature;
- u8 category;
- u8 function;
- u16 version;
- u8 subversion;
- u8 rsvd1;
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- u16 ramcodesize;
- u16 rsvd2;
- u32 svnindex;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
-};
-
enum rtl8192e_h2c_cmd {
H2C_92E_RSVDPAGE = 0,
H2C_92E_MSRRPT = 1,
return true;
}
-static bool is_fw_header(struct rtl8723e_firmware_header *hdr)
+static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
{
- return (hdr->signature & 0xfff0) == 0x2300;
+ return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x2300;
}
static struct rtl_hal_ops rtl8723e_hal_ops = {
return true;
}
-static bool is_fw_header(struct rtl8723e_firmware_header *hdr)
+static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
{
- return (hdr->signature & 0xfff0) == 0x5300;
+ return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x5300;
}
static struct rtl_hal_ops rtl8723be_hal_ops = {
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl8723e_firmware_header *pfwheader;
+ struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
int err;
if (!rtlhal->pfirmware)
return 1;
- pfwheader = (struct rtl8723e_firmware_header *)rtlhal->pfirmware;
+ pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
"Firmware Version(%d), Signature(%#x), Size(%d)\n",
pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtl8723e_firmware_header));
+ (int)sizeof(struct rtlwifi_firmware_header));
- pfwdata = pfwdata + sizeof(struct rtl8723e_firmware_header);
- fwsize = fwsize - sizeof(struct rtl8723e_firmware_header);
+ pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+ fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
}
if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) {
VERSION_UNKNOWN = 0xFF,
};
-struct rtl8723e_firmware_header {
- u16 signature;
- u8 category;
- u8 function;
- u16 version;
- u8 subversion;
- u8 rsvd1;
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- u16 ramcodesize;
- u16 rsvd2;
- u32 svnindex;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
-};
-
enum rtl8723be_cmd {
H2C_8723BE_RSVDPAGE = 0,
H2C_8723BE_JOINBSSRPT = 1,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl8821a_firmware_header *pfwheader;
+ struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
int err;
return 1;
pfwheader =
- (struct rtl8821a_firmware_header *)rtlhal->wowlan_firmware;
- rtlhal->fw_version = pfwheader->version;
+ (struct rtlwifi_firmware_header *)rtlhal->wowlan_firmware;
+ rtlhal->fw_version = le16_to_cpu(pfwheader->version);
rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = (u8 *)rtlhal->wowlan_firmware;
fwsize = rtlhal->wowlan_fwsize;
return 1;
pfwheader =
- (struct rtl8821a_firmware_header *)rtlhal->pfirmware;
- rtlhal->fw_version = pfwheader->version;
+ (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+ rtlhal->fw_version = le16_to_cpu(pfwheader->version);
rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = (u8 *)rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
"Firmware Version(%d), Signature(%#x)\n",
pfwheader->version, pfwheader->signature);
- pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header);
- fwsize = fwsize - sizeof(struct rtl8821a_firmware_header);
+ pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+ fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
}
if (rtlhal->mac_func_enable) {
#define FW_8821AE_POLLING_TIMEOUT_COUNT 6000
#define IS_FW_HEADER_EXIST_8812(_pfwhdr) \
- ((_pfwhdr->signature&0xFFF0) == 0x9500)
+ ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x9500)
#define IS_FW_HEADER_EXIST_8821(_pfwhdr) \
- ((_pfwhdr->signature&0xFFF0) == 0x2100)
+ ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x2100)
#define USE_OLD_WOWLAN_DEBUG_FW 0
#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
#define FW_PWR_STATE_RF_OFF 0
-struct rtl8821a_firmware_header {
- u16 signature;
- u8 category;
- u8 function;
- u16 version;
- u8 subversion;
- u8 rsvd1;
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- u16 ramcodeSize;
- u16 rsvd2;
- u32 svnindex;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
-};
-
enum rtl8812_c2h_evt {
C2H_8812_DBG = 0,
C2H_8812_LB = 1,
#define WOL_REASON_REALWOW_V2_WAKEUPPKT BIT(9)
#define WOL_REASON_REALWOW_V2_ACKLOST BIT(10)
+struct rtlwifi_firmware_header {
+ __le16 signature;
+ u8 category;
+ u8 function;
+ __le16 version;
+ u8 subversion;
+ u8 rsvd1;
+ u8 month;
+ u8 date;
+ u8 hour;
+ u8 minute;
+ __le16 ramcodeSize;
+ __le16 rsvd2;
+ __le32 svnindex;
+ __le32 rsvd3;
+ __le32 rsvd4;
+ __le32 rsvd5;
+};
+
struct txpower_info_2g {
u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
bool tx_enable_sw_calc_duration;
};
-struct rtl92c_firmware_header;
-
struct rtl_wow_pattern {
u8 type;
u16 crc;
u32 mask[4];
};
-struct rtl8723e_firmware_header;
-
struct rtl_hal_ops {
int (*init_sw_vars) (struct ieee80211_hw *hw);
void (*deinit_sw_vars) (struct ieee80211_hw *hw);
void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
bool (*get_btc_status) (void);
- bool (*is_fw_header)(struct rtl8723e_firmware_header *hdr);
+ bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr);
u32 (*rx_command_packet)(struct ieee80211_hw *hw,
struct rtl_stats status, struct sk_buff *skb);
void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
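
The rtlwifi hunks above fold the per-chip copies of the firmware header into the single rtlwifi_firmware_header added to wifi.h, with properly annotated __le16/__le32 fields so sparse can flag missing byte swaps. A hedged usage sketch; fw_header_sane() and fw_data are hypothetical names, and 0x9500 is just the 8812 signature from the macros above:

	#include <linux/kernel.h>

	static bool fw_header_sane(const u8 *fw_data)
	{
		const struct rtlwifi_firmware_header *hdr =
			(const struct rtlwifi_firmware_header *)fw_data;

		/* raw hdr->signature is __le16; compare in CPU byte order,
		 * as the IS_FW_HEADER_EXIST* macros now do */
		return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x9500;
	}
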
if (passive)
scan_options |= WL1271_SCAN_OPT_PASSIVE;
- cmd->params.role_id = wlvif->role_id;
+ /* scan on the dev role if the regular one is not started */
+ if (wlcore_is_p2p_mgmt(wlvif))
+ cmd->params.role_id = wlvif->dev_role_id;
+ else
+ cmd->params.role_id = wlvif->role_id;
if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
kfree(acx);
return ret;
}
+
+int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl)
+{
+ struct acx_dynamic_fw_traces_cfg *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx dynamic fw traces config %d",
+ wl->dynamic_fw_traces);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->dynamic_fw_traces = cpu_to_le32(wl->dynamic_fw_traces);
+
+ ret = wl1271_cmd_configure(wl, ACX_DYNAMIC_TRACES_CFG,
+ acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx config dynamic fw traces failed: %d", ret);
+ goto out;
+ }
+out:
+ kfree(acx);
+ return ret;
+}
ACX_PEER_CAP = 0x0056,
ACX_INTERRUPT_NOTIFY = 0x0057,
ACX_RX_BA_FILTER = 0x0058,
- ACX_AP_SLEEP_CFG = 0x0059
+ ACX_AP_SLEEP_CFG = 0x0059,
+ ACX_DYNAMIC_TRACES_CFG = 0x005A,
};
/* numbers of bits the length field takes (add 1 for the actual number) */
struct wl18xx_acx_error_stats {
- u32 error_frame;
- u32 error_null_Frame_tx_start;
- u32 error_numll_frame_cts_start;
- u32 error_bar_retry;
- u32 error_frame_cts_nul_flid;
-} __packed;
-
-struct wl18xx_acx_debug_stats {
- u32 debug1;
- u32 debug2;
- u32 debug3;
- u32 debug4;
- u32 debug5;
- u32 debug6;
-} __packed;
-
-struct wl18xx_acx_ring_stats {
- u32 prepared_descs;
- u32 tx_cmplt;
+ u32 error_frame_non_ctrl;
+ u32 error_frame_ctrl;
+ u32 error_frame_during_protection;
+ u32 null_frame_tx_start;
+ u32 null_frame_cts_start;
+ u32 bar_retry;
+ u32 num_frame_cts_nul_flid;
+ u32 tx_abort_failure;
+ u32 tx_resume_failure;
+ u32 rx_cmplt_db_overflow_cnt;
+ u32 elp_while_rx_exch;
+ u32 elp_while_tx_exch;
+ u32 elp_while_tx;
+ u32 elp_while_nvic_pending;
+ u32 rx_excessive_frame_len;
+ u32 burst_mismatch;
+ u32 tbc_exch_mismatch;
} __packed;
+#define NUM_OF_RATES_INDEXES 30
struct wl18xx_acx_tx_stats {
u32 tx_prepared_descs;
u32 tx_cmplt;
u32 tx_data_programmed;
u32 tx_burst_programmed;
u32 tx_starts;
- u32 tx_imm_resp;
+ u32 tx_stop;
u32 tx_start_templates;
u32 tx_start_int_templates;
u32 tx_start_fw_gen;
u32 tx_exch;
u32 tx_retry_template;
u32 tx_retry_data;
+ u32 tx_retry_per_rate[NUM_OF_RATES_INDEXES];
u32 tx_exch_pending;
u32 tx_exch_expiry;
u32 tx_done_template;
u32 tx_done_data;
u32 tx_done_int_template;
- u32 tx_frame_checksum;
- u32 tx_checksum_result;
+ u32 tx_cfe1;
+ u32 tx_cfe2;
u32 frag_called;
u32 frag_mpdu_alloc_failed;
u32 frag_init_called;
u32 rx_cmplt_task;
u32 rx_phy_hdr;
u32 rx_timeout;
+ u32 rx_rts_timeout;
u32 rx_timeout_wa;
- u32 rx_wa_density_dropped_frame;
- u32 rx_wa_ba_not_expected;
- u32 rx_frame_checksum;
- u32 rx_checksum_result;
u32 defrag_called;
u32 defrag_init_called;
u32 defrag_in_process_called;
u32 decrypt_key_not_found;
u32 defrag_need_decrypt;
u32 rx_tkip_replays;
+ u32 rx_xfr;
} __packed;
struct wl18xx_acx_isr_stats {
u32 connection_out_of_sync;
u32 cont_miss_bcns_spread[PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD];
u32 rcvd_awake_bcns_cnt;
-} __packed;
-
-struct wl18xx_acx_event_stats {
- u32 calibration;
- u32 rx_mismatch;
- u32 rx_mem_empty;
-} __packed;
-
-struct wl18xx_acx_ps_poll_stats {
- u32 ps_poll_timeouts;
- u32 upsd_timeouts;
- u32 upsd_max_ap_turn;
- u32 ps_poll_max_ap_turn;
- u32 ps_poll_utilization;
- u32 upsd_utilization;
+ u32 sleep_time_count;
+ u32 sleep_time_avg;
+ u32 sleep_cycle_avg;
+ u32 sleep_percent;
+ u32 ap_sleep_active_conf;
+ u32 ap_sleep_user_conf;
+ u32 ap_sleep_counter;
} __packed;
struct wl18xx_acx_rx_filter_stats {
} __packed;
#define AGGR_STATS_TX_AGG 16
-#define AGGR_STATS_TX_RATE 16
#define AGGR_STATS_RX_SIZE_LEN 16
struct wl18xx_acx_aggr_stats {
- u32 tx_agg_vs_rate[AGGR_STATS_TX_AGG * AGGR_STATS_TX_RATE];
+ u32 tx_agg_rate[AGGR_STATS_TX_AGG];
+ u32 tx_agg_len[AGGR_STATS_TX_AGG];
u32 rx_size[AGGR_STATS_RX_SIZE_LEN];
} __packed;
struct wl18xx_acx_pipeline_stats {
u32 hs_tx_stat_fifo_int;
u32 hs_rx_stat_fifo_int;
- u32 tcp_tx_stat_fifo_int;
- u32 tcp_rx_stat_fifo_int;
u32 enc_tx_stat_fifo_int;
u32 enc_rx_stat_fifo_int;
u32 rx_complete_stat_fifo_int;
u32 post_proc_swi;
u32 sec_frag_swi;
u32 pre_to_defrag_swi;
- u32 defrag_to_csum_swi;
- u32 csum_to_rx_xfer_swi;
+ u32 defrag_to_rx_xfer_swi;
u32 dec_packet_in;
u32 dec_packet_in_fifo_full;
u32 dec_packet_out;
- u32 cs_rx_packet_in;
- u32 cs_rx_packet_out;
u16 pipeline_fifo_full[PIPE_STATS_HW_FIFO];
+ u16 padding;
+} __packed;
+
+#define DIVERSITY_STATS_NUM_OF_ANT 2
+
+struct wl18xx_acx_diversity_stats {
+ u32 num_of_packets_per_ant[DIVERSITY_STATS_NUM_OF_ANT];
+ u32 total_num_of_toggles;
} __packed;
-struct wl18xx_acx_mem_stats {
- u32 rx_free_mem_blks;
- u32 tx_free_mem_blks;
- u32 fwlog_free_mem_blks;
- u32 fw_gen_free_mem_blks;
+struct wl18xx_acx_thermal_stats {
+ u16 irq_thr_low;
+ u16 irq_thr_high;
+ u16 tx_stop;
+ u16 tx_resume;
+ u16 false_irq;
+ u16 adc_source_unexpected;
+} __packed;
+
+#define WL18XX_NUM_OF_CALIBRATIONS_ERRORS 18
+struct wl18xx_acx_calib_failure_stats {
+ u16 fail_count[WL18XX_NUM_OF_CALIBRATIONS_ERRORS];
+ u32 calib_count;
+} __packed;
+
+struct wl18xx_roaming_stats {
+ s32 rssi_level;
+} __packed;
+
+struct wl18xx_dfs_stats {
+ u32 num_of_radar_detections;
} __packed;
struct wl18xx_acx_statistics {
struct acx_header header;
struct wl18xx_acx_error_stats error;
- struct wl18xx_acx_debug_stats debug;
struct wl18xx_acx_tx_stats tx;
struct wl18xx_acx_rx_stats rx;
struct wl18xx_acx_isr_stats isr;
struct wl18xx_acx_pwr_stats pwr;
- struct wl18xx_acx_ps_poll_stats ps_poll;
struct wl18xx_acx_rx_filter_stats rx_filter;
struct wl18xx_acx_rx_rate_stats rx_rate;
struct wl18xx_acx_aggr_stats aggr_size;
struct wl18xx_acx_pipeline_stats pipeline;
- struct wl18xx_acx_mem_stats mem;
+ struct wl18xx_acx_diversity_stats diversity;
+ struct wl18xx_acx_thermal_stats thermal;
+ struct wl18xx_acx_calib_failure_stats calib;
+ struct wl18xx_roaming_stats roaming;
+ struct wl18xx_dfs_stats dfs;
} __packed;
struct wl18xx_acx_clear_statistics {
u8 idle_conn_thresh;
} __packed;
+/*
+ * ACX_DYNAMIC_TRACES_CFG
+ * configure the FW dynamic traces
+ */
+struct acx_dynamic_fw_traces_cfg {
+ struct acx_header header;
+ __le32 dynamic_fw_traces;
+} __packed;
+
int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
u32 sdio_blk_size, u32 extra_mem_blks,
u32 len_field_size);
int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action);
int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action);
int wl18xx_acx_ap_sleep(struct wl1271 *wl);
+int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl);
#endif /* __WL18XX_ACX_H__ */
DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics)
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug1, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug2, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug3, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug4, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug5, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug6, "%u");
-
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_null_Frame_tx_start, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_numll_frame_cts_start, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_bar_retry, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_cts_nul_flid, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_non_ctrl, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_ctrl, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_during_protection, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_tx_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_cts_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, bar_retry, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, num_frame_cts_nul_flid, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_abort_failure, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_resume_failure, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_cmplt_db_overflow_cnt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_rx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_nvic_pending, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_excessive_frame_len, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, burst_mismatch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tbc_exch_mismatch, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_stop, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(tx, tx_retry_per_rate,
+ NUM_OF_RATES_INDEXES);
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe1, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe2, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_rts_timeout, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_density_dropped_frame, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_ba_not_expected, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_frame_checksum, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_checksum_result, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_xfr, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread,
PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD);
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u");
-
-
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_timeouts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_timeouts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_max_ap_turn, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_max_ap_turn, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_utilization, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_utilization, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_count, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_avg, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_cycle_avg, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_percent, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_active_conf, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_user_conf, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_counter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
-WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
- AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_rate,
+ AGGR_STATS_TX_AGG);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_len,
+ AGGR_STATS_TX_AGG);
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size,
AGGR_STATS_RX_SIZE_LEN);
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_tx_stat_fifo_int, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_rx_stat_fifo_int, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_csum_swi, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, csum_to_rx_xfer_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_rx_xfer_swi, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_in, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_out, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full,
PIPE_STATS_HW_FIFO);
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, rx_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, tx_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, fwlog_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, fw_gen_free_mem_blks, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(diversity, num_of_packets_per_ant,
+ DIVERSITY_STATS_NUM_OF_ANT);
+WL18XX_DEBUGFS_FWSTATS_FILE(diversity, total_num_of_toggles, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_low, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_high, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_stop, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_resume, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, false_irq, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, adc_source_unexpected, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(calib, fail_count,
+ WL18XX_NUM_OF_CALIBRATIONS_ERRORS);
+WL18XX_DEBUGFS_FWSTATS_FILE(calib, calib_count, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(roaming, rssi_level, "%d");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(dfs, num_of_radar_detections, "%d");
static ssize_t conf_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
.llseek = default_llseek,
};
+static ssize_t dynamic_fw_traces_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &value);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&wl->mutex);
+
+ wl->dynamic_fw_traces = value;
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_acx_dynamic_fw_traces(wl);
+ if (ret < 0)
+ count = ret;
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static ssize_t dynamic_fw_traces_read(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ return wl1271_format_buffer(userbuf, count, ppos,
+ "%d\n", wl->dynamic_fw_traces);
+}
+
+static const struct file_operations dynamic_fw_traces_ops = {
+ .read = dynamic_fw_traces_read,
+ .write = dynamic_fw_traces_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
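With the file registered under the wl18xx debugfs directory, the new knob follows the usual wlcore pattern: the write handler parses the bitmap with kstrtoul_from_user(), caches it in wl->dynamic_fw_traces so it survives a firmware restart, and only pushes it to the firmware (bracketed by an ELP wakeup/sleep pair) when the core is in WLCORE_STATE_ON; the init hunk further on replays the cached value on every firmware boot via wl18xx_acx_dynamic_fw_traces().
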
int wl18xx_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
DEBUGFS_ADD(clear_fw_stats, stats);
- DEBUGFS_FWSTATS_ADD(debug, debug1);
- DEBUGFS_FWSTATS_ADD(debug, debug2);
- DEBUGFS_FWSTATS_ADD(debug, debug3);
- DEBUGFS_FWSTATS_ADD(debug, debug4);
- DEBUGFS_FWSTATS_ADD(debug, debug5);
- DEBUGFS_FWSTATS_ADD(debug, debug6);
-
- DEBUGFS_FWSTATS_ADD(error, error_frame);
- DEBUGFS_FWSTATS_ADD(error, error_null_Frame_tx_start);
- DEBUGFS_FWSTATS_ADD(error, error_numll_frame_cts_start);
- DEBUGFS_FWSTATS_ADD(error, error_bar_retry);
- DEBUGFS_FWSTATS_ADD(error, error_frame_cts_nul_flid);
+ DEBUGFS_FWSTATS_ADD(error, error_frame_non_ctrl);
+ DEBUGFS_FWSTATS_ADD(error, error_frame_ctrl);
+ DEBUGFS_FWSTATS_ADD(error, error_frame_during_protection);
+ DEBUGFS_FWSTATS_ADD(error, null_frame_tx_start);
+ DEBUGFS_FWSTATS_ADD(error, null_frame_cts_start);
+ DEBUGFS_FWSTATS_ADD(error, bar_retry);
+ DEBUGFS_FWSTATS_ADD(error, num_frame_cts_nul_flid);
+ DEBUGFS_FWSTATS_ADD(error, tx_abort_failure);
+ DEBUGFS_FWSTATS_ADD(error, tx_resume_failure);
+ DEBUGFS_FWSTATS_ADD(error, rx_cmplt_db_overflow_cnt);
+ DEBUGFS_FWSTATS_ADD(error, elp_while_rx_exch);
+ DEBUGFS_FWSTATS_ADD(error, elp_while_tx_exch);
+ DEBUGFS_FWSTATS_ADD(error, elp_while_tx);
+ DEBUGFS_FWSTATS_ADD(error, elp_while_nvic_pending);
+ DEBUGFS_FWSTATS_ADD(error, rx_excessive_frame_len);
+ DEBUGFS_FWSTATS_ADD(error, burst_mismatch);
+ DEBUGFS_FWSTATS_ADD(error, tbc_exch_mismatch);
DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs);
DEBUGFS_FWSTATS_ADD(tx, tx_cmplt);
DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed);
DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed);
DEBUGFS_FWSTATS_ADD(tx, tx_starts);
- DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp);
+ DEBUGFS_FWSTATS_ADD(tx, tx_stop);
DEBUGFS_FWSTATS_ADD(tx, tx_start_templates);
DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates);
DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen);
DEBUGFS_FWSTATS_ADD(tx, tx_exch);
DEBUGFS_FWSTATS_ADD(tx, tx_retry_template);
DEBUGFS_FWSTATS_ADD(tx, tx_retry_data);
+ DEBUGFS_FWSTATS_ADD(tx, tx_retry_per_rate);
DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending);
DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry);
DEBUGFS_FWSTATS_ADD(tx, tx_done_template);
DEBUGFS_FWSTATS_ADD(tx, tx_done_data);
DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template);
- DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum);
- DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result);
+ DEBUGFS_FWSTATS_ADD(tx, tx_cfe1);
+ DEBUGFS_FWSTATS_ADD(tx, tx_cfe2);
DEBUGFS_FWSTATS_ADD(tx, frag_called);
DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed);
DEBUGFS_FWSTATS_ADD(tx, frag_init_called);
DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task);
DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr);
DEBUGFS_FWSTATS_ADD(rx, rx_timeout);
+ DEBUGFS_FWSTATS_ADD(rx, rx_rts_timeout);
DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa);
- DEBUGFS_FWSTATS_ADD(rx, rx_wa_density_dropped_frame);
- DEBUGFS_FWSTATS_ADD(rx, rx_wa_ba_not_expected);
- DEBUGFS_FWSTATS_ADD(rx, rx_frame_checksum);
- DEBUGFS_FWSTATS_ADD(rx, rx_checksum_result);
DEBUGFS_FWSTATS_ADD(rx, defrag_called);
DEBUGFS_FWSTATS_ADD(rx, defrag_init_called);
DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called);
DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found);
DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt);
DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays);
+ DEBUGFS_FWSTATS_ADD(rx, rx_xfr);
DEBUGFS_FWSTATS_ADD(isr, irqs);
DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync);
DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread);
DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt);
-
- DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_timeouts);
- DEBUGFS_FWSTATS_ADD(ps_poll, upsd_timeouts);
- DEBUGFS_FWSTATS_ADD(ps_poll, upsd_max_ap_turn);
- DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_max_ap_turn);
- DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_utilization);
- DEBUGFS_FWSTATS_ADD(ps_poll, upsd_utilization);
+ DEBUGFS_FWSTATS_ADD(pwr, sleep_time_count);
+ DEBUGFS_FWSTATS_ADD(pwr, sleep_time_avg);
+ DEBUGFS_FWSTATS_ADD(pwr, sleep_cycle_avg);
+ DEBUGFS_FWSTATS_ADD(pwr, sleep_percent);
+ DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_active_conf);
+ DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_user_conf);
+ DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_counter);
DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter);
DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter);
DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates);
- DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_vs_rate);
+ DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_rate);
+ DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_len);
DEBUGFS_FWSTATS_ADD(aggr_size, rx_size);
DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int);
- DEBUGFS_FWSTATS_ADD(pipeline, tcp_tx_stat_fifo_int);
- DEBUGFS_FWSTATS_ADD(pipeline, tcp_rx_stat_fifo_int);
DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int);
DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int);
DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int);
DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi);
DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi);
DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi);
- DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_csum_swi);
- DEBUGFS_FWSTATS_ADD(pipeline, csum_to_rx_xfer_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_rx_xfer_swi);
DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in);
DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full);
DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out);
- DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_in);
- DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_out);
DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full);
- DEBUGFS_FWSTATS_ADD(mem, rx_free_mem_blks);
- DEBUGFS_FWSTATS_ADD(mem, tx_free_mem_blks);
- DEBUGFS_FWSTATS_ADD(mem, fwlog_free_mem_blks);
- DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks);
+ DEBUGFS_FWSTATS_ADD(diversity, num_of_packets_per_ant);
+ DEBUGFS_FWSTATS_ADD(diversity, total_num_of_toggles);
+
+ DEBUGFS_FWSTATS_ADD(thermal, irq_thr_low);
+ DEBUGFS_FWSTATS_ADD(thermal, irq_thr_high);
+ DEBUGFS_FWSTATS_ADD(thermal, tx_stop);
+ DEBUGFS_FWSTATS_ADD(thermal, tx_resume);
+ DEBUGFS_FWSTATS_ADD(thermal, false_irq);
+ DEBUGFS_FWSTATS_ADD(thermal, adc_source_unexpected);
+
+ DEBUGFS_FWSTATS_ADD(calib, fail_count);
+
+ DEBUGFS_FWSTATS_ADD(calib, calib_count);
+
+ DEBUGFS_FWSTATS_ADD(roaming, rssi_level);
+
+ DEBUGFS_FWSTATS_ADD(dfs, num_of_radar_detections);
DEBUGFS_ADD(conf, moddir);
DEBUGFS_ADD(radar_detection, moddir);
+ DEBUGFS_ADD(dynamic_fw_traces, moddir);
return 0;
return 0;
}
+static void wlcore_event_time_sync(struct wl1271 *wl, u16 tsf_msb, u16 tsf_lsb)
+{
+ u32 clock;
+ /* convert the MSB+LSB to a u32 TSF value */
+ clock = (tsf_msb << 16) | tsf_lsb;
+ wl1271_info("TIME_SYNC_EVENT_ID: clock %u", clock);
+}
+
int wl18xx_process_mailbox_events(struct wl1271 *wl)
{
struct wl18xx_event_mailbox *mbox = wl->mbox;
wl18xx_scan_completed(wl, wl->scan_wlvif);
}
+ if (vector & TIME_SYNC_EVENT_ID)
+ wlcore_event_time_sync(wl,
+ mbox->time_sync_tsf_msb,
+ mbox->time_sync_tsf_lsb);
+
if (vector & RADAR_DETECTED_EVENT_ID) {
wl1271_info("radar event: channel %d type %s",
mbox->radar_channel,
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(18),
DFS_CHANNELS_CONFIG_COMPLETE_EVENT = BIT(19),
PERIODIC_SCAN_REPORT_EVENT_ID = BIT(20),
- SMART_CONFIG_SYNC_EVENT_ID = BIT(22),
- SMART_CONFIG_DECODE_EVENT_ID = BIT(23),
+ SMART_CONFIG_SYNC_EVENT_ID = BIT(22),
+ SMART_CONFIG_DECODE_EVENT_ID = BIT(23),
+ TIME_SYNC_EVENT_ID = BIT(24),
};
enum wl18xx_radar_types {
/* smart config sync channel */
u8 sc_sync_channel;
u8 sc_sync_band;
- u8 padding2[2];
+ /* time sync msb */
+ u16 time_sync_tsf_msb;
/* radar detect */
u8 radar_channel;
u8 radar_type;
- u8 padding3[2];
+ /* time sync lsb */
+ u16 time_sync_tsf_lsb;
+
} __packed;
int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
.num_probe_reqs = 2,
.rssi_threshold = -90,
.snr_threshold = 0,
+ .num_short_intervals = SCAN_MAX_SHORT_INTERVALS,
+ .long_interval = 30000,
},
.ht = {
.rx_ba_win_size = 32,
CHANNEL_SWITCH_COMPLETE_EVENT_ID |
DFS_CHANNELS_CONFIG_COMPLETE_EVENT |
SMART_CONFIG_SYNC_EVENT_ID |
- SMART_CONFIG_DECODE_EVENT_ID;
-;
+ SMART_CONFIG_DECODE_EVENT_ID |
+ TIME_SYNC_EVENT_ID;
wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
if (ret < 0)
return ret;
+ /* set the dynamic fw traces bitmap */
+ ret = wl18xx_acx_dynamic_fw_traces(wl);
+ if (ret < 0)
+ return ret;
+
if (checksum_param) {
ret = wl18xx_acx_set_checksum_state(wl);
if (ret != 0)
static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
{
- .max = 3,
+ .max = 2,
.types = BIT(NL80211_IFTYPE_STATION),
},
{
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT),
},
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
};
static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
.max = 2,
.types = BIT(NL80211_IFTYPE_AP),
},
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_cl_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_go_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
};
static const struct ieee80211_iface_combination
goto out;
}
- cmd->role_id = wlvif->role_id;
+ /* scan on the dev role if the regular one is not started */
+ if (wlcore_is_p2p_mgmt(wlvif))
+ cmd->role_id = wlvif->dev_role_id;
+ else
+ cmd->role_id = wlvif->role_id;
if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
SCAN_TYPE_PERIODIC);
wl18xx_adjust_channels(cmd, cmd_channels);
- cmd->short_cycles_sec = 0;
- cmd->long_cycles_sec = cpu_to_le16(req->interval);
- cmd->short_cycles_count = 0;
+ if (c->num_short_intervals && c->long_interval &&
+ c->long_interval > req->interval) {
+ cmd->short_cycles_msec = cpu_to_le16(req->interval);
+ cmd->long_cycles_msec = cpu_to_le16(c->long_interval);
+ cmd->short_cycles_count = c->num_short_intervals;
+ } else {
+ cmd->short_cycles_msec = 0;
+ cmd->long_cycles_msec = cpu_to_le16(req->interval);
+ cmd->short_cycles_count = 0;
+ }
+ wl1271_debug(DEBUG_SCAN, "short_interval: %d, long_interval: %d, num_short: %d",
+ le16_to_cpu(cmd->short_cycles_msec),
+ le16_to_cpu(cmd->long_cycles_msec),
+ cmd->short_cycles_count);
cmd->total_cycles = 0;
u8 dfs; /* number of dfs channels in 5ghz */
u8 passive_active; /* number of passive before active channels 2.4ghz */
- __le16 short_cycles_sec;
- __le16 long_cycles_sec;
+ __le16 short_cycles_msec;
+ __le16 long_cycles_msec;
u8 short_cycles_count;
u8 total_cycles; /* 0 - infinite */
u8 padding[2];
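
Worked example of the new interval plan: with the defaults set above (num_short_intervals = SCAN_MAX_SHORT_INTERVALS = 14, long_interval = 30000 ms) and a hypothetical userspace request interval of 10000 ms, the condition long_interval > req->interval holds, so the firmware runs 14 short cycles 10 s apart and then continues with 30 s cycles indefinitely. If either conf value is zero, or the requested interval is already at least as long as long_interval, the old behaviour is kept: short_cycles_count = 0 and a single cycle length at the requested interval.
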
wlvif->bss_type == BSS_TYPE_IBSS)))
return -EINVAL;
- ret = wl12xx_cmd_role_enable(wl,
- wl12xx_wlvif_to_vif(wlvif)->addr,
- WL1271_ROLE_DEVICE,
- &wlvif->dev_role_id);
- if (ret < 0)
- goto out;
+ /* the dev role is already started for p2p mgmt interfaces */
+ if (!wlcore_is_p2p_mgmt(wlvif)) {
+ ret = wl12xx_cmd_role_enable(wl,
+ wl12xx_wlvif_to_vif(wlvif)->addr,
+ WL1271_ROLE_DEVICE,
+ &wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel);
if (ret < 0)
out_stop:
wl12xx_cmd_role_stop_dev(wl, wlvif);
out_disable:
- wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+ if (!wlcore_is_p2p_mgmt(wlvif))
+ wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
out:
return ret;
}
if (ret < 0)
goto out;
- ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
- if (ret < 0)
- goto out;
+ if (!wlcore_is_p2p_mgmt(wlvif)) {
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
out:
return ret;
}
+
+int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 feature, u8 enable, u8 value)
+{
+ struct wlcore_cmd_generic_cfg *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD,
+ "cmd generic cfg (role %d feature %d enable %d value %d)",
+ wlvif->role_id, feature, enable, value);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->role_id = wlvif->role_id;
+ cmd->feature = feature;
+ cmd->enable = enable;
+ cmd->value = value;
+
+ ret = wl1271_cmd_send(wl, CMD_GENERIC_CFG, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send generic cfg command");
+ goto out_free;
+ }
+out_free:
+ kfree(cmd);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wlcore_cmd_generic_cfg);
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
enum ieee80211_band band);
int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
+int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 feature, u8 enable, u8 value);
int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
u8 padding[3];
} __packed;
+enum wlcore_generic_cfg_feature {
+ WLCORE_CFG_FEATURE_RADAR_DEBUG = 2,
+};
+
+struct wlcore_cmd_generic_cfg {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+ u8 feature;
+ u8 enable;
+ u8 value;
+} __packed;
+
struct wl12xx_cmd_config_fwlog {
struct wl1271_cmd_header header;
/* SNR threshold to be used for filtering */
s8 snr_threshold;
+
+ /*
+ * number of short-interval scheduled scan cycles before
+ * switching to the long interval
+ */
+ u8 num_short_intervals;
+
+ /* interval between each long scheduled scan cycle (in ms) */
+ u16 long_interval;
} __packed;
struct conf_ht_setting {
* version, the two LSB are the lower driver's private conf
* version.
*/
-#define WLCORE_CONF_VERSION (0x0006 << 16)
+#define WLCORE_CONF_VERSION (0x0007 << 16)
#define WLCORE_CONF_MASK 0xffff0000
#define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \
sizeof(struct wlcore_conf))
}
/* generic sta initialization (non vif-specific) */
-static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif);
#endif
wl->wow_enabled = true;
wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlcore_is_p2p_mgmt(wlvif))
+ continue;
+
ret = wl1271_configure_suspend(wl, wlvif, wow);
if (ret < 0) {
mutex_unlock(&wl->mutex);
goto out;
wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlcore_is_p2p_mgmt(wlvif))
+ continue;
+
wl1271_configure_resume(wl, wlvif);
}
wlvif->p2p = 1;
/* fall-through */
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_DEVICE:
wlvif->bss_type = BSS_TYPE_STA_BSS;
break;
case NL80211_IFTYPE_ADHOC:
{
struct wlcore_hw_queue_iter_data *iter_data = data;
- if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+ WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
return;
if (iter_data->cur_running || vif == iter_data->vif) {
struct wlcore_hw_queue_iter_data iter_data = {};
int i, q_base;
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+ return 0;
+ }
+
iter_data.vif = vif;
/* mark all bits taken by active interfaces */
goto out;
}
- ret = wl12xx_cmd_role_enable(wl, vif->addr,
- role_type, &wlvif->role_id);
- if (ret < 0)
- goto out;
+ if (!wlcore_is_p2p_mgmt(wlvif)) {
+ ret = wl12xx_cmd_role_enable(wl, vif->addr,
+ role_type, &wlvif->role_id);
+ if (ret < 0)
+ goto out;
- ret = wl1271_init_vif_specific(wl, vif);
- if (ret < 0)
- goto out;
+ ret = wl1271_init_vif_specific(wl, vif);
+ if (ret < 0)
+ goto out;
+
+ } else {
+ ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
+ &wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+
+ /* needed mainly for configuring rate policies */
+ ret = wl1271_sta_hw_init(wl, wlvif);
+ if (ret < 0)
+ goto out;
+ }
list_add(&wlvif->list, &wl->wlvif_list);
set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
wl12xx_stop_dev(wl, wlvif);
}
- ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
- if (ret < 0)
- goto deinit;
+ if (!wlcore_is_p2p_mgmt(wlvif)) {
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
+ if (ret < 0)
+ goto deinit;
+ } else {
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+ if (ret < 0)
+ goto deinit;
+ }
wl1271_ps_elp_sleep(wl);
}
{
int ret;
+ if (wlcore_is_p2p_mgmt(wlvif))
+ return 0;
+
if (conf->power_level != wlvif->power_level) {
ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
if (ret < 0)
goto out;
wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlcore_is_p2p_mgmt(wlvif))
+ continue;
+
if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
if (*total & FIF_ALLMULTI)
ret = wl1271_acx_group_address_tbl(wl, wlvif,
u8 ps_scheme;
int ret = 0;
+ if (wlcore_is_p2p_mgmt(wlvif))
+ return 0;
+
mutex_lock(&wl->mutex);
wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->max_sched_scan_ssids = 16;
wl->hw->wiphy->max_match_sets = 16;
if (desc->rate <= wl->hw_min_ht_rate)
status->flag |= RX_FLAG_HT;
+ /*
+ * Read the signal level and antenna diversity indication.
+ * The msb in the signal level is always set as it is a
+ * negative number.
+ * The antenna indication is the msb of the rssi.
+ */
status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7));
status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7);
#define MAX_CHANNELS_5GHZ 42
#define SCAN_MAX_CYCLE_INTERVALS 16
+
+/* The FW interval table can hold up to 16 entries.
+ * The 1st entry isn't used (the first scan cycle is immediate), and
+ * the last entry is reserved for the long_interval.
+ */
+#define SCAN_MAX_SHORT_INTERVALS (SCAN_MAX_CYCLE_INTERVALS - 2)
#define SCAN_MAX_BANDS 3
enum {
/* interface combinations supported by the hw */
const struct ieee80211_iface_combination *iface_combinations;
u8 n_iface_combinations;
+
+ /* dynamic fw traces */
+ u32 dynamic_fw_traces;
};
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
}
+static inline bool wlcore_is_p2p_mgmt(struct wl12xx_vif *wlvif)
+{
+ return wl12xx_wlvif_to_vif(wlvif)->type == NL80211_IFTYPE_P2P_DEVICE;
+}
+
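wlcore_is_p2p_mgmt() is the predicate the preceding wlcore hunks all key off: a NL80211_IFTYPE_P2P_DEVICE vif carries management traffic only, so it runs on the DEVICE role instead of a regular role, is skipped in the suspend/resume, power-level, RX-filter and conf_tx paths, and gets no hardware data queues (its cab_queue is set to IEEE80211_INVAL_HW_QUEUE).
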
#define wl12xx_for_each_wlvif(wl, wlvif) \
list_for_each_entry(wlvif, &wl->wlvif_list, list)
netif_carrier_off(info->netdev);
- for (i = 0; i < num_queues; ++i) {
+ for (i = 0; i < num_queues && info->queues; ++i) {
struct netfront_queue *queue = &info->queues[i];
if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
unregister_netdev(info->netdev);
- xennet_destroy_queues(info);
+ if (info->queues)
+ xennet_destroy_queues(info);
xennet_free_netdev(info->netdev);
return 0;
if (resp) {
resp(sp, fp, arg);
res = true;
- } else if (!IS_ERR(fp)) {
- fc_frame_free(fp);
}
spin_lock_bh(&ep->ex_lock);
* If new exch resp handler is valid then call that
* first.
*/
- fc_invoke_resp(ep, sp, fp);
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
fc_exch_release(ep);
return;
fc_exch_hold(ep);
if (!rc)
fc_exch_delete(ep);
- fc_invoke_resp(ep, sp, fp);
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
if (has_rec)
fc_exch_timer_set(ep, ep->r_a_tov);
fc_exch_release(ep);
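
The two libfc hunks above change the frame-ownership contract: fc_invoke_resp() now only reports whether a response handler actually consumed the frame, and the IS_ERR()-based free inside it is gone, so each call site frees fp itself exactly once when no handler ran.
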
fc_fcp_pkt_hold(fsp);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
- if (!fc_fcp_lock_pkt(fsp)) {
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (!(fsp->state & FC_SRB_COMPL)) {
+ fsp->state |= FC_SRB_COMPL;
+ /*
+ * TODO: dropping scsi_pkt_lock and then reacquiring it
+ * around fc_fcp_cleanup_cmd() is required, since
+ * fc_fcp_cleanup_cmd() calls into fc_seq_set_resp()
+ * and that function can sleep via schedule(). Perhaps
+ * schedule() and the related code should be removed
+ * instead of unlocking here, to avoid a
+ * scheduling-while-atomic bug.
+ */
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
fc_fcp_cleanup_cmd(fsp, error);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
fc_io_compl(fsp);
- fc_fcp_unlock_pkt(fsp);
}
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
fc_fcp_pkt_release(fsp);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- unsigned long flags;
del_timer_sync(&conn->transport_timer);
+ mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
if (session->leadconn == conn) {
}
spin_unlock_bh(&session->frwd_lock);
- /*
- * Block until all in-progress commands for this connection
- * time out or fail.
- */
- for (;;) {
- spin_lock_irqsave(session->host->host_lock, flags);
- if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
- spin_unlock_irqrestore(session->host->host_lock, flags);
- break;
- }
- spin_unlock_irqrestore(session->host->host_lock, flags);
- msleep_interruptible(500);
- iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
- "host_busy %d host_failed %d\n",
- atomic_read(&session->host->host_busy),
- session->host->host_failed);
- /*
- * force eh_abort() to unblock
- */
- wake_up(&conn->ehwait);
- }
-
/* flush queued up work because we free the connection below */
iscsi_suspend_tx(conn);
if (session->leadconn == conn)
session->leadconn = NULL;
spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
iscsi_destroy_conn(cls_conn);
}
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
-#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
}
}
EXPORT_SYMBOL(scsi_build_sense_buffer);
-
-/**
- * scsi_set_sense_information - set the information field in a
- * formatted sense data buffer
- * @buf: Where to build sense data
- * @info: 64-bit information value to be set
- *
- **/
-void scsi_set_sense_information(u8 *buf, u64 info)
-{
- if ((buf[0] & 0x7f) == 0x72) {
- u8 *ucp, len;
-
- len = buf[7];
- ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
- if (!ucp) {
- buf[7] = len + 0xa;
- ucp = buf + 8 + len;
- }
- ucp[0] = 0;
- ucp[1] = 0xa;
- ucp[2] = 0x80; /* Valid bit */
- ucp[3] = 0;
- put_unaligned_be64(info, &ucp[4]);
- } else if ((buf[0] & 0x7f) == 0x70) {
- buf[0] |= 0x80;
- put_unaligned_be64(info, &buf[3]);
- }
-}
-EXPORT_SYMBOL(scsi_set_sense_information);
max_xfer = sdkp->max_xfer_blocks;
max_xfer <<= ilog2(sdp->sector_size) - 9;
- max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
- max_xfer);
- blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+ sdkp->disk->queue->limits.max_sectors =
+ min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+
set_capacity(disk, sdkp->capacity);
sd_config_write_same(sdkp);
kfree(buffer);
/* dev->destructor = free_netdev; */
PRINT_INFO(CORECONFIG_DBG, "In Ethernet setup function\n");
ether_setup(dev);
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->type = ARPHRD_IEEE80211_RADIOTAP;
memset(dev->dev_addr, 0, ETH_ALEN);
cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
- if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
- } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+ else
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
if (!strcmp(t->tf_ops->name, fo->name)) {
BUG_ON(atomic_read(&t->tf_access_cnt));
list_del(&t->tf_list);
+ mutex_unlock(&g_tf_lock);
+ /*
+ * Wait for any outstanding fabric se_deve_entry->rcu_head
+ * callbacks to complete post kfree_rcu(), before allowing
+ * fabric driver unload of TFO->module to proceed.
+ */
+ rcu_barrier();
kfree(t);
- break;
+ return;
}
}
mutex_unlock(&g_tf_lock);
list_for_each_entry(tb, &backend_list, list) {
if (tb->ops == ops) {
list_del(&tb->list);
+ mutex_unlock(&backend_mutex);
+ /*
+ * Wait for any outstanding backend driver ->rcu_head
+ * callbacks to complete post TBO->free_device() ->
+ * call_rcu(), before allowing backend driver module
+ * unload of target_backend_ops->owner to proceed.
+ */
+ rcu_barrier();
kfree(tb);
- break;
+ return;
}
}
mutex_unlock(&backend_mutex);
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
+ struct scsi_lun slun;
unsigned char *buf;
u32 lun_count = 0, offset = 8;
-
- if (cmd->data_length < 16) {
- pr_warn("REPORT LUNS allocation length %u too small\n",
- cmd->data_length);
- return TCM_INVALID_CDB_FIELD;
- }
+ __be32 len;
buf = transport_kmap_data_sg(cmd);
- if (!buf)
+ if (cmd->data_length && !buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
- if (!sess) {
- int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
- lun_count = 1;
+ if (!sess)
goto done;
- }
+
nacl = sess->se_node_acl;
rcu_read_lock();
* See SPC2-R20 7.19.
*/
lun_count++;
- if ((offset + 8) > cmd->data_length)
+ if (offset >= cmd->data_length)
continue;
- int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ int_to_scsilun(deve->mapped_lun, &slun);
+ memcpy(buf + offset, &slun,
+ min(8u, cmd->data_length - offset));
offset += 8;
}
rcu_read_unlock();
* See SPC3 r07, page 159.
*/
done:
- lun_count *= 8;
- buf[0] = ((lun_count >> 24) & 0xff);
- buf[1] = ((lun_count >> 16) & 0xff);
- buf[2] = ((lun_count >> 8) & 0xff);
- buf[3] = (lun_count & 0xff);
- transport_kunmap_data_sg(cmd);
+ /*
+ * If no LUNs are accessible, report virtual LUN 0.
+ */
+ if (lun_count == 0) {
+ int_to_scsilun(0, &slun);
+ if (cmd->data_length > 8)
+ memcpy(buf + offset, &slun,
+ min(8u, cmd->data_length - offset));
+ lun_count = 1;
+ }
+
+ if (buf) {
+ len = cpu_to_be32(lun_count * 8);
+ memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+ }
target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
return 0;
* registered cooling device.
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
- * @cpufreq_val: integer value representing the absolute value of the clipped
+ * @clipped_freq: integer value representing the absolute value of the clipped
* frequency.
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
int id;
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
- unsigned int cpufreq_val;
+ unsigned int clipped_freq;
unsigned int max_level;
unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
+static unsigned int cpufreq_dev_count;
+
+static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);
/**
{
struct cpufreq_cooling_device *cpufreq_dev;
- mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
- mutex_unlock(&cooling_cpufreq_lock);
+ mutex_unlock(&cooling_list_lock);
return get_level(cpufreq_dev, freq);
}
}
- mutex_unlock(&cooling_cpufreq_lock);
+ mutex_unlock(&cooling_list_lock);
pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
return THERMAL_CSTATE_INVALID;
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- unsigned long max_freq = 0;
+ unsigned long clipped_freq;
struct cpufreq_cooling_device *cpufreq_dev;
- switch (event) {
+ if (event != CPUFREQ_ADJUST)
+ return NOTIFY_DONE;
- case CPUFREQ_ADJUST:
- mutex_lock(&cooling_cpufreq_lock);
- list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
- if (!cpumask_test_cpu(policy->cpu,
- &cpufreq_dev->allowed_cpus))
- continue;
+ mutex_lock(&cooling_list_lock);
+ list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+ if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+ continue;
- max_freq = cpufreq_dev->cpufreq_val;
+ /*
+ * policy->max is the maximum allowed frequency defined by the user
+ * and clipped_freq is the maximum that thermal constraints
+ * allow.
+ *
+ * If clipped_freq is lower than policy->max, then we need to
+ * readjust policy->max.
+ *
+ * But, if clipped_freq is greater than policy->max, we don't
+ * need to do anything.
+ */
+ clipped_freq = cpufreq_dev->clipped_freq;
- if (policy->max != max_freq)
- cpufreq_verify_within_limits(policy, 0,
- max_freq);
- }
- mutex_unlock(&cooling_cpufreq_lock);
+ if (policy->max > clipped_freq)
+ cpufreq_verify_within_limits(policy, 0, clipped_freq);
break;
- default:
- return NOTIFY_DONE;
}
+ mutex_unlock(&cooling_list_lock);
return NOTIFY_OK;
}
clip_freq = cpufreq_device->freq_table[state];
cpufreq_device->cpufreq_state = state;
- cpufreq_device->cpufreq_val = clip_freq;
+ cpufreq_device->clipped_freq = clip_freq;
cpufreq_update_policy(cpu);
pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
- cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
+ cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
+ list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+ mutex_unlock(&cooling_list_lock);
+
/* Register the notifier for first cpufreq cooling device */
- if (list_empty(&cpufreq_dev_list))
+ if (!cpufreq_dev_count++)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- list_add(&cpufreq_dev->node, &cpufreq_dev_list);
-
mutex_unlock(&cooling_cpufreq_lock);
return cool_dev;
return;
cpufreq_dev = cdev->devdata;
- mutex_lock(&cooling_cpufreq_lock);
- list_del(&cpufreq_dev->node);
/* Unregister the notifier for the last cpufreq cooling device */
- if (list_empty(&cpufreq_dev_list))
+ mutex_lock(&cooling_cpufreq_lock);
+ if (!--cpufreq_dev_count)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
+
+ mutex_lock(&cooling_list_lock);
+ list_del(&cpufreq_dev->node);
+ mutex_unlock(&cooling_list_lock);
+
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
max_allocatable_power, current_temp,
(s32)control_temp - (s32)current_temp);
- devm_kfree(&tz->device, req_power);
+ kfree(req_power);
unlock:
mutex_unlock(&tz->lock);
return -EINVAL;
}
- params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
return 0;
free:
- devm_kfree(&tz->device, params);
+ kfree(params);
return ret;
}
static void power_allocator_unbind(struct thermal_zone_device *tz)
{
dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
- devm_kfree(&tz->device, tz->governor_data);
+ kfree(tz->governor_data);
tz->governor_data = NULL;
}
err = -EINVAL;
if (old) {
- struct fuse_dev *fud = fuse_get_dev(old);
+ struct fuse_dev *fud = NULL;
+
+ /*
+ * Check against file->f_op because CUSE
+ * uses the same ioctl handler.
+ */
+ if (old->f_op == file->f_op &&
+ old->f_cred->user_ns == file->f_cred->user_ns)
+ fud = fuse_get_dev(old);
if (fud) {
mutex_lock(&fuse_mutex);
uint8_t num_h_tile, num_v_tile;
uint8_t tile_h_loc, tile_v_loc;
uint16_t tile_h_size, tile_v_size;
-
- struct list_head destroy_list;
};
/**
SATA_SSP = 0x06, /* Software Settings Preservation */
SATA_DEVSLP = 0x09, /* Device Sleep */
- SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
-
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
ATA_SET_MAX_PASSWD = 0x01,
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
- ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
return false;
}
-static inline bool ata_id_has_sense_reporting(const u16 *id)
-{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
- return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
-}
-
-static inline bool ata_id_sense_reporting_enabled(const u16 *id)
-{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
- return false;
- return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
-}
-
/**
* ata_id_major_version - get ATA level of drive
* @id: Identify data
/* Exponentially weighted moving average (EWMA) */
-/* For more documentation see lib/average.c */
-
-struct ewma {
- unsigned long internal;
- unsigned long factor;
- unsigned long weight;
-};
-
-extern void ewma_init(struct ewma *avg, unsigned long factor,
- unsigned long weight);
-
-extern struct ewma *ewma_add(struct ewma *avg, unsigned long val);
-
-/**
- * ewma_read() - Get average value
- * @avg: Average structure
- *
- * Returns the average value held in @avg.
- */
-static inline unsigned long ewma_read(const struct ewma *avg)
-{
- return avg->internal >> avg->factor;
-}
+#define DECLARE_EWMA(name, _factor, _weight) \
+ struct ewma_##name { \
+ unsigned long internal; \
+ }; \
+ static inline void ewma_##name##_init(struct ewma_##name *e) \
+ { \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ e->internal = 0; \
+ } \
+ static inline unsigned long \
+ ewma_##name##_read(struct ewma_##name *e) \
+ { \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ return e->internal >> ilog2(_factor); \
+ } \
+ static inline void ewma_##name##_add(struct ewma_##name *e, \
+ unsigned long val) \
+ { \
+ unsigned long internal = ACCESS_ONCE(e->internal); \
+ unsigned long weight = ilog2(_weight); \
+ unsigned long factor = ilog2(_factor); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ \
+ ACCESS_ONCE(e->internal) = internal ? \
+ (((internal << weight) - internal) + \
+ (val << factor)) >> weight : \
+ (val << factor); \
+ }
#endif /* _LINUX_AVERAGE_H */
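
For reference, a minimal usage sketch of the new declaration-based EWMA API; the type name "signal" and the factor/weight pair 1024/8 are illustrative, not taken from an in-tree user:

	DECLARE_EWMA(signal, 1024, 8)	/* illustrative name and parameters */

	struct station {
		struct ewma_signal avg_signal;
	};

	static void station_init(struct station *sta)
	{
		ewma_signal_init(&sta->avg_signal);
	}

	static void station_add_sample(struct station *sta, unsigned long sig)
	{
		ewma_signal_add(&sta->avg_signal, sig);
	}

	static unsigned long station_get_avg(struct station *sta)
	{
		return ewma_signal_read(&sta->avg_signal);
	}

Because factor and weight are now compile-time constants, the ilog2() shifts fold away and the per-type struct shrinks to a single unsigned long.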
spinlock_t gpio_lock;
#ifdef CONFIG_BCMA_DRIVER_GPIO
struct gpio_chip gpio;
- struct irq_domain *irq_domain;
#endif
};
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
- ((a[2] ^ b[2]) & m)) == 0;
+ (__force int)((a[2] ^ b[2]) & m)) == 0;
#else
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif
#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
+#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
-#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
MLX5_REG_PAOS = 0x5006,
+ MLX5_REG_PFCC = 0x5007,
MLX5_REG_PPCNT = 0x5008,
MLX5_REG_PMAOS = 0x5012,
MLX5_REG_PUDE = 0x5009,
};
enum mlx5_port_status {
- MLX5_PORT_UP = 1 << 1,
- MLX5_PORT_DOWN = 1 << 2,
+ MLX5_PORT_UP = 1,
+ MLX5_PORT_DOWN = 2,
};
struct mlx5_uuar_info {
u8 local_port);
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
int proto_mask);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
u8 *vl_hw_cap, u8 local_port);
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
+
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
* @IFF_LIVE_ADDR_CHANGE: device supports hardware address
* change when it's running
* @IFF_MACVLAN: Macvlan device
+ * @IFF_VRF_MASTER: device is a VRF master
+ * @IFF_NO_QUEUE: device can run without qdisc attached
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
IFF_IPVLAN_MASTER = 1<<23,
IFF_IPVLAN_SLAVE = 1<<24,
IFF_VRF_MASTER = 1<<25,
+ IFF_NO_QUEUE = 1<<26,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
#define IFF_VRF_MASTER IFF_VRF_MASTER
+#define IFF_NO_QUEUE IFF_NO_QUEUE
/**
* struct net_device - The DEVICE structure.
static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
- return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
- skb_gro_offset(skb));
+ return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}
static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
grc->delta = 0;
}
-static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
- int start, int offset,
- struct gro_remcsum *grc,
- bool nopartial)
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ unsigned int off, size_t hdrlen,
+ int start, int offset,
+ struct gro_remcsum *grc,
+ bool nopartial)
{
__wsum delta;
+ size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
if (!nopartial) {
- NAPI_GRO_CB(skb)->gro_remcsum_start =
- ((unsigned char *)ptr + start) - skb->head;
- return;
+ NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+ return ptr;
+ }
+
+ ptr = skb_gro_header_fast(skb, off);
+ if (skb_gro_header_hard(skb, off + plen)) {
+ ptr = skb_gro_header_slow(skb, off + plen, off);
+ if (!ptr)
+ return NULL;
}
- delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+ delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+ start, offset);
/* Adjust skb->csum since we changed the packet */
NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
- grc->offset = (ptr + offset) - (void *)skb->head;
+ grc->offset = off + hdrlen + offset;
grc->delta = delta;
+
+ return ptr;
}
static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
struct gro_remcsum *grc)
{
+ void *ptr;
+ size_t plen = grc->offset + sizeof(u16);
+
if (!grc->delta)
return;
- remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+ ptr = skb_gro_header_fast(skb, grc->offset);
+ if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
+ ptr = skb_gro_header_slow(skb, plen, grc->offset);
+ if (!ptr)
+ return;
+ }
+
+ remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
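
Since the helper can now pull additional header bytes, it can fail; callers must use the returned pointer and abort GRO on NULL. A hedged sketch of a tunnel GRO receive callback (struct foohdr, off, rco_start and rco_offset are placeholder names, not an in-tree API):

	struct gro_remcsum grc;
	struct foohdr *fh;

	skb_gro_remcsum_init(&grc);

	fh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + sizeof(*fh))) {
		fh = skb_gro_header_slow(skb, off + sizeof(*fh), off);
		if (!fh)
			goto out;
	}

	/* may pull more headroom and return a new pointer, or NULL */
	fh = skb_gro_remcsum_process(skb, (void *)fh, off, sizeof(*fh),
				     rco_start, rco_offset, &grc, false);
	if (!fh)
		goto out;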
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
static inline bool netif_index_is_vrf(struct net *net, int ifindex)
{
- struct net_device *dev = dev_get_by_index_rcu(net, ifindex);
bool rc = false;
+#if IS_ENABLED(CONFIG_NET_VRF)
+ struct net_device *dev;
+
+ if (ifindex == 0)
+ return false;
+
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, ifindex);
if (dev)
rc = netif_is_vrf(dev);
+ rcu_read_unlock();
+#endif
return rc;
}
#define _NFNL_ACCT_H_
#include <uapi/linux/netfilter/nfnetlink_acct.h>
+#include <net/net_namespace.h>
enum {
NFACCT_NO_QUOTA = -1,
struct nf_acct;
-struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
void nfnl_acct_put(struct nf_acct *acct);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
extern int nfnl_acct_overquota(const struct sk_buff *skb,
#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */
+#define LOWPAN_PRIV_SIZE(llpriv_size) \
+ (sizeof(struct lowpan_priv) + llpriv_size)
+
+enum lowpan_lltypes {
+ LOWPAN_LLTYPE_BTLE,
+ LOWPAN_LLTYPE_IEEE802154,
+};
+
+struct lowpan_priv {
+ enum lowpan_lltypes lltype;
+
+ /* must be last */
+ u8 priv[0] __aligned(sizeof(void *));
+};
+
+static inline
+struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
#ifdef DEBUG
/* print data in line */
static inline void raw_dump_inline(const char *caller, char *msg,
return skb->len + uncomp_header - ret;
}
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
+
int
lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
const u8 *saddr, const u8 saddr_type,
HCI_AUTO_CONN_DIRECT,
HCI_AUTO_CONN_ALWAYS,
HCI_AUTO_CONN_LINK_LOSS,
+ HCI_AUTO_CONN_EXPLICIT,
} auto_connect;
struct hci_conn *conn;
+ bool explicit_connect;
};
extern struct list_head hci_dev_list;
HCI_CONN_DROP,
HCI_CONN_PARAM_REMOVAL_PEND,
HCI_CONN_NEW_LINK_KEY,
+ HCI_CONN_SCANNING,
};
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
return NULL;
}
+static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == LE_LINK && c->state == BT_CONNECT &&
+ !test_bit(HCI_CONN_SCANNING, &c->flags)) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
int hci_disconnect(struct hci_conn *conn, __u8 reason);
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
void hci_chan_list_flush(struct hci_conn *conn);
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, u8 sec_level,
+ u16 conn_timeout, u8 role);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
u8 role);
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
bdaddr_t *addr,
u8 addr_type);
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+ bdaddr_t *addr,
+ u8 addr_type);
void hci_uuids_clear(struct hci_dev *hdev);
* method returns 0.)
*
* @mgmt_frame_register: Notify driver that a management frame type was
- * registered. Note that this callback may not sleep, and cannot run
- * concurrently with itself.
+ * registered. The callback is allowed to sleep.
*
* @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
* Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
s8 max_frame_retries);
int (*set_lbt_mode)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, bool mode);
+ int (*set_ackreq_default)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, bool ackreq);
};
static inline bool
struct list_head list;
struct net_device *netdev;
+ /* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */
+ struct net_device *lowpan_dev;
+
u32 identifier;
/* MAC PIB */
bool lbt;
bool promiscuous_mode;
+
+ /* fallback for acknowledgment bit setting */
+ bool ackreq;
};
#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
- __be32 from, __be32 to, int pseudohdr);
+ __be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
const __be32 *from, const __be32 *to,
- int pseudohdr);
+ bool pseudohdr);
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+ __wsum diff, bool pseudohdr);
static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
__be16 from, __be16 to,
- int pseudohdr)
+ bool pseudohdr)
{
inet_proto_csum_replace4(sum, skb, (__force __be32)from,
(__force __be32)to, pseudohdr);
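
The new diff-based variant covers rewrites that don't fit the fixed-size replace4/replace16 helpers: the caller folds an arbitrary-length change into a single __wsum delta and applies it in one step. A hedged sketch (old_addr, new_addr, addr_len and udph are illustrative names):

	/* illustrative: fold an arbitrary-length rewrite into one delta */
	__wsum diff = csum_sub(csum_partial(new_addr, addr_len, 0),
			       csum_partial(old_addr, addr_len, 0));

	inet_proto_csum_replace_by_diff(&udph->check, skb, diff, true);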
return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
}
+static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
+{
+ return !!((ds->dsa_port_mask) & (1 << p));
+}
+
static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
return ds->phys_port_mask & (1 << p) && ds->ports[p];
__u32 __pad2;
#endif
+#ifdef CONFIG_64BIT
+ struct lwtunnel_state *lwtstate;
/*
* Align __refcnt to a 64 bytes alignment
* (L1_CACHE_SIZE would be too much)
*/
-#ifdef CONFIG_64BIT
- long __pad_to_align_refcnt[2];
+ long __pad_to_align_refcnt[1];
#endif
/*
* __refcnt wants to be on a different cache line from
atomic_t __refcnt; /* client references */
int __use;
unsigned long lastuse;
+#ifndef CONFIG_64BIT
+ struct lwtunnel_state *lwtstate;
+#endif
union {
struct dst_entry *next;
struct rtable __rcu *rt_next;
return NULL;
}
-static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb,
- int family)
+static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
- struct rtable *rt;
+ struct dst_entry *dst;
if (md_dst)
return &md_dst->u.tun_info;
- switch (family) {
- case AF_INET:
- rt = (struct rtable *)skb_dst(skb);
- if (rt && rt->rt_lwtstate)
- return lwt_tun_info(rt->rt_lwtstate);
- break;
- }
+ dst = skb_dst(skb);
+ if (dst && dst->lwtstate)
+ return lwt_tun_info(dst->lwtstate);
return NULL;
}
#define flowi6_proto __fl_common.flowic_proto
#define flowi6_flags __fl_common.flowic_flags
#define flowi6_secid __fl_common.flowic_secid
+#define flowi6_tun_key __fl_common.flowic_tun_key
struct in6_addr daddr;
struct in6_addr saddr;
__be32 flowlabel;
/* more non-fragment space at head required */
unsigned short rt6i_nfheader_len;
u8 rt6i_protocol;
- struct lwtunnel_state *rt6i_lwtstate;
};
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
#define IPTUNNEL_ERR_TIMEO (30*HZ)
/* Used to memset ip_tunnel padding. */
-#define IP_TUNNEL_KEY_SIZE \
- (offsetof(struct ip_tunnel_key, tp_dst) + \
- FIELD_SIZEOF(struct ip_tunnel_key, tp_dst))
+#define IP_TUNNEL_KEY_SIZE offsetofend(struct ip_tunnel_key, tp_dst)
+
+/* Used to memset ipv4 address padding. */
+#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
+#define IP_TUNNEL_KEY_IPV4_PAD_LEN \
+ (FIELD_SIZEOF(struct ip_tunnel_key, u) - \
+ FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
struct ip_tunnel_key {
__be64 tun_id;
- __be32 ipv4_src;
- __be32 ipv4_dst;
+ union {
+ struct {
+ __be32 src;
+ __be32 dst;
+ } ipv4;
+ struct {
+ struct in6_addr src;
+ struct in6_addr dst;
+ } ipv6;
+ } u;
__be16 tun_flags;
- __u8 ipv4_tos;
- __u8 ipv4_ttl;
+ u8 tos; /* TOS for IPv4, TC for IPv6 */
+ u8 ttl; /* TTL for IPv4, HL for IPv6 */
__be16 tp_src;
__be16 tp_dst;
-} __packed __aligned(4); /* Minimize padding. */
+};
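
Only the IPv4 initializer is updated in this series; the IPv6 half of the new union would be filled analogously. A hedged sketch (saddr6, daddr6, tclass and hop_limit are illustrative):

	/* illustrative IPv6 fill; not part of this hunk */
	tun_info->key.u.ipv6.src = *saddr6;
	tun_info->key.u.ipv6.dst = *daddr6;
	tun_info->key.tos = tclass;	/* TC for IPv6 */
	tun_info->key.ttl = hop_limit;	/* HL for IPv6 */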
/* Indicates whether the tunnel info structure represents receive
* or transmit tunnel parameters.
#endif
struct ip_tunnel_encap {
- __u16 type;
- __u16 flags;
+ u16 type;
+ u16 flags;
__be16 sport;
__be16 dport;
};
* arrived */
/* These four fields used only by GRE */
- __u32 i_seqno; /* The last seen seqno */
- __u32 o_seqno; /* The last output seqno */
+ u32 i_seqno; /* The last seen seqno */
+ u32 o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
int mlink;
const void *opts, u8 opts_len)
{
tun_info->key.tun_id = tun_id;
- tun_info->key.ipv4_src = saddr;
- tun_info->key.ipv4_dst = daddr;
- tun_info->key.ipv4_tos = tos;
- tun_info->key.ipv4_ttl = ttl;
+ tun_info->key.u.ipv4.src = saddr;
+ tun_info->key.u.ipv4.dst = daddr;
+ memset((unsigned char *)&tun_info->key + IP_TUNNEL_KEY_IPV4_PAD,
+ 0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
+ tun_info->key.tos = tos;
+ tun_info->key.ttl = ttl;
tun_info->key.tun_flags = tun_flags;
/* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 proto,
- __u8 tos, __u8 ttl, __be16 df, bool xnet);
+ __be32 src, __be32 dst, u8 proto,
+ u8 tos, u8 ttl, __be16 df, bool xnet);
struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
int gso_type_mask);
#define LWTUNNEL_HASH_SIZE (1 << LWTUNNEL_HASH_BITS)
/* lw tunnel state flags */
-#define LWTUNNEL_STATE_OUTPUT_REDIRECT 0x1
+#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0)
+#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
struct lwtunnel_state {
__u16 type;
__u16 flags;
atomic_t refcnt;
+ int (*orig_output)(struct sock *sk, struct sk_buff *skb);
+ int (*orig_input)(struct sk_buff *);
int len;
__u8 data[0];
};
int (*build_state)(struct net_device *dev, struct nlattr *encap,
struct lwtunnel_state **ts);
int (*output)(struct sock *sk, struct sk_buff *skb);
+ int (*input)(struct sk_buff *skb);
int (*fill_encap)(struct sk_buff *skb,
struct lwtunnel_state *lwtstate);
int (*get_encap_size)(struct lwtunnel_state *lwtstate);
};
#ifdef CONFIG_LWTUNNEL
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+ kfree(lws);
+}
+
static inline struct lwtunnel_state *
lwtstate_get(struct lwtunnel_state *lws)
{
return;
if (atomic_dec_and_test(&lws->refcnt))
- kfree(lws);
+ lwtstate_free(lws);
}
static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
return false;
}
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+ if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_INPUT_REDIRECT))
+ return true;
+
+ return false;
+}
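
The input hook mirrors the existing output redirect: a route-construction path saves the original dst input handler in orig_input and points dst->input at lwtunnel_input(). A hedged sketch (the nh_lwtstate nexthop field is assumed from the surrounding series; the exact call site is not in this hunk):

	rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);	/* assumed field */
	if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
		rt->dst.lwtstate->orig_input = rt->dst.input;
		rt->dst.input = lwtunnel_input;
	}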
int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
unsigned int num);
int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
-int lwtunnel_output6(struct sock *sk, struct sk_buff *skb);
+int lwtunnel_input(struct sk_buff *skb);
#else
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+}
+
static inline struct lwtunnel_state *
lwtstate_get(struct lwtunnel_state *lws)
{
return false;
}
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+ return false;
+}
+
static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
unsigned int num)
{
return -EOPNOTSUPP;
}
-static inline int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
+static inline int lwtunnel_input(struct sk_buff *skb)
{
return -EOPNOTSUPP;
}
* @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame.
* If this flag is set, the stack cannot do any replay detection
* hence the driver or hardware will have to do that.
+ * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
+ * flag indicates that the PN was verified for replay protection.
+ * Note that this flag is currently only supported when the frame
+ * is also decrypted (i.e. @RX_FLAG_DECRYPTED must be set).
* @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
* the frame.
* @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
* @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
* number (@ampdu_reference) must be populated and be a distinct number for
* each A-MPDU
- * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
- * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
- * monitoring purposes only
* @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
* subframes of a single A-MPDU
* @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
RX_FLAG_NO_SIGNAL_VAL = BIT(12),
RX_FLAG_HT_GF = BIT(13),
RX_FLAG_AMPDU_DETAILS = BIT(14),
- RX_FLAG_AMPDU_REPORT_ZEROLEN = BIT(15),
- RX_FLAG_AMPDU_IS_ZEROLEN = BIT(16),
+ RX_FLAG_PN_VALIDATED = BIT(15),
+ /* bit 16 free */
RX_FLAG_AMPDU_LAST_KNOWN = BIT(17),
RX_FLAG_AMPDU_IS_LAST = BIT(18),
RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19),
* - Temporal Authenticator Rx MIC Key (64 bits)
* @icv_len: The ICV length for this key type
* @iv_len: The IV length for this key type
+ * @drv_priv: pointer for driver use
*/
struct ieee80211_key_conf {
+ void *drv_priv;
atomic64_t tx_pn;
u32 cipher;
u8 icv_len;
* @tdls: indicates whether the STA is a TDLS peer
* @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
* valid if the STA is a TDLS peer in the first place.
- * @mfp: indicates whether the STA uses management frame protection or not.
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
*/
struct ieee80211_sta {
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
bool tdls_initiator;
- bool mfp;
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
* @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
* in one command, mac80211 doesn't have to run separate scans per band.
*
+ * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
+ * than the BSS bandwidth for a TDLS link on the base channel.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
IEEE80211_HW_CHANCTX_STA_CSA,
IEEE80211_HW_SUPPORTS_CLONED_SKBS,
IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+ IEEE80211_HW_TDLS_WIDER_BW,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
void ieee80211_restart_hw(struct ieee80211_hw *hw);
/**
- * ieee80211_napi_add - initialize mac80211 NAPI context
- * @hw: the hardware to initialize the NAPI context on
- * @napi: the NAPI context to initialize
- * @napi_dev: dummy NAPI netdevice, here to not waste the space if the
- * driver doesn't use NAPI
- * @poll: poll function
- * @weight: default weight
+ * ieee80211_rx_napi - receive frame from NAPI context
+ *
+ * Use this function to hand received frames to mac80211. The receive
+ * buffer in @skb must start with an IEEE 802.11 header. In case of a
+ * paged @skb is used, the driver is recommended to put the ieee80211
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * This function must be called with BHs disabled.
*
- * See also netif_napi_add().
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ * @napi: the NAPI context
*/
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight);
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct napi_struct *napi);
/**
* ieee80211_rx - receive frame
* @hw: the hardware this frame came in on
* @skb: the buffer to receive, owned by mac80211 after this call
*/
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb);
+static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ ieee80211_rx_napi(hw, skb, NULL);
+}
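
With the NAPI context now passed per frame, a driver feeds mac80211 directly from its own poll routine instead of letting mac80211 own the NAPI instance. A hedged sketch (the foo_* names are placeholders, not a real driver):

	static int foo_napi_poll(struct napi_struct *napi, int budget)
	{
		struct foo_hw *fh = container_of(napi, struct foo_hw, napi);
		struct sk_buff *skb;
		int done = 0;

		/* foo_fetch_rx() is a placeholder for the driver's RX ring */
		while (done < budget && (skb = foo_fetch_rx(fh)) != NULL) {
			ieee80211_rx_napi(fh->hw, skb, napi);
			done++;
		}
		if (done < budget)
			napi_complete(napi);
		return done;
	}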
/**
* ieee80211_rx_irqsafe - receive frame
void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
struct sk_buff *skb, u8 *p2k);
-/**
- * ieee80211_aes_cmac_calculate_k1_k2 - calculate the AES-CMAC sub keys
- *
- * This function computes the two AES-CMAC sub-keys, based on the
- * previously installed master key.
- *
- * @keyconf: the parameter passed with the set key
- * @k1: a buffer to be filled with the 1st sub-key
- * @k2: a buffer to be filled with the 2nd sub-key
- */
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
- u8 *k1, u8 *k2);
-
/**
* ieee80211_get_key_tx_seq - get key TX sequence counter
*
void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
const struct in6_addr *solicit,
- const struct in6_addr *daddr, const struct in6_addr *saddr);
+ const struct in6_addr *daddr, const struct in6_addr *saddr,
+ struct sk_buff *oskb);
void ndisc_send_rs(struct net_device *dev,
const struct in6_addr *saddr, const struct in6_addr *daddr);
#endif
struct sock *nfnl;
struct sock *nfnl_stash;
+#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
+ struct list_head nfnl_acct_list;
+#endif
#endif
#ifdef CONFIG_WEXT_CORE
struct sk_buff_head wext_nlevents;
--- /dev/null
+#ifndef _NF_DUP_IPV4_H_
+#define _NF_DUP_IPV4_H_
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ const struct in_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV4_H_ */
--- /dev/null
+#ifndef _NF_DUP_IPV6_H_
+#define _NF_DUP_IPV6_H_
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+ const struct in6_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV6_H_ */
void nf_ct_iterate_cleanup(struct net *net,
int (*iter)(struct nf_conn *i, void *data),
void *data, u32 portid, int report);
+
+struct nf_conntrack_zone;
+
void nf_conntrack_free(struct nf_conn *ct);
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp);
extern unsigned int nf_conntrack_hash_rnd;
void init_nf_conntrack_hash_rnd(void);
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ gfp_t flags);
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);
int __nf_conntrack_confirm(struct sk_buff *skb);
#ifndef _NF_CONNTRACK_EXPECT_H
#define _NF_CONNTRACK_EXPECT_H
+
#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_zones.h>
extern unsigned int nf_ct_expect_hsize;
extern unsigned int nf_ct_expect_max;
void nf_conntrack_expect_fini(void);
struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);
struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);
struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
#ifndef _NF_CONNTRACK_ZONES_H
#define _NF_CONNTRACK_ZONES_H
-#define NF_CT_DEFAULT_ZONE 0
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-#include <net/netfilter/nf_conntrack_extend.h>
+#define NF_CT_DEFAULT_ZONE_ID 0
+
+#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY)
+
+#define NF_CT_DEFAULT_ZONE_DIR (NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL)
+
+#define NF_CT_FLAG_MARK 1
struct nf_conntrack_zone {
u16 id;
+ u8 flags;
+ u8 dir;
};
-static inline u16 nf_ct_zone(const struct nf_conn *ct)
+extern const struct nf_conntrack_zone nf_ct_zone_dflt;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack_extend.h>
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone(const struct nf_conn *ct)
{
+ const struct nf_conntrack_zone *nf_ct_zone = NULL;
+
#ifdef CONFIG_NF_CONNTRACK_ZONES
- struct nf_conntrack_zone *nf_ct_zone;
nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
- if (nf_ct_zone)
- return nf_ct_zone->id;
#endif
- return NF_CT_DEFAULT_ZONE;
+ return nf_ct_zone ? nf_ct_zone : &nf_ct_zone_dflt;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
+{
+ zone->id = id;
+ zone->flags = flags;
+ zone->dir = dir;
+
+ return zone;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
+ struct nf_conntrack_zone *tmp)
+{
+ const struct nf_conntrack_zone *zone;
+
+ if (!tmpl)
+ return &nf_ct_zone_dflt;
+
+ zone = nf_ct_zone(tmpl);
+ if (zone->flags & NF_CT_FLAG_MARK)
+ zone = nf_ct_zone_init(tmp, skb->mark, zone->dir, 0);
+
+ return zone;
+}
+
+static inline int nf_ct_zone_add(struct nf_conn *ct, gfp_t flags,
+ const struct nf_conntrack_zone *info)
+{
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ struct nf_conntrack_zone *nf_ct_zone;
+
+ nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, flags);
+ if (!nf_ct_zone)
+ return -ENOMEM;
+
+ nf_ct_zone_init(nf_ct_zone, info->id, info->dir,
+ info->flags);
+#endif
+ return 0;
}
-#endif /* CONFIG_NF_CONNTRACK || CONFIG_NF_CONNTRACK_MODULE */
+static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
+ enum ip_conntrack_dir dir)
+{
+ return zone->dir & (1 << dir);
+}
+
+static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
+ enum ip_conntrack_dir dir)
+{
+ return nf_ct_zone_matches_dir(zone, dir) ?
+ zone->id : NF_CT_DEFAULT_ZONE_ID;
+}
+
+static inline bool nf_ct_zone_equal(const struct nf_conn *a,
+ const struct nf_conntrack_zone *b,
+ enum ip_conntrack_dir dir)
+{
+ return nf_ct_zone_id(nf_ct_zone(a), dir) ==
+ nf_ct_zone_id(b, dir);
+}
+
+static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
+ const struct nf_conntrack_zone *b)
+{
+ return nf_ct_zone(a)->id == b->id;
+}
+#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */
#endif /* _NF_CONNTRACK_ZONES_H */
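
A hedged sketch of the template path under the new descriptor-based API: the per-packet zone is resolved into a stack temporary (honouring NF_CT_FLAG_MARK) and then passed by pointer to the lookup (variable names are illustrative; the call site is not part of this hunk):

	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_zone tmp;
	const struct nf_conntrack_zone *zone;

	/* tmpl may be NULL; falls back to nf_ct_zone_dflt */
	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
	h = nf_conntrack_find_get(net, zone, &tuple);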
--- /dev/null
+#ifndef _NFT_DUP_H_
+#define _NFT_DUP_H_
+
+struct nft_dup_inet {
+ enum nft_registers sreg_addr:8;
+ enum nft_registers sreg_dev:8;
+};
+
+#endif /* _NFT_DUP_H_ */
NL802154_CMD_SET_LBT_MODE,
+ NL802154_CMD_SET_ACKREQ_DEFAULT,
+
/* add new commands above here */
/* used to define NL802154_CMD_MAX below */
NL802154_ATTR_SUPPORTED_COMMANDS,
+ NL802154_ATTR_ACKREQ_DEFAULT,
+
/* add attributes here, update the policy in nl802154.c */
__NL802154_ATTR_AFTER_LAST,
struct list_head rt_uncached;
struct uncached_list *rt_uncached_list;
- struct lwtunnel_state *rt_lwtstate;
};
static inline bool rt_is_input_route(const struct rtable *rt)
struct slave_queue {
struct list_head all_slaves;
- int num_slaves;
};
struct net_vrf {
if (!dev)
return 0;
- if (netif_is_vrf(dev))
+ if (netif_is_vrf(dev)) {
ifindex = dev->ifindex;
- else {
+ } else {
vrf_ptr = rcu_dereference(dev->vrf_ptr);
if (vrf_ptr)
ifindex = vrf_ptr->ifindex;
return ifindex;
}
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+ int ifindex;
+
+ rcu_read_lock();
+ ifindex = vrf_master_ifindex_rcu(dev);
+ rcu_read_unlock();
+
+ return ifindex;
+}
+
/* called with rcu_read_lock */
static inline int vrf_dev_table_rcu(const struct net_device *dev)
{
return tb_id;
}
+static inline int vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+ struct net_device *dev;
+ int tb_id = 0;
+
+ if (!ifindex)
+ return 0;
+
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (dev)
+ tb_id = vrf_dev_table_rcu(dev);
+
+ rcu_read_unlock();
+
+ return tb_id;
+}
+
/* called with rtnl */
static inline int vrf_dev_table_rtnl(const struct net_device *dev)
{
return 0;
}
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+ return 0;
+}
+
static inline int vrf_dev_table_rcu(const struct net_device *dev)
{
return 0;
return 0;
}
+static inline int vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+ return 0;
+}
+
static inline int vrf_dev_table_rtnl(const struct net_device *dev)
{
return 0;
struct timer_list age_timer;
spinlock_t hash_lock;
unsigned int addrcnt;
+ struct gro_cells gro_cells;
struct vxlan_config cfg;
}
#endif
#endif
+
+static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
+{
+ return vs->sock->sk->sk_family;
+}
unsigned short family;
struct dst_ops *dst_ops;
void (*garbage_collect)(struct net *net);
- struct dst_entry *(*dst_lookup)(struct net *net, int tos,
+ struct dst_entry *(*dst_lookup)(struct net *net,
+ int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr);
- int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
+ int (*get_saddr)(struct net *net, int oif,
+ xfrm_address_t *saddr,
+ xfrm_address_t *daddr);
void (*decode_session)(struct sk_buff *skb,
struct flowi *fl,
int reverse);
u64 * info_out);
extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-extern void scsi_set_sense_information(u8 *buf, u64 info);
extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
int io_ops_count;
};
+#ifdef CONFIG_SND_SOC_TOPOLOGY
+
/* gets a pointer to data from the firmware block header */
static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
{
const struct snd_soc_tplg_widget_events *events, int num_events,
u16 event_type);
+#else
+
+static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
+ u32 index)
+{
+ return 0;
+}
+
+#endif
+
#endif
#define PACKET_TX_HAS_OFF 19
#define PACKET_QDISC_BYPASS 20
#define PACKET_ROLLOVER_STATS 21
+#define PACKET_FANOUT_DATA 22
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1
#define PACKET_FANOUT_ROLLOVER 3
#define PACKET_FANOUT_RND 4
#define PACKET_FANOUT_QM 5
+#define PACKET_FANOUT_CBPF 6
+#define PACKET_FANOUT_EBPF 7
#define PACKET_FANOUT_FLAG_ROLLOVER 0x1000
#define PACKET_FANOUT_FLAG_DEFRAG 0x8000
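
A hedged user-space sketch of the new BPF fanout modes: the socket first joins a fanout group with PACKET_FANOUT_CBPF, then attaches a classic BPF program through PACKET_FANOUT_DATA; the program's return value selects the socket within the group. The two-instruction filter below (return the byte at offset 80) is purely illustrative:

	struct sock_filter code[] = {		/* needs <linux/filter.h> */
		{ BPF_LD | BPF_B | BPF_ABS, 0, 0, 80 },	/* A = pkt[80] */
		{ BPF_RET | BPF_A, 0, 0, 0 },		/* return A */
	};
	struct sock_fprog fprog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};
	int val = fanout_id | (PACKET_FANOUT_CBPF << 16);

	/* join the group first; FANOUT_DATA applies to an existing group */
	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)) < 0)
		perror("PACKET_FANOUT");
	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
		       &fprog, sizeof(fprog)) < 0)
		perror("PACKET_FANOUT_DATA");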
--- /dev/null
+/* ila.h - ILA Interface */
+
+#ifndef _UAPI_LINUX_ILA_H
+#define _UAPI_LINUX_ILA_H
+
+enum {
+ ILA_ATTR_UNSPEC,
+ ILA_ATTR_LOCATOR, /* u64 */
+
+ __ILA_ATTR_MAX,
+};
+
+#define ILA_ATTR_MAX (__ILA_ATTR_MAX - 1)
+
+#endif /* _UAPI_LINUX_ILA_H */
LWTUNNEL_ENCAP_NONE,
LWTUNNEL_ENCAP_MPLS,
LWTUNNEL_ENCAP_IP,
+ LWTUNNEL_ENCAP_ILA,
+ LWTUNNEL_ENCAP_IP6,
__LWTUNNEL_ENCAP_MAX,
};
#define LWTUNNEL_ENCAP_MAX (__LWTUNNEL_ENCAP_MAX - 1)
+enum lwtunnel_ip_t {
+ LWTUNNEL_IP_UNSPEC,
+ LWTUNNEL_IP_ID,
+ LWTUNNEL_IP_DST,
+ LWTUNNEL_IP_SRC,
+ LWTUNNEL_IP_TTL,
+ LWTUNNEL_IP_TOS,
+ LWTUNNEL_IP_SPORT,
+ LWTUNNEL_IP_DPORT,
+ LWTUNNEL_IP_FLAGS,
+ __LWTUNNEL_IP_MAX,
+};
+
+#define LWTUNNEL_IP_MAX (__LWTUNNEL_IP_MAX - 1)
+
+enum lwtunnel_ip6_t {
+ LWTUNNEL_IP6_UNSPEC,
+ LWTUNNEL_IP6_ID,
+ LWTUNNEL_IP6_DST,
+ LWTUNNEL_IP6_SRC,
+ LWTUNNEL_IP6_HOPLIMIT,
+ LWTUNNEL_IP6_TC,
+ LWTUNNEL_IP6_SPORT,
+ LWTUNNEL_IP6_DPORT,
+ LWTUNNEL_IP6_FLAGS,
+ __LWTUNNEL_IP6_MAX,
+};
+
+#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
#endif /* _UAPI_LWTUNNEL_H_ */
};
#define NFTA_CT_MAX (__NFTA_CT_MAX - 1)
+enum nft_limit_type {
+ NFT_LIMIT_PKTS,
+ NFT_LIMIT_PKT_BYTES
+};
+
/**
* enum nft_limit_attributes - nf_tables limit expression netlink attributes
*
* @NFTA_LIMIT_RATE: refill rate (NLA_U64)
* @NFTA_LIMIT_UNIT: refill unit (NLA_U64)
+ * @NFTA_LIMIT_BURST: burst (NLA_U32)
+ * @NFTA_LIMIT_TYPE: type of limit (NLA_U32: enum nft_limit_type)
*/
enum nft_limit_attributes {
NFTA_LIMIT_UNSPEC,
NFTA_LIMIT_RATE,
NFTA_LIMIT_UNIT,
+ NFTA_LIMIT_BURST,
+ NFTA_LIMIT_TYPE,
__NFTA_LIMIT_MAX
};
#define NFTA_LIMIT_MAX (__NFTA_LIMIT_MAX - 1)
};
#define NFTA_REDIR_MAX (__NFTA_REDIR_MAX - 1)
+/**
+ * enum nft_dup_attributes - nf_tables dup expression netlink attributes
+ *
+ * @NFTA_DUP_SREG_ADDR: source register of address (NLA_U32: nft_registers)
+ * @NFTA_DUP_SREG_DEV: source register of output interface (NLA_U32: nft_registers)
+ */
+enum nft_dup_attributes {
+ NFTA_DUP_UNSPEC,
+ NFTA_DUP_SREG_ADDR,
+ NFTA_DUP_SREG_DEV,
+ __NFTA_DUP_MAX
+};
+#define NFTA_DUP_MAX (__NFTA_DUP_MAX - 1)
+
/**
* enum nft_gen_attributes - nf_tables ruleset generation attributes
*
CTA_TUPLE_UNSPEC,
CTA_TUPLE_IP,
CTA_TUPLE_PROTO,
+ CTA_TUPLE_ZONE,
__CTA_TUPLE_MAX
};
#define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1)
enum {
XT_CT_NOTRACK = 1 << 0,
XT_CT_NOTRACK_ALIAS = 1 << 1,
- XT_CT_MASK = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS,
+ XT_CT_ZONE_DIR_ORIG = 1 << 2,
+ XT_CT_ZONE_DIR_REPL = 1 << 3,
+ XT_CT_ZONE_MARK = 1 << 4,
+
+ XT_CT_MASK = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS |
+ XT_CT_ZONE_DIR_ORIG | XT_CT_ZONE_DIR_REPL |
+ XT_CT_ZONE_MARK,
};
struct xt_ct_target_info {
/* Routing message attributes */
-enum ip_tunnel_t {
- IP_TUN_UNSPEC,
- IP_TUN_ID,
- IP_TUN_DST,
- IP_TUN_SRC,
- IP_TUN_TTL,
- IP_TUN_TOS,
- IP_TUN_SPORT,
- IP_TUN_DPORT,
- IP_TUN_FLAGS,
- __IP_TUN_MAX,
-};
-
-#define IP_TUN_MAX (__IP_TUN_MAX - 1)
-
enum rtattr_type_t {
RTA_UNSPEC,
RTA_DST,
#include <linux/types.h>
#include <sound/asound.h>
+#ifndef __KERNEL__
+#error This API is an early revision and not enabled in the current
+#error kernel release, it will be enabled in a future kernel version
+#error with incompatible changes to what is here.
+#endif
+
/*
* Maximum number of channels topology kcontrol can represent.
*/
ipc_rcu_free(head);
}
+/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
+
/*
* Wait until all currently ongoing simple ops have completed.
* Caller must own sem_perm.lock.
sem = sma->sem_base + i;
spin_unlock_wait(&sem->lock);
}
+ ipc_smp_acquire__after_spin_is_unlocked();
}
/*
/* Then check that the global lock is free */
if (!spin_is_locked(&sma->sem_perm.lock)) {
/*
- * The ipc object lock check must be visible on all
- * cores before rechecking the complex count. Otherwise
- * we can race with another thread that does:
+ * We need a memory barrier with acquire semantics,
+ * otherwise we can race with another thread that does:
* complex_count++;
* spin_unlock(sem_perm.lock);
*/
- smp_rmb();
+ ipc_smp_acquire__after_spin_is_unlocked();
/*
* Now repeat the test of complex_count:
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
struct sem_undo, list_proc);
- if (&un->list_proc == &ulp->list_proc)
- semid = -1;
- else
- semid = un->semid;
+ if (&un->list_proc == &ulp->list_proc) {
+ /*
+ * We must wait for freeary() before freeing this ulp,
+ * in case we raced with the last sem_undo. There is a
+ * small window where we might exit while freeary() has
+ * not yet finished unlocking sem_undo_list.
+ */
+ spin_unlock_wait(&ulp->lock);
+ rcu_read_unlock();
+ break;
+ }
+ spin_lock(&ulp->lock);
+ semid = un->semid;
+ spin_unlock(&ulp->lock);
+ /* exit_sem raced with IPC_RMID, nothing to do */
if (semid == -1) {
rcu_read_unlock();
- break;
+ continue;
}
- sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
/* exit_sem raced with IPC_RMID, nothing to do */
if (IS_ERR(sma)) {
rcu_read_unlock();
ipc_assert_locked_object(&sma->sem_perm);
list_del(&un->list_id);
- spin_lock(&ulp->lock);
+ /* we are the last process using this ulp, acquiring ulp->lock
+ * isn't required. Besides that, we are also protected against
+ * IPC_RMID as we hold sma->sem_perm lock now
+ */
list_del_rcu(&un->list_proc);
- spin_unlock(&ulp->lock);
/* perform adjustments registered in un */
for (i = 0; i < sma->sem_nsems; i++) {
spin_unlock_irq(&callback_lock);
/* use trialcs->mems_allowed as a temp variable */
- update_nodemasks_hier(cs, &cs->mems_allowed);
+ update_nodemasks_hier(cs, &trialcs->mems_allowed);
done:
return retval;
}
perf_pmu_disable(event->pmu);
- event->tstamp_running += tstamp - event->tstamp_stopped;
-
perf_set_shadow_time(event, ctx, tstamp);
perf_log_itrace_start(event);
goto out;
}
+ event->tstamp_running += tstamp - event->tstamp_stopped;
+
if (!is_software_event(event))
cpuctx->active_oncpu++;
if (!ctx->nr_active++)
perf_event_for_each_child(sibling, func);
}
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
- struct perf_event_context *ctx = event->ctx;
- int ret = 0, active;
+struct period_event {
+ struct perf_event *event;
u64 value;
+};
- if (!is_sampling_event(event))
- return -EINVAL;
-
- if (copy_from_user(&value, arg, sizeof(value)))
- return -EFAULT;
-
- if (!value)
- return -EINVAL;
+static int __perf_event_period(void *info)
+{
+ struct period_event *pe = info;
+ struct perf_event *event = pe->event;
+ struct perf_event_context *ctx = event->ctx;
+ u64 value = pe->value;
+ bool active;
- raw_spin_lock_irq(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
if (event->attr.freq) {
- if (value > sysctl_perf_event_sample_rate) {
- ret = -EINVAL;
- goto unlock;
- }
-
event->attr.sample_freq = value;
} else {
event->attr.sample_period = value;
event->pmu->start(event, PERF_EF_RELOAD);
perf_pmu_enable(ctx->pmu);
}
+ raw_spin_unlock(&ctx->lock);
-unlock:
+ return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+ struct period_event pe = { .event = event, };
+ struct perf_event_context *ctx = event->ctx;
+ struct task_struct *task;
+ u64 value;
+
+ if (!is_sampling_event(event))
+ return -EINVAL;
+
+ if (copy_from_user(&value, arg, sizeof(value)))
+ return -EFAULT;
+
+ if (!value)
+ return -EINVAL;
+
+ if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+ return -EINVAL;
+
+ task = ctx->task;
+ pe.value = value;
+
+ if (!task) {
+ cpu_function_call(event->cpu, __perf_event_period, &pe);
+ return 0;
+ }
+
+retry:
+ if (!task_function_call(task, __perf_event_period, &pe))
+ return 0;
+
+ raw_spin_lock_irq(&ctx->lock);
+ if (ctx->is_active) {
+ raw_spin_unlock_irq(&ctx->lock);
+ task = ctx->task;
+ goto retry;
+ }
+
+ __perf_event_period(&pe);
raw_spin_unlock_irq(&ctx->lock);
- return ret;
+ return 0;
}
static const struct file_operations perf_fops;
* to user-space before waking everybody up.
*/
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+ /* only the parent has fasync state */
+ if (event->parent)
+ event = event->parent;
+ return &event->fasync;
+}
+
void perf_event_wakeup(struct perf_event *event)
{
ring_buffer_wakeup(event);
if (event->pending_kill) {
- kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+ kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
event->pending_kill = 0;
}
}
else
perf_event_output(event, data, regs);
- if (event->fasync && event->pending_kill) {
+ if (*perf_event_fasync(event) && event->pending_kill) {
event->pending_wakeup = 1;
irq_work_queue(&event->pending);
}
rb->aux_priv = NULL;
}
- for (pg = 0; pg < rb->aux_nr_pages; pg++)
- rb_free_aux_page(rb, pg);
+ if (rb->aux_nr_pages) {
+ for (pg = 0; pg < rb->aux_nr_pages; pg++)
+ rb_free_aux_page(rb, pg);
- kfree(rb->aux_pages);
- rb->aux_nr_pages = 0;
+ kfree(rb->aux_pages);
+ rb->aux_nr_pages = 0;
+ }
}
void rb_free_aux(struct ring_buffer *rb)
#include <linux/hash.h>
#include <linux/bootmem.h>
+#include <linux/debug_locks.h>
/*
* Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
{
struct __qspinlock *l = (void *)lock;
struct pv_node *node;
+ u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
/*
* We must not unlock if SLOW, because in that case we must first
* unhash. Otherwise it would be possible to have multiple @lock
* entries, which would be BAD.
*/
- if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+ if (likely(lockval == _Q_LOCKED_VAL))
return;
+ if (unlikely(lockval != _Q_SLOW_VAL)) {
+ if (debug_locks_silent)
+ return;
+ WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+ return;
+ }
+
/*
* Since the above failed to release, this must be the SLOW path.
* Therefore start by looking up the blocked node and unhashing it.
config LRU_CACHE
tristate
-config AVERAGE
- bool "Averaging functions"
- help
- This option is provided for the case where no in-kernel-tree
- modules require averaging functions, but a module built outside
- the kernel tree does. Such modules that use library averaging
- functions require Y here.
-
- If unsure, say N.
-
config CLZ_TAB
bool
+++ /dev/null
-/*
- * lib/average.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
- */
-
-#include <linux/export.h>
-#include <linux/average.h>
-#include <linux/kernel.h>
-#include <linux/bug.h>
-#include <linux/log2.h>
-
-/**
- * DOC: Exponentially Weighted Moving Average (EWMA)
- *
- * These are generic functions for calculating Exponentially Weighted Moving
- * Averages (EWMA). We keep a structure with the EWMA parameters and a scaled
- * up internal representation of the average value to prevent rounding errors.
- * The factor for scaling up and the exponential weight (or decay rate) have to
- * be specified thru the init fuction. The structure should not be accessed
- * directly but only thru the helper functions.
- */
-
-/**
- * ewma_init() - Initialize EWMA parameters
- * @avg: Average structure
- * @factor: Factor to use for the scaled up internal value. The maximum value
- * of averages can be ULONG_MAX/(factor*weight). For performance reasons
- * factor has to be a power of 2.
- * @weight: Exponential weight, or decay rate. This defines how fast the
- * influence of older values decreases. For performance reasons weight has
- * to be a power of 2.
- *
- * Initialize the EWMA parameters for a given struct ewma @avg.
- */
-void ewma_init(struct ewma *avg, unsigned long factor, unsigned long weight)
-{
- WARN_ON(!is_power_of_2(weight) || !is_power_of_2(factor));
-
- avg->weight = ilog2(weight);
- avg->factor = ilog2(factor);
- avg->internal = 0;
-}
-EXPORT_SYMBOL(ewma_init);
-
-/**
- * ewma_add() - Exponentially weighted moving average (EWMA)
- * @avg: Average structure
- * @val: Current value
- *
- * Add a sample to the average.
- */
-struct ewma *ewma_add(struct ewma *avg, unsigned long val)
-{
- unsigned long internal = ACCESS_ONCE(avg->internal);
-
- ACCESS_ONCE(avg->internal) = internal ?
- (((internal << avg->weight) - internal) +
- (val << avg->factor)) >> avg->weight :
- (val << avg->factor);
- return avg;
-}
-EXPORT_SYMBOL(ewma_add);
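
For reference, the recurrence the removed helpers implemented is
avg' = avg + (val - avg) / weight, held scaled up by factor to limit
rounding error; with factor = 2^f and weight = 2^w it reduces to the
shifts below. A self-contained transcription of the same arithmetic:

	/* One EWMA step as lib/average.c computed it: 'internal' holds
	 * the average scaled up by 2^f; the first sample seeds it.
	 */
	static unsigned long ewma_step(unsigned long internal,
				       unsigned long val, int f, int w)
	{
		if (!internal)
			return val << f;
		/* internal * (2^w - 1) / 2^w  +  (val << f) / 2^w */
		return (((internal << w) - internal) + (val << f)) >> w;
	}
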
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
+#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/vmalloc.h>
#define MAX_ENTRIES 1000000
#define TEST_INSERT_FAIL INT_MAX
module_param(size, int, 0);
MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
+static int tcount = 10;
+module_param(tcount, int, 0);
+MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");
+
struct test_obj {
int value;
struct rhash_head node;
};
+struct thread_data {
+ int id;
+ struct task_struct *task;
+ struct test_obj *objs;
+};
+
static struct test_obj array[MAX_ENTRIES];
static struct rhashtable_params test_rht_params = {
.nulls_base = (3U << RHT_BASE_SHIFT),
};
+static struct semaphore prestart_sem;
+static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
+
static int __init test_rht_lookup(struct rhashtable *ht)
{
unsigned int i;
static struct rhashtable ht;
+static int thread_lookup_test(struct thread_data *tdata)
+{
+ int i, err = 0;
+
+ for (i = 0; i < entries; i++) {
+ struct test_obj *obj;
+ int key = (tdata->id << 16) | i;
+
+ obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
+ if (obj && (tdata->objs[i].value == TEST_INSERT_FAIL)) {
+ pr_err(" found unexpected object %d\n", key);
+ err++;
+ } else if (!obj && (tdata->objs[i].value != TEST_INSERT_FAIL)) {
+ pr_err(" object %d not found!\n", key);
+ err++;
+ } else if (obj && (obj->value != key)) {
+ pr_err(" wrong object returned (got %d, expected %d)\n",
+ obj->value, key);
+ err++;
+ }
+ }
+ return err;
+}
+
+static int threadfunc(void *data)
+{
+ int i, step, err = 0, insert_fails = 0;
+ struct thread_data *tdata = data;
+
+ up(&prestart_sem);
+ if (down_interruptible(&startup_sem))
+ pr_err(" thread[%d]: down_interruptible failed\n", tdata->id);
+
+ for (i = 0; i < entries; i++) {
+ tdata->objs[i].value = (tdata->id << 16) | i;
+ err = rhashtable_insert_fast(&ht, &tdata->objs[i].node,
+ test_rht_params);
+ if (err == -ENOMEM || err == -EBUSY) {
+ tdata->objs[i].value = TEST_INSERT_FAIL;
+ insert_fails++;
+ } else if (err) {
+ pr_err(" thread[%d]: rhashtable_insert_fast failed\n",
+ tdata->id);
+ goto out;
+ }
+ }
+ if (insert_fails)
+ pr_info(" thread[%d]: %d insert failures\n",
+ tdata->id, insert_fails);
+
+ err = thread_lookup_test(tdata);
+ if (err) {
+ pr_err(" thread[%d]: rhashtable_lookup_test failed\n",
+ tdata->id);
+ goto out;
+ }
+
+ for (step = 10; step > 0; step--) {
+ for (i = 0; i < entries; i += step) {
+ if (tdata->objs[i].value == TEST_INSERT_FAIL)
+ continue;
+ err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
+ test_rht_params);
+ if (err) {
+ pr_err(" thread[%d]: rhashtable_remove_fast failed\n",
+ tdata->id);
+ goto out;
+ }
+ tdata->objs[i].value = TEST_INSERT_FAIL;
+ }
+ err = thread_lookup_test(tdata);
+ if (err) {
+ pr_err(" thread[%d]: rhashtable_lookup_test (2) failed\n",
+ tdata->id);
+ goto out;
+ }
+ }
+out:
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ return err;
+}
+
static int __init test_rht_init(void)
{
- int i, err;
+ int i, err, started_threads = 0, failed_threads = 0;
u64 total_time = 0;
+ struct thread_data *tdata;
+ struct test_obj *objs;
entries = min(entries, MAX_ENTRIES);
do_div(total_time, runs);
pr_info("Average test time: %llu\n", total_time);
+ if (!tcount)
+ return 0;
+
+ pr_info("Testing concurrent rhashtable access from %d threads\n",
+ tcount);
+ sema_init(&prestart_sem, 1 - tcount);
+ tdata = vzalloc(tcount * sizeof(struct thread_data));
+ if (!tdata)
+ return -ENOMEM;
+ objs = vzalloc(tcount * entries * sizeof(struct test_obj));
+ if (!objs) {
+ vfree(tdata);
+ return -ENOMEM;
+ }
+
+ err = rhashtable_init(&ht, &test_rht_params);
+ if (err < 0) {
+ pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+ err);
+ vfree(tdata);
+ vfree(objs);
+ return -EINVAL;
+ }
+ for (i = 0; i < tcount; i++) {
+ tdata[i].id = i;
+ tdata[i].objs = objs + i * entries;
+ tdata[i].task = kthread_run(threadfunc, &tdata[i],
+ "rhashtable_thread[%d]", i);
+ if (IS_ERR(tdata[i].task))
+ pr_err(" kthread_run failed for thread %d\n", i);
+ else
+ started_threads++;
+ }
+ if (down_interruptible(&prestart_sem))
+ pr_err(" down_interruptible failed\n");
+ for (i = 0; i < tcount; i++)
+ up(&startup_sem);
+ for (i = 0; i < tcount; i++) {
+ if (IS_ERR(tdata[i].task))
+ continue;
+ if ((err = kthread_stop(tdata[i].task))) {
+ pr_warn("Test failed: thread %d returned: %d\n",
+ i, err);
+ failed_threads++;
+ }
+ }
+ pr_info("Started %d threads, %d failed\n",
+ started_threads, failed_threads);
+ rhashtable_destroy(&ht);
+ vfree(tdata);
+ vfree(objs);
return 0;
}
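
The two semaphores implement a start handshake: each worker checks in
with up(&prestart_sem) and then blocks on startup_sem (initialised to
0); the init code, initialising prestart_sem to 1 - tcount so the
check-ins are counted, waits on it and then ups startup_sem tcount
times so all workers are released together and the inserts really
contend. The intended handshake in isolation, as a sketch with N
standing in for tcount:

	sema_init(&ready, 1 - N);	/* Nth up() brings the count to 1 */
	sema_init(&go, 0);

	/* each worker thread: */
	up(&ready);				/* check in */
	if (down_interruptible(&go))		/* wait for the start signal */
		return -EINTR;

	/* control thread, after spawning the N workers: */
	if (down_interruptible(&ready))		/* until all N checked in */
		return -EINTR;
	for (i = 0; i < N; i++)
		up(&go);			/* release them at once */
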
extern struct cma cma_areas[MAX_CMA_AREAS];
extern unsigned cma_area_count;
-static unsigned long cma_bitmap_maxno(struct cma *cma)
+static inline unsigned long cma_bitmap_maxno(struct cma *cma)
{
return cma->count >> cma->order_per_bit;
}
* This file contains shadow memory manipulation code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
*
* Some of code borrowed from https://github.com/xairy/linux by
* Andrey Konovalov <adech.fo@gmail.com>
* This file contains error reporting code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
*
* Some of code borrowed from https://github.com/xairy/linux by
* Andrey Konovalov <adech.fo@gmail.com>
}
if (!PageHuge(p) && PageTransHuge(hpage)) {
- if (unlikely(split_huge_page(hpage))) {
- pr_err("MCE: %#lx: thp split failed\n", pfn);
+ if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
+ if (!PageAnon(hpage))
+ pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+ else
+ pr_err("MCE: %#lx: thp split failed\n", pfn);
if (TestClearPageHWPoison(p))
atomic_long_sub(nr_pages, &num_poisoned_pages);
put_page(p);
*/
ret = __get_any_page(page, pfn, 0);
if (!PageLRU(page)) {
+ /* Drop the page reference taken by __get_any_page() */
+ put_page(page);
pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
pfn, page->flags);
return -EIO;
unlock_page(hpage);
ret = isolate_huge_page(hpage, &pagelist);
- if (ret) {
- /*
- * get_any_page() and isolate_huge_page() takes a refcount each,
- * so need to drop one here.
- */
- put_page(hpage);
- } else {
+ /*
+ * get_any_page() and isolate_huge_page() take a refcount each,
+ * so need to drop one here.
+ */
+ put_page(hpage);
+ if (!ret) {
pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
return -EBUSY;
}
/* create new memmap entry */
firmware_map_add_hotplug(start, start + size, "System RAM");
+ memblock_add_node(start, size, nid);
goto out;
/* remove memmap entry */
firmware_map_remove(start, start + size, "System RAM");
+ memblock_free(start, size);
+ memblock_remove(start, size);
arch_remove_memory(start, size);
{
unsigned long zone_start_pfn, zone_end_pfn;
+ /* When hotadding a new node, the node should be empty */
+ if (!node_start_pfn && !node_end_pfn)
+ return 0;
+
/* Get the start and end of the zone */
zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
unsigned long zone_start_pfn, zone_end_pfn;
+ /* When hotadding a new node, the node should be empty */
+ if (!node_start_pfn && !node_end_pfn)
+ return 0;
+
zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
obj-$(CONFIG_6LOWPAN) += 6lowpan.o
-6lowpan-y := iphc.o nhc.o
+6lowpan-y := core.o iphc.o nhc.o
#rfc6282 nhcs
obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o
--- /dev/null
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Authors:
+ * (C) 2015 Pengutronix, Alexander Aring <aar@pengutronix.de>
+ */
+
+#include <linux/module.h>
+
+#include <net/6lowpan.h>
+
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype)
+{
+ lowpan_priv(dev)->lltype = lltype;
+}
+EXPORT_SYMBOL(lowpan_netdev_setup);
+
+static int __init lowpan_module_init(void)
+{
+ request_module_nowait("ipv6");
+
+ request_module_nowait("nhc_dest");
+ request_module_nowait("nhc_fragment");
+ request_module_nowait("nhc_hop");
+ request_module_nowait("nhc_ipv6");
+ request_module_nowait("nhc_mobility");
+ request_module_nowait("nhc_routing");
+ request_module_nowait("nhc_udp");
+
+ return 0;
+}
+module_init(lowpan_module_init);
+
+MODULE_LICENSE("GPL");
#include <linux/bitops.h>
#include <linux/if_arp.h>
-#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
return -EINVAL;
- hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
+ hdr.flow_lbl[0] = (tmp & 0x0F) | ((tmp >> 2) & 0x30);
memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
skb_pull(skb, 2);
break;
return 0;
}
EXPORT_SYMBOL_GPL(lowpan_header_compress);
-
-static int __init lowpan_module_init(void)
-{
- request_module_nowait("ipv6");
-
- request_module_nowait("nhc_dest");
- request_module_nowait("nhc_fragment");
- request_module_nowait("nhc_hop");
- request_module_nowait("nhc_ipv6");
- request_module_nowait("nhc_mobility");
- request_module_nowait("nhc_routing");
- request_module_nowait("nhc_udp");
-
- return 0;
-}
-module_init(lowpan_module_init);
-
-MODULE_LICENSE("GPL");
{
ether_setup(dev);
- dev->priv_flags |= IFF_802_1Q_VLAN;
+ dev->priv_flags |= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
- dev->tx_queue_len = 0;
dev->netdev_ops = &vlan_netdev_ops;
dev->destructor = vlan_dev_free;
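
This is the first of several identical conversions in this batch (VLAN
here; batman-adv, bridge, DSA, HSR and the 802.15.4 6lowpan device
follow): virtual devices that used to signal "no qdisc wanted" by
zeroing tx_queue_len now set IFF_NO_QUEUE in priv_flags, and the core
alloc_netdev hunk further down warns about drivers still using the old
idiom. In a setup callback the new convention is simply (a minimal
sketch, virt_dev_setup being a made-up name):

	static void virt_dev_setup(struct net_device *dev)
	{
		ether_setup(dev);
		/* queueless virtual device: set the flag instead of
		 * zeroing tx_queue_len
		 */
		dev->priv_flags |= IFF_NO_QUEUE;
	}
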
#include "main.h"
#include <linux/atomic.h>
+#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
int j;
/* check if orig node candidate is running DAT */
- if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT))
+ if (!test_bit(BATADV_ORIG_CAPA_HAS_DAT, &candidate->capabilities))
goto out;
/* Check if this node has already been selected... */
uint16_t tvlv_value_len)
{
if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
- orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
+ clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
else
- orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
+ set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
}
/**
struct batadv_neigh_node *router;
struct batadv_neigh_ifinfo *router_ifinfo;
struct batadv_gw_node *gw_node, *curr_gw = NULL;
- uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
- uint32_t gw_divisor;
+ uint64_t max_gw_factor = 0, tmp_gw_factor = 0;
uint8_t max_tq = 0;
uint8_t tq_avg;
struct batadv_orig_node *orig_node;
- gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
- gw_divisor *= 64;
-
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
if (gw_node->deleted)
tmp_gw_factor = tq_avg * tq_avg;
tmp_gw_factor *= gw_node->bandwidth_down;
tmp_gw_factor *= 100 * 100;
- tmp_gw_factor /= gw_divisor;
+ tmp_gw_factor >>= 18;
if ((tmp_gw_factor > max_gw_factor) ||
((tmp_gw_factor == max_gw_factor) &&
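
The deleted divisor was BATADV_TQ_LOCAL_WINDOW_SIZE squared times 64,
i.e. 64 * 64 * 64 = 2^18, so the new right shift by 18 is
arithmetically identical. The substantive change is widening
tmp_gw_factor and max_gw_factor to u64: with tq_avg up to 255,
tq_avg * tq_avg * 100 * 100 is already about 6.5e8, so multiplying by
any bandwidth_down above roughly 6 overflows a u32 and truncates the
factor, skewing gateway selection for fast gateways.
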
#include "main.h"
#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
*
* If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
* orig, has toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
*/
static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
uint8_t mcast_flags)
{
+ struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
+ struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
+
/* switched from flag unset to set */
if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
!(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
- hlist_add_head_rcu(&orig->mcast_want_all_unsnoopables_node,
- &bat_priv->mcast.want_all_unsnoopables_list);
+ /* flag checks above + mcast_handler_lock prevent this */
+ WARN_ON(!hlist_unhashed(node));
+
+ hlist_add_head_rcu(node, head);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
/* switched from flag set to unset */
} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
- hlist_del_rcu(&orig->mcast_want_all_unsnoopables_node);
+ /* flag checks above + mcast_handler_lock prevent this */
+ WARN_ON(hlist_unhashed(node));
+
+ hlist_del_init_rcu(node);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
}
}
*
* If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
* toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
*/
static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
uint8_t mcast_flags)
{
+ struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
+ struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
+
/* switched from flag unset to set */
if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
!(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
- hlist_add_head_rcu(&orig->mcast_want_all_ipv4_node,
- &bat_priv->mcast.want_all_ipv4_list);
+ /* flag checks above + mcast_handler_lock prevent this */
+ WARN_ON(!hlist_unhashed(node));
+
+ hlist_add_head_rcu(node, head);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
/* switched from flag set to unset */
} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
- hlist_del_rcu(&orig->mcast_want_all_ipv4_node);
+ /* flag checks above + mcast_handler_lock prevent this */
+ WARN_ON(hlist_unhashed(node));
+
+ hlist_del_init_rcu(node);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
}
}
*
* If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
* toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
*/
static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
uint8_t mcast_flags)
{
+ struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
+ struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
+
/* switched from flag unset to set */
if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
!(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
- hlist_add_head_rcu(&orig->mcast_want_all_ipv6_node,
- &bat_priv->mcast.want_all_ipv6_list);
+ /* flag checks above + mcast_handler_lock prevent this */
+ WARN_ON(!hlist_unhashed(node));
+
+ hlist_add_head_rcu(node, head);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
/* switched from flag set to unset */
} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
- hlist_del_rcu(&orig->mcast_want_all_ipv6_node);
+ /* flag checks above + mcast_handler_lock prevent this */
+ WARN_ON(hlist_unhashed(node));
+
+ hlist_del_init_rcu(node);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
}
}
uint8_t mcast_flags = BATADV_NO_FLAGS;
bool orig_initialized;
- orig_initialized = orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST;
+ if (orig_mcast_enabled && tvlv_value &&
+ (tvlv_value_len >= sizeof(mcast_flags)))
+ mcast_flags = *(uint8_t *)tvlv_value;
+
+ spin_lock_bh(&orig->mcast_handler_lock);
+ orig_initialized = test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
+ &orig->capa_initialized);
/* If mcast support is turned on decrease the disabled mcast node
* counter only if we had increased it for this node before. If this
* is a completely new orig_node no need to decrease the counter.
*/
if (orig_mcast_enabled &&
- !(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) {
+ !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
if (orig_initialized)
atomic_dec(&bat_priv->mcast.num_disabled);
- orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
+ set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
/* If mcast support is being switched off or if this is an initial
* OGM without mcast support then increase the disabled mcast
* node counter.
*/
} else if (!orig_mcast_enabled &&
- (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
+ (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) ||
!orig_initialized)) {
atomic_inc(&bat_priv->mcast.num_disabled);
- orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
+ clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
}
- orig->capa_initialized |= BATADV_ORIG_CAPA_HAS_MCAST;
-
- if (orig_mcast_enabled && tvlv_value &&
- (tvlv_value_len >= sizeof(mcast_flags)))
- mcast_flags = *(uint8_t *)tvlv_value;
+ set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
orig->mcast_flags = mcast_flags;
+ spin_unlock_bh(&orig->mcast_handler_lock);
}
/**
{
struct batadv_priv *bat_priv = orig->bat_priv;
- if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
- orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
+ spin_lock_bh(&orig->mcast_handler_lock);
+
+ if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) &&
+ test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized))
atomic_dec(&bat_priv->mcast.num_disabled);
batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
+
+ spin_unlock_bh(&orig->mcast_handler_lock);
}
#include "main.h"
#include <linux/atomic.h>
+#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
uint16_t tvlv_value_len)
{
if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
- orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC;
+ clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
else
- orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC;
+ set_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
}
/**
goto out;
/* check if orig node is network coding enabled */
- if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC))
+ if (!test_bit(BATADV_ORIG_CAPA_HAS_NC, &orig_node->capabilities))
goto out;
/* accept ogms from 'good' neighbors and single hop neighbors */
orig_node->last_seen = jiffies;
reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
orig_node->bcast_seqno_reset = reset_time;
+
#ifdef CONFIG_BATMAN_ADV_MCAST
orig_node->mcast_flags = BATADV_NO_FLAGS;
+ INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
+ INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
+ INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
+ spin_lock_init(&orig_node->mcast_handler_lock);
#endif
/* create a vlan object for the "untagged" LAN */
* we delete only packets belonging to the given interface
*/
if ((hard_iface) &&
- (forw_packet->if_incoming != hard_iface))
+ (forw_packet->if_incoming != hard_iface) &&
+ (forw_packet->if_outgoing != hard_iface))
continue;
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
int gw_mode;
enum batadv_forw_mode forw_mode;
struct batadv_orig_node *mcast_single_orig = NULL;
+ int network_offset = ETH_HLEN;
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto dropped;
case ETH_P_8021Q:
vhdr = vlan_eth_hdr(skb);
- if (vhdr->h_vlan_encapsulated_proto != ethertype)
+ if (vhdr->h_vlan_encapsulated_proto != ethertype) {
+ network_offset += VLAN_HLEN;
break;
+ }
/* fall through */
case ETH_P_BATMAN:
goto dropped;
}
+ skb_set_network_header(skb, network_offset);
+
if (batadv_bla_tx(bat_priv, skb, vid))
goto dropped;
dev->netdev_ops = &batadv_netdev_ops;
dev->destructor = batadv_softif_free;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
/* can't call min_mtu, because the needed variables
* have not been initialized yet
#include "main.h"
#include <linux/atomic.h>
+#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
/* increase the refcounter of the related vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
- addr, BATADV_PRINT_VID(vid)))
+ addr, BATADV_PRINT_VID(vid))) {
+ kfree(tt_local);
+ tt_local = NULL;
goto out;
+ }
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
}
spin_unlock_bh(list_lock);
}
- orig_node->capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT;
+ clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
}
static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
spin_lock_bh(&bat_priv->tt.req_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
- list_del(&node->list);
+ list_del_init(&node->list);
kfree(node);
}
list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
if (batadv_has_timed_out(node->issued_at,
BATADV_TT_REQUEST_TIMEOUT)) {
- list_del(&node->list);
+ list_del_init(&node->list);
kfree(node);
}
}
batadv_hardif_free_ref(primary_if);
if (ret && tt_req_node) {
spin_lock_bh(&bat_priv->tt.req_list_lock);
- list_del(&tt_req_node->list);
+ /* list_del_init() verifies tt_req_node is still in the list */
+ list_del_init(&tt_req_node->list);
spin_unlock_bh(&bat_priv->tt.req_list_lock);
kfree(tt_req_node);
}
return;
}
}
- orig_node->capa_initialized |= BATADV_ORIG_CAPA_HAS_TT;
+ set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
}
static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
if (!batadv_compare_eth(node->addr, resp_src))
continue;
- list_del(&node->list);
+ list_del_init(&node->list);
kfree(node);
}
bool has_tt_init;
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
- has_tt_init = orig_node->capa_initialized & BATADV_ORIG_CAPA_HAS_TT;
+ has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT,
+ &orig_node->capa_initialized);
/* orig table not initialised AND first diff is in the OGM OR the ttvn
* increased by one -> we can apply the attached changes
* @batadv_dat_addr_t: address of the orig node in the distributed hash
* @last_seen: time when last packet from this node was received
* @bcast_seqno_reset: time when the broadcast seqno window was reset
+ * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
* @mcast_flags: multicast flags announced by the orig node
* @mcast_want_all_unsnoop_node: a list node for the
* mcast.want_all_unsnoopables list
unsigned long last_seen;
unsigned long bcast_seqno_reset;
#ifdef CONFIG_BATMAN_ADV_MCAST
+ /* synchronizes mcast tvlv specific orig changes */
+ spinlock_t mcast_handler_lock;
uint8_t mcast_flags;
struct hlist_node mcast_want_all_unsnoopables_node;
struct hlist_node mcast_want_all_ipv4_node;
struct hlist_node mcast_want_all_ipv6_node;
#endif
- uint8_t capabilities;
- uint8_t capa_initialized;
+ unsigned long capabilities;
+ unsigned long capa_initialized;
atomic_t last_ttvn;
unsigned char *tt_buff;
int16_t tt_buff_len;
* (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
*/
enum batadv_orig_capabilities {
- BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
- BATADV_ORIG_CAPA_HAS_NC = BIT(1),
- BATADV_ORIG_CAPA_HAS_TT = BIT(2),
- BATADV_ORIG_CAPA_HAS_MCAST = BIT(3),
+ BATADV_ORIG_CAPA_HAS_DAT,
+ BATADV_ORIG_CAPA_HAS_NC,
+ BATADV_ORIG_CAPA_HAS_TT,
+ BATADV_ORIG_CAPA_HAS_MCAST,
};
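
The capability words move from open-coded |= / &= on a u8 to
set_bit()/clear_bit()/test_bit() on an unsigned long, and the enum
changes from BIT(n) masks to plain bit numbers because the bitops take
a bit index, not a mask. The point is atomicity: |= on a shared byte is
a plain read-modify-write, so two handlers setting different
capabilities concurrently can lose one of the updates, whereas the
bitops are atomic per bit. Side by side, with CAP_A/CAP_B as
placeholder names:

	unsigned long caps;		/* shared between TVLV handlers */

	caps |= BIT(CAP_A);		/* old style: racy RMW, a parallel
					 * caps |= BIT(CAP_B) can be lost */

	set_bit(CAP_A, &caps);		/* atomic; takes a bit number */
	if (test_bit(CAP_A, &caps))
		clear_bit(CAP_A, &caps);
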
/**
static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
- return netdev_priv(netdev);
+ return (struct lowpan_dev *)lowpan_priv(netdev)->priv;
}
static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
struct net_device *netdev;
int err = 0;
- netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
- NET_NAME_UNKNOWN, netdev_setup);
+ netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
+ IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
+ netdev_setup);
if (!netdev)
return -ENOMEM;
SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
SET_NETDEV_DEVTYPE(netdev, &bt_type);
- *dev = netdev_priv(netdev);
+ *dev = lowpan_dev(netdev);
(*dev)->netdev = netdev;
(*dev)->hdev = chan->conn->hcon->hdev;
INIT_LIST_HEAD(&(*dev)->peers);
list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
spin_unlock(&devices_lock);
+ lowpan_netdev_setup(netdev, LOWPAN_LLTYPE_BTLE);
+
err = register_netdev(netdev);
if (err < 0) {
BT_INFO("register_netdev failed %d", err);
amp_ctrl_put(ctrl);
hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, sizeof(cp), &cp);
+ hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
hci_req_run_skb(&req, write_remote_amp_assoc_complete);
kfree(cp);
hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}
+/* This function requires the caller holds hdev->lock */
+static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
+{
+ struct hci_conn_params *params;
+ struct smp_irk *irk;
+ bdaddr_t *bdaddr;
+ u8 bdaddr_type;
+
+ bdaddr = &conn->dst;
+ bdaddr_type = conn->dst_type;
+
+ /* Check if we need to convert to identity address */
+ irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
+ if (irk) {
+ bdaddr = &irk->bdaddr;
+ bdaddr_type = irk->addr_type;
+ }
+
+ params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
+ if (!params)
+ return;
+
+ /* The connection attempt was doing a scan for a new RPA and is
+ * still in the scan phase. If the params are not associated with
+ * any other autoconnect action, remove them completely; if they
+ * are, just unmark them as waiting for a connection by clearing
+ * the explicit_connect field.
+ */
+ if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+ hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
+ else
+ params->explicit_connect = false;
+}
+
+/* This function requires the caller holds hdev->lock */
+static void hci_connect_le_scan_remove(struct hci_conn *conn)
+{
+ hci_connect_le_scan_cleanup(conn);
+
+ hci_conn_hash_del(conn->hdev, conn);
+ hci_update_background_scan(conn->hdev);
+}
+
static void hci_acl_create_connection(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
if (conn->out) {
if (conn->type == ACL_LINK)
hci_acl_create_connection_cancel(conn);
- else if (conn->type == LE_LINK)
- hci_le_create_connection_cancel(conn);
+ else if (conn->type == LE_LINK) {
+ if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+ hci_connect_le_scan_remove(conn);
+ else
+ hci_le_create_connection_cancel(conn);
+ }
} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
hci_reject_sco(conn);
}
{
struct hci_conn *conn;
- if (status == 0)
- return;
+ hci_dev_lock(hdev);
+
+ conn = hci_lookup_le_connect(hdev);
+
+ if (!status) {
+ hci_connect_le_scan_cleanup(conn);
+ goto done;
+ }
BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
status);
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
if (!conn)
goto done;
hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
conn->state = BT_CONNECT;
+ clear_bit(HCI_CONN_SCANNING, &conn->flags);
}
static void hci_req_directed_advertising(struct hci_request *req,
u8 role)
{
struct hci_conn_params *params;
- struct hci_conn *conn;
+ struct hci_conn *conn, *conn_unfinished;
struct smp_irk *irk;
struct hci_request req;
int err;
* and return the object found.
*/
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+ conn_unfinished = NULL;
if (conn) {
- conn->pending_sec_level = sec_level;
- goto done;
+ if (conn->state == BT_CONNECT &&
+ test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+ BT_DBG("will continue unfinished conn %pMR", dst);
+ conn_unfinished = conn;
+ } else {
+ if (conn->pending_sec_level < sec_level)
+ conn->pending_sec_level = sec_level;
+ goto done;
+ }
}
/* Since the controller supports only one LE connection attempt at a
* time, we return -EBUSY if there is any connection attempt running.
*/
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
- if (conn)
+ if (hci_lookup_le_connect(hdev))
return ERR_PTR(-EBUSY);
/* When given an identity address with existing identity
* resolving key, the connection needs to be established
* to a resolvable random address.
*
- * This uses the cached random resolvable address from
- * a previous scan. When no cached address is available,
- * try connecting to the identity address instead.
- *
* Storing the resolvable random address is required here
* to handle connection failures. The address will later
* be resolved back into the original identity address
dst_type = ADDR_LE_DEV_RANDOM;
}
- conn = hci_conn_add(hdev, LE_LINK, dst, role);
+ if (conn_unfinished) {
+ conn = conn_unfinished;
+ bacpy(&conn->dst, dst);
+ } else {
+ conn = hci_conn_add(hdev, LE_LINK, dst, role);
+ }
+
if (!conn)
return ERR_PTR(-ENOMEM);
conn->dst_type = dst_type;
conn->sec_level = BT_SECURITY_LOW;
- conn->pending_sec_level = sec_level;
conn->conn_timeout = conn_timeout;
+ if (!conn_unfinished)
+ conn->pending_sec_level = sec_level;
+
hci_req_init(&req, hdev);
/* Disable advertising if we're active. For master role
return ERR_PTR(err);
}
+done:
+ /* If this is a continuation of a connect started by
+ * hci_connect_le_scan, it already called hci_conn_hold and calling
+ * it again would mess up the counter.
+ */
+ if (!conn_unfinished)
+ hci_conn_hold(conn);
+
+ return conn;
+}
+
+static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ struct hci_conn *conn;
+
+ if (!status)
+ return;
+
+ BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
+ status);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (conn)
+ hci_le_conn_failed(conn, status);
+
+ hci_dev_unlock(hdev);
+}
+
+static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
+{
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+ if (!conn)
+ return false;
+
+ if (conn->dst_type != type)
+ return false;
+
+ if (conn->state != BT_CONNECTED)
+ return false;
+
+ return true;
+}
+
+/* This function requires the caller holds hdev->lock */
+static int hci_explicit_conn_params_set(struct hci_request *req,
+ bdaddr_t *addr, u8 addr_type)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_conn_params *params;
+
+ if (is_connected(hdev, addr, addr_type))
+ return -EISCONN;
+
+ params = hci_conn_params_add(hdev, addr, addr_type);
+ if (!params)
+ return -EIO;
+
+ /* If we created new params, or existing params were marked as disabled,
+ * mark them to be used just once to connect.
+ */
+ if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+ params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+ list_del_init(&params->action);
+ list_add(&params->action, &hdev->pend_le_conns);
+ }
+
+ params->explicit_connect = true;
+ __hci_update_background_scan(req);
+
+ BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
+ params->auto_connect);
+
+ return 0;
+}
+
+/* This function requires the caller holds hdev->lock */
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, u8 sec_level,
+ u16 conn_timeout, u8 role)
+{
+ struct hci_conn *conn;
+ struct hci_request req;
+ int err;
+
+ /* Let's make sure that LE is enabled. */
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+ if (lmp_le_capable(hdev))
+ return ERR_PTR(-ECONNREFUSED);
+
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ /* Some devices send ATT messages as soon as the physical link is
+ * established. To be able to handle these ATT messages, the user-
+ * space first establishes the connection and then starts the pairing
+ * process.
+ *
+ * So if a hci_conn object already exists for the following connection
+ * attempt, we simply update pending_sec_level and auth_type fields
+ * and return the object found.
+ */
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+ if (conn) {
+ if (conn->pending_sec_level < sec_level)
+ conn->pending_sec_level = sec_level;
+ goto done;
+ }
+
+ BT_DBG("requesting refresh of dst_addr");
+
+ conn = hci_conn_add(hdev, LE_LINK, dst, role);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
+
+ hci_req_init(&req, hdev);
+
+ if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0)
+ return ERR_PTR(-EBUSY);
+
+ conn->state = BT_CONNECT;
+ set_bit(HCI_CONN_SCANNING, &conn->flags);
+
+ err = hci_req_run(&req, hci_connect_le_scan_complete);
+ if (err && err != -ENODATA) {
+ hci_conn_del(conn);
+ return ERR_PTR(err);
+ }
+
+ conn->dst_type = dst_type;
+ conn->sec_level = BT_SECURITY_LOW;
+ conn->pending_sec_level = sec_level;
+ conn->conn_timeout = conn_timeout;
+
done:
hci_conn_hold(conn);
return conn;
return NULL;
}
+/* This function requires the caller holds hdev->lock */
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+ bdaddr_t *addr,
+ u8 addr_type)
+{
+ struct hci_conn_params *param;
+
+ list_for_each_entry(param, &hdev->pend_le_conns, action) {
+ if (bacmp(&param->addr, addr) == 0 &&
+ param->addr_type == addr_type &&
+ param->explicit_connect)
+ return param;
+ }
+
+ list_for_each_entry(param, &hdev->pend_le_reports, action) {
+ if (bacmp(&param->addr, addr) == 0 &&
+ param->addr_type == addr_type &&
+ param->explicit_connect)
+ return param;
+ }
+
+ return NULL;
+}
+
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type)
list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
continue;
+
+ /* If trying to establish a one-time connection to a disabled
+ * device, leave the params, but mark them to be used just once.
+ */
+ if (params->explicit_connect) {
+ params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+ continue;
+ }
+
list_del(&params->list);
kfree(params);
}
hci_dev_set_flag(hdev, HCI_LE_ADV);
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ conn = hci_lookup_le_connect(hdev);
if (conn)
queue_delayed_work(hdev->workqueue,
&conn->le_conn_timeout,
*/
hci_dev_clear_flag(hdev, HCI_LE_ADV);
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ conn = hci_lookup_le_connect(hdev);
if (!conn) {
conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
if (!conn) {
/* If we're not connectable only connect devices that we have in
* our pend_le_conns list.
*/
- params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
- addr, addr_type);
+ params = hci_explicit_connect_lookup(hdev, addr, addr_type);
+
if (!params)
return NULL;
- switch (params->auto_connect) {
- case HCI_AUTO_CONN_DIRECT:
- /* Only devices advertising with ADV_DIRECT_IND are
- * triggering a connection attempt. This is allowing
- * incoming connections from slave devices.
- */
- if (adv_type != LE_ADV_DIRECT_IND)
+ if (!params->explicit_connect) {
+ switch (params->auto_connect) {
+ case HCI_AUTO_CONN_DIRECT:
+ /* Only devices advertising with ADV_DIRECT_IND are
+ * triggering a connection attempt. This is allowing
+ * incoming connections from slave devices.
+ */
+ if (adv_type != LE_ADV_DIRECT_IND)
+ return NULL;
+ break;
+ case HCI_AUTO_CONN_ALWAYS:
+ /* Devices advertising with ADV_IND or ADV_DIRECT_IND
+ * are triggering a connection attempt. This means
+ * that incoming connections from slave devices are
+ * accepted and also outgoing connections to slave
+ * devices are established when found.
+ */
+ break;
+ default:
return NULL;
- break;
- case HCI_AUTO_CONN_ALWAYS:
- /* Devices advertising with ADV_IND or ADV_DIRECT_IND
- * are triggering a connection attempt. This means
- * that incoming connectioms from slave device are
- * accepted and also outgoing connections to slave
- * devices are established when found.
- */
- break;
- default:
- return NULL;
+ }
}
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
if (!IS_ERR(conn)) {
- /* Store the pointer since we don't really have any
+ /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
+ * by the higher layer that tried to connect; if not, then
+ * store the pointer since we don't really have any
* other owner of the object besides the params that
* triggered it. This way we can abort the connection if
* the parameters get removed and keep the reference
* count consistent once the connection is established.
*/
- params->conn = hci_conn_get(conn);
+
+ if (!params->explicit_connect)
+ params->conn = hci_conn_get(conn);
+
return conn;
}
* address be updated at the next cycle.
*/
if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
- hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+ hci_lookup_le_connect(hdev)) {
BT_DBG("Deferring random address update");
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
return;
void __hci_update_background_scan(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
- struct hci_conn *conn;
if (!test_bit(HCI_UP, &hdev->flags) ||
test_bit(HCI_INIT, &hdev->flags) ||
* since some controllers are not able to scan and connect at
* the same time.
*/
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
- if (conn)
+ if (hci_lookup_le_connect(hdev))
return;
/* If controller is currently scanning, we stop it to ensure we
else
role = HCI_ROLE_MASTER;
- hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
- HCI_LE_CONN_TIMEOUT, role);
+ hcon = hci_connect_le_scan(hdev, dst, dst_type,
+ chan->sec_level,
+ HCI_LE_CONN_TIMEOUT,
+ role);
} else {
u8 auth_type = l2cap_get_auth_type(chan);
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
*/
hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
- conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
- sec_level, HCI_LE_CONN_TIMEOUT,
- HCI_ROLE_MASTER);
+ conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
+ addr_type, sec_level,
+ HCI_LE_CONN_TIMEOUT,
+ HCI_ROLE_MASTER);
}
if (IS_ERR(conn)) {
/* Don't let discovery abort an outgoing connection attempt
* that's using directed advertising.
*/
- if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+ if (hci_lookup_le_connect(hdev)) {
*status = MGMT_STATUS_REJECTED;
return false;
}
switch (auto_connect) {
case HCI_AUTO_CONN_DISABLED:
case HCI_AUTO_CONN_LINK_LOSS:
+ /* If auto connect is being disabled when we're trying to
+ * connect to a device, keep connecting.
+ */
+ if (params->explicit_connect)
+ list_add(¶ms->action, &hdev->pend_le_conns);
+
__hci_update_background_scan(req);
break;
case HCI_AUTO_CONN_REPORT:
dev->destructor = br_dev_free;
dev->ethtool_ops = &br_ethtool_ops;
SET_NETDEV_DEVTYPE(dev, &br_type);
- dev->tx_queue_len = 0;
- dev->priv_flags = IFF_EBRIDGE;
+ dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
break;
}
- if (skb_trimmed)
+ if (skb_trimmed && skb_trimmed != skb)
kfree_skb(skb_trimmed);
return err;
break;
}
- if (skb_trimmed)
+ if (skb_trimmed && skb_trimmed != skb)
kfree_skb(skb_trimmed);
return err;
.kind = "bridge",
.priv_size = sizeof(struct net_bridge),
.setup = br_dev_setup,
- .maxtype = IFLA_BRPORT_MAX,
+ .maxtype = IFLA_BR_MAX,
.policy = br_policy,
.validate = br_validate,
.newlink = br_dev_newlink,
skb->protocol = htons(ETH_P_CAIF);
/* Check if we need to handle xoff */
- if (likely(caifd->netdev->tx_queue_len == 0))
+ if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
goto noxoff;
if (unlikely(caifd->xoff))
dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
setup(dev);
+ if (!dev->tx_queue_len)
+ printk(KERN_WARNING "%s uses DEPRECATED zero tx_queue_len - convert driver to use IFF_NO_QUEUE instead.\n", name);
+
dev->num_tx_queues = txqs;
dev->real_num_tx_queues = txqs;
if (netif_alloc_netdev_queues(dev))
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
+#include <net/lwtunnel.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#ifdef CONFIG_IP_ROUTE_CLASSID
dst->tclassid = 0;
#endif
+ dst->lwtstate = NULL;
atomic_set(&dst->__refcnt, initial_ref);
dst->__use = 0;
dst->lastuse = jiffies;
kfree(dst);
else
kmem_cache_free(dst->ops->kmem_cachep, dst);
+ lwtstate_put(dst->lwtstate);
dst = child;
if (dst) {
*pfp = fp;
return 0;
}
+EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
void bpf_prog_destroy(struct bpf_prog *fp)
{
static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
struct sk_buff *skb = (struct sk_buff *) (long) r1;
- u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
+ bool is_pseudo = !!BPF_IS_PSEUDO_HEADER(flags);
int offset = (int) r2;
__sum16 sum, *ptr;
{
struct sk_buff *skb = (struct sk_buff *) (long) r1;
struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
- struct ip_tunnel_info *info = skb_tunnel_info(skb, AF_INET);
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info))
return -EINVAL;
to->tunnel_id = be64_to_cpu(info->key.tun_id);
- to->remote_ipv4 = be32_to_cpu(info->key.ipv4_src);
+ to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
return 0;
}
info = &md->u.tun_info;
info->mode = IP_TUNNEL_INFO_TX;
info->key.tun_id = cpu_to_be64(from->tunnel_id);
- info->key.ipv4_dst = cpu_to_be32(from->remote_ipv4);
+ info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
return 0;
}
}
EXPORT_SYMBOL(lwtunnel_cmp_encap);
-int __lwtunnel_output(struct sock *sk, struct sk_buff *skb,
- struct lwtunnel_state *lwtstate)
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
{
+ struct dst_entry *dst = skb_dst(skb);
const struct lwtunnel_encap_ops *ops;
+ struct lwtunnel_state *lwtstate;
int ret = -EINVAL;
- if (!lwtstate)
+ if (!dst)
goto drop;
+ lwtstate = dst->lwtstate;
if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
lwtstate->type > LWTUNNEL_ENCAP_MAX)
return ret;
}
+EXPORT_SYMBOL(lwtunnel_output);
-int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
+int lwtunnel_input(struct sk_buff *skb)
{
- struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
- struct lwtunnel_state *lwtstate = NULL;
+ struct dst_entry *dst = skb_dst(skb);
+ const struct lwtunnel_encap_ops *ops;
+ struct lwtunnel_state *lwtstate;
+ int ret = -EINVAL;
- if (rt) {
- lwtstate = rt->rt6i_lwtstate;
- skb->dev = rt->dst.dev;
- }
+ if (!dst)
+ goto drop;
+ lwtstate = dst->lwtstate;
- skb->protocol = htons(ETH_P_IPV6);
+ if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+ lwtstate->type > LWTUNNEL_ENCAP_MAX)
+ return 0;
- return __lwtunnel_output(sk, skb, lwtstate);
-}
-EXPORT_SYMBOL(lwtunnel_output6);
+ ret = -EOPNOTSUPP;
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+ if (likely(ops && ops->input))
+ ret = ops->input(skb);
+ rcu_read_unlock();
-int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
-{
- struct rtable *rt = (struct rtable *)skb_dst(skb);
- struct lwtunnel_state *lwtstate = NULL;
+ if (ret == -EOPNOTSUPP)
+ goto drop;
- if (rt) {
- lwtstate = rt->rt_lwtstate;
- skb->dev = rt->dst.dev;
- }
+ return ret;
- skb->protocol = htons(ETH_P_IP);
+drop:
+ kfree_skb(skb);
- return __lwtunnel_output(sk, skb, lwtstate);
+ return ret;
}
-EXPORT_SYMBOL(lwtunnel_output);
+EXPORT_SYMBOL(lwtunnel_input);
* Otherwise returns the provided skb. Returns NULL in error cases
* (e.g. transport_len exceeds skb length or out-of-memory).
*
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
*/
static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
unsigned int transport_len)
unsigned int len = skb_transport_offset(skb) + transport_len;
int ret;
- if (skb->len < len) {
- kfree_skb(skb);
+ if (skb->len < len)
return NULL;
- } else if (skb->len == len) {
+ else if (skb->len == len)
return skb;
- }
skb_chk = skb_clone(skb, GFP_ATOMIC);
- kfree_skb(skb);
-
if (!skb_chk)
return NULL;
* If the skb has data beyond the given transport length, then a
* trimmed & cloned skb is checked and returned.
*
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
*/
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
unsigned int transport_len,
skb_chk = skb_checksum_maybe_trim(skb, transport_len);
if (!skb_chk)
- return NULL;
+ goto err;
- if (!pskb_may_pull(skb_chk, offset)) {
- kfree_skb(skb_chk);
- return NULL;
- }
+ if (!pskb_may_pull(skb_chk, offset))
+ goto err;
__skb_pull(skb_chk, offset);
ret = skb_chkf(skb_chk);
__skb_push(skb_chk, offset);
- if (ret) {
- kfree_skb(skb_chk);
- return NULL;
- }
+ if (ret)
+ goto err;
return skb_chk;
+
+err:
+ if (skb_chk && skb_chk != skb)
+ kfree_skb(skb_chk);
+
+ return NULL;
+
}
EXPORT_SYMBOL(skb_checksum_trimmed);
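
Under the new contract the helper no longer consumes the skb it is
given: it returns either the caller's skb unchanged (nothing to trim)
or a trimmed clone, and on failure it frees only its own clone. A
caller therefore frees the result only when it differs from its own
skb, exactly as the bridge hunks earlier in this series now do
(checksum_op stands in for the skb_chkf callback):

	skb_chk = skb_checksum_trimmed(skb, transport_len, checksum_op);
	if (!skb_chk)
		return -EINVAL;		/* skb is still owned by the caller */

	/* ... inspect skb_chk ... */

	if (skb_chk != skb)
		kfree_skb(skb_chk);	/* free the clone, never the original */
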
EXPORT_SYMBOL(in6_pton);
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
- __be32 from, __be32 to, int pseudohdr)
+ __be32 from, __be32 to, bool pseudohdr)
{
if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_replace4(sum, from, to);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
const __be32 *from, const __be32 *to,
- int pseudohdr)
+ bool pseudohdr)
{
__be32 diff[] = {
~from[0], ~from[1], ~from[2], ~from[3],
}
EXPORT_SYMBOL(inet_proto_csum_replace16);
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+ __wsum diff, bool pseudohdr)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+ if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
+ skb->csum = ~csum_add(diff, ~skb->csum);
+ } else if (pseudohdr) {
+ *sum = ~csum_fold(csum_add(diff, csum_unfold(*sum)));
+ }
+}
+EXPORT_SYMBOL(inet_proto_csum_replace_by_diff);
+
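
The new helper folds a precomputed checksum delta into a transport
checksum, for callers that rewrite a block of bytes and already know
the difference rather than a single from/to word. A hedged usage
sketch (names illustrative; the delta is the usual csum_sub() of
partial sums over the new and old bytes):

	__wsum diff = csum_sub(csum_partial(new_bytes, n, 0),
			       csum_partial(old_bytes, n, 0));

	/* fold the delta into a TCP checksum; pseudohdr is true when
	 * the rewritten bytes feed into the pseudo-header sum
	 */
	inet_proto_csum_replace_by_diff(&tcph->check, skb, diff, true);
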
struct __net_random_once_work {
struct work_struct work;
struct static_key *key;
return 0;
}
+static int dsa_of_probe_links(struct dsa_platform_data *pd,
+ struct dsa_chip_data *cd,
+ int chip_index, int port_index,
+ struct device_node *port,
+ const char *port_name)
+{
+ struct device_node *link;
+ int link_index;
+ int ret;
+
+ for (link_index = 0;; link_index++) {
+ link = of_parse_phandle(port, "link", link_index);
+ if (!link)
+ break;
+
+ if (!strcmp(port_name, "dsa") && pd->nr_chips > 1) {
+ ret = dsa_of_setup_routing_table(pd, cd, chip_index,
+ port_index, link);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
{
int i;
static int dsa_of_probe(struct device *dev)
{
struct device_node *np = dev->of_node;
- struct device_node *child, *mdio, *ethernet, *port, *link;
+ struct device_node *child, *mdio, *ethernet, *port;
struct mii_bus *mdio_bus, *mdio_bus_switch;
struct net_device *ethernet_dev;
struct dsa_platform_data *pd;
goto out_free_chip;
}
- link = of_parse_phandle(port, "link", 0);
-
- if (!strcmp(port_name, "dsa") && link &&
- pd->nr_chips > 1) {
- ret = dsa_of_setup_routing_table(pd, cd,
- chip_index, port_index, link);
- if (ret)
- goto out_free_chip;
- }
+ ret = dsa_of_probe_links(pd, cd, chip_index,
+ port_index, port, port_name);
+ if (ret)
+ goto out_free_chip;
}
}
slave_dev->features = master->vlan_features;
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
eth_hw_addr_inherit(slave_dev, master);
- slave_dev->tx_queue_len = 0;
+ slave_dev->priv_flags |= IFF_NO_QUEUE;
slave_dev->netdev_ops = &dsa_slave_netdev_ops;
slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
dev->header_ops = &hsr_header_ops;
dev->netdev_ops = &hsr_device_ops;
SET_NETDEV_DEVTYPE(dev, &hsr_type);
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->destructor = hsr_dev_destroy;
#include <net/ieee802154_netdev.h>
#include <net/inet_frag.h>
+#include <net/6lowpan.h>
struct lowpan_create_arg {
u16 tag;
}
}
-struct lowpan_dev_record {
- struct net_device *ldev;
- struct list_head list;
-};
-
/* private device info */
struct lowpan_dev_info {
struct net_device *real_dev; /* real WPAN device ptr */
- struct mutex dev_list_mtx; /* mutex for list ops */
u16 fragment_tag;
};
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
- return netdev_priv(dev);
+ return (struct lowpan_dev_info *)lowpan_priv(dev)->priv;
}
-extern struct list_head lowpan_devices;
-
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
void lowpan_net_frag_exit(void);
int lowpan_net_frag_init(void);
#include "6lowpan_i.h"
-LIST_HEAD(lowpan_devices);
-static int lowpan_open_count;
+static int open_count;
static struct header_ops lowpan_header_ops = {
.create = lowpan_header_create,
dev->hard_header_len = 2 + 1 + 20 + 14;
dev->needed_tailroom = 2; /* FCS */
dev->mtu = IPV6_MIN_MTU;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = 0;
struct nlattr *tb[], struct nlattr *data[])
{
struct net_device *real_dev;
- struct lowpan_dev_record *entry;
int ret;
ASSERT_RTNL();
return -EINVAL;
}
- lowpan_dev_info(dev)->real_dev = real_dev;
- mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
+ if (real_dev->ieee802154_ptr->lowpan_dev) {
dev_put(real_dev);
- lowpan_dev_info(dev)->real_dev = NULL;
- return -ENOMEM;
+ return -EBUSY;
}
- entry->ldev = dev;
-
+ lowpan_dev_info(dev)->real_dev = real_dev;
/* Set the lowpan hardware address to the wpan hardware address. */
memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
- mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
- INIT_LIST_HEAD(&entry->list);
- list_add_tail(&entry->list, &lowpan_devices);
- mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+ lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
ret = register_netdevice(dev);
- if (ret >= 0) {
- if (!lowpan_open_count)
- lowpan_rx_init();
- lowpan_open_count++;
+ if (ret < 0) {
+ dev_put(real_dev);
+ return ret;
}
- return ret;
+ real_dev->ieee802154_ptr->lowpan_dev = dev;
+ if (!open_count)
+ lowpan_rx_init();
+
+ open_count++;
+
+ return 0;
}
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
struct net_device *real_dev = lowpan_dev->real_dev;
- struct lowpan_dev_record *entry, *tmp;
ASSERT_RTNL();
- lowpan_open_count--;
- if (!lowpan_open_count)
- lowpan_rx_exit();
-
- mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
- list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
- if (entry->ldev == dev) {
- list_del(&entry->list);
- kfree(entry);
- }
- }
- mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+ open_count--;
- mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
-
- unregister_netdevice_queue(dev, head);
+ if (!open_count)
+ lowpan_rx_exit();
+ real_dev->ieee802154_ptr->lowpan_dev = NULL;
+ unregister_netdevice(dev);
dev_put(real_dev);
}
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
.kind = "lowpan",
- .priv_size = sizeof(struct lowpan_dev_info),
+ .priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)),
.setup = lowpan_setup,
.newlink = lowpan_newlink,
.dellink = lowpan_dellink,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- LIST_HEAD(del_list);
- struct lowpan_dev_record *entry, *tmp;
if (dev->type != ARPHRD_IEEE802154)
goto out;
- if (event == NETDEV_UNREGISTER) {
- list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
- if (lowpan_dev_info(entry->ldev)->real_dev == dev)
- lowpan_dellink(entry->ldev, &del_list);
- }
-
- unregister_netdevice_many(&del_list);
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ /* If the wpan interface is being unregistered, also
+ * delete any lowpan interface that belongs to this
+ * wpan interface.
+ */
+ if (dev->ieee802154_ptr && dev->ieee802154_ptr->lowpan_dev)
+ lowpan_dellink(dev->ieee802154_ptr->lowpan_dev, NULL);
+ break;
+ default:
+ break;
}
out:
#include "6lowpan_i.h"
-static int lowpan_give_skb_to_devices(struct sk_buff *skb,
- struct net_device *dev)
+static int lowpan_give_skb_to_device(struct sk_buff *skb,
+ struct net_device *dev)
{
- struct lowpan_dev_record *entry;
- struct sk_buff *skb_cp;
- int stat = NET_RX_SUCCESS;
-
+ skb->dev = dev->ieee802154_ptr->lowpan_dev;
skb->protocol = htons(ETH_P_IPV6);
skb->pkt_type = PACKET_HOST;
- rcu_read_lock();
- list_for_each_entry_rcu(entry, &lowpan_devices, list)
- if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
- skb_cp = skb_copy(skb, GFP_ATOMIC);
- if (!skb_cp) {
- kfree_skb(skb);
- rcu_read_unlock();
- return NET_RX_DROP;
- }
-
- skb_cp->dev = entry->ldev;
- stat = netif_rx(skb_cp);
- if (stat == NET_RX_DROP)
- break;
- }
- rcu_read_unlock();
-
- consume_skb(skb);
-
- return stat;
+ return netif_rx(skb);
}
static int
struct ieee802154_hdr hdr;
int ret;
+ if (dev->type != ARPHRD_IEEE802154 ||
+ !dev->ieee802154_ptr->lowpan_dev)
+ goto drop;
+
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto drop;
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop_skb;
- if (dev->type != ARPHRD_IEEE802154)
- goto drop_skb;
-
if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
goto drop_skb;
if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
/* Pull off the 1-byte of 6lowpan header. */
skb_pull(skb, 1);
- return lowpan_give_skb_to_devices(skb, NULL);
+ return lowpan_give_skb_to_device(skb, dev);
} else {
switch (skb->data[0] & 0xe0) {
case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
if (ret < 0)
goto drop_skb;
- return lowpan_give_skb_to_devices(skb, NULL);
+ return lowpan_give_skb_to_device(skb, dev);
case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
if (ret == 1) {
if (ret < 0)
goto drop_skb;
- return lowpan_give_skb_to_devices(skb, NULL);
+ return lowpan_give_skb_to_device(skb, dev);
} else if (ret == -1) {
return NET_RX_DROP;
} else {
if (ret < 0)
goto drop_skb;
- return lowpan_give_skb_to_devices(skb, NULL);
+ return lowpan_give_skb_to_device(skb, dev);
} else if (ret == -1) {
return NET_RX_DROP;
} else {
frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
if (IS_ERR(frag))
- return -PTR_ERR(frag);
+ return PTR_ERR(frag);
memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
} else {
da.mode = IEEE802154_ADDR_LONG;
da.extended_addr = ieee802154_devaddr_from_raw(daddr);
- cb->ackreq = wpan_dev->frame_retries >= 0;
+ cb->ackreq = wpan_dev->ackreq;
}
return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
[NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },
[NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED },
+
+ [NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 },
};
/* message building helper */
CMD(set_max_csma_backoffs, SET_MAX_CSMA_BACKOFFS);
CMD(set_max_frame_retries, SET_MAX_FRAME_RETRIES);
CMD(set_lbt_mode, SET_LBT_MODE);
+ CMD(set_ackreq_default, SET_ACKREQ_DEFAULT);
if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER)
CMD(set_tx_power, SET_TX_POWER);
if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt))
goto nla_put_failure;
+ /* ackreq default behaviour */
+ if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq))
+ goto nla_put_failure;
+
genlmsg_end(msg, hdr);
return 0;
return rdev_set_lbt_mode(rdev, wpan_dev, mode);
}
+static int
+nl802154_set_ackreq_default(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ bool ackreq;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ if (!info->attrs[NL802154_ATTR_ACKREQ_DEFAULT])
+ return -EINVAL;
+
+ ackreq = !!nla_get_u8(info->attrs[NL802154_ATTR_ACKREQ_DEFAULT]);
+ return rdev_set_ackreq_default(rdev, wpan_dev, ackreq);
+}
+
#define NL802154_FLAG_NEED_WPAN_PHY 0x01
#define NL802154_FLAG_NEED_NETDEV 0x02
#define NL802154_FLAG_NEED_RTNL 0x04
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL802154_CMD_SET_ACKREQ_DEFAULT,
+ .doit = nl802154_set_ackreq_default,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
};
/* initialisation/exit functions */
return ret;
}
+static inline int
+rdev_set_ackreq_default(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, bool ackreq)
+{
+ int ret;
+
+ trace_802154_rdev_set_ackreq_default(&rdev->wpan_phy, wpan_dev,
+ ackreq);
+ ret = rdev->ops->set_ackreq_default(&rdev->wpan_phy, wpan_dev, ackreq);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
#endif /* __CFG802154_RDEV_OPS */
WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode))
);
+TRACE_EVENT(802154_rdev_set_ackreq_default,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ bool ackreq),
+ TP_ARGS(wpan_phy, wpan_dev, ackreq),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ WPAN_DEV_ENTRY
+ __field(bool, ackreq)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ WPAN_DEV_ASSIGN;
+ __entry->ackreq = ackreq;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+ ", ackreq default: %s", WPAN_PHY_PR_ARG,
+ WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->ackreq))
+);
+
TRACE_EVENT(802154_rdev_return_int,
TP_PROTO(struct wpan_phy *wpan_phy, int ret),
TP_ARGS(wpan_phy, ret),
goto out;
}
- if (sk->sk_bound_dev_if) {
- struct net_device *dev;
-
- rcu_read_lock();
- dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
- if (dev)
- tb_id = vrf_dev_table_rcu(dev) ? : tb_id;
- rcu_read_unlock();
- }
+ tb_id = vrf_dev_table_ifindex(net, sk->sk_bound_dev_if) ? : tb_id;
chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
/* Not specified by any standard per-se, however it breaks too
#endif
-int fib_encap_match(struct net *net, u16 encap_type,
- struct nlattr *encap,
- int oif, const struct fib_nh *nh)
+static int fib_encap_match(struct net *net, u16 encap_type,
+ struct nlattr *encap,
+ int oif, const struct fib_nh *nh)
{
struct lwtunnel_state *lwtstate;
struct net_device *dev = NULL;
- int ret;
+ int ret, result = 0;
if (encap_type == LWTUNNEL_ENCAP_NONE)
return 0;
dev = __dev_get_by_index(net, oif);
ret = lwtunnel_build_state(dev, encap_type,
encap, &lwtstate);
- if (!ret)
- return lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+ if (!ret) {
+ result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+ lwtstate_free(lwtstate);
+ }
- return 0;
+ return result;
}
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
if (tbl)
err = fib_table_lookup(tbl, &fl4, &res,
- FIB_LOOKUP_IGNORE_LINKSTATE);
- else
+ FIB_LOOKUP_IGNORE_LINKSTATE |
+ FIB_LOOKUP_NOREF);
+
+ /* on error, or if no table was given, do a full lookup. This
+ * is needed, for example, when nexthops are in the local
+ * table rather than the given table.
+ */
+ if (!tbl || err) {
err = fib_lookup(net, &fl4, &res,
FIB_LOOKUP_IGNORE_LINKSTATE);
+ }
+
if (err) {
rcu_read_unlock();
return err;
key = l->key + 1;
iter->pos++;
- if (pos-- <= 0)
+ if (--pos <= 0)
break;
l = NULL;
__be16 *pd = data;
size_t start = ntohs(pd[0]);
size_t offset = ntohs(pd[1]);
- size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+ size_t plen = sizeof(struct udphdr) + hdrlen +
+ max_t(size_t, offset + sizeof(u16), start);
+
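+ /* editorial note: remote checksum offload was already handled for
+ * this skb (e.g. during GRO), so there is nothing left to pull
+ */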
+ if (skb->remcsum_offload)
+ return guehdr;
if (!pskb_may_pull(skb, plen))
return NULL;
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
struct guehdr *guehdr, void *data,
- size_t hdrlen, u8 ipproto,
- struct gro_remcsum *grc, bool nopartial)
+ size_t hdrlen, struct gro_remcsum *grc,
+ bool nopartial)
{
__be16 *pd = data;
size_t start = ntohs(pd[0]);
size_t offset = ntohs(pd[1]);
- size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
if (skb->remcsum_offload)
- return NULL;
+ return guehdr;
if (!NAPI_GRO_CB(skb)->csum_valid)
return NULL;
- /* Pull checksum that will be written */
- if (skb_gro_header_hard(skb, off + plen)) {
- guehdr = skb_gro_header_slow(skb, off + plen, off);
- if (!guehdr)
- return NULL;
- }
-
- skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen,
- start, offset, grc, nopartial);
+ guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
+ start, offset, grc, nopartial);
skb->remcsum_offload = 1;
if (flags & GUE_PFLAG_REMCSUM) {
guehdr = gue_gro_remcsum(skb, off, guehdr,
- data + doffset, hdrlen,
- guehdr->proto_ctype, &grc,
+ data + doffset, hdrlen, &grc,
!!(fou->flags &
FOU_F_REMCSUM_NOPARTIAL));
+
if (!guehdr)
goto out;
rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[guehdr->proto_ctype]);
- if (WARN_ON(!ops || !ops->callbacks.gro_receive))
+ if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
goto out_unlock;
pp = ops->callbacks.gro_receive(head, skb);
fl4.flowi4_mark = mark;
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_proto = IPPROTO_ICMP;
- fl4.flowi4_oif = vrf_master_ifindex_rcu(skb->dev) ? : skb->dev->ifindex;
+ fl4.flowi4_oif = vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex;
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
fl4->flowi4_proto = IPPROTO_ICMP;
fl4->fl4_icmp_type = type;
fl4->fl4_icmp_code = code;
- fl4->flowi4_oif = vrf_master_ifindex_rcu(skb_in->dev) ? : skb_in->dev->ifindex;
+ fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev) ? : skb_in->dev->ifindex;
security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
rt = __ip_route_output_key(net, fl4);
struct sk_buff *skb_chk;
unsigned int transport_len;
unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
- int ret;
+ int ret = -EINVAL;
transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
- skb_get(skb);
skb_chk = skb_checksum_trimmed(skb, transport_len,
ip_mc_validate_checksum);
if (!skb_chk)
- return -EINVAL;
+ goto err;
- if (!pskb_may_pull(skb_chk, len)) {
- kfree_skb(skb_chk);
- return -EINVAL;
- }
+ if (!pskb_may_pull(skb_chk, len))
+ goto err;
ret = ip_mc_check_igmp_msg(skb_chk);
- if (ret) {
- kfree_skb(skb_chk);
- return ret;
- }
+ if (ret)
+ goto err;
if (skb_trimmed)
*skb_trimmed = skb_chk;
- else
+ /* free the now-unneeded clone */
+ else if (skb_chk != skb)
kfree_skb(skb_chk);
- return 0;
+ ret = 0;
+
+err:
+ if (ret && skb_chk && skb_chk != skb)
+ kfree_skb(skb_chk);
+
+ return ret;
}
/**
* @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
*
* Checks whether an IPv4 packet is a valid IGMP packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
*
* -EINVAL: A broken packet was detected, i.e. it violates some internet
* standard
* to leave the original skb and its full frame unchanged (which might be
* desirable for layer 2 frame jugglers).
*
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * The caller needs to set the skb network header and to free any returned
+ * skb if it differs from the provided skb.
*/
int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
{
}
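
A hedged sketch of the resulting caller contract (the helper name
igmp_check_example is invented for illustration): ip_mc_check_igmp() may now
return the original skb itself through *skb_trimmed, so a caller frees the
result only when it is a distinct clone. The MLD variant ipv6_mc_check_mld()
further below follows the same rules.

static int igmp_check_example(struct sk_buff *skb)
{
	struct sk_buff *skb_trimmed = NULL;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);
	if (err)
		return err;

	/* ... parse the IGMP message through skb_trimmed ... */

	if (skb_trimmed != skb)
		kfree_skb(skb_trimmed);	/* free only a clone, never skb itself */
	return 0;
}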
spin_unlock(&queue->syn_wait_lock);
- if (del_timer_sync(&req->rsk_timer))
+ if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
reqsk_put(req);
return found;
}
return PACKET_REJECT;
info = &tun_dst->u.tun_info;
- info->key.ipv4_src = iph->saddr;
- info->key.ipv4_dst = iph->daddr;
- info->key.ipv4_tos = iph->tos;
- info->key.ipv4_ttl = iph->ttl;
+ info->key.u.ipv4.src = iph->saddr;
+ info->key.u.ipv4.dst = iph->daddr;
+ info->key.tos = iph->tos;
+ info->key.ttl = iph->ttl;
info->mode = IP_TUNNEL_INFO_RX;
info->key.tun_flags = tpi->flags &
__be16 df, flags;
int err;
- tun_info = skb_tunnel_info(skb, AF_INET);
+ tun_info = skb_tunnel_info(skb);
if (unlikely(!tun_info || tun_info->mode != IP_TUNNEL_INFO_TX))
goto err_free_skb;
key = &tun_info->key;
memset(&fl, 0, sizeof(fl));
- fl.daddr = key->ipv4_dst;
- fl.saddr = key->ipv4_src;
- fl.flowi4_tos = RT_TOS(key->ipv4_tos);
+ fl.daddr = key->u.ipv4.dst;
+ fl.saddr = key->u.ipv4.src;
+ fl.flowi4_tos = RT_TOS(key->tos);
fl.flowi4_mark = skb->mark;
fl.flowi4_proto = IPPROTO_GRE;
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
err = iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
- key->ipv4_dst, IPPROTO_GRE,
- key->ipv4_tos, key->ipv4_ttl, df, false);
+ key->u.ipv4.dst, IPPROTO_GRE,
+ key->tos, key->ttl, df, false);
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return;
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
-static const struct nla_policy ip_tun_policy[IP_TUN_MAX + 1] = {
- [IP_TUN_ID] = { .type = NLA_U64 },
- [IP_TUN_DST] = { .type = NLA_U32 },
- [IP_TUN_SRC] = { .type = NLA_U32 },
- [IP_TUN_TTL] = { .type = NLA_U8 },
- [IP_TUN_TOS] = { .type = NLA_U8 },
- [IP_TUN_SPORT] = { .type = NLA_U16 },
- [IP_TUN_DPORT] = { .type = NLA_U16 },
- [IP_TUN_FLAGS] = { .type = NLA_U16 },
+static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
+ [LWTUNNEL_IP_ID] = { .type = NLA_U64 },
+ [LWTUNNEL_IP_DST] = { .type = NLA_U32 },
+ [LWTUNNEL_IP_SRC] = { .type = NLA_U32 },
+ [LWTUNNEL_IP_TTL] = { .type = NLA_U8 },
+ [LWTUNNEL_IP_TOS] = { .type = NLA_U8 },
+ [LWTUNNEL_IP_SPORT] = { .type = NLA_U16 },
+ [LWTUNNEL_IP_DPORT] = { .type = NLA_U16 },
+ [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 },
};
static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
{
struct ip_tunnel_info *tun_info;
struct lwtunnel_state *new_state;
- struct nlattr *tb[IP_TUN_MAX + 1];
+ struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
int err;
- err = nla_parse_nested(tb, IP_TUN_MAX, attr, ip_tun_policy);
+ err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy);
if (err < 0)
return err;
tun_info = lwt_tun_info(new_state);
- if (tb[IP_TUN_ID])
- tun_info->key.tun_id = nla_get_u64(tb[IP_TUN_ID]);
+ if (tb[LWTUNNEL_IP_ID])
+ tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP_ID]);
- if (tb[IP_TUN_DST])
- tun_info->key.ipv4_dst = nla_get_be32(tb[IP_TUN_DST]);
+ if (tb[LWTUNNEL_IP_DST])
+ tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);
- if (tb[IP_TUN_SRC])
- tun_info->key.ipv4_src = nla_get_be32(tb[IP_TUN_SRC]);
+ if (tb[LWTUNNEL_IP_SRC])
+ tun_info->key.u.ipv4.src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);
- if (tb[IP_TUN_TTL])
- tun_info->key.ipv4_ttl = nla_get_u8(tb[IP_TUN_TTL]);
+ if (tb[LWTUNNEL_IP_TTL])
+ tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);
- if (tb[IP_TUN_TOS])
- tun_info->key.ipv4_tos = nla_get_u8(tb[IP_TUN_TOS]);
+ if (tb[LWTUNNEL_IP_TOS])
+ tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
- if (tb[IP_TUN_SPORT])
- tun_info->key.tp_src = nla_get_be16(tb[IP_TUN_SPORT]);
+ if (tb[LWTUNNEL_IP_SPORT])
+ tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]);
- if (tb[IP_TUN_DPORT])
- tun_info->key.tp_dst = nla_get_be16(tb[IP_TUN_DPORT]);
+ if (tb[LWTUNNEL_IP_DPORT])
+ tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]);
- if (tb[IP_TUN_FLAGS])
- tun_info->key.tun_flags = nla_get_u16(tb[IP_TUN_FLAGS]);
+ if (tb[LWTUNNEL_IP_FLAGS])
+ tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);
tun_info->mode = IP_TUNNEL_INFO_TX;
tun_info->options = NULL;
{
struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
- if (nla_put_u64(skb, IP_TUN_ID, tun_info->key.tun_id) ||
- nla_put_be32(skb, IP_TUN_DST, tun_info->key.ipv4_dst) ||
- nla_put_be32(skb, IP_TUN_SRC, tun_info->key.ipv4_src) ||
- nla_put_u8(skb, IP_TUN_TOS, tun_info->key.ipv4_tos) ||
- nla_put_u8(skb, IP_TUN_TTL, tun_info->key.ipv4_ttl) ||
- nla_put_u16(skb, IP_TUN_SPORT, tun_info->key.tp_src) ||
- nla_put_u16(skb, IP_TUN_DPORT, tun_info->key.tp_dst) ||
- nla_put_u16(skb, IP_TUN_FLAGS, tun_info->key.tun_flags))
+ if (nla_put_u64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
+ nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
+ nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
+ nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
+ nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
+ nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) ||
+ nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) ||
+ nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
return -ENOMEM;
return 0;
static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
- return nla_total_size(8) /* IP_TUN_ID */
- + nla_total_size(4) /* IP_TUN_DST */
- + nla_total_size(4) /* IP_TUN_SRC */
- + nla_total_size(1) /* IP_TUN_TOS */
- + nla_total_size(1) /* IP_TUN_TTL */
- + nla_total_size(2) /* IP_TUN_SPORT */
- + nla_total_size(2) /* IP_TUN_DPORT */
- + nla_total_size(2); /* IP_TUN_FLAGS */
+ return nla_total_size(8) /* LWTUNNEL_IP_ID */
+ + nla_total_size(4) /* LWTUNNEL_IP_DST */
+ + nla_total_size(4) /* LWTUNNEL_IP_SRC */
+ + nla_total_size(1) /* LWTUNNEL_IP_TOS */
+ + nla_total_size(1) /* LWTUNNEL_IP_TTL */
+ + nla_total_size(2) /* LWTUNNEL_IP_SPORT */
+ + nla_total_size(2) /* LWTUNNEL_IP_DPORT */
+ + nla_total_size(2); /* LWTUNNEL_IP_FLAGS */
+}
+
+static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+ return memcmp(lwt_tun_info(a), lwt_tun_info(b),
+ sizeof(struct ip_tunnel_info));
}
static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
.build_state = ip_tun_build_state,
.fill_encap = ip_tun_fill_encap_info,
.get_encap_size = ip_tun_encap_nlsize,
+ .cmp_encap = ip_tun_cmp_encap,
+};
+
+static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
+ [LWTUNNEL_IP6_ID] = { .type = NLA_U64 },
+ [LWTUNNEL_IP6_DST] = { .len = sizeof(struct in6_addr) },
+ [LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) },
+ [LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 },
+ [LWTUNNEL_IP6_TC] = { .type = NLA_U8 },
+ [LWTUNNEL_IP6_SPORT] = { .type = NLA_U16 },
+ [LWTUNNEL_IP6_DPORT] = { .type = NLA_U16 },
+ [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 },
+};
+
+static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
+ struct lwtunnel_state **ts)
+{
+ struct ip_tunnel_info *tun_info;
+ struct lwtunnel_state *new_state;
+ struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy);
+ if (err < 0)
+ return err;
+
+ new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+ if (!new_state)
+ return -ENOMEM;
+
+ new_state->type = LWTUNNEL_ENCAP_IP6;
+
+ tun_info = lwt_tun_info(new_state);
+
+ if (tb[LWTUNNEL_IP6_ID])
+ tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP6_ID]);
+
+ if (tb[LWTUNNEL_IP6_DST])
+ tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);
+
+ if (tb[LWTUNNEL_IP6_SRC])
+ tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);
+
+ if (tb[LWTUNNEL_IP6_HOPLIMIT])
+ tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);
+
+ if (tb[LWTUNNEL_IP6_TC])
+ tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
+
+ if (tb[LWTUNNEL_IP6_SPORT])
+ tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP6_SPORT]);
+
+ if (tb[LWTUNNEL_IP6_DPORT])
+ tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP6_DPORT]);
+
+ if (tb[LWTUNNEL_IP6_FLAGS])
+ tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]);
+
+ tun_info->mode = IP_TUNNEL_INFO_TX;
+ tun_info->options = NULL;
+ tun_info->options_len = 0;
+
+ *ts = new_state;
+
+ return 0;
+}
+
+static int ip6_tun_fill_encap_info(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+ if (nla_put_u64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
+ nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
+ nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
+ nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
+ nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
+ nla_put_u16(skb, LWTUNNEL_IP6_SPORT, tun_info->key.tp_src) ||
+ nla_put_u16(skb, LWTUNNEL_IP6_DPORT, tun_info->key.tp_dst) ||
+ nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+ return nla_total_size(8) /* LWTUNNEL_IP6_ID */
+ + nla_total_size(16) /* LWTUNNEL_IP6_DST */
+ + nla_total_size(16) /* LWTUNNEL_IP6_SRC */
+ + nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */
+ + nla_total_size(1) /* LWTUNNEL_IP6_TC */
+ + nla_total_size(2) /* LWTUNNEL_IP6_SPORT */
+ + nla_total_size(2) /* LWTUNNEL_IP6_DPORT */
+ + nla_total_size(2); /* LWTUNNEL_IP6_FLAGS */
+}
+
+static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
+ .build_state = ip6_tun_build_state,
+ .fill_encap = ip6_tun_fill_encap_info,
+ .get_encap_size = ip6_tun_encap_nlsize,
+ .cmp_encap = ip_tun_cmp_encap,
};
void __init ip_tunnel_core_init(void)
{
lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
+ lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
}
struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
default NFT_REJECT
tristate
+config NFT_DUP_IPV4
+ tristate "IPv4 nf_tables packet duplication support"
+ select NF_DUP_IPV4
+ help
+ This module enables IPv4 packet duplication support for nf_tables.
+
endif # NF_TABLES_IPV4
config NF_TABLES_ARP
endif # NF_TABLES
+config NF_DUP_IPV4
+ tristate "Netfilter IPv4 packet duplication to alternate destination"
+ help
+ This option enables the nf_dup_ipv4 core, which duplicates an IPv4
+ packet to be rerouted to another destination.
+
config NF_LOG_ARP
tristate "ARP packet logging"
default m if NETFILTER_ADVANCED=n
obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o
obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o
+obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
# generic IP tables
# just filtering instance of ARP tables for now
obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
+
+obj-$(CONFIG_NF_DUP_IPV4) += nf_dup_ipv4.o
tcph->cwr = einfo->proto.tcp.cwr;
inet_proto_csum_replace2(&tcph->check, skb,
- oldval, ((__be16 *)tcph)[6], 0);
+ oldval, ((__be16 *)tcph)[6], false);
return true;
}
return -EINVAL;
}
- h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+ h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
if (h) {
struct sockaddr_in sin;
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
struct nf_conntrack_tuple innertuple, origtuple;
const struct nf_conntrack_l4proto *innerproto;
const struct nf_conntrack_tuple_hash *h;
- u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+ const struct nf_conntrack_zone *zone;
+ struct nf_conntrack_zone tmp;
NF_CT_ASSERT(skb->nfct == NULL);
+ zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
/* Are they talking about one of our connections? */
if (!nf_ct_get_tuplepr(skb,
static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
struct sk_buff *skb)
{
- u16 zone = NF_CT_DEFAULT_ZONE;
-
+ u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- if (skb->nfct)
- zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+ if (skb->nfct) {
+ enum ip_conntrack_info ctinfo;
+ const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+ zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+ }
#endif
if (nf_bridge_in_prerouting(skb))
- return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+ return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
if (hooknum == NF_INET_PRE_ROUTING)
- return IP_DEFRAG_CONNTRACK_IN + zone;
+ return IP_DEFRAG_CONNTRACK_IN + zone_id;
else
- return IP_DEFRAG_CONNTRACK_OUT + zone;
+ return IP_DEFRAG_CONNTRACK_OUT + zone_id;
}
static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
--- /dev/null
+/*
+ * (C) 2007 by Sebastian Claßen <sebastian.classen@freenet.ag>
+ * (C) 2007-2010 by Jan Engelhardt <jengelh@medozas.de>
+ *
+ * Extracted from xt_TEE.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/route.h>
+#include <linux/skbuff.h>
+#include <net/checksum.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+ const struct dst_entry *dst;
+
+ if (skb->dev != NULL)
+ return dev_net(skb->dev);
+ dst = skb_dst(skb);
+ if (dst != NULL && dst->dev != NULL)
+ return dev_net(dst->dev);
+#endif
+ return &init_net;
+}
+
+static bool nf_dup_ipv4_route(struct sk_buff *skb, const struct in_addr *gw,
+ int oif)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct net *net = pick_net(skb);
+ struct rtable *rt;
+ struct flowi4 fl4;
+
+ memset(&fl4, 0, sizeof(fl4));
+ if (oif != -1)
+ fl4.flowi4_oif = oif;
+
+ fl4.daddr = gw->s_addr;
+ fl4.flowi4_tos = RT_TOS(iph->tos);
+ fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+ fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
+ rt = ip_route_output_key(net, &fl4);
+ if (IS_ERR(rt))
+ return false;
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, &rt->dst);
+ skb->dev = rt->dst.dev;
+ skb->protocol = htons(ETH_P_IP);
+
+ return true;
+}
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ const struct in_addr *gw, int oif)
+{
+ struct iphdr *iph;
+
+ if (this_cpu_read(nf_skb_duplicated))
+ return;
+ /*
+ * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
+ * the original skb, which should continue on its way as if nothing had
+ * happened. The copy should be independently delivered to the gateway.
+ */
+ skb = pskb_copy(skb, GFP_ATOMIC);
+ if (skb == NULL)
+ return;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ /* Avoid counting cloned packets towards the original connection. */
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = &nf_ct_untracked_get()->ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+#endif
+ /*
+ * If we are in PREROUTING/INPUT, the checksum must be recalculated
+ * since the length could have changed as a result of defragmentation.
+ *
+ * We also decrease the TTL to mitigate potential loops between two
+ * hosts.
+ *
+ * Set %IP_DF so that the original source is notified of a potentially
+ * decreased MTU on the clone route. IPv6 does this too.
+ */
+ iph = ip_hdr(skb);
+ iph->frag_off |= htons(IP_DF);
+ if (hooknum == NF_INET_PRE_ROUTING ||
+ hooknum == NF_INET_LOCAL_IN)
+ --iph->ttl;
+ ip_send_check(iph);
+
+ if (nf_dup_ipv4_route(skb, gw, oif)) {
+ __this_cpu_write(nf_skb_duplicated, true);
+ ip_local_out(skb);
+ __this_cpu_write(nf_skb_duplicated, false);
+ } else {
+ kfree_skb(skb);
+ }
+}
+EXPORT_SYMBOL_GPL(nf_dup_ipv4);
+
+MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("nf_dup_ipv4: Duplicate IPv4 packet");
+MODULE_LICENSE("GPL");
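
As an editorial illustration of the exported entry point (not part of the
patch): a minimal PRE_ROUTING hook that mirrors every IPv4 packet to a fixed
gateway. The name mirror_hook and the 192.0.2.1 gateway are invented for this
sketch; the in-tree consumer is nft_dup_ipv4 below, and nf_dup_ipv6 later
provides the symmetric IPv6 entry point.

static unsigned int mirror_hook(const struct nf_hook_ops *ops,
				struct sk_buff *skb,
				const struct nf_hook_state *state)
{
	struct in_addr gw = { .s_addr = htonl(0xc0000201) }; /* 192.0.2.1 */

	/* clones the skb and reroutes the copy; the original continues */
	nf_dup_ipv4(skb, ops->hooknum, &gw, -1 /* no fixed oif */);
	return NF_ACCEPT;
}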
oldip = iph->daddr;
newip = t->dst.u3.ip;
}
- inet_proto_csum_replace4(check, skb, oldip, newip, 1);
+ inet_proto_csum_replace4(check, skb, oldip, newip, true);
}
static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
}
} else
inet_proto_csum_replace2(check, skb,
- htons(oldlen), htons(datalen), 1);
+ htons(oldlen), htons(datalen), true);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
hdr = (struct icmphdr *)(skb->data + hdroff);
inet_proto_csum_replace2(&hdr->checksum, skb,
- hdr->un.echo.id, tuple->src.u.icmp.id, 0);
+ hdr->un.echo.id, tuple->src.u.icmp.id, false);
hdr->un.echo.id = tuple->src.u.icmp.id;
return true;
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+
+struct nft_dup_ipv4 {
+ enum nft_registers sreg_addr:8;
+ enum nft_registers sreg_dev:8;
+};
+
+static void nft_dup_ipv4_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+ struct in_addr gw = {
+ .s_addr = regs->data[priv->sreg_addr],
+ };
+ int oif = regs->data[priv->sreg_dev];
+
+ nf_dup_ipv4(pkt->skb, pkt->ops->hooknum, &gw, oif);
+}
+
+static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+ int err;
+
+ if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+ return -EINVAL;
+
+ priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+ err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in_addr));
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+ priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+ return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+ }
+ return 0;
+}
+
+static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+
+ if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+ nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_dup_ipv4_type;
+static const struct nft_expr_ops nft_dup_ipv4_ops = {
+ .type = &nft_dup_ipv4_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv4)),
+ .eval = nft_dup_ipv4_eval,
+ .init = nft_dup_ipv4_init,
+ .dump = nft_dup_ipv4_dump,
+};
+
+static const struct nla_policy nft_dup_ipv4_policy[NFTA_DUP_MAX + 1] = {
+ [NFTA_DUP_SREG_ADDR] = { .type = NLA_U32 },
+ [NFTA_DUP_SREG_DEV] = { .type = NLA_U32 },
+};
+
+static struct nft_expr_type nft_dup_ipv4_type __read_mostly = {
+ .family = NFPROTO_IPV4,
+ .name = "dup",
+ .ops = &nft_dup_ipv4_ops,
+ .policy = nft_dup_ipv4_policy,
+ .maxattr = NFTA_DUP_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_dup_ipv4_module_init(void)
+{
+ return nft_register_expr(&nft_dup_ipv4_type);
+}
+
+static void __exit nft_dup_ipv4_module_exit(void)
+{
+ nft_unregister_expr(&nft_dup_ipv4_type);
+}
+
+module_init(nft_dup_ipv4_module_init);
+module_exit(nft_dup_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup");
list_del(&rt->rt_uncached);
spin_unlock_bh(&ul->lock);
}
- lwtstate_put(rt->rt_lwtstate);
}
void rt_flush_dev(struct net_device *dev)
#ifdef CONFIG_IP_ROUTE_CLASSID
rt->dst.tclassid = nh->nh_tclassid;
#endif
- rt->rt_lwtstate = lwtstate_get(nh->nh_lwtstate);
+ rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
if (unlikely(fnhe))
cached = rt_bind_exception(rt, fnhe, daddr);
else if (!(rt->dst.flags & DST_NOCACHE))
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
- rth->rt_lwtstate = NULL;
if (our) {
rth->dst.input= ip_local_deliver;
rth->rt_flags |= RTCF_LOCAL;
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
- rth->rt_lwtstate = NULL;
RT_CACHE_STAT_INC(in_slow_tot);
rth->dst.input = ip_forward;
rth->dst.output = ip_output;
rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
- if (lwtunnel_output_redirect(rth->rt_lwtstate))
+ if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
+ rth->dst.lwtstate->orig_output = rth->dst.output;
rth->dst.output = lwtunnel_output;
+ }
+ if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
+ rth->dst.lwtstate->orig_input = rth->dst.input;
+ rth->dst.input = lwtunnel_input;
+ }
skb_dst_set(skb, &rth->dst);
out:
err = 0;
by fib_lookup.
*/
- tun_info = skb_tunnel_info(skb, AF_INET);
+ tun_info = skb_tunnel_info(skb);
if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
else
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
- rth->rt_lwtstate = NULL;
RT_CACHE_STAT_INC(in_slow_tot);
if (res.type == RTN_UNREACHABLE) {
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
- rth->rt_lwtstate = NULL;
RT_CACHE_STAT_INC(out_slow_tot);
if (flags & RTCF_LOCAL)
}
rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
- if (lwtunnel_output_redirect(rth->rt_lwtstate))
+ if (lwtunnel_output_redirect(rth->dst.lwtstate))
rth->dst.output = lwtunnel_output;
return rth;
rt->rt_uses_gateway = ort->rt_uses_gateway;
INIT_LIST_HEAD(&rt->rt_uncached);
- rt->rt_lwtstate = NULL;
dst_free(new);
}
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
-static int min_sndbuf = SOCK_MIN_SNDBUF;
-static int min_rcvbuf = SOCK_MIN_RCVBUF;
/* Update system visible IP port range */
static void set_local_port_range(struct net *net, int range[2])
.maxlen = sizeof(sysctl_tcp_wmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_sndbuf,
+ .extra1 = &one,
},
{
.procname = "tcp_notsent_lowat",
.maxlen = sizeof(sysctl_tcp_rmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_rcvbuf,
+ .extra1 = &one,
},
{
.procname = "tcp_app_win",
.maxlen = sizeof(sysctl_udp_rmem_min),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_rcvbuf,
+ .extra1 = &one
},
{
.procname = "udp_wmem_min",
.maxlen = sizeof(sysctl_udp_wmem_min),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_sndbuf,
+ .extra1 = &one
},
{ }
};
static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
- int tos,
+ int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
{
memset(fl4, 0, sizeof(*fl4));
fl4->daddr = daddr->a4;
fl4->flowi4_tos = tos;
+ fl4->flowi4_oif = oif;
if (saddr)
fl4->saddr = saddr->a4;
return ERR_CAST(rt);
}
-static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
{
struct flowi4 fl4;
- return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
+ return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr);
}
-static int xfrm4_get_saddr(struct net *net,
+static int xfrm4_get_saddr(struct net *net, int oif,
xfrm_address_t *saddr, xfrm_address_t *daddr)
{
struct dst_entry *dst;
struct flowi4 fl4;
- dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
+ dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr);
if (IS_ERR(dst))
return -EHOSTUNREACH;
If unsure, say N.
+config IPV6_ILA
+ tristate "IPv6: Identifier Locator Addressing (ILA)"
+ select LWTUNNEL
+ ---help---
+ Support for IPv6 Identifier Locator Addressing (ILA).
+
+ ILA is a mechanism to do network virtualization without
+ encapsulation. The basic concept of ILA is that we split an
+ IPv6 address into a 64-bit locator and a 64-bit identifier. The
+ identifier is the identity of an entity in communication
+ ("who") and the locator expresses the location of the
+ entity ("where").
+
+ ILA can be configured using the "encap ila" option of the
+ "ip -6 route" command. ILA is described in
+ https://tools.ietf.org/html/draft-herbert-nvo3-ila-00.
+
+ If unsure, say N.
+
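
As an editorial aside (addresses use the RFC 3849 documentation prefix and are
not taken from the patch): with a configured locator of 2001:db8:1::/64, a
packet addressed to

	2001:db8:0:0:aaaa:bbbb:cccc:dddd

leaves the ILA route rewritten to

	2001:db8:1:0:aaaa:bbbb:cccc:dddd

so only the upper 64 bits (the locator, "where") change, while the lower 64
bits (the identifier, "who") survive end to end; net/ipv6/ila.c below
implements exactly this overwrite plus an incremental transport-checksum
fixup.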
config INET6_XFRM_TUNNEL
tristate
select INET6_TUNNEL
obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o
obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
obj-$(CONFIG_IPV6_MIP6) += mip6.o
+obj-$(CONFIG_IPV6_ILA) += ila.o
obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
/* send a neighbour solicitation for our addr */
addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
- ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
+ ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any, NULL);
out:
in6_ifa_put(ifp);
rtnl_unlock();
--- /dev/null
+#include <linux/errno.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/types.h>
+#include <net/checksum.h>
+#include <net/ip.h>
+#include <net/ip6_fib.h>
+#include <net/lwtunnel.h>
+#include <net/protocol.h>
+#include <uapi/linux/ila.h>
+
+struct ila_params {
+ __be64 locator;
+};
+
+static inline struct ila_params *ila_params_lwtunnel(
+ struct lwtunnel_state *lwstate)
+{
+ return (struct ila_params *)lwstate->data;
+}
+
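+/* Editorial note, not in the patch: csum_partial() over
+ * { ~from[0], ~from[1], to[0], to[1] } yields the one's-complement
+ * difference (to - from), so the locator swap below can patch transport
+ * checksums incrementally (RFC 1624) instead of recomputing them.
+ */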
+static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to)
+{
+ __be32 diff[] = {
+ ~from[0], ~from[1], to[0], to[1],
+ };
+
+ return csum_partial(diff, sizeof(diff), 0);
+}
+
+static inline __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p)
+{
+ return compute_csum_diff8((__be32 *)&ip6h->daddr,
+ (__be32 *)&p->locator);
+}
+
+static void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
+{
+ __wsum diff;
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ size_t nhoff = sizeof(struct ipv6hdr);
+
+ /* First update checksum */
+ switch (ip6h->nexthdr) {
+ case NEXTHDR_TCP:
+ if (likely(pskb_may_pull(skb, nhoff + sizeof(struct tcphdr)))) {
+ struct tcphdr *th = (struct tcphdr *)
+ (skb_network_header(skb) + nhoff);
+
+ diff = get_csum_diff(ip6h, p);
+ inet_proto_csum_replace_by_diff(&th->check, skb,
+ diff, true);
+ }
+ break;
+ case NEXTHDR_UDP:
+ if (likely(pskb_may_pull(skb, nhoff + sizeof(struct udphdr)))) {
+ struct udphdr *uh = (struct udphdr *)
+ (skb_network_header(skb) + nhoff);
+
+ if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ diff = get_csum_diff(ip6h, p);
+ inet_proto_csum_replace_by_diff(&uh->check, skb,
+ diff, true);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+ }
+ break;
+ case NEXTHDR_ICMP:
+ if (likely(pskb_may_pull(skb,
+ nhoff + sizeof(struct icmp6hdr)))) {
+ struct icmp6hdr *ih = (struct icmp6hdr *)
+ (skb_network_header(skb) + nhoff);
+
+ diff = get_csum_diff(ip6h, p);
+ inet_proto_csum_replace_by_diff(&ih->icmp6_cksum, skb,
+ diff, true);
+ }
+ break;
+ }
+
+ /* Now change destination address */
+ *(__be64 *)&ip6h->daddr = p->locator;
+}
+
+static int ila_output(struct sock *sk, struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (skb->protocol != htons(ETH_P_IPV6))
+ goto drop;
+
+ update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+ return dst->lwtstate->orig_output(sk, skb);
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static int ila_input(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (skb->protocol != htons(ETH_P_IPV6))
+ goto drop;
+
+ update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+ return dst->lwtstate->orig_input(skb);
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
+ [ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
+};
+
+static int ila_build_state(struct net_device *dev, struct nlattr *nla,
+ struct lwtunnel_state **ts)
+{
+ struct ila_params *p;
+ struct nlattr *tb[ILA_ATTR_MAX + 1];
+ size_t encap_len = sizeof(*p);
+ struct lwtunnel_state *newts;
+ int ret;
+
+ ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla,
+ ila_nl_policy);
+ if (ret < 0)
+ return ret;
+
+ if (!tb[ILA_ATTR_LOCATOR])
+ return -EINVAL;
+
+ newts = lwtunnel_state_alloc(encap_len);
+ if (!newts)
+ return -ENOMEM;
+
+ newts->len = encap_len;
+ p = ila_params_lwtunnel(newts);
+
+ p->locator = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);
+
+ newts->type = LWTUNNEL_ENCAP_ILA;
+ newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT |
+ LWTUNNEL_STATE_INPUT_REDIRECT;
+
+ *ts = newts;
+
+ return 0;
+}
+
+static int ila_fill_encap_info(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ struct ila_params *p = ila_params_lwtunnel(lwtstate);
+
+ if (nla_put_u64(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+ /* No encapsulation overhead */
+ return 0;
+}
+
+static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+ struct ila_params *a_p = ila_params_lwtunnel(a);
+ struct ila_params *b_p = ila_params_lwtunnel(b);
+
+ return (a_p->locator != b_p->locator);
+}
+
+static const struct lwtunnel_encap_ops ila_encap_ops = {
+ .build_state = ila_build_state,
+ .output = ila_output,
+ .input = ila_input,
+ .fill_encap = ila_fill_encap_info,
+ .get_encap_size = ila_encap_nlsize,
+ .cmp_encap = ila_encap_cmp,
+};
+
+static int __init ila_init(void)
+{
+ return lwtunnel_encap_add_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
+}
+
+static void __exit ila_fini(void)
+{
+ lwtunnel_encap_del_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
+}
+
+module_init(ila_init);
+module_exit(ila_fini);
+MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
+MODULE_LICENSE("GPL");
*ppcpu_rt = NULL;
}
}
+
+ non_pcpu_rt->rt6i_pcpu = NULL;
}
static void rt6_release(struct rt6_info *rt)
{
if (atomic_dec_and_test(&rt->rt6i_ref)) {
- lwtstate_put(rt->rt6i_lwtstate);
rt6_free_pcpu(rt);
dst_free(&rt->dst);
}
struct sk_buff *skb_chk = NULL;
unsigned int transport_len;
unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
- int ret;
+ int ret = -EINVAL;
transport_len = ntohs(ipv6_hdr(skb)->payload_len);
transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
- skb_get(skb);
skb_chk = skb_checksum_trimmed(skb, transport_len,
ipv6_mc_validate_checksum);
if (!skb_chk)
- return -EINVAL;
+ goto err;
- if (!pskb_may_pull(skb_chk, len)) {
- kfree_skb(skb_chk);
- return -EINVAL;
- }
+ if (!pskb_may_pull(skb_chk, len))
+ goto err;
ret = ipv6_mc_check_mld_msg(skb_chk);
- if (ret) {
- kfree_skb(skb_chk);
- return ret;
- }
+ if (ret)
+ goto err;
if (skb_trimmed)
*skb_trimmed = skb_chk;
- else
+ /* free the now-unneeded clone */
+ else if (skb_chk != skb)
kfree_skb(skb_chk);
- return 0;
+ ret = 0;
+
+err:
+ if (ret && skb_chk && skb_chk != skb)
+ kfree_skb(skb_chk);
+
+ return ret;
}
/**
* @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
*
* Checks whether an IPv6 packet is a valid MLD packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
*
* -EINVAL: A broken packet was detected, i.e. it violates some internet
* standard
* to leave the original skb and its full frame unchanged (which might be
* desirable for layer 2 frame jugglers).
*
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * The caller needs to set the skb network header and to free any returned
+ * skb if it differs from the provided skb.
*/
int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
{
void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
const struct in6_addr *solicit,
- const struct in6_addr *daddr, const struct in6_addr *saddr)
+ const struct in6_addr *daddr, const struct in6_addr *saddr,
+ struct sk_buff *oskb)
{
struct sk_buff *skb;
struct in6_addr addr_buf;
ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
dev->dev_addr);
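+ /* editorial note: reuse the (possibly metadata) dst of the packet
+ * that triggered this solicitation, so the probe follows the same path
+ */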
+ if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
+ skb_dst_copy(skb, oskb);
+
ndisc_send_skb(skb, daddr, saddr);
}
"%s: trying to ucast probe in NUD_INVALID: %pI6\n",
__func__, target);
}
- ndisc_send_ns(dev, neigh, target, target, saddr);
+ ndisc_send_ns(dev, neigh, target, target, saddr, skb);
} else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
neigh_app_ns(neigh);
} else {
addrconf_addr_solict_mult(target, &mcaddr);
- ndisc_send_ns(dev, NULL, target, &mcaddr, saddr);
+ ndisc_send_ns(dev, NULL, target, &mcaddr, saddr, skb);
}
}
default NFT_REJECT
tristate
+config NFT_DUP_IPV6
+ tristate "IPv6 nf_tables packet duplication support"
+ select NF_DUP_IPV6
+ help
+ This module enables IPv6 packet duplication support for nf_tables.
+
endif # NF_TABLES_IPV6
endif # NF_TABLES
+config NF_DUP_IPV6
+ tristate "Netfilter IPv6 packet duplication to alternate destination"
+ help
+ This option enables the nf_dup_ipv6 core, which duplicates an IPv6
+ packet to be rerouted to another destination.
+
config NF_REJECT_IPV6
tristate "IPv6 packet rejection"
default m if NETFILTER_ADVANCED=n
# reject
obj-$(CONFIG_NF_REJECT_IPV6) += nf_reject_ipv6.o
+obj-$(CONFIG_NF_DUP_IPV6) += nf_dup_ipv6.o
+
# nf_tables
obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
obj-$(CONFIG_NFT_MASQ_IPV6) += nft_masq_ipv6.o
obj-$(CONFIG_NFT_REDIR_IPV6) += nft_redir_ipv6.o
+obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o
# matches
obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
if (*len < 0 || (unsigned int) *len < sizeof(sin6))
return -EINVAL;
- h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+ h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
if (!h) {
pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n",
&tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port),
struct nf_conntrack_tuple intuple, origtuple;
const struct nf_conntrack_tuple_hash *h;
const struct nf_conntrack_l4proto *inproto;
- u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+ struct nf_conntrack_zone tmp;
NF_CT_ASSERT(skb->nfct == NULL);
*ctinfo = IP_CT_RELATED;
- h = nf_conntrack_find_get(net, zone, &intuple);
+ h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp),
+ &intuple);
if (!h) {
pr_debug("icmpv6_error: no match\n");
return -NF_ACCEPT;
static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
struct sk_buff *skb)
{
- u16 zone = NF_CT_DEFAULT_ZONE;
-
+ u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- if (skb->nfct)
- zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+ if (skb->nfct) {
+ enum ip_conntrack_info ctinfo;
+ const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+ zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+ }
#endif
if (nf_bridge_in_prerouting(skb))
- return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+ return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
if (hooknum == NF_INET_PRE_ROUTING)
- return IP6_DEFRAG_CONNTRACK_IN + zone;
+ return IP6_DEFRAG_CONNTRACK_IN + zone_id;
else
- return IP6_DEFRAG_CONNTRACK_OUT + zone;
-
+ return IP6_DEFRAG_CONNTRACK_OUT + zone_id;
}
static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
--- /dev/null
+/*
+ * (C) 2007 by Sebastian Claßen <sebastian.classen@freenet.ag>
+ * (C) 2007-2010 by Jan Engelhardt <jengelh@medozas.de>
+ *
+ * Extracted from xt_TEE.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/skbuff.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+ const struct dst_entry *dst;
+
+ if (skb->dev != NULL)
+ return dev_net(skb->dev);
+ dst = skb_dst(skb);
+ if (dst != NULL && dst->dev != NULL)
+ return dev_net(dst->dev);
+#endif
+ return &init_net;
+}
+
+static bool nf_dup_ipv6_route(struct sk_buff *skb, const struct in6_addr *gw,
+ int oif)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct net *net = pick_net(skb);
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ if (oif != -1)
+ fl6.flowi6_oif = oif;
+
+ fl6.daddr = *gw;
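+ /* editorial note: rebuild the 20-bit flow label -- the low nibble of
+ * flow_lbl[0] carries its top four bits (the high nibble belongs to
+ * the traffic class)
+ */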
+ fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
+ (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst->error) {
+ dst_release(dst);
+ return false;
+ }
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+ skb->dev = dst->dev;
+ skb->protocol = htons(ETH_P_IPV6);
+
+ return true;
+}
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+ const struct in6_addr *gw, int oif)
+{
+ if (this_cpu_read(nf_skb_duplicated))
+ return;
+ skb = pskb_copy(skb, GFP_ATOMIC);
+ if (skb == NULL)
+ return;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = &nf_ct_untracked_get()->ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+#endif
+ if (hooknum == NF_INET_PRE_ROUTING ||
+ hooknum == NF_INET_LOCAL_IN) {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ --iph->hop_limit;
+ }
+ if (nf_dup_ipv6_route(skb, gw, oif)) {
+ __this_cpu_write(nf_skb_duplicated, true);
+ ip6_local_out(skb);
+ __this_cpu_write(nf_skb_duplicated, false);
+ } else {
+ kfree_skb(skb);
+ }
+}
+EXPORT_SYMBOL_GPL(nf_dup_ipv6);
+
+MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("nf_dup_ipv6: IPv6 packet duplication");
+MODULE_LICENSE("GPL");
newip = &t->dst.u3.in6;
}
inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
- newip->s6_addr32, 1);
+ newip->s6_addr32, true);
}
static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
}
} else
inet_proto_csum_replace2(check, skb,
- htons(oldlen), htons(datalen), 1);
+ htons(oldlen), htons(datalen), true);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
hdr->icmp6_identifier,
- tuple->src.u.icmp.id, 0);
+ tuple->src.u.icmp.id, false);
hdr->icmp6_identifier = tuple->src.u.icmp.id;
}
return true;
--- /dev/null
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
+
+struct nft_dup_ipv6 {
+ enum nft_registers sreg_addr:8;
+ enum nft_registers sreg_dev:8;
+};
+
+static void nft_dup_ipv6_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+ struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr];
+ int oif = regs->data[priv->sreg_dev];
+
+ nf_dup_ipv6(pkt->skb, pkt->ops->hooknum, gw, oif);
+}
+
+static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+ int err;
+
+ if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+ return -EINVAL;
+
+ priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+ err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in6_addr));
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+ priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+ return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+ }
+ return 0;
+}
+
+static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+
+ if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+ nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_dup_ipv6_type;
+static const struct nft_expr_ops nft_dup_ipv6_ops = {
+ .type = &nft_dup_ipv6_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv6)),
+ .eval = nft_dup_ipv6_eval,
+ .init = nft_dup_ipv6_init,
+ .dump = nft_dup_ipv6_dump,
+};
+
+static const struct nla_policy nft_dup_ipv6_policy[NFTA_DUP_MAX + 1] = {
+ [NFTA_DUP_SREG_ADDR] = { .type = NLA_U32 },
+ [NFTA_DUP_SREG_DEV] = { .type = NLA_U32 },
+};
+
+static struct nft_expr_type nft_dup_ipv6_type __read_mostly = {
+ .family = NFPROTO_IPV6,
+ .name = "dup",
+ .ops = &nft_dup_ipv6_ops,
+ .policy = nft_dup_ipv6_policy,
+ .maxattr = NFTA_DUP_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_dup_ipv6_module_init(void)
+{
+ return nft_register_expr(&nft_dup_ipv6_type);
+}
+
+static void __exit nft_dup_ipv6_module_exit(void)
+{
+ nft_unregister_expr(&nft_dup_ipv6_type);
+}
+
+module_init(nft_dup_ipv6_module_init);
+module_exit(nft_dup_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "dup");
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
+#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
+#include <net/ip_tunnels.h>
#include <asm/uaccess.h>
/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
struct net_device *dev,
- int flags,
- struct fib6_table *table)
+ int flags)
{
struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
0, DST_OBSOLETE_FORCE_CHK, flags);
static struct rt6_info *ip6_dst_alloc(struct net *net,
struct net_device *dev,
- int flags,
- struct fib6_table *table)
+ int flags)
{
- struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+ struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
if (rt) {
rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
container_of(w, struct __rt6_probe_work, work);
addrconf_addr_solict_mult(&work->target, &mcaddr);
- ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+ ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
dev_put(work->dev);
kfree(work);
}
if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
ort = (struct rt6_info *)ort->dst.from;
- rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
- 0, ort->rt6i_table);
+ rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
if (!rt)
return NULL;
struct rt6_info *pcpu_rt;
pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
- rt->dst.dev, rt->dst.flags,
- rt->rt6i_table);
+ rt->dst.dev, rt->dst.flags);
if (!pcpu_rt)
return NULL;
/* It should be called with read_lock_bh(&tb6_lock) acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
- struct rt6_info *pcpu_rt, *prev, **p;
+ struct rt6_info *pcpu_rt, **p;
p = this_cpu_ptr(rt->rt6i_pcpu);
pcpu_rt = *p;
- if (pcpu_rt)
- goto done;
+ if (pcpu_rt) {
+ dst_hold(&pcpu_rt->dst);
+ rt6_dst_from_metrics_check(pcpu_rt);
+ }
+ return pcpu_rt;
+}
+
+static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
+{
+ struct fib6_table *table = rt->rt6i_table;
+ struct rt6_info *pcpu_rt, *prev, **p;
pcpu_rt = ip6_rt_pcpu_alloc(rt);
if (!pcpu_rt) {
struct net *net = dev_net(rt->dst.dev);
- pcpu_rt = net->ipv6.ip6_null_entry;
- goto done;
+ dst_hold(&net->ipv6.ip6_null_entry->dst);
+ return net->ipv6.ip6_null_entry;
}
- prev = cmpxchg(p, NULL, pcpu_rt);
- if (prev) {
- /* If someone did it before us, return prev instead */
+ read_lock_bh(&table->tb6_lock);
+ if (rt->rt6i_pcpu) {
+ p = this_cpu_ptr(rt->rt6i_pcpu);
+ prev = cmpxchg(p, NULL, pcpu_rt);
+ if (prev) {
+ /* If someone did it before us, return prev instead */
+ dst_destroy(&pcpu_rt->dst);
+ pcpu_rt = prev;
+ }
+ } else {
+ /* rt has been removed from the fib6 tree
+ * before we have a chance to acquire the read_lock.
+ * In this case, don't bother to create a pcpu rt
+ * since rt is going away anyway. The next
+ * dst_check() will trigger a re-lookup.
+ */
dst_destroy(&pcpu_rt->dst);
- pcpu_rt = prev;
+ pcpu_rt = rt;
}
-
-done:
dst_hold(&pcpu_rt->dst);
rt6_dst_from_metrics_check(pcpu_rt);
+ read_unlock_bh(&table->tb6_lock);
return pcpu_rt;
}
rt->dst.lastuse = jiffies;
rt->dst.__use++;
pcpu_rt = rt6_get_pcpu_route(rt);
- read_unlock_bh(&table->tb6_lock);
+
+ if (pcpu_rt) {
+ read_unlock_bh(&table->tb6_lock);
+ } else {
+ /* We have to do the read_unlock first
+ * because rt6_make_pcpu_route() may trigger
+ * ip6_dst_gc() which will take the write_lock.
+ */
+ dst_hold(&rt->dst);
+ read_unlock_bh(&table->tb6_lock);
+ pcpu_rt = rt6_make_pcpu_route(rt);
+ dst_release(&rt->dst);
+ }
return pcpu_rt;
}
}
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct net *net = dev_net(skb->dev);
int flags = RT6_LOOKUP_F_HAS_SADDR;
+ struct ip_tunnel_info *tun_info;
struct flowi6 fl6 = {
.flowi6_iif = skb->dev->ifindex,
.daddr = iph->daddr,
.flowi6_proto = iph->nexthdr,
};
+ tun_info = skb_tunnel_info(skb);
+ if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
+ fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
+ skb_dst_drop(skb);
skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}
if (unlikely(!idev))
return ERR_PTR(-ENODEV);
- rt = ip6_dst_alloc(net, dev, 0, NULL);
+ rt = ip6_dst_alloc(net, dev, 0);
if (unlikely(!rt)) {
in6_dev_put(idev);
dst = ERR_PTR(-ENOMEM);
if (!table)
goto out;
- rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+ rt = ip6_dst_alloc(net, NULL,
+ (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
if (!rt) {
err = -ENOMEM;
cfg->fc_encap, &lwtstate);
if (err)
goto out;
- rt->rt6i_lwtstate = lwtstate_get(lwtstate);
- if (lwtunnel_output_redirect(rt->rt6i_lwtstate))
- rt->dst.output = lwtunnel_output6;
+ rt->dst.lwtstate = lwtstate_get(lwtstate);
+ if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
+ rt->dst.lwtstate->orig_output = rt->dst.output;
+ rt->dst.output = lwtunnel_output;
+ }
+ if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
+ rt->dst.lwtstate->orig_input = rt->dst.input;
+ rt->dst.input = lwtunnel_input;
+ }
}
ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
#endif
rt->rt6i_prefsrc = ort->rt6i_prefsrc;
rt->rt6i_table = ort->rt6i_table;
- rt->rt6i_lwtstate = lwtstate_get(ort->rt6i_lwtstate);
+ rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
}
#ifdef CONFIG_IPV6_ROUTE_INFO
{
struct net *net = dev_net(idev->dev);
struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
- DST_NOCOUNT, NULL);
+ DST_NOCOUNT);
if (!rt)
return ERR_PTR(-ENOMEM);
+ nla_total_size(sizeof(struct rta_cacheinfo))
+ nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
+ nla_total_size(1) /* RTA_PREF */
- + lwtunnel_get_encap_size(rt->rt6i_lwtstate);
+ + lwtunnel_get_encap_size(rt->dst.lwtstate);
}
static int rt6_fill_node(struct net *net,
if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
goto nla_put_failure;
- lwtunnel_fill_encap(skb, rt->rt6i_lwtstate);
+ lwtunnel_fill_encap(skb, rt->dst.lwtstate);
nlmsg_end(skb, nlh);
return 0;
return udp_proc_register(net, &udp6_seq_afinfo);
}
-void udp6_proc_exit(struct net *net) {
+void udp6_proc_exit(struct net *net)
+{
udp_proc_unregister(net, &udp6_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
- const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
- if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
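+ /* editorial note: the outer IPv6 header may no longer be accessible
+ * at this point, so use the ToS byte saved by the xfrm input path
+ */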
+ if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
IP6_ECN_set_ce(inner_iph);
}
static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
-static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
{
int err;
memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
if (saddr)
memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
return dst;
}
-static int xfrm6_get_saddr(struct net *net,
+static int xfrm6_get_saddr(struct net *net, int oif,
xfrm_address_t *saddr, xfrm_address_t *daddr)
{
struct dst_entry *dst;
struct net_device *dev;
- dst = xfrm6_dst_lookup(net, 0, NULL, daddr);
+ dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr);
if (IS_ERR(dst))
return -EHOSTUNREACH;
select CRYPTO_CCM
select CRYPTO_GCM
select CRC32
- select AVERAGE
---help---
This option enables the hardware independent IEEE 802.11
networking stack.
# mac80211 objects
mac80211-y := \
main.o status.o \
+ driver-ops.o \
sta_info.o \
wep.o \
wpa.o \
{
crypto_free_cipher(tfm);
}
-
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
- u8 *k1, u8 *k2)
-{
- u8 l[AES_BLOCK_SIZE] = {};
- struct ieee80211_key *key =
- container_of(keyconf, struct ieee80211_key, conf);
-
- crypto_cipher_encrypt_one(key->u.aes_cmac.tfm, l, l);
-
- memcpy(k1, l, AES_BLOCK_SIZE);
- gf_mulx(k1);
-
- memcpy(k2, k1, AES_BLOCK_SIZE);
- gf_mulx(k2);
-}
-EXPORT_SYMBOL(ieee80211_aes_cmac_calculate_k1_k2);
return 0;
}
+static void sta_apply_mesh_params(struct ieee80211_local *local,
+ struct sta_info *sta,
+ struct station_parameters *params)
+{
+#ifdef CONFIG_MAC80211_MESH
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ u32 changed = 0;
+
+ if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
+ switch (params->plink_state) {
+ case NL80211_PLINK_ESTAB:
+ if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
+ changed = mesh_plink_inc_estab_count(sdata);
+ sta->mesh->plink_state = params->plink_state;
+
+ ieee80211_mps_sta_status_update(sta);
+ changed |= ieee80211_mps_set_sta_local_pm(sta,
+ sdata->u.mesh.mshcfg.power_mode);
+ break;
+ case NL80211_PLINK_LISTEN:
+ case NL80211_PLINK_BLOCKED:
+ case NL80211_PLINK_OPN_SNT:
+ case NL80211_PLINK_OPN_RCVD:
+ case NL80211_PLINK_CNF_RCVD:
+ case NL80211_PLINK_HOLDING:
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+ changed = mesh_plink_dec_estab_count(sdata);
+ sta->mesh->plink_state = params->plink_state;
+
+ ieee80211_mps_sta_status_update(sta);
+ changed |= ieee80211_mps_set_sta_local_pm(sta,
+ NL80211_MESH_POWER_UNKNOWN);
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+ }
+
+ switch (params->plink_action) {
+ case NL80211_PLINK_ACTION_NO_ACTION:
+ /* nothing */
+ break;
+ case NL80211_PLINK_ACTION_OPEN:
+ changed |= mesh_plink_open(sta);
+ break;
+ case NL80211_PLINK_ACTION_BLOCK:
+ changed |= mesh_plink_block(sta);
+ break;
+ }
+
+ if (params->local_pm)
+ changed |= ieee80211_mps_set_sta_local_pm(sta,
+ params->local_pm);
+
+ ieee80211_mbss_info_change_notify(sdata, changed);
+#endif
+}
+
static int sta_apply_parameters(struct ieee80211_local *local,
struct sta_info *sta,
struct station_parameters *params)
}
if (mask & BIT(NL80211_STA_FLAG_MFP)) {
- sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP));
if (set & BIT(NL80211_STA_FLAG_MFP))
set_sta_flag(sta, WLAN_STA_MFP);
else
params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)
set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH);
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+ ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
+ params->ext_capab_len >= 8 &&
+ params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED)
+ set_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW);
+
if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) {
sta->sta.uapsd_queues = params->uapsd_queues;
sta->sta.max_sp = params->max_sp;
band, false);
}
- if (ieee80211_vif_is_mesh(&sdata->vif)) {
-#ifdef CONFIG_MAC80211_MESH
- u32 changed = 0;
-
- if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
- switch (params->plink_state) {
- case NL80211_PLINK_ESTAB:
- if (sta->plink_state != NL80211_PLINK_ESTAB)
- changed = mesh_plink_inc_estab_count(
- sdata);
- sta->plink_state = params->plink_state;
-
- ieee80211_mps_sta_status_update(sta);
- changed |= ieee80211_mps_set_sta_local_pm(sta,
- sdata->u.mesh.mshcfg.power_mode);
- break;
- case NL80211_PLINK_LISTEN:
- case NL80211_PLINK_BLOCKED:
- case NL80211_PLINK_OPN_SNT:
- case NL80211_PLINK_OPN_RCVD:
- case NL80211_PLINK_CNF_RCVD:
- case NL80211_PLINK_HOLDING:
- if (sta->plink_state == NL80211_PLINK_ESTAB)
- changed = mesh_plink_dec_estab_count(
- sdata);
- sta->plink_state = params->plink_state;
-
- ieee80211_mps_sta_status_update(sta);
- changed |= ieee80211_mps_set_sta_local_pm(sta,
- NL80211_MESH_POWER_UNKNOWN);
- break;
- default:
- /* nothing */
- break;
- }
- }
-
- switch (params->plink_action) {
- case NL80211_PLINK_ACTION_NO_ACTION:
- /* nothing */
- break;
- case NL80211_PLINK_ACTION_OPEN:
- changed |= mesh_plink_open(sta);
- break;
- case NL80211_PLINK_ACTION_BLOCK:
- changed |= mesh_plink_block(sta);
- break;
- }
-
- if (params->local_pm)
- changed |=
- ieee80211_mps_set_sta_local_pm(sta,
- params->local_pm);
- ieee80211_mbss_info_change_notify(sdata, changed);
-#endif
- }
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ sta_apply_mesh_params(local, sta, params);
/* set the STA state after all sta info from usermode has been set */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
const u8 *ap;
enum ieee80211_smps_mode old_req;
int err;
+ struct sta_info *sta;
+ bool tdls_peer_found = false;
lockdep_assert_held(&sdata->wdev.mtx);
ap = sdata->u.mgd.associated->bssid;
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+ if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ continue;
+
+ tdls_peer_found = true;
+ break;
+ }
+ rcu_read_unlock();
+
if (smps_mode == IEEE80211_SMPS_AUTOMATIC) {
- if (sdata->u.mgd.powersave)
- smps_mode = IEEE80211_SMPS_DYNAMIC;
- else
+ if (tdls_peer_found || !sdata->u.mgd.powersave)
smps_mode = IEEE80211_SMPS_OFF;
+ else
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
}
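
A minimal sketch of the decision above (names invented), the apparent intent
being that frames sent directly by a TDLS peer are not coordinated by the AP,
so dynamic SMPS cannot be used while such a peer is up and AUTOMATIC resolves
to OFF instead:

#include <stdio.h>

enum smps { SMPS_AUTOMATIC, SMPS_OFF, SMPS_DYNAMIC };

static enum smps resolve_smps(enum smps req, int powersave, int tdls_peer)
{
	if (req != SMPS_AUTOMATIC)
		return req;
	return (tdls_peer || !powersave) ? SMPS_OFF : SMPS_DYNAMIC;
}

int main(void)
{
	/* with an authorized TDLS peer, AUTOMATIC resolves to OFF
	 * even when powersave is on */
	printf("%d\n", resolve_smps(SMPS_AUTOMATIC, 1, 1) == SMPS_OFF);
	return 0;
}
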
/* send SM PS frame to AP */
ap, ap);
if (err)
sdata->u.mgd.req_smps = old_req;
+ else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found)
+ ieee80211_teardown_tdls_peers(sdata);
return err;
}
sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].ht_mcs,
sizeof(mask->control[i].ht_mcs));
+ memcpy(sdata->rc_rateidx_vht_mcs_mask[i],
+ mask->control[i].vht_mcs,
+ sizeof(mask->control[i].vht_mcs));
sdata->rc_has_mcs_mask[i] = false;
+ sdata->rc_has_vht_mcs_mask[i] = false;
if (!sband)
continue;
- for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++)
- if (~sdata->rc_rateidx_mcs_mask[i][j]) {
+ for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
+ if (~sdata->rc_rateidx_mcs_mask[i][j])
sdata->rc_has_mcs_mask[i] = true;
+
+ if (~sdata->rc_rateidx_vht_mcs_mask[i][j])
+ sdata->rc_has_vht_mcs_mask[i] = true;
+
+ if (sdata->rc_has_mcs_mask[i] &&
+ sdata->rc_has_vht_mcs_mask[i])
break;
- }
+ }
}
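
Two hedged observations on the merged detection loop above: the u8/u16 operand
of ~ is promoted to int, so the complement is arguably non-zero even for an
all-ones mask, and j runs up to IEEE80211_HT_MCS_MASK_LEN - 1 (9) while the
VHT mask has only NL80211_VHT_NSS_MAX (8) entries, which looks like an
out-of-bounds read at j = 8 and 9. A small sketch of the intended "any bit
cleared" test, with the complement cast back to the element width:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool mask_is_restricted(const uint8_t *mask, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if ((uint8_t)~mask[i])	/* any cleared bit in this byte? */
			return true;
	return false;
}

int main(void)
{
	uint8_t all_on[2] = { 0xff, 0xff };
	uint8_t limited[2] = { 0xff, 0x7f };	/* top MCS masked out */

	assert(!mask_is_restricted(all_on, 2));
	assert(mask_is_restricted(limited, 2));
	return 0;
}
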
return 0;
return NULL;
}
-static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
+enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
{
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_20:
case NL80211_IFTYPE_AP_VLAN:
width = ieee80211_get_max_required_bw(sdata);
break;
+ case NL80211_IFTYPE_STATION:
+ /*
+ * The AP's sta->bandwidth is not set yet at this
+ * point, so take the width from the chandef, but
+ * also account for TDLS peers
+ */
+ width = max(vif->bss_conf.chandef.width,
+ ieee80211_get_max_required_bw(sdata));
+ break;
case NL80211_IFTYPE_P2P_DEVICE:
continue;
- case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MESH_POINT:
kfree_rcu(ctx, rcu_head);
}
-static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx)
+void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx)
{
struct ieee80211_chanctx_conf *conf = &ctx->conf;
struct ieee80211_sub_if_data *sdata;
const struct cfg80211_chan_def *compat = NULL;
+ struct sta_info *sta;
lockdep_assert_held(&local->chanctx_mtx);
if (WARN_ON_ONCE(!compat))
break;
}
+
+ /* TDLS peers can sometimes affect the chandef width */
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ if (!sta->uploaded ||
+ !test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) ||
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
+ !sta->tdls_chandef.chan)
+ continue;
+
+ compat = cfg80211_chandef_compatible(&sta->tdls_chandef,
+ compat);
+ if (WARN_ON_ONCE(!compat))
+ break;
+ }
rcu_read_unlock();
if (!compat)
FLAG(CHANCTX_STA_CSA),
FLAG(SUPPORTS_CLONED_SKBS),
FLAG(SINGLE_SCAN_ON_ALL_BANDS),
+ FLAG(TDLS_WIDER_BW),
/* keep last for the build bug below */
(void *)0x1
DEBUGFS_STATS_ADD(rx_handlers_queued);
DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc);
DEBUGFS_STATS_ADD(rx_handlers_drop_defrag);
- DEBUGFS_STATS_ADD(rx_handlers_drop_short);
DEBUGFS_STATS_ADD(tx_expand_skb_head);
DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned);
DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag);
KEY_CONF_FILE(keyidx, D);
KEY_CONF_FILE(hw_key_idx, D);
KEY_FILE(flags, X);
-KEY_FILE(tx_rx_count, D);
KEY_READ(ifindex, sdata->name, "%s\n");
KEY_OPS(ifindex);
DEBUGFS_ADD(flags);
DEBUGFS_ADD(keyidx);
DEBUGFS_ADD(hw_key_idx);
- DEBUGFS_ADD(tx_rx_count);
DEBUGFS_ADD(algorithm);
DEBUGFS_ADD(tx_spec);
DEBUGFS_ADD(rx_spec);
IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY);
+static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz(
+ const struct ieee80211_sub_if_data *sdata,
+ char *buf, int buflen)
+{
+ int i, len = 0;
+ const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_2GHZ];
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
+ len += scnprintf(buf + len, buflen - len, "\n");
+
+ return len;
+}
+
+IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_2ghz);
+
+static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz(
+ const struct ieee80211_sub_if_data *sdata,
+ char *buf, int buflen)
+{
+ int i, len = 0;
+ const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_5GHZ];
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
+ len += scnprintf(buf + len, buflen - len, "\n");
+
+ return len;
+}
+
+IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_5ghz);
+
IEEE80211_IF_FILE(flags, flags, HEX);
IEEE80211_IF_FILE(state, state, LHEX);
IEEE80211_IF_FILE(txpower, vif.bss_conf.txpower, DEC);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
+ DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
DEBUGFS_ADD(hw_queues);
}
--- /dev/null
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+#include "trace.h"
+#include "driver-ops.h"
+
+__must_check
+int drv_sta_state(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ int ret = 0;
+
+ might_sleep();
+
+ sdata = get_bss_sdata(sdata);
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
+ if (local->ops->sta_state) {
+ ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
+ old_state, new_state);
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = drv_sta_add(local, sdata, &sta->sta);
+ if (ret == 0)
+ sta->uploaded = true;
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ drv_sta_remove(local, sdata, &sta->sta);
+ }
+ trace_drv_return_int(local, ret);
+ return ret;
+}
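
A minimal sketch (with invented callback names) of the fallback that
drv_sta_state() implements for drivers without a sta_state operation: the old
sta_add/sta_remove semantics are emulated on the AUTH<->ASSOC transitions and
every other transition is a no-op.

#include <stdio.h>

enum sta_state { ST_NOTEXIST, ST_NONE, ST_AUTH, ST_ASSOC, ST_AUTHORIZED };

static int sta_add_cb(void)     { puts("sta_add");    return 0; }
static void sta_remove_cb(void) { puts("sta_remove"); }

static int emulate_sta_state(enum sta_state old, enum sta_state new)
{
	if (old == ST_AUTH && new == ST_ASSOC)
		return sta_add_cb();	/* uploaded = true on success */
	if (old == ST_ASSOC && new == ST_AUTH)
		sta_remove_cb();
	return 0;			/* other transitions: no-op */
}

int main(void)
{
	emulate_sta_state(ST_AUTH, ST_ASSOC);		/* adds */
	emulate_sta_state(ST_ASSOC, ST_AUTHORIZED);	/* no-op */
	emulate_sta_state(ST_ASSOC, ST_AUTH);		/* removes */
	return 0;
}
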
trace_drv_return_void(local);
}
-static inline __must_check
+__must_check
int drv_sta_state(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
-{
- int ret = 0;
-
- might_sleep();
-
- sdata = get_bss_sdata(sdata);
- if (!check_sdata_in_driver(sdata))
- return -EIO;
-
- trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
- if (local->ops->sta_state) {
- ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
- old_state, new_state);
- } else if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC) {
- ret = drv_sta_add(local, sdata, &sta->sta);
- if (ret == 0)
- sta->uploaded = true;
- } else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH) {
- drv_sta_remove(local, sdata, &sta->sta);
- }
- trace_drv_return_int(local, ret);
- return ret;
-}
+ enum ieee80211_sta_state new_state);
static inline void drv_sta_rc_update(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
#define IEEE80211_DEAUTH_FRAME_LEN (24 /* hdr */ + 2 /* reason */)
struct ieee80211_fragment_entry {
- unsigned long first_frag_time;
- unsigned int seq;
- unsigned int rx_queue;
- unsigned int last_frag;
- unsigned int extra_len;
struct sk_buff_head skb_list;
- int ccmp; /* Whether fragments were encrypted with CCMP */
+ unsigned long first_frag_time;
+ u16 seq;
+ u16 extra_len;
+ u16 last_frag;
+ u8 rx_queue;
+ bool ccmp; /* Whether fragments were encrypted with CCMP */
u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
};
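
The reshuffled struct above narrows the bookkeeping fields to the widths they
actually need and moves the skb list first. A standalone sketch of the effect;
exact sizes depend on the ABI and these types only approximate the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct frag_old {			/* roughly the old layout */
	unsigned long first_frag_time;
	unsigned int seq, rx_queue, last_frag, extra_len;
	int ccmp;
	uint8_t last_pn[6];
};

struct frag_new {			/* roughly the new layout */
	unsigned long first_frag_time;
	uint16_t seq, extra_len, last_frag;
	uint8_t rx_queue;
	bool ccmp;
	uint8_t last_pn[6];
};

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct frag_old), sizeof(struct frag_new));
	return 0;
}
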
/**
* enum ieee80211_packet_rx_flags - packet RX flags
- * @IEEE80211_RX_FRAGMENTED: fragmented frame
* @IEEE80211_RX_AMSDU: a-MSDU packet
* @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
* @IEEE80211_RX_DEFERRED_RELEASE: frame was subjected to receive reordering
* @rx_flags field of &struct ieee80211_rx_status.
*/
enum ieee80211_packet_rx_flags {
- IEEE80211_RX_FRAGMENTED = BIT(2),
IEEE80211_RX_AMSDU = BIT(3),
IEEE80211_RX_MALFORMED_ACTION_FRM = BIT(4),
IEEE80211_RX_DEFERRED_RELEASE = BIT(5),
* @IEEE80211_RX_CMNTR: received on cooked monitor already
* @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
* to cfg80211_report_obss_beacon().
- * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
- * reorder buffer timeout timer, not the normal RX path
*
* These flags are used across handling multiple interfaces
* for a single frame.
enum ieee80211_rx_flags {
IEEE80211_RX_CMNTR = BIT(0),
IEEE80211_RX_BEACON_REPORTED = BIT(1),
- IEEE80211_RX_REORDER_TIMER = BIT(2),
};
struct ieee80211_rx_data {
+ struct napi_struct *napi;
struct sk_buff *skb;
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
* back to wireless media and to the local net stack.
* @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
* @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
+ * @IEEE80211_SDATA_MU_MIMO_OWNER: indicates interface owns MU-MIMO capability
*/
enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_ALLMULTI = BIT(0),
IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4),
IEEE80211_SDATA_IN_DRIVER = BIT(5),
+ IEEE80211_SDATA_MU_MIMO_OWNER = BIT(6),
};
/**
bool rc_has_mcs_mask[IEEE80211_NUM_BANDS];
u8 rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN];
+ bool rc_has_vht_mcs_mask[IEEE80211_NUM_BANDS];
+ u16 rc_rateidx_vht_mcs_mask[IEEE80211_NUM_BANDS][NL80211_VHT_NSS_MAX];
+
union {
struct ieee80211_if_ap ap;
struct ieee80211_if_wds wds;
IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
- IEEE80211_SDATA_QUEUE_TDLS_CHSW = 5,
};
enum {
unsigned int rx_handlers_queued;
unsigned int rx_handlers_drop_nullfunc;
unsigned int rx_handlers_drop_defrag;
- unsigned int rx_handlers_drop_short;
unsigned int tx_expand_skb_head;
unsigned int tx_expand_skb_head_cloned;
unsigned int rx_expand_skb_head_defrag;
struct ieee80211_sub_if_data __rcu *p2p_sdata;
- struct napi_struct *napi;
-
/* virtual monitor interface */
struct ieee80211_sub_if_data __rcu *monitor_sdata;
struct cfg80211_chan_def monitor_chandef;
/* extended capabilities provided by mac80211 */
u8 ext_capa[8];
+
+ /* TDLS channel switch */
+ struct work_struct tdls_chsw_work;
+ struct sk_buff_head skb_queue_tdls_chsw;
};
static inline struct ieee80211_sub_if_data *
enum ieee80211_band band, bool nss_only);
void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_vht_cap *vht_cap);
+void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+ u16 vht_mask[NL80211_VHT_NSS_MAX]);
/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
/* utility functions/constants */
extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
-u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
- enum nl80211_iftype type);
int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
int rate, int erp, int short_preamble,
int shift);
enum ieee80211_chanctx_mode chanmode,
u8 radar_detect);
int ieee80211_max_num_channels(struct ieee80211_local *local);
+enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta);
+void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx);
/* TDLS */
int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr);
-void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb);
+void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
+void ieee80211_tdls_chsw_work(struct work_struct *wk);
extern const struct ethtool_ops ieee80211_ethtool_ops;
WLAN_BACK_RECIPIENT, 0,
false);
mutex_unlock(&local->sta_mtx);
- } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_TDLS_CHSW) {
- ieee80211_process_tdls_channel_switch(sdata, skb);
} else if (ieee80211_is_action(mgmt->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_BACK) {
int len = skb->len;
sband = local->hw.wiphy->bands[i];
sdata->rc_rateidx_mask[i] =
sband ? (1 << sband->n_bitrates) - 1 : 0;
- if (sband)
+ if (sband) {
+ __le16 cap;
+ u16 *vht_rate_mask;
+
memcpy(sdata->rc_rateidx_mcs_mask[i],
sband->ht_cap.mcs.rx_mask,
sizeof(sdata->rc_rateidx_mcs_mask[i]));
- else
+
+ cap = sband->vht_cap.vht_mcs.rx_mcs_map;
+ vht_rate_mask = sdata->rc_rateidx_vht_mcs_mask[i];
+ ieee80211_get_vht_mask_from_cap(cap, vht_rate_mask);
+ } else {
memset(sdata->rc_rateidx_mcs_mask[i], 0,
sizeof(sdata->rc_rateidx_mcs_mask[i]));
+ memset(sdata->rc_rateidx_vht_mcs_mask[i], 0,
+ sizeof(sdata->rc_rateidx_vht_mcs_mask[i]));
+ }
}
ieee80211_set_default_queues(sdata);
ieee80211_check_fast_xmit(sta);
} else {
rcu_assign_pointer(sta->gtk[idx], new);
- sta->gtk_idx = idx;
}
} else {
defunikey = old &&
} gen;
} u;
- /* number of times this key has been used */
- int tx_rx_count;
-
#ifdef CONFIG_MAC80211_DEBUGFS
struct {
struct dentry *stalink;
INIT_WORK(&local->sched_scan_stopped_work,
ieee80211_sched_scan_stopped_work);
+ INIT_WORK(&local->tdls_chsw_work, ieee80211_tdls_chsw_work);
+
spin_lock_init(&local->ack_status_lock);
idr_init(&local->ack_status_frames);
skb_queue_head_init(&local->skb_queue);
skb_queue_head_init(&local->skb_queue_unreliable);
+ skb_queue_head_init(&local->skb_queue_tdls_chsw);
ieee80211_alloc_led_names(local);
}
EXPORT_SYMBOL(ieee80211_register_hw);
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight)
-{
- struct ieee80211_local *local = hw_to_local(hw);
-
- netif_napi_add(napi_dev, napi, poll, weight);
- local->napi = napi;
-}
-EXPORT_SYMBOL_GPL(ieee80211_napi_add);
-
void ieee80211_unregister_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
cancel_work_sync(&local->restart_work);
cancel_work_sync(&local->reconfig_filter);
+ cancel_work_sync(&local->tdls_chsw_work);
flush_work(&local->sched_scan_stopped_work);
ieee80211_clear_tx_pending(local);
wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
skb_queue_purge(&local->skb_queue);
skb_queue_purge(&local->skb_queue_unreliable);
+ skb_queue_purge(&local->skb_queue_tdls_chsw);
destroy_workqueue(local->workqueue);
wiphy_unregister(local->hw.wiphy);
changed = mesh_accept_plinks_update(sdata);
if (!sdata->u.mesh.user_mpm) {
changed |= mesh_plink_deactivate(sta);
- del_timer_sync(&sta->plink_timer);
+ del_timer_sync(&sta->mesh->plink_timer);
}
if (changed)
#define MAX_PREQ_QUEUE_LEN 64
-/* Destination only */
-#define MP_F_DO 0x1
-/* Reply and forward */
-#define MP_F_RF 0x2
-/* Unknown Sequence Number */
-#define MP_F_USN 0x01
-/* Reason code Present */
-#define MP_F_RCODE 0x02
-
static void mesh_queue_preq(struct mesh_path *, u8);
static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
#define MSEC_TO_TU(x) (x*1000/1024)
#define SN_GT(x, y) ((s32)(y - x) < 0)
#define SN_LT(x, y) ((s32)(x - y) < 0)
+#define MAX_SANE_SN_DELTA 32
+
+static inline u32 SN_DELTA(u32 x, u32 y)
+{
+ return x >= y ? x - y : y - x;
+}
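
A self-contained sketch of the serial-number arithmetic used here: SN_GT()
compares through a signed 32-bit subtraction so it stays correct across
wraparound, while SN_DELTA() measures the raw distance that is checked against
MAX_SANE_SN_DELTA to detect a peer that rebooted:

#include <assert.h>
#include <stdint.h>

#define SN_GT(x, y)	((int32_t)((y) - (x)) < 0)
#define SN_DELTA(x, y)	((x) >= (y) ? (x) - (y) : (y) - (x))

int main(void)
{
	uint32_t old_sn = 0xfffffff0u, new_sn = 0x10;	/* wrapped */

	assert(SN_GT(new_sn, old_sn));	/* newer despite smaller value */
	assert(SN_DELTA(3u, 1000003u) > 32);	/* looks like a reboot */
	return 0;
}
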
#define net_traversal_jiffies(s) \
msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
*pos++ = ttl;
/* number of destinations */
*pos++ = 1;
- /*
- * flags bit, bit 1 is unset if we know the sequence number and
- * bit 2 is set if we have a reason code
+ /* Flags field has only the AE bit, as defined in
+ * sec 8.4.2.117 of IEEE 802.11-2012
*/
*pos = 0;
- if (!target_sn)
- *pos |= MP_F_USN;
- if (target_rcode)
- *pos |= MP_F_RCODE;
pos++;
memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
/* moving average, scaled to 100 */
- sta->fail_avg = ((80 * sta->fail_avg + 5) / 100 + 20 * failed);
- if (sta->fail_avg > 95)
+ sta->mesh->fail_avg =
+ ((80 * sta->mesh->fail_avg + 5) / 100 + 20 * failed);
+ if (sta->mesh->fail_avg > 95)
mesh_plink_broken(sta);
}
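
The moving average above keeps fail_avg as a 0..100 percentage and folds each
TX result in with weight 20. Running the same integer arithmetic standalone
shows that, starting from zero, it takes about seventeen consecutive failures
to exceed the 95 cutoff that triggers mesh_plink_broken():

#include <stdio.h>

int main(void)
{
	unsigned int fail_avg = 0, failed = 1;	/* every frame fails */
	int i;

	for (i = 1; i <= 20; i++) {
		fail_avg = (80 * fail_avg + 5) / 100 + 20 * failed;
		printf("after %2d failures: fail_avg = %u\n", i, fail_avg);
	}
	return 0;	/* fail_avg first exceeds 95 at i == 17 */
}
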
u32 tx_time, estimated_retx;
u64 result;
- if (sta->fail_avg >= 100)
+ if (sta->mesh->fail_avg >= 100)
return MAX_METRIC;
sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
if (WARN_ON(!rate))
return MAX_METRIC;
- err = (sta->fail_avg << ARITH_SHIFT) / 100;
+ err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100;
/* bitrate is in units of 100 Kbps, while we need rate in units of
* 1Mbps. This will be corrected on tx_time computation.
process = false;
fresh_info = false;
}
+ } else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
+ bool have_sn, newer_sn, bounced;
+
+ have_sn = mpath->flags & MESH_PATH_SN_VALID;
+ newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
+ bounced = have_sn &&
+ (SN_DELTA(orig_sn, mpath->sn) >
+ MAX_SANE_SN_DELTA);
+
+ if (!have_sn || newer_sn) {
+ /* if SN is newer than what we had
+ * then we can take it */;
+ } else if (bounced) {
+ /* if SN is way different than what
+ * we had then assume the other side
+ * rebooted or restarted */;
+ } else {
+ process = false;
+ fresh_info = false;
+ }
}
} else {
mpath = mesh_path_add(sdata, orig_addr);
SN_LT(mpath->sn, target_sn)) {
mpath->sn = target_sn;
mpath->flags |= MESH_PATH_SN_VALID;
- } else if ((!(target_flags & MP_F_DO)) &&
+ } else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
(mpath->flags & MESH_PATH_ACTIVE)) {
reply = true;
target_metric = mpath->metric;
target_sn = mpath->sn;
- if (target_flags & MP_F_RF)
- target_flags |= MP_F_DO;
- else
- forward = false;
+ /* Case E2 of sec 13.10.9.3 IEEE 802.11-2012 */
+ target_flags |= IEEE80211_PREQ_TO_FLAG;
}
}
rcu_read_unlock();
if (mpath->flags & MESH_PATH_ACTIVE &&
ether_addr_equal(ta, sta->sta.addr) &&
(!(mpath->flags & MESH_PATH_SN_VALID) ||
- SN_GT(target_sn, mpath->sn))) {
+ SN_GT(target_sn, mpath->sn) || target_sn == 0)) {
mpath->flags &= ~MESH_PATH_ACTIVE;
- mpath->sn = target_sn;
+ if (target_sn != 0)
+ mpath->sn = target_sn;
+ else
+ mpath->sn += 1;
spin_unlock_bh(&mpath->state_lock);
if (!ifmsh->mshcfg.dot11MeshForwarding)
goto endperr;
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
- if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
+ if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
rcu_read_unlock();
return;
}
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_preq_queue *preq_node;
struct mesh_path *mpath;
- u8 ttl, target_flags;
+ u8 ttl, target_flags = 0;
const u8 *da;
u32 lifetime;
}
if (preq_node->flags & PREQ_Q_F_REFRESH)
- target_flags = MP_F_DO;
+ target_flags |= IEEE80211_PREQ_TO_FLAG;
else
- target_flags = MP_F_RF;
+ target_flags &= ~IEEE80211_PREQ_TO_FLAG;
spin_unlock_bh(&mpath->state_lock);
da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
spin_unlock_bh(&mpath->state_lock);
mesh_queue_preq(mpath, 0);
} else {
- mpath->flags = 0;
+ mpath->flags &= ~(MESH_PATH_RESOLVING |
+ MESH_PATH_RESOLVED |
+ MESH_PATH_REQ_QUEUED);
mpath->exp_time = jiffies;
spin_unlock_bh(&mpath->state_lock);
if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
#include "rate.h"
#include "mesh.h"
+#define PLINK_CNF_AID(mgmt) ((mgmt)->u.action.u.self_prot.variable + 2)
#define PLINK_GET_LLID(p) (p + 2)
#define PLINK_GET_PLID(p) (p + 4)
-#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
+#define mod_plink_timer(s, t) (mod_timer(&s->mesh->plink_timer, \
jiffies + msecs_to_jiffies(t)))
enum plink_event {
[CLS_IGNR] = "CLS_IGNR"
};
-static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
- enum ieee80211_self_protected_actioncode action,
- u8 *da, u16 llid, u16 plid, u16 reason);
-
-
/* We only need a valid sta if user configured a minimum rssi_threshold. */
static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
return rssi_threshold == 0 ||
- (sta && (s8) -ewma_read(&sta->avg_signal) > rssi_threshold);
+ (sta && (s8) -ewma_signal_read(&sta->avg_signal) > rssi_threshold);
}
/**
*
* @sta: mesh peer link to restart
*
- * Locking: this function must be called holding sta->plink_lock
+ * Locking: this function must be called holding sta->mesh->plink_lock
*/
static inline void mesh_plink_fsm_restart(struct sta_info *sta)
{
- lockdep_assert_held(&sta->plink_lock);
- sta->plink_state = NL80211_PLINK_LISTEN;
- sta->llid = sta->plid = sta->reason = 0;
- sta->plink_retries = 0;
+ lockdep_assert_held(&sta->mesh->plink_lock);
+ sta->mesh->plink_state = NL80211_PLINK_LISTEN;
+ sta->mesh->llid = sta->mesh->plid = sta->mesh->reason = 0;
+ sta->mesh->plink_retries = 0;
}
/*
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sdata != sta->sdata ||
- sta->plink_state != NL80211_PLINK_ESTAB)
+ sta->mesh->plink_state != NL80211_PLINK_ESTAB)
continue;
short_slot = false;
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sdata != sta->sdata ||
- sta->plink_state != NL80211_PLINK_ESTAB)
+ sta->mesh->plink_state != NL80211_PLINK_ESTAB)
continue;
if (sta->sta.bandwidth > IEEE80211_STA_RX_BW_20)
return BSS_CHANGED_HT;
}
-/**
- * __mesh_plink_deactivate - deactivate mesh peer link
- *
- * @sta: mesh peer link to deactivate
- *
- * All mesh paths with this peer as next hop will be flushed
- * Returns beacon changed flag if the beacon content changed.
- *
- * Locking: the caller must hold sta->plink_lock
- */
-static u32 __mesh_plink_deactivate(struct sta_info *sta)
-{
- struct ieee80211_sub_if_data *sdata = sta->sdata;
- u32 changed = 0;
-
- lockdep_assert_held(&sta->plink_lock);
-
- if (sta->plink_state == NL80211_PLINK_ESTAB)
- changed = mesh_plink_dec_estab_count(sdata);
- sta->plink_state = NL80211_PLINK_BLOCKED;
- mesh_path_flush_by_nexthop(sta);
-
- ieee80211_mps_sta_status_update(sta);
- changed |= ieee80211_mps_set_sta_local_pm(sta,
- NL80211_MESH_POWER_UNKNOWN);
-
- return changed;
-}
-
-/**
- * mesh_plink_deactivate - deactivate mesh peer link
- *
- * @sta: mesh peer link to deactivate
- *
- * All mesh paths with this peer as next hop will be flushed
- */
-u32 mesh_plink_deactivate(struct sta_info *sta)
-{
- struct ieee80211_sub_if_data *sdata = sta->sdata;
- u32 changed;
-
- spin_lock_bh(&sta->plink_lock);
- changed = __mesh_plink_deactivate(sta);
- sta->reason = WLAN_REASON_MESH_PEER_CANCELED;
- mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
- sta->sta.addr, sta->llid, sta->plid,
- sta->reason);
- spin_unlock_bh(&sta->plink_lock);
-
- return changed;
-}
-
static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
enum ieee80211_self_protected_actioncode action,
u8 *da, u16 llid, u16 plid, u16 reason)
{
if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
/* AID */
pos = skb_put(skb, 2);
- put_unaligned_le16(plid, pos);
+ put_unaligned_le16(sta->sta.aid, pos);
}
if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
return err;
}
+/**
+ * __mesh_plink_deactivate - deactivate mesh peer link
+ *
+ * @sta: mesh peer link to deactivate
+ *
+ * All mesh paths with this peer as next hop will be flushed
+ * Returns beacon changed flag if the beacon content changed.
+ *
+ * Locking: the caller must hold sta->mesh->plink_lock
+ */
+static u32 __mesh_plink_deactivate(struct sta_info *sta)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ u32 changed = 0;
+
+ lockdep_assert_held(&sta->mesh->plink_lock);
+
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+ changed = mesh_plink_dec_estab_count(sdata);
+ sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
+ mesh_path_flush_by_nexthop(sta);
+
+ ieee80211_mps_sta_status_update(sta);
+ changed |= ieee80211_mps_set_sta_local_pm(sta,
+ NL80211_MESH_POWER_UNKNOWN);
+
+ return changed;
+}
+
+/**
+ * mesh_plink_deactivate - deactivate mesh peer link
+ *
+ * @sta: mesh peer link to deactivate
+ *
+ * All mesh paths with this peer as next hop will be flushed
+ */
+u32 mesh_plink_deactivate(struct sta_info *sta)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ u32 changed;
+
+ spin_lock_bh(&sta->mesh->plink_lock);
+ changed = __mesh_plink_deactivate(sta);
+ sta->mesh->reason = WLAN_REASON_MESH_PEER_CANCELED;
+ mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, sta->mesh->llid, sta->mesh->plid,
+ sta->mesh->reason);
+ spin_unlock_bh(&sta->mesh->plink_lock);
+
+ return changed;
+}
+
static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee802_11_elems *elems, bool insert)
sband = local->hw.wiphy->bands[band];
rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
- spin_lock_bh(&sta->plink_lock);
+ spin_lock_bh(&sta->mesh->plink_lock);
sta->last_rx = jiffies;
/* rates and capabilities don't change during peering */
- if (sta->plink_state == NL80211_PLINK_ESTAB && sta->processed_beacon)
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
+ sta->mesh->processed_beacon)
goto out;
- sta->processed_beacon = true;
+ sta->mesh->processed_beacon = true;
if (sta->sta.supp_rates[band] != rates)
changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
else
rate_control_rate_update(local, sband, sta, changed);
out:
- spin_unlock_bh(&sta->plink_lock);
+ spin_unlock_bh(&sta->mesh->plink_lock);
+}
+
+static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata)
+{
+ struct sta_info *sta;
+ unsigned long *aid_map;
+ int aid;
+
+ aid_map = kcalloc(BITS_TO_LONGS(IEEE80211_MAX_AID + 1),
+ sizeof(*aid_map), GFP_KERNEL);
+ if (!aid_map)
+ return -ENOMEM;
+
+ /* reserve aid 0 for mcast indication */
+ __set_bit(0, aid_map);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &sdata->local->sta_list, list)
+ __set_bit(sta->sta.aid, aid_map);
+ rcu_read_unlock();
+
+ aid = find_first_zero_bit(aid_map, IEEE80211_MAX_AID + 1);
+ kfree(aid_map);
+
+ if (aid > IEEE80211_MAX_AID)
+ return -ENOBUFS;
+
+ return aid;
}
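
A userspace sketch of the AID allocation above, with a plain array standing in
for the kernel bitmap helpers (2007 matches IEEE80211_MAX_AID): AID 0 is
reserved for the multicast indication, every AID already held by a station is
marked used, and the first free slot wins.

#include <stdio.h>

#define MAX_AID 2007			/* stand-in for IEEE80211_MAX_AID */

static unsigned char used[MAX_AID + 1];

static int allocate_aid(const int *in_use, int n)
{
	int i, aid;

	used[0] = 1;			/* AID 0 = mcast indication */
	for (i = 0; i < n; i++)
		used[in_use[i]] = 1;
	for (aid = 0; aid <= MAX_AID; aid++)
		if (!used[aid])
			return aid;
	return -1;			/* table full, like -ENOBUFS */
}

int main(void)
{
	int peers[] = { 1, 2, 4 };

	printf("new aid: %d\n", allocate_aid(peers, 3));	/* prints 3 */
	return 0;
}
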
static struct sta_info *
__mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
{
struct sta_info *sta;
+ int aid;
if (sdata->local->num_sta >= MESH_MAX_PLINKS)
return NULL;
+ aid = mesh_allocate_aid(sdata);
+ if (aid < 0)
+ return NULL;
+
sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
if (!sta)
return NULL;
- sta->plink_state = NL80211_PLINK_LISTEN;
+ sta->mesh->plink_state = NL80211_PLINK_LISTEN;
sta->sta.wme = true;
+ sta->sta.aid = aid;
sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
goto out;
if (mesh_peer_accepts_plinks(elems) &&
- sta->plink_state == NL80211_PLINK_LISTEN &&
+ sta->mesh->plink_state == NL80211_PLINK_LISTEN &&
sdata->u.mesh.accepting_plinks &&
sdata->u.mesh.mshcfg.auto_open_plinks &&
rssi_threshold_check(sdata, sta))
if (sta->sdata->local->quiescing)
return;
- spin_lock_bh(&sta->plink_lock);
+ spin_lock_bh(&sta->mesh->plink_lock);
/* If a timer fires just before a state transition on another CPU,
* we may have already extended the timeout and changed state by the
* time we've acquired the lock and arrived here. In that case,
* skip this timer and wait for the new one.
*/
- if (time_before(jiffies, sta->plink_timer.expires)) {
+ if (time_before(jiffies, sta->mesh->plink_timer.expires)) {
mpl_dbg(sta->sdata,
"Ignoring timer for %pM in state %s (timer adjusted)",
- sta->sta.addr, mplstates[sta->plink_state]);
- spin_unlock_bh(&sta->plink_lock);
+ sta->sta.addr, mplstates[sta->mesh->plink_state]);
+ spin_unlock_bh(&sta->mesh->plink_lock);
return;
}
/* del_timer() and handler may race when entering these states */
- if (sta->plink_state == NL80211_PLINK_LISTEN ||
- sta->plink_state == NL80211_PLINK_ESTAB) {
+ if (sta->mesh->plink_state == NL80211_PLINK_LISTEN ||
+ sta->mesh->plink_state == NL80211_PLINK_ESTAB) {
mpl_dbg(sta->sdata,
"Ignoring timer for %pM in state %s (timer deleted)",
- sta->sta.addr, mplstates[sta->plink_state]);
- spin_unlock_bh(&sta->plink_lock);
+ sta->sta.addr, mplstates[sta->mesh->plink_state]);
+ spin_unlock_bh(&sta->mesh->plink_lock);
return;
}
mpl_dbg(sta->sdata,
"Mesh plink timer for %pM fired on state %s\n",
- sta->sta.addr, mplstates[sta->plink_state]);
+ sta->sta.addr, mplstates[sta->mesh->plink_state]);
sdata = sta->sdata;
mshcfg = &sdata->u.mesh.mshcfg;
- switch (sta->plink_state) {
+ switch (sta->mesh->plink_state) {
case NL80211_PLINK_OPN_RCVD:
case NL80211_PLINK_OPN_SNT:
/* retry timer */
- if (sta->plink_retries < mshcfg->dot11MeshMaxRetries) {
+ if (sta->mesh->plink_retries < mshcfg->dot11MeshMaxRetries) {
u32 rand;
mpl_dbg(sta->sdata,
"Mesh plink for %pM (retry, timeout): %d %d\n",
- sta->sta.addr, sta->plink_retries,
- sta->plink_timeout);
+ sta->sta.addr, sta->mesh->plink_retries,
+ sta->mesh->plink_timeout);
get_random_bytes(&rand, sizeof(u32));
- sta->plink_timeout = sta->plink_timeout +
- rand % sta->plink_timeout;
- ++sta->plink_retries;
- mod_plink_timer(sta, sta->plink_timeout);
+ sta->mesh->plink_timeout = sta->mesh->plink_timeout +
+ rand % sta->mesh->plink_timeout;
+ ++sta->mesh->plink_retries;
+ mod_plink_timer(sta, sta->mesh->plink_timeout);
action = WLAN_SP_MESH_PEERING_OPEN;
break;
}
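
A sketch of the retry backoff above: each retry adds a random fraction of the
current timeout, so the interval grows by a factor between 1x and 2x per
attempt and the two peers are unlikely to keep retrying in lockstep:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int timeout = 500;	/* ms, like dot11MeshRetryTimeout */
	int retry;

	for (retry = 1; retry <= 4; retry++) {
		timeout = timeout + rand() % timeout;
		printf("retry %d: next timeout %u ms\n", retry, timeout);
	}
	return 0;
}
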
/* confirm timer */
if (!reason)
reason = WLAN_REASON_MESH_CONFIRM_TIMEOUT;
- sta->plink_state = NL80211_PLINK_HOLDING;
+ sta->mesh->plink_state = NL80211_PLINK_HOLDING;
mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
action = WLAN_SP_MESH_PEERING_CLOSE;
break;
case NL80211_PLINK_HOLDING:
/* holding timer */
- del_timer(&sta->plink_timer);
+ del_timer(&sta->mesh->plink_timer);
mesh_plink_fsm_restart(sta);
break;
default:
break;
}
- spin_unlock_bh(&sta->plink_lock);
+ spin_unlock_bh(&sta->mesh->plink_lock);
if (action)
- mesh_plink_frame_tx(sdata, action, sta->sta.addr,
- sta->llid, sta->plid, reason);
+ mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+ sta->mesh->llid, sta->mesh->plid, reason);
}
static inline void mesh_plink_timer_set(struct sta_info *sta, u32 timeout)
{
- sta->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
- sta->plink_timer.data = (unsigned long) sta;
- sta->plink_timer.function = mesh_plink_timer;
- sta->plink_timeout = timeout;
- add_timer(&sta->plink_timer);
+ sta->mesh->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
+ sta->mesh->plink_timer.data = (unsigned long) sta;
+ sta->mesh->plink_timer.function = mesh_plink_timer;
+ sta->mesh->plink_timeout = timeout;
+ add_timer(&sta->mesh->plink_timer);
}
static bool llid_in_use(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- if (!memcmp(&sta->llid, &llid, sizeof(llid))) {
+ if (!memcmp(&sta->mesh->llid, &llid, sizeof(llid))) {
in_use = true;
break;
}
do {
get_random_bytes(&llid, sizeof(llid));
- /* for mesh PS we still only have the AID range for TIM bits */
- llid = (llid % IEEE80211_MAX_AID) + 1;
} while (llid_in_use(sdata, llid));
return llid;
if (!test_sta_flag(sta, WLAN_STA_AUTH))
return 0;
- spin_lock_bh(&sta->plink_lock);
- sta->llid = mesh_get_new_llid(sdata);
- if (sta->plink_state != NL80211_PLINK_LISTEN &&
- sta->plink_state != NL80211_PLINK_BLOCKED) {
- spin_unlock_bh(&sta->plink_lock);
+ spin_lock_bh(&sta->mesh->plink_lock);
+ sta->mesh->llid = mesh_get_new_llid(sdata);
+ if (sta->mesh->plink_state != NL80211_PLINK_LISTEN &&
+ sta->mesh->plink_state != NL80211_PLINK_BLOCKED) {
+ spin_unlock_bh(&sta->mesh->plink_lock);
return 0;
}
- sta->plink_state = NL80211_PLINK_OPN_SNT;
+ sta->mesh->plink_state = NL80211_PLINK_OPN_SNT;
mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
- spin_unlock_bh(&sta->plink_lock);
+ spin_unlock_bh(&sta->mesh->plink_lock);
mpl_dbg(sdata,
"Mesh plink: starting establishment with %pM\n",
sta->sta.addr);
/* set the non-peer mode to active during peering */
changed = ieee80211_mps_local_status_update(sdata);
- mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
- sta->sta.addr, sta->llid, 0, 0);
+ mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_OPEN,
+ sta->sta.addr, sta->mesh->llid, 0, 0);
return changed;
}
{
u32 changed;
- spin_lock_bh(&sta->plink_lock);
+ spin_lock_bh(&sta->mesh->plink_lock);
changed = __mesh_plink_deactivate(sta);
- sta->plink_state = NL80211_PLINK_BLOCKED;
- spin_unlock_bh(&sta->plink_lock);
+ sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
+ spin_unlock_bh(&sta->mesh->plink_lock);
return changed;
}
enum plink_event event)
{
struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
-
u16 reason = (event == CLS_ACPT) ?
WLAN_REASON_MESH_CLOSE : WLAN_REASON_MESH_CONFIG;
- sta->reason = reason;
- sta->plink_state = NL80211_PLINK_HOLDING;
+ sta->mesh->reason = reason;
+ sta->mesh->plink_state = NL80211_PLINK_HOLDING;
mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
}
struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
u32 changed = 0;
- del_timer(&sta->plink_timer);
- sta->plink_state = NL80211_PLINK_ESTAB;
+ del_timer(&sta->mesh->plink_timer);
+ sta->mesh->plink_state = NL80211_PLINK_ESTAB;
changed |= mesh_plink_inc_estab_count(sdata);
changed |= mesh_set_ht_prot_mode(sdata);
changed |= mesh_set_short_slot_time(sdata);
u32 changed = 0;
mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
- mplstates[sta->plink_state], mplevents[event]);
+ mplstates[sta->mesh->plink_state], mplevents[event]);
- spin_lock_bh(&sta->plink_lock);
- switch (sta->plink_state) {
+ spin_lock_bh(&sta->mesh->plink_lock);
+ switch (sta->mesh->plink_state) {
case NL80211_PLINK_LISTEN:
switch (event) {
case CLS_ACPT:
mesh_plink_fsm_restart(sta);
break;
case OPN_ACPT:
- sta->plink_state = NL80211_PLINK_OPN_RCVD;
- sta->llid = mesh_get_new_llid(sdata);
+ sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
+ sta->mesh->llid = mesh_get_new_llid(sdata);
mesh_plink_timer_set(sta,
mshcfg->dot11MeshRetryTimeout);
break;
case OPN_ACPT:
/* retry timer is left untouched */
- sta->plink_state = NL80211_PLINK_OPN_RCVD;
+ sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
action = WLAN_SP_MESH_PEERING_CONFIRM;
break;
case CNF_ACPT:
- sta->plink_state = NL80211_PLINK_CNF_RCVD;
+ sta->mesh->plink_state = NL80211_PLINK_CNF_RCVD;
mod_plink_timer(sta, mshcfg->dot11MeshConfirmTimeout);
break;
default:
case NL80211_PLINK_HOLDING:
switch (event) {
case CLS_ACPT:
- del_timer(&sta->plink_timer);
+ del_timer(&sta->mesh->plink_timer);
mesh_plink_fsm_restart(sta);
break;
case OPN_ACPT:
*/
break;
}
- spin_unlock_bh(&sta->plink_lock);
+ spin_unlock_bh(&sta->mesh->plink_lock);
if (action) {
- mesh_plink_frame_tx(sdata, action, sta->sta.addr,
- sta->llid, sta->plid, sta->reason);
+ mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+ sta->mesh->llid, sta->mesh->plid,
+ sta->mesh->reason);
/* also send confirm in open case */
if (action == WLAN_SP_MESH_PEERING_OPEN) {
- mesh_plink_frame_tx(sdata,
+ mesh_plink_frame_tx(sdata, sta,
WLAN_SP_MESH_PEERING_CONFIRM,
- sta->sta.addr, sta->llid,
- sta->plid, 0);
+ sta->sta.addr, sta->mesh->llid,
+ sta->mesh->plid, 0);
}
}
mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
goto out;
}
- if (sta->plink_state == NL80211_PLINK_BLOCKED)
+ if (sta->mesh->plink_state == NL80211_PLINK_BLOCKED)
goto out;
}
if (!matches_local)
event = OPN_RJCT;
if (!mesh_plink_free_count(sdata) ||
- (sta->plid && sta->plid != plid))
+ (sta->mesh->plid && sta->mesh->plid != plid))
event = OPN_IGNR;
else
event = OPN_ACPT;
if (!matches_local)
event = CNF_RJCT;
if (!mesh_plink_free_count(sdata) ||
- sta->llid != llid ||
- (sta->plid && sta->plid != plid))
+ sta->mesh->llid != llid ||
+ (sta->mesh->plid && sta->mesh->plid != plid))
event = CNF_IGNR;
else
event = CNF_ACPT;
break;
case WLAN_SP_MESH_PEERING_CLOSE:
- if (sta->plink_state == NL80211_PLINK_ESTAB)
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
/* Do not check for llid or plid. This does not
* follow the standard but since multiple plinks
* per sta are not supported, it is necessary in
* restarted.
*/
event = CLS_ACPT;
- else if (sta->plid != plid)
+ else if (sta->mesh->plid != plid)
event = CLS_IGNR;
- else if (ie_len == 8 && sta->llid != llid)
+ else if (ie_len == 8 && sta->mesh->llid != llid)
event = CLS_IGNR;
else
event = CLS_ACPT;
mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
goto unlock_rcu;
}
- sta->plid = plid;
+ sta->mesh->plid = plid;
} else if (!sta && event == OPN_RJCT) {
- mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ mesh_plink_frame_tx(sdata, NULL, WLAN_SP_MESH_PEERING_CLOSE,
mgmt->sa, 0, plid,
WLAN_REASON_MESH_CONFIG);
goto unlock_rcu;
goto unlock_rcu;
}
- /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
- if (!sta->plid && event == CNF_ACPT)
- sta->plid = plid;
+ if (event == CNF_ACPT) {
+ /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
+ if (!sta->mesh->plid)
+ sta->mesh->plid = plid;
+
+ sta->mesh->aid = get_unaligned_le16(PLINK_CNF_AID(mgmt));
+ }
changed |= mesh_plink_fsm(sdata, sta, event);
if (sdata != sta->sdata)
continue;
- switch (sta->plink_state) {
+ switch (sta->mesh->plink_state) {
case NL80211_PLINK_OPN_SNT:
case NL80211_PLINK_OPN_RCVD:
case NL80211_PLINK_CNF_RCVD:
peering = true;
break;
case NL80211_PLINK_ESTAB:
- if (sta->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
+ if (sta->mesh->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
light_sleep_cnt++;
- else if (sta->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
+ else if (sta->mesh->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
deep_sleep_cnt++;
break;
default:
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
- if (sta->local_pm == pm)
+ if (sta->mesh->local_pm == pm)
return 0;
mps_dbg(sdata, "local STA operates in mode %d with %pM\n",
pm, sta->sta.addr);
- sta->local_pm = pm;
+ sta->mesh->local_pm = pm;
/*
* announce peer-specific power mode transition
* (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3)
*/
- if (sta->plink_state == NL80211_PLINK_ESTAB)
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
mps_qos_null_tx(sta);
return ieee80211_mps_local_status_update(sdata);
if (is_unicast_ether_addr(hdr->addr1) &&
ieee80211_is_data_qos(hdr->frame_control) &&
- sta->plink_state == NL80211_PLINK_ESTAB)
- pm = sta->local_pm;
+ sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+ pm = sta->mesh->local_pm;
else
pm = sdata->u.mesh.nonpeer_pm;
* use peer-specific power mode if peering is established and the
* peer's power mode is known
*/
- if (sta->plink_state == NL80211_PLINK_ESTAB &&
- sta->peer_pm != NL80211_MESH_POWER_UNKNOWN)
- pm = sta->peer_pm;
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
+ sta->mesh->peer_pm != NL80211_MESH_POWER_UNKNOWN)
+ pm = sta->mesh->peer_pm;
else
- pm = sta->nonpeer_pm;
+ pm = sta->mesh->nonpeer_pm;
do_buffer = (pm != NL80211_MESH_POWER_ACTIVE);
/* clear the MPSP flags for non-peers or active STA */
- if (sta->plink_state != NL80211_PLINK_ESTAB) {
+ if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
} else if (!do_buffer) {
pm = NL80211_MESH_POWER_ACTIVE;
}
- if (sta->peer_pm == pm)
+ if (sta->mesh->peer_pm == pm)
return;
mps_dbg(sta->sdata, "STA %pM enters mode %d\n",
sta->sta.addr, pm);
- sta->peer_pm = pm;
+ sta->mesh->peer_pm = pm;
ieee80211_mps_sta_status_update(sta);
}
else
pm = NL80211_MESH_POWER_ACTIVE;
- if (sta->nonpeer_pm == pm)
+ if (sta->mesh->nonpeer_pm == pm)
return;
mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n",
sta->sta.addr, pm);
- sta->nonpeer_pm = pm;
+ sta->mesh->nonpeer_pm = pm;
ieee80211_mps_sta_status_update(sta);
}
} else {
if (eosp)
clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
- else if (sta->local_pm != NL80211_MESH_POWER_ACTIVE)
+ else if (sta->mesh->local_pm != NL80211_MESH_POWER_ACTIVE)
set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
int ac, buffer_local = 0;
bool has_buffered = false;
- if (sta->plink_state == NL80211_PLINK_ESTAB)
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len,
- sta->llid);
+ sta->mesh->aid);
if (has_buffered)
mps_dbg(sta->sdata, "%pM indicates buffered frames\n",
if (!has_buffered && !buffer_local)
return;
- if (sta->plink_state == NL80211_PLINK_ESTAB)
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
mpsp_trigger_send(sta, has_buffered, !buffer_local);
else
mps_frame_deliver(sta, 1);
/* Timing offset calculation (see 13.13.2.2.2) */
t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
- sta->t_offset = t_t - t_r;
+ sta->mesh->t_offset = t_t - t_r;
if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
- s64 t_clockdrift = sta->t_offset_setpoint - sta->t_offset;
+ s64 t_clockdrift = sta->mesh->t_offset_setpoint - sta->mesh->t_offset;
msync_dbg(sdata,
- "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n",
- sta->sta.addr, (long long) sta->t_offset,
- (long long) sta->t_offset_setpoint,
+ "STA %pM : t_offset=%lld, t_offset_setpoint=%lld, t_clockdrift=%lld\n",
+ sta->sta.addr, (long long) sta->mesh->t_offset,
+ (long long) sta->mesh->t_offset_setpoint,
(long long) t_clockdrift);
if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
ifmsh->sync_offset_clockdrift_max = t_clockdrift;
spin_unlock_bh(&ifmsh->sync_offset_lock);
} else {
- sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
+ sta->mesh->t_offset_setpoint = sta->mesh->t_offset - TOFFSET_SET_MARGIN;
set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
msync_dbg(sdata,
- "STA %pM : offset was invalid, sta->t_offset=%lld\n",
+ "STA %pM : offset was invalid, t_offset=%lld\n",
sta->sta.addr,
- (long long) sta->t_offset);
+ (long long) sta->mesh->t_offset);
}
no_sync:
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
}
+/* This function determines the VHT capability flags for the association
+ * and builds the IE.
+ * Note: the function may set the owner of the MU-MIMO capability.
+ */
static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_supported_band *sband,
struct ieee80211_vht_cap *ap_vht_cap)
{
+ struct ieee80211_local *local = sdata->local;
u8 *pos;
u32 cap;
struct ieee80211_sta_vht_cap vht_cap;
*/
if (!(ap_vht_cap->vht_cap_info &
cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
- cap &= ~IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+ cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ else if (!(ap_vht_cap->vht_cap_info &
+ cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+ cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+ /*
+ * If some other vif is using the MU-MIMO capability, we cannot associate
+ * using MU-MIMO - this would lead to contradictions in the group-id
+ * mechanism.
+ * Ownership is claimed at association request time, in order to avoid
+ * simultaneous MU-MIMO associations.
+ */
+ if (cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) {
+ bool disable_mu_mimo = false;
+ struct ieee80211_sub_if_data *other;
+
+ list_for_each_entry_rcu(other, &local->interfaces, list) {
+ if (other->flags & IEEE80211_SDATA_MU_MIMO_OWNER) {
+ disable_mu_mimo = true;
+ break;
+ }
+ }
+ if (disable_mu_mimo)
+ cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+ else
+ sdata->flags |= IEEE80211_SDATA_MU_MIMO_OWNER;
+ }
mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
}
-static void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
-{
- struct sta_info *sta;
- u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
- if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
- !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
- continue;
-
- ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
- NL80211_TDLS_TEARDOWN, reason,
- GFP_ATOMIC);
- }
- rcu_read_unlock();
-}
-
static void
ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
u64 timestamp, u32 device_timestamp,
memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
+ sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
eth_zero_addr(sdata->u.mgd.bssid);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
sdata->u.mgd.flags = 0;
+ sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
mutex_lock(&sdata->local->mtx);
ieee80211_vif_release_channel(sdata);
mutex_unlock(&sdata->local->mtx);
rate_control_rate_init(sta);
- if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
+ if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
set_sta_flag(sta, WLAN_STA_MFP);
- sta->sta.mfp = true;
- } else {
- sta->sta.mfp = false;
- }
sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
- u32 changed = BSS_CHANGED_OCB;
+ u32 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
int err;
if (ifocb->joined == true)
MODULE_PARM_DESC(ieee80211_default_rc_algo,
"Default rate control algorithm for mac80211 to use");
+void rate_control_rate_init(struct sta_info *sta)
+{
+ struct ieee80211_local *local = sta->sdata->local;
+ struct rate_control_ref *ref = sta->rate_ctrl;
+ struct ieee80211_sta *ista = &sta->sta;
+ void *priv_sta = sta->rate_ctrl_priv;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ ieee80211_sta_set_rx_nss(sta);
+
+ if (!ref)
+ return;
+
+ rcu_read_lock();
+
+ chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
+
+ spin_lock_bh(&sta->rate_ctrl_lock);
+ ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
+ priv_sta);
+ spin_unlock_bh(&sta->rate_ctrl_lock);
+ rcu_read_unlock();
+ set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
+}
+
+void rate_control_rate_update(struct ieee80211_local *local,
+ struct ieee80211_supported_band *sband,
+ struct sta_info *sta, u32 changed)
+{
+ struct rate_control_ref *ref = local->rate_ctrl;
+ struct ieee80211_sta *ista = &sta->sta;
+ void *priv_sta = sta->rate_ctrl_priv;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ if (ref && ref->ops->rate_update) {
+ rcu_read_lock();
+
+ chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&sta->rate_ctrl_lock);
+ ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
+ ista, priv_sta, changed);
+ spin_unlock_bh(&sta->rate_ctrl_lock);
+ rcu_read_unlock();
+ }
+ drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
+}
+
int ieee80211_rate_control_register(const struct rate_control_ops *ops)
{
struct rate_control_alg *alg;
}
EXPORT_SYMBOL(rate_control_send_low);
-static bool rate_idx_match_legacy_mask(struct ieee80211_tx_rate *rate,
- int n_bitrates, u32 mask)
+static bool rate_idx_match_legacy_mask(s8 *rate_idx, int n_bitrates, u32 mask)
{
int j;
/* See whether the selected rate or anything below it is allowed. */
- for (j = rate->idx; j >= 0; j--) {
+ for (j = *rate_idx; j >= 0; j--) {
if (mask & (1 << j)) {
/* Okay, found a suitable rate. Use it. */
- rate->idx = j;
+ *rate_idx = j;
return true;
}
}
/* Try to find a higher rate that would be allowed */
- for (j = rate->idx + 1; j < n_bitrates; j++) {
+ for (j = *rate_idx + 1; j < n_bitrates; j++) {
if (mask & (1 << j)) {
/* Okay, found a suitable rate. Use it. */
- rate->idx = j;
+ *rate_idx = j;
return true;
}
}
return false;
}
-static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
- u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+static bool rate_idx_match_mcs_mask(s8 *rate_idx, u8 *mcs_mask)
{
int i, j;
int ridx, rbit;
- ridx = rate->idx / 8;
- rbit = rate->idx % 8;
+ ridx = *rate_idx / 8;
+ rbit = *rate_idx % 8;
/* sanity check */
if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN)
for (i = ridx; i >= 0; i--) {
for (j = rbit; j >= 0; j--)
if (mcs_mask[i] & BIT(j)) {
- rate->idx = i * 8 + j;
+ *rate_idx = i * 8 + j;
return true;
}
rbit = 7;
}
/* Try to find a higher rate that would be allowed */
- ridx = (rate->idx + 1) / 8;
- rbit = (rate->idx + 1) % 8;
+ ridx = (*rate_idx + 1) / 8;
+ rbit = (*rate_idx + 1) % 8;
for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
for (j = rbit; j < 8; j++)
if (mcs_mask[i] & BIT(j)) {
- rate->idx = i * 8 + j;
+ *rate_idx = i * 8 + j;
return true;
}
rbit = 0;
return false;
}
+static bool rate_idx_match_vht_mcs_mask(s8 *rate_idx, u16 *vht_mask)
+{
+ int i, j;
+ int ridx, rbit;
+
+ ridx = *rate_idx >> 4;
+ rbit = *rate_idx & 0xf;
+
+ if (ridx < 0 || ridx >= NL80211_VHT_NSS_MAX)
+ return false;
+
+ /* See whether the selected rate or anything below it is allowed. */
+ for (i = ridx; i >= 0; i--) {
+ for (j = rbit; j >= 0; j--) {
+ if (vht_mask[i] & BIT(j)) {
+ *rate_idx = (i << 4) | j;
+ return true;
+ }
+ }
+ rbit = 15;
+ }
+ /* Try to find a higher rate that would be allowed */
+ ridx = (*rate_idx + 1) >> 4;
+ rbit = (*rate_idx + 1) & 0xf;
+ for (i = ridx; i < NL80211_VHT_NSS_MAX; i++) {
+ for (j = rbit; j < 16; j++) {
+ if (vht_mask[i] & BIT(j)) {
+ *rate_idx = (i << 4) | j;
+ return true;
+ }
+ }
+ rbit = 0;
+ }
+ return false;
+}
+
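A sketch of the VHT rate-index encoding the helper above relies on: idx
carries (NSS - 1) in the high nibble and the MCS in the low nibble, so the
mask lookup reduces to vht_mask[nss] & BIT(mcs). Names here are illustrative:

#include <assert.h>
#include <stdint.h>

#define VHT_IDX(nss, mcs)	((((nss) - 1) << 4) | (mcs))

int main(void)
{
	int8_t idx = VHT_IDX(2, 9);	/* 2 spatial streams, MCS 9 */
	uint16_t vht_mask[8] = { [1] = 1 << 9 };	/* NSS 2: MCS 9 only */

	assert((idx >> 4) == 1 && (idx & 0xf) == 9);
	assert(vht_mask[idx >> 4] & (1u << (idx & 0xf)));
	return 0;
}
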
-static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
+static void rate_idx_match_mask(s8 *rate_idx, u16 *rate_flags,
struct ieee80211_supported_band *sband,
enum nl80211_chan_width chan_width,
u32 mask,
- u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+ u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN],
+ u16 vht_mask[NL80211_VHT_NSS_MAX])
{
- struct ieee80211_tx_rate alt_rate;
+ if (*rate_flags & IEEE80211_TX_RC_VHT_MCS) {
+ /* handle VHT rates */
+ if (rate_idx_match_vht_mcs_mask(rate_idx, vht_mask))
+ return;
+
+ *rate_idx = 0;
+ /* keep protection flags */
+ *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+ IEEE80211_TX_RC_USE_CTS_PROTECT |
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
- /* handle HT rates */
- if (rate->flags & IEEE80211_TX_RC_MCS) {
- if (rate_idx_match_mcs_mask(rate, mcs_mask))
+ *rate_flags |= IEEE80211_TX_RC_MCS;
+ if (chan_width == NL80211_CHAN_WIDTH_40)
+ *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+ if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
return;
/* also try the legacy rates. */
- alt_rate.idx = 0;
+ *rate_flags &= ~(IEEE80211_TX_RC_MCS |
+ IEEE80211_TX_RC_40_MHZ_WIDTH);
+ if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+ mask))
+ return;
+ } else if (*rate_flags & IEEE80211_TX_RC_MCS) {
+ /* handle HT rates */
+ if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
+ return;
+
+ /* also try the legacy rates. */
+ *rate_idx = 0;
/* keep protection flags */
- alt_rate.flags = rate->flags &
- (IEEE80211_TX_RC_USE_RTS_CTS |
- IEEE80211_TX_RC_USE_CTS_PROTECT |
- IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
- alt_rate.count = rate->count;
- if (rate_idx_match_legacy_mask(&alt_rate,
- sband->n_bitrates, mask)) {
- *rate = alt_rate;
+ *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+ IEEE80211_TX_RC_USE_CTS_PROTECT |
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+ if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+ mask))
return;
- }
- } else if (!(rate->flags & IEEE80211_TX_RC_VHT_MCS)) {
+ } else {
/* handle legacy rates */
- if (rate_idx_match_legacy_mask(rate, sband->n_bitrates, mask))
+ if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+ mask))
return;
/* if HT BSS, and we handle a data frame, also try HT rates */
break;
}
- alt_rate.idx = 0;
+ *rate_idx = 0;
/* keep protection flags */
- alt_rate.flags = rate->flags &
- (IEEE80211_TX_RC_USE_RTS_CTS |
- IEEE80211_TX_RC_USE_CTS_PROTECT |
- IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
- alt_rate.count = rate->count;
+ *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+ IEEE80211_TX_RC_USE_CTS_PROTECT |
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
- alt_rate.flags |= IEEE80211_TX_RC_MCS;
+ *rate_flags |= IEEE80211_TX_RC_MCS;
if (chan_width == NL80211_CHAN_WIDTH_40)
- alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) {
- *rate = alt_rate;
+ if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
return;
- }
}
/*
}
}
+static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, u32 *mask,
+ u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN],
+ u16 vht_mask[NL80211_VHT_NSS_MAX])
+{
+ u32 i, flags;
+
+ *mask = sdata->rc_rateidx_mask[sband->band];
+ flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((flags & sband->bitrates[i].flags) != flags)
+ *mask &= ~BIT(i);
+ }
+
+ if (*mask == (1 << sband->n_bitrates) - 1 &&
+ !sdata->rc_has_mcs_mask[sband->band] &&
+ !sdata->rc_has_vht_mcs_mask[sband->band])
+ return false;
+
+ if (sdata->rc_has_mcs_mask[sband->band])
+ memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[sband->band],
+ IEEE80211_HT_MCS_MASK_LEN);
+ else
+ memset(mcs_mask, 0xff, IEEE80211_HT_MCS_MASK_LEN);
+
+ if (sdata->rc_has_vht_mcs_mask[sband->band])
+ memcpy(vht_mask, sdata->rc_rateidx_vht_mcs_mask[sband->band],
+ sizeof(u16) * NL80211_VHT_NSS_MAX);
+ else
+ memset(vht_mask, 0xff, sizeof(u16) * NL80211_VHT_NSS_MAX);
+
+ if (sta) {
+ __le16 sta_vht_cap;
+ u16 sta_vht_mask[NL80211_VHT_NSS_MAX];
+
+ /* Filter out rates that the STA does not support */
+ *mask &= sta->supp_rates[sband->band];
+ for (i = 0; i < sizeof(mcs_mask); i++)
+ mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
+
+ sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map;
+ ieee80211_get_vht_mask_from_cap(sta_vht_cap, sta_vht_mask);
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ vht_mask[i] &= sta_vht_mask[i];
+ }
+
+ return true;
+}
+
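The early bail-out above depends on a compact all-allowed test: a legacy mask with every bitrate bit set, combined with absent HT and VHT MCS masks, means there is nothing to enforce. A standalone sketch of that check and the per-STA filtering, with made-up values:

#include <stdio.h>

int main(void)
{
	int n_bitrates = 12;			/* hypothetical band size */
	unsigned int mask = (1u << n_bitrates) - 1; /* all legacy rates set */
	unsigned int sta_supp = 0x0ff0;		/* hypothetical STA rate set */
	int has_mcs_mask = 0, has_vht_mcs_mask = 0;

	/* same shape as the early return: nothing configured, nothing to do */
	if (mask == (1u << n_bitrates) - 1 &&
	    !has_mcs_mask && !has_vht_mcs_mask)
		printf("default mask, skip the expensive path\n");

	mask &= sta_supp;			/* per-STA filtering */
	printf("effective legacy mask: 0x%04x\n", mask);
	return 0;
}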
+static void
+rate_control_apply_mask_ratetbl(struct sta_info *sta,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta_rates *rates)
+{
+ int i;
+ u32 mask;
+ u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
+ u16 vht_mask[NL80211_VHT_NSS_MAX];
+ enum nl80211_chan_width chan_width;
+
+ if (!rate_control_cap_mask(sta->sdata, sband, &sta->sta, &mask,
+ mcs_mask, vht_mask))
+ return;
+
+ chan_width = sta->sdata->vif.bss_conf.chandef.width;
+ for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) {
+ if (rates->rate[i].idx < 0)
+ break;
+
+ rate_idx_match_mask(&rates->rate[i].idx, &rates->rate[i].flags,
+ sband, chan_width, mask, mcs_mask,
+ vht_mask);
+ }
+}
+
static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
- struct ieee80211_tx_info *info,
struct ieee80211_tx_rate *rates,
int max_rates)
{
enum nl80211_chan_width chan_width;
u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
- bool has_mcs_mask;
u32 mask;
- u32 rate_flags;
+ u16 rate_flags, vht_mask[NL80211_VHT_NSS_MAX];
int i;
/*
* default mask (allow all rates) is used to save some processing for
* the common case.
*/
- mask = sdata->rc_rateidx_mask[info->band];
- has_mcs_mask = sdata->rc_has_mcs_mask[info->band];
- rate_flags =
- ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
- for (i = 0; i < sband->n_bitrates; i++)
- if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
- mask &= ~BIT(i);
-
- if (mask == (1 << sband->n_bitrates) - 1 && !has_mcs_mask)
+ if (!rate_control_cap_mask(sdata, sband, sta, &mask, mcs_mask,
+ vht_mask))
return;
- if (has_mcs_mask)
- memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[info->band],
- sizeof(mcs_mask));
- else
- memset(mcs_mask, 0xff, sizeof(mcs_mask));
-
- if (sta) {
- /* Filter out rates that the STA does not support */
- mask &= sta->supp_rates[info->band];
- for (i = 0; i < sizeof(mcs_mask); i++)
- mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
- }
-
/*
* Make sure the rate index selected for each TX rate is
* included in the configured mask and change the rate indexes
if (rates[i].idx < 0)
break;
- rate_idx_match_mask(&rates[i], sband, chan_width, mask,
- mcs_mask);
+ rate_flags = rates[i].flags;
+ rate_idx_match_mask(&rates[i].idx, &rate_flags, sband,
+ chan_width, mask, mcs_mask, vht_mask);
+ rates[i].flags = rate_flags;
}
}
sband = sdata->local->hw.wiphy->bands[info->band];
if (ieee80211_is_data(hdr->frame_control))
- rate_control_apply_mask(sdata, sta, sband, info, dest, max_rates);
+ rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
if (dest[0].idx < 0)
__rate_control_send_low(&sdata->local->hw, sband, sta, info,
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_sta_rates *old;
+ struct ieee80211_supported_band *sband;
+ sband = hw->wiphy->bands[ieee80211_get_sdata_band(sta->sdata)];
+ rate_control_apply_mask_ratetbl(sta, sband, rates);
/*
* mac80211 guarantees that this function will not be called
* concurrently, so the following RCU access is safe, even without
spin_unlock_bh(&sta->rate_ctrl_lock);
}
-static inline void rate_control_rate_init(struct sta_info *sta)
-{
- struct ieee80211_local *local = sta->sdata->local;
- struct rate_control_ref *ref = sta->rate_ctrl;
- struct ieee80211_sta *ista = &sta->sta;
- void *priv_sta = sta->rate_ctrl_priv;
- struct ieee80211_supported_band *sband;
- struct ieee80211_chanctx_conf *chanctx_conf;
-
- ieee80211_sta_set_rx_nss(sta);
-
- if (!ref)
- return;
-
- rcu_read_lock();
-
- chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
- if (WARN_ON(!chanctx_conf)) {
- rcu_read_unlock();
- return;
- }
-
- sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
-
- spin_lock_bh(&sta->rate_ctrl_lock);
- ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
- priv_sta);
- spin_unlock_bh(&sta->rate_ctrl_lock);
- rcu_read_unlock();
- set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
-}
-
-static inline void rate_control_rate_update(struct ieee80211_local *local,
+void rate_control_rate_init(struct sta_info *sta);
+void rate_control_rate_update(struct ieee80211_local *local,
struct ieee80211_supported_band *sband,
- struct sta_info *sta, u32 changed)
-{
- struct rate_control_ref *ref = local->rate_ctrl;
- struct ieee80211_sta *ista = &sta->sta;
- void *priv_sta = sta->rate_ctrl_priv;
- struct ieee80211_chanctx_conf *chanctx_conf;
-
- if (ref && ref->ops->rate_update) {
- rcu_read_lock();
-
- chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
- if (WARN_ON(!chanctx_conf)) {
- rcu_read_unlock();
- return;
- }
-
- spin_lock_bh(&sta->rate_ctrl_lock);
- ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
- ista, priv_sta, changed);
- spin_unlock_bh(&sta->rate_ctrl_lock);
- rcu_read_unlock();
- }
- drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
-}
+ struct sta_info *sta, u32 changed);
static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
struct sta_info *sta, gfp_t gfp)
static inline void
minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
{
- int j = MAX_THR_RATES;
- struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
+ int j;
+ struct minstrel_rate_stats *tmp_mrs;
struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
- while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
- minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
- j--;
+ for (j = MAX_THR_RATES; j > 0; --j) {
tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+ if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
+ minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+ break;
}
if (j < MAX_THR_RATES - 1)
else
idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8;
- if (offset > 0) {
+ /* enable RTS/CTS if needed:
+ * - if station is in dynamic SMPS (and streams > 1)
+ * - for fallback rates, to increase chances of getting through
+ */
+	if (offset > 0 ||
+ (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
+ group->streams > 1)) {
ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
flags |= IEEE80211_TX_RC_USE_RTS_CTS;
}
u64_stats_update_end(&tstats->syncp);
}
+static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
+ enum nl80211_iftype type)
+{
+ __le16 fc = hdr->frame_control;
+
+ if (ieee80211_is_data(fc)) {
+ if (len < 24) /* drop incorrect hdr len (data) */
+ return NULL;
+
+ if (ieee80211_has_a4(fc))
+ return NULL;
+ if (ieee80211_has_tods(fc))
+ return hdr->addr1;
+ if (ieee80211_has_fromds(fc))
+ return hdr->addr2;
+
+ return hdr->addr3;
+ }
+
+ if (ieee80211_is_mgmt(fc)) {
+ if (len < 24) /* drop incorrect hdr len (mgmt) */
+ return NULL;
+ return hdr->addr3;
+ }
+
+ if (ieee80211_is_ctl(fc)) {
+ if (ieee80211_is_pspoll(fc))
+ return hdr->addr1;
+
+ if (ieee80211_is_back_req(fc)) {
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ return hdr->addr2;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ return hdr->addr1;
+ default:
+ break; /* fall through to the return */
+ }
+ }
+ }
+
+ return NULL;
+}
+
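The data-frame branch above applies the usual 802.11 rule that the BSSID's address field depends on the ToDS/FromDS bits. A standalone sketch of that mapping (a simplified model, not the kernel helper):

#include <stdio.h>

/* simplified model: which address field carries the BSSID in a data frame */
static const char *bssid_field(int tods, int fromds)
{
	if (tods && fromds)
		return "none (4-address frame)";
	if (tods)
		return "addr1";
	if (fromds)
		return "addr2";
	return "addr3";
}

int main(void)
{
	int tods, fromds;

	for (tods = 0; tods < 2; tods++)
		for (fromds = 0; fromds < 2; fromds++)
			printf("ToDS=%d FromDS=%d -> %s\n",
			       tods, fromds, bssid_field(tods, fromds));
	return 0;
}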
/*
* monitor mode reception
*
hdr = (void *)(skb->data + rtap_vendor_space);
if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
- RX_FLAG_FAILED_PLCP_CRC |
- RX_FLAG_AMPDU_IS_ZEROLEN))
+ RX_FLAG_FAILED_PLCP_CRC))
return true;
if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
put_unaligned_le32(status->ampdu_reference, pos);
pos += 4;
- if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
- flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
- if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
- flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
if (status->flag & RX_FLAG_AMPDU_IS_LAST)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
- if (unlikely(rx->skb->len < 16)) {
- I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
- return RX_DROP_MONITOR;
- }
-
/* Drop disallowed frame classes based on STA auth/assoc state;
* IEEE 802.11, Chap 5.5.
*
ieee80211_sta_ps_deliver_wakeup(sta);
}
-int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
+int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
{
- struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
bool in_ps;
- WARN_ON(!ieee80211_hw_check(&sta_inf->local->hw, AP_LINK_PS));
+ WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
/* Don't let the same PS state be set twice */
- in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
+ in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
if ((start && in_ps) || (!start && !in_ps))
return -EINVAL;
if (start)
- sta_ps_start(sta_inf);
+ sta_ps_start(sta);
else
- sta_ps_end(sta_inf);
+ sta_ps_end(sta);
return 0;
}
sta->rx_bytes += rx->skb->len;
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
sta->last_signal = status->signal;
- ewma_add(&sta->avg_signal, -status->signal);
+ ewma_signal_add(&sta->avg_signal, -status->signal);
}
if (status->chains) {
continue;
sta->chain_signal_last[i] = signal;
- ewma_add(&sta->chain_signal_avg[i], -signal);
+ ewma_signal_add(&sta->chain_signal_avg[i], -signal);
}
}
if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
return RX_DROP_MONITOR;
- rx->key->tx_rx_count++;
/* TODO: add threshold stuff again */
} else {
return RX_DROP_MONITOR;
/* Complete frame has been reassembled - process it now */
status = IEEE80211_SKB_RXCB(rx->skb);
- status->rx_flags |= IEEE80211_RX_FRAGMENTED;
out:
ieee80211_led_rx(rx->local);
/* deliver to local stack */
skb->protocol = eth_type_trans(skb, dev);
memset(skb->cb, 0, sizeof(skb->cb));
- if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
- rx->local->napi)
- napi_gro_receive(rx->local->napi, skb);
+ if (rx->napi)
+ napi_gro_receive(rx->napi, skb);
else
netif_receive_skb(skb);
}
tf->category == WLAN_CATEGORY_TDLS &&
(tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
- rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TDLS_CHSW;
- skb_queue_tail(&sdata->skb_queue, rx->skb);
- ieee80211_queue_work(&rx->local->hw, &sdata->work);
+ skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
+ schedule_work(&local->tdls_chsw_work);
if (rx->sta)
rx->sta->rx_packets++;
return RX_QUEUED;
}
-/* TODO: use IEEE80211_RX_FRAGMENTED */
static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
struct ieee80211_rate *rate)
{
/* This is OK -- must be QoS data frame */
.security_idx = tid,
.seqno_idx = tid,
- .flags = IEEE80211_RX_REORDER_TIMER,
+		.napi = NULL, /* must be NULL to avoid races */
};
struct tid_ampdu_rx *tid_agg_rx;
case NL80211_IFTYPE_OCB:
if (!bssid)
return false;
- if (ieee80211_is_beacon(hdr->frame_control))
+ if (!ieee80211_is_data_present(hdr->frame_control))
return false;
if (!is_broadcast_ether_addr(bssid))
return false;
* be called with rcu_read_lock protection.
*/
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct napi_struct *napi)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
memset(&rx, 0, sizeof(rx));
rx.skb = skb;
rx.local = local;
+ rx.napi = napi;
if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
* This is the receive path handler. It is called by a low level driver when an
* 802.11 MPDU is received from the hardware.
*/
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct napi_struct *napi)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_rate *rate = NULL;
ieee80211_tpt_led_trig_rx(local,
((struct ieee80211_hdr *)skb->data)->frame_control,
skb->len);
- __ieee80211_rx_handle_packet(hw, skb);
+ __ieee80211_rx_handle_packet(hw, skb, napi);
rcu_read_unlock();
drop:
kfree_skb(skb);
}
-EXPORT_SYMBOL(ieee80211_rx);
+EXPORT_SYMBOL(ieee80211_rx_napi);
/* This is a version of the rx handler that can be called from hard irq
* context. Post the skb on the queue and schedule the tasklet */
.nelem_hint = 3, /* start small */
.automatic_shrinking = true,
.head_offset = offsetof(struct sta_info, hash_node),
- .key_offset = offsetof(struct sta_info, sta.addr),
+ .key_offset = offsetof(struct sta_info, addr),
.key_len = ETH_ALEN,
.hashfn = sta_addr_hash,
.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
if (sta->sta.txq[0])
kfree(to_txq_info(sta->sta.txq[0]));
kfree(rcu_dereference_raw(sta->sta.rates));
+#ifdef CONFIG_MAC80211_MESH
+ kfree(sta->mesh);
+#endif
kfree(sta);
}
INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
mutex_init(&sta->ampdu_mlme.mtx);
#ifdef CONFIG_MAC80211_MESH
- spin_lock_init(&sta->plink_lock);
- if (ieee80211_vif_is_mesh(&sdata->vif) &&
- !sdata->u.mesh.user_mpm)
- init_timer(&sta->plink_timer);
- sta->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+ if (ieee80211_vif_is_mesh(&sdata->vif)) {
+ sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
+ if (!sta->mesh)
+ goto free;
+ spin_lock_init(&sta->mesh->plink_lock);
+ if (ieee80211_vif_is_mesh(&sdata->vif) &&
+ !sdata->u.mesh.user_mpm)
+ init_timer(&sta->mesh->plink_timer);
+ sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+ }
#endif
+ memcpy(sta->addr, addr, ETH_ALEN);
memcpy(sta->sta.addr, addr, ETH_ALEN);
sta->local = local;
sta->sdata = sdata;
ktime_get_ts(&uptime);
sta->last_connected = uptime.tv_sec;
- ewma_init(&sta->avg_signal, 1024, 8);
+ ewma_signal_init(&sta->avg_signal);
for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
- ewma_init(&sta->chain_signal_avg[i], 1024, 8);
+ ewma_signal_init(&sta->chain_signal_avg[i]);
if (local->ops->wake_tx_queue) {
void *txq_data;
if (sta->sta.txq[0])
kfree(to_txq_info(sta->sta.txq[0]));
free:
+#ifdef CONFIG_MAC80211_MESH
+ kfree(sta->mesh);
+#endif
kfree(sta);
return NULL;
}
bool indicate_tim = false;
u8 ignore_for_tim = sta->sta.uapsd_queues;
int ac;
- u16 id;
+ u16 id = sta->sta.aid;
if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
return;
ps = &sta->sdata->bss->ps;
- id = sta->sta.aid;
#ifdef CONFIG_MAC80211_MESH
} else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
ps = &sta->sdata->u.mesh.ps;
- /* TIM map only for 1 <= PLID <= IEEE80211_MAX_AID */
- id = sta->plid % (IEEE80211_MAX_AID + 1);
#endif
} else {
return;
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
- sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
+ sinfo->signal_avg =
+ (s8) -ewma_signal_read(&sta->avg_signal);
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
}
}
for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
sinfo->chain_signal[i] = sta->chain_signal_last[i];
sinfo->chain_signal_avg[i] =
- (s8) -ewma_read(&sta->chain_signal_avg[i]);
+ (s8) -ewma_signal_read(&sta->chain_signal_avg[i]);
}
}
BIT(NL80211_STA_INFO_PEER_PM) |
BIT(NL80211_STA_INFO_NONPEER_PM);
- sinfo->llid = sta->llid;
- sinfo->plid = sta->plid;
- sinfo->plink_state = sta->plink_state;
+ sinfo->llid = sta->mesh->llid;
+ sinfo->plid = sta->mesh->plid;
+ sinfo->plink_state = sta->mesh->plink_state;
if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
sinfo->filled |= BIT(NL80211_STA_INFO_T_OFFSET);
- sinfo->t_offset = sta->t_offset;
+ sinfo->t_offset = sta->mesh->t_offset;
}
- sinfo->local_pm = sta->local_pm;
- sinfo->peer_pm = sta->peer_pm;
- sinfo->nonpeer_pm = sta->nonpeer_pm;
+ sinfo->local_pm = sta->mesh->local_pm;
+ sinfo->peer_pm = sta->mesh->peer_pm;
+ sinfo->nonpeer_pm = sta->mesh->nonpeer_pm;
#endif
}
* @WLAN_STA_TDLS_CHAN_SWITCH: This TDLS peer supports TDLS channel-switching
* @WLAN_STA_TDLS_OFF_CHANNEL: The local STA is currently off-channel with this
* TDLS peer
+ * @WLAN_STA_TDLS_WIDER_BW: This TDLS peer supports working on a wider
+ * bandwidth than the BSS base channel.
* @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
* keeping station in power-save mode, reply when the driver
* unblocks the station.
WLAN_STA_TDLS_INITIATOR,
WLAN_STA_TDLS_CHAN_SWITCH,
WLAN_STA_TDLS_OFF_CHANNEL,
+ WLAN_STA_TDLS_WIDER_BW,
WLAN_STA_UAPSD,
WLAN_STA_SP,
WLAN_STA_4ADDR_EVENT,
struct rcu_head rcu_head;
};
+/**
+ * struct mesh_sta - mesh STA information
+ * @plink_lock: serialize access to plink fields
+ * @llid: Local link ID
+ * @plid: Peer link ID
+ * @aid: local aid supplied by peer
+ * @reason: Cancel reason on PLINK_HOLDING state
+ * @plink_retries: Retries in establishment
+ * @plink_state: peer link state
+ * @plink_timeout: timeout of peer link
+ * @plink_timer: peer link watch timer
+ * @t_offset: timing offset relative to this host
+ * @t_offset_setpoint: reference timing offset of this sta to be used when
+ * calculating clockdrift
+ * @local_pm: local link-specific power save mode
+ * @peer_pm: peer-specific power save mode towards local STA
+ * @nonpeer_pm: STA power save mode towards non-peer neighbors
+ * @processed_beacon: set to true after peer rates and capabilities are
+ * processed
+ * @fail_avg: moving percentage of failed MSDUs
+ */
+struct mesh_sta {
+ struct timer_list plink_timer;
+
+ s64 t_offset;
+ s64 t_offset_setpoint;
+
+ spinlock_t plink_lock;
+ u16 llid;
+ u16 plid;
+ u16 aid;
+ u16 reason;
+ u8 plink_retries;
+
+ bool processed_beacon;
+
+ enum nl80211_plink_state plink_state;
+ u32 plink_timeout;
+
+ /* mesh power save */
+ enum nl80211_mesh_power_mode local_pm;
+ enum nl80211_mesh_power_mode peer_pm;
+ enum nl80211_mesh_power_mode nonpeer_pm;
+
+ /* moving percentage of failed MSDUs */
+ unsigned int fail_avg;
+};
+
+DECLARE_EWMA(signal, 1024, 8)
+
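DECLARE_EWMA(signal, 1024, 8) generates struct ewma_signal along with ewma_signal_init/add/read, with the precision factor and weight fixed at declaration time; that is why the call sites above drop the numeric arguments that ewma_init()/ewma_add() used to take. A standalone sketch of the underlying fixed-point average, assuming the factor-1024/weight-8 parameters used here (illustrative names, not the kernel macro output):

#include <stdio.h>

/* illustrative stand-in for what DECLARE_EWMA(signal, 1024, 8) emits */
struct ewma_sketch {
	unsigned long internal;
};

static void ewma_sketch_add(struct ewma_sketch *e, unsigned long val)
{
	const unsigned long factor = 1024, weight = 8;

	/* first sample seeds the average, later ones are blended in */
	e->internal = e->internal ?
		(e->internal * (weight - 1) + val * factor) / weight :
		val * factor;
}

static unsigned long ewma_sketch_read(const struct ewma_sketch *e)
{
	return e->internal / 1024;
}

int main(void)
{
	struct ewma_sketch avg = { 0 };

	ewma_sketch_add(&avg, 40);	/* e.g. a -40 dBm sample, negated */
	ewma_sketch_add(&avg, 60);
	printf("avg: %lu\n", ewma_sketch_read(&avg));	/* ~42 */
	return 0;
}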
/**
* struct sta_info - STA information
*
* @list: global linked list entry
* @free_list: list entry for keeping track of stations to free
* @hash_node: hash node for rhashtable
+ * @addr: station's MAC address - duplicated from public part to
+ * let the hash table work with just a single cacheline
* @local: pointer to the global information
* @sdata: virtual interface this station belongs to
* @ptk: peer keys negotiated with this station, if any
* @ptk_idx: last installed peer key index
* @gtk: group keys negotiated with this station, if any
- * @gtk_idx: last installed group key index
* @rate_ctrl: rate control algorithm reference
* @rate_ctrl_lock: spinlock used to protect rate control data
* (data inside the algorithm, so serializes calls there)
* @last_signal: signal of last received frame from this STA
* @avg_signal: moving average of signal of received frames from this STA
* @last_ack_signal: signal of last received Ack frame from this STA
- * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
+ * @last_seq_ctrl: last received seq/frag number from this STA (per TID
+ * plus one for non-QoS frames)
* @tx_filtered_count: number of frames the hardware filtered for this STA
* @tx_retry_failed: number of frames that failed retry
* @tx_retry_count: total number of retries for frames to this STA
- * @fail_avg: moving percentage of failed MSDUs
* @tx_packets: number of RX/TX MSDUs
* @tx_bytes: number of bytes transmitted to this STA
* @tid_seq: per-TID sequence numbers for sending to this STA
* @ampdu_mlme: A-MPDU state machine state
* @timer_to_tid: identity mapping to ID timers
- * @plink_lock: serialize access to plink fields
- * @llid: Local link ID
- * @plid: Peer link ID
- * @reason: Cancel reason on PLINK_HOLDING state
- * @plink_retries: Retries in establishment
- * @plink_state: peer link state
- * @plink_timeout: timeout of peer link
- * @plink_timer: peer link watch timer
- * @t_offset: timing offset relative to this host
- * @t_offset_setpoint: reference timing offset of this sta to be used when
- * calculating clockdrift
- * @local_pm: local link-specific power save mode
- * @peer_pm: peer-specific power save mode towards local STA
- * @nonpeer_pm: STA power save mode towards non-peer neighbors
+ * @mesh: mesh STA information
* @debugfs: debug filesystem info
* @dead: set to true when sta is unlinked
* @uploaded: set to true when sta is uploaded to the driver
* @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID
* entry for non-QoS frames
* @fast_tx: TX fastpath information
- * @processed_beacon: set to true after peer rates and capabilities are
- * processed
+ * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible
+ * with the BSS one.
*/
struct sta_info {
/* General information, mostly static */
struct list_head list, free_list;
struct rcu_head rcu_head;
struct rhash_head hash_node;
+ u8 addr[ETH_ALEN];
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
struct ieee80211_key __rcu *ptk[NUM_DEFAULT_KEYS];
- u8 gtk_idx;
u8 ptk_idx;
struct rate_control_ref *rate_ctrl;
void *rate_ctrl_priv;
struct ieee80211_fast_tx __rcu *fast_tx;
+#ifdef CONFIG_MAC80211_MESH
+ struct mesh_sta *mesh;
+#endif
+
struct work_struct drv_deliver_wk;
u16 listen_interval;
unsigned long rx_fragments;
unsigned long rx_dropped;
int last_signal;
- struct ewma avg_signal;
+ struct ewma_signal avg_signal;
int last_ack_signal;
u8 chains;
s8 chain_signal_last[IEEE80211_MAX_CHAINS];
- struct ewma chain_signal_avg[IEEE80211_MAX_CHAINS];
+ struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
/* Plus 1 for non-QoS frames */
__le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
/* Updated from TX status path only, no locking requirements */
unsigned long tx_filtered_count;
unsigned long tx_retry_failed, tx_retry_count;
- /* moving percentage of failed MSDUs */
- unsigned int fail_avg;
/* Updated from TX path only, no locking requirements */
u64 tx_packets[IEEE80211_NUM_ACS];
struct sta_ampdu_mlme ampdu_mlme;
u8 timer_to_tid[IEEE80211_NUM_TIDS];
-#ifdef CONFIG_MAC80211_MESH
- /*
- * Mesh peer link attributes, protected by plink_lock.
- * TODO: move to a sub-structure that is referenced with pointer?
- */
- spinlock_t plink_lock;
- u16 llid;
- u16 plid;
- u16 reason;
- u8 plink_retries;
- enum nl80211_plink_state plink_state;
- u32 plink_timeout;
- struct timer_list plink_timer;
-
- s64 t_offset;
- s64 t_offset_setpoint;
- /* mesh power save */
- enum nl80211_mesh_power_mode local_pm;
- enum nl80211_mesh_power_mode peer_pm;
- enum nl80211_mesh_power_mode nonpeer_pm;
- bool processed_beacon;
-#endif
-
#ifdef CONFIG_MAC80211_DEBUGFS
struct sta_info_debugfsdentries {
struct dentry *dir;
u8 reserved_tid;
+ struct cfg80211_chan_def tdls_chandef;
+
/* keep last! */
struct ieee80211_sta sta;
};
static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
{
#ifdef CONFIG_MAC80211_MESH
- return sta->plink_state;
+ return sta->mesh->plink_state;
#endif
return NL80211_PLINK_LISTEN;
}
_sta_bucket_idx(tbl, _addr), \
hash_node) \
/* compare address and run code only if it matches */ \
- if (ether_addr_equal(_sta->sta.addr, (_addr)))
+ if (ether_addr_equal(_sta->addr, (_addr)))
/*
* Get STA info by index, BROKEN!
if (!sdata) {
skb->dev = NULL;
- } else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
+ } else {
unsigned int hdr_size =
ieee80211_hdrlen(hdr->frame_control);
ieee80211_mgd_conn_tx_status(sdata,
hdr->frame_control,
acked);
- } else {
- /* we assign ack frame ID for the others */
- WARN_ON(1);
}
rcu_read_unlock();
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2014, Intel Corporation
* Copyright 2014 Intel Mobile Communications GmbH
+ * Copyright 2015 Intel Deutschland GmbH
*
* This file is GPLv2 as found in COPYING.
*/
#include <linux/ieee80211.h>
#include <linux/log2.h>
#include <net/cfg80211.h>
+#include <linux/rtnetlink.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
mutex_unlock(&local->mtx);
}
-static void ieee80211_tdls_add_ext_capab(struct ieee80211_local *local,
+static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
- u8 *pos = (void *)skb_put(skb, 7);
+ struct ieee80211_local *local = sdata->local;
bool chan_switch = local->hw.wiphy->features &
NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+ bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW);
+ enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+ struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+ bool vht = sband && sband->vht_cap.vht_supported;
+ u8 *pos = (void *)skb_put(skb, 10);
*pos++ = WLAN_EID_EXT_CAPABILITY;
- *pos++ = 5; /* len */
+ *pos++ = 8; /* len */
*pos++ = 0x0;
*pos++ = 0x0;
*pos++ = 0x0;
*pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0;
*pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
+ *pos++ = 0;
+ *pos++ = 0;
+ *pos++ = (vht && wider_band) ? WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED : 0;
}
static u8
}
}
+static void
+ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta)
+{
+ /* IEEE802.11ac-2013 Table E-4 */
+ u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
+ struct cfg80211_chan_def uc = sta->tdls_chandef;
+ enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta);
+ int i;
+
+	/* only support upgrading non-narrow channels up to 80 MHz */
+ if (max_width == NL80211_CHAN_WIDTH_5 ||
+ max_width == NL80211_CHAN_WIDTH_10)
+ return;
+
+ if (max_width > NL80211_CHAN_WIDTH_80)
+ max_width = NL80211_CHAN_WIDTH_80;
+
+ if (uc.width == max_width)
+ return;
+ /*
+	 * Channel usage constraints in the IEEE802.11ac-2013 specification
+	 * only allow expanding a 20 MHz channel to 80 MHz in a single way. In
+	 * addition, there are no 40 MHz allowed channels that are not part of
+	 * the allowed 80 MHz range in the 5 GHz spectrum (the relevant one here).
+ */
+ for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
+ if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
+ uc.center_freq1 = centers_80mhz[i];
+ uc.width = NL80211_CHAN_WIDTH_80;
+ break;
+ }
+
+ if (!uc.center_freq1)
+ return;
+
+ /* proceed to downgrade the chandef until usable or the same */
+ while (uc.width > max_width &&
+ !cfg80211_reg_can_beacon(sdata->local->hw.wiphy,
+ &uc, sdata->wdev.iftype))
+ ieee80211_chandef_downgrade(&uc);
+
+ if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
+ tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n",
+ sta->tdls_chandef.width, uc.width);
+
+ /*
+		 * the station is not yet authorized when the BW upgrade is
+		 * done, so locking is not required
+ */
+ sta->tdls_chandef = uc;
+ }
+}
+
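The upgrade path above relies on the Table E-4 lookup: a 5 GHz control channel sits inside an 80 MHz block when it lies within 30 MHz of one of the listed segment centers. A standalone sketch of that match (hypothetical helper name, 5 GHz only):

#include <stdio.h>
#include <stdlib.h>

/* IEEE802.11ac-2013 Table E-4 80 MHz segment centers, as in the code above */
static const int centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };

static int find_80mhz_center(int center_freq)
{
	unsigned int i;

	for (i = 0; i < sizeof(centers_80mhz) / sizeof(centers_80mhz[0]); i++)
		if (abs(center_freq - centers_80mhz[i]) <= 30)
			return centers_80mhz[i];
	return 0;	/* not inside any 80 MHz block */
}

int main(void)
{
	/* channel 36 (5180 MHz) belongs to the block centered at 5210 MHz */
	printf("%d\n", find_80mhz_center(5180));
	return 0;
}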
static void
ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, const u8 *peer,
offset = noffset;
}
- ieee80211_tdls_add_ext_capab(local, skb);
+ ieee80211_tdls_add_ext_capab(sdata, skb);
/* add the QoS element if we support it */
if (local->hw.queues >= IEEE80211_NUM_ACS &&
offset = noffset;
}
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
/* we should have the peer STA if we're already responding */
if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
sta = sta_info_get(sdata, peer);
if (WARN_ON_ONCE(!sta)) {
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
return;
}
+
+ sta->tdls_chandef = sdata->vif.bss_conf.chandef;
}
ieee80211_tdls_add_oper_classes(sdata, skb);
ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
} else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
- /* disable SMPS in TDLS responder */
- sta->sta.ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
- << IEEE80211_HT_CAP_SM_PS_SHIFT;
-
/* the peer caps are already intersected with our own */
memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
+
+ /*
+ * if both peers support WIDER_BW, we can expand the chandef to
+ * a wider compatible one, up to 80MHz
+ */
+ if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+ ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
}
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
/* add any remaining IEs */
if (extra_ies_len) {
enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
u8 *pos;
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, peer);
ap_sta = sta_info_get(sdata, ifmgd->bssid);
if (WARN_ON_ONCE(!sta || !ap_sta)) {
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
return;
}
+ sta->tdls_chandef = sdata->vif.bss_conf.chandef;
+
/* add any custom IEs that go before the QoS IE */
if (extra_ies_len) {
static const u8 before_qos[] = {
/* only include VHT-operation if not on the 2.4GHz band */
if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
+ /*
+ * if both peers support WIDER_BW, we can expand the chandef to
+ * a wider compatible one, up to 80MHz
+ */
+ if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+ ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
+
pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
- &sdata->vif.bss_conf.chandef);
+ &sta->tdls_chandef);
}
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
/* add any remaining IEs */
if (extra_ies_len) {
max(sizeof(struct ieee80211_mgmt),
sizeof(struct ieee80211_tdls_data)) +
50 + /* supported rates */
- 7 + /* ext capab */
+ 10 + /* ext capab */
26 + /* max(WMM-info, WMM-param) */
2 + max(sizeof(struct ieee80211_ht_cap),
sizeof(struct ieee80211_ht_operation)) +
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
+ enum ieee80211_smps_mode smps_mode = sdata->u.mgd.driver_smps_mode;
int ret;
+ /* don't support setup with forced SMPS mode that's not off */
+ if (smps_mode != IEEE80211_SMPS_AUTOMATIC &&
+ smps_mode != IEEE80211_SMPS_OFF) {
+ tdls_dbg(sdata, "Aborting TDLS setup due to SMPS mode %d\n",
+ smps_mode);
+ return -ENOTSUPP;
+ }
+
mutex_lock(&local->mtx);
/* we don't support concurrent TDLS peer setups */
return ret;
}
+static void ieee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx_conf *conf;
+ struct ieee80211_chanctx *ctx;
+
+ mutex_lock(&local->chanctx_mtx);
+ conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ lockdep_is_held(&local->chanctx_mtx));
+ if (conf) {
+ ctx = container_of(conf, struct ieee80211_chanctx, conf);
+ ieee80211_recalc_chanctx_chantype(local, ctx);
+ }
+ mutex_unlock(&local->chanctx_mtx);
+}
+
int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, enum nl80211_tdls_operation oper)
{
break;
}
+	ieee80211_tdls_recalc_chanctx(sdata);
+
rcu_read_lock();
sta = sta_info_get(sdata, peer);
if (!sta) {
ieee80211_flush_queues(local, sdata, false);
ret = sta_info_destroy_addr(sdata, peer);
+	ieee80211_tdls_recalc_chanctx(sdata);
break;
default:
ret = -ENOTSUPP;
eth_zero_addr(sdata->u.mgd.tdls_peer);
}
+ if (ret == 0)
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->u.mgd.request_smps_work);
+
mutex_unlock(&local->mtx);
return ret;
}
return -EINVAL;
}
+ if (!elems.sec_chan_offs) {
+ chan_type = NL80211_CHAN_HT20;
+ } else {
+ switch (elems.sec_chan_offs->sec_chan_offs) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ chan_type = NL80211_CHAN_HT40PLUS;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ chan_type = NL80211_CHAN_HT40MINUS;
+ break;
+ default:
+ chan_type = NL80211_CHAN_HT20;
+ break;
+ }
+ }
+
+ cfg80211_chandef_create(&chandef, chan, chan_type);
+
+ /* we will be active on the TDLS link */
+ if (!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &chandef,
+ sdata->wdev.iftype)) {
+ tdls_dbg(sdata, "TDLS chan switch to forbidden channel\n");
+ return -EINVAL;
+ }
+
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, tf->sa);
if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
goto out;
}
- if (!sta->sta.ht_cap.ht_supported) {
- chan_type = NL80211_CHAN_NO_HT;
- } else if (!elems.sec_chan_offs) {
- chan_type = NL80211_CHAN_HT20;
- } else {
- switch (elems.sec_chan_offs->sec_chan_offs) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- chan_type = NL80211_CHAN_HT40PLUS;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- chan_type = NL80211_CHAN_HT40MINUS;
- break;
- default:
- chan_type = NL80211_CHAN_HT20;
- break;
- }
+ /* peer should have known better */
+ if (!sta->sta.ht_cap.ht_supported && elems.sec_chan_offs &&
+ elems.sec_chan_offs->sec_chan_offs) {
+ tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n");
+ ret = -ENOTSUPP;
+ goto out;
}
- cfg80211_chandef_create(&chandef, chan, chan_type);
params.chandef = &chandef;
-
params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time);
params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout);
return ret;
}
-void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+static void
+ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
struct ieee80211_tdls_data *tf = (void *)skb->data;
struct wiphy *wiphy = sdata->local->hw.wiphy;
+ ASSERT_RTNL();
+
/* make sure the driver supports it */
if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH))
return;
return;
}
}
+
+void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
+{
+ struct sta_info *sta;
+ u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+ if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ continue;
+
+ ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
+ NL80211_TDLS_TEARDOWN, reason,
+ GFP_ATOMIC);
+ }
+ rcu_read_unlock();
+}
+
+void ieee80211_tdls_chsw_work(struct work_struct *wk)
+{
+ struct ieee80211_local *local =
+ container_of(wk, struct ieee80211_local, tdls_chsw_work);
+ struct ieee80211_sub_if_data *sdata;
+ struct sk_buff *skb;
+ struct ieee80211_tdls_data *tf;
+
+ rtnl_lock();
+ while ((skb = skb_dequeue(&local->skb_queue_tdls_chsw))) {
+ tf = (struct ieee80211_tdls_data *)skb->data;
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata) ||
+ sdata->vif.type != NL80211_IFTYPE_STATION ||
+ !ether_addr_equal(tf->da, sdata->vif.addr))
+ continue;
+
+ ieee80211_process_tdls_channel_switch(sdata, skb);
+ break;
+ }
+
+ kfree_skb(skb);
+ }
+ rtnl_unlock();
+}
if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
return TX_CONTINUE;
- if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
- return TX_CONTINUE;
-
if (tx->flags & IEEE80211_TX_PS_BUFFERED)
return TX_CONTINUE;
if (tx->key) {
bool skip_hw = false;
- tx->key->tx_rx_count++;
/* TODO: add threshold stuff again */
switch (tx->key->conf.cipher) {
txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
- tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
+ tx->sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+ tx->sdata->vif.type == NL80211_IFTYPE_OCB);
/* set up RTS protection if desired */
if (len > tx->local->hw.wiphy->rts_threshold) {
sdata->sequence_number += 0x10;
}
- sta->tx_msdu[tid]++;
+ if (skb_shinfo(skb)->gso_size)
+ sta->tx_msdu[tid] +=
+ DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
+ else
+ sta->tx_msdu[tid]++;
info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
rcu_read_unlock();
}
+static u8 __ieee80211_csa_update_counter(struct beacon_data *beacon)
+{
+ beacon->csa_current_counter--;
+
+ /* the counter should never reach 0 */
+ WARN_ON_ONCE(!beacon->csa_current_counter);
+
+ return beacon->csa_current_counter;
+}
+
u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
if (!beacon)
goto unlock;
- beacon->csa_current_counter--;
-
- /* the counter should never reach 0 */
- WARN_ON_ONCE(!beacon->csa_current_counter);
- count = beacon->csa_current_counter;
+ count = __ieee80211_csa_update_counter(beacon);
unlock:
rcu_read_unlock();
if (beacon) {
if (beacon->csa_counter_offsets[0]) {
if (!is_template)
- ieee80211_csa_update_counter(vif);
+ __ieee80211_csa_update_counter(beacon);
ieee80211_set_csa(sdata, beacon);
}
if (beacon->csa_counter_offsets[0]) {
if (!is_template)
- ieee80211_csa_update_counter(vif);
+ __ieee80211_csa_update_counter(beacon);
ieee80211_set_csa(sdata, beacon);
}
* for now we leave it consistent with overall
* mac80211's behavior.
*/
- ieee80211_csa_update_counter(vif);
+ __ieee80211_csa_update_counter(beacon);
ieee80211_set_csa(sdata, beacon);
}
}
EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
-u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
- enum nl80211_iftype type)
-{
- __le16 fc = hdr->frame_control;
-
- /* drop ACK/CTS frames and incorrect hdr len (ctrl) */
- if (len < 16)
- return NULL;
-
- if (ieee80211_is_data(fc)) {
- if (len < 24) /* drop incorrect hdr len (data) */
- return NULL;
-
- if (ieee80211_has_a4(fc))
- return NULL;
- if (ieee80211_has_tods(fc))
- return hdr->addr1;
- if (ieee80211_has_fromds(fc))
- return hdr->addr2;
-
- return hdr->addr3;
- }
-
- if (ieee80211_is_mgmt(fc)) {
- if (len < 24) /* drop incorrect hdr len (mgmt) */
- return NULL;
- return hdr->addr3;
- }
-
- if (ieee80211_is_ctl(fc)) {
- if (ieee80211_is_pspoll(fc))
- return hdr->addr1;
-
- if (ieee80211_is_back_req(fc)) {
- switch (type) {
- case NL80211_IFTYPE_STATION:
- return hdr->addr2;
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_AP_VLAN:
- return hdr->addr1;
- default:
- break; /* fall through to the return */
- }
- }
- }
-
- return NULL;
-}
-
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif)
{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_sub_if_data *sdata;
+
+ if (!vif)
+ return NULL;
+
+ sdata = vif_to_sdata(vif);
if (!ieee80211_sdata_running(sdata) ||
!(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
local->resuming = false;
local->suspended = false;
local->started = false;
+ local->in_reconfig = false;
/* scheduled scan clearly can't be running any more, but tell
* cfg80211 and clear local state
struct ieee80211_sub_if_data *sched_scan_sdata;
struct cfg80211_sched_scan_request *sched_scan_req;
bool sched_scan_stopped = false;
+ bool suspended = local->suspended;
/* nothing to do if HW shouldn't run */
if (!local->open_count)
goto wake_up;
#ifdef CONFIG_PM
- if (local->suspended)
+ if (suspended)
local->resuming = true;
if (local->wowlan) {
+ /*
+ * In the wowlan case, both mac80211 and the device
+ * are functional when the resume op is called, so
+		 * clear local->suspended so that the device can operate
+ * normally (e.g. pass rx frames).
+ */
+ local->suspended = false;
res = drv_resume(local);
local->wowlan = false;
if (res < 0) {
/*
* res is 1, which means the driver requested
* to go through a regular reset on wakeup.
+			 * Restore local->suspended in this case.
*/
reconfig_due_to_wowlan = true;
+ local->suspended = true;
}
#endif
*/
res = drv_start(local);
if (res) {
- if (local->suspended)
+ if (suspended)
WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
else
WARN(1, "Hardware became unavailable during restart.\n");
* If this is for hw restart things are still running.
* We may want to change that later, however.
*/
- if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
+ if (local->open_count && (!suspended || reconfig_due_to_wowlan))
drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
- if (!local->suspended)
+ if (!suspended)
return 0;
#ifdef CONFIG_PM
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
enum ieee80211_sta_rx_bandwidth bw;
+ enum nl80211_chan_width bss_width = sdata->vif.bss_conf.chandef.width;
- bw = ieee80211_chan_width_to_rx_bw(sdata->vif.bss_conf.chandef.width);
- bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+ bw = ieee80211_sta_cap_rx_bw(sta);
bw = min(bw, sta->cur_max_bandwidth);
+ /* do not cap the BW of TDLS WIDER_BW peers by the bss */
+ if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+ bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+
return bw;
}
if (changed > 0)
rate_control_rate_update(local, sband, sta, changed);
}
+
+void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+ u16 vht_mask[NL80211_VHT_NSS_MAX])
+{
+ int i;
+ u16 mask, cap = le16_to_cpu(vht_cap);
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ mask = (cap >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ switch (mask) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ vht_mask[i] = 0x00FF;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ vht_mask[i] = 0x01FF;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ vht_mask[i] = 0x03FF;
+ break;
+ case IEEE80211_VHT_MCS_NOT_SUPPORTED:
+ default:
+ vht_mask[i] = 0;
+ break;
+ }
+ }
+}
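The rx_mcs_map decoded above packs one 2-bit field per spatial stream into 16 bits: 0 means MCS 0-7 supported, 1 means MCS 0-8, 2 means MCS 0-9, and 3 means the stream is unsupported. A standalone sketch of the same decoding, using 0xFFFA as a made-up map for a 2-stream MCS 0-9 device:

#include <stdio.h>

#define NSS_MAX 8

int main(void)
{
	unsigned int cap = 0xFFFA;	/* made-up map: 2 streams, MCS 0-9 */
	unsigned int vht_mask[NSS_MAX];
	int i;

	for (i = 0; i < NSS_MAX; i++) {
		switch ((cap >> (i * 2)) & 3) {
		case 0:
			vht_mask[i] = 0x00FF;	/* MCS 0-7 */
			break;
		case 1:
			vht_mask[i] = 0x01FF;	/* MCS 0-8 */
			break;
		case 2:
			vht_mask[i] = 0x03FF;	/* MCS 0-9 */
			break;
		default:
			vht_mask[i] = 0;	/* stream not supported */
			break;
		}
	}

	for (i = 0; i < NSS_MAX; i++)
		printf("NSS %d: 0x%04x\n", i + 1, vht_mask[i]);
	return 0;
}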
return RX_DROP_UNUSABLE;
}
- ccmp_hdr2pn(pn, skb->data + hdrlen);
+ if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+ ccmp_hdr2pn(pn, skb->data + hdrlen);
- queue = rx->security_idx;
+ queue = rx->security_idx;
- if (memcmp(pn, key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN) <= 0) {
- key->u.ccmp.replays++;
- return RX_DROP_UNUSABLE;
- }
+ if (memcmp(pn, key->u.ccmp.rx_pn[queue],
+ IEEE80211_CCMP_PN_LEN) <= 0) {
+ key->u.ccmp.replays++;
+ return RX_DROP_UNUSABLE;
+ }
- if (!(status->flag & RX_FLAG_DECRYPTED)) {
- u8 aad[2 * AES_BLOCK_SIZE];
- u8 b_0[AES_BLOCK_SIZE];
- /* hardware didn't decrypt/verify MIC */
- ccmp_special_blocks(skb, pn, b_0, aad);
+ if (!(status->flag & RX_FLAG_DECRYPTED)) {
+ u8 aad[2 * AES_BLOCK_SIZE];
+ u8 b_0[AES_BLOCK_SIZE];
+ /* hardware didn't decrypt/verify MIC */
+ ccmp_special_blocks(skb, pn, b_0, aad);
+
+ if (ieee80211_aes_ccm_decrypt(
+ key->u.ccmp.tfm, b_0, aad,
+ skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
+ data_len,
+ skb->data + skb->len - mic_len, mic_len))
+ return RX_DROP_UNUSABLE;
+ }
- if (ieee80211_aes_ccm_decrypt(
- key->u.ccmp.tfm, b_0, aad,
- skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
- data_len,
- skb->data + skb->len - mic_len, mic_len))
- return RX_DROP_UNUSABLE;
+ memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
}
- memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
-
/* Remove CCMP header and MIC */
if (pskb_trim(skb, skb->len - mic_len))
return RX_DROP_UNUSABLE;
return RX_DROP_UNUSABLE;
}
- gcmp_hdr2pn(pn, skb->data + hdrlen);
+ if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+ gcmp_hdr2pn(pn, skb->data + hdrlen);
- queue = rx->security_idx;
+ queue = rx->security_idx;
- if (memcmp(pn, key->u.gcmp.rx_pn[queue], IEEE80211_GCMP_PN_LEN) <= 0) {
- key->u.gcmp.replays++;
- return RX_DROP_UNUSABLE;
- }
+ if (memcmp(pn, key->u.gcmp.rx_pn[queue],
+ IEEE80211_GCMP_PN_LEN) <= 0) {
+ key->u.gcmp.replays++;
+ return RX_DROP_UNUSABLE;
+ }
- if (!(status->flag & RX_FLAG_DECRYPTED)) {
- u8 aad[2 * AES_BLOCK_SIZE];
- u8 j_0[AES_BLOCK_SIZE];
- /* hardware didn't decrypt/verify MIC */
- gcmp_special_blocks(skb, pn, j_0, aad);
+ if (!(status->flag & RX_FLAG_DECRYPTED)) {
+ u8 aad[2 * AES_BLOCK_SIZE];
+ u8 j_0[AES_BLOCK_SIZE];
+ /* hardware didn't decrypt/verify MIC */
+ gcmp_special_blocks(skb, pn, j_0, aad);
+
+ if (ieee80211_aes_gcm_decrypt(
+ key->u.gcmp.tfm, j_0, aad,
+ skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
+ data_len,
+ skb->data + skb->len -
+ IEEE80211_GCMP_MIC_LEN))
+ return RX_DROP_UNUSABLE;
+ }
- if (ieee80211_aes_gcm_decrypt(
- key->u.gcmp.tfm, j_0, aad,
- skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
- data_len,
- skb->data + skb->len - IEEE80211_GCMP_MIC_LEN))
- return RX_DROP_UNUSABLE;
+ memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
}
- memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
-
/* Remove GCMP header and MIC */
if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN))
return RX_DROP_UNUSABLE;
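The replay test above works because ccmp_hdr2pn()/gcmp_hdr2pn() extract the 6-byte packet number most-significant byte first, so a plain memcmp() orders PNs numerically and anything not strictly larger than the last accepted PN is a replay. A standalone sketch with made-up PN values:

#include <stdio.h>
#include <string.h>

#define PN_LEN 6

/* a frame is a replay unless its PN is strictly larger than the last one */
static int pn_replayed(const unsigned char *pn, const unsigned char *last_pn)
{
	return memcmp(pn, last_pn, PN_LEN) <= 0;
}

int main(void)
{
	unsigned char last[PN_LEN] = { 0, 0, 0, 0, 0, 5 };
	unsigned char ok[PN_LEN]   = { 0, 0, 0, 0, 0, 6 };
	unsigned char bad[PN_LEN]  = { 0, 0, 0, 0, 0, 5 };

	printf("%d %d\n", pn_replayed(ok, last), pn_replayed(bad, last));
	/* prints: 0 1 */
	return 0;
}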
{
ASSERT_RTNL();
- if (wpan_dev->min_be == min_be &&
- wpan_dev->max_be == max_be)
- return 0;
-
wpan_dev->min_be = min_be;
wpan_dev->max_be = max_be;
return 0;
{
ASSERT_RTNL();
- if (wpan_dev->short_addr == short_addr)
- return 0;
-
wpan_dev->short_addr = short_addr;
return 0;
}
{
ASSERT_RTNL();
- if (wpan_dev->csma_retries == max_csma_backoffs)
- return 0;
-
wpan_dev->csma_retries = max_csma_backoffs;
return 0;
}
{
ASSERT_RTNL();
- if (wpan_dev->frame_retries == max_frame_retries)
- return 0;
-
wpan_dev->frame_retries = max_frame_retries;
return 0;
}
{
ASSERT_RTNL();
- if (wpan_dev->lbt == mode)
- return 0;
-
wpan_dev->lbt = mode;
return 0;
}
+static int
+ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, bool ackreq)
+{
+ ASSERT_RTNL();
+
+ wpan_dev->ackreq = ackreq;
+ return 0;
+}
+
const struct cfg802154_ops mac802154_config_ops = {
.add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
.del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
.set_max_csma_backoffs = ieee802154_set_max_csma_backoffs,
.set_max_frame_retries = ieee802154_set_max_frame_retries,
.set_lbt_mode = ieee802154_set_lbt_mode,
+ .set_ackreq_default = ieee802154_set_ackreq_default,
};
if (netif_running(dev))
return -EBUSY;
+	/* the lowpan interface needs to be down to
+	 * update the SLAAC address after ifup
+	 */
+ if (sdata->wpan_dev.lowpan_dev) {
+ if (netif_running(sdata->wpan_dev.lowpan_dev))
+ return -EBUSY;
+ }
+
ieee802154_be64_to_le64(&extended_addr, addr->sa_data);
if (!ieee802154_is_valid_extended_unicast_addr(extended_addr))
return -EINVAL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
sdata->wpan_dev.extended_addr = extended_addr;
+	/* update the lowpan interface MAC address
+	 * when the wpan MAC address has changed
+	 */
+ if (sdata->wpan_dev.lowpan_dev)
+ memcpy(sdata->wpan_dev.lowpan_dev->dev_addr, dev->dev_addr,
+ dev->addr_len);
+
return mac802154_wpan_update_llsec(dev);
}
wpan_dev->min_be = 3;
wpan_dev->max_be = 5;
wpan_dev->csma_retries = 4;
- /* for compatibility, actual default is 3 */
- wpan_dev->frame_retries = -1;
+ wpan_dev->frame_retries = 3;
wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
phy->supported.max_minbe = 8;
phy->supported.min_maxbe = 3;
phy->supported.max_maxbe = 8;
- phy->supported.min_frame_retries = -1;
+ phy->supported.min_frame_retries = 0;
phy->supported.max_frame_retries = 7;
phy->supported.max_csma_backoffs = 5;
phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
}
if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
- /* TODO should be 3, but our default value is -1 which means
- * no ARET handling.
- */
- local->phy->supported.min_frame_retries = -1;
- local->phy->supported.max_frame_retries = -1;
+ local->phy->supported.min_frame_retries = 3;
+ local->phy->supported.max_frame_retries = 3;
}
if (hw->flags & IEEE802154_HW_PROMISCUOUS)
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = NULL;
struct rt6_info *rt6 = NULL;
- struct lwtunnel_state *lwtstate = NULL;
int err = 0;
bool bos;
int i;
if (skb->protocol == htons(ETH_P_IP)) {
ttl = ip_hdr(skb)->ttl;
rt = (struct rtable *)dst;
- lwtstate = rt->rt_lwtstate;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
ttl = ipv6_hdr(skb)->hop_limit;
rt6 = (struct rt6_info *)dst;
- lwtstate = rt6->rt6i_lwtstate;
} else {
goto drop;
}
/* Find the output device */
out_dev = dst->dev;
if (!mpls_output_possible(out_dev) ||
- !lwtstate || skb_warn_if_lro(skb))
+ !dst->lwtstate || skb_warn_if_lro(skb))
goto drop;
skb_forward_csum(skb);
- tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+ tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate);
/* Verify the destination can hold the packet */
new_header_size = mpls_encap_size(tun_encap_info);
depends on NETFILTER_ADVANCED
depends on IPV6 || IPV6=n
depends on !NF_CONNTRACK || NF_CONNTRACK
+ select NF_DUP_IPV4
+ select NF_DUP_IPV6 if IP6_NF_IPTABLES
---help---
This option adds a "TEE" target with which a packet can be cloned and
this clone be rerouted to another nexthop.
" for conn " FMT_CONN "\n",
__func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
- h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE,
+ h = nf_conntrack_find_get(ip_vs_conn_net(cp), &nf_ct_zone_dflt,
&tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
unsigned int nf_conntrack_hash_rnd __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
-static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
{
unsigned int n;
* three bytes manually.
*/
n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
- return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
+ return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
(((__force __u16)tuple->dst.u.all << 16) |
tuple->dst.protonum));
}
}
static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
- u16 zone, unsigned int size)
+ unsigned int size)
{
- return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
+ return __hash_bucket(hash_conntrack_raw(tuple), size);
}
-static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
+static inline u_int32_t hash_conntrack(const struct net *net,
const struct nf_conntrack_tuple *tuple)
{
- return __hash_conntrack(tuple, zone, net->ct.htable_size);
+ return __hash_conntrack(tuple, net->ct.htable_size);
}
bool
}
/* Released via destroy_conntrack() */
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ gfp_t flags)
{
struct nf_conn *tmpl;
tmpl->status = IPS_TEMPLATE;
write_pnet(&tmpl->ct_net, net);
-#ifdef CONFIG_NF_CONNTRACK_ZONES
- if (zone) {
- struct nf_conntrack_zone *nf_ct_zone;
+ if (nf_ct_zone_add(tmpl, flags, zone) < 0)
+ goto out_free;
- nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
- if (!nf_ct_zone)
- goto out_free;
- nf_ct_zone->id = zone;
- }
-#endif
atomic_set(&tmpl->ct_general.use, 0);
return tmpl;
-#ifdef CONFIG_NF_CONNTRACK_ZONES
out_free:
kfree(tmpl);
return NULL;
-#endif
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
{
struct net *net = nf_ct_net(ct);
unsigned int hash, reply_hash;
- u16 zone = nf_ct_zone(ct);
unsigned int sequence;
nf_ct_helper_destroy(ct);
local_bh_disable();
do {
sequence = read_seqcount_begin(&net->ct.generation);
- hash = hash_conntrack(net, zone,
+ hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
- reply_hash = hash_conntrack(net, zone,
+ reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
- const struct nf_conntrack_tuple *tuple,
- u16 zone)
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone)
{
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
* so we need to check that the conntrack is confirmed
*/
return nf_ct_tuple_equal(tuple, &h->tuple) &&
- nf_ct_zone(ct) == zone &&
- nf_ct_is_confirmed(ct);
+ nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
+ nf_ct_is_confirmed(ct);
}
/*
* and recheck nf_ct_tuple_equal(tuple, &h->tuple)
*/
static struct nf_conntrack_tuple_hash *
-____nf_conntrack_find(struct net *net, u16 zone,
+____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple, u32 hash)
{
struct nf_conntrack_tuple_hash *h;
/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
-__nf_conntrack_find_get(struct net *net, u16 zone,
+__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple, u32 hash)
{
struct nf_conntrack_tuple_hash *h;
}
struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
return __nf_conntrack_find_get(net, zone, tuple,
- hash_conntrack_raw(tuple, zone));
+ hash_conntrack_raw(tuple));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
+ const struct nf_conntrack_zone *zone;
struct net *net = nf_ct_net(ct);
unsigned int hash, reply_hash;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- u16 zone;
unsigned int sequence;
zone = nf_ct_zone(ct);
local_bh_disable();
do {
sequence = read_seqcount_begin(&net->ct.generation);
- hash = hash_conntrack(net, zone,
+ hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
- reply_hash = hash_conntrack(net, zone,
+ reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&h->tuple) &&
- zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+ nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+ NF_CT_DIRECTION(h)))
goto out;
hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
&h->tuple) &&
- zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+ nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+ NF_CT_DIRECTION(h)))
goto out;
add_timer(&ct->timeout);
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
+ const struct nf_conntrack_zone *zone;
unsigned int hash, reply_hash;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
- u16 zone;
unsigned int sequence;
ct = nf_ct_get(skb, &ctinfo);
/* reuse the hash saved before */
hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
hash = hash_bucket(hash, net);
- reply_hash = hash_conntrack(net, zone,
+ reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&h->tuple) &&
- zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+ nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+ NF_CT_DIRECTION(h)))
goto out;
hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
&h->tuple) &&
- zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+ nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+ NF_CT_DIRECTION(h)))
goto out;
/* Timer relative to confirmation time, not original
const struct nf_conn *ignored_conntrack)
{
struct net *net = nf_ct_net(ignored_conntrack);
+ const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
struct nf_conn *ct;
- u16 zone = nf_ct_zone(ignored_conntrack);
- unsigned int hash = hash_conntrack(net, zone, tuple);
+ unsigned int hash;
+
+ zone = nf_ct_zone(ignored_conntrack);
+ hash = hash_conntrack(net, tuple);
/* Disable BHs the entire time since we need to disable them at
* least once for the stats anyway.
ct = nf_ct_tuplehash_to_ctrack(h);
if (ct != ignored_conntrack &&
nf_ct_tuple_equal(tuple, &h->tuple) &&
- nf_ct_zone(ct) == zone) {
+ nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
NF_CT_STAT_INC(net, found);
rcu_read_unlock_bh();
return 1;
}
static struct nf_conn *
-__nf_conntrack_alloc(struct net *net, u16 zone,
+__nf_conntrack_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp, u32 hash)
if (unlikely(!nf_conntrack_hash_rnd)) {
init_nf_conntrack_hash_rnd();
/* recompute the hash as nf_conntrack_hash_rnd is initialized */
- hash = hash_conntrack_raw(orig, zone);
+ hash = hash_conntrack_raw(orig);
}
/* We don't want any race condition at early drop stage */
* SLAB_DESTROY_BY_RCU.
*/
ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
- if (ct == NULL) {
- atomic_dec(&net->ct.count);
- return ERR_PTR(-ENOMEM);
- }
+ if (ct == NULL)
+ goto out;
+
spin_lock_init(&ct->lock);
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
memset(&ct->__nfct_init_offset[0], 0,
offsetof(struct nf_conn, proto) -
offsetof(struct nf_conn, __nfct_init_offset[0]));
-#ifdef CONFIG_NF_CONNTRACK_ZONES
- if (zone) {
- struct nf_conntrack_zone *nf_ct_zone;
- nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
- if (!nf_ct_zone)
- goto out_free;
- nf_ct_zone->id = zone;
- }
-#endif
+ if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
+ goto out_free;
+
/* Because we use RCU lookups, we set ct_general.use to zero before
* this is inserted in any list.
*/
atomic_set(&ct->ct_general.use, 0);
return ct;
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
out_free:
- atomic_dec(&net->ct.count);
kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+out:
+ atomic_dec(&net->ct.count);
return ERR_PTR(-ENOMEM);
-#endif
}
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp)
struct nf_conntrack_tuple repl_tuple;
struct nf_conntrack_ecache *ecache;
struct nf_conntrack_expect *exp = NULL;
- u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+ const struct nf_conntrack_zone *zone;
struct nf_conn_timeout *timeout_ext;
+ struct nf_conntrack_zone tmp;
unsigned int *timeouts;
if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
return NULL;
}
+ zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
hash);
if (IS_ERR(ct))
int *set_reply,
enum ip_conntrack_info *ctinfo)
{
+ const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
+ struct nf_conntrack_zone tmp;
struct nf_conn *ct;
- u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
u32 hash;
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
}
/* look for tuple match */
- hash = hash_conntrack_raw(&tuple, zone);
+ zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+ hash = hash_conntrack_raw(&tuple);
h = __nf_conntrack_find_get(net, zone, &tuple, hash);
if (!h) {
h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
+/* Built-in default zone used e.g. by modules. */
+const struct nf_conntrack_zone nf_ct_zone_dflt = {
+ .id = NF_CT_DEFAULT_ZONE_ID,
+ .dir = NF_CT_DEFAULT_ZONE_DIR,
+};
+EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
+
#ifdef CONFIG_NF_CONNTRACK_ZONES
static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
.len = sizeof(struct nf_conntrack_zone),
struct nf_conntrack_tuple_hash, hnnode);
ct = nf_ct_tuplehash_to_ctrack(h);
hlist_nulls_del_rcu(&h->hnnode);
- bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
- hashsize);
+ bucket = __hash_conntrack(&h->tuple, hashsize);
hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
}
}
struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;
h = nf_ct_expect_dst_hash(tuple);
hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
- nf_ct_zone(i->master) == zone)
+ nf_ct_zone_equal_any(i->master, zone))
return i;
}
return NULL;
/* Just find a expectation corresponding to a tuple. */
struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;
/* If an expectation for this connection is found, it gets deleted from
 * the global list and then returned. */
struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i, *exp = NULL;
hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
- nf_ct_zone(i->master) == zone) {
+ nf_ct_zone_equal_any(i->master, zone)) {
exp = i;
break;
}
}
return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
- nf_ct_zone(a->master) == nf_ct_zone(b->master);
+ nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}
static inline int expect_matches(const struct nf_conntrack_expect *a,
const struct nf_conntrack_expect *b)
{
return a->master == b->master && a->class == b->class &&
- nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
- nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
- nf_ct_zone(a->master) == nf_ct_zone(b->master);
+ nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
+ nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
+ nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}
/* Generally a bad idea to call this: could have matched already. */
return ret;
}
+static inline int
+ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
+ const struct nf_conntrack_zone *zone, int dir)
+{
+ if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
+ return 0;
+ if (nla_put_be16(skb, attrtype, htons(zone->id)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
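The net effect: a zone tied to one direction is dumped as CTA_TUPLE_ZONE inside the matching tuple nest only, an undirected zone is dumped once as the top-level CTA_ZONE, and the default zone id is never emitted at all.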
static inline int
ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
struct nf_conn *ct)
{
+ const struct nf_conntrack_zone *zone;
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
struct nlattr *nest_parms;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
+ zone = nf_ct_zone(ct);
+
nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
goto nla_put_failure;
+ if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+ NF_CT_ZONE_DIR_ORIG) < 0)
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
goto nla_put_failure;
+ if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+ NF_CT_ZONE_DIR_REPL) < 0)
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
- if (nf_ct_zone(ct) &&
- nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+ if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+ NF_CT_DEFAULT_ZONE_DIR) < 0)
goto nla_put_failure;
if (ctnetlink_dump_status(skb, ct) < 0 ||
+ nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
- + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+ + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
#endif
+ ctnetlink_proto_size(ct)
+ ctnetlink_label_size(ct)
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
+ const struct nf_conntrack_zone *zone;
struct net *net;
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
nfmsg->res_id = 0;
rcu_read_lock();
+ zone = nf_ct_zone(ct);
+
nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
goto nla_put_failure;
+ if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+ NF_CT_ZONE_DIR_ORIG) < 0)
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
goto nla_put_failure;
+ if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+ NF_CT_ZONE_DIR_REPL) < 0)
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
- if (nf_ct_zone(ct) &&
- nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+ if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+ NF_CT_DEFAULT_ZONE_DIR) < 0)
goto nla_put_failure;
if (ctnetlink_dump_id(skb, ct) < 0)
return ret;
}
+static int
+ctnetlink_parse_zone(const struct nlattr *attr,
+ struct nf_conntrack_zone *zone)
+{
+ nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
+ NF_CT_DEFAULT_ZONE_DIR, 0);
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ if (attr)
+ zone->id = ntohs(nla_get_be16(attr));
+#else
+ if (attr)
+ return -EOPNOTSUPP;
+#endif
+ return 0;
+}
+
+static int
+ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
+ struct nf_conntrack_zone *zone)
+{
+ int ret;
+
+ if (zone->id != NF_CT_DEFAULT_ZONE_ID)
+ return -EINVAL;
+
+ ret = ctnetlink_parse_zone(attr, zone);
+ if (ret < 0)
+ return ret;
+
+ if (type == CTA_TUPLE_REPLY)
+ zone->dir = NF_CT_ZONE_DIR_REPL;
+ else
+ zone->dir = NF_CT_ZONE_DIR_ORIG;
+
+ return 0;
+}
+
static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
[CTA_TUPLE_IP] = { .type = NLA_NESTED },
[CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
+ [CTA_TUPLE_ZONE] = { .type = NLA_U16 },
};
static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple,
- enum ctattr_type type, u_int8_t l3num)
+ enum ctattr_type type, u_int8_t l3num,
+ struct nf_conntrack_zone *zone)
{
struct nlattr *tb[CTA_TUPLE_MAX+1];
int err;
if (err < 0)
return err;
+ if (tb[CTA_TUPLE_ZONE]) {
+ if (!zone)
+ return -EINVAL;
+
+ err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
+ type, zone);
+ if (err < 0)
+ return err;
+ }
+
/* orig and expect tuples get DIR_ORIGINAL */
if (type == CTA_TUPLE_REPLY)
tuple->dst.dir = IP_CT_DIR_REPLY;
return 0;
}
-static int
-ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
-{
- if (attr)
-#ifdef CONFIG_NF_CONNTRACK_ZONES
- *zone = ntohs(nla_get_be16(attr));
-#else
- return -EOPNOTSUPP;
-#endif
- else
- *zone = 0;
-
- return 0;
-}
-
static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
[CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
.len = NF_CT_HELPER_NAME_LEN - 1 },
struct nf_conn *ct;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- u16 zone;
+ struct nf_conntrack_zone zone;
int err;
err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
return err;
if (cda[CTA_TUPLE_ORIG])
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+ u3, &zone);
else if (cda[CTA_TUPLE_REPLY])
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+ u3, &zone);
else {
return ctnetlink_flush_conntrack(net, cda,
NETLINK_CB(skb).portid,
if (err < 0)
return err;
- h = nf_conntrack_find_get(net, zone, &tuple);
+ h = nf_conntrack_find_get(net, &zone, &tuple);
if (!h)
return -ENOENT;
struct sk_buff *skb2 = NULL;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- u16 zone;
+ struct nf_conntrack_zone zone;
int err;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
return err;
if (cda[CTA_TUPLE_ORIG])
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+ u3, &zone);
else if (cda[CTA_TUPLE_REPLY])
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+ u3, &zone);
else
return -EINVAL;
if (err < 0)
return err;
- h = nf_conntrack_find_get(net, zone, &tuple);
+ h = nf_conntrack_find_get(net, &zone, &tuple);
if (!h)
return -ENOENT;
}
static struct nf_conn *
-ctnetlink_create_conntrack(struct net *net, u16 zone,
+ctnetlink_create_conntrack(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nlattr * const cda[],
struct nf_conntrack_tuple *otuple,
struct nf_conntrack_tuple *rtuple,
struct nf_conntrack_tuple_hash *master_h;
struct nf_conn *master_ct;
- err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
+ err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
+ u3, NULL);
if (err < 0)
goto err2;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nf_conn *ct;
u_int8_t u3 = nfmsg->nfgen_family;
- u16 zone;
+ struct nf_conntrack_zone zone;
int err;
err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
return err;
if (cda[CTA_TUPLE_ORIG]) {
- err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
+ err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
+ u3, &zone);
if (err < 0)
return err;
}
if (cda[CTA_TUPLE_REPLY]) {
- err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
+ err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
+ u3, &zone);
if (err < 0)
return err;
}
if (cda[CTA_TUPLE_ORIG])
- h = nf_conntrack_find_get(net, zone, &otuple);
+ h = nf_conntrack_find_get(net, &zone, &otuple);
else if (cda[CTA_TUPLE_REPLY])
- h = nf_conntrack_find_get(net, zone, &rtuple);
+ h = nf_conntrack_find_get(net, &zone, &rtuple);
if (h == NULL) {
err = -ENOENT;
if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
return -EINVAL;
- ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
+ ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
&rtuple, u3);
if (IS_ERR(ct))
return PTR_ERR(ct);
+ nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
- + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+ + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
#endif
+ ctnetlink_proto_size(ct)
;
static int
ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
{
+ const struct nf_conntrack_zone *zone;
struct nlattr *nest_parms;
rcu_read_lock();
+ zone = nf_ct_zone(ct);
+
nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
goto nla_put_failure;
+ if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+ NF_CT_ZONE_DIR_ORIG) < 0)
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
goto nla_put_failure;
+ if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+ NF_CT_ZONE_DIR_REPL) < 0)
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
- if (nf_ct_zone(ct)) {
- if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
- goto nla_put_failure;
- }
+ if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+ NF_CT_DEFAULT_ZONE_DIR) < 0)
+ goto nla_put_failure;
if (ctnetlink_dump_id(skb, ct) < 0)
goto nla_put_failure;
int err;
err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
- nf_ct_l3num(ct));
+ nf_ct_l3num(ct), NULL);
if (err < 0)
return err;
return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
- nf_ct_l3num(ct));
+ nf_ct_l3num(ct), NULL);
}
static int
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
- u16 zone = 0;
+ struct nf_conntrack_zone zone;
struct netlink_dump_control c = {
.dump = ctnetlink_exp_ct_dump_table,
.done = ctnetlink_exp_done,
};
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
+ u3, NULL);
if (err < 0)
return err;
- if (cda[CTA_EXPECT_ZONE]) {
- err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
- if (err < 0)
- return err;
- }
+ err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
+ if (err < 0)
+ return err;
- h = nf_conntrack_find_get(net, zone, &tuple);
+ h = nf_conntrack_find_get(net, &zone, &tuple);
if (!h)
return -ENOENT;
struct sk_buff *skb2;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- u16 zone;
+ struct nf_conntrack_zone zone;
int err;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
return err;
if (cda[CTA_EXPECT_TUPLE])
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+ u3, NULL);
else if (cda[CTA_EXPECT_MASTER])
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
+ u3, NULL);
else
return -EINVAL;
if (err < 0)
return err;
- exp = nf_ct_expect_find_get(net, zone, &tuple);
+ exp = nf_ct_expect_find_get(net, &zone, &tuple);
if (!exp)
return -ENOENT;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct hlist_node *next;
u_int8_t u3 = nfmsg->nfgen_family;
+ struct nf_conntrack_zone zone;
unsigned int i;
- u16 zone;
int err;
if (cda[CTA_EXPECT_TUPLE]) {
if (err < 0)
return err;
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+ u3, NULL);
if (err < 0)
return err;
/* bump usage count to 2 */
- exp = nf_ct_expect_find_get(net, zone, &tuple);
+ exp = nf_ct_expect_find_get(net, &zone, &tuple);
if (!exp)
return -ENOENT;
return -EINVAL;
err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
- &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
+ &nat_tuple, CTA_EXPECT_NAT_TUPLE,
+ u3, NULL);
if (err < 0)
return err;
}
static int
-ctnetlink_create_expect(struct net *net, u16 zone,
+ctnetlink_create_expect(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nlattr * const cda[],
u_int8_t u3, u32 portid, int report)
{
int err;
/* caller guarantees that those three CTA_EXPECT_* exist */
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+ u3, NULL);
if (err < 0)
return err;
- err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
+ err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
+ u3, NULL);
if (err < 0)
return err;
- err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
+ err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
+ u3, NULL);
if (err < 0)
return err;
struct nf_conntrack_expect *exp;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- u16 zone;
+ struct nf_conntrack_zone zone;
int err;
if (!cda[CTA_EXPECT_TUPLE]
if (err < 0)
return err;
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+ u3, NULL);
if (err < 0)
return err;
spin_lock_bh(&nf_conntrack_expect_lock);
- exp = __nf_ct_expect_find(net, zone, &tuple);
-
+ exp = __nf_ct_expect_find(net, &zone, &tuple);
if (!exp) {
spin_unlock_bh(&nf_conntrack_expect_lock);
err = -ENOENT;
if (nlh->nlmsg_flags & NLM_F_CREATE) {
- err = ctnetlink_create_expect(net, zone, cda,
- u3,
+ err = ctnetlink_create_expect(net, &zone, cda, u3,
NETLINK_CB(skb).portid,
nlmsg_report(nlh));
}
const struct nf_conntrack_tuple *t)
{
const struct nf_conntrack_tuple_hash *h;
+ const struct nf_conntrack_zone *zone;
struct nf_conntrack_expect *exp;
struct nf_conn *sibling;
- u16 zone = nf_ct_zone(ct);
pr_debug("trying to timeout ct or exp for tuple ");
nf_ct_dump_tuple(t);
+ zone = nf_ct_zone(ct);
h = nf_conntrack_find_get(net, zone, t);
if (h) {
sibling = nf_ct_tuplehash_to_ctrack(h);
ntohl(sack->end_seq), ntohl(new_end_seq));
inet_proto_csum_replace4(&tcph->check, skb,
- sack->start_seq, new_start_seq, 0);
+ sack->start_seq, new_start_seq, false);
inet_proto_csum_replace4(&tcph->check, skb,
- sack->end_seq, new_end_seq, 0);
+ sack->end_seq, new_end_seq, false);
sack->start_seq = new_start_seq;
sack->end_seq = new_end_seq;
sackoff += sizeof(*sack);
newseq = htonl(ntohl(tcph->seq) + seqoff);
newack = htonl(ntohl(tcph->ack_seq) - ackoff);
- inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
- inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
+ inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false);
+ inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack,
+ false);
pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
}
#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+static void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+ int dir)
+{
+ const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
+
+ if (zone->dir != dir)
+ return;
+ switch (zone->dir) {
+ case NF_CT_DEFAULT_ZONE_DIR:
+ seq_printf(s, "zone=%u ", zone->id);
+ break;
+ case NF_CT_ZONE_DIR_ORIG:
+ seq_printf(s, "zone-orig=%u ", zone->id);
+ break;
+ case NF_CT_ZONE_DIR_REPL:
+ seq_printf(s, "zone-reply=%u ", zone->id);
+ break;
+ default:
+ break;
+ }
+}
+#else
+static inline void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+ int dir)
+{
+}
+#endif
+
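For illustration (values hypothetical, accounting fields elided), a conntrack pinned to zone 23 in the original direction would now render in /proc/net/nf_conntrack along these lines, with the zone tag following the tuple it applies to:

    ipv4     2 tcp      6 117 SYN_SENT src=10.0.0.1 dst=10.0.0.2 sport=1025 dport=80 zone-orig=23 [UNREPLIED] src=10.0.0.2 dst=10.0.0.1 sport=80 dport=1025 mark=0 use=1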
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
static void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
{
print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
l3proto, l4proto);
+ ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG);
+
if (seq_has_overflowed(s))
goto release;
print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
l3proto, l4proto);
+ ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL);
+
if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
goto release;
#endif
ct_show_secctx(s, ct);
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
- seq_printf(s, "zone=%u ", nf_ct_zone(ct));
-#endif
-
+ ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR);
ct_show_delta_time(s, ct);
seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
-hash_by_src(const struct net *net, u16 zone,
- const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
{
unsigned int hash;
/* Original src, to ensure we map it consistently if poss. */
hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
- tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
+ tuple->dst.protonum ^ nf_conntrack_hash_rnd);
return reciprocal_scale(hash, net->ct.nat_htable_size);
}
/* Only called for SRC manip */
static int
-find_appropriate_src(struct net *net, u16 zone,
+find_appropriate_src(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_nat_l3proto *l3proto,
const struct nf_nat_l4proto *l4proto,
const struct nf_conntrack_tuple *tuple,
struct nf_conntrack_tuple *result,
const struct nf_nat_range *range)
{
- unsigned int h = hash_by_src(net, zone, tuple);
+ unsigned int h = hash_by_src(net, tuple);
const struct nf_conn_nat *nat;
const struct nf_conn *ct;
hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
ct = nat->ct;
- if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
+ if (same_src(ct, tuple) &&
+ nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
/* Copy source part from reply tuple. */
nf_ct_invert_tuplepr(result,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
* the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
-find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
+find_best_ips_proto(const struct nf_conntrack_zone *zone,
+ struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
const struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
*/
j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
range->flags & NF_NAT_RANGE_PERSISTENT ?
- 0 : (__force u32)tuple->dst.u3.all[max] ^ zone);
+ 0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);
full_range = false;
for (i = 0; i <= max; i++) {
struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
{
+ const struct nf_conntrack_zone *zone;
const struct nf_nat_l3proto *l3proto;
const struct nf_nat_l4proto *l4proto;
struct net *net = nf_ct_net(ct);
- u16 zone = nf_ct_zone(ct);
+
+ zone = nf_ct_zone(ct);
rcu_read_lock();
l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
if (maniptype == NF_NAT_MANIP_SRC) {
unsigned int srchash;
- srchash = hash_by_src(net, nf_ct_zone(ct),
+ srchash = hash_by_src(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
spin_lock_bh(&nf_nat_lock);
/* nf_conntrack_alter_reply might re-allocate extension area */
l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum,
tuple, maniptype);
inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
- 0);
+ false);
return true;
}
return true;
l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
- inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
+ inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false);
return true;
}
l3proto->csum_update(skb, iphdroff, &hdr->check,
tuple, maniptype);
inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
- 0);
+ false);
if (!hdr->check)
hdr->check = CSUM_MANGLED_0;
}
}
l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
- inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
+ inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, false);
if (!hdr->check)
hdr->check = CSUM_MANGLED_0;
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_tcpudp.h>
#include <linux/netfilter/xt_SYNPROXY.h>
+
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
+#include <net/netfilter/nf_conntrack_zones.h>
int synproxy_net_id;
EXPORT_SYMBOL_GPL(synproxy_net_id);
synproxy->tsoff);
}
inet_proto_csum_replace4(&th->check, skb,
- old, *ptr, 0);
+ old, *ptr, false);
return 1;
}
optoff += op[1];
struct nf_conn *ct;
int err = -ENOMEM;
- ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
+ ct = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL);
if (!ct)
goto err1;
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure");
-static LIST_HEAD(nfnl_acct_list);
-
struct nf_acct {
atomic64_t pkts;
atomic64_t bytes;
const struct nlmsghdr *nlh, const struct nlattr * const tb[])
{
struct nf_acct *nfacct, *matching = NULL;
+ struct net *net = sock_net(nfnl);
char *acct_name;
unsigned int size = 0;
u32 flags = 0;
if (strlen(acct_name) == 0)
return -EINVAL;
- list_for_each_entry(nfacct, &nfnl_acct_list, head) {
+ list_for_each_entry(nfacct, &net->nfnl_acct_list, head) {
if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
}
atomic_set(&nfacct->refcnt, 1);
- list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
+ list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list);
return 0;
}
static int
nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct net *net = sock_net(skb->sk);
struct nf_acct *cur, *last;
const struct nfacct_filter *filter = cb->data;
cb->args[1] = 0;
rcu_read_lock();
- list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+ list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
if (last) {
if (cur != last)
continue;
nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
const struct nlmsghdr *nlh, const struct nlattr * const tb[])
{
+ struct net *net = sock_net(nfnl);
int ret = -ENOENT;
struct nf_acct *cur;
char *acct_name;
return -EINVAL;
acct_name = nla_data(tb[NFACCT_NAME]);
- list_for_each_entry(cur, &nfnl_acct_list, head) {
+ list_for_each_entry(cur, &net->nfnl_acct_list, head) {
struct sk_buff *skb2;
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0)
nfnl_acct_del(struct sock *nfnl, struct sk_buff *skb,
const struct nlmsghdr *nlh, const struct nlattr * const tb[])
{
+ struct net *net = sock_net(nfnl);
char *acct_name;
struct nf_acct *cur;
int ret = -ENOENT;
if (!tb[NFACCT_NAME]) {
- list_for_each_entry(cur, &nfnl_acct_list, head)
+ list_for_each_entry(cur, &net->nfnl_acct_list, head)
nfnl_acct_try_del(cur);
return 0;
}
acct_name = nla_data(tb[NFACCT_NAME]);
- list_for_each_entry(cur, &nfnl_acct_list, head) {
+ list_for_each_entry(cur, &net->nfnl_acct_list, head) {
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT);
-struct nf_acct *nfnl_acct_find_get(const char *acct_name)
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *acct_name)
{
struct nf_acct *cur, *acct = NULL;
rcu_read_lock();
- list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+ list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0)
continue;
void nfnl_acct_put(struct nf_acct *acct)
{
- atomic_dec(&acct->refcnt);
+ if (atomic_dec_and_test(&acct->refcnt))
+ kfree_rcu(acct, rcu_head);
+
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(nfnl_acct_put);
}
EXPORT_SYMBOL_GPL(nfnl_acct_overquota);
+static int __net_init nfnl_acct_net_init(struct net *net)
+{
+ INIT_LIST_HEAD(&net->nfnl_acct_list);
+
+ return 0;
+}
+
+static void __net_exit nfnl_acct_net_exit(struct net *net)
+{
+ struct nf_acct *cur, *tmp;
+
+ list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) {
+ list_del_rcu(&cur->head);
+
+ if (atomic_dec_and_test(&cur->refcnt))
+ kfree_rcu(cur, rcu_head);
+ }
+}
+
+static struct pernet_operations nfnl_acct_ops = {
+ .init = nfnl_acct_net_init,
+ .exit = nfnl_acct_net_exit,
+};
+
static int __init nfnl_acct_init(void)
{
int ret;
+ ret = register_pernet_subsys(&nfnl_acct_ops);
+ if (ret < 0) {
+ pr_err("nfnl_acct_init: failed to register pernet ops\n");
+ goto err_out;
+ }
+
pr_info("nfnl_acct: registering with nfnetlink.\n");
ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
if (ret < 0) {
pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
- goto err_out;
+ goto cleanup_pernet;
}
return 0;
+
+cleanup_pernet:
+ unregister_pernet_subsys(&nfnl_acct_ops);
err_out:
return ret;
}
static void __exit nfnl_acct_exit(void)
{
- struct nf_acct *cur, *tmp;
-
pr_info("nfnl_acct: unregistering from nfnetlink.\n");
nfnetlink_subsys_unregister(&nfnl_acct_subsys);
-
- list_for_each_entry_safe(cur, tmp, &nfnl_acct_list, head) {
- list_del_rcu(&cur->head);
- /* We are sure that our objects have no clients at this point,
- * it's safe to release them all without checking refcnt. */
- kfree_rcu(cur, rcu_head);
- }
+ unregister_pernet_subsys(&nfnl_acct_ops);
}
module_init(nfnl_acct_init);
#include <net/netfilter/nf_tables.h>
struct nft_counter {
- seqlock_t lock;
u64 bytes;
u64 packets;
};
+struct nft_counter_percpu {
+ struct nft_counter counter;
+ struct u64_stats_sync syncp;
+};
+
+struct nft_counter_percpu_priv {
+ struct nft_counter_percpu __percpu *counter;
+};
+
static void nft_counter_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
- struct nft_counter *priv = nft_expr_priv(expr);
-
- write_seqlock_bh(&priv->lock);
- priv->bytes += pkt->skb->len;
- priv->packets++;
- write_sequnlock_bh(&priv->lock);
+ struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+ struct nft_counter_percpu *this_cpu;
+
+ local_bh_disable();
+ this_cpu = this_cpu_ptr(priv->counter);
+ u64_stats_update_begin(&this_cpu->syncp);
+ this_cpu->counter.bytes += pkt->skb->len;
+ this_cpu->counter.packets++;
+ u64_stats_update_end(&this_cpu->syncp);
+ local_bh_enable();
}
static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
- struct nft_counter *priv = nft_expr_priv(expr);
+ struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+ struct nft_counter_percpu *cpu_stats;
+ struct nft_counter total;
+ u64 bytes, packets;
unsigned int seq;
- u64 bytes;
- u64 packets;
-
- do {
- seq = read_seqbegin(&priv->lock);
- bytes = priv->bytes;
- packets = priv->packets;
- } while (read_seqretry(&priv->lock, seq));
-
- if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(bytes)))
- goto nla_put_failure;
- if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(packets)))
+ int cpu;
+
+ memset(&total, 0, sizeof(total));
+ for_each_possible_cpu(cpu) {
+ cpu_stats = per_cpu_ptr(priv->counter, cpu);
+ do {
+ seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ bytes = cpu_stats->counter.bytes;
+ packets = cpu_stats->counter.packets;
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+
+ total.packets += packets;
+ total.bytes += bytes;
+ }
+
+ if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
+ nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
goto nla_put_failure;
return 0;
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
- struct nft_counter *priv = nft_expr_priv(expr);
+ struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+ struct nft_counter_percpu __percpu *cpu_stats;
+ struct nft_counter_percpu *this_cpu;
+
+ cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu);
+ if (cpu_stats == NULL)
+ return -ENOMEM;
+
+ preempt_disable();
+ this_cpu = this_cpu_ptr(cpu_stats);
+ if (tb[NFTA_COUNTER_PACKETS]) {
+ this_cpu->counter.packets =
+ be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+ }
+ if (tb[NFTA_COUNTER_BYTES]) {
+ this_cpu->counter.bytes =
+ be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+ }
+ preempt_enable();
+ priv->counter = cpu_stats;
+ return 0;
+}
- if (tb[NFTA_COUNTER_PACKETS])
- priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
- if (tb[NFTA_COUNTER_BYTES])
- priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+static void nft_counter_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
- seqlock_init(&priv->lock);
- return 0;
+ free_percpu(priv->counter);
}
static struct nft_expr_type nft_counter_type;
static const struct nft_expr_ops nft_counter_ops = {
.type = &nft_counter_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_counter)),
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_counter_percpu_priv)),
.eval = nft_counter_eval,
.init = nft_counter_init,
+ .destroy = nft_counter_destroy,
.dump = nft_counter_dump,
};
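Userspace is unaffected by this conversion: a rule such as `nft add rule ip filter input counter packets 0 bytes 0` (assuming an existing ip/filter/input table and chain) behaves as before. The expression merely keeps its state per cpu now, folding the per-cpu pairs back into one total at dump time and trading the old per-packet seqlock for a local u64_stats_sync.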
static DEFINE_SPINLOCK(limit_lock);
struct nft_limit {
+ u64 last;
u64 tokens;
+ u64 tokens_max;
u64 rate;
- u64 unit;
- unsigned long stamp;
+ u64 nsecs;
+ u32 burst;
};
-static void nft_limit_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
{
- struct nft_limit *priv = nft_expr_priv(expr);
+ u64 now, tokens;
+ s64 delta;
spin_lock_bh(&limit_lock);
- if (time_after_eq(jiffies, priv->stamp)) {
- priv->tokens = priv->rate;
- priv->stamp = jiffies + priv->unit * HZ;
- }
-
- if (priv->tokens >= 1) {
- priv->tokens--;
+ now = ktime_get_ns();
+ tokens = limit->tokens + now - limit->last;
+ if (tokens > limit->tokens_max)
+ tokens = limit->tokens_max;
+
+ limit->last = now;
+ delta = tokens - cost;
+ if (delta >= 0) {
+ limit->tokens = delta;
spin_unlock_bh(&limit_lock);
- return;
+ return false;
}
+ limit->tokens = tokens;
spin_unlock_bh(&limit_lock);
-
- regs->verdict.code = NFT_BREAK;
+ return true;
}
-static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
- [NFTA_LIMIT_RATE] = { .type = NLA_U64 },
- [NFTA_LIMIT_UNIT] = { .type = NLA_U64 },
-};
-
-static int nft_limit_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
+static int nft_limit_init(struct nft_limit *limit,
const struct nlattr * const tb[])
{
- struct nft_limit *priv = nft_expr_priv(expr);
+ u64 unit;
if (tb[NFTA_LIMIT_RATE] == NULL ||
tb[NFTA_LIMIT_UNIT] == NULL)
return -EINVAL;
- priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
- priv->unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
- priv->stamp = jiffies + priv->unit * HZ;
- priv->tokens = priv->rate;
+ limit->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+ unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+ limit->nsecs = unit * NSEC_PER_SEC;
+ if (limit->rate == 0 || limit->nsecs < unit)
+ return -EOVERFLOW;
+ limit->tokens = limit->tokens_max = limit->nsecs;
+
+ if (tb[NFTA_LIMIT_BURST]) {
+ u64 rate;
+
+ limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
+
+ rate = limit->rate + limit->burst;
+ if (rate < limit->rate)
+ return -EOVERFLOW;
+
+ limit->rate = rate;
+ }
+ limit->last = ktime_get_ns();
+
return 0;
}
-static int nft_limit_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
+ enum nft_limit_type type)
{
- const struct nft_limit *priv = nft_expr_priv(expr);
+ u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
+ u64 rate = limit->rate - limit->burst;
- if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate)))
- goto nla_put_failure;
- if (nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(priv->unit)))
+ if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate)) ||
+ nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs)) ||
+ nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(limit->burst)) ||
+ nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)))
goto nla_put_failure;
return 0;
return -1;
}
+struct nft_limit_pkts {
+ struct nft_limit limit;
+ u64 cost;
+};
+
+static void nft_limit_pkts_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_limit_pkts *priv = nft_expr_priv(expr);
+
+ if (nft_limit_eval(&priv->limit, priv->cost))
+ regs->verdict.code = NFT_BREAK;
+}
+
+static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
+ [NFTA_LIMIT_RATE] = { .type = NLA_U64 },
+ [NFTA_LIMIT_UNIT] = { .type = NLA_U64 },
+ [NFTA_LIMIT_BURST] = { .type = NLA_U32 },
+ [NFTA_LIMIT_TYPE] = { .type = NLA_U32 },
+};
+
+static int nft_limit_pkts_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_limit_pkts *priv = nft_expr_priv(expr);
+ int err;
+
+ err = nft_limit_init(&priv->limit, tb);
+ if (err < 0)
+ return err;
+
+ priv->cost = div_u64(priv->limit.nsecs, priv->limit.rate);
+ return 0;
+}
+
+static int nft_limit_pkts_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_limit_pkts *priv = nft_expr_priv(expr);
+
+ return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
+}
+
static struct nft_expr_type nft_limit_type;
-static const struct nft_expr_ops nft_limit_ops = {
+static const struct nft_expr_ops nft_limit_pkts_ops = {
+ .type = &nft_limit_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_pkts)),
+ .eval = nft_limit_pkts_eval,
+ .init = nft_limit_pkts_init,
+ .dump = nft_limit_pkts_dump,
+};
+
+static void nft_limit_pkt_bytes_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_limit *priv = nft_expr_priv(expr);
+ u64 cost = div_u64(priv->nsecs * pkt->skb->len, priv->rate);
+
+ if (nft_limit_eval(priv, cost))
+ regs->verdict.code = NFT_BREAK;
+}
+
+static int nft_limit_pkt_bytes_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_limit *priv = nft_expr_priv(expr);
+
+ return nft_limit_init(priv, tb);
+}
+
+static int nft_limit_pkt_bytes_dump(struct sk_buff *skb,
+ const struct nft_expr *expr)
+{
+ const struct nft_limit *priv = nft_expr_priv(expr);
+
+ return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
+}
+
+static const struct nft_expr_ops nft_limit_pkt_bytes_ops = {
.type = &nft_limit_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_limit)),
- .eval = nft_limit_eval,
- .init = nft_limit_init,
- .dump = nft_limit_dump,
+ .eval = nft_limit_pkt_bytes_eval,
+ .init = nft_limit_pkt_bytes_init,
+ .dump = nft_limit_pkt_bytes_dump,
};
+static const struct nft_expr_ops *
+nft_limit_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+{
+ if (tb[NFTA_LIMIT_TYPE] == NULL)
+ return &nft_limit_pkts_ops;
+
+ switch (ntohl(nla_get_be32(tb[NFTA_LIMIT_TYPE]))) {
+ case NFT_LIMIT_PKTS:
+ return &nft_limit_pkts_ops;
+ case NFT_LIMIT_PKT_BYTES:
+ return &nft_limit_pkt_bytes_ops;
+ }
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static struct nft_expr_type nft_limit_type __read_mostly = {
.name = "limit",
- .ops = &nft_limit_ops,
+ .select_ops = nft_limit_select_ops,
.policy = nft_limit_policy,
.maxattr = NFTA_LIMIT_MAX,
.flags = NFT_EXPR_STATEFUL,
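To make the token arithmetic concrete: a rule of ten packets per second (NFTA_LIMIT_RATE = 10, NFTA_LIMIT_UNIT = 1, no burst) yields nsecs = 1 * NSEC_PER_SEC = 10^9, and nft_limit_pkts_init derives a per-packet cost of div_u64(10^9, 10) = 10^8 tokens. The bucket starts full at tokens_max = 10^9 and refills at one token per elapsed nanosecond, so up to ten packets can pass back to back; after that, packets drain through at the sustained rate of one per 10^8 ns, and anything faster makes nft_limit_eval run dry and the expression break the rule with NFT_BREAK.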
*/
#include <linux/kernel.h>
+#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
+/* add the vlan header back into the user buffer if the tag was stripped by offloads */
+static bool
+nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+{
+ int mac_off = skb_mac_header(skb) - skb->data;
+ u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
+ struct vlan_ethhdr veth;
+
+ vlanh = (u8 *) &veth;
+ if (offset < ETH_HLEN) {
+ u8 ethlen = min_t(u8, len, ETH_HLEN - offset);
+
+ if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
+ return false;
+
+ veth.h_vlan_proto = skb->vlan_proto;
+
+ memcpy(dst_u8, vlanh + offset, ethlen);
+
+ len -= ethlen;
+ if (len == 0)
+ return true;
+
+ dst_u8 += ethlen;
+ offset = ETH_HLEN;
+ } else if (offset >= VLAN_ETH_HLEN) {
+ offset -= VLAN_HLEN;
+ goto skip;
+ }
+
+ veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+ veth.h_vlan_encapsulated_proto = skb->protocol;
+
+ vlanh += offset;
+
+ vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
+ memcpy(dst_u8, vlanh, vlan_len);
+
+ len -= vlan_len;
+ if (!len)
+ return true;
+
+ dst_u8 += vlan_len;
+ skip:
+ return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
+}
+
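Worked offsets for the offloaded-tag path (ETH_HLEN = 14, VLAN_HLEN = 4, VLAN_ETH_HLEN = 18): a two-byte load at offset 12 now returns the reconstructed h_vlan_proto, i.e. the 802.1Q TPID, rather than the encapsulated EtherType; loads in the 14..17 range are served from the synthesized TCI and encapsulated-protocol fields of veth; and a load at offset 18 or beyond is rewound by VLAN_HLEN and copied directly from the skb, where the tag bytes are absent.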
static void nft_payload_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
u32 *dest = &regs->data[priv->dreg];
int offset;
+ dest[priv->len / NFT_REG32_SIZE] = 0;
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
goto err;
+
+ if (skb_vlan_tag_present(skb)) {
+ if (!nft_payload_copy_vlan(dest, skb,
+ priv->offset, priv->len))
+ goto err;
+ return;
+ }
offset = skb_mac_header(skb) - skb->data;
break;
case NFT_PAYLOAD_NETWORK_HEADER:
}
offset += priv->offset;
- dest[priv->len / NFT_REG32_SIZE] = 0;
if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
goto err;
return;
#endif
}
+static u16 xt_ct_flags_to_dir(const struct xt_ct_target_info_v1 *info)
+{
+ switch (info->flags & (XT_CT_ZONE_DIR_ORIG |
+ XT_CT_ZONE_DIR_REPL)) {
+ case XT_CT_ZONE_DIR_ORIG:
+ return NF_CT_ZONE_DIR_ORIG;
+ case XT_CT_ZONE_DIR_REPL:
+ return NF_CT_ZONE_DIR_REPL;
+ default:
+ return NF_CT_DEFAULT_ZONE_DIR;
+ }
+}
+
static int xt_ct_tg_check(const struct xt_tgchk_param *par,
struct xt_ct_target_info_v1 *info)
{
+ struct nf_conntrack_zone zone;
struct nf_conn *ct;
int ret = -EOPNOTSUPP;
}
#ifndef CONFIG_NF_CONNTRACK_ZONES
- if (info->zone)
+ if (info->zone || info->flags & (XT_CT_ZONE_DIR_ORIG |
+ XT_CT_ZONE_DIR_REPL |
+ XT_CT_ZONE_MARK))
goto err1;
#endif
if (ret < 0)
goto err1;
- ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
+ memset(&zone, 0, sizeof(zone));
+ zone.id = info->zone;
+ zone.dir = xt_ct_flags_to_dir(info);
+ if (info->flags & XT_CT_ZONE_MARK)
+ zone.flags |= NF_CT_FLAG_MARK;
+
+ ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL);
if (!ct) {
ret = -ENOMEM;
goto err2;
inet_proto_csum_replace2(&tcph->check, skb,
htons(oldmss), htons(newmss),
- 0);
+ false);
return 0;
}
}
memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
inet_proto_csum_replace2(&tcph->check, skb,
- htons(len), htons(len + TCPOLEN_MSS), 1);
+ htons(len), htons(len + TCPOLEN_MSS), true);
opt[0] = TCPOPT_MSS;
opt[1] = TCPOLEN_MSS;
opt[2] = (newmss & 0xff00) >> 8;
opt[3] = newmss & 0x00ff;
- inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), 0);
+ inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);
oldval = ((__be16 *)tcph)[6];
tcph->doff += TCPOLEN_MSS/4;
inet_proto_csum_replace2(&tcph->check, skb,
- oldval, ((__be16 *)tcph)[6], 0);
+ oldval, ((__be16 *)tcph)[6], false);
return TCPOLEN_MSS;
}
n <<= 8;
}
inet_proto_csum_replace2(&tcph->check, skb, htons(o),
- htons(n), 0);
+ htons(n), false);
}
memset(opt + i, TCPOPT_NOP, optl);
}
* modify it under the terms of the GNU General Public License
* version 2 or later, as published by the Free Software Foundation.
*/
-#include <linux/ip.h>
#include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/route.h>
#include <linux/skbuff.h>
-#include <linux/notifier.h>
-#include <net/checksum.h>
-#include <net/icmp.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/route.h>
+#include <linux/route.h>
#include <linux/netfilter/x_tables.h>
+#include <net/route.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
#include <linux/netfilter/xt_TEE.h>
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-# define WITH_CONNTRACK 1
-# include <net/netfilter/nf_conntrack.h>
-#endif
-
struct xt_tee_priv {
struct notifier_block notifier;
struct xt_tee_tginfo *tginfo;
static const union nf_inet_addr tee_zero_address;
-static struct net *pick_net(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_NS
- const struct dst_entry *dst;
-
- if (skb->dev != NULL)
- return dev_net(skb->dev);
- dst = skb_dst(skb);
- if (dst != NULL && dst->dev != NULL)
- return dev_net(dst->dev);
-#endif
- return &init_net;
-}
-
-static bool
-tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
-{
- const struct iphdr *iph = ip_hdr(skb);
- struct net *net = pick_net(skb);
- struct rtable *rt;
- struct flowi4 fl4;
-
- memset(&fl4, 0, sizeof(fl4));
- if (info->priv) {
- if (info->priv->oif == -1)
- return false;
- fl4.flowi4_oif = info->priv->oif;
- }
- fl4.daddr = info->gw.ip;
- fl4.flowi4_tos = RT_TOS(iph->tos);
- fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
- fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
- rt = ip_route_output_key(net, &fl4);
- if (IS_ERR(rt))
- return false;
-
- skb_dst_drop(skb);
- skb_dst_set(skb, &rt->dst);
- skb->dev = rt->dst.dev;
- skb->protocol = htons(ETH_P_IP);
- return true;
-}
-
static unsigned int
tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
- struct iphdr *iph;
- if (__this_cpu_read(nf_skb_duplicated))
- return XT_CONTINUE;
- /*
- * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
- * the original skb, which should continue on its way as if nothing has
- * happened. The copy should be independently delivered to the TEE
- * --gateway.
- */
- skb = pskb_copy(skb, GFP_ATOMIC);
- if (skb == NULL)
- return XT_CONTINUE;
-
-#ifdef WITH_CONNTRACK
- /* Avoid counting cloned packets towards the original connection. */
- nf_conntrack_put(skb->nfct);
- skb->nfct = &nf_ct_untracked_get()->ct_general;
- skb->nfctinfo = IP_CT_NEW;
- nf_conntrack_get(skb->nfct);
-#endif
- /*
- * If we are in PREROUTING/INPUT, the checksum must be recalculated
- * since the length could have changed as a result of defragmentation.
- *
- * We also decrease the TTL to mitigate potential TEE loops
- * between two hosts.
- *
- * Set %IP_DF so that the original source is notified of a potentially
- * decreased MTU on the clone route. IPv6 does this too.
- */
- iph = ip_hdr(skb);
- iph->frag_off |= htons(IP_DF);
- if (par->hooknum == NF_INET_PRE_ROUTING ||
- par->hooknum == NF_INET_LOCAL_IN)
- --iph->ttl;
- ip_send_check(iph);
+ nf_dup_ipv4(skb, par->hooknum, &info->gw.in, info->priv->oif);
- if (tee_tg_route4(skb, info)) {
- __this_cpu_write(nf_skb_duplicated, true);
- ip_local_out(skb);
- __this_cpu_write(nf_skb_duplicated, false);
- } else {
- kfree_skb(skb);
- }
return XT_CONTINUE;
}
#if IS_ENABLED(CONFIG_IPV6)
-static bool
-tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
-{
- const struct ipv6hdr *iph = ipv6_hdr(skb);
- struct net *net = pick_net(skb);
- struct dst_entry *dst;
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- if (info->priv) {
- if (info->priv->oif == -1)
- return false;
- fl6.flowi6_oif = info->priv->oif;
- }
- fl6.daddr = info->gw.in6;
- fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
- (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
- fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
- dst = ip6_route_output(net, NULL, &fl6);
- if (dst->error) {
- dst_release(dst);
- return false;
- }
- skb_dst_drop(skb);
- skb_dst_set(skb, dst);
- skb->dev = dst->dev;
- skb->protocol = htons(ETH_P_IPV6);
- return true;
-}
-
static unsigned int
tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
- if (__this_cpu_read(nf_skb_duplicated))
- return XT_CONTINUE;
- skb = pskb_copy(skb, GFP_ATOMIC);
- if (skb == NULL)
- return XT_CONTINUE;
+ nf_dup_ipv6(skb, par->hooknum, &info->gw.in6, info->priv->oif);
-#ifdef WITH_CONNTRACK
- nf_conntrack_put(skb->nfct);
- skb->nfct = &nf_ct_untracked_get()->ct_general;
- skb->nfctinfo = IP_CT_NEW;
- nf_conntrack_get(skb->nfct);
-#endif
- if (par->hooknum == NF_INET_PRE_ROUTING ||
- par->hooknum == NF_INET_LOCAL_IN) {
- struct ipv6hdr *iph = ipv6_hdr(skb);
- --iph->hop_limit;
- }
- if (tee_tg_route6(skb, info)) {
- __this_cpu_write(nf_skb_duplicated, true);
- ip6_local_out(skb);
- __this_cpu_write(nf_skb_duplicated, false);
- } else {
- kfree_skb(skb);
- }
return XT_CONTINUE;
}
#endif
static unsigned int check_hlist(struct net *net,
struct hlist_head *head,
const struct nf_conntrack_tuple *tuple,
- u16 zone,
+ const struct nf_conntrack_zone *zone,
bool *addit)
{
const struct nf_conntrack_tuple_hash *found;
count_tree(struct net *net, struct rb_root *root,
const struct nf_conntrack_tuple *tuple,
const union nf_inet_addr *addr, const union nf_inet_addr *mask,
- u8 family, u16 zone)
+ u8 family, const struct nf_conntrack_zone *zone)
{
struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
struct rb_node **rbnode, *parent;
const struct nf_conntrack_tuple *tuple,
const union nf_inet_addr *addr,
const union nf_inet_addr *mask,
- u_int8_t family, u16 zone)
+ u_int8_t family,
+ const struct nf_conntrack_zone *zone)
{
struct rb_root *root;
int count;
union nf_inet_addr addr;
struct nf_conntrack_tuple tuple;
const struct nf_conntrack_tuple *tuple_ptr = &tuple;
+ const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int connections;
- u16 zone = NF_CT_DEFAULT_ZONE;
ct = nf_ct_get(skb, &ctinfo);
if (ct != NULL) {
struct xt_nfacct_match_info *info = par->matchinfo;
struct nf_acct *nfacct;
- nfacct = nfnl_acct_find_get(info->name);
+ nfacct = nfnl_acct_find_get(par->net, info->name);
if (nfacct == NULL) {
pr_info("xt_nfacct: accounting object with name `%s' "
"does not exists\n", info->name);
if (nh->protocol == IPPROTO_TCP) {
if (likely(transport_len >= sizeof(struct tcphdr)))
inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
- addr, new_addr, 1);
+ addr, new_addr, true);
} else if (nh->protocol == IPPROTO_UDP) {
if (likely(transport_len >= sizeof(struct udphdr))) {
struct udphdr *uh = udp_hdr(skb);
if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace4(&uh->check, skb,
- addr, new_addr, 1);
+ addr, new_addr, true);
if (!uh->check)
uh->check = CSUM_MANGLED_0;
}
if (l4_proto == NEXTHDR_TCP) {
if (likely(transport_len >= sizeof(struct tcphdr)))
inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
- addr, new_addr, 1);
+ addr, new_addr, true);
} else if (l4_proto == NEXTHDR_UDP) {
if (likely(transport_len >= sizeof(struct udphdr))) {
struct udphdr *uh = udp_hdr(skb);
if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace16(&uh->check, skb,
- addr, new_addr, 1);
+ addr, new_addr, true);
if (!uh->check)
uh->check = CSUM_MANGLED_0;
}
} else if (l4_proto == NEXTHDR_ICMP) {
if (likely(transport_len >= sizeof(struct icmp6hdr)))
inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
- skb, addr, new_addr, 1);
+ skb, addr, new_addr, true);
}
}
static void set_tp_port(struct sk_buff *skb, __be16 *port,
__be16 new_port, __sum16 *check)
{
- inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+ inet_proto_csum_replace2(check, skb, *port, new_port, false);
*port = new_port;
}
tun_flags |= TUNNEL_KEY;
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
- SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
+ SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
nla_get_in_addr(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
- SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
+ SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
nla_get_in_addr(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_TOS:
- SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
+ SW_FLOW_KEY_PUT(match, tun_key.tos,
nla_get_u8(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_TTL:
- SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
+ SW_FLOW_KEY_PUT(match, tun_key.ttl,
nla_get_u8(a), is_mask);
ttl = true;
break;
}
if (!is_mask) {
- if (!match->key->tun_key.ipv4_dst) {
+ if (!match->key->tun_key.u.ipv4.dst) {
OVS_NLERR(log, "IPv4 tunnel dst address is zero");
return -EINVAL;
}
if (output->tun_flags & TUNNEL_KEY &&
nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
return -EMSGSIZE;
- if (output->ipv4_src &&
+ if (output->u.ipv4.src &&
nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
- output->ipv4_src))
+ output->u.ipv4.src))
return -EMSGSIZE;
- if (output->ipv4_dst &&
+ if (output->u.ipv4.dst &&
nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
- output->ipv4_dst))
+ output->u.ipv4.dst))
return -EMSGSIZE;
- if (output->ipv4_tos &&
- nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
+ if (output->tos &&
+ nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
return -EMSGSIZE;
- if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
+ if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
/* Userspace does not send tunnel attributes that are 0,
 * but we should not wildcard them nonetheless.
*/
- if (match->key->tun_key.ipv4_dst)
+ if (match->key->tun_key.u.ipv4.dst)
SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
0xff, true);
if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
goto nla_put_failure;
- if ((swkey->tun_key.ipv4_dst || is_mask)) {
+ if ((swkey->tun_key.u.ipv4.dst || is_mask)) {
const void *opts = NULL;
if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
static int flow_key_start(const struct sw_flow_key *key)
{
- if (key->tun_key.ipv4_dst)
+ if (key->tun_key.u.ipv4.dst)
return 0;
else
return rounddown(offsetof(struct sw_flow_key, phy),
}
err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr,
- tun_key->ipv4_dst, tun_key->ipv4_tos,
- tun_key->ipv4_ttl, df, sport, dport,
+ tun_key->u.ipv4.dst, tun_key->tos,
+ tun_key->ttl, df, sport, dport,
tun_key->tun_flags, vni, opts_len, opts,
!!(tun_key->tun_flags & TUNNEL_CSUM), false);
if (err < 0)
skb_push(skb, ETH_HLEN);
ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
- ovs_vport_receive(vport, skb, skb_tunnel_info(skb, AF_INET));
+ ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
return;
error:
* saddr, tp_src and tp_dst
*/
__ip_tunnel_info_init(egress_tun_info,
- fl.saddr, tun_key->ipv4_dst,
- tun_key->ipv4_tos,
- tun_key->ipv4_ttl,
+ fl.saddr, tun_key->u.ipv4.dst,
+ tun_key->tos,
+ tun_key->ttl,
tp_src, tp_dst,
tun_key->tun_id,
tun_key->tun_flags,
struct rtable *rt;
memset(fl, 0, sizeof(*fl));
- fl->daddr = key->ipv4_dst;
- fl->saddr = key->ipv4_src;
- fl->flowi4_tos = RT_TOS(key->ipv4_tos);
+ fl->daddr = key->u.ipv4.dst;
+ fl->saddr = key->u.ipv4.src;
+ fl->flowi4_tos = RT_TOS(key->tos);
fl->flowi4_mark = mark;
fl->flowi4_proto = protocol;
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
+#include <linux/bpf.h>
#include "internal.h"
return skb_get_queue_mapping(skb) % num;
}
+static unsigned int fanout_demux_bpf(struct packet_fanout *f,
+ struct sk_buff *skb,
+ unsigned int num)
+{
+ struct bpf_prog *prog;
+ unsigned int ret = 0;
+
+ rcu_read_lock();
+ prog = rcu_dereference(f->bpf_prog);
+ if (prog)
+ ret = BPF_PROG_RUN(prog, skb) % num;
+ rcu_read_unlock();
+
+ return ret;
+}
+
static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
return f->flags & (flag >> 8);
case PACKET_FANOUT_ROLLOVER:
idx = fanout_demux_rollover(f, skb, 0, false, num);
break;
+ case PACKET_FANOUT_CBPF:
+ case PACKET_FANOUT_EBPF:
+ idx = fanout_demux_bpf(f, skb, num);
+ break;
}
if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
return false;
}
+static void fanout_init_data(struct packet_fanout *f)
+{
+ switch (f->type) {
+ case PACKET_FANOUT_LB:
+ atomic_set(&f->rr_cur, 0);
+ break;
+ case PACKET_FANOUT_CBPF:
+ case PACKET_FANOUT_EBPF:
+ RCU_INIT_POINTER(f->bpf_prog, NULL);
+ break;
+ }
+}
+
+static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
+{
+ struct bpf_prog *old;
+
+ spin_lock(&f->lock);
+ old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
+ rcu_assign_pointer(f->bpf_prog, new);
+ spin_unlock(&f->lock);
+
+ if (old) {
+ synchronize_net();
+ bpf_prog_destroy(old);
+ }
+}
+
+static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
+ unsigned int len)
+{
+ struct bpf_prog *new;
+ struct sock_fprog fprog;
+ int ret;
+
+ if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
+ return -EPERM;
+ if (len != sizeof(fprog))
+ return -EINVAL;
+ if (copy_from_user(&fprog, data, len))
+ return -EFAULT;
+
+ ret = bpf_prog_create_from_user(&new, &fprog, NULL);
+ if (ret)
+ return ret;
+
+ __fanout_set_data_bpf(po->fanout, new);
+ return 0;
+}
+
+static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
+ unsigned int len)
+{
+ struct bpf_prog *new;
+ u32 fd;
+
+ if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
+ return -EPERM;
+ if (len != sizeof(fd))
+ return -EINVAL;
+ if (copy_from_user(&fd, data, len))
+ return -EFAULT;
+
+ new = bpf_prog_get(fd);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+ if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) {
+ bpf_prog_put(new);
+ return -EINVAL;
+ }
+
+ __fanout_set_data_bpf(po->fanout, new);
+ return 0;
+}
+
+static int fanout_set_data(struct packet_sock *po, char __user *data,
+ unsigned int len)
+{
+ switch (po->fanout->type) {
+ case PACKET_FANOUT_CBPF:
+ return fanout_set_data_cbpf(po, data, len);
+ case PACKET_FANOUT_EBPF:
+ return fanout_set_data_ebpf(po, data, len);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void fanout_release_data(struct packet_fanout *f)
+{
+ switch (f->type) {
+ case PACKET_FANOUT_CBPF:
+ case PACKET_FANOUT_EBPF:
+ __fanout_set_data_bpf(f, NULL);
+ }
+}
+
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
struct packet_sock *po = pkt_sk(sk);
case PACKET_FANOUT_CPU:
case PACKET_FANOUT_RND:
case PACKET_FANOUT_QM:
+ case PACKET_FANOUT_CBPF:
+ case PACKET_FANOUT_EBPF:
break;
default:
return -EINVAL;
match->id = id;
match->type = type;
match->flags = flags;
- atomic_set(&match->rr_cur, 0);
INIT_LIST_HEAD(&match->list);
spin_lock_init(&match->lock);
atomic_set(&match->sk_ref, 0);
+ fanout_init_data(match);
match->prot_hook.type = po->prot_hook.type;
match->prot_hook.dev = po->prot_hook.dev;
match->prot_hook.func = packet_rcv_fanout;
if (atomic_dec_and_test(&f->sk_ref)) {
list_del(&f->list);
dev_remove_pack(&f->prot_hook);
+ fanout_release_data(f);
kfree(f);
}
mutex_unlock(&fanout_mutex);
return fanout_add(sk, val & 0xffff, val >> 16);
}
+ case PACKET_FANOUT_DATA:
+ {
+ if (!po->fanout)
+ return -EINVAL;
+
+ return fanout_set_data(po, optval, optlen);
+ }
case PACKET_TX_HAS_OFF:
{
unsigned int val;
u16 id;
u8 type;
u8 flags;
- atomic_t rr_cur;
+ union {
+ atomic_t rr_cur;
+ struct bpf_prog __rcu *bpf_prog;
+ };
struct list_head list;
struct sock *arr[PACKET_FANOUT_MAX];
spinlock_t lock;
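
The new PACKET_FANOUT_CBPF and PACKET_FANOUT_EBPF modes steer each packet to the socket whose index the attached BPF program returns (modulo the group size), with the program installed through the new PACKET_FANOUT_DATA socket option. A hedged userspace sketch, with a deliberately trivial one-load filter:

    #include <linux/filter.h>
    #include <linux/if_packet.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/socket.h>

    static void fanout_attach_cbpf(int fd, unsigned short group_id)
    {
        struct sock_filter code[] = {
            { BPF_LD | BPF_B | BPF_ABS, 0, 0, 0 }, /* A = pkt[0] */
            { BPF_RET | BPF_A,          0, 0, 0 }, /* return A   */
        };
        struct sock_fprog prog = {
            .len    = sizeof(code) / sizeof(code[0]),
            .filter = code,
        };
        int val = (PACKET_FANOUT_CBPF << 16) | group_id;

        /* join the group first: PACKET_FANOUT_DATA is rejected with
         * -EINVAL until po->fanout is set (see fanout_set_data above)
         */
        if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)) ||
            setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &prog,
                       sizeof(prog))) {
            perror("PACKET_FANOUT");
            exit(1);
        }
    }

For PACKET_FANOUT_EBPF the same option instead takes the fd of a loaded BPF_PROG_TYPE_SOCKET_FILTER program; since fanout_set_data_ebpf() takes its own reference via bpf_prog_get(), the fd can be closed once attached.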
config RFKILL_GPIO
tristate "GPIO RFKILL driver"
- depends on RFKILL && GPIOLIB
+ depends on RFKILL
+ depends on GPIOLIB || COMPILE_TEST
default n
help
If you say yes here you get support of a generic gpio RFKILL
#ifdef CONFIG_ACPI
static const struct acpi_device_id rfkill_acpi_match[] = {
{ "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
- { "BCM2E39", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E40", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E64", RFKILL_TYPE_BLUETOOTH },
struct nf_conntrack_tuple tuple;
enum ip_conntrack_info ctinfo;
struct tcf_connmark_info *ca = a->priv;
+ struct nf_conntrack_zone zone;
struct nf_conn *c;
int proto;
proto, &tuple))
goto out;
- thash = nf_conntrack_find_get(dev_net(skb->dev), ca->zone, &tuple);
+ zone.id = ca->zone;
+ zone.dir = NF_CT_DEFAULT_ZONE_DIR;
+
+ thash = nf_conntrack_find_get(dev_net(skb->dev), &zone, &tuple);
if (!thash)
goto out;
goto drop;
tcph = (void *)(skb_network_header(skb) + ihl);
- inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
+ inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
+ true);
break;
}
case IPPROTO_UDP:
udph = (void *)(skb_network_header(skb) + ihl);
if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace4(&udph->check, skb, addr,
- new_addr, 1);
+ new_addr, true);
if (!udph->check)
udph->check = CSUM_MANGLED_0;
}
iph->saddr = new_addr;
inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
- 0);
+ false);
break;
}
default:
bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
if (opt == NULL) {
- u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
+ u32 limit = qdisc_dev(sch)->tx_queue_len;
if (is_bfifo)
limit *= psched_mtu(qdisc_dev(sch));
{
struct Qdisc *qdisc = &noqueue_qdisc;
- if (dev->tx_queue_len) {
+ if (dev->tx_queue_len && !(dev->priv_flags & IFF_NO_QUEUE)) {
qdisc = qdisc_create_dflt(dev_queue,
default_qdisc_ops, TC_H_ROOT);
if (!qdisc) {
txq = netdev_get_tx_queue(dev, 0);
- if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
+ if (!netif_is_multiqueue(dev) ||
+ dev->tx_queue_len == 0 ||
+ dev->priv_flags & IFF_NO_QUEUE) {
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
dev->qdisc = txq->qdisc_sleeping;
atomic_inc(&dev->qdisc->refcnt);
if (tb[TCA_GRED_LIMIT])
sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
- else {
- u32 qlen = qdisc_dev(sch)->tx_queue_len ? : 1;
-
- sch->limit = qlen * psched_mtu(qdisc_dev(sch));
- }
+ else
+ sch->limit = qdisc_dev(sch)->tx_queue_len
+ * psched_mtu(qdisc_dev(sch));
return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}
if (tb[TCA_HTB_DIRECT_QLEN])
q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
- else {
+ else
q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
- if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
- q->direct_qlen = 2;
- }
+
if ((q->rate2quantum = gopt->rate2quantum) < 1)
q->rate2quantum = 1;
q->defcls = gopt->defcls;
q->unplug_indefinite = false;
if (opt == NULL) {
- /* We will set a default limit of 100 pkts (~150kB)
- * in case tx_queue_len is not available. The
- * default value is completely arbitrary.
- */
- u32 pkt_limit = qdisc_dev(sch)->tx_queue_len ? : 100;
- q->limit = pkt_limit * psched_mtu(qdisc_dev(sch));
+ q->limit = qdisc_dev(sch)->tx_queue_len
+ * psched_mtu(qdisc_dev(sch));
} else {
struct tc_plug_qopt *ctl = nla_data(opt);
limit = ctl->limit;
if (limit == 0)
- limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
+ limit = qdisc_dev(sch)->tx_queue_len;
child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
if (IS_ERR(child))
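
The queueing hunks above all drop the old convention that tx_queue_len == 0 implies a queueless device; that role moves to the explicit IFF_NO_QUEUE priv flag tested in the sch_generic hunks. A minimal kernel-side sketch of a hypothetical virtual driver opting out under the new scheme:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    /* hypothetical setup callback for a queueless virtual device */
    static void foo_setup(struct net_device *dev)
    {
        ether_setup(dev);
        dev->priv_flags |= IFF_NO_QUEUE;  /* attach_default_qdiscs() then
                                           * leaves it on the noqueue qdisc */
    }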
l->state = LINK_RESET;
break;
case LINK_ESTABLISH_EVT:
+ case LINK_SYNCH_END_EVT:
break;
case LINK_SYNCH_BEGIN_EVT:
l->state = LINK_SYNCHING;
break;
- case LINK_SYNCH_END_EVT:
case LINK_FAILOVER_BEGIN_EVT:
case LINK_FAILOVER_END_EVT:
default:
u16 peers_snd_nxt = msg_next_sent(hdr);
u16 peers_tol = msg_link_tolerance(hdr);
u16 peers_prio = msg_linkprio(hdr);
+ u16 rcv_nxt = l->rcv_nxt;
char *if_name;
int rc = 0;
break;
/* Send NACK if peer has sent pkts we haven't received yet */
- if (more(peers_snd_nxt, l->rcv_nxt))
+ if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
rcvgap = peers_snd_nxt - l->rcv_nxt;
if (rcvgap || (msg_probe(hdr)))
tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
}
len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
- if (TLV_GET_LEN(msg.req) && !TLV_OK(msg.req, len)) {
+ if (len && !TLV_OK(msg.req, len)) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
err = -EOPNOTSUPP;
goto send;
/* There is still a working link => initiate failover */
tnl = node_active_link(n, 0);
+ tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+ tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
tipc_link_reset(l);
goto exit;
}
tipc_link_reset(l);
+ if (n->state == NODE_FAILINGOVER)
+ tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
le->link = l;
n->link_cnt++;
tipc_node_calculate_timer(n, l);
u16 exp_pkts = msg_msgcnt(hdr);
u16 rcv_nxt, syncpt, dlv_nxt;
int state = n->state;
- struct tipc_link *l, *pl = NULL;
+ struct tipc_link *l, *tnl, *pl = NULL;
struct tipc_media_addr *maddr;
int i, pb_id;
}
/* Open parallel link when tunnel link reaches synch point */
- if ((n->state == NODE_FAILINGOVER) && !tipc_link_is_failingover(l)) {
+ if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
if (!more(rcv_nxt, n->sync_point))
return true;
tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
return true;
}
+ /* No synching needed if only one link */
+ if (!pl || !tipc_link_is_up(pl))
+ return true;
+
/* Initiate or update synch mode if applicable */
if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
syncpt = iseqno + exp_pkts - 1;
/* Open tunnel link when parallel link reaches synch point */
if ((n->state == NODE_SYNCHING) && tipc_link_is_synching(l)) {
- if (pl)
- dlv_nxt = mod(pl->rcv_nxt - skb_queue_len(pl->inputq));
- if (!pl || more(dlv_nxt, n->sync_point)) {
- tipc_link_fsm_evt(l, LINK_SYNCH_END_EVT);
+ if (tipc_link_is_synching(l)) {
+ tnl = l;
+ } else {
+ tnl = pl;
+ pl = l;
+ }
+ dlv_nxt = pl->rcv_nxt - mod(skb_queue_len(pl->inputq));
+ if (more(dlv_nxt, n->sync_point)) {
+ tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
return true;
}
+ if (l == pl)
+ return true;
if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
return true;
if (usr == LINK_PROTOCOL)
INIT_LIST_HEAD(&rdev->bss_list);
INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results);
+ INIT_LIST_HEAD(&rdev->mlme_unreg);
+ spin_lock_init(&rdev->mlme_unreg_lock);
+ INIT_WORK(&rdev->mlme_unreg_wk, cfg80211_mlme_unreg_wk);
INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk,
cfg80211_dfs_channels_update_work);
#ifdef CONFIG_CFG80211_WEXT
cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
flush_work(&rdev->destroy_work);
flush_work(&rdev->sched_scan_stop_wk);
+ flush_work(&rdev->mlme_unreg_wk);
#ifdef CONFIG_PM
if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
switch (wdev->iftype) {
case NL80211_IFTYPE_P2P_DEVICE:
+ cfg80211_mlme_purge_registrations(wdev);
cfg80211_stop_p2p_device(rdev, wdev);
break;
default:
struct list_head beacon_registrations;
spinlock_t beacon_registrations_lock;
+ struct list_head mlme_unreg;
+ spinlock_t mlme_unreg_lock;
+ struct work_struct mlme_unreg_wk;
+
/* protected by RTNL only */
int num_running_ifaces;
int num_running_monitor_ifaces;
int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
u16 frame_type, const u8 *match_data,
int match_len);
+void cfg80211_mlme_unreg_wk(struct work_struct *wk);
void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
* cfg80211 MLME SAP interface
*
* Copyright (c) 2009, Jouni Malinen <j@w1.fi>
+ * Copyright (c) 2015 Intel Deutschland GmbH
*/
#include <linux/kernel.h>
struct cfg80211_mgmt_registration {
struct list_head list;
+ struct wireless_dev *wdev;
u32 nlportid;
u8 match[];
};
+static void
+cfg80211_process_mlme_unregistrations(struct cfg80211_registered_device *rdev)
+{
+ struct cfg80211_mgmt_registration *reg;
+
+ ASSERT_RTNL();
+
+ spin_lock_bh(&rdev->mlme_unreg_lock);
+ while ((reg = list_first_entry_or_null(&rdev->mlme_unreg,
+ struct cfg80211_mgmt_registration,
+ list))) {
+ list_del(&reg->list);
+ spin_unlock_bh(&rdev->mlme_unreg_lock);
+
+ if (rdev->ops->mgmt_frame_register) {
+ u16 frame_type = le16_to_cpu(reg->frame_type);
+
+ rdev_mgmt_frame_register(rdev, reg->wdev,
+ frame_type, false);
+ }
+
+ kfree(reg);
+
+ spin_lock_bh(&rdev->mlme_unreg_lock);
+ }
+ spin_unlock_bh(&rdev->mlme_unreg_lock);
+}
+
+void cfg80211_mlme_unreg_wk(struct work_struct *wk)
+{
+ struct cfg80211_registered_device *rdev;
+
+ rdev = container_of(wk, struct cfg80211_registered_device,
+ mlme_unreg_wk);
+
+ rtnl_lock();
+ cfg80211_process_mlme_unregistrations(rdev);
+ rtnl_unlock();
+}
+
int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
u16 frame_type, const u8 *match_data,
int match_len)
nreg->match_len = match_len;
nreg->nlportid = snd_portid;
nreg->frame_type = cpu_to_le16(frame_type);
+ nreg->wdev = wdev;
list_add(&nreg->list, &wdev->mgmt_registrations);
+ spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+ /* process all unregistrations to avoid driver confusion */
+ cfg80211_process_mlme_unregistrations(rdev);
if (rdev->ops->mgmt_frame_register)
rdev_mgmt_frame_register(rdev, wdev, frame_type, true);
+ return 0;
+
out:
spin_unlock_bh(&wdev->mgmt_registrations_lock);
if (reg->nlportid != nlportid)
continue;
- if (rdev->ops->mgmt_frame_register) {
- u16 frame_type = le16_to_cpu(reg->frame_type);
-
- rdev_mgmt_frame_register(rdev, wdev,
- frame_type, false);
- }
-
list_del(&reg->list);
- kfree(reg);
+ spin_lock(&rdev->mlme_unreg_lock);
+ list_add_tail(&reg->list, &rdev->mlme_unreg);
+ spin_unlock(&rdev->mlme_unreg_lock);
+
+ schedule_work(&rdev->mlme_unreg_wk);
}
spin_unlock_bh(&wdev->mgmt_registrations_lock);
void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
{
- struct cfg80211_mgmt_registration *reg, *tmp;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
spin_lock_bh(&wdev->mgmt_registrations_lock);
-
- list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
- list_del(&reg->list);
- kfree(reg);
- }
-
+ spin_lock(&rdev->mlme_unreg_lock);
+ list_splice_tail_init(&wdev->mgmt_registrations, &rdev->mlme_unreg);
+ spin_unlock(&rdev->mlme_unreg_lock);
spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+ cfg80211_process_mlme_unregistrations(rdev);
}
int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
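
Taken together, the mlme hunks convert in-place teardown into a two-phase handoff: registrations are unlinked while wdev->mgmt_registrations_lock is held and parked on rdev->mlme_unreg, and the driver's mgmt_frame_register() callback, which may sleep (hence the might_sleep() annotation added further below), runs later from cfg80211_process_mlme_unregistrations() under RTNL, either via the work item or synchronously where RTNL is already held. The handoff reduces to this pattern, condensed from the code above:

    spin_lock(&rdev->mlme_unreg_lock);
    list_add_tail(&reg->list, &rdev->mlme_unreg);  /* park, don't free */
    spin_unlock(&rdev->mlme_unreg_lock);
    schedule_work(&rdev->mlme_unreg_wk);           /* may sleep later  */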
rdev->wiphy.frag_threshold = old_frag_threshold;
rdev->wiphy.rts_threshold = old_rts_threshold;
rdev->wiphy.coverage_class = old_coverage_class;
+ return result;
}
}
return 0;
int err;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
return -EOPNOTSUPP;
if (!rdev->ops->set_mcast_rate)
rdev_mgmt_frame_register(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, u16 frame_type, bool reg)
{
+ might_sleep();
+
trace_rdev_mgmt_frame_register(&rdev->wiphy, wdev, frame_type, reg);
rdev->ops->mgmt_frame_register(&rdev->wiphy, wdev, frame_type, reg);
trace_rdev_return_void(&rdev->wiphy);
static const struct ieee80211_reg_rule *
freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
- const struct ieee80211_regdomain *regd)
+ const struct ieee80211_regdomain *regd, u32 bw)
{
int i;
bool band_rule_found = false;
if (!band_rule_found)
band_rule_found = freq_in_rule_band(fr, center_freq);
- bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
+ bw_fits = reg_does_bw_fit(fr, center_freq, bw);
if (band_rule_found && bw_fits)
return rr;
return ERR_PTR(-EINVAL);
}
-const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
- u32 center_freq)
+const struct ieee80211_reg_rule *__freq_reg_info(struct wiphy *wiphy,
+ u32 center_freq, u32 min_bw)
{
- const struct ieee80211_regdomain *regd;
+ const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
+ const struct ieee80211_reg_rule *reg_rule = NULL;
+ u32 bw;
- regd = reg_get_regdomain(wiphy);
+ for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
+ reg_rule = freq_reg_info_regd(wiphy, center_freq, regd, bw);
+ if (!IS_ERR(reg_rule))
+ return reg_rule;
+ }
- return freq_reg_info_regd(wiphy, center_freq, regd);
+ return reg_rule;
+}
+
+const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
+ u32 center_freq)
+{
+ return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(20));
}
EXPORT_SYMBOL(freq_reg_info);
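
The lookup now retries at successively halved bandwidths instead of hard-coding 20 MHz, so a rule that only admits a narrow channel can still match. A worked trace, assuming a min_bw of MHZ_TO_KHZ(5) and a rule that fits at most 10 MHz at center_freq:

    /* bw = 20000 kHz: reg_does_bw_fit() fails -> ERR_PTR(-EINVAL)
     * bw = 10000 kHz: rule fits               -> returned to the caller
     * (5000 kHz is never probed; the loop exits on first success)
     */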
if (reg_rule->flags & NL80211_RRF_AUTO_BW)
max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
+ /* If we get a reg_rule we can assume that at least 5 MHz fits */
+ if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+ MHZ_TO_KHZ(10)))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+ MHZ_TO_KHZ(20)))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+ if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags = IEEE80211_CHAN_NO_HT40;
+ bw_flags |= IEEE80211_CHAN_NO_HT40;
if (max_bandwidth_khz < MHZ_TO_KHZ(80))
bw_flags |= IEEE80211_CHAN_NO_80MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(160))
const struct ieee80211_power_rule *power_rule = NULL;
const struct ieee80211_freq_range *freq_range = NULL;
u32 max_bandwidth_khz;
+ u32 bw;
- reg_rule = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq),
- regd);
+ for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) {
+ reg_rule = freq_reg_info_regd(wiphy,
+ MHZ_TO_KHZ(chan->center_freq),
+ regd, bw);
+ if (!IS_ERR(reg_rule))
+ break;
+ }
if (IS_ERR(reg_rule)) {
REG_DBG_PRINT("Disabling freq %d MHz as custom regd has no rule that fits it\n",
if (reg_rule->flags & NL80211_RRF_AUTO_BW)
max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
+ /* If we get a reg_rule we can assume that at least 5 MHz fits */
+ if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+ MHZ_TO_KHZ(10)))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+ MHZ_TO_KHZ(20)))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+ if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags = IEEE80211_CHAN_NO_HT40;
+ bw_flags |= IEEE80211_CHAN_NO_HT40;
if (max_bandwidth_khz < MHZ_TO_KHZ(80))
bw_flags |= IEEE80211_CHAN_NO_80MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(160))
reg_process_hint_core(reg_request);
return;
case NL80211_REGDOM_SET_BY_USER:
- treatment = reg_process_hint_user(reg_request);
- if (treatment == REG_REQ_IGNORE ||
- treatment == REG_REQ_ALREADY_SET)
- return;
+ reg_process_hint_user(reg_request);
return;
case NL80211_REGDOM_SET_BY_DRIVER:
if (!wiphy)
goto out_free;
}
- /* This is required so that the orig_* parameters are saved */
+ /* This is required so that the orig_* parameters are saved.
+ * NOTE: treatment must be set for any case that reaches here!
+ */
if (treatment == REG_REQ_ALREADY_SET && wiphy &&
wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
wiphy_update_regulatory(wiphy, reg_request->initiator);
rcu_read_unlock();
}
-static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
+static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
+ int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr,
int family)
if (unlikely(afinfo == NULL))
return ERR_PTR(-EAFNOSUPPORT);
- dst = afinfo->dst_lookup(net, tos, saddr, daddr);
+ dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
xfrm_policy_put_afinfo(afinfo);
return dst;
}
-static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
+static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
+ int tos, int oif,
xfrm_address_t *prev_saddr,
xfrm_address_t *prev_daddr,
int family)
daddr = x->coaddr;
}
- dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
+ dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
if (!IS_ERR(dst)) {
if (prev_saddr != saddr)
}
static int
-xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
- unsigned short family)
+xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
+ xfrm_address_t *remote, unsigned short family)
{
int err;
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
if (unlikely(afinfo == NULL))
return -EINVAL;
- err = afinfo->get_saddr(net, local, remote);
+ err = afinfo->get_saddr(net, oif, local, remote);
xfrm_policy_put_afinfo(afinfo);
return err;
}
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
if (xfrm_addr_any(local, tmpl->encap_family)) {
- error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
+ error = xfrm_get_saddr(net, fl->flowi_oif,
+ &tmp, remote,
+ tmpl->encap_family);
if (error)
goto fail;
local = &tmp;
if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
family = xfrm[i]->props.family;
- dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
- family);
+ dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
+ &saddr, &daddr, family);
err = PTR_ERR(dst);
if (IS_ERR(dst))
goto put_states;
return err;
if (attrs[XFRMA_ADDRESS_FILTER]) {
- filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+ filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
+ sizeof(*filter), GFP_KERNEL);
if (filter == NULL)
return -ENOMEM;
-
- memcpy(filter, nla_data(attrs[XFRMA_ADDRESS_FILTER]),
- sizeof(*filter));
}
if (attrs[XFRMA_PROTO])
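
kmemdup() is a drop-in replacement for the open-coded pair it supersedes here: it allocates with the given flags, copies from the source, and returns NULL on allocation failure, so the existing error path is untouched:

    /* the replaced pattern */
    filter = kmalloc(sizeof(*filter), GFP_KERNEL);
    if (filter == NULL)
        return -ENOMEM;
    memcpy(filter, nla_data(attrs[XFRMA_ADDRESS_FILTER]), sizeof(*filter));

    /* kmemdup() fuses the allocation and the copy */
    filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
                     sizeof(*filter), GFP_KERNEL);
    if (filter == NULL)
        return -ENOMEM;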
SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
bool
select SND_DMAENGINE_PCM
+config SND_SOC_TOPOLOGY
+ bool
+
# All the supported SoCs
source "sound/soc/adi/Kconfig"
source "sound/soc/atmel/Kconfig"
snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
+
+ifneq ($(CONFIG_SND_SOC_TOPOLOGY),)
snd-soc-core-objs += soc-topology.o
+endif
ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
snd-soc-core-objs += soc-generic-dmaengine-pcm.o
int err = -ENODEV;
down_read(&chip->shutdown_rwsem);
- if (chip->probing && chip->in_pm)
+ if (chip->probing || chip->in_pm)
err = 0;
else if (!chip->shutdown)
err = usb_autopm_get_interface(chip->pm_intf);
prefix ?= $(HOME)
endif
bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(abspath $(prefix)/$(bindir_relative))
mandir = share/man
infodir = share/info
perfexecdir = libexec/perf-core
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
- update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, TRANSACTION_START))
update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, ELISION_START))
" # %5.2f%% aborted cycles ",
100.0 * ((total2-avg) / total));
} else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
- avg > 0 &&
runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
- if (total)
+ if (avg)
ratio = total / avg;
fprintf(out, " # %8.0f cycles / transaction ", ratio);
} else if (perf_stat_evsel__is(evsel, ELISION_START) &&
- avg > 0 &&
runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
- if (total)
+ if (avg)
ratio = total / avg;
fprintf(out, " # %8.0f cycles / elision ", ratio);
* - PACKET_FANOUT_LB
* - PACKET_FANOUT_CPU
* - PACKET_FANOUT_ROLLOVER
+ * - PACKET_FANOUT_CBPF
+ * - PACKET_FANOUT_EBPF
*
* Todo:
* - functionality: PACKET_FANOUT_FLAG_DEFRAG
#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
+#include <linux/unistd.h> /* for __NR_bpf */
#include <linux/filter.h>
+#include <linux/bpf.h>
#include <linux/if_packet.h>
#include <net/ethernet.h>
#include <netinet/ip.h>
return fd;
}
+static void sock_fanout_set_ebpf(int fd)
+{
+ const int len_off = __builtin_offsetof(struct __sk_buff, len);
+ struct bpf_insn prog[] = {
+ { BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0 },
+ { BPF_LDX | BPF_W | BPF_MEM, 0, 6, len_off, 0 },
+ { BPF_JMP | BPF_JGE | BPF_K, 0, 0, 1, DATA_LEN },
+ { BPF_JMP | BPF_JA | BPF_K, 0, 0, 4, 0 },
+ { BPF_LD | BPF_B | BPF_ABS, 0, 0, 0, 0x50 },
+ { BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 2, DATA_CHAR },
+ { BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 1, DATA_CHAR_1 },
+ { BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0 },
+ { BPF_JMP | BPF_EXIT, 0, 0, 0, 0 }
+ };
+ char log_buf[512];
+ union bpf_attr attr;
+ int pfd;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ attr.insns = (unsigned long) prog;
+ attr.insn_cnt = sizeof(prog) / sizeof(prog[0]);
+ attr.license = (unsigned long) "GPL";
+ attr.log_buf = (unsigned long) log_buf;
+ attr.log_size = sizeof(log_buf);
+ attr.log_level = 1;
+
+ pfd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+ if (pfd < 0) {
+ perror("bpf");
+ fprintf(stderr, "bpf verifier:\n%s\n", log_buf);
+ exit(1);
+ }
+
+ if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &pfd, sizeof(pfd))) {
+ perror("fanout data ebpf");
+ exit(1);
+ }
+
+ if (close(pfd)) {
+ perror("close ebpf");
+ exit(1);
+ }
+}
+
static char *sock_fanout_open_ring(int fd)
{
struct tpacket_req req = {
ring = mmap(0, req.tp_block_size * req.tp_block_nr,
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- if (!ring) {
- fprintf(stderr, "packetsock ring mmap\n");
+ if (ring == MAP_FAILED) {
+ perror("packetsock ring mmap");
exit(1);
}
{
const int expect0[] = { 0, 0 };
char *rings[2];
+ uint8_t type = typeflags & 0xFF;
int fds[2], fds_udp[2][2], ret;
fprintf(stderr, "test: datapath 0x%hx\n", typeflags);
fprintf(stderr, "ERROR: failed open\n");
exit(1);
}
+ if (type == PACKET_FANOUT_CBPF)
+ sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA);
+ else if (type == PACKET_FANOUT_EBPF)
+ sock_fanout_set_ebpf(fds[0]);
+
rings[0] = sock_fanout_open_ring(fds[0]);
rings[1] = sock_fanout_open_ring(fds[1]);
pair_udp_open(fds_udp[0], PORT_BASE);
/* Send data, but not enough to overflow a queue */
pair_udp_send(fds_udp[0], 15);
- pair_udp_send(fds_udp[1], 5);
+ pair_udp_send_char(fds_udp[1], 5, DATA_CHAR_1);
ret = sock_fanout_read(fds, rings, expect1);
/* Send more data, overflow the queue */
- pair_udp_send(fds_udp[0], 15);
+ pair_udp_send_char(fds_udp[0], 15, DATA_CHAR_1);
/* TODO: ensure consistent order between expect1 and expect2 */
ret |= sock_fanout_read(fds, rings, expect2);
const int expect_rb[2][2] = { { 15, 5 }, { 20, 15 } };
const int expect_cpu0[2][2] = { { 20, 0 }, { 20, 0 } };
const int expect_cpu1[2][2] = { { 0, 20 }, { 0, 20 } };
+ const int expect_bpf[2][2] = { { 15, 5 }, { 15, 20 } };
int port_off = 2, tries = 5, ret;
test_control_single();
ret |= test_datapath(PACKET_FANOUT_ROLLOVER,
port_off, expect_rb[0], expect_rb[1]);
+ ret |= test_datapath(PACKET_FANOUT_CBPF,
+ port_off, expect_bpf[0], expect_bpf[1]);
+ ret |= test_datapath(PACKET_FANOUT_EBPF,
+ port_off, expect_bpf[0], expect_bpf[1]);
+
set_cpuaffinity(0);
ret |= test_datapath(PACKET_FANOUT_CPU, port_off,
expect_cpu0[0], expect_cpu0[1]);
#define DATA_LEN 100
#define DATA_CHAR 'a'
+#define DATA_CHAR_1 'b'
#define PORT_BASE 8000
# define __maybe_unused __attribute__ ((__unused__))
#endif
-static __maybe_unused void pair_udp_setfilter(int fd)
+static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
{
struct sock_filter bpf_filter[] = {
{ 0x80, 0, 0, 0x00000000 }, /* LD pktlen */
- { 0x35, 0, 5, DATA_LEN }, /* JGE DATA_LEN [f goto nomatch]*/
+ { 0x35, 0, 4, DATA_LEN }, /* JGE DATA_LEN [f goto nomatch]*/
{ 0x30, 0, 0, 0x00000050 }, /* LD ip[80] */
- { 0x15, 0, 3, DATA_CHAR }, /* JEQ DATA_CHAR [f goto nomatch]*/
- { 0x30, 0, 0, 0x00000051 }, /* LD ip[81] */
- { 0x15, 0, 1, DATA_CHAR }, /* JEQ DATA_CHAR [f goto nomatch]*/
+ { 0x15, 1, 0, DATA_CHAR }, /* JEQ DATA_CHAR [t goto match]*/
+ { 0x15, 0, 1, DATA_CHAR_1}, /* JEQ DATA_CHAR_1 [t goto match]*/
{ 0x06, 0, 0, 0x00000060 }, /* RET match */
{ 0x06, 0, 0, 0x00000000 }, /* RET no match */
};
struct sock_fprog bpf_prog;
+ if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
+ bpf_filter[5].code = 0x16; /* RET A */
+
bpf_prog.filter = bpf_filter;
bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
- if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
+ if (setsockopt(fd, lvl, optnum, &bpf_prog,
sizeof(bpf_prog))) {
perror("setsockopt SO_ATTACH_FILTER");
exit(1);
}
}
+static __maybe_unused void pair_udp_setfilter(int fd)
+{
+ sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
+}
+
static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
{
struct sockaddr_in saddr, daddr;
}
}
-static __maybe_unused void pair_udp_send(int fds[], int num)
+static __maybe_unused void pair_udp_send_char(int fds[], int num, char payload)
{
char buf[DATA_LEN], rbuf[DATA_LEN];
- memset(buf, DATA_CHAR, sizeof(buf));
+ memset(buf, payload, sizeof(buf));
while (num--) {
/* Should really handle EINTR and EAGAIN */
if (write(fds[0], buf, sizeof(buf)) != sizeof(buf)) {
}
}
+static __maybe_unused void pair_udp_send(int fds[], int num)
+{
+ pair_udp_send_char(fds, num, DATA_CHAR);
+}
+
static __maybe_unused void pair_udp_close(int fds[])
{
close(fds[0]);