<http://www.kroah.com/log/linux/maintainer-03.html>
<http://www.kroah.com/log/linux/maintainer-04.html>
<http://www.kroah.com/log/linux/maintainer-05.html>
+ <http://www.kroah.com/log/linux/maintainer-06.html>
NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
<https://lkml.org/lkml/2005/7/11/336>
* DMA client
Required properties:
-- dmas: a list of <[DMA multiplexer phandle] [SRS/DRS value]> pairs,
- where SRS/DRS values are fixed handles, specified in the SoC
- manual as the value that would be written into the PDMACHCR.
+- dmas: a list of <[DMA multiplexer phandle] [SRS << 8 | DRS]> pairs,
+  where SRS/DRS are the values specified in the SoC manual.
+  The combined value is written into the PDMACHCR as its high 16 bits.
- dma-names: a list of DMA channel names, one per "dmas" entry
Example:
keycode generated by each GPIO. Linux keycodes are defined in
<dt-bindings/input/input.h>.
+- linux,gpio-keymap: When enabled, the SPT_GPIOPWM_T19 object sends messages
+ on GPIO bit changes. An array of up to 8 entries can be provided
+ indicating the Linux keycode mapped to each bit of the status byte,
+ starting at the LSB. Linux keycodes are defined in
+ <dt-bindings/input/input.h>.
+
+ Note: the numbering of the GPIOs and the bit they start at varies between
+ maXTouch devices. You must either refer to the documentation, or
+ experiment to determine which bit corresponds to which input. Use
+ KEY_RESERVED for unused padding values.
+
Example:
touch@4b {
further clocks may be specified in derived bindings.
- clock-names: One name for each entry in the clocks property, the
first one should be "stmmaceth".
+- clk_ptp_ref: this is the PTP reference clock; if PTP is available, this
+  clock is used for programming the Timestamp Addend Register. If it is not
+  passed, the system clock will be used instead, which is fine on some
+  platforms.
Examples:
infet5-supply = <&some_reg>;
infet6-supply = <&some_reg>;
infet7-supply = <&some_reg>;
- vsys_l1-supply = <&some_reg>;
- vsys_l2-supply = <&some_reg>;
+ vsys-l1-supply = <&some_reg>;
+ vsys-l2-supply = <&some_reg>;
regulators {
dcdc1 {
ADI AXI-SPDIF controller
Required properties:
- - compatible : Must be "adi,axi-spdif-1.00.a"
+ - compatible : Must be "adi,axi-spdif-tx-1.00.a"
- reg : Must contain SPDIF core's registers location and length
- clocks : Pairs of phandle and specifier referencing the controller's clocks.
The controller expects two clocks, the clock used for the AXI interface and
===================
Required properties:
-- compatible: "composite-connector" or "svideo-connector"
+- compatible: "composite-video-connector" or "svideo-connector"
Optional properties:
- label: a symbolic name for the connector
-------
tv: connector {
- compatible = "composite-connector";
+ compatible = "composite-video-connector";
label = "tv";
port {
- Build, install, reboot
The NFS/RDMA code will be enabled automatically if NFS and RDMA
- are turned on. The NFS/RDMA client and server are configured via the hidden
- SUNRPC_XPRT_RDMA config option that depends on SUNRPC and INFINIBAND. The
- value of SUNRPC_XPRT_RDMA will be:
+ are turned on. The NFS/RDMA client and server are configured via the
+ SUNRPC_XPRT_RDMA_CLIENT and SUNRPC_XPRT_RDMA_SERVER config options that both
+ depend on SUNRPC and INFINIBAND. The default value of both options will be:
  - N if either SUNRPC or INFINIBAND is N; in this case the NFS/RDMA client
    and server will not be built
- Start the NFS server
- If the NFS/RDMA server was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in
- kernel config), load the RDMA transport module:
+ If the NFS/RDMA server was built as a module
+ (CONFIG_SUNRPC_XPRT_RDMA_SERVER=m in kernel config), load the RDMA
+ transport module:
$ modprobe svcrdma
- On the client system
- If the NFS/RDMA client was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in
- kernel config), load the RDMA client module:
+ If the NFS/RDMA client was built as a module
+ (CONFIG_SUNRPC_XPRT_RDMA_CLIENT=m in kernel config), load the RDMA client
+ module:
$ modprobe xprtrdma
private field of the seq_file structure; that value can then be retrieved
by the iterator functions.
+There is also a wrapper function around seq_open() called seq_open_private().
+It kmallocs a zero-filled block of memory and stores a pointer to it in the
+private field of the seq_file structure, returning 0 on success. The
+block size is specified in a third parameter to the function, e.g.:
+
+ static int ct_open(struct inode *inode, struct file *file)
+ {
+ return seq_open_private(file, &ct_seq_ops,
+ sizeof(struct mystruct));
+ }
+
+There is also a variant function, __seq_open_private(), which is functionally
+identical except that, if successful, it returns the pointer to the allocated
+memory block, allowing further initialisation, e.g.:
+
+ static int ct_open(struct inode *inode, struct file *file)
+ {
+ struct mystruct *p =
+ __seq_open_private(file, &ct_seq_ops, sizeof(*p));
+
+ if (!p)
+ return -ENOMEM;
+
+ p->foo = bar; /* initialize my stuff */
+ ...
+ p->baz = true;
+
+ return 0;
+ }
+
+A corresponding close function, seq_release_private(), is available; it
+frees the memory allocated in the corresponding open.
+
The other operations of interest - read(), llseek(), and release() - are
all implemented by the seq_file code itself. So a virtual file's
file_operations structure will look like:
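
	static const struct file_operations ct_file_ops = {
		.owner	 = THIS_MODULE,
		.open	 = ct_open,
		.read	 = seq_read,
		.llseek	 = seq_lseek,
		.release = seq_release_private,
	};

This sketch continues the ct_ example above: since ct_open() used
seq_open_private(), the matching release method is seq_release_private()
rather than plain seq_release().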
if and only if no GPIO has been assigned to the device/function/index triplet,
other error codes are used for cases where a GPIO has been assigned but an error
occurred while trying to acquire it. This is useful to discriminate between mere
-errors and an absence of GPIO for optional GPIO parameters.
+errors and an absence of GPIO for optional GPIO parameters. For the common
+pattern where a GPIO is optional, the gpiod_get_optional() and
+gpiod_get_index_optional() functions can be used. These functions return NULL
+instead of -ENOENT if no GPIO has been assigned to the requested function:
+
+
+ struct gpio_desc *gpiod_get_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
+
+ struct gpio_desc *gpiod_get_index_optional(struct device *dev,
+ const char *con_id,
+ unsigned int index,
+ enum gpiod_flags flags)
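
As a minimal usage sketch (the "enable" line name and the GPIOD_OUT_LOW flag
are illustrative assumptions, not taken from the text above):

	struct gpio_desc *desc;

	desc = gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		return PTR_ERR(desc);	/* a real error occurred */
	if (desc)			/* NULL just means no GPIO was mapped */
		gpiod_set_value(desc, 1);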
Device-managed variants of these functions are also defined:
unsigned int idx,
enum gpiod_flags flags)
+ struct gpio_desc *devm_gpiod_get_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
+
+ struct gpio_desc * devm_gpiod_get_index_optional(struct device *dev,
+ const char *con_id,
+ unsigned int index,
+ enum gpiod_flags flags)
+
A GPIO descriptor can be disposed of using the gpiod_put() function:
void gpiod_put(struct gpio_desc *desc)
I2C to communicate with your device. SMBus commands are preferred if
the device supports them. Both are illustrated below.
- __u8 register = 0x10; /* Device register to access */
+ __u8 reg = 0x10; /* Device register to access */
__s32 res;
char buf[10];
/* Using SMBus commands */
- res = i2c_smbus_read_word_data(file, register);
+ res = i2c_smbus_read_word_data(file, reg);
if (res < 0) {
/* ERROR HANDLING: i2c transaction failed */
} else {
}
/* Using I2C Write, equivalent of
- i2c_smbus_write_word_data(file, register, 0x6543) */
- buf[0] = register;
+ i2c_smbus_write_word_data(file, reg, 0x6543) */
+ buf[0] = reg;
buf[1] = 0x43;
buf[2] = 0x65;
- if (write(file, buf, 3) ! =3) {
+ if (write(file, buf, 3) != 3) {
/* ERROR HANDLING: i2c transaction failed */
}
from the device. It supports blocking operations, poll/select and
fasync operation modes. You must read 1 byte from the device. The
result is the number of free-fall interrupts since the last successful
-read (or 255 if number of interrupts would not fit). See the hpfall.c
+read (or 255 if number of interrupts would not fit). See the freefall.c
file for an example on using the device.
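
As a quick sketch of such a reader (this assumes the driver's "freefall" misc
device shows up as /dev/freefall; error handling is elided):

	int fd = open("/dev/freefall", O_RDONLY);
	unsigned char count;

	if (fd >= 0 && read(fd, &count, 1) == 1)
		printf("%u free-fall interrupts since last read\n", count);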
on all its consumers) and change operating mode (if necessary and permitted)
to best match the current operating load.
-The load_uA value can be determined from the consumers datasheet. e.g.most
-datasheets have tables showing the max current consumed in certain situations.
+The load_uA value can be determined from the consumer's datasheet; e.g. most
+datasheets have tables showing the maximum current consumed in certain
+situations.
Most consumers will use indirect operating mode control since they have no
knowledge of the regulator or whether the regulator is shared with other
int regulator_register_notifier(struct regulator *regulator,
struct notifier_block *nb);
-Consumers can uregister interest by calling :-
+Consumers can unregister interest by calling :-
int regulator_unregister_notifier(struct regulator *regulator,
struct notifier_block *nb);
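
A sketch of a consumer-side handler (the event handling shown is
illustrative):

	static int my_supply_event(struct notifier_block *nb,
				   unsigned long event, void *data)
	{
		if (event & REGULATOR_EVENT_OVER_TEMP)
			pr_warn("supply over temperature, throttling\n");
		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_supply_event,
	};

	regulator_register_notifier(regulator, &my_nb);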
- Errors in regulator configuration can have very serious consequences
for the system, potentially including lasting hardware damage.
- - It is not possible to automatically determine the power confugration
+ - It is not possible to automatically determine the power configuration
of the system - software-equivalent variants of the same chip may
- have different power requirments, and not all components with power
+ have different power requirements, and not all components with power
requirements are visible to software.
=> The API should make no changes to the hardware state unless it has
- specific knowledge that these changes are safe to do perform on
- this particular system.
+ specific knowledge that these changes are safe to perform on this
+ particular system.
Consumer use cases
------------------
+-> [Consumer B @ 3.3V]
The drivers for consumers A & B must be mapped to the correct regulator in
-order to control their power supply. This mapping can be achieved in machine
+order to control their power supplies. This mapping can be achieved in machine
initialisation code by creating a struct regulator_consumer_supply for
each regulator.
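
For instance (a sketch; the device and supply names are illustrative):

	static struct regulator_consumer_supply regulator1_consumers[] = {
		REGULATOR_SUPPLY("Vcc", "consumer-a"),
		REGULATOR_SUPPLY("Vcc", "consumer-b"),
	};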
Constraints can now be registered by defining a struct regulator_init_data
for each regulator power domain. This structure also maps the consumers
-to their supply regulator :-
+to their supply regulators :-
static struct regulator_init_data regulator1_data = {
.constraints = {
Consumers can be classified into two types:-
Static: consumer does not change its supply voltage or
- current limit. It only needs to enable or disable it's
+ current limit. It only needs to enable or disable its
power supply. Its supply voltage is set by the hardware,
bootloader, firmware or kernel board initialisation code.
- Dynamic: consumer needs to change it's supply voltage or
+ Dynamic: consumer needs to change its supply voltage or
current limit to meet operation demands.
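
A dynamic consumer might, for example, raise its rail before a high-load
operation (a sketch; the voltages are illustrative):

	int ret;

	ret = regulator_set_voltage(regulator, 1800000, 1800000);
	if (ret == 0)
		ret = regulator_enable(regulator);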
This interface is for machine specific code and allows the creation of
voltage/current domains (with constraints) for each regulator. It can
provide regulator constraints that will prevent device damage through
- overvoltage or over current caused by buggy client drivers. It also
+ overvoltage or overcurrent caused by buggy client drivers. It also
allows the creation of a regulator tree whereby some regulators are
supplied by others (similar to a clock tree).
struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
const struct regulator_config *config);
-This will register the regulators capabilities and operations to the regulator
+This will register the regulator's capabilities and operations to the regulator
core.
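
A bare-bones registration sketch (the desc fields shown are illustrative; a
real driver also supplies its ops and the machine constraints via config):

	static struct regulator_desc my_desc = {
		.name  = "my-ldo",
		.id    = 0,
		.type  = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
	};

	struct regulator_dev *rdev;

	rdev = regulator_register(&my_desc, &config);
	if (IS_ERR(rdev))
		return PTR_ERR(rdev);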
Regulators can be unregistered by calling :-
Regulator Events
================
-Regulators can send events (e.g. over temp, under voltage, etc) to consumer
-drivers by calling :-
+Regulators can send events (e.g. overtemperature, undervoltage, etc) to
+consumer drivers by calling :-
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
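
For example, a regulator driver's interrupt handler might report an
over-temperature condition like this (a sketch):

	regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_TEMP, NULL);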
F: drivers/pinctrl/sh-pfc/
PIN CONTROLLER - SAMSUNG
-M: Tomasz Figa <t.figa@samsung.com>
+M: Tomasz Figa <tomasz.figa@gmail.com>
M: Thomas Abraham <thomas.abraham@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
F: drivers/media/i2c/s5k5baf.c
SAMSUNG SOC CLOCK DRIVERS
-M: Tomasz Figa <t.figa@samsung.com>
+M: Sylwester Nawrocki <s.nawrocki@samsung.com>
+M: Tomasz Figa <tomasz.figa@gmail.com>
S: Supported
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
F: drivers/clk/samsung/
F: arch/x86/
X86 PLATFORM DRIVERS
-M: Matthew Garrett <matthew.garrett@nebula.com>
+M: Darren Hart <dvhart@infradead.org>
L: platform-driver-x86@vger.kernel.org
-T: git git://cavan.codon.org.uk/platform-drivers-x86.git
+T: git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
S: Maintained
F: drivers/platform/x86/
VERSION = 3
PATCHLEVEL = 17
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
static void __ic_line_inv_vaddr_helper(void *info)
{
- struct ic_inv *ic_inv_args = (struct ic_inv_args *) info;
+ struct ic_inv_args *ic_inv = info;
__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}
usb1: usb@48390000 {
compatible = "synopsys,dwc3";
- reg = <0x48390000 0x17000>;
+ reg = <0x48390000 0x10000>;
interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb2_phy1>;
phy-names = "usb2-phy";
usb2: usb@483d0000 {
compatible = "synopsys,dwc3";
- reg = <0x483d0000 0x17000>;
+ reg = <0x483d0000 0x10000>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb2_phy2>;
phy-names = "usb2-phy";
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
- clock-frequency = <400000>;
+ clock-frequency = <100000>;
tps65218: tps65218@24 {
reg = <0x24>;
ranges = <0 0 0 0x01000000>; /* minimum GPMC partition = 16MB */
nand@0,0 {
reg = <0 0 4>; /* device IO registers */
- ti,nand-ecc-opt = "bch8";
+ ti,nand-ecc-opt = "bch16";
ti,elm-id = <&elm>;
nand-bus-width = <8>;
gpmc,device-width = <1>;
gpmc,rd-cycle-ns = <40>;
gpmc,wr-cycle-ns = <40>;
gpmc,wait-pin = <0>;
- gpmc,wait-on-read;
- gpmc,wait-on-write;
gpmc,bus-turnaround-ns = <0>;
gpmc,cycle2cycle-delay-ns = <0>;
gpmc,clk-activation-ns = <0>;
};
&gpmc {
- status = "okay";
+ status = "okay"; /* Disable QSPI when enabling GPMC (NAND) */
pinctrl-names = "default";
pinctrl-0 = <&nand_flash_x8>;
ranges = <0 0 0x08000000 0x10000000>; /* CS0: NAND */
nand@0,0 {
reg = <0 0 0>; /* CS0, offset 0 */
- ti,nand-ecc-opt = "bch8";
+ ti,nand-ecc-opt = "bch16";
ti,elm-id = <&elm>;
nand-bus-width = <8>;
gpmc,device-width = <1>;
gpmc,access-ns = <30>; /* tCEA + 4*/
gpmc,rd-cycle-ns = <40>;
gpmc,wr-cycle-ns = <40>;
- gpmc,wait-on-read = "true";
- gpmc,wait-on-write = "true";
+ gpmc,wait-pin = <0>;
gpmc,bus-turnaround-ns = <0>;
gpmc,cycle2cycle-delay-ns = <0>;
gpmc,clk-activation-ns = <0>;
};
&qspi {
- status = "okay";
+ status = "disabled"; /* Disable GPMC (NAND) when enabling QSPI */
pinctrl-names = "default";
pinctrl-0 = <&qspi1_default>;
usb: usbck {
compatible = "atmel,at91rm9200-clk-usb";
#clock-cells = <0>;
- atmel,clk-divisors = <1 2>;
+ atmel,clk-divisors = <1 2 0 0>;
clocks = <&pllb>;
};
};
pllb: pllbck {
+ compatible = "atmel,at91sam9g20-clk-pllb";
atmel,clk-input-range = <2000000 32000000>;
atmel,pll-clk-output-ranges = <30000000 100000000 0 0>;
};
/dts-v1/;
#include "dra74x.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "TI DRA742";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
+
+ vtt_fixed: fixedregulator-vtt {
+ compatible = "regulator-fixed";
+ regulator-name = "vtt_fixed";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ enable-active-high;
+ gpio = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+ };
};
&dra7_pmx_core {
+ pinctrl-names = "default";
+ pinctrl-0 = <&vtt_pin>;
+
+ vtt_pin: pinmux_vtt_pin {
+ pinctrl-single,pins = <
+ 0x3b4 (PIN_OUTPUT | MUX_MODE14) /* spi1_cs1.gpio7_11 */
+ >;
+ };
+
i2c1_pins: pinmux_i2c1_pins {
pinctrl-single,pins = <
0x400 (PIN_INPUT | MUX_MODE0) /* i2c1_sda */
i2c3_pins: pinmux_i2c3_pins {
pinctrl-single,pins = <
- 0x410 (PIN_INPUT | MUX_MODE0) /* i2c3_sda */
- 0x414 (PIN_INPUT | MUX_MODE0) /* i2c3_scl */
+ 0x288 (PIN_INPUT | MUX_MODE9) /* gpio6_14.i2c3_sda */
+ 0x28c (PIN_INPUT | MUX_MODE9) /* gpio6_15.i2c3_scl */
>;
};
mcspi1_pins: pinmux_mcspi1_pins {
pinctrl-single,pins = <
- 0x3a4 (PIN_INPUT | MUX_MODE0) /* spi2_clk */
- 0x3a8 (PIN_INPUT | MUX_MODE0) /* spi2_d1 */
- 0x3ac (PIN_INPUT | MUX_MODE0) /* spi2_d0 */
- 0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs0 */
- 0x3b4 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs1 */
- 0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs2 */
- 0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs3 */
+ 0x3a4 (PIN_INPUT | MUX_MODE0) /* spi1_sclk */
+ 0x3a8 (PIN_INPUT | MUX_MODE0) /* spi1_d1 */
+ 0x3ac (PIN_INPUT | MUX_MODE0) /* spi1_d0 */
+ 0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi1_cs0 */
+ 0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs2.hdmi1_hpd */
+ 0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs3.hdmi1_cec */
>;
};
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c3_pins>;
- clock-frequency = <3400000>;
+ clock-frequency = <400000>;
};
&mcspi1 {
reg = <0x001c0000 0x00020000>;
};
partition@7 {
- label = "NAND.u-boot-env";
+ label = "NAND.u-boot-env.backup1";
reg = <0x001e0000 0x00020000>;
};
partition@8 {
&usb2_phy2 {
phy-supply = <&ldousb_reg>;
};
+
+&gpio7 {
+ ti,no-reset-on-init;
+ ti,no-idle-on-init;
+};
};
tv: connector {
- compatible = "composite-connector";
+ compatible = "composite-video-connector";
label = "tv";
port {
ti,bit-shift = <0x1e>;
reg = <0x0d00>;
ti,set-bit-to-disable;
+ ti,set-rate-parent;
};
dpll4_m6_ck: dpll4_m6_ck {
msp2: msp@80117000 {
pinctrl-names = "default";
pinctrl-0 = <&msp2_default_mode>;
- status = "okay";
};
msp3: msp@80125000 {
EXPORT_SYMBOL(edma_assign_channel_eventq);
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
- struct edma *edma_cc)
+ struct edma *edma_cc, int cc_id)
{
int i;
u32 value, cccfg;
s8 (*queue_priority_map)[2];
/* Decode the eDMA3 configuration from CCCFG register */
- cccfg = edma_read(0, EDMA_CCCFG);
+ cccfg = edma_read(cc_id, EDMA_CCCFG);
value = GET_NUM_REGN(cccfg);
edma_cc->num_region = BIT(value);
value = GET_NUM_EVQUE(cccfg);
edma_cc->num_tc = value + 1;
- dev_dbg(dev, "eDMA3 HW configuration (cccfg: 0x%08x):\n", cccfg);
+ dev_dbg(dev, "eDMA3 CC%d HW configuration (cccfg: 0x%08x):\n", cc_id,
+ cccfg);
dev_dbg(dev, "num_region: %u\n", edma_cc->num_region);
dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels);
dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots);
return -ENOMEM;
/* Get eDMA3 configuration from IP */
- ret = edma_setup_from_hw(dev, info[j], edma_cc[j]);
+ ret = edma_setup_from_hw(dev, info[j], edma_cc[j], j);
if (ret)
return ret;
else
kvm_vcpu_block(vcpu);
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
return 1;
}
mrc p15, 0, r0, c10, c2, 1
mcr p15, 4, r0, c10, c2, 1
+ @ Invalidate the stale TLBs from Bootloader
+ mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
+ dsb ish
+
@ Set the HSCTLR to:
@ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
@ - Endianness: Kernel config
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/clk-provider.h>
#include <asm/setup.h>
#include <asm/irq.h>
of_irq_init(irq_of_match);
}
+static void __init at91rm9200_dt_timer_init(void)
+{
+#if defined(CONFIG_COMMON_CLK)
+ of_clk_init(NULL);
+#endif
+ at91rm9200_timer_init();
+}
+
static const char *at91rm9200_dt_board_compat[] __initdata = {
"atmel,at91rm9200",
NULL
};
DT_MACHINE_START(at91rm9200_dt, "Atmel AT91RM9200 (Device Tree)")
- .init_time = at91rm9200_timer_init,
+ .init_time = at91rm9200_dt_timer_init,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = at91rm9200_dt_initialize,
}
}
- if ((p->wait_on_read || p->wait_on_write) &&
- (p->wait_pin > gpmc_nr_waitpins)) {
+ if (p->wait_pin > gpmc_nr_waitpins) {
pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
return -EINVAL;
}
p->wait_on_write = of_property_read_bool(np,
"gpmc,wait-on-write");
if (!p->wait_on_read && !p->wait_on_write)
- pr_warn("%s: read/write wait monitoring not enabled!\n",
- __func__);
+ pr_debug("%s: rd/wr wait monitoring not enabled!\n",
+ __func__);
}
}
kernel_neon_begin_partial(28);
sha2_ce_transform(blocks, data, sctx->state, NULL, len);
kernel_neon_end();
- data += blocks * SHA256_BLOCK_SIZE;
}
static int sha224_finup(struct shash_desc *desc, const u8 *data,
*/
#define ARM_MAX_BRP 16
#define ARM_MAX_WRP 16
-#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP)
/* Virtual debug register bases. */
#define AARCH64_DBG_REG_BVR 0
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
#define KSTK_EIP(tsk) ((unsigned long)task_pt_regs(tsk)->pc)
-#define KSTK_ESP(tsk) ((unsigned long)task_pt_regs(tsk)->sp)
+#define KSTK_ESP(tsk) user_stack_pointer(task_pt_regs(tsk))
/*
* Prefetching support
(!((regs)->pstate & PSR_F_BIT))
#define user_stack_pointer(regs) \
- (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)
+ (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
case CPU_PM_ENTER:
if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
fpsimd_save_state(¤t->thread.fpsimd_state);
+ this_cpu_write(fpsimd_last_state, NULL);
break;
case CPU_PM_EXIT:
if (current->mm)
.long 0
.popsection
- .align 3
-2: .quad .
- .quad PAGE_OFFSET
-
#ifdef CONFIG_SMP
.align 3
1: .quad .
return regs->compat_lr;
}
+ if ((u32)idx == PERF_REG_ARM64_SP)
+ return regs->sp;
+
+ if ((u32)idx == PERF_REG_ARM64_PC)
+ return regs->pc;
+
return regs->regs[idx];
}
break;
}
}
- for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
+
+ for (i = 0; i < ARM_MAX_WRP; ++i) {
if (current->thread.debug.hbp_watch[i] == bp) {
info.si_errno = -((i << 1) + 1);
break;
kbuf += sizeof(reg);
} else {
ret = copy_to_user(ubuf, ®, sizeof(reg));
- if (ret)
+ if (ret) {
+ ret = -EFAULT;
break;
+ }
ubuf += sizeof(reg);
}
kbuf += sizeof(reg);
} else {
ret = copy_from_user(®, ubuf, sizeof(reg));
- if (ret)
- return ret;
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
ubuf += sizeof(reg);
}
#endif
static const char *cpu_name;
+static const char *machine_name;
phys_addr_t __fdt_pointer __initdata;
/*
while (true)
cpu_relax();
}
+
+ machine_name = of_flat_dt_get_machine_name();
}
/*
{
int i;
- /*
- * Dump out the common processor features in a single line. Userspace
- * should read the hwcaps with getauxval(AT_HWCAP) rather than
- * attempting to parse this.
- */
- seq_puts(m, "features\t:");
- for (i = 0; hwcap_str[i]; i++)
- if (elf_hwcap & (1 << i))
- seq_printf(m, " %s", hwcap_str[i]);
- seq_puts(m, "\n\n");
+ seq_printf(m, "Processor\t: %s rev %d (%s)\n",
+ cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
for_each_online_cpu(i) {
- struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
- u32 midr = cpuinfo->reg_midr;
-
/*
* glibc reads /proc/cpuinfo to determine the number of
* online processors, looking for lines beginning with
#ifdef CONFIG_SMP
seq_printf(m, "processor\t: %d\n", i);
#endif
- seq_printf(m, "implementer\t: 0x%02x\n",
- MIDR_IMPLEMENTOR(midr));
- seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr));
- seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr));
- seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr));
}
+ /* dump out the processor features */
+ seq_puts(m, "Features\t: ");
+
+ for (i = 0; hwcap_str[i]; i++)
+ if (elf_hwcap & (1 << i))
+ seq_printf(m, "%s ", hwcap_str[i]);
+
+ seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
+ seq_printf(m, "CPU architecture: AArch64\n");
+ seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
+ seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
+ seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
+
+ seq_puts(m, "\n");
+
+ seq_printf(m, "Hardware\t: %s\n", machine_name);
+
return 0;
}
else
kvm_vcpu_block(vcpu);
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
return 1;
}
msr mair_el2, x4
isb
+ /* Invalidate the stale TLBs from Bootloader */
+ tlbi alle2
+ dsb sy
+
mrs x4, sctlr_el2
and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2
ldr x5, =SCTLR_EL2_FLAGS
#include <uapi/asm/unistd.h>
-#define NR_syscalls 352
+#define NR_syscalls 354
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
#define __NR_sched_setattr 349
#define __NR_sched_getattr 350
#define __NR_renameat2 351
+#define __NR_getrandom 352
+#define __NR_memfd_create 353
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
.long sys_sched_setattr
.long sys_sched_getattr /* 350 */
.long sys_renameat2
+ .long sys_getrandom
+ .long sys_memfd_create
endmenu
-menu "Advanced setup"
+menu "Kernel features"
config ADVANCED_OPTIONS
bool "Prompt for advanced kernel configuration options"
endchoice
-endmenu
-
source "mm/Kconfig"
+endmenu
+
menu "Executable file formats"
source "fs/Kconfig.binfmt"
#include <asm/percpu.h>
#include <asm/ptrace.h>
+#include <linux/linkage.h>
/*
* These are per-cpu variables required in entry.S, among other
if ((get_fs().seg < ((unsigned long)addr)) ||
(get_fs().seg < ((unsigned long)addr + size - 1))) {
- pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+ pr_devel("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
(u32)get_fs().seg);
return 0;
}
ok:
- pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+ pr_devel("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
(u32)get_fs().seg);
return 1;
#endif /* __ASSEMBLY__ */
-#define __NR_syscalls 381
+#define __NR_syscalls 387
#endif /* _ASM_MICROBLAZE_UNISTD_H */
CONFIG_NR_CPUS=4
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_NR_CPUS=4
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_SMP=y
CONFIG_NR_CPUS=24
CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_SMP=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
CONFIG_NR_CPUS=2
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_LZMA=y
CONFIG_NR_CPUS=2048
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_AUDITSYSCALL=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_AUDITSYSCALL=y
CONFIG_IRQ_DOMAIN_DEBUG=y
STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER 12
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define STACK_FRAME_MIN_SIZE 32
+#else
+#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
+#endif
+
/* Size of dummy stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE 128
#define __SIGNAL_FRAMESIZE32 64
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define STACK_FRAME_MARKER 2
+#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
/* Size of stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE 64
SYSCALL_SPU(sched_setattr)
SYSCALL_SPU(sched_getattr)
SYSCALL_SPU(renameat2)
+SYSCALL_SPU(seccomp)
+SYSCALL_SPU(getrandom)
+SYSCALL_SPU(memfd_create)
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 358
+#define __NR_syscalls 361
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
#define __NR_sched_setattr 355
#define __NR_sched_getattr 356
#define __NR_renameat2 357
+#define __NR_seccomp 358
+#define __NR_getrandom 359
+#define __NR_memfd_create 360
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
}
kvm->arch.hpt_cma_alloc = 0;
- page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
+ page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT));
if (page) {
hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
- memset((void *)hpt, 0, (1 << order));
+ memset((void *)hpt, 0, (1ul << order));
kvm->arch.hpt_cma_alloc = 1;
}
return 0; /* must be 16-byte aligned */
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
return 0;
- if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
+ if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
return 1;
/*
* sp could decrease when we jump off an interrupt stack
#include <asm/opal.h>
#include <asm/cputable.h>
+#include <asm/machdep.h>
static int opal_hmi_handler_nb_init;
struct OpalHmiEvtNode {
}
return 0;
}
-subsys_initcall(opal_hmi_handler_init);
+machine_subsys_initcall(powernv, opal_hmi_handler_init);
static int pseries_remove_mem_node(struct device_node *np)
{
const char *type;
- const unsigned int *regs;
+ const __be32 *regs;
unsigned long base;
unsigned int lmb_size;
int ret = -EINVAL;
if (!regs)
return ret;
- base = *(unsigned long *)regs;
- lmb_size = regs[3];
+ base = be64_to_cpu(*(unsigned long *)regs);
+ lmb_size = be32_to_cpu(regs[3]);
pseries_remove_memblock(base, lmb_size);
return 0;
static int pseries_add_mem_node(struct device_node *np)
{
const char *type;
- const unsigned int *regs;
+ const __be32 *regs;
unsigned long base;
unsigned int lmb_size;
int ret = -EINVAL;
if (!regs)
return ret;
- base = *(unsigned long *)regs;
- lmb_size = regs[3];
+ base = be64_to_cpu(*(unsigned long *)regs);
+ lmb_size = be32_to_cpu(regs[3]);
/*
* Update memory region to represent the memory add
struct of_drconf_cell *new_drmem, *old_drmem;
unsigned long memblock_size;
u32 entries;
- u32 *p;
+ __be32 *p;
int i, rc = -EINVAL;
memblock_size = pseries_memory_block_size();
if (!memblock_size)
return -EINVAL;
- p = (u32 *) pr->old_prop->value;
+ p = (__be32 *) pr->old_prop->value;
if (!p)
return -EINVAL;
 * entries. Get the number of entries and skip to the array of
* of_drconf_cell's.
*/
- entries = *p++;
+ entries = be32_to_cpu(*p++);
old_drmem = (struct of_drconf_cell *)p;
- p = (u32 *)pr->prop->value;
+ p = (__be32 *)pr->prop->value;
p++;
new_drmem = (struct of_drconf_cell *)p;
for (i = 0; i < entries; i++) {
- if ((old_drmem[i].flags & DRCONF_MEM_ASSIGNED) &&
- (!(new_drmem[i].flags & DRCONF_MEM_ASSIGNED))) {
- rc = pseries_remove_memblock(old_drmem[i].base_addr,
+ if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
+ (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
+ rc = pseries_remove_memblock(
+ be64_to_cpu(old_drmem[i].base_addr),
memblock_size);
break;
- } else if ((!(old_drmem[i].flags & DRCONF_MEM_ASSIGNED)) &&
- (new_drmem[i].flags & DRCONF_MEM_ASSIGNED)) {
- rc = memblock_add(old_drmem[i].base_addr,
+ } else if ((!(be32_to_cpu(old_drmem[i].flags) &
+ DRCONF_MEM_ASSIGNED)) &&
+ (be32_to_cpu(new_drmem[i].flags) &
+ DRCONF_MEM_ASSIGNED)) {
+ rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
memblock_size);
rc = (rc < 0) ? -EINVAL : 0;
break;
}
}
-
return rc;
}
#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
sizeof(struct ipl_block_fcp))
-#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 8)
+#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16)
#define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
sizeof(struct ipl_block_ccw))
-#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 8)
+#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16)
#define IPL_MAX_SUPPORTED_VERSION (0)
u8 pbt;
u8 flags;
u16 reserved2;
+ u8 loadparm[8];
} __attribute__((packed));
struct ipl_block_fcp {
- u8 reserved1[313-1];
+ u8 reserved1[305-1];
u8 opt;
u8 reserved2[3];
u16 reserved3;
offsetof(struct ipl_block_fcp, scp_data)))
struct ipl_block_ccw {
- u8 load_parm[8];
u8 reserved1[84];
u8 reserved2[2];
u16 devno;
unsigned long addr, pte_t *ptep)
{
pgste_t pgste;
- pte_t pte;
+ pte_t pte, oldpte;
int young;
if (mm_has_pgste(vma->vm_mm)) {
pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
}
- pte = *ptep;
+ oldpte = pte = *ptep;
ptep_flush_direct(vma->vm_mm, addr, ptep);
young = pte_young(pte);
pte = pte_mkold(pte);
if (mm_has_pgste(vma->vm_mm)) {
+ pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
pgste = pgste_set_pte(ptep, pgste, pte);
pgste_set_unlock(ptep, pgste);
} else
ptep_flush_direct(vma->vm_mm, address, ptep);
if (mm_has_pgste(vma->vm_mm)) {
+ pgste_set_key(ptep, pgste, entry, vma->vm_mm);
pgste = pgste_set_pte(ptep, pgste, entry);
pgste_set_unlock(ptep, pgste);
} else
DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long)
IPL_PARMBLOCK_START->ipl_info.fcp.br_lba);
-static struct attribute *ipl_fcp_attrs[] = {
- &sys_ipl_type_attr.attr,
- &sys_ipl_device_attr.attr,
- &sys_ipl_fcp_wwpn_attr.attr,
- &sys_ipl_fcp_lun_attr.attr,
- &sys_ipl_fcp_bootprog_attr.attr,
- &sys_ipl_fcp_br_lba_attr.attr,
- NULL,
-};
-
-static struct attribute_group ipl_fcp_attr_group = {
- .attrs = ipl_fcp_attrs,
-};
-
-/* CCW ipl device attributes */
-
static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
+static struct attribute *ipl_fcp_attrs[] = {
+ &sys_ipl_type_attr.attr,
+ &sys_ipl_device_attr.attr,
+ &sys_ipl_fcp_wwpn_attr.attr,
+ &sys_ipl_fcp_lun_attr.attr,
+ &sys_ipl_fcp_bootprog_attr.attr,
+ &sys_ipl_fcp_br_lba_attr.attr,
+ &sys_ipl_ccw_loadparm_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ipl_fcp_attr_group = {
+ .attrs = ipl_fcp_attrs,
+};
+
+/* CCW ipl device attributes */
+
static struct attribute *ipl_ccw_attrs_vm[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
reipl_block_fcp->ipl_info.fcp.devno);
-static struct attribute *reipl_fcp_attrs[] = {
- &sys_reipl_fcp_device_attr.attr,
- &sys_reipl_fcp_wwpn_attr.attr,
- &sys_reipl_fcp_lun_attr.attr,
- &sys_reipl_fcp_bootprog_attr.attr,
- &sys_reipl_fcp_br_lba_attr.attr,
- NULL,
-};
-
-static struct attribute_group reipl_fcp_attr_group = {
- .attrs = reipl_fcp_attrs,
-};
-
-/* CCW reipl device attributes */
-
-DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
- reipl_block_ccw->ipl_info.ccw.devno);
-
static void reipl_get_ascii_loadparm(char *loadparm,
struct ipl_parameter_block *ibp)
{
- memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN);
+ memcpy(loadparm, ibp->hdr.loadparm, LOADPARM_LEN);
EBCASC(loadparm, LOADPARM_LEN);
loadparm[LOADPARM_LEN] = 0;
strim(loadparm);
return -EINVAL;
}
/* initialize loadparm with blanks */
- memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN);
+ memset(ipb->hdr.loadparm, ' ', LOADPARM_LEN);
/* copy and convert to ebcdic */
- memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len);
- ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN);
+ memcpy(ipb->hdr.loadparm, buf, lp_len);
+ ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
return len;
}
+/* FCP wrapper */
+static ssize_t reipl_fcp_loadparm_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return reipl_generic_loadparm_show(reipl_block_fcp, page);
+}
+
+static ssize_t reipl_fcp_loadparm_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return reipl_generic_loadparm_store(reipl_block_fcp, buf, len);
+}
+
+static struct kobj_attribute sys_reipl_fcp_loadparm_attr =
+ __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_fcp_loadparm_show,
+ reipl_fcp_loadparm_store);
+
+static struct attribute *reipl_fcp_attrs[] = {
+ &sys_reipl_fcp_device_attr.attr,
+ &sys_reipl_fcp_wwpn_attr.attr,
+ &sys_reipl_fcp_lun_attr.attr,
+ &sys_reipl_fcp_bootprog_attr.attr,
+ &sys_reipl_fcp_br_lba_attr.attr,
+ &sys_reipl_fcp_loadparm_attr.attr,
+ NULL,
+};
+
+static struct attribute_group reipl_fcp_attr_group = {
+ .attrs = reipl_fcp_attrs,
+};
+
+/* CCW reipl device attributes */
+
+DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
+ reipl_block_ccw->ipl_info.ccw.devno);
+
/* NSS wrapper */
static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
/* LOADPARM */
/* check if read scp info worked and set loadparm */
if (sclp_ipl_info.is_valid)
- memcpy(ipb->ipl_info.ccw.load_parm,
- &sclp_ipl_info.loadparm, LOADPARM_LEN);
+ memcpy(ipb->hdr.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
else
/* read scp info failed: set empty loadparm (EBCDIC blanks) */
- memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN);
+ memset(ipb->hdr.loadparm, 0x40, LOADPARM_LEN);
ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
/* VM PARM */
return rc;
}
- if (ipl_info.type == IPL_TYPE_FCP)
+ if (ipl_info.type == IPL_TYPE_FCP) {
memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
- else {
+ /*
+ * Fix loadparm: There are systems where the (SCSI) LOADPARM
+	 * is invalid in the SCSI IPL parameter block, so always take it
+	 * from sclp_ipl_info.
+ */
+ memcpy(reipl_block_fcp->hdr.loadparm, sclp_ipl_info.loadparm,
+ LOADPARM_LEN);
+ } else {
reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
static int __init s390_ipl_init(void)
{
+ char str[8] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40};
+
sclp_get_ipl_info(&sclp_ipl_info);
+ /*
+ * Fix loadparm: There are systems where the (SCSI) LOADPARM
+ * returned by read SCP info is invalid (contains EBCDIC blanks)
+ * when the system has been booted via diag308. In that case we use
+ * the value from diag308, if available.
+ *
+ * There are also systems where diag308 store does not work in
+ * case the system is booted from HMC. Fortunately in this case
+ * READ SCP info provides the correct value.
+ */
+ if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 &&
+ diag308_set_works)
+ memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm,
+ LOADPARM_LEN);
shutdown_actions_init();
shutdown_triggers_init();
return 0;
basr %r5,0
0: al %r5,21f-0b(%r5) /* get &_vdso_data */
chi %r2,__CLOCK_REALTIME
- je 10f
+ je 11f
chi %r2,__CLOCK_MONOTONIC
jne 19f
/* CLOCK_MONOTONIC */
- ltr %r3,%r3
- jz 9f /* tp == NULL */
1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
j 6b
8: st %r2,0(%r3) /* store tp->tv_sec */
st %r1,4(%r3) /* store tp->tv_nsec */
-9: lhi %r2,0
+ lhi %r2,0
br %r14
/* CLOCK_REALTIME */
-10: ltr %r3,%r3 /* tp == NULL */
- jz 18f
11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 11b
j 15b
17: st %r2,0(%r3) /* store tp->tv_sec */
st %r1,4(%r3) /* store tp->tv_nsec */
-18: lhi %r2,0
+ lhi %r2,0
br %r14
/* Fallback to system call */
.cfi_startproc
larl %r5,_vdso_data
cghi %r2,__CLOCK_REALTIME
- je 4f
+ je 5f
cghi %r2,__CLOCK_THREAD_CPUTIME_ID
je 9f
cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
jne 12f
/* CLOCK_MONOTONIC */
- ltgr %r3,%r3
- jz 3f /* tp == NULL */
0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
j 1b
2: stg %r0,0(%r3) /* store tp->tv_sec */
stg %r1,8(%r3) /* store tp->tv_nsec */
-3: lghi %r2,0
+ lghi %r2,0
br %r14
/* CLOCK_REALTIME */
-4: ltr %r3,%r3 /* tp == NULL */
- jz 8f
5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
j 6b
7: stg %r0,0(%r3) /* store tp->tv_sec */
stg %r1,8(%r3) /* store tp->tv_nsec */
-8: lghi %r2,0
+ lghi %r2,0
br %r14
/* CLOCK_THREAD_CPUTIME_ID for this thread */
return -EINVAL;
}
- switch (kvm_run->exit_reason) {
- case KVM_EXIT_S390_SIEIC:
- case KVM_EXIT_UNKNOWN:
- case KVM_EXIT_INTR:
- case KVM_EXIT_S390_RESET:
- case KVM_EXIT_S390_UCONTROL:
- case KVM_EXIT_S390_TSCH:
- case KVM_EXIT_DEBUG:
- break;
- default:
- BUG();
- }
-
vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
pte_t *ptep;
down_read(&mm->mmap_sem);
+retry:
ptep = get_locked_pte(current->mm, addr, &ptl);
if (unlikely(!ptep)) {
up_read(&mm->mmap_sem);
return -EFAULT;
}
+ if (!(pte_val(*ptep) & _PAGE_INVALID) &&
+ (pte_val(*ptep) & _PAGE_PROTECT)) {
+ pte_unmap_unlock(*ptep, ptl);
+ if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
+ up_read(&mm->mmap_sem);
+ return -EFAULT;
+ }
+ goto retry;
+ }
new = old = pgste_get_lock(ptep);
pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
get_page(page);
+ __flush_anon_page(page, addr);
+ flush_dcache_page(page);
pages[*nr] = page;
(*nr)++;
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
+extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_level4_pgt[];
#define swapper_pg_dir init_level4_pgt
*
* We can construct this by grafting the Xen provided pagetable into
* head_64.S's preconstructed pagetables. We copy the Xen L2's into
- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
- * means that only the kernel has a physical mapping to start with -
- * but that's enough to get __va working. We need to fill in the rest
- * of the physical mapping once some sort of allocator has been set
- * up.
- * NOTE: for PVH, the page tables are native.
+ * level2_ident_pgt, and level2_kernel_pgt. This means that only the
+ * kernel has a physical mapping to start with - but that's enough to
+ * get __va working. We need to fill in the rest of the physical
+ * mapping once some sort of allocator has been set up. NOTE: for
+ * PVH, the page tables are native.
*/
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
/* L3_i[0] -> level2_ident_pgt */
convert_pfn_mfn(level3_ident_pgt);
/* L3_k[510] -> level2_kernel_pgt
- * L3_i[511] -> level2_fixmap_pgt */
+ * L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
+
+ /* L3_k[511][506] -> level1_fixmap_pgt */
+ convert_pfn_mfn(level2_fixmap_pgt);
}
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
addr[1] = (unsigned long)l3;
addr[2] = (unsigned long)l2;
/* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
- * Both L4[272][0] and L4[511][511] have entries that point to the same
+ * Both L4[272][0] and L4[511][510] have entries that point to the same
* L2 (PMD) tables. Meaning that if you modify it in __va space
* it will be also modified in the __ka space! (But if you just
* modify the PMD table to point to other PTE's or none, then you
* are OK - which is what cleanup_highmap does) */
copy_page(level2_ident_pgt, l2);
- /* Graft it onto L4[511][511] */
+ /* Graft it onto L4[511][510] */
copy_page(level2_kernel_pgt, l2);
- /* Get [511][510] and graft that in level2_fixmap_pgt */
- l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
- l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
- copy_page(level2_fixmap_pgt, l2);
- /* Note that we don't do anything with level1_fixmap_pgt which
- * we don't need. */
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
/* Make pagetable pieces RO */
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
struct asymmetric_key_subtype public_key_subtype = {
.owner = THIS_MODULE,
.name = "public_key",
+ .name_len = sizeof("public_key") - 1,
.describe = public_key_describe,
.destroy = public_key_destroy,
.verify_signature = public_key_verify_signature_2,
{
struct win_certificate wrapper;
const u8 *pkcs7;
+ unsigned len;
if (ctx->sig_len < sizeof(wrapper)) {
pr_debug("Signature wrapper too short\n");
return -ENOTSUPP;
}
- /* Looks like actual pkcs signature length is in wrapper->length.
- * size obtained from data dir entries lists the total size of
- * certificate table which is also aligned to octawrod boundary.
- *
- * So set signature length field appropriately.
+ /* It looks like the pkcs signature length in wrapper->length and the
+	 * size obtained from the data dir entries (the total size of the
+	 * certificate table) are both aligned to an octaword boundary, so
+	 * we may have to deal with some padding.
*/
ctx->sig_len = wrapper.length;
ctx->sig_offset += sizeof(wrapper);
ctx->sig_len -= sizeof(wrapper);
- if (ctx->sig_len == 0) {
+ if (ctx->sig_len < 4) {
pr_debug("Signature data missing\n");
return -EKEYREJECTED;
}
- /* What's left should a PKCS#7 cert */
+ /* What's left should be a PKCS#7 cert */
pkcs7 = pebuf + ctx->sig_offset;
- if (pkcs7[0] == (ASN1_CONS_BIT | ASN1_SEQ)) {
- if (pkcs7[1] == 0x82 &&
- pkcs7[2] == (((ctx->sig_len - 4) >> 8) & 0xff) &&
- pkcs7[3] == ((ctx->sig_len - 4) & 0xff))
- return 0;
- if (pkcs7[1] == 0x80)
- return 0;
- if (pkcs7[1] > 0x82)
- return -EMSGSIZE;
+ if (pkcs7[0] != (ASN1_CONS_BIT | ASN1_SEQ))
+ goto not_pkcs7;
+
+ switch (pkcs7[1]) {
+ case 0 ... 0x7f:
+ len = pkcs7[1] + 2;
+ goto check_len;
+ case ASN1_INDEFINITE_LENGTH:
+ return 0;
+ case 0x81:
+ len = pkcs7[2] + 3;
+ goto check_len;
+ case 0x82:
+ len = ((pkcs7[2] << 8) | pkcs7[3]) + 4;
+ goto check_len;
+ case 0x83 ... 0xff:
+ return -EMSGSIZE;
+ default:
+ goto not_pkcs7;
}
+check_len:
+ if (len <= ctx->sig_len) {
+ /* There may be padding */
+ ctx->sig_len = len;
+ return 0;
+ }
+not_pkcs7:
pr_debug("Signature data not PKCS#7\n");
return -ELIBBAD;
}
void *handler_context, void *region_context)
{
int i;
- u8 *value = (u8 *)&value64;
+ u8 *value = (u8 *)value64;
if (address > 0xff || !value64)
return AE_BAD_PARAMETER;
return acpi_dev_suspend_late(dev);
}
-static int acpi_lpss_restore_early(struct device *dev)
+static int acpi_lpss_resume_early(struct device *dev)
{
int ret = acpi_dev_resume_early(dev);
static struct dev_pm_domain acpi_lpss_pm_domain = {
.ops = {
#ifdef CONFIG_PM_SLEEP
- .suspend_late = acpi_lpss_suspend_late,
- .restore_early = acpi_lpss_restore_early,
.prepare = acpi_subsys_prepare,
.complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
- .resume_early = acpi_subsys_resume_early,
+ .suspend_late = acpi_lpss_suspend_late,
+ .resume_early = acpi_lpss_resume_early,
.freeze = acpi_subsys_freeze,
.poweroff = acpi_subsys_suspend,
- .poweroff_late = acpi_subsys_suspend_late,
+ .poweroff_late = acpi_lpss_suspend_late,
+ .restore_early = acpi_lpss_resume_early,
#endif
#ifdef CONFIG_PM_RUNTIME
.runtime_suspend = acpi_lpss_runtime_suspend,
acpi_ns_check_package_list(info, package, elements, count);
break;
+ case ACPI_PTYPE2_UUID_PAIR:
+
+ /* The package must contain pairs of (UUID + type) */
+
+ if (count & 1) {
+ expected_count = count + 1;
+ goto package_too_small;
+ }
+
+ while (count > 0) {
+ status = acpi_ns_check_object_type(info, elements,
+ package->ret_info.
+ object_type1, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Validate length of the UUID buffer */
+
+ if ((*elements)->buffer.length != 16) {
+ ACPI_WARN_PREDEFINED((AE_INFO,
+ info->full_pathname,
+ info->node_flags,
+ "Invalid length for UUID Buffer"));
+ return (AE_AML_OPERAND_VALUE);
+ }
+
+ status = acpi_ns_check_object_type(info, elements + 1,
+ package->ret_info.
+ object_type2, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ elements += 2;
+ count -= 2;
+ }
+ break;
+
default:
/* Should not get here if predefined info table is correct */
" invalid.\n");
}
- /*
- * When fully charged, some batteries wrongly report
- * capacity_now = design_capacity instead of = full_charge_capacity
- */
- if (battery->capacity_now > battery->full_charge_capacity
- && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
- if (battery->capacity_now != battery->design_capacity)
- printk_once(KERN_WARNING FW_BUG
- "battery: reported current charge level (%d) "
- "is higher than reported maximum charge level (%d).\n",
- battery->capacity_now, battery->full_charge_capacity);
- battery->capacity_now = battery->full_charge_capacity;
- }
-
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
&& battery->capacity_now >= 0 && battery->capacity_now <= 100)
battery->capacity_now = (battery->capacity_now *
DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
{
+ ec_flag_msi, "Clevo W350etq", {
+ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO CO."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "W35_37ET"),}, NULL},
+ {
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
{
if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
- cpuidle_pause_and_lock();
/* Protect against cpu-hotplug */
get_online_cpus();
+ cpuidle_pause_and_lock();
/* Disable all cpuidle devices */
for_each_online_cpu(cpu) {
cpuidle_enable_device(dev);
}
}
- put_online_cpus();
cpuidle_resume_and_unlock();
+ put_online_cpus();
}
return 0;
acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
+ acpi_status status;
+ unsigned long long sun;
+
+ status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
- return sprintf(buf, "%lu\n", acpi_dev->pnp.sun);
+ return sprintf(buf, "%llu\n", sun);
}
static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
- unsigned long long sun;
int result = 0;
/*
if (dev->pnp.unique_id)
result = device_create_file(&dev->dev, &dev_attr_uid);
- status = acpi_evaluate_integer(dev->handle, "_SUN", NULL, &sun);
- if (ACPI_SUCCESS(status)) {
- dev->pnp.sun = (unsigned long)sun;
+ if (acpi_has_method(dev->handle, "_SUN")) {
result = device_create_file(&dev->dev, &dev_attr_sun);
if (result)
goto end;
- } else {
- dev->pnp.sun = (unsigned long)-1;
}
if (acpi_has_method(dev->handle, "_STA")) {
 * For Windows 8 systems: used to decide if the video module
 * should skip registering a backlight interface of its own.
*/
-static int use_native_backlight_param = 1;
+static int use_native_backlight_param = -1;
module_param_named(use_native_backlight, use_native_backlight_param, int, 0444);
-static bool use_native_backlight_dmi = false;
+static bool use_native_backlight_dmi = true;
static int register_count;
static struct mutex video_list_lock;
return 0;
}
+static int __init video_disable_native_backlight(const struct dmi_system_id *d)
+{
+ use_native_backlight_dmi = false;
+ return 0;
+}
+
static struct dmi_system_id video_dmi_table[] __initdata = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
},
},
+
+ /*
+ * These models have a working acpi_video backlight control, and using
+ * native backlight causes a regression where backlight does not work
+ * when userspace is not handling brightness key events. Disable
+ * native_backlight on these to fix this:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=81691
+ */
+ {
+ .callback = video_disable_native_backlight,
+ .ident = "ThinkPad T420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T420"),
+ },
+ },
+ {
+ .callback = video_disable_native_backlight,
+ .ident = "ThinkPad T520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"),
+ },
+ },
+
+ /* The native backlight controls do not work on some older machines */
+ {
+ /* https://bugs.freedesktop.org/show_bug.cgi?id=81515 */
+ .callback = video_disable_native_backlight,
+ .ident = "HP ENVY 15 Notebook",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
+ },
+ },
{}
};
{ PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9182 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
+ /*
+	 * The JMicron 361/363 chips contain one SATA controller and one
+	 * PATA controller. To power both controllers on, they must be
+	 * brought up one after the other; otherwise one of them cannot be
+	 * powered on successfully, so here we disable the async suspend
+ * method for these chips.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
+ (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
+ pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
+ device_disable_async_suspend(&pdev->dev);
+
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
*/
#include <linux/ahci_platform.h>
-#include <linux/reset.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
+
#include "ahci.h"
#define SATA_CONFIGURATION_0 0x180
/* Pad calibration */
- /* FIXME Always use calibration 0. Change this to read the calibration
- * fuse once the fuse driver has landed. */
- val = 0;
+ ret = tegra_fuse_readl(FUSE_SATA_CALIB, &val);
+ if (ret) {
+ dev_err(&tegra->pdev->dev,
+ "failed to read calibration fuse: %d\n", ret);
+ return ret;
+ }
calib = tegra124_pad_calibration[val & FUSE_SATA_CALIB_MASK];
#define CFG_MEM_RAM_SHUTDOWN 0x00000070
#define BLOCK_MEM_RDY 0x00000074
+/* Max retry for link down */
+#define MAX_LINK_DOWN_RETRY 3
+
struct xgene_ahci_context {
struct ahci_host_priv *hpriv;
struct device *dev;
return rc;
}
+static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
+{
+ void __iomem *diagcsr = ctx->csr_diag;
+
+ return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
+ readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
+}
+
/**
* xgene_ahci_read_id - Read ID data from the specified device
* @dev: device
 * and Gen1 (1.5Gbps). Otherwise, during long IO stress tests, the PHY will
 * report disparity errors and the like. In addition, during COMRESET, errors
 * can be reported in the register PORT_SCR_ERR. For SERR_DISPARITY and
- * SERR_10B_8B_ERR, the PHY receiver line must be reseted. The following
- * algorithm is followed to proper configure the hardware PHY during COMRESET:
+ * SERR_10B_8B_ERR, the PHY receiver line must be reset. Also, during long
+ * reboot cycle regression testing, the PHY sometimes reports link down even
+ * though the device is present, because of speed negotiation failure, so we
+ * need to retry the COMRESET to get the link up. The following algorithm is
+ * used to properly configure the hardware PHY during COMRESET:
*
* Alg Part 1:
* 1. Start the PHY at Gen3 speed (default setting)
* Alg Part 2:
* 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR error
* reported in the register PORT_SCR_ERR, then reset the PHY receiver line
- * 2. Go to Alg Part 3
+ * 2. Go to Alg Part 4
*
* Alg Part 3:
+ * 1. Check PORT_SCR_STAT to see whether device presence was detected but
+ *    PHY communication establishment failed; if so, and fewer than the
+ *    maximum of 3 link-down attempts have been made, go to Alg Part 1.
+ * 2. Go to Alg Part 4.
+ *
+ * Alg Part 4:
* 1. Clear any pending from register PORT_SCR_ERR.
*
* NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_taskfile tf;
+ int link_down_retry = 0;
int rc;
- u32 val;
-
- /* clear D2H reception area to properly wait for D2H FIS */
- ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
- ata_tf_to_fis(&tf, 0, 0, d2h_fis);
- rc = sata_link_hardreset(link, timing, deadline, online,
+ u32 val, sstatus;
+
+ do {
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
+ tf.command = ATA_BUSY;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+ rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);
+ if (*online) {
+ val = readl(port_mmio + PORT_SCR_ERR);
+ if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
+ dev_warn(ctx->dev, "link has error\n");
+ break;
+ }
- val = readl(port_mmio + PORT_SCR_ERR);
- if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
- dev_warn(ctx->dev, "link has error\n");
+ sata_scr_read(link, SCR_STATUS, &sstatus);
+ } while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
+ (sstatus & 0xff) == 0x1);
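The retry condition keys off the SATA SStatus register: its DET field (bits 3:0) reads 0x1 when a device is detected but PHY communication has not been established, and masking with 0xff additionally requires the negotiated-speed field to be zero. A minimal sketch of that test, with a hypothetical helper name:

	/* Sketch only: the SStatus fields behind the retry loop above.
	 * DET (bits 3:0) == 0x1: device detected, no PHY communication.
	 * SPD (bits 7:4) == 0:   no link speed negotiated. */
	static inline bool link_down_but_device_present(u32 sstatus)
	{
		return (sstatus & 0xff) == 0x1;
	}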
/* clear all errors if any pending */
val = readl(port_mmio + PORT_SCR_ERR);
return -ENODEV;
}
+ if (xgene_ahci_is_memram_inited(ctx)) {
+ dev_info(dev, "skip clock and PHY initialization\n");
+ goto skip_clk_phy;
+ }
+
/* Due to errata, HW requires full toggle transition */
rc = ahci_platform_enable_clks(hpriv);
if (rc)
/* Configure the host controller */
xgene_ahci_hw_init(hpriv);
-
+skip_clk_phy:
hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ;
rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info);
{ 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
/* SATA Controller IDE (Coleto Creek) */
{ 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (9 Series) */
+ { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (9 Series) */
+ { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (9 Series) */
+ { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (9 Series) */
+ { 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
{ } /* terminate list */
};
};
const struct ata_port_info *ppi[] = { &info, NULL };
+ /*
+ * The JMicron chip 361/363 contains one SATA controller and one
+ * PATA controller. To power on both controllers, we must follow
+ * the power-on sequence one by one; otherwise one of them cannot
+ * be powered on successfully, so here we disable the async suspend
+ * method for these chips.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
+ (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
+ pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
+ device_disable_async_suspend(&pdev->dev);
+
return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
}
enum regcache_type type;
int (*init)(struct regmap *map);
int (*exit)(struct regmap *map);
+#ifdef CONFIG_DEBUG_FS
+ void (*debugfs_init)(struct regmap *map);
+#endif
int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
{
debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
-#else
-static void rbtree_debugfs_init(struct regmap *map)
-{
-}
#endif
static int regcache_rbtree_init(struct regmap *map)
goto err;
}
- rbtree_debugfs_init(map);
-
return 0;
err:
.name = "rbtree",
.init = regcache_rbtree_init,
.exit = regcache_rbtree_exit,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = rbtree_debugfs_init,
+#endif
.read = regcache_rbtree_read,
.write = regcache_rbtree_write,
.sync = regcache_rbtree_sync,
unsigned int block_base, unsigned int start,
unsigned int end)
{
- if (regmap_can_raw_write(map))
+ if (regmap_can_raw_write(map) && !map->use_single_rw)
return regcache_sync_block_raw(map, block, cache_present,
block_base, start, end);
else
next = rb_next(&range_node->node);
}
+
+ if (map->cache_ops && map->cache_ops->debugfs_init)
+ map->cache_ops->debugfs_init(map);
}
void regmap_debugfs_exit(struct regmap *map)
bool regmap_volatile(struct regmap *map, unsigned int reg)
{
- if (!regmap_readable(map, reg))
+ if (!map->format.format_write && !regmap_readable(map, reg))
return false;
if (map->volatile_reg)
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xA8DB */
{ 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
- rbd_dev->rq_wq = alloc_workqueue(rbd_dev->disk->disk_name, 0, 0);
- if (!rbd_dev->rq_wq)
+ rbd_dev->rq_wq = alloc_workqueue("%s", 0, 0, rbd_dev->disk->disk_name);
+ if (!rbd_dev->rq_wq) {
+ ret = -ENOMEM;
goto err_out_mapping;
+ }
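The alloc_workqueue() change is a format-string fix: its first argument is a printf-style format, so passing disk_name directly would be misparsed if the name ever contained a '%'; the added braces also make the error path return an explicit -ENOMEM. The safe pattern, sketched with a hypothetical name variable:

	/* Sketch: route externally supplied strings through "%s". */
	struct workqueue_struct *wq = alloc_workqueue("%s", 0, 0, name);
	if (!wq)
		return -ENOMEM;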
ret = rbd_bus_add_dev(rbd_dev);
if (ret)
return 0;
}
+static void arm_ccn_pmu_event_destroy(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
+ clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
+ } else {
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
+ CCN_CONFIG_EVENT(event->attr.config) ==
+ CCN_EVENT_WATCHPOINT)
+ clear_bit(hw->config_base, source->xp.dt_cmp_mask);
+ else
+ clear_bit(hw->config_base, source->pmu_events_mask);
+ clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
+ }
+
+ ccn->dt.pmu_counters[hw->idx].source = NULL;
+ ccn->dt.pmu_counters[hw->idx].event = NULL;
+}
+
static int arm_ccn_pmu_event_init(struct perf_event *event)
{
struct arm_ccn *ccn;
return -ENOENT;
ccn = pmu_to_arm_ccn(event->pmu);
+ event->destroy = arm_ccn_pmu_event_destroy;
if (hw->sample_period) {
dev_warn(ccn->dev, "Sampling not supported!\n");
return 0;
}
-static void arm_ccn_pmu_event_free(struct perf_event *event)
-{
- struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
- struct hw_perf_event *hw = &event->hw;
-
- if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
- clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
- } else {
- struct arm_ccn_component *source =
- ccn->dt.pmu_counters[hw->idx].source;
-
- if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
- CCN_CONFIG_EVENT(event->attr.config) ==
- CCN_EVENT_WATCHPOINT)
- clear_bit(hw->config_base, source->xp.dt_cmp_mask);
- else
- clear_bit(hw->config_base, source->pmu_events_mask);
- clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
- }
-
- ccn->dt.pmu_counters[hw->idx].source = NULL;
- ccn->dt.pmu_counters[hw->idx].event = NULL;
-}
-
static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
{
u64 res;
static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
{
arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
-
- arm_ccn_pmu_event_free(event);
}
static void arm_ccn_pmu_event_read(struct perf_event *event)
goto out;
}
- freq_table = kcalloc(sizeof(*freq_table), (max_opps + 1), GFP_ATOMIC);
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
if (!freq_table) {
ret = -ENOMEM;
goto out;
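The argument swap matters because kcalloc() puts the element count first:

	/* void *kcalloc(size_t n, size_t size, gfp_t flags);
	 * allocates a zeroed array of n elements, so the count
	 * (max_opps + 1) belongs in the first slot. */
	freq_table = kcalloc(max_opps + 1, sizeof(*freq_table), GFP_ATOMIC);

The old order happened to allocate the same number of bytes, but it inverts the documented convention and trips static checkers.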
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
- struct cpudata *cpu;
-
- cpu = all_cpu_data[policy->cpu];
-
if (!policy->cpuinfo.max_freq)
return -ENODEV;
vchan_cyclic_callback(&chan->desc->vdesc);
} else {
if (chan->next_sg == chan->desc->num_sgs) {
- chan->desc = NULL;
+ list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
+ chan->desc = NULL;
}
}
}
bgwrite(~0x0, BT848_INT_STAT);
bgwrite(0x0, BT848_GPIO_OUT_EN);
- iounmap(bg->mmio);
- release_mem_region(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
pci_disable_device(pdev);
}
{
struct ast_private *ast = dev->dev_private;
uint32_t data, jreg;
+ ast_open_key(ast);
if (dev->pdev->device == PCI_CHIP_AST1180) {
ast->chip = AST1100;
}
ast->vga2_clone = false;
} else {
- ast->chip = 2000;
+ ast->chip = AST2000;
DRM_INFO("AST 2000 detected\n");
}
}
intel_power_domains_init_hw(dev_priv);
+ /*
+ * We enable some interrupt sources in our postinstall hooks, so mark
+ * interrupts as enabled _before_ actually enabling them to avoid
+ * special cases in our ordering checks.
+ */
+ dev_priv->pm._irqs_disabled = false;
+
ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
goto cleanup_gem_stolen;
- dev_priv->pm._irqs_disabled = false;
-
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
if ((1 << (domain)) & (mask))
struct drm_i915_private;
+struct i915_mm_struct;
struct i915_mmu_object;
enum intel_dpll_id {
struct i915_gtt gtt; /* VM representing the global address space */
struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
- DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+ DECLARE_HASHTABLE(mm_structs, 7);
+ struct mutex mm_lock;
/* Kernel Modesetting */
unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15
- struct mm_struct *mm;
- struct i915_mmu_object *mn;
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
struct work_struct *work;
} userptr;
};
out:
switch (ret) {
case -EIO:
- /* If this -EIO is due to a gpu hang, give the reset code a
- * chance to clean up the mess. Otherwise return the proper
- * SIGBUS. */
- if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+ /*
+ * We eat errors when the gpu is terminally wedged to avoid
+ * userspace unduly crashing (gl has no provisions for mmaps to
+ * fail). But any other -EIO isn't ours (e.g. swap in failure)
+ * and so needs to be reported.
+ */
+ if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
ret = VM_FAULT_SIGBUS;
break;
}
#include <linux/mempolicy.h>
#include <linux/swap.h>
+struct i915_mm_struct {
+ struct mm_struct *mm;
+ struct drm_device *dev;
+ struct i915_mmu_notifier *mn;
+ struct hlist_node node;
+ struct kref kref;
+ struct work_struct work;
+};
+
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>
struct mmu_notifier mn;
struct rb_root objects;
struct list_head linear;
- struct drm_device *dev;
- struct mm_struct *mm;
- struct work_struct work;
- unsigned long count;
unsigned long serial;
bool has_linear;
};
struct i915_mmu_object {
- struct i915_mmu_notifier *mmu;
+ struct i915_mmu_notifier *mn;
struct interval_tree_node it;
struct list_head link;
struct drm_i915_gem_object *obj;
unsigned long start,
unsigned long end)
{
- struct i915_mmu_object *mmu;
+ struct i915_mmu_object *mo;
unsigned long serial;
restart:
serial = mn->serial;
- list_for_each_entry(mmu, &mn->linear, link) {
+ list_for_each_entry(mo, &mn->linear, link) {
struct drm_i915_gem_object *obj;
- if (mmu->it.last < start || mmu->it.start > end)
+ if (mo->it.last < start || mo->it.start > end)
continue;
- obj = mmu->obj;
+ obj = mo->obj;
drm_gem_object_reference(&obj->base);
spin_unlock(&mn->lock);
};
static struct i915_mmu_notifier *
-__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_mmu_notifier *mmu;
-
- /* Protected by dev->struct_mutex */
- hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
- if (mmu->mm == mm)
- return mmu;
-
- return NULL;
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+i915_mmu_notifier_create(struct mm_struct *mm)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_mmu_notifier *mmu;
+ struct i915_mmu_notifier *mn;
int ret;
- lockdep_assert_held(&dev->struct_mutex);
-
- mmu = __i915_mmu_notifier_lookup(dev, mm);
- if (mmu)
- return mmu;
-
- mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
- if (mmu == NULL)
+ mn = kmalloc(sizeof(*mn), GFP_KERNEL);
+ if (mn == NULL)
return ERR_PTR(-ENOMEM);
- spin_lock_init(&mmu->lock);
- mmu->dev = dev;
- mmu->mn.ops = &i915_gem_userptr_notifier;
- mmu->mm = mm;
- mmu->objects = RB_ROOT;
- mmu->count = 0;
- mmu->serial = 1;
- INIT_LIST_HEAD(&mmu->linear);
- mmu->has_linear = false;
-
- /* Protected by mmap_sem (write-lock) */
- ret = __mmu_notifier_register(&mmu->mn, mm);
+ spin_lock_init(&mn->lock);
+ mn->mn.ops = &i915_gem_userptr_notifier;
+ mn->objects = RB_ROOT;
+ mn->serial = 1;
+ INIT_LIST_HEAD(&mn->linear);
+ mn->has_linear = false;
+
+ /* Protected by mmap_sem (write-lock) */
+ ret = __mmu_notifier_register(&mn->mn, mm);
if (ret) {
- kfree(mmu);
+ kfree(mn);
return ERR_PTR(ret);
}
- /* Protected by dev->struct_mutex */
- hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
- return mmu;
+ return mn;
}
-static void
-__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
{
- struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
- mmu_notifier_unregister(&mmu->mn, mmu->mm);
- kfree(mmu);
-}
-
-static void
-__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
-{
- lockdep_assert_held(&mmu->dev->struct_mutex);
-
- /* Protected by dev->struct_mutex */
- hash_del(&mmu->node);
-
- /* Our lock ordering is: mmap_sem, mmu_notifier_scru, struct_mutex.
- * We enter the function holding struct_mutex, therefore we need
- * to drop our mutex prior to calling mmu_notifier_unregister in
- * order to prevent lock inversion (and system-wide deadlock)
- * between the mmap_sem and struct-mutex. Hence we defer the
- * unregistration to a workqueue where we hold no locks.
- */
- INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
- schedule_work(&mmu->work);
-}
-
-static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
-{
- if (++mmu->serial == 0)
- mmu->serial = 1;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
-{
- struct i915_mmu_object *mn;
-
- list_for_each_entry(mn, &mmu->linear, link)
- if (mn->is_linear)
- return true;
-
- return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
- struct i915_mmu_object *mn)
-{
- lockdep_assert_held(&mmu->dev->struct_mutex);
-
- spin_lock(&mmu->lock);
- list_del(&mn->link);
- if (mn->is_linear)
- mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
- else
- interval_tree_remove(&mn->it, &mmu->objects);
- __i915_mmu_notifier_update_serial(mmu);
- spin_unlock(&mmu->lock);
-
- /* Protected against _add() by dev->struct_mutex */
- if (--mmu->count == 0)
- __i915_mmu_notifier_destroy(mmu);
+ if (++mn->serial == 0)
+ mn->serial = 1;
}
static int
-i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
- struct i915_mmu_object *mn)
+i915_mmu_notifier_add(struct drm_device *dev,
+ struct i915_mmu_notifier *mn,
+ struct i915_mmu_object *mo)
{
struct interval_tree_node *it;
int ret;
- ret = i915_mutex_lock_interruptible(mmu->dev);
+ ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
* remove the objects from the interval tree) before we do
* the check for overlapping objects.
*/
- i915_gem_retire_requests(mmu->dev);
+ i915_gem_retire_requests(dev);
- spin_lock(&mmu->lock);
- it = interval_tree_iter_first(&mmu->objects,
- mn->it.start, mn->it.last);
+ spin_lock(&mn->lock);
+ it = interval_tree_iter_first(&mn->objects,
+ mo->it.start, mo->it.last);
if (it) {
struct drm_i915_gem_object *obj;
obj = container_of(it, struct i915_mmu_object, it)->obj;
if (!obj->userptr.workers)
- mmu->has_linear = mn->is_linear = true;
+ mn->has_linear = mo->is_linear = true;
else
ret = -EAGAIN;
} else
- interval_tree_insert(&mn->it, &mmu->objects);
+ interval_tree_insert(&mo->it, &mn->objects);
if (ret == 0) {
- list_add(&mn->link, &mmu->linear);
- __i915_mmu_notifier_update_serial(mmu);
+ list_add(&mo->link, &mn->linear);
+ __i915_mmu_notifier_update_serial(mn);
}
- spin_unlock(&mmu->lock);
- mutex_unlock(&mmu->dev->struct_mutex);
+ spin_unlock(&mn->lock);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
+{
+ struct i915_mmu_object *mo;
+
+ list_for_each_entry(mo, &mn->linear, link)
+ if (mo->is_linear)
+ return true;
+
+ return false;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
+ struct i915_mmu_object *mo)
+{
+ spin_lock(&mn->lock);
+ list_del(&mo->link);
+ if (mo->is_linear)
+ mn->has_linear = i915_mmu_notifier_has_linear(mn);
+ else
+ interval_tree_remove(&mo->it, &mn->objects);
+ __i915_mmu_notifier_update_serial(mn);
+ spin_unlock(&mn->lock);
+}
+
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
- struct i915_mmu_object *mn;
+ struct i915_mmu_object *mo;
- mn = obj->userptr.mn;
- if (mn == NULL)
+ mo = obj->userptr.mmu_object;
+ if (mo == NULL)
return;
- i915_mmu_notifier_del(mn->mmu, mn);
- obj->userptr.mn = NULL;
+ i915_mmu_notifier_del(mo->mn, mo);
+ kfree(mo);
+
+ obj->userptr.mmu_object = NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_find(struct i915_mm_struct *mm)
+{
+ if (mm->mn == NULL) {
+ down_write(&mm->mm->mmap_sem);
+ mutex_lock(&to_i915(mm->dev)->mm_lock);
+ if (mm->mn == NULL)
+ mm->mn = i915_mmu_notifier_create(mm->mm);
+ mutex_unlock(&to_i915(mm->dev)->mm_lock);
+ up_write(&mm->mm->mmap_sem);
+ }
+ return mm->mn;
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
unsigned flags)
{
- struct i915_mmu_notifier *mmu;
- struct i915_mmu_object *mn;
+ struct i915_mmu_notifier *mn;
+ struct i915_mmu_object *mo;
int ret;
if (flags & I915_USERPTR_UNSYNCHRONIZED)
return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
- down_write(&obj->userptr.mm->mmap_sem);
- ret = i915_mutex_lock_interruptible(obj->base.dev);
- if (ret == 0) {
- mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
- if (!IS_ERR(mmu))
- mmu->count++; /* preemptive add to act as a refcount */
- else
- ret = PTR_ERR(mmu);
- mutex_unlock(&obj->base.dev->struct_mutex);
- }
- up_write(&obj->userptr.mm->mmap_sem);
- if (ret)
- return ret;
+ if (WARN_ON(obj->userptr.mm == NULL))
+ return -EINVAL;
- mn = kzalloc(sizeof(*mn), GFP_KERNEL);
- if (mn == NULL) {
- ret = -ENOMEM;
- goto destroy_mmu;
- }
+ mn = i915_mmu_notifier_find(obj->userptr.mm);
+ if (IS_ERR(mn))
+ return PTR_ERR(mn);
- mn->mmu = mmu;
- mn->it.start = obj->userptr.ptr;
- mn->it.last = mn->it.start + obj->base.size - 1;
- mn->obj = obj;
+ mo = kzalloc(sizeof(*mo), GFP_KERNEL);
+ if (mo == NULL)
+ return -ENOMEM;
- ret = i915_mmu_notifier_add(mmu, mn);
- if (ret)
- goto free_mn;
+ mo->mn = mn;
+ mo->it.start = obj->userptr.ptr;
+ mo->it.last = mo->it.start + obj->base.size - 1;
+ mo->obj = obj;
- obj->userptr.mn = mn;
+ ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
+ if (ret) {
+ kfree(mo);
+ return ret;
+ }
+
+ obj->userptr.mmu_object = mo;
return 0;
+}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ if (mn == NULL)
+ return;
-free_mn:
+ mmu_notifier_unregister(&mn->mn, mm);
kfree(mn);
-destroy_mmu:
- mutex_lock(&obj->base.dev->struct_mutex);
- if (--mmu->count == 0)
- __i915_mmu_notifier_destroy(mmu);
- mutex_unlock(&obj->base.dev->struct_mutex);
- return ret;
}
#else
return 0;
}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+}
+
#endif
+static struct i915_mm_struct *
+__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+{
+ struct i915_mm_struct *mm;
+
+ /* Protected by dev_priv->mm_lock */
+ hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+ if (mm->mm == real)
+ return mm;
+
+ return NULL;
+}
+
+static int
+i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_mm_struct *mm;
+ int ret = 0;
+
+ /* During release of the GEM object we hold the struct_mutex. This
+ * precludes us from calling mmput() at that time as that may be
+ * the last reference and so call exit_mmap(). exit_mmap() will
+ * attempt to reap the vma, and if we were holding a GTT mmap
+ * would then call drm_gem_vm_close() and attempt to reacquire
+ * the struct mutex. So in order to avoid that recursion, we have
+ * to defer releasing the mm reference until after we drop the
+ * struct_mutex, i.e. we need to schedule a worker to do the clean
+ * up.
+ */
+ mutex_lock(&dev_priv->mm_lock);
+ mm = __i915_mm_struct_find(dev_priv, current->mm);
+ if (mm == NULL) {
+ mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+ if (mm == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ kref_init(&mm->kref);
+ mm->dev = obj->base.dev;
+
+ mm->mm = current->mm;
+ atomic_inc(&current->mm->mm_count);
+
+ mm->mn = NULL;
+
+ /* Protected by dev_priv->mm_lock */
+ hash_add(dev_priv->mm_structs,
+ &mm->node, (unsigned long)mm->mm);
+ } else
+ kref_get(&mm->kref);
+
+ obj->userptr.mm = mm;
+out:
+ mutex_unlock(&dev_priv->mm_lock);
+ return ret;
+}
+
+static void
+__i915_mm_struct_free__worker(struct work_struct *work)
+{
+ struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+ i915_mmu_notifier_free(mm->mn, mm->mm);
+ mmdrop(mm->mm);
+ kfree(mm);
+}
+
+static void
+__i915_mm_struct_free(struct kref *kref)
+{
+ struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+
+ /* Protected by dev_priv->mm_lock */
+ hash_del(&mm->node);
+ mutex_unlock(&to_i915(mm->dev)->mm_lock);
+
+ INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
+ schedule_work(&mm->work);
+}
+
+static void
+i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
+{
+ if (obj->userptr.mm == NULL)
+ return;
+
+ kref_put_mutex(&obj->userptr.mm->kref,
+ __i915_mm_struct_free,
+ &to_i915(obj->base.dev)->mm_lock);
+ obj->userptr.mm = NULL;
+}
+
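One subtlety in the release path above: kref_put_mutex() takes the given mutex only when the refcount is about to hit zero and then invokes the release callback with the mutex held, which is why __i915_mm_struct_free() ends with mutex_unlock() rather than a balanced lock/unlock pair. The generic shape of the idiom, with hypothetical names:

	/* Sketch of the kref_put_mutex() idiom (foo/table_lock are
	 * hypothetical). The release callback runs with table_lock
	 * held and is responsible for dropping it. */
	static void foo_release(struct kref *kref)
	{
		struct foo *f = container_of(kref, struct foo, kref);

		hash_del(&f->node);		/* still under table_lock */
		mutex_unlock(&table_lock);	/* taken by kref_put_mutex() */
		kfree(f);
	}

	/* dropping a reference: */
	kref_put_mutex(&f->kref, foo_release, &table_lock);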
struct get_pages_work {
struct work_struct work;
struct drm_i915_gem_object *obj;
struct task_struct *task;
};
-
#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
if (pvec == NULL)
pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
if (pvec != NULL) {
- struct mm_struct *mm = obj->userptr.mm;
+ struct mm_struct *mm = obj->userptr.mm->mm;
down_read(&mm->mmap_sem);
while (pinned < num_pages) {
pvec = NULL;
pinned = 0;
- if (obj->userptr.mm == current->mm) {
+ if (obj->userptr.mm->mm == current->mm) {
pvec = kmalloc(num_pages*sizeof(struct page *),
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (pvec == NULL) {
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
i915_gem_userptr_release__mmu_notifier(obj);
-
- if (obj->userptr.mm) {
- mmput(obj->userptr.mm);
- obj->userptr.mm = NULL;
- }
+ i915_gem_userptr_release__mm_struct(obj);
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
- if (obj->userptr.mn)
+ if (obj->userptr.mmu_object)
return 0;
return i915_gem_userptr_init__mmu_notifier(obj, 0);
return -ENODEV;
}
- /* Allocate the new object */
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return -ENOMEM;
* at binding. This means that we need to hook into the mmu_notifier
* in order to detect if the mmu is destroyed.
*/
- ret = -ENOMEM;
- if ((obj->userptr.mm = get_task_mm(current)))
+ ret = i915_gem_userptr_init__mm_struct(obj);
+ if (ret == 0)
ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
if (ret == 0)
ret = drm_gem_handle_create(file, &obj->base, &handle);
int
i915_gem_init_userptr(struct drm_device *dev)
{
-#if defined(CONFIG_MMU_NOTIFIER)
struct drm_i915_private *dev_priv = to_i915(dev);
- hash_init(dev_priv->mmu_notifiers);
-#endif
+ mutex_init(&dev_priv->mm_lock);
+ hash_init(dev_priv->mm_structs);
return 0;
}
#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
-#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
+
+#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+#define BLT_WRITE_A (2<<20)
+#define BLT_WRITE_RGB (1<<20)
+#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A)
#define BLT_DEPTH_8 (0<<24)
#define BLT_DEPTH_16_565 (1<<24)
#define BLT_DEPTH_16_1555 (2<<24)
#define BLT_DEPTH_32 (3<<24)
-#define BLT_ROP_GXCOPY (0xcc<<16)
+#define BLT_ROP_SRC_COPY (0xcc<<16)
+#define BLT_ROP_COLOR_COPY (0xf0<<16)
#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
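In these blitter opcodes the low bits encode the packet length as (total dwords - 2), so COLOR_BLT_CMD's (5-2) declares a five-dword command. A sketch of the layout, as consumed by the TLB-evict sequence further below:

	/* COLOR_BLT packet layout (sketch):
	 *   dw0  COLOR_BLT_CMD | BLT_WRITE_RGBA   (length field = 5 - 2)
	 *   dw1  BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | dest pitch
	 *   dw2  height << 16 | width in bytes
	 *   dw3  destination address
	 *   dw4  solid fill value
	 */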
}
}
-static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Falling back to manually reading VBT from "
"VBIOS ROM for %s\n",
.destroy = intel_encoder_destroy,
};
-static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
{
DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
return 1;
if (need_vtd_wa(dev) && alignment < 256 * 1024)
alignment = 256 * 1024;
+ /*
+ * Global gtt pte registers are special registers which actually forward
+ * writes to a chunk of system memory. Which means that there is no risk
+ * that the register values disappear as soon as we call
+ * intel_runtime_pm_put(), so it is correct to wrap only the
+ * pin/unpin/fence and not more.
+ */
+ intel_runtime_pm_get(dev_priv);
+
dev_priv->mm.interruptible = false;
ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
if (ret)
i915_gem_object_pin_fence(obj);
dev_priv->mm.interruptible = true;
+ intel_runtime_pm_put(dev_priv);
return 0;
err_unpin:
i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
dev_priv->mm.interruptible = true;
+ intel_runtime_pm_put(dev_priv);
return ret;
}
intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
intel_disable_pipe(dev_priv, pipe);
-
- if (intel_crtc->config.dp_encoder_is_mst)
- intel_ddi_set_vc_payload_alloc(crtc, false);
-
ironlake_pfit_disable(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
intel_disable_pipe(dev_priv, pipe);
+ if (intel_crtc->config.dp_encoder_is_mst)
+ intel_ddi_set_vc_payload_alloc(crtc, false);
+
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
ironlake_pfit_disable(intel_crtc);
goto fail_locked;
}
+ /*
+ * Global gtt pte registers are special registers which actually
+ * forward writes to a chunk of system memory. Which means that
+ * there is no risk that the register values disappear as soon
+ * as we call intel_runtime_pm_put(), so it is correct to wrap
+ * only the pin/unpin/fence and not more.
+ */
+ intel_runtime_pm_get(dev_priv);
+
/* Note that the w/a also requires 2 PTE of padding following
* the bo. We currently fill all unused PTE with the shadow
* page and so we should always have valid PTE following the
ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
if (ret) {
DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
+ intel_runtime_pm_put(dev_priv);
goto fail_locked;
}
ret = i915_gem_object_put_fence(obj);
if (ret) {
DRM_DEBUG_KMS("failed to release fence for cursor");
+ intel_runtime_pm_put(dev_priv);
goto fail_unpin;
}
addr = i915_gem_obj_ggtt_offset(obj);
+
+ intel_runtime_pm_put(dev_priv);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_object_attach_phys(obj, align);
/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+ /* Acer C720 Chromebook (Core i3 4005U) */
+ { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
/* Toshiba CB35 Chromebook (Celeron 2955U) */
{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
return intel_dp_detect_dpcd(intel_dp);
}
-static enum drm_connector_status
-g4x_dp_detect(struct intel_dp *intel_dp)
+static int g4x_digital_port_connected(struct drm_device *dev,
+ struct intel_digital_port *intel_dig_port)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
uint32_t bit;
- /* Can't disconnect eDP, but you can close the lid... */
- if (is_edp(intel_dp)) {
- enum drm_connector_status status;
-
- status = intel_panel_detect(dev);
- if (status == connector_status_unknown)
- status = connector_status_connected;
- return status;
- }
-
if (IS_VALLEYVIEW(dev)) {
switch (intel_dig_port->port) {
case PORT_B:
bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
break;
default:
- return connector_status_unknown;
+ return -EINVAL;
}
} else {
switch (intel_dig_port->port) {
bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
break;
default:
- return connector_status_unknown;
+ return -EINVAL;
}
}
if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
+ return 0;
+ return 1;
+}
+
+static enum drm_connector_status
+g4x_dp_detect(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ int ret;
+
+ /* Can't disconnect eDP, but you can close the lid... */
+ if (is_edp(intel_dp)) {
+ enum drm_connector_status status;
+
+ status = intel_panel_detect(dev);
+ if (status == connector_status_unknown)
+ status = connector_status_connected;
+ return status;
+ }
+
+ ret = g4x_digital_port_connected(dev, intel_dig_port);
+ if (ret == -EINVAL)
+ return connector_status_unknown;
+ else if (ret == 0)
return connector_status_disconnected;
return intel_dp_detect_dpcd(intel_dp);
intel_display_power_get(dev_priv, power_domain);
if (long_hpd) {
- if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
- goto mst_fail;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+ goto mst_fail;
+ } else {
+ if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
+ goto mst_fail;
+ }
if (!intel_dp_get_dpcd(intel_dp)) {
goto mst_fail;
.destroy = intel_encoder_destroy,
};
-static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
+static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
return 1;
cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
- WARN(1, "cpu backlight already enabled\n");
+ DRM_DEBUG_KMS("cpu backlight already enabled\n");
cpu_ctl2 &= ~BLM_PWM_ENABLE;
I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
}
ctl = I915_READ(BLC_PWM_CTL);
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- WARN(1, "backlight already enabled\n");
+ DRM_DEBUG_KMS("backlight already enabled\n");
I915_WRITE(BLC_PWM_CTL, 0);
}
ctl2 = I915_READ(BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
- WARN(1, "backlight already enabled\n");
+ DRM_DEBUG_KMS("backlight already enabled\n");
ctl2 &= ~BLM_PWM_ENABLE;
I915_WRITE(BLC_PWM_CTL2, ctl2);
}
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
- WARN(1, "backlight already enabled\n");
+ DRM_DEBUG_KMS("backlight already enabled\n");
ctl2 &= ~BLM_PWM_ENABLE;
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
}
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
+ u32 cs_offset = ring->scratch.gtt_offset;
int ret;
- if (flags & I915_DISPATCH_PINNED) {
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- } else {
- u32 cs_offset = ring->scratch.gtt_offset;
+ /* Evict the invalid PTE TLBs */
+ intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+ intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0xdeadbeef);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ if ((flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
- ret = intel_ring_begin(ring, 9+3);
+ ret = intel_ring_begin(ring, 6 + 2);
if (ret)
return ret;
- /* Blit the batch (which has now all relocs applied) to the stable batch
- * scratch bo area (so that the CS never stumbles over its tlb
- * invalidation bug) ... */
- intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
- XY_SRC_COPY_BLT_WRITE_ALPHA |
- XY_SRC_COPY_BLT_WRITE_RGB);
- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+
+ /* Blit the batch (which has now all relocs applied) to the
+ * stable batch scratch bo area (so that the CS never
+ * stumbles over its tlb invalidation bug) ...
+ */
+ intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+ intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 0);
intel_ring_emit(ring, 4096);
intel_ring_emit(ring, offset);
+
intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
/* ... and execute it. */
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
- intel_ring_emit(ring, cs_offset + len - 8);
- intel_ring_advance(ring);
+ offset = cs_offset;
}
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
return 0;
}
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
- obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+ obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
if (obj == NULL) {
DRM_ERROR("Failed to allocate batch bo\n");
return -ENOMEM;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ /* Prevents vblank waits from timing out in intel_tv_detect_type() */
+ intel_wait_for_vblank(encoder->base.dev,
+ to_intel_crtc(encoder->base.crtc)->pipe);
+
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
}
{
struct drm_display_mode mode;
struct intel_tv *intel_tv = intel_attached_tv(connector);
+ enum drm_connector_status status;
int type;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
type = intel_tv_detect_type(intel_tv, connector);
intel_release_load_detect_pipe(connector, &tmp);
+ status = type < 0 ?
+ connector_status_disconnected :
+ connector_status_connected;
} else
- return connector_status_unknown;
+ status = connector_status_unknown;
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
} else
return connector->status;
- if (type < 0)
- return connector_status_disconnected;
+ if (status != connector_status_connected)
+ return status;
intel_tv->type = type;
intel_tv_find_better_format(connector);
priv->hdmi_pdev = pdev;
}
+#ifdef CONFIG_OF
+static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
+{
+ int gpio = of_get_named_gpio(of_node, name, 0);
+ if (gpio < 0) {
+ char name2[32];
+ snprintf(name2, sizeof(name2), "%s-gpio", name);
+ gpio = of_get_named_gpio(of_node, name2, 0);
+ if (gpio < 0) {
+ dev_err(dev, "failed to get gpio: %s (%d)\n",
+ name, gpio);
+ gpio = -1;
+ }
+ }
+ return gpio;
+}
+#endif
+
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
static struct hdmi_platform_config config = {};
#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
- int get_gpio(const char *name)
- {
- int gpio = of_get_named_gpio(of_node, name, 0);
- if (gpio < 0) {
- char name2[32];
- snprintf(name2, sizeof(name2), "%s-gpio", name);
- gpio = of_get_named_gpio(of_node, name2, 0);
- if (gpio < 0) {
- dev_err(dev, "failed to get gpio: %s (%d)\n",
- name, gpio);
- gpio = -1;
- }
- }
- return gpio;
- }
-
if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
}
config.mmio_name = "core_physical";
- config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
- config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
- config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
- config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
- config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
- config.mux_lpm_gpio = get_gpio("qcom,hdmi-tx-mux-lpm");
+ config.ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
+ config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
+ config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
+ config.mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
+ config.mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
+ config.mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
#else
static const char *hpd_clk_names[] = {
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#ifdef CONFIG_COMMON_CLK
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#endif
#include "hdmi.h"
struct hdmi_phy_8960 {
struct hdmi_phy base;
struct hdmi *hdmi;
+#ifdef CONFIG_COMMON_CLK
struct clk_hw pll_hw;
struct clk *pll;
unsigned long pixclk;
+#endif
};
#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+
+#ifdef CONFIG_COMMON_CLK
#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
/*
.parent_names = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
};
-
+#endif
/*
* HDMI Phy:
{
struct hdmi_phy_8960 *phy_8960;
struct hdmi_phy *phy = NULL;
- int ret, i;
+ int ret;
+#ifdef CONFIG_COMMON_CLK
+ int i;
/* sanity check: */
for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
return ERR_PTR(-EINVAL);
+#endif
phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
if (!phy_8960) {
phy_8960->hdmi = hdmi;
+#ifdef CONFIG_COMMON_CLK
phy_8960->pll_hw.init = &pll_init;
phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
if (IS_ERR(phy_8960->pll)) {
phy_8960->pll = NULL;
goto fail;
}
+#endif
return phy;
#define reglog 0
#endif
-static char *vram;
+static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
sclass = nv_parent(parent)->sclass;
while (sclass) {
if (++nr < size)
- lclass[nr] = sclass->oclass->handle;
+ lclass[nr] = sclass->oclass->handle & 0xffff;
sclass = sclass->sclass;
}
if (engine && (oclass = engine->sclass)) {
while (oclass->ofuncs) {
if (++nr < size)
- lclass[nr] = oclass->handle;
+ lclass[nr] = oclass->handle & 0xffff;
oclass++;
}
}
u8 msg[DP_DPCD_SIZE];
int ret;
- char dpcd_hex_dump[DP_DPCD_SIZE * 3];
-
ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
DP_DPCD_SIZE);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
- hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
- 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
- DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+ DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+ dig_connector->dpcd);
radeon_dp_probe_oui(radeon_connector);
radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
- /* PFP_SYNC_ME packet only exists on 7xx+ */
- if (emit_wait && (rdev->family >= CHIP_RV770)) {
+ /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
+ if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
/* Prevent the PFP from running ahead of the semaphore wait */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
}
+ /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+ if ((dev->pdev->device == 0x9805) &&
+ (dev->pdev->subsystem_vendor == 0x1734) &&
+ (dev->pdev->subsystem_device == 0x11bd)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+ return false;
+ }
return true;
}
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
- } else if ((controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
- (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
- (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
- DRM_INFO("Special thermal controller config\n");
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+ DRM_INFO("External GPIO thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+ DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+ DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- uint32_t *cpu_addr;
+ uint64_t *cpu_addr;
int i, r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
res,
id_loc - sw_context->buf_start);
if (unlikely(ret != 0))
- goto out_err;
+ return ret;
ret = vmw_resource_val_add(sw_context, res, &node);
if (unlikely(ret != 0))
- goto out_err;
+ return ret;
if (res_type == vmw_res_context && dev_priv->has_mob &&
node->first_usage) {
ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
if (unlikely(ret != 0))
- goto out_err;
+ return ret;
node->staged_bindings =
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
if (node->staged_bindings == NULL) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
- goto out_err;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&node->staged_bindings->list);
}
if (p_val)
*p_val = node;
-out_err:
- return ret;
+ return 0;
}
mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
- vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+ ;
dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
data->conf |= (resol << DS1621_REG_CONFIG_RESOL_SHIFT);
i2c_smbus_write_byte_data(client, DS1621_REG_CONF, data->conf);
data->update_interval = ds1721_convrates[resol];
+ data->zbits = 7 - resol;
mutex_unlock(&data->update_lock);
return count;
unsigned twi_cwgr_reg;
struct at91_twi_pdata *pdata;
bool use_dma;
+ bool recv_len_abort;
struct at91_twi_dma dma;
};
*dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
--dev->buf_len;
+ /* return if aborting, we only needed to read RHR to clear RXRDY */
+ if (dev->recv_len_abort)
+ return;
+
/* handle I2C_SMBUS_BLOCK_DATA */
if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
- dev->msg->flags &= ~I2C_M_RECV_LEN;
- dev->buf_len += *dev->buf;
- dev->msg->len = dev->buf_len + 1;
- dev_dbg(dev->dev, "received block length %d\n", dev->buf_len);
+ /* ensure length byte is a valid value */
+ if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
+ dev->msg->flags &= ~I2C_M_RECV_LEN;
+ dev->buf_len += *dev->buf;
+ dev->msg->len = dev->buf_len + 1;
+ dev_dbg(dev->dev, "received block length %d\n",
+ dev->buf_len);
+ } else {
+ /* abort and send the stop by reading one more byte */
+ dev->recv_len_abort = true;
+ dev->buf_len = 1;
+ }
}
/* send stop if second but last byte has been read */
}
}
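For reference, an SMBus block read frames its payload with a leading count byte that must lie in 1..I2C_SMBUS_BLOCK_MAX (32); since the controller cannot terminate the transfer mid-byte, an invalid count is handled by reading exactly one more byte and then stopping, as the abort path above does. Simplified wire format:

	/* SMBus block read, simplified (sketch):
	 *   S addr Rd [A] count [A] data1 [A] ... dataN [NA] P
	 * count outside 1..I2C_SMBUS_BLOCK_MAX (32) => abort:
	 * read one final byte to clear RXRDY, then send the stop. */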
- ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
- dev->adapter.timeout);
+ ret = wait_for_completion_io_timeout(&dev->cmd_complete,
+ dev->adapter.timeout);
if (ret == 0) {
dev_err(dev->dev, "controller timed out\n");
at91_init_twi_bus(dev);
ret = -EIO;
goto error;
}
+ if (dev->recv_len_abort) {
+ dev_err(dev->dev, "invalid smbus block length recvd\n");
+ ret = -EPROTO;
+ goto error;
+ }
+
dev_dbg(dev->dev, "transfer complete\n");
return 0;
dev->buf_len = m_start->len;
dev->buf = m_start->buf;
dev->msg = m_start;
+ dev->recv_len_abort = false;
ret = at91_do_twi_transfer(dev);
}
tclk = clk_get_rate(drv_data->clk);
- rc = of_property_read_u32(np, "clock-frequency", &bus_freq);
- if (rc)
+ if (of_property_read_u32(np, "clock-frequency", &bus_freq))
bus_freq = 100000; /* 100kHz by default */
if (!mv64xxx_find_baud_factors(bus_freq, tclk,
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
/* register offsets */
#define ICSCR 0x00 /* slave ctrl */
struct i2c_msg *msg;
struct clk *clk;
+ spinlock_t lock;
wait_queue_head_t wait;
int pos;
struct rcar_i2c_priv *priv = ptr;
u32 msr;
+ /*-------------- spin lock -----------------*/
+ spin_lock(&priv->lock);
+
msr = rcar_i2c_read(priv, ICMSR);
+ /* Only handle interrupts that are currently enabled */
+ msr &= rcar_i2c_read(priv, ICMIER);
+
/* Arbitration lost */
if (msr & MAL) {
rcar_i2c_flags_set(priv, (ID_DONE | ID_ARBLOST));
goto out;
}
- /* Stop */
- if (msr & MST) {
- rcar_i2c_flags_set(priv, ID_DONE);
- goto out;
- }
-
/* Nack */
if (msr & MNR) {
/* go to stop phase */
goto out;
}
+ /* Stop */
+ if (msr & MST) {
+ rcar_i2c_flags_set(priv, ID_DONE);
+ goto out;
+ }
+
if (rcar_i2c_is_recv(priv))
rcar_i2c_flags_set(priv, rcar_i2c_irq_recv(priv, msr));
else
wake_up(&priv->wait);
}
+ spin_unlock(&priv->lock);
+ /*-------------- spin unlock -----------------*/
+
return IRQ_HANDLED;
}
{
struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
struct device *dev = rcar_i2c_priv_to_dev(priv);
+ unsigned long flags;
int i, ret, timeout;
pm_runtime_get_sync(dev);
+ /*-------------- spin lock -----------------*/
+ spin_lock_irqsave(&priv->lock, flags);
+
rcar_i2c_init(priv);
/* start clock */
rcar_i2c_write(priv, ICCCR, priv->icccr);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ /*-------------- spin unlock -----------------*/
+
ret = rcar_i2c_bus_barrier(priv);
if (ret < 0)
goto out;
break;
}
+ /*-------------- spin lock -----------------*/
+ spin_lock_irqsave(&priv->lock, flags);
+
/* init each data */
priv->msg = &msgs[i];
priv->pos = 0;
ret = rcar_i2c_prepare_msg(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ /*-------------- spin unlock -----------------*/
+
if (ret < 0)
break;
irq = platform_get_irq(pdev, 0);
init_waitqueue_head(&priv->wait);
+ spin_lock_init(&priv->lock);
adap = &priv->adap;
adap->nr = pdev->id;
/* ack interrupt */
i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
+ /* Can only handle a maximum of 32 bytes at a time */
+ if (len > 32)
+ len = 32;
+
/* read the data from receive buffer */
for (i = 0; i < len; ++i) {
if (i % 4 == 0)
return err;
}
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+ u64 *reg_id)
+{
+ void *ib_flow;
+ union ib_flow_spec *ib_spec;
+ struct mlx4_dev *dev = to_mdev(qp->device)->dev;
+ int err = 0;
+
+ if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ return 0; /* do nothing */
+
+ ib_flow = flow_attr + 1;
+ ib_spec = (union ib_flow_spec *)ib_flow;
+
+ if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+ return 0; /* do nothing */
+
+ err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+ flow_attr->port, qp->qp_num,
+ MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+ reg_id);
+ return err;
+}
+
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
i++;
}
+ if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+ err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+ if (err)
+ goto err_free;
+ }
+
return &mflow->ibflow;
err_free:
}
}
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
MLX4_IB_LINK_TYPE_ETH;
+ if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ /* set QP to receive both tunneled & non-tunneled packets */
+ if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+ context->srqn = cpu_to_be32(7 << 28);
+ }
+ }
if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
int is_eth = rdma_port_get_link_layer(
}
EXPORT_SYMBOL(input_mt_report_pointer_emulation);
+static void __input_mt_drop_unused(struct input_dev *dev, struct input_mt *mt)
+{
+ int i;
+
+ for (i = 0; i < mt->num_slots; i++) {
+ if (!input_mt_is_used(mt, &mt->slots[i])) {
+ input_mt_slot(dev, i);
+ input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
+ }
+ }
+}
+
/**
* input_mt_drop_unused() - Inactivate slots not seen in this frame
* @dev: input device with allocated MT slots
void input_mt_drop_unused(struct input_dev *dev)
{
struct input_mt *mt = dev->mt;
- int i;
- if (!mt)
- return;
-
- for (i = 0; i < mt->num_slots; i++) {
- if (!input_mt_is_used(mt, &mt->slots[i])) {
- input_mt_slot(dev, i);
- input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
- }
+ if (mt) {
+ __input_mt_drop_unused(dev, mt);
+ mt->frame++;
}
-
- mt->frame++;
}
EXPORT_SYMBOL(input_mt_drop_unused);
return;
if (mt->flags & INPUT_MT_DROP_UNUSED)
- input_mt_drop_unused(dev);
+ __input_mt_drop_unused(dev, mt);
if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT))
use_count = true;
input_mt_report_pointer_emulation(dev, use_count);
+
+ mt->frame++;
}
EXPORT_SYMBOL(input_mt_sync_frame);
#define CAP1106_REG_SENSOR_CONFIG 0x22
#define CAP1106_REG_SENSOR_CONFIG2 0x23
#define CAP1106_REG_SAMPLING_CONFIG 0x24
-#define CAP1106_REG_CALIBRATION 0x25
-#define CAP1106_REG_INT_ENABLE 0x26
+#define CAP1106_REG_CALIBRATION 0x26
+#define CAP1106_REG_INT_ENABLE 0x27
#define CAP1106_REG_REPEAT_RATE 0x28
#define CAP1106_REG_MT_CONFIG 0x2a
#define CAP1106_REG_MT_PATTERN_CONFIG 0x2b
}
if (pdata->clustered_irq > 0) {
- err = request_irq(pdata->clustered_irq,
+ err = request_any_context_irq(pdata->clustered_irq,
matrix_keypad_interrupt,
pdata->clustered_irq_flags,
"matrix-keypad", keypad);
- if (err) {
+ if (err < 0) {
dev_err(&pdev->dev,
"Unable to acquire clustered interrupt\n");
goto err_free_rows;
}
} else {
for (i = 0; i < pdata->num_row_gpios; i++) {
- err = request_irq(gpio_to_irq(pdata->row_gpios[i]),
+ err = request_any_context_irq(
+ gpio_to_irq(pdata->row_gpios[i]),
matrix_keypad_interrupt,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING,
"matrix-keypad", keypad);
- if (err) {
+ if (err < 0) {
dev_err(&pdev->dev,
"Unable to acquire interrupt for GPIO line %i\n",
pdata->row_gpios[i]);
return 0;
}
- psmouse_info(psmouse,
- "Unknown ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
+ psmouse_dbg(psmouse,
+ "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
return -EINVAL;
}
dev2->keybit[BIT_WORD(BTN_LEFT)] =
BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
+ __set_bit(INPUT_PROP_POINTER, dev2->propbit);
+ if (priv->flags & ALPS_DUALPOINT)
+ __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit);
+
if (input_register_device(priv->dev2))
goto init_fail;
#include <linux/input/mt.h>
#include <linux/serio.h>
#include <linux/libps2.h>
+#include <asm/unaligned.h>
#include "psmouse.h"
#include "elantech.h"
input_sync(dev);
}
+static void elantech_report_trackpoint(struct psmouse *psmouse,
+ int packet_type)
+{
+ /*
+ * byte 0: 0 0 sx sy 0 M R L
+ * byte 1:~sx 0 0 0 0 0 0 0
+ * byte 2:~sy 0 0 0 0 0 0 0
+ * byte 3: 0 0 ~sy ~sx 0 1 1 0
+ * byte 4: x7 x6 x5 x4 x3 x2 x1 x0
+ * byte 5: y7 y6 y5 y4 y3 y2 y1 y0
+ *
+ * x and y are written in two's complement spread
+ * over 9 bits with sx/sy the relative top bit and
+ * x7..x0 and y7..y0 the lower bits.
+ * The sign of y is opposite to what the input driver
+ * expects for a relative movement
+ */
+
+ struct elantech_data *etd = psmouse->private;
+ struct input_dev *tp_dev = etd->tp_dev;
+ unsigned char *packet = psmouse->packet;
+ int x, y;
+ u32 t;
+
+ if (dev_WARN_ONCE(&psmouse->ps2dev.serio->dev,
+ !tp_dev,
+ psmouse_fmt("Unexpected trackpoint message\n"))) {
+ if (etd->debug == 1)
+ elantech_packet_dump(psmouse);
+ return;
+ }
+
+ t = get_unaligned_le32(&packet[0]);
+
+ switch (t & ~7U) {
+ case 0x06000030U:
+ case 0x16008020U:
+ case 0x26800010U:
+ case 0x36808000U:
+ x = packet[4] - (int)((packet[1]^0x80) << 1);
+ y = (int)((packet[2]^0x80) << 1) - packet[5];
+
+ input_report_key(tp_dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(tp_dev, BTN_RIGHT, packet[0] & 0x02);
+ input_report_key(tp_dev, BTN_MIDDLE, packet[0] & 0x04);
+
+ input_report_rel(tp_dev, REL_X, x);
+ input_report_rel(tp_dev, REL_Y, y);
+
+ input_sync(tp_dev);
+
+ break;
+
+ default:
+ /* Dump unexpected packet sequences if debug=1 (default) */
+ if (etd->debug == 1)
+ elantech_packet_dump(psmouse);
+
+ break;
+ }
+}
+
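To make the 9-bit two's complement decode concrete, here is a standalone userspace sketch with made-up packet bytes: when the hardware sign bit indicates negative, (packet[n] ^ 0x80) << 1 evaluates to 0x100, so the subtraction sign-extends the eight low bits.

	#include <stdint.h>
	#include <stdio.h>

	/* Standalone sketch; the example bytes are hypothetical. */
	int main(void)
	{
		uint8_t packet[6] = { 0x00, 0x00, 0x80, 0x06, 0x05, 0x0a };

		/* byte 1 bit 7 is ~sx; 0 here means sx = 1 (negative),
		 * so (packet[1] ^ 0x80) << 1 == 0x100, x = 5 - 256 = -251 */
		int x = packet[4] - (int)((packet[1] ^ 0x80) << 1);

		/* byte 2 bit 7 is ~sy; 1 here means sy = 0, and the driver
		 * also flips y's sign, so y = 0 - 10 = -10 */
		int y = (int)((packet[2] ^ 0x80) << 1) - packet[5];

		printf("dx=%d dy=%d\n", x, y);	/* dx=-251 dy=-10 */
		return 0;
	}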
/*
* Interpret complete data packets and report absolute mode input events for
* hardware version 3. (12 byte packets for two fingers)
if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
return PACKET_V3_TAIL;
+ if ((packet[3] & 0x0f) == 0x06)
+ return PACKET_TRACKPOINT;
}
return PACKET_UNKNOWN;
case 3:
packet_type = elantech_packet_check_v3(psmouse);
- /* ignore debounce */
- if (packet_type == PACKET_DEBOUNCE)
- return PSMOUSE_FULL_PACKET;
-
- if (packet_type == PACKET_UNKNOWN)
+ switch (packet_type) {
+ case PACKET_UNKNOWN:
return PSMOUSE_BAD_DATA;
- elantech_report_absolute_v3(psmouse, packet_type);
+ case PACKET_DEBOUNCE:
+ /* ignore debounce */
+ break;
+
+ case PACKET_TRACKPOINT:
+ elantech_report_trackpoint(psmouse, packet_type);
+ break;
+
+ default:
+ elantech_report_absolute_v3(psmouse, packet_type);
+ break;
+ }
+
break;
case 4:
* Asus UX31 0x361f00 20, 15, 0e clickpad
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
* Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
+ * Lenovo L530 0x350f02 b9, 15, 0c 2 hw buttons (*)
* Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons
* Samsung NP770Z5E 0x575f01 10, 15, 0f clickpad
* Samsung NP700Z5B 0x361f06 21, 15, 0f clickpad
* Samsung RF710 0x450f00 ? 2 hw buttons
* System76 Pangolin 0x250f01 ? 2 hw buttons
* (*) + 3 trackpoint buttons
+ * (**) + 0 trackpoint buttons
+ * Note: Lenovo L430 and Lenovo L530 have the same fw_version/caps
*/
static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
{
if (param[1] == 0)
return true;
+ /*
+ * Some models have a revision higher than 20, meaning param[2] may
+ * be 10 or 20; skip the rates check for these.
+ */
+ if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
+ return true;
+
for (i = 0; i < ARRAY_SIZE(rates); i++)
if (param[2] == rates[i])
return false;
*/
static void elantech_disconnect(struct psmouse *psmouse)
{
+ struct elantech_data *etd = psmouse->private;
+
+ if (etd->tp_dev)
+ input_unregister_device(etd->tp_dev);
sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj,
&elantech_attr_group);
kfree(psmouse->private);
int elantech_init(struct psmouse *psmouse)
{
struct elantech_data *etd;
- int i, error;
+ int i;
+ int error = -EINVAL;
unsigned char param[3];
+ struct input_dev *tp_dev;
psmouse->private = etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL);
if (!etd)
goto init_fail;
}
+ /* The MSB indicates the presence of the trackpoint */
+ if ((etd->capabilities[0] & 0x80) == 0x80) {
+ tp_dev = input_allocate_device();
+
+ if (!tp_dev) {
+ error = -ENOMEM;
+ goto init_fail_tp_alloc;
+ }
+
+ etd->tp_dev = tp_dev;
+ snprintf(etd->tp_phys, sizeof(etd->tp_phys), "%s/input1",
+ psmouse->ps2dev.serio->phys);
+ tp_dev->phys = etd->tp_phys;
+ tp_dev->name = "Elantech PS/2 TrackPoint";
+ tp_dev->id.bustype = BUS_I8042;
+ tp_dev->id.vendor = 0x0002;
+ tp_dev->id.product = PSMOUSE_ELANTECH;
+ tp_dev->id.version = 0x0000;
+ tp_dev->dev.parent = &psmouse->ps2dev.serio->dev;
+ tp_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
+ tp_dev->relbit[BIT_WORD(REL_X)] =
+ BIT_MASK(REL_X) | BIT_MASK(REL_Y);
+ tp_dev->keybit[BIT_WORD(BTN_LEFT)] =
+ BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) |
+ BIT_MASK(BTN_RIGHT);
+
+ __set_bit(INPUT_PROP_POINTER, tp_dev->propbit);
+ __set_bit(INPUT_PROP_POINTING_STICK, tp_dev->propbit);
+
+ error = input_register_device(etd->tp_dev);
+ if (error < 0)
+ goto init_fail_tp_reg;
+ }
+
psmouse->protocol_handler = elantech_process_byte;
psmouse->disconnect = elantech_disconnect;
psmouse->reconnect = elantech_reconnect;
psmouse->pktsize = etd->hw_version > 1 ? 6 : 4;
return 0;
-
+ init_fail_tp_reg:
+ input_free_device(tp_dev);
+ init_fail_tp_alloc:
+ sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj,
+ &elantech_attr_group);
init_fail:
+ psmouse_reset(psmouse);
kfree(etd);
- return -1;
+ return error;
}
#define PACKET_V4_HEAD 0x05
#define PACKET_V4_MOTION 0x06
#define PACKET_V4_STATUS 0x07
+#define PACKET_TRACKPOINT 0x08
/*
* track up to 5 fingers for v4 hardware
};
struct elantech_data {
+ struct input_dev *tp_dev; /* Relative device for trackpoint */
+ char tp_phys[32];
unsigned char reg_07;
unsigned char reg_10;
unsigned char reg_11;
__set_bit(REL_X, input_dev->relbit);
__set_bit(REL_Y, input_dev->relbit);
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
psmouse->set_rate = psmouse_set_rate;
psmouse->set_resolution = psmouse_set_resolution;
psmouse->poll = psmouse_poll;
((buf[0] & 0x04) >> 1) |
((buf[3] & 0x04) >> 2));
+ if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
+ SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
+ hw->w == 2) {
+ synaptics_parse_agm(buf, priv, hw);
+ return 1;
+ }
+
+ hw->x = (((buf[3] & 0x10) << 8) |
+ ((buf[1] & 0x0f) << 8) |
+ buf[4]);
+ hw->y = (((buf[3] & 0x20) << 7) |
+ ((buf[1] & 0xf0) << 4) |
+ buf[5]);
+ hw->z = buf[2];
+
hw->left = (buf[0] & 0x01) ? 1 : 0;
hw->right = (buf[0] & 0x02) ? 1 : 0;
- if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
+ if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
+ /*
+ * ForcePads, like Clickpads, use middle button
+ * bits to report primary button clicks.
+ * Unfortunately they report primary button not
+ * only when user presses on the pad above certain
+ * threshold, but also when there are more than one
+ * finger on the touchpad, which interferes with
+ * out multi-finger gestures.
+ */
+ if (hw->z == 0) {
+ /* No contacts */
+ priv->press = priv->report_press = false;
+ } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) {
+ /*
+ * Single-finger touch with pressure above
+ * the threshold. If the pressure lasts long
+ * enough, we start reporting the primary
+ * button. We rely on the device continuing
+ * to send data even if the finger does not
+ * move.
+ */
+ if (!priv->press) {
+ priv->press_start = jiffies;
+ priv->press = true;
+ } else if (time_after(jiffies,
+ priv->press_start +
+ msecs_to_jiffies(50))) {
+ priv->report_press = true;
+ }
+ } else {
+ priv->press = false;
+ }
+
+ hw->left = priv->report_press;
+
+ } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
/*
* Clickpad's button is transmitted as middle button,
* however, since it is primary button, we will report
hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
}
- if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
- SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
- hw->w == 2) {
- synaptics_parse_agm(buf, priv, hw);
- return 1;
- }
-
- hw->x = (((buf[3] & 0x10) << 8) |
- ((buf[1] & 0x0f) << 8) |
- buf[4]);
- hw->y = (((buf[3] & 0x20) << 7) |
- ((buf[1] & 0xf0) << 4) |
- buf[5]);
- hw->z = buf[2];
-
if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
((buf[0] ^ buf[3]) & 0x02)) {
switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
* 2 0x08 image sensor image sensor tracks 5 fingers, but only
* reports 2.
* 2 0x20 report min query 0x0f gives min coord reported
+ * 2 0x80 forcepad forcepad is a variant of clickpad that
+ * does not have physical buttons but rather
+ * uses pressure above certain threshold to
+ * report primary clicks. Forcepads also have
+ * clickpad bit set.
*/
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
#define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
+#define SYN_CAP_FORCEPAD(ex0c) ((ex0c) & 0x008000)
/* synaptics modes query bits */
#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
*/
struct synaptics_hw_state agm;
bool agm_pending; /* new AGM packet received */
+
+ /* ForcePad handling */
+ unsigned long press_start;
+ bool press;
+ bool report_press;
};
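The ForcePad logic added above is a small time-based latch: a primary click is reported only after single-finger pressure has been held continuously for 50 ms, which keeps brief multi-finger contact from registering as a click. A minimal sketch of the pattern with the driver's three state fields pulled into parameters (a hypothetical helper, not part of the patch):

    #include <linux/jiffies.h>

    /* Returns true once "pressing" has been continuously true for 50 ms. */
    static bool press_latch(bool pressing, bool *press,
                            unsigned long *press_start)
    {
            if (!pressing) {
                    *press = false;
                    return false;
            }
            if (!*press) {
                    *press = true;
                    *press_start = jiffies;
                    return false;
            }
            return time_after(jiffies, *press_start + msecs_to_jiffies(50));
    }

The latch works because, as the comment notes, the device keeps streaming packets while pressure is applied even if the finger never moves, so the helper keeps being re-evaluated until the 50 ms window elapses.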
void synaptics_module_init(void);
__set_bit(EV_REL, input_dev->evbit);
__set_bit(REL_X, input_dev->relbit);
__set_bit(REL_Y, input_dev->relbit);
+ __set_bit(INPUT_PROP_POINTING_STICK, input_dev->propbit);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 127, 0, 0);
} else {
input_set_abs_params(input_dev, ABS_X,
__set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
}
+ if (synusb->flags & SYNUSB_TOUCHSCREEN)
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ else
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
__set_bit(BTN_LEFT, input_dev->keybit);
__set_bit(BTN_RIGHT, input_dev->keybit);
__set_bit(BTN_MIDDLE, input_dev->keybit);
if ((button_info & 0x0f) >= 3)
__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
+ __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
+ __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
+
trackpoint_defaults(psmouse->private);
error = trackpoint_power_on_reset(&psmouse->ps2dev);
#define I8042_MUX_PHYS_DESC "sparcps2/serio%d"
static void __iomem *kbd_iobase;
-static struct resource *kbd_res;
#define I8042_COMMAND_REG (kbd_iobase + 0x64UL)
#define I8042_DATA_REG (kbd_iobase + 0x60UL)
#ifdef CONFIG_PCI
+static struct resource *kbd_res;
+
#define OBP_PS2KBD_NAME1 "kb_ps2"
#define OBP_PS2KBD_NAME2 "keyboard"
#define OBP_PS2MS_NAME1 "kdmouse"
#include <linux/init.h>
#include <linux/serio.h>
#include <linux/tty.h>
+#include <linux/compat.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Input device TTY line discipline");
return 0;
}
+static void serport_set_type(struct tty_struct *tty, unsigned long type)
+{
+ struct serport *serport = tty->disc_data;
+
+ serport->id.proto = type & 0x000000ff;
+ serport->id.id = (type & 0x0000ff00) >> 8;
+ serport->id.extra = (type & 0x00ff0000) >> 16;
+}
+
/*
* serport_ldisc_ioctl() allows setting the port protocol and device ID
*/
-static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg)
+static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
{
- struct serport *serport = (struct serport*) tty->disc_data;
- unsigned long type;
-
if (cmd == SPIOCSTYPE) {
+ unsigned long type;
+
if (get_user(type, (unsigned long __user *) arg))
return -EFAULT;
- serport->id.proto = type & 0x000000ff;
- serport->id.id = (type & 0x0000ff00) >> 8;
- serport->id.extra = (type & 0x00ff0000) >> 16;
+ serport_set_type(tty, type);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t)
+static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
+ struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ if (cmd == COMPAT_SPIOCSTYPE) {
+ void __user *uarg = compat_ptr(arg);
+ compat_ulong_t compat_type;
+
+ if (get_user(compat_type, (compat_ulong_t __user *)uarg))
+ return -EFAULT;
+ serport_set_type(tty, compat_type);
return 0;
}
return -EINVAL;
}
+#endif
static void serport_ldisc_write_wakeup(struct tty_struct * tty)
{
.close = serport_ldisc_close,
.read = serport_ldisc_read,
.ioctl = serport_ldisc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = serport_ldisc_compat_ioctl,
+#endif
.receive_buf = serport_ldisc_receive,
.write_wakeup = serport_ldisc_write_wakeup
};
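For context, SPIOCSTYPE is the ioctl that inputattach(1) issues to tell the serport line discipline which serio protocol rides on the tty; the new compat handler makes the same call work from 32-bit userspace on a 64-bit kernel, where the argument is a compat_ulong_t and the ioctl number therefore differs from the native one. A hedged userspace sketch of the sequence (device path and protocol value are illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/serio.h>        /* SPIOCSTYPE, SERIO_* protocol numbers */

    #ifndef N_MOUSE
    #define N_MOUSE 2               /* serport line discipline */
    #endif

    int main(void)
    {
            int ldisc = N_MOUSE;
            unsigned long type = SERIO_MSC; /* proto | id << 8 | extra << 16 */
            int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

            if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0 ||
                ioctl(fd, SPIOCSTYPE, &type) < 0) {
                    perror("serport setup");
                    return 1;
            }
            for (;;)        /* the serio port lives as long as the ldisc */
                    pause();
    }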
count = data->msg_buf[0];
if (count == 0) {
- dev_warn(dev, "Interrupt triggered but zero messages\n");
+ /*
+ * This condition is caused by the CHG line being configured
+ * in Mode 0. It results in unnecessary I2C operations but it
+ * is benign.
+ */
+ dev_dbg(dev, "Interrupt triggered but zero messages\n");
return IRQ_NONE;
} else if (count > data->max_reportid) {
dev_err(dev, "T44 count %d exceeded max report id\n", count);
return 0;
}
-static void mxt_free_object_table(struct mxt_data *data)
+static void mxt_free_input_device(struct mxt_data *data)
{
- input_unregister_device(data->input_dev);
- data->input_dev = NULL;
+ if (data->input_dev) {
+ input_unregister_device(data->input_dev);
+ data->input_dev = NULL;
+ }
+}
+static void mxt_free_object_table(struct mxt_data *data)
+{
kfree(data->object_table);
data->object_table = NULL;
kfree(data->msg_buf);
ret = mxt_lookup_bootloader_address(data, 0);
if (ret)
goto release_firmware;
+
+ mxt_free_input_device(data);
+ mxt_free_object_table(data);
} else {
enable_irq(data->irq);
}
- mxt_free_object_table(data);
reinit_completion(&data->bl_completion);
ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD, false);
return 0;
err_free_object:
+ mxt_free_input_device(data);
mxt_free_object_table(data);
err_free_irq:
free_irq(client->irq, data);
sysfs_remove_group(&client->dev.kobj, &mxt_attr_group);
free_irq(data->irq, data);
- input_unregister_device(data->input_dev);
+ mxt_free_input_device(data);
mxt_free_object_table(data);
kfree(data);
*/
static int rpu = 8;
module_param(rpu, int, 0);
-MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");
+MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
/*
* Set current used for pressure measurement.
*/
static int rpu = 8;
module_param(rpu, int, 0);
-MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");
+MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
/*
* Set current used for pressure measurement.
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/device.h>
+#include <linux/timer.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/leds.h>
-#include <linux/workqueue.h>
#include "leds.h"
static struct class *leds_class;
NULL,
};
-static void led_work_function(struct work_struct *ws)
+static void led_timer_function(unsigned long data)
{
- struct led_classdev *led_cdev =
- container_of(ws, struct led_classdev, blink_work.work);
+ struct led_classdev *led_cdev = (void *)data;
unsigned long brightness;
unsigned long delay;
}
}
- queue_delayed_work(system_wq, &led_cdev->blink_work,
- msecs_to_jiffies(delay));
+ mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
}
static void set_brightness_delayed(struct work_struct *ws)
INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
- INIT_DELAYED_WORK(&led_cdev->blink_work, led_work_function);
+ init_timer(&led_cdev->blink_timer);
+ led_cdev->blink_timer.function = led_timer_function;
+ led_cdev->blink_timer.data = (unsigned long)led_cdev;
#ifdef CONFIG_LEDS_TRIGGERS
led_trigger_set_default(led_cdev);
#include <linux/module.h>
#include <linux/rwsem.h>
#include <linux/leds.h>
-#include <linux/workqueue.h>
#include "leds.h"
DECLARE_RWSEM(leds_list_lock);
return;
}
- queue_delayed_work(system_wq, &led_cdev->blink_work, 1);
+ mod_timer(&led_cdev->blink_timer, jiffies + 1);
}
unsigned long *delay_on,
unsigned long *delay_off)
{
- cancel_delayed_work_sync(&led_cdev->blink_work);
+ del_timer_sync(&led_cdev->blink_timer);
led_cdev->flags &= ~LED_BLINK_ONESHOT;
led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
int invert)
{
if ((led_cdev->flags & LED_BLINK_ONESHOT) &&
- delayed_work_pending(&led_cdev->blink_work))
+ timer_pending(&led_cdev->blink_timer))
return;
led_cdev->flags |= LED_BLINK_ONESHOT;
void led_stop_software_blink(struct led_classdev *led_cdev)
{
- cancel_delayed_work_sync(&led_cdev->blink_work);
+ del_timer_sync(&led_cdev->blink_timer);
led_cdev->blink_delay_on = 0;
led_cdev->blink_delay_off = 0;
}
void led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
- /* delay brightness setting if need to stop soft-blink work */
+ /* delay brightness setting if need to stop soft-blink timer */
if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
led_cdev->delayed_set_value = brightness;
schedule_work(&led_cdev->set_brightness_work);
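The LED core conversion above swaps the soft-blink delayed work for a classic self-rearming kernel timer: the callback computes the next on/off interval and mod_timer()s itself, and every path that used to cancel the work now calls del_timer_sync(). A minimal sketch of that pattern using the same pre-timer_setup() API the patch targets (names are illustrative):

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    static struct timer_list demo_timer;

    static void demo_fn(unsigned long data)
    {
            /* ... toggle state, pick the next delay ... */
            mod_timer(&demo_timer, jiffies + msecs_to_jiffies(100));
    }

    static void demo_start(unsigned long ctx)
    {
            init_timer(&demo_timer);
            demo_timer.function = demo_fn;
            demo_timer.data = ctx;
            mod_timer(&demo_timer, jiffies + msecs_to_jiffies(100));
    }

    static void demo_stop(void)
    {
            del_timer_sync(&demo_timer); /* waits out a running callback */
    }

Since the callback now runs in timer (softirq) context, anything that may sleep, such as stopping the blink before a brightness write, stays deferred to set_brightness_work, as the updated comment in led_set_brightness() notes.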
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, base, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
/* Enter lock register command */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
vp->tx_ring[entry].frag[i+1].addr =
- cpu_to_le32(pci_map_single(
- VORTEX_PCI(vp),
- (void *)skb_frag_address(frag),
- skb_frag_size(frag), PCI_DMA_TODEVICE));
+ cpu_to_le32(skb_frag_dma_map(
+ &VORTEX_PCI(vp)->dev,
+ frag,
0, skb_frag_size(frag), DMA_TO_DEVICE));
if (i == skb_shinfo(skb)->nr_frags-1)
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}
+static inline void greth_enable_tx_and_irq(struct greth_private *greth)
+{
+ wmb(); /* BDs must be written to memory before enabling TX */
+ GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
+}
+
static inline void greth_disable_tx(struct greth_private *greth)
{
GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
return err;
}
+static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
+{
+ if (tx_next < tx_last)
+ return (tx_last - tx_next) - 1;
+ else
+ return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
+}
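greth_num_free_bds() is standard circular-ring arithmetic: with producer index tx_next and consumer index tx_last, it reports how many buffer descriptors can still be filled while always keeping one slot unused, so a full ring (tx_next just behind tx_last) stays distinguishable from an empty one (tx_next == tx_last). The rmb() after reading tx_last in the xmit path pairs with the wmb() the cleaner executes before publishing its new tx_last, which is what lets the two sides run without sharing a lock. A quick self-contained check of the index math, assuming the driver's ring size of 128 (GRETH_TXBD_NUM):

    #include <assert.h>

    #define GRETH_TXBD_NUM 128      /* ring size assumed from the driver */

    static unsigned int num_free(unsigned int tx_last, unsigned int tx_next)
    {
            if (tx_next < tx_last)
                    return (tx_last - tx_next) - 1;
            return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
    }

    int main(void)
    {
            assert(num_free(0, 0) == GRETH_TXBD_NUM - 1); /* empty ring   */
            assert(num_free(5, 4) == 0);                  /* ring is full */
            assert(num_free(10, 5) == 4);                 /* 4 BDs usable */
            return 0;
    }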
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
struct greth_bd *bdp;
- u32 status = 0, dma_addr, ctrl;
+ u32 status, dma_addr;
int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
unsigned long flags;
+ u16 tx_last;
nr_frags = skb_shinfo(skb)->nr_frags;
+ tx_last = greth->tx_last;
+ rmb(); /* tx_last is updated by the poll task */
- /* Clean TX Ring */
- greth_clean_tx_gbit(dev);
-
- if (greth->tx_free < nr_frags + 1) {
- spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
- ctrl = GRETH_REGLOAD(greth->regs->control);
- /* Enable TX IRQ only if not already in poll() routine */
- if (ctrl & GRETH_RXI)
- GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+ if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
netif_stop_queue(dev);
- spin_unlock_irqrestore(&greth->devlock, flags);
err = NETDEV_TX_BUSY;
goto out;
}
/* Linear buf */
if (nr_frags != 0)
status = GRETH_TXBD_MORE;
+ else
+ status = GRETH_BD_IE;
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= GRETH_TXBD_CSALL;
/* Enable the descriptor chain by enabling the first descriptor */
bdp = greth->tx_bd_base + greth->tx_next;
- greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
- greth->tx_next = curr_tx;
- greth->tx_free -= nr_frags + 1;
-
- wmb();
+ greth_write_bd(&bdp->stat,
+ greth_read_bd(&bdp->stat) | GRETH_BD_EN);
spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
- greth_enable_tx(greth);
+ greth->tx_next = curr_tx;
+ greth_enable_tx_and_irq(greth);
spin_unlock_irqrestore(&greth->devlock, flags);
return NETDEV_TX_OK;
if (greth->tx_free > 0) {
netif_wake_queue(dev);
}
-
}
static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
{
struct greth_private *greth;
struct greth_bd *bdp, *bdp_last_frag;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
u32 stat;
int nr_frags, i;
+ u16 tx_last;
greth = netdev_priv(dev);
+ tx_last = greth->tx_last;
- while (greth->tx_free < GRETH_TXBD_NUM) {
+ while (tx_last != greth->tx_next) {
- skb = greth->tx_skbuff[greth->tx_last];
+ skb = greth->tx_skbuff[tx_last];
nr_frags = skb_shinfo(skb)->nr_frags;
/* We only clean fully completed SKBs */
- bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+ bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);
GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
mb();
if (stat & GRETH_BD_EN)
break;
- greth->tx_skbuff[greth->tx_last] = NULL;
+ greth->tx_skbuff[tx_last] = NULL;
greth_update_tx_stats(dev, stat);
dev->stats.tx_bytes += skb->len;
- bdp = greth->tx_bd_base + greth->tx_last;
+ bdp = greth->tx_bd_base + tx_last;
- greth->tx_last = NEXT_TX(greth->tx_last);
+ tx_last = NEXT_TX(tx_last);
dma_unmap_single(greth->dev,
greth_read_bd(&bdp->addr),
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- bdp = greth->tx_bd_base + greth->tx_last;
+ bdp = greth->tx_bd_base + tx_last;
dma_unmap_page(greth->dev,
greth_read_bd(&bdp->addr),
skb_frag_size(frag),
DMA_TO_DEVICE);
- greth->tx_last = NEXT_TX(greth->tx_last);
+ tx_last = NEXT_TX(tx_last);
}
- greth->tx_free += nr_frags+1;
dev_kfree_skb(skb);
}
+ if (skb) { /* skb is set only if the above while loop was entered */
+ wmb();
+ greth->tx_last = tx_last;
- if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
- netif_wake_queue(dev);
+ if (netif_queue_stopped(dev) &&
+ (greth_num_free_bds(tx_last, greth->tx_next) >
+ (MAX_SKB_FRAGS+1)))
+ netif_wake_queue(dev);
+ }
}
static int greth_rx(struct net_device *dev, int limit)
greth = container_of(napi, struct greth_private, napi);
restart_txrx_poll:
- if (netif_queue_stopped(greth->netdev)) {
- if (greth->gbit_mac)
- greth_clean_tx_gbit(greth->netdev);
- else
- greth_clean_tx(greth->netdev);
- }
-
if (greth->gbit_mac) {
+ greth_clean_tx_gbit(greth->netdev);
work_done += greth_rx_gbit(greth->netdev, budget - work_done);
} else {
+ if (netif_queue_stopped(greth->netdev))
+ greth_clean_tx(greth->netdev);
work_done += greth_rx(greth->netdev, budget - work_done);
}
spin_lock_irqsave(&greth->devlock, flags);
ctrl = GRETH_REGLOAD(greth->regs->control);
- if (netif_queue_stopped(greth->netdev)) {
+ if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
+ (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
GRETH_REGSAVE(greth->regs->control,
ctrl | GRETH_TXI | GRETH_RXI);
mask = GRETH_INT_RX | GRETH_INT_RE |
u16 tx_next;
u16 tx_last;
- u16 tx_free;
+ u16 tx_free; /* only used on 10/100Mbit */
u16 rx_cur;
struct greth_regs *regs; /* Address of controller registers. */
struct xgbe_prv_data *pdata = filp->private_data;
unsigned int value;
- value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
- pdata->debugfs_xpcs_reg);
+ value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
+ pdata->debugfs_xpcs_reg);
return xgbe_common_read(buffer, count, ppos, value);
}
if (len < 0)
return len;
- pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
- pdata->debugfs_xpcs_reg, value);
+ XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
+ value);
return len;
}
/* Clear MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
/* Set MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
/* Enable all counter interrupts */
- XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
- XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
}
static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
{
unsigned int i, count;
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+ return 0;
+
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}
-static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
- unsigned char queue_count)
+static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
+ unsigned int queue_count)
{
unsigned int q_fifo_size = 0;
enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
q_fifo_size = XGBE_FIFO_SIZE_KB(256);
break;
}
+
+ /* The configured value is not the actual amount of fifo RAM */
+ q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
+
q_fifo_size = q_fifo_size / queue_count;
/* Set the queue fifo size programmable value */
xgbe_disable_rx_vlan_stripping(pdata);
}
+static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+{
+ bool read_hi;
+ u64 val;
+
+ switch (reg_lo) {
+ /* These registers are always 64 bit */
+ case MMC_TXOCTETCOUNT_GB_LO:
+ case MMC_TXOCTETCOUNT_G_LO:
+ case MMC_RXOCTETCOUNT_GB_LO:
+ case MMC_RXOCTETCOUNT_G_LO:
+ read_hi = true;
+ break;
+
+ default:
+ read_hi = false;
+ }
+
+ val = XGMAC_IOREAD(pdata, reg_lo);
+
+ if (read_hi)
+ val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
+
+ return val;
+}
+
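xgbe_mmc_read() isolates a quirk of the MMC block: the four octet counters are 64 bits wide, with the high word at reg_lo + 4, while every other statistic is a plain 32-bit register. Reading only the low word would let the octet counts wrap and lose the high bits. The composition itself is the usual lo/hi fold; a minimal sketch with the register accessor left abstract:

    #include <stdint.h>

    /* Compose a 64-bit counter whose high 32 bits sit at reg_lo + 4. */
    static uint64_t read_counter64(uint32_t (*rd)(unsigned int reg),
                                   unsigned int reg_lo)
    {
            uint64_t val = rd(reg_lo);              /* low word first */

            val |= (uint64_t)rd(reg_lo + 4) << 32;  /* then high word */
            return val;
    }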
static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
stats->txoctetcount_gb +=
- XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
stats->txframecount_gb +=
- XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
stats->txbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
stats->txmulticastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
stats->tx64octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
stats->tx65to127octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
stats->tx128to255octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
stats->tx256to511octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
stats->tx512to1023octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
stats->tx1024tomaxoctets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
stats->txunicastframes_gb +=
- XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
stats->txmulticastframes_gb +=
- XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
stats->txbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
stats->txunderflowerror +=
- XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
stats->txoctetcount_g +=
- XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
stats->txframecount_g +=
- XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
stats->txpauseframes +=
- XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
stats->txvlanframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}
static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
stats->rxframecount_gb +=
- XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
stats->rxoctetcount_gb +=
- XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
stats->rxoctetcount_g +=
- XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
stats->rxbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
stats->rxmulticastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
stats->rxcrcerror +=
- XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
stats->rxrunterror +=
- XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
stats->rxjabbererror +=
- XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
stats->rxundersize_g +=
- XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
stats->rxoversize_g +=
- XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
stats->rx64octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
stats->rx65to127octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
stats->rx128to255octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
stats->rx256to511octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
stats->rx512to1023octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
stats->rx1024tomaxoctets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
stats->rxunicastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
stats->rxlengtherror +=
- XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
stats->rxoutofrangetype +=
- XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
stats->rxpauseframes +=
- XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
stats->rxfifooverflow +=
- XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
stats->rxvlanframes_gb +=
- XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
stats->rxwatchdogerror +=
- XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}
static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
stats->txoctetcount_gb +=
- XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
stats->txframecount_gb +=
- XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
stats->txbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
stats->txmulticastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
stats->tx64octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
stats->tx65to127octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
stats->tx128to255octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
stats->tx256to511octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
stats->tx512to1023octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
stats->tx1024tomaxoctets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
stats->txunicastframes_gb +=
- XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
stats->txmulticastframes_gb +=
- XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
stats->txbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
stats->txunderflowerror +=
- XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
stats->txoctetcount_g +=
- XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
stats->txframecount_g +=
- XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
stats->txpauseframes +=
- XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
stats->txvlanframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
stats->rxframecount_gb +=
- XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
stats->rxoctetcount_gb +=
- XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
stats->rxoctetcount_g +=
- XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
stats->rxbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
stats->rxmulticastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
stats->rxcrcerror +=
- XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
stats->rxrunterror +=
- XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
stats->rxjabbererror +=
- XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
stats->rxundersize_g +=
- XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
stats->rxoversize_g +=
- XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
stats->rx64octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
stats->rx65to127octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
stats->rx128to255octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
stats->rx256to511octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
stats->rx512to1023octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
stats->rx1024tomaxoctets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
stats->rxunicastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
stats->rxlengtherror +=
- XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
stats->rxoutofrangetype +=
- XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
stats->rxpauseframes +=
- XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
stats->rxfifooverflow +=
- XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
stats->rxvlanframes_gb +=
- XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
stats->rxwatchdogerror +=
- XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
/* Un-freeze counters */
XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
memset(hw_feat, 0, sizeof(*hw_feat));
+ hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
+
/* Hardware feature register 0 */
hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
struct ethtool_drvinfo *drvinfo)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
- XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
- XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
- XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
drvinfo->n_stats = XGBE_STATS_COUNT;
}
}
if (i < pdata->rx_ring_count) {
- spin_lock_init(&tx_ring->lock);
+ spin_lock_init(&rx_ring->lock);
channel->rx_ring = rx_ring++;
}
#define XGMAC_DRIVER_CONTEXT 1
#define XGMAC_IOCTL_CONTEXT 2
+#define XGBE_FIFO_MAX 81920
#define XGBE_FIFO_SIZE_B(x) (x)
#define XGBE_FIFO_SIZE_KB(x) (x * 1024)
* or configurations are present in the device.
*/
struct xgbe_hw_features {
+ /* HW Version */
+ unsigned int version;
+
/* HW Feature Register0 */
unsigned int gmii; /* 1000 Mbps support */
unsigned int vlhash; /* VLAN Hash Filter */
config NET_XGENE
tristate "APM X-Gene SoC Ethernet Driver"
+ depends on HAS_DMA
select PHYLIB
help
This is the Ethernet driver for the on-chip ethernet interface on the
config CNIC
tristate "QLogic CNIC support"
- depends on PCI
+ depends on PCI && (IPV6 || IPV6=n)
select BNX2
select UIO
---help---
u32 reserved3; /* Offset 0x14C */
u32 reserved4; /* Offset 0x150 */
u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
- #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
+ #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001
+ #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00
+ #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8
+ #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000
+ #define LINK_SFP_EEPROM_COMP_CODE_LR 0x00002000
+ #define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000
u32 reserved5[2];
u32 reserved6[PORT_MAX];
LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
+ #define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0
#define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
#define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
-#define SFP_EEPROM_COMP_CODE_ADDR 0x3
- #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4)
- #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5)
- #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6)
+#define SFP_EEPROM_10G_COMP_CODE_ADDR 0x3
+ #define SFP_EEPROM_10G_COMP_CODE_SR_MASK (1<<4)
+ #define SFP_EEPROM_10G_COMP_CODE_LR_MASK (1<<5)
+ #define SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1<<6)
+
+#define SFP_EEPROM_1G_COMP_CODE_ADDR 0x6
+ #define SFP_EEPROM_1G_COMP_CODE_SX (1<<0)
+ #define SFP_EEPROM_1G_COMP_CODE_LX (1<<1)
+ #define SFP_EEPROM_1G_COMP_CODE_CX (1<<2)
+ #define SFP_EEPROM_1G_COMP_CODE_BASE_T (1<<3)
#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
reg_set[i].val);
/* Start KR2 work-around timer which handles BCM8073 link-partner */
- vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
- bnx2x_update_link_attr(params, vars->link_attr_sync);
+ params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
}
static void bnx2x_disable_kr2(struct link_params *params,
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
- vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
- bnx2x_update_link_attr(params, vars->link_attr_sync);
+ params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
}
~FEATURE_CONFIG_PFC_ENABLED;
if (SHMEM2_HAS(bp, link_attr_sync))
- vars->link_attr_sync = SHMEM2_RD(bp,
+ params->link_attr_sync = SHMEM2_RD(bp,
link_attr_sync[params->port]);
DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
{
struct bnx2x *bp = params->bp;
u32 sync_offset = 0, phy_idx, media_types;
- u8 gport, val[2], check_limiting_mode = 0;
+ u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0;
*edc_mode = EDC_MODE_LIMITING;
phy->media_type = ETH_PHY_UNSPECIFIED;
/* First check for copper cable */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
I2C_DEV_ADDR_A0,
- SFP_EEPROM_CON_TYPE_ADDR,
- 2,
+ 0,
+ SFP_EEPROM_FC_TX_TECH_ADDR + 1,
(u8 *)val) != 0) {
DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
return -EINVAL;
}
-
- switch (val[0]) {
+ params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK;
+ params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] <<
+ LINK_SFP_EEPROM_COMP_CODE_SHIFT;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
+ switch (val[SFP_EEPROM_CON_TYPE_ADDR]) {
case SFP_EEPROM_CON_TYPE_VAL_COPPER:
{
u8 copper_module_type;
/* Check if it's an active cable (includes SFP+ module)
* or a passive cable
*/
- if (bnx2x_read_sfp_module_eeprom(phy,
- params,
- I2C_DEV_ADDR_A0,
- SFP_EEPROM_FC_TX_TECH_ADDR,
- 1,
- &copper_module_type) != 0) {
- DP(NETIF_MSG_LINK,
- "Failed to read copper-cable-type"
- " from SFP+ EEPROM\n");
- return -EINVAL;
- }
+ copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR];
if (copper_module_type &
SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
}
break;
}
+ case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN:
case SFP_EEPROM_CON_TYPE_VAL_LC:
case SFP_EEPROM_CON_TYPE_VAL_RJ45:
check_limiting_mode = 1;
- if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
- SFP_EEPROM_COMP_CODE_LR_MASK |
- SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
+ if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
+ (SFP_EEPROM_10G_COMP_CODE_SR_MASK |
+ SFP_EEPROM_10G_COMP_CODE_LR_MASK |
+ SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) {
DP(NETIF_MSG_LINK, "1G SFP module detected\n");
- gport = params->port;
phy->media_type = ETH_PHY_SFP_1G_FIBER;
if (phy->req_line_speed != SPEED_1000) {
+ u8 gport = params->port;
phy->req_line_speed = SPEED_1000;
if (!CHIP_IS_E1x(bp)) {
gport = BP_PATH(bp) +
"Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
gport);
}
+ if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] &
+ SFP_EEPROM_1G_COMP_CODE_BASE_T) {
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+ msleep(40);
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ }
} else {
int idx, cfg_idx = 0;
DP(NETIF_MSG_LINK, "10G Optic module detected\n");
break;
default:
DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
- val[0]);
+ val[SFP_EEPROM_CON_TYPE_ADDR]);
return -EINVAL;
}
sync_offset = params->shmem_base +
sigdet = bnx2x_warpcore_get_sigdet(phy, params);
if (!sigdet) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
DP(NETIF_MSG_LINK, "No sigdet\n");
}
/* CL73 has not begun yet */
if (base_page == 0) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
DP(NETIF_MSG_LINK, "No BP\n");
}
((next_page & 0xe0) == 0x20))));
/* In case KR2 is already disabled, check if we need to re-enable it */
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
if (!not_kr2_device) {
DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
next_page);
#define LINK_FLAGS_INT_DISABLED (1<<0)
#define PHY_INITIALIZED (1<<1)
u32 lfa_base;
+
+ /* The same definitions as the shmem2 parameter */
+ u32 link_attr_sync;
};
/* Output parameters */
u8 rx_tx_asic_rst;
u8 turn_to_run_wc_rt;
u16 rsrv2;
- /* The same definitions as the shmem2 parameter */
- u32 link_attr_sync;
};
/***********************************************************/
bnx2x_release_phy_lock(bp);
}
+static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
+{
+ REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
+
+ /* make sure this value is 0 */
+ REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+ REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
+ REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
+ REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
+ REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
+}
+
+static void bnx2x_set_endianity(struct bnx2x *bp)
+{
+#ifdef __BIG_ENDIAN
+ bnx2x_config_endianity(bp, 1);
+#else
+ bnx2x_config_endianity(bp, 0);
+#endif
+}
+
+static void bnx2x_reset_endianity(struct bnx2x *bp)
+{
+ bnx2x_config_endianity(bp, 0);
+}
+
/**
* bnx2x_init_hw_common - initialize the HW at the COMMON phase.
*
bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
bnx2x_init_pxp(bp);
-
-#ifdef __BIG_ENDIAN
- REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
- /* make sure this value is 0 */
- REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
-
-/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
- REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
- REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
- REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
- REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
-#endif
-
+ bnx2x_set_endianity(bp);
bnx2x_ilt_init_page_size(bp, INITOP_SET);
if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
bnx2x_iov_remove_one(bp);
/* Power on: we can't let PCI layer write to us while we are in D3 */
- if (IS_PF(bp))
+ if (IS_PF(bp)) {
bnx2x_set_power_state(bp, PCI_D0);
+ /* Set endianity registers to reset values in case the next
+ * driver boots in a different endianity environment.
+ */
+ bnx2x_reset_endianity(bp);
+ }
+
/* Disable MSI/MSI-X */
bnx2x_disable_msi(bp);
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
struct dst_entry **dst)
{
-#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_IPV6)
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
struct tg3 *tp = netdev_priv(dev);
int err;
+ if (tp->pcierr_recovery) {
+ netdev_err(dev, "Failed to open device. PCI error recovery in progress\n");
+ return -EAGAIN;
+ }
+
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
if (tg3_asic_rev(tp) == ASIC_REV_57766) {
{
struct tg3 *tp = netdev_priv(dev);
+ if (tp->pcierr_recovery) {
+ netdev_err(dev, "Failed to close device. PCI error recovery in progress\n");
+ return -EAGAIN;
+ }
+
tg3_ptp_fini(tp);
tg3_stop(tp);
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
tp->irq_sync = 1;
+ tp->pcierr_recovery = false;
if (tg3_debug > 0)
tp->msg_enable = tg3_debug;
rtnl_lock();
+ tp->pcierr_recovery = true;
+
/* We probably don't have netdev yet */
if (!netdev || !netif_running(netdev))
goto done;
tg3_phy_start(tp);
done:
+ tp->pcierr_recovery = false;
rtnl_unlock();
}
struct device *hwmon_dev;
bool link_up;
+ bool pcierr_recovery;
};
/* Accessor macros for chip and asic attributes
* For TSO, the TCP checksum field is seeded with pseudo-header sum
* excluding the length field.
*/
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
/* Do we really need these? */
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ __be16 net_proto = vlan_get_protocol(skb);
u8 proto = 0;
- if (skb->protocol == htons(ETH_P_IP))
+ if (net_proto == htons(ETH_P_IP))
proto = ip_hdr(skb)->protocol;
#ifdef NETIF_F_IPV6_CSUM
- else if (skb->protocol == htons(ETH_P_IPV6)) {
+ else if (net_proto == htons(ETH_P_IPV6)) {
/* nexthdr may not be TCP immediately. */
proto = ipv6_hdr(skb)->nexthdr;
}
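This hunk and the ehea, e1000, e1000e, i40e and mvneta hunks that follow all make the same correction: for a frame whose 802.1Q tag is still in the packet headers, skb->protocol is ETH_P_8021Q and the real L3 type sits in the encapsulated-protocol field, so TSO and checksum setup keyed on skb->protocol silently fell through. vlan_get_protocol() looks through the tag; a simplified sketch of the idea (the real helper also copes with hardware-accelerated tags):

    #include <linux/if_vlan.h>

    static __be16 l3_protocol(const struct sk_buff *skb)
    {
            __be16 proto = skb->protocol;

            /* Look through an in-band VLAN tag to the encapsulated type. */
            if (proto == htons(ETH_P_8021Q) || proto == htons(ETH_P_8021AD))
                    proto = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;

            return proto;
    }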
config NET_CALXEDA_XGMAC
tristate "Calxeda 1G/10G XGMAC Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
+ depends on ARCH_HIGHBANK || COMPILE_TEST
select CRC32
help
This is the driver for the XGMAC Ethernet IP block found on Calxeda
goto freeout;
}
- t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
+ t4_write_reg(adap, is_t4(adap->params.chip) ?
+ MPS_TRC_RSS_CONTROL :
+ MPS_T5_TRC_RSS_CONTROL,
RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
return 0;
0xd004, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0xea7c,
- 0xf000, 0x11190,
+ 0xf000, 0x11110,
+ 0x11118, 0x11190,
0x19040, 0x1906c,
0x19078, 0x19080,
0x1908c, 0x19124,
0xd004, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0x11088,
- 0x1109c, 0x1117c,
+ 0x1109c, 0x11110,
+ 0x11118, 0x1117c,
0x11190, 0x11204,
0x19040, 0x1906c,
0x19078, 0x19080,
params[3] = FW_PARAM_PFVF(CQ_END);
params[4] = FW_PARAM_PFVF(OCQ_START);
params[5] = FW_PARAM_PFVF(OCQ_END);
- ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
+ val);
if (ret < 0)
goto bye;
adap->vres.qp.start = val[0];
params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
- ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
+ val);
if (ret < 0) {
adap->params.max_ordird_qp = 8;
adap->params.max_ird_adapter = 32 * adap->tids.ntids;
t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
}
+/*
+ * t4_report_fw_error - report firmware error
+ * @adap: the adapter
+ *
+ * The adapter firmware can indicate error conditions to the host.
+ * If the firmware has indicated an error, print out the reason for
+ * the firmware error.
+ */
+static void t4_report_fw_error(struct adapter *adap)
+{
+ static const char *const reason[] = {
+ "Crash", /* PCIE_FW_EVAL_CRASH */
+ "During Device Preparation", /* PCIE_FW_EVAL_PREP */
+ "During Device Configuration", /* PCIE_FW_EVAL_CONF */
+ "During Device Initialization", /* PCIE_FW_EVAL_INIT */
+ "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
+ "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
+ "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
+ "Reserved", /* reserved */
+ };
+ u32 pcie_fw;
+
+ pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
+ if (pcie_fw & FW_PCIE_FW_ERR)
+ dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
+ reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
+}
+
/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order.
*/
dump_mbox(adap, mbox, data_reg);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
+ t4_report_fw_error(adap);
return -ETIMEDOUT;
}
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
+#define CHELSIO_VPD_UNIQUE_ID 0x82
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
if (ret < 0)
goto out;
- addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+ /* The VPD shall have a unique identifier specified by the PCI SIG.
+ * For Chelsio adapters this is CHELSIO_VPD_UNIQUE_ID (0x82), which the
+ * VPD programming software is expected to put at the beginning of
+ * the VPD.
+ */
+ addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
if (ret < 0)
i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
+ i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->pn, vpd + pn, min(i, PN_LEN));
strim(p->pn);
int fat;
- fat = t4_handle_intr_status(adapter,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
- sysbus_intr_info) +
- t4_handle_intr_status(adapter,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
- pcie_port_intr_info) +
- t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
- is_t4(adapter->params.chip) ?
- pcie_intr_info : t5_pcie_intr_info);
+ if (is_t4(adapter->params.chip))
+ fat = t4_handle_intr_status(adapter,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ t4_handle_intr_status(adapter,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ pcie_intr_info);
+ else
+ fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ t5_pcie_intr_info);
if (fat)
t4_fatal_err(adapter);
int fat;
+ if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+ t4_report_fw_error(adapter);
+
fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
cim_intr_info) +
t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
{
u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
- if (status & MEM_PERR_INT_CAUSE)
+ if (status & MEM_PERR_INT_CAUSE) {
dev_alert(adap->pdev_dev,
"MA parity error, parity status %#x\n",
t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
+ if (is_t5(adap->params.chip))
+ dev_alert(adap->pdev_dev,
+ "MA parity error, parity status %#x\n",
+ t4_read_reg(adap,
+ MA_PARITY_ERROR_STATUS2));
+ }
if (status & MEM_WRAP_INT_CAUSE) {
v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
dev_alert(adap->pdev_dev, "MA address wrap-around error by "
/*
* Issue the HELLO command to the firmware. If it's not successful
* but indicates that we got a "busy" or "timeout" condition, retry
- * the HELLO until we exhaust our retry limit.
+ * the HELLO until we exhaust our retry limit. If we do exceed our
+ * retry limit, check to see if the firmware left us any error
+ * information and report that if so.
*/
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret < 0) {
if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
goto retry;
+ if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+ t4_report_fw_error(adap);
return ret;
}
lc->link_ok = link_ok;
lc->speed = speed;
lc->fc = fc;
+ lc->supported = be16_to_cpu(p->u.info.pcap);
t4_os_link_changed(adap, port, link_ok);
}
if (mod != pi->mod_type) {
#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
#define MA_PCIE_FW 0x30b8
#define MA_PARITY_ERROR_STATUS 0x77f4
+#define MA_PARITY_ERROR_STATUS2 0x7804
#define MA_EXT_MEMORY1_BAR 0x7808
#define EDC_0_BASE_ADDR 0x7900
#define TRCMULTIFILTER 0x00000001U
#define MPS_TRC_RSS_CONTROL 0x9808
+#define MPS_T5_TRC_RSS_CONTROL 0xa00c
#define RSSCONTROL_MASK 0x00ff0000U
#define RSSCONTROL_SHIFT 16
#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT)
#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
FW_PCIE_FW_MASTER_MASK)
+#define FW_PCIE_FW_EVAL_MASK 0x7
+#define FW_PCIE_FW_EVAL_SHIFT 24
+#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
+ FW_PCIE_FW_EVAL_MASK)
struct fw_hdr {
u8 ver;
{
swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
- if (skb->protocol != htons(ETH_P_IP))
+ if (vlan_get_protocol(skb) != htons(ETH_P_IP))
return;
if (skb->ip_summed == CHECKSUM_PARTIAL)
#define E1000_TX_FLAGS_VLAN_SHIFT 16
static int e1000_tso(struct e1000_adapter *adapter,
- struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
}
static bool e1000_tx_csum(struct e1000_adapter *adapter,
- struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return false;
- switch (skb->protocol) {
+ switch (protocol) {
case cpu_to_be16(ETH_P_IP):
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
cmd_len |= E1000_TXD_CMD_TCP;
int count = 0;
int tso;
unsigned int f;
+ __be16 protocol = vlan_get_protocol(skb);
/* This goes back to the question of how to logically map a Tx queue
* to a flow. Right now, performance is impacted slightly negatively
first = tx_ring->next_to_use;
- tso = e1000_tso(adapter, tx_ring, skb);
+ tso = e1000_tso(adapter, tx_ring, skb, protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
if (likely(hw->mac_type != e1000_82544))
tx_ring->last_tx_tso = true;
tx_flags |= E1000_TX_FLAGS_TSO;
- } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+ } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
tx_flags |= E1000_TX_FLAGS_CSUM;
- if (likely(skb->protocol == htons(ETH_P_IP)))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= E1000_TX_FLAGS_IPV4;
if (unlikely(skb->no_fcs))
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
-static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
+static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
return 1;
}
-static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
+static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_context_desc *context_desc;
unsigned int i;
u8 css;
u32 cmd_len = E1000_TXD_CMD_DEXT;
- __be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return false;
- if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
- protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
-
switch (protocol) {
case cpu_to_be16(ETH_P_IP):
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
int count = 0;
int tso;
unsigned int f;
+ __be16 protocol = vlan_get_protocol(skb);
if (test_bit(__E1000_DOWN, &adapter->state)) {
dev_kfree_skb_any(skb);
first = tx_ring->next_to_use;
- tso = e1000_tso(tx_ring, skb);
+ tso = e1000_tso(tx_ring, skb, protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
if (tso)
tx_flags |= E1000_TX_FLAGS_TSO;
- else if (e1000_tx_csum(tx_ring, skb))
+ else if (e1000_tx_csum(tx_ring, skb, protocol))
tx_flags |= E1000_TX_FLAGS_CSUM;
/* Old method was to assume IPv4 packet by default if TSO was enabled.
* 82571 hardware supports TSO capabilities for IPv6 as well...
* no longer assume, we must.
*/
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= E1000_TX_FLAGS_IPV4;
if (unlikely(skb->no_fcs))
goto out_drop;
/* obtain protocol of skb */
- protocol = skb->protocol;
+ protocol = vlan_get_protocol(skb);
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];
goto out_drop;
/* obtain protocol of skb */
- protocol = skb->protocol;
+ protocol = vlan_get_protocol(skb);
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];
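The same substitution recurs through the e1000, e1000e and i40e hunks (and, below, mvneta and qlge): skb->protocol reports ETH_P_8021Q for a VLAN-tagged frame, so the TSO and checksum-offload paths must look at the encapsulated protocol instead. A minimal sketch of what vlan_get_protocol() resolves to here, mirroring the open-coded version removed from e1000e above (the real helper in <linux/if_vlan.h> also handles hardware-accelerated tags):

	static inline __be16 l3_protocol(const struct sk_buff *skb)
	{
		/* For a tagged frame, report the protocol carried inside
		 * the VLAN header rather than ETH_P_8021Q itself. */
		if (skb->protocol == htons(ETH_P_8021Q))
			return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		return skb->protocol;
	}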
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
{
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
+ __be16 l3_proto = vlan_get_protocol(skb);
u8 l4_proto;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* Read l4_protocol from one of IPv6 extra headers */
return MVNETA_TX_L4_CSUM_NOT;
return mvneta_txq_desc_csum(skb_network_offset(skb),
- skb->protocol, ip_hdr_len, l4_proto);
+ l3_proto, ip_hdr_len, l4_proto);
}
return MVNETA_TX_L4_CSUM_NOT;
int qpn, u64 *reg_id)
{
int err;
- struct mlx4_spec_list spec_eth_outer = { {NULL} };
- struct mlx4_spec_list spec_vxlan = { {NULL} };
- struct mlx4_spec_list spec_eth_inner = { {NULL} };
-
- struct mlx4_net_trans_rule rule = {
- .queue_mode = MLX4_NET_TRANS_Q_FIFO,
- .exclusive = 0,
- .allow_loopback = 1,
- .promisc_mode = MLX4_FS_REGULAR,
- .priority = MLX4_DOMAIN_NIC,
- };
-
- __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return 0; /* do nothing */
- rule.port = priv->port;
- rule.qpn = qpn;
- INIT_LIST_HEAD(&rule.list);
-
- spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
- memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
- memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
-
- spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
- spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
-
- list_add_tail(&spec_eth_outer.list, &rule.list);
- list_add_tail(&spec_vxlan.list, &rule.list);
- list_add_tail(&spec_eth_inner.list, &rule.list);
-
- err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+ err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
+ MLX4_DOMAIN_NIC, reg_id);
if (err) {
en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+ int port, int qpn, u16 prio, u64 *reg_id)
+{
+ int err;
+ struct mlx4_spec_list spec_eth_outer = { {NULL} };
+ struct mlx4_spec_list spec_vxlan = { {NULL} };
+ struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_REGULAR,
+ };
+
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ rule.port = port;
+ rule.qpn = qpn;
+ rule.priority = prio;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+ memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+ spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
+ spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
+
+ list_add_tail(&spec_eth_outer.list, &rule.list);
+ list_add_tail(&spec_vxlan.list, &rule.list);
+ list_add_tail(&spec_eth_inner.list, &rule.list);
+
+ err = mlx4_flow_attach(dev, &rule, reg_id);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_tunnel_steer_add);
+
int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
u32 max_range_qpn)
{
int rx_head = priv->rx_head;
int rx = 0;
- while (1) {
+ while (rx < budget) {
desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
desc0 = readl(desc + RX_REG_OFFSET_DESC0);
net_dbg_ratelimited("packet error\n");
priv->stats.rx_dropped++;
priv->stats.rx_errors++;
- continue;
+ goto rx_next;
}
len = desc0 & RX_DESC0_FRAME_LEN_MASK;
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;
- skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
+ dma_sync_single_for_cpu(&ndev->dev,
+ priv->rx_mapping[rx_head],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ skb = netdev_alloc_skb_ip_align(ndev, len);
+
if (unlikely(!skb)) {
- net_dbg_ratelimited("build_skb failed\n");
+ net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
priv->stats.rx_dropped++;
priv->stats.rx_errors++;
+ goto rx_next;
}
+ memcpy(skb->data, priv->rx_buf[rx_head], len);
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(&priv->napi, skb);
if (desc0 & RX_DESC0_MULTICAST)
priv->stats.multicast++;
+rx_next:
writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
rx_head = RX_NEXT(rx_head);
priv->rx_head = rx_head;
-
- if (rx >= budget)
- break;
}
if (rx < budget) {
- napi_gro_flush(napi, false);
- __napi_complete(napi);
+ napi_complete(napi);
}
priv->reg_imr |= RPKT_FINISH_M;
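The rework above also brings the moxart poll loop in line with the NAPI contract: handle at most `budget` packets per invocation, and call napi_complete() (re-enabling the interrupt) only when less than the budget was consumed. A hedged skeleton of that contract, where rx_work_pending(), handle_one_packet() and enable_rx_irq() are placeholder names rather than moxart functions:

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		int done = 0;

		while (done < budget && rx_work_pending())
			done += handle_one_packet();

		if (done < budget) {
			/* ring drained: stop polling, unmask the RX IRQ */
			napi_complete(napi);
			enable_rx_irq();
		}
		return done;
	}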
len = ETH_ZLEN;
}
- txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
- txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
- txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
- txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
+ dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+ priv->tx_buf_size, DMA_TO_DEVICE);
+
+ txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
+ if (tx_head == TX_DESC_NUM_MASK)
+ txdes1 |= TX_DESC1_END;
writel(txdes1, desc + TX_REG_OFFSET_DESC1);
writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
spin_lock_init(&priv->txlock);
priv->tx_buf_size = TX_BUF_SIZE;
- priv->rx_buf_size = RX_BUF_SIZE +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ priv->rx_buf_size = RX_BUF_SIZE;
priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
__lpc_eth_clock_enable(pldat, true);
+ /* Suspended PHY makes LPC ethernet core block, so resume now */
+ phy_resume(pldat->phy_dev);
+
/* Reset and initialize */
__lpc_eth_reset(pldat);
__lpc_eth_init(pldat);
if (skb_is_gso(skb)) {
int err;
+ __be16 l3_proto = vlan_get_protocol(skb);
err = skb_cow_head(skb, 0);
if (err < 0)
<< OB_MAC_TRANSPORT_HDR_SHIFT);
mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
- if (likely(skb->protocol == htons(ETH_P_IP))) {
+ if (likely(l3_proto == htons(ETH_P_IP))) {
struct iphdr *iph = ip_hdr(skb);
iph->check = 0;
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
iph->daddr, 0,
IPPROTO_TCP,
0);
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on HAS_DMA
+ depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
select CRC32
select MII
select MDIO_BITBANG
#include "stmmac.h"
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
while (len != 0) {
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i),
bmax, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i), len,
DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
handle_tx = 0x8,
};
-#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1)
-#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2)
-#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3)
-#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4)
+#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
+#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
+#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
+#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
#define CORE_PCS_ANE_COMPLETE (1 << 5)
#define CORE_PCS_LINK_STATUS (1 << 6)
/* Default LPI timers */
#define STMMAC_DEFAULT_LIT_LS 0x3E8
-#define STMMAC_DEFAULT_TWT_LS 0x0
+#define STMMAC_DEFAULT_TWT_LS 0x1E
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
- unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
int (*set_16kib_bfsize)(int mtu);
void (*init_desc3)(struct dma_desc *p);
void (*refill_desc3) (void *priv, struct dma_desc *p);
int multicast_filter_bins;
int unicast_filter_entries;
int mcast_bits_log2;
+ unsigned int rx_csum;
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
- GMAC_CONTROL_BE)
+ GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
/* GMAC Frame Filter defines */
#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
- value |= GMAC_CONTROL_IPC;
+ if (hw->rx_csum)
+ value |= GMAC_CONTROL_IPC;
+ else
+ value &= ~GMAC_CONTROL_IPC;
+
writel(value, ioaddr + GMAC_CONTROL);
value = readl(ioaddr + GMAC_CONTROL);
unsigned int mmc_rx_octetcount_g;
unsigned int mmc_rx_broadcastframe_g;
unsigned int mmc_rx_multicastframe_g;
- unsigned int mmc_rx_crc_errror;
+ unsigned int mmc_rx_crc_error;
unsigned int mmc_rx_align_error;
unsigned int mmc_rx_run_error;
unsigned int mmc_rx_jabber_error;
mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
- mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR);
+ mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR);
mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
#include "stmmac.h"
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
STMMAC_RING_MODE);
desc->des2 = dma_map_single(priv->device, skb->data + bmax,
len, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_RING_MODE);
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
STMMAC_RING_MODE);
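Both stmmac mode files now follow the same DMA-API rule: the cookie returned by dma_map_single() (or skb_frag_dma_map()) must be checked with dma_mapping_error() before it is programmed into a descriptor, because mappings can legitimately fail behind an IOMMU or with bounce buffers. The shape of the pattern, as a sketch lifted from the hunks above:

	desc->des2 = dma_map_single(priv->device, skb->data,
				    len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, desc->des2))
		return -1;	/* never hand an unchecked handle to the HW */
	priv->tx_skbuff_dma[entry].buf = desc->des2;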
#include <linux/ptp_clock_kernel.h>
#include <linux/reset.h>
+struct stmmac_tx_info {
+ dma_addr_t buf;
+ bool map_as_page;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
- dma_addr_t *tx_skbuff_dma;
+ struct stmmac_tx_info *tx_skbuff_dma;
dma_addr_t dma_tx_phy;
int tx_coalesce;
int hwts_tx_en;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
+ struct clk *clk_ptp_ref;
+ unsigned int clk_ptp_rate;
u32 adv_ts;
int use_riwt;
int irq_wake;
STMMAC_MMC_STAT(mmc_rx_octetcount_g),
STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
- STMMAC_MMC_STAT(mmc_rx_crc_errror),
+ STMMAC_MMC_STAT(mmc_rx_crc_error),
STMMAC_MMC_STAT(mmc_rx_align_error),
STMMAC_MMC_STAT(mmc_rx_run_error),
STMMAC_MMC_STAT(mmc_rx_jabber_error),
*/
bool stmmac_eee_init(struct stmmac_priv *priv)
{
+ char *phy_bus_name = priv->plat->phy_bus_name;
bool ret = false;
/* Using PCS we cannot deal with the phy registers at this stage
(priv->pcs == STMMAC_PCS_RTBI))
goto out;
+ /* Never init EEE if a switch is attached */
+ if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
+ goto out;
+
/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
int tx_lpi_timer = priv->tx_lpi_timer;
priv->hw->mac->set_eee_timer(priv->hw,
STMMAC_DEFAULT_LIT_LS,
tx_lpi_timer);
- } else
- /* Set HW EEE according to the speed */
- priv->hw->mac->set_eee_pls(priv->hw,
- priv->phydev->link);
+ }
+ /* Set HW EEE according to the speed */
+ priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
/* calculate default added value:
* formula is :
* addend = (2^32)/freq_div_ratio;
- * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
- * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
- * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
+ * where, freq_div_ratio = clk_ptp_ref_i/50MHz
+ * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
+ * NOTE: clk_ptp_ref_i should be >= 50MHz to
* achieve 20ns accuracy.
*
* 2^x * y == (y << x), hence
* 2^32 * 50000000 ==> (50000000 << 32)
*/
temp = (u64) (50000000ULL << 32);
- priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
+ priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
priv->hw->ptp->config_addend(priv->ioaddr,
priv->default_addend);
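As a worked example of the formula above: with the old fixed 62.5 MHz STMMAC_SYSCLOCK (the constant this series drops from stmmac_ptp.h), the addend comes out to

	temp   = 50000000ULL << 32;	  /* 2^32 * 50 MHz */
	addend = div_u64(temp, 62500000); /* 50/62.5 = 0.8 */
	/* 0.8 * 2^32 = 3435973836 = 0xCCCCCCCC */

so a clk_ptp_ref_i faster than 50 MHz simply yields a smaller addend.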
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
+ /* Fall back to the main clock if no PTP ref clock is passed */
+ priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
+ if (IS_ERR(priv->clk_ptp_ref)) {
+ priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
+ priv->clk_ptp_ref = NULL;
+ } else {
+ clk_prepare_enable(priv->clk_ptp_ref);
+ priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
+ }
+
priv->adv_ts = 0;
if (priv->dma_cap.atime_stamp && priv->extend_desc)
priv->adv_ts = 1;
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
+ if (priv->clk_ptp_ref)
+ clk_disable_unprepare(priv->clk_ptp_ref);
stmmac_ptp_unregister(priv);
}
else
p = priv->dma_tx + i;
p->des2 = 0;
- priv->tx_skbuff_dma[i] = 0;
+ priv->tx_skbuff_dma[i].buf = 0;
+ priv->tx_skbuff_dma[i].map_as_page = false;
priv->tx_skbuff[i] = NULL;
}
else
p = priv->dma_tx + i;
- if (priv->tx_skbuff_dma[i]) {
- dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[i],
- priv->hw->desc->get_tx_len(p),
- DMA_TO_DEVICE);
- priv->tx_skbuff_dma[i] = 0;
+ if (priv->tx_skbuff_dma[i].buf) {
+ if (priv->tx_skbuff_dma[i].map_as_page)
+ dma_unmap_page(priv->device,
+ priv->tx_skbuff_dma[i].buf,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[i].buf,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
}
if (priv->tx_skbuff[i] != NULL) {
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
+ priv->tx_skbuff_dma[i].buf = 0;
+ priv->tx_skbuff_dma[i].map_as_page = false;
}
}
}
if (!priv->rx_skbuff)
goto err_rx_skbuff;
- priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+ priv->tx_skbuff_dma = kmalloc_array(txsize,
+ sizeof(*priv->tx_skbuff_dma),
GFP_KERNEL);
if (!priv->tx_skbuff_dma)
goto err_tx_skbuff_dma;
pr_debug("%s: curr %d, dirty %d\n", __func__,
priv->cur_tx, priv->dirty_tx);
- if (likely(priv->tx_skbuff_dma[entry])) {
- dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[entry],
- priv->hw->desc->get_tx_len(p),
- DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = 0;
+ if (likely(priv->tx_skbuff_dma[entry].buf)) {
+ if (priv->tx_skbuff_dma[entry].map_as_page)
+ dma_unmap_page(priv->device,
+ priv->tx_skbuff_dma[entry].buf,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[entry].buf,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry].buf = 0;
+ priv->tx_skbuff_dma[entry].map_as_page = false;
}
priv->hw->mode->clean_desc3(priv, p);
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
+ ret = priv->hw->mac->rx_ipc(priv->hw);
+ if (!ret) {
+ pr_warn(" RX IPC Checksum Offload disabled\n");
+ priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+ priv->hw->rx_csum = 0;
+ }
+
/* Enable the MAC Rx/Tx */
stmmac_set_mac(priv->ioaddr, true);
if (likely(!is_jumbo)) {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ goto dma_map_err;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
csum_insertion, priv->mode);
} else {
desc = first;
entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+ if (unlikely(entry < 0))
+ goto dma_map_err;
}
for (i = 0; i < nfrags; i++) {
desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ goto dma_map_err; /* should reuse desc w/o issues */
+
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].map_as_page = true;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
priv->mode);
wmb();
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
spin_unlock(&priv->tx_lock);
+ return NETDEV_TX_OK;
+dma_map_err:
+ dev_err(priv->device, "Tx dma map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
priv->rx_skbuff_dma[entry] =
dma_map_single(priv->device, skb->data, bfsize,
DMA_FROM_DEVICE);
-
+ if (dma_mapping_error(priv->device,
+ priv->rx_skbuff_dma[entry])) {
+ dev_err(priv->device, "Rx dma map failed\n");
+ dev_kfree_skb(skb);
+ break;
+ }
p->des2 = priv->rx_skbuff_dma[entry];
priv->hw->mode->refill_desc3(priv, p);
unsigned int entry = priv->cur_rx % rxsize;
unsigned int next_entry;
unsigned int count = 0;
- int coe = priv->plat->rx_coe;
+ int coe = priv->hw->rx_csum;
if (netif_msg_rx_status(priv)) {
pr_debug("%s: descriptor ring:\n", __func__);
if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
features &= ~NETIF_F_RXCSUM;
- else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
- features &= ~NETIF_F_IPV6_CSUM;
+
if (!priv->plat->tx_coe)
features &= ~NETIF_F_ALL_CSUM;
return features;
}
+static int stmmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ /* Keep the COE type if checksum offload is supported */
+ if (features & NETIF_F_RXCSUM)
+ priv->hw->rx_csum = priv->plat->rx_coe;
+ else
+ priv->hw->rx_csum = 0;
+ /* No check needed because rx_coe has been set before and it will be
+ * corrected if there is an issue.
+ */
+ priv->hw->mac->rx_ipc(priv->hw);
+
+ return 0;
+}
+
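In practice this hook is what makes "ethtool -K <iface> rx on|off" take effect on stmmac: the core toggles NETIF_F_RXCSUM, stmmac_set_features() mirrors it into hw->rx_csum, and rx_ipc() rewrites the GMAC_CONTROL_IPC bit as shown in the dwmac1000 hunk above.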
/**
* stmmac_interrupt - main ISR
* @irq: interrupt number.
.ndo_stop = stmmac_release,
.ndo_change_mtu = stmmac_change_mtu,
.ndo_fix_features = stmmac_fix_features,
+ .ndo_set_features = stmmac_set_features,
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
*/
static int stmmac_hw_init(struct stmmac_priv *priv)
{
- int ret;
struct mac_device_info *mac;
/* Identify the MAC HW device */
/* To use alternate (extended) or normal descriptor structures */
stmmac_selec_desc_mode(priv);
- ret = priv->hw->mac->rx_ipc(priv->hw);
- if (!ret) {
- pr_warn(" RX IPC Checksum Offload not configured.\n");
- priv->plat->rx_coe = STMMAC_RX_COE_NONE;
- }
-
- if (priv->plat->rx_coe)
+ if (priv->plat->rx_coe) {
+ priv->hw->rx_csum = priv->plat->rx_coe;
pr_info(" RX Checksum Offload Engine supported (type %d)\n",
priv->plat->rx_coe);
+ }
if (priv->plat->tx_coe)
pr_info(" TX Checksum insertion supported\n");
{
if (priv->ptp_clock) {
ptp_clock_unregister(priv->ptp_clock);
+ priv->ptp_clock = NULL;
pr_debug("Removed PTP HW clock successfully on %s\n",
priv->dev->name);
}
#ifndef __STMMAC_PTP_H__
#define __STMMAC_PTP_H__
-#define STMMAC_SYSCLOCK 62500000
-
/* IEEE 1588 PTP register offsets */
#define PTP_TCR 0x0700 /* Timestamp Control Reg */
#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
#define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */
#define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */
-/* PCI_BASE_2ND 32 bit 2nd Base address */
-#define PCI_IOBASE 0xffffff00L /* Bit 31..8: I/O Base address */
-#define PCI_IOSIZE 0x000000fcL /* Bit 7..2: I/O Size Requirements */
-#define PCI_IOSPACE 0x00000001L /* Bit 0: I/O Space Indicator */
-
/* PCI_SUB_VID 16 bit Subsystem Vendor ID */
/* PCI_SUB_ID 16 bit Subsystem ID */
/* First check if the EEE ability is supported */
eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
MDIO_MMD_PCS, phydev->addr);
- if (eee_cap < 0)
- return eee_cap;
+ if (eee_cap <= 0)
+ goto eee_exit_err;
cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
if (!cap)
- return -EPROTONOSUPPORT;
+ goto eee_exit_err;
/* Check which link settings negotiated and verify it in
* the EEE advertising registers.
*/
eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
MDIO_MMD_AN, phydev->addr);
- if (eee_lp < 0)
- return eee_lp;
+ if (eee_lp <= 0)
+ goto eee_exit_err;
eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
MDIO_MMD_AN, phydev->addr);
- if (eee_adv < 0)
- return eee_adv;
+ if (eee_adv <= 0)
+ goto eee_exit_err;
adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
idx = phy_find_setting(phydev->speed, phydev->duplex);
if (!(lp & adv & settings[idx].setting))
- return -EPROTONOSUPPORT;
+ goto eee_exit_err;
if (clk_stop_enable) {
/* Configure the PHY to stop receiving xMII
return 0; /* EEE supported */
}
-
+eee_exit_err:
return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);
if (!netdev_mc_empty(netdev)) {
new_table = vmxnet3_copy_mc(netdev);
if (new_table) {
- new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = cpu_to_le16(
netdev_mc_count(netdev) * ETH_ALEN);
new_table_pa = dma_map_single(
new_table,
rxConf->mfTableLen,
PCI_DMA_TODEVICE);
+ }
+
+ if (new_table_pa) {
+ new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTablePA = cpu_to_le64(new_table_pa);
} else {
- netdev_info(netdev, "failed to copy mcast list"
- ", setting ALL_MULTI\n");
+ netdev_info(netdev,
+ "failed to copy mcast list, setting ALL_MULTI\n");
new_mode |= VMXNET3_RXM_ALL_MULTI;
}
}
-
if (!(new_mode & VMXNET3_RXM_MCAST)) {
rxConf->mfTableLen = 0;
rxConf->mfTablePA = 0;
VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- if (new_table) {
+ if (new_table_pa)
dma_unmap_single(&adapter->pdev->dev, new_table_pa,
rxConf->mfTableLen, PCI_DMA_TODEVICE);
- kfree(new_table);
- }
+ kfree(new_table);
}
void
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.2.0.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.2.1.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01020000
+#define VMXNET3_DRIVER_VERSION_NUM 0x01020100
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
} else if (vxlan->flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = tip,
- .sa.sa_family = AF_INET,
+ .sin.sin_family = AF_INET,
};
vxlan_ip_miss(dev, &ipa);
} else if (vxlan->flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
- .sa.sa_family = AF_INET6,
+ .sin6.sin6_family = AF_INET6,
};
vxlan_ip_miss(dev, &ipa);
if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = pip->daddr,
- .sa.sa_family = AF_INET,
+ .sin.sin_family = AF_INET,
};
vxlan_ip_miss(dev, &ipa);
if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin6.sin6_addr = pip6->daddr,
- .sa.sa_family = AF_INET6,
+ .sin6.sin6_family = AF_INET6,
};
vxlan_ip_miss(dev, &ipa);
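All four vxlan hunks fix the same C pitfall: an initializer list may only initialize one member of a union, and when designators name two different members the last one wins. Setting the address through .sin/.sin6 but the family through .sa made the compiler re-initialize the whole union from the .sa member, zeroing the address again. A compact standalone illustration (hypothetical types, not the kernel's):

	union addr {
		struct { int family; int a; } sa;	/* generic view */
		struct { int family; int b; } sin;	/* address view */
	};

	/* Only the LAST designated member takes effect: the .sa
	 * initializer re-initializes the union and zeroes .sin.b. */
	union addr broken = { .sin.b = 42, .sa.family = 1 };

	/* Fix: initialize everything through a single member. */
	union addr fixed = { .sin = { .family = 1, .b = 42 } };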
kfree_skb(priv->rx_skb);
- usb_put_dev(priv->udev);
-
at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw",
__func__);
ieee80211_free_hw(priv->hw);
wiphy_info(priv->hw->wiphy, "disconnecting\n");
at76_delete_device(priv);
+ usb_put_dev(priv->udev);
dev_info(&interface->dev, "disconnected\n");
}
if (strncmp("trigger", buf, 7) == 0) {
ath9k_spectral_scan_trigger(sc->hw);
- } else if (strncmp("background", buf, 9) == 0) {
+ } else if (strncmp("background", buf, 10) == 0) {
ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
} else if (strncmp("chanscan", buf, 8) == 0) {
config IWLDVM
tristate "Intel Wireless WiFi DVM Firmware support"
- depends on m
default IWLWIFI
help
This is the driver that supports the DVM firmware which is
config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
- depends on m
help
This is the driver that supports the MVM firmware which is
currently only available for 7260 and 3160 devices.
/* recalculate basic rates */
iwl_calc_basic_rates(priv, ctx);
+ /*
+ * force CTS-to-self frame protection if RTS-CTS is not the
+ * preferred aggregation protection method
+ */
+ if (!priv->hw_params.use_rts_for_aggregation)
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ if (bss_conf->use_cts_prot)
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+
memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
if (vif->type == NL80211_IFTYPE_AP ||
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 9
-#define IWL3160_UCODE_API_MAX 9
+#define IWL7260_UCODE_API_MAX 10
+#define IWL3160_UCODE_API_MAX 10
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 9
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 9
+#define IWL8000_UCODE_API_MAX 10
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 8
bool is_legacy = false;
- if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_B))
+ if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
is_legacy = true;
return is_legacy;
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */
{RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
{RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
init_waitqueue_head(&queue->dealloc_wq);
atomic_set(&queue->inflight_packets, 0);
+ netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+ XENVIF_NAPI_WEIGHT);
+
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
wake_up_process(queue->task);
wake_up_process(queue->dealloc_task);
- netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
- XENVIF_NAPI_WEIGHT);
-
return 0;
err_rx_unbind:
controller, such as the one emulated by kvmtool.
config PCIE_SPEAR13XX
- tristate "STMicroelectronics SPEAr PCIe controller"
+ bool "STMicroelectronics SPEAr PCIe controller"
depends on ARCH_SPEAR13XX
select PCIEPORTBUS
select PCIE_DW
.irq_mask = byt_irq_mask,
.irq_unmask = byt_irq_unmask,
.irq_set_type = byt_irq_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
struct dentry *debug;
unsigned long cfg;
bool has_hw_rfkill_switch;
- bool has_touchpad_control;
};
static bool no_bt_rfkill;
int type;
};
-const const struct ideapad_rfk_data ideapad_rfk_data[] = {
+static const struct ideapad_rfk_data ideapad_rfk_data[] = {
{ "ideapad_wlan", CFG_WIFI_BIT, VPCCMD_W_WIFI, RFKILL_TYPE_WLAN },
{ "ideapad_bluetooth", CFG_BT_BIT, VPCCMD_W_BT, RFKILL_TYPE_BLUETOOTH },
{ "ideapad_3g", CFG_3G_BIT, VPCCMD_W_3G, RFKILL_TYPE_WWAN },
{
unsigned long value;
- if (!priv->has_touchpad_control)
- return;
-
/* Without reading from EC touchpad LED doesn't switch state */
if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
/* Some IdeaPads don't really turn off touchpad - they only
* always results in 0 on these models, causing ideapad_laptop to wrongly
* report all radios as hardware-blocked.
*/
-static struct dmi_system_id no_hw_rfkill_list[] = {
- {
- .ident = "Lenovo Yoga 2 11 / 13 / Pro",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"),
- },
- },
- {}
-};
-
-/*
- * Some models don't offer touchpad ctrl through the ideapad interface, causing
- * ideapad_sync_touchpad_state to send wrong touchpad enable/disable events.
- */
-static struct dmi_system_id no_touchpad_ctrl_list[] = {
- {
- .ident = "Lenovo Yoga 1 series",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga"),
- },
- },
+static const struct dmi_system_id no_hw_rfkill_list[] = {
{
.ident = "Lenovo Yoga 2 11 / 13 / Pro",
.matches = {
priv->adev = adev;
priv->platform_device = pdev;
priv->has_hw_rfkill_switch = !dmi_check_system(no_hw_rfkill_list);
- priv->has_touchpad_control = !dmi_check_system(no_touchpad_ctrl_list);
ret = ideapad_sysfs_init(priv);
if (ret)
const char *buf, size_t count)
{
struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
- int mode = -1;
- int time = -1;
+ int mode;
+ int time;
+ int ret;
- if (sscanf(buf, "%i", &mode) != 1 && (mode != 2 || mode != 1))
+
+ ret = kstrtoint(buf, 0, &mode);
+ if (ret)
+ return ret;
+ if (mode != SCI_KBD_MODE_FNZ && mode != SCI_KBD_MODE_AUTO)
return -EINVAL;
/* Set the Keyboard Backlight Mode where:
* Auto - KBD backlight turns off automatically in given time
* FN-Z - KBD backlight "toggles" when hotkey pressed
*/
- if (mode != -1 && toshiba->kbd_mode != mode) {
+ if (toshiba->kbd_mode != mode) {
time = toshiba->kbd_time << HCI_MISC_SHIFT;
time = time + toshiba->kbd_mode;
- if (toshiba_kbd_illum_status_set(toshiba, time) < 0)
- return -EIO;
+ ret = toshiba_kbd_illum_status_set(toshiba, time);
+ if (ret)
+ return ret;
toshiba->kbd_mode = mode;
}
{
struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
u32 result;
+ acpi_status status;
+
+ if (dev->hotkey_dev) {
+ status = acpi_evaluate_object(dev->acpi_dev->handle, "ENAB",
+ NULL, NULL);
+ if (ACPI_FAILURE(status))
+ pr_info("Unable to re-enable hotkeys\n");
- if (dev->hotkey_dev)
hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE, &result);
+ }
return 0;
}
{ X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */
{ X86_VENDOR_INTEL, 6, 0x3c},/* Haswell */
{ X86_VENDOR_INTEL, 6, 0x3d},/* Broadwell */
+ { X86_VENDOR_INTEL, 6, 0x3f},/* Haswell */
{ X86_VENDOR_INTEL, 6, 0x45},/* Haswell ULT */
/* TODO: Add more CPU IDs after testing */
{}
for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
/* use physical package id to read counters */
- if (!rapl_check_domain(cpu, i))
+ if (!rapl_check_domain(cpu, i)) {
rp->domain_map |= 1 << i;
- else
- pr_warn("RAPL domain %s detection failed\n",
- rapl_domain_names[i]);
+ pr_info("Found RAPL domain %s\n", rapl_domain_names[i]);
+ }
}
rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
if (!rp->nr_domains) {
* strings when running as a module.
*/
static char *dasd[256];
-module_param_array(dasd, charp, NULL, 0);
+module_param_array(dasd, charp, NULL, S_IRUGO);
/*
* Single spinlock to protect devmap and servermap structures and lists.
extern const struct attribute_group *qeth_osn_attr_groups[];
extern struct workqueue_struct *qeth_wq;
+int qeth_card_hw_is_reachable(struct qeth_card *);
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_realloc_buffer_pool(struct qeth_card *, int);
int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
struct workqueue_struct *qeth_wq;
EXPORT_SYMBOL_GPL(qeth_wq);
+int qeth_card_hw_is_reachable(struct qeth_card *card)
+{
+ return (card->state == CARD_STATE_SOFTSETUP) ||
+ (card->state == CARD_STATE_UP);
+}
+EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);
+
static void qeth_close_dev_handler(struct work_struct *work)
{
struct qeth_card *card;
struct qeth_card *card = netdev->ml_priv;
enum qeth_link_types link_type;
struct carrier_info carrier_info;
+ int rc;
u32 speed;
if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
/* Check if we can obtain more accurate information. */
/* If QUERY_CARD_INFO command is not supported or fails, */
/* just return the heuristics that was filled above. */
- if (qeth_query_card_info(card, &carrier_info) != 0)
+ if (!qeth_card_hw_is_reachable(card))
+ return -ENODEV;
+ rc = qeth_query_card_info(card, &carrier_info);
+ if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
return 0;
+ if (rc) /* report error from the hardware operation */
+ return rc;
+ /* on success, fill in the information obtained from the hardware */
netdev_dbg(netdev,
"card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
#include <linux/slab.h>
#include <asm/ebcdic.h>
+#include "qeth_core.h"
#include "qeth_l2.h"
#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
-static int qeth_card_hw_is_reachable(struct qeth_card *card)
-{
- return (card->state == CARD_STATE_SOFTSETUP) ||
- (card->state == CARD_STATE_UP);
-}
-
static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
struct device_attribute *attr, char *buf,
int show_state)
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4350) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4351) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
if (g0 != panels[i].g0)
continue;
if (r0 == panels[i].r0 && b0 == panels[i].b0)
- fb->panel->caps = panels[i].caps & CLCD_CAP_RGB;
- if (r0 == panels[i].b0 && b0 == panels[i].r0)
- fb->panel->caps = panels[i].caps & CLCD_CAP_BGR;
+ fb->panel->caps = panels[i].caps;
}
return fb->panel->caps ? 0 : -EINVAL;
rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
if (rc) {
- pr_info("%s: add_memory() failed: %i\n", __func__, rc);
- return BP_EAGAIN;
+ pr_warn("Cannot add additional memory (%i)\n", rc);
+ return BP_ECANCELED;
}
balloon_hotplug -= credit;
int i, rc, readonly;
LIST_HEAD(queue_gref);
LIST_HEAD(queue_file);
- struct gntalloc_gref *gref;
+ struct gntalloc_gref *gref, *next;
readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
rc = -ENOMEM;
goto undo;
/* Grant foreign access to the page. */
- gref->gref_id = gnttab_grant_foreign_access(op->domid,
+ rc = gnttab_grant_foreign_access(op->domid,
pfn_to_mfn(page_to_pfn(gref->page)), readonly);
- if ((int)gref->gref_id < 0) {
- rc = gref->gref_id;
+ if (rc < 0)
goto undo;
- }
- gref_ids[i] = gref->gref_id;
+ gref_ids[i] = gref->gref_id = rc;
}
/* Add to gref lists. */
mutex_lock(&gref_mutex);
gref_size -= (op->count - i);
- list_for_each_entry(gref, &queue_file, next_file) {
- /* __del_gref does not remove from queue_file */
+ list_for_each_entry_safe(gref, next, &queue_file, next_file) {
+ list_del(&gref->next_file);
__del_gref(gref);
}
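Two things change in this gntalloc error path: the loop switches to the _safe iterator, and each entry is unlinked before __del_gref() frees it. The plain list_for_each_entry() would read the freed entry's next pointer when advancing; the _safe variant caches the successor first, which is exactly what makes deletion inside the loop body legal:

	struct gntalloc_gref *gref, *next;

	list_for_each_entry_safe(gref, next, &queue_file, next_file) {
		list_del(&gref->next_file);	/* unlink first...      */
		__del_gref(gref);		/* ...then free; 'next'
						 * was saved beforehand */
	}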
gref->notify.flags = 0;
- if (gref->gref_id > 0) {
+ if (gref->gref_id) {
if (gnttab_query_foreign_access(gref->gref_id))
return;
shutting_down = SHUTDOWN_SUSPEND;
-#ifdef CONFIG_PREEMPT
- /* If the kernel is preemptible, we need to freeze all the processes
- to prevent them from being in the middle of a pagetable update
- during suspend. */
err = freeze_processes();
if (err) {
pr_err("%s: freeze failed %d\n", __func__, err);
goto out;
}
-#endif
err = dpm_suspend_start(PMSG_FREEZE);
if (err) {
dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
out_thaw:
-#ifdef CONFIG_PREEMPT
thaw_processes();
out:
-#endif
shutting_down = SHUTDOWN_INVALID;
}
#endif /* CONFIG_HIBERNATE_CALLBACKS */
for (i = 0; i < table->nr; ++i) {
struct kioctx *ctx = table->table[i];
+ struct completion requests_done =
+ COMPLETION_INITIALIZER_ONSTACK(requests_done);
if (!ctx)
continue;
* that it needs to unmap the area, just set it to 0.
*/
ctx->mmap_size = 0;
- kill_ioctx(mm, ctx, NULL);
+ kill_ioctx(mm, ctx, &requests_done);
+
+ /* Wait until all IO for the context are done. */
+ wait_for_completion(&requests_done);
}
RCU_INIT_POINTER(mm->ioctx_table, NULL);
tail = ring->tail;
kunmap_atomic(ring);
+ /*
+ * Ensure that once we've read the current tail pointer, that
+ * we also see the events that were stored up to the tail.
+ */
+ smp_rmb();
+
pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
if (head == tail)
support for OS/2 and Windows ME and similar servers is provided as
well.
+ The module also provides optional support for the follow-on
+ protocols for CIFS including SMB3, which enables
+ useful performance and security features (see the description
+ of CONFIG_CIFS_SMB2).
+
The cifs module provides an advanced network file system
client for mounting to CIFS compliant servers. It includes
support for DFS (hierarchical name space), secure per-user
depends on CIFS_XATTR && KEYS
help
Allows fetching CIFS/NTFS ACL from the server. The DACL blob
- is handed over to the application/caller.
+ is handed over to the application/caller. See the man
+ page for getcifsacl for more information.
config CIFS_DEBUG
bool "Enable CIFS debugging routines"
Allows NFS server to export a CIFS mounted share (nfsd over cifs)
config CIFS_SMB2
- bool "SMB2 network file system support"
+ bool "SMB2 and SMB3 network file system support"
depends on CIFS && INET
select NLS
select KEYS
select DNS_RESOLVER
help
- This enables experimental support for the SMB2 (Server Message Block
- version 2) protocol. The SMB2 protocol is the successor to the
- popular CIFS and SMB network file sharing protocols. SMB2 is the
- native file sharing mechanism for recent versions of Windows
- operating systems (since Vista). SMB2 enablement will eventually
- allow users better performance, security and features, than would be
- possible with cifs. Note that smb2 mount options also are simpler
- (compared to cifs) due to protocol improvements.
-
- Unless you are a developer or tester, say N.
+ This enables support for the Server Message Block version 2
+ family of protocols, including SMB3. SMB3 support is
+ enabled on mount by specifying "vers=3.0" in the mount
+ options. These protocols are the successors to the popular
+ CIFS and SMB network file sharing protocols. SMB3 is the
+ native file sharing mechanism for the more recent
+ versions of Windows (Windows 8 and Windows 2012 and
+ later) and Samba server and many others support SMB3 well.
+ In general SMB3 enables better performance, security
+ and features than would be possible with CIFS. (Note that
+ when mounting to Samba, due to the CIFS POSIX extensions,
+ CIFS mounts can provide slightly better POSIX compatibility
+ than SMB3 mounts do.) Note that SMB2/SMB3 mount
+ options are also slightly simpler (compared to CIFS) due
+ to protocol improvements.
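As the help text says, the SMB3 dialect is selected per mount, e.g. (hypothetical server and mount point):

	mount -t cifs //server/share /mnt -o vers=3.0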
config CIFS_FSCACHE
bool "Provide CIFS client caching support"
#define SERVER_NAME_LENGTH 40
#define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1)
-/* used to define string lengths for reversing unicode strings */
-/* (256+1)*2 = 514 */
-/* (max path length + 1 for null) * 2 for unicode */
-#define MAX_NAME 514
-
/* SMB echo "timeout" -- FIXME: tunable? */
#define SMB_ECHO_INTERVAL (60 * HZ)
struct TCP_Server_Info *server = p;
unsigned int pdu_length;
char *buf = NULL;
- struct task_struct *task_to_wake = NULL;
struct mid_q_entry *mid_entry;
current->flags |= PF_MEMALLOC;
if (server->smallbuf) /* no sense logging a debug message if NULL */
cifs_small_buf_release(server->smallbuf);
- task_to_wake = xchg(&server->tsk, NULL);
clean_demultiplex_info(server);
-
- /* if server->tsk was NULL then wait for a signal before exiting */
- if (!task_to_wake) {
- set_current_state(TASK_INTERRUPTIBLE);
- while (!signal_pending(current)) {
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- }
- set_current_state(TASK_RUNNING);
- }
-
module_put_and_exit(0);
}
tmp_end++;
if (!(tmp_end < end && tmp_end[1] == delim)) {
/* No it is not. Set the password to NULL */
+ kfree(vol->password);
vol->password = NULL;
break;
}
options = end;
}
+ kfree(vol->password);
/* Now build new password string */
temp_len = strlen(value);
vol->password = kzalloc(temp_len+1, GFP_KERNEL);
static void
cifs_put_tcp_session(struct TCP_Server_Info *server)
{
- struct task_struct *task;
-
spin_lock(&cifs_tcp_ses_lock);
if (--server->srv_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
kfree(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
-
- task = xchg(&server->tsk, NULL);
- if (task)
- force_sig(SIGKILL, task);
}
static struct TCP_Server_Info *
goto out;
}
+ if (file->f_flags & O_DIRECT &&
+ CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+ if (CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ file->f_op = &cifs_file_direct_nobrl_ops;
+ else
+ file->f_op = &cifs_file_direct_ops;
+ }
+
file_info = cifs_new_fileinfo(&fid, file, tlink, oplock);
if (file_info == NULL) {
if (server->ops->close)
cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
inode, file->f_flags, full_path);
+ if (file->f_flags & O_DIRECT &&
+ cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ file->f_op = &cifs_file_direct_nobrl_ops;
+ else
+ file->f_op = &cifs_file_direct_ops;
+ }
+
if (server->oplocks)
oplock = REQ_OPLOCK;
else
unlink_target:
/* Try unlinking the target dentry if it's not negative */
if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
- tmprc = cifs_unlink(target_dir, target_dentry);
+ if (d_is_dir(target_dentry))
+ tmprc = cifs_rmdir(target_dir, target_dentry);
+ else
+ tmprc = cifs_unlink(target_dir, target_dentry);
if (tmprc)
goto cifs_rename_exit;
rc = cifs_do_rename(xid, source_dentry, from_name,
if (server->ops->dir_needs_close(cfile)) {
cfile->invalidHandle = true;
spin_unlock(&cifs_file_list_lock);
- if (server->ops->close)
- server->ops->close(xid, tcon, &cfile->fid);
+ if (server->ops->close_dir)
+ server->ops->close_dir(xid, tcon, &cfile->fid);
} else
spin_unlock(&cifs_file_list_lock);
if (cfile->srch_inf.ntwrk_buf_start) {
kfree(ses->serverOS);
ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
- if (ses->serverOS)
+ if (ses->serverOS) {
strncpy(ses->serverOS, bcc_ptr, len);
- if (strncmp(ses->serverOS, "OS/2", 4) == 0)
- cifs_dbg(FYI, "OS/2 server\n");
+ if (strncmp(ses->serverOS, "OS/2", 4) == 0)
+ cifs_dbg(FYI, "OS/2 server\n");
+ }
bcc_ptr += len + 1;
bleft -= len + 1;
goto out;
}
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
GFP_KERNEL);
if (smb2_data == NULL) {
rc = -ENOMEM;
*adjust_tz = false;
*symlink = false;
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
GFP_KERNEL);
if (smb2_data == NULL)
return -ENOMEM;
int rc;
struct smb2_file_all_info *smb2_data;
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
GFP_KERNEL);
if (smb2_data == NULL)
return -ENOMEM;
if (keep_size == false)
return -EOPNOTSUPP;
- /*
+ /*
* Must check if file sparse since fallocate -z (zero range) assumes
* non-sparse allocation
*/
struct smb2_sess_setup_rsp *rsp = NULL;
struct kvec iov[2];
int rc = 0;
- int resp_buftype;
+ int resp_buftype = CIFS_NO_BUFFER;
__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
struct TCP_Server_Info *server = ses->server;
u16 blob_length = 0;
rsp = (struct smb2_close_rsp *)iov[0].iov_base;
if (rc != 0) {
- if (tcon)
- cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
+ cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
goto close_exit;
}
{
return query_info(xid, tcon, persistent_fid, volatile_fid,
FILE_ALL_INFORMATION,
- sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+ sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
sizeof(struct smb2_file_all_info), data);
}
goto error_tgt_fput;
/* Check if EPOLLWAKEUP is allowed */
- ep_take_care_of_epollwakeup(&epds);
+ if (ep_op_has_event(op))
+ ep_take_care_of_epollwakeup(&epds);
/*
* We have to check that the file structure underneath the file descriptor
&new.de, &new.inlined);
if (IS_ERR(new.bh)) {
retval = PTR_ERR(new.bh);
+ new.bh = NULL;
goto end_rename;
}
if (new.bh) {
&new.de, &new.inlined);
if (IS_ERR(new.bh)) {
retval = PTR_ERR(new.bh);
+ new.bh = NULL;
goto end_rename;
}
bh = bclean(handle, sb, block);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
+ bh = NULL;
goto out;
}
overhead = ext4_group_overhead_blocks(sb, group);
bh = bclean(handle, sb, block);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
+ bh = NULL;
goto out;
}
mounted as f2fs. Each file shows the whole f2fs information.
/sys/kernel/debug/f2fs/status includes:
- - major file system information managed by f2fs currently
+ - major filesystem information managed by f2fs currently
- average SIT information about whole segments
- current memory footprint consumed by f2fs.
bool "F2FS consistency checking feature"
depends on F2FS_FS
help
- Enables BUG_ONs which check the file system consistency in runtime.
+ Enables BUG_ONs which check the filesystem consistency in runtime.
If you want to improve the performance, say N.
goto redirty_out;
if (wbc->for_reclaim)
goto redirty_out;
-
- /* Should not write any meta pages, if any IO error was occurred */
- if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
- goto no_write;
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto redirty_out;
f2fs_wait_on_page_writeback(page, META);
write_meta_page(sbi, page);
-no_write:
dec_page_count(sbi, F2FS_DIRTY_META);
unlock_page(page);
return 0;
return e ? true : false;
}
-static void release_dirty_inode(struct f2fs_sb_info *sbi)
+void release_dirty_inode(struct f2fs_sb_info *sbi)
{
struct ino_entry *e, *tmp;
int i;
struct f2fs_orphan_block *orphan_blk = NULL;
unsigned int nentries = 0;
unsigned short index;
- unsigned short orphan_blocks = (unsigned short)((sbi->n_orphans +
- (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);
+ unsigned short orphan_blocks =
+ (unsigned short)GET_ORPHAN_BLOCKS(sbi->n_orphans);
struct page *page = NULL;
struct ino_entry *orphan = NULL;
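GET_ORPHAN_BLOCKS() replaces the open-coded ceiling division visible in the removed lines here, and again in the checkpoint hunk further down; matching the code it replaces, the macro is presumably:

	#define GET_ORPHAN_BLOCKS(n)	(((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
					 F2FS_ORPHANS_PER_BLOCK)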
/*
* Freeze all the FS-operations for checkpoint.
*/
-static void block_operations(struct f2fs_sb_info *sbi)
+static int block_operations(struct f2fs_sb_info *sbi)
{
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.for_reclaim = 0,
};
struct blk_plug plug;
+ int err = 0;
blk_start_plug(&plug);
if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
f2fs_unlock_all(sbi);
sync_dirty_dir_inodes(sbi);
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
+ goto out;
+ }
goto retry_flush_dents;
}
/*
- * POR: we should ensure that there is no dirty node pages
+ * POR: we should ensure that there are no dirty node pages
* until finishing nat/sit flush.
*/
retry_flush_nodes:
if (get_pages(sbi, F2FS_DIRTY_NODES)) {
up_write(&sbi->node_write);
sync_node_pages(sbi, 0, &wbc);
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_unlock_all(sbi);
+ err = -EIO;
+ goto out;
+ }
goto retry_flush_nodes;
}
+out:
blk_finish_plug(&plug);
+ return err;
}
static void unblock_operations(struct f2fs_sb_info *sbi)
discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg));
/* Flush all the NAT/SIT pages */
- while (get_pages(sbi, F2FS_DIRTY_META))
+ while (get_pages(sbi, F2FS_DIRTY_META)) {
sync_meta_pages(sbi, META, LONG_MAX);
+ if (unlikely(f2fs_cp_error(sbi)))
+ return;
+ }
next_free_nid(sbi, &last_nid);
ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
ckpt->cur_node_segno[i] =
cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
ckpt->cur_node_blkoff[i] =
ckpt->alloc_type[i + CURSEG_HOT_NODE] =
curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
}
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
ckpt->cur_data_segno[i] =
cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
ckpt->cur_data_blkoff[i] =
/* 2 cp + n data seg summary + orphan inode blocks */
data_sum_blocks = npages_for_summary_flush(sbi);
- if (data_sum_blocks < 3)
+ if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
else
clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
- orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
- / F2FS_ORPHANS_PER_BLOCK;
+ orphan_blocks = GET_ORPHAN_BLOCKS(sbi->n_orphans);
ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
orphan_blocks);
if (is_umount) {
set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
- ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+ ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS+
cp_payload_blks + data_sum_blocks +
orphan_blocks + NR_CURSEG_NODE_TYPE);
} else {
clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
- ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+ ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
cp_payload_blks + data_sum_blocks +
orphan_blocks);
}
/* wait for previous submitted node/meta pages writeback */
wait_on_all_pages_writeback(sbi);
+ if (unlikely(f2fs_cp_error(sbi)))
+ return;
+
filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);
/* Here, we only have one bio having CP pack */
sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
- if (!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
- clear_prefree_segments(sbi);
- release_dirty_inode(sbi);
- F2FS_RESET_SB_DIRT(sbi);
- }
+ release_dirty_inode(sbi);
+
+ if (unlikely(f2fs_cp_error(sbi)))
+ return;
+
+ clear_prefree_segments(sbi);
+ F2FS_RESET_SB_DIRT(sbi);
}
/*
- * We guarantee that this checkpoint procedure should not fail.
+ * We guarantee that this checkpoint procedure will not fail.
*/
void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops");
mutex_lock(&sbi->cp_mutex);
- block_operations(sbi);
+
+ if (!sbi->s_dirty)
+ goto out;
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto out;
+ if (block_operations(sbi))
+ goto out;
trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");
do_checkpoint(sbi, is_umount);
unblock_operations(sbi);
- mutex_unlock(&sbi->cp_mutex);
-
stat_inc_cp_count(sbi->stat_info);
+out:
+ mutex_unlock(&sbi->cp_mutex);
trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
}
* for cp pack we can have max 1020*504 orphan entries
*/
sbi->n_orphans = 0;
- sbi->max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE)
- * F2FS_ORPHANS_PER_BLOCK;
+ sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
+ NR_CURSEG_TYPE) * F2FS_ORPHANS_PER_BLOCK;
}
int __init create_checkpoint_caches(void)
struct page *page = bvec->bv_page;
if (unlikely(err)) {
- SetPageError(page);
+ set_page_dirty(page);
set_bit(AS_EIO, &page->mapping->flags);
f2fs_stop_checkpoint(sbi);
}
allocated = true;
blkaddr = dn.data_blkaddr;
}
- /* Give more consecutive addresses for the read ahead */
+ /* Give more consecutive addresses for the readahead */
if (blkaddr == (bh_result->b_blocknr + ofs)) {
ofs++;
dn.ofs_in_node++;
trace_f2fs_readpage(page, DATA);
- /* If the file has inline data, try to read it directlly */
+ /* If the file has inline data, try to read it directly */
if (f2fs_has_inline_data(inode))
ret = f2fs_read_inline_data(inode, page);
else
/* Dentry blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode)) {
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto redirty_out;
err = do_write_data_page(page, &fio);
goto done;
}
+ /* we should bypass data pages to let the kworker jobs proceed */
+ if (unlikely(f2fs_cp_error(sbi))) {
+ SetPageError(page);
+ unlock_page(page);
+ return 0;
+ }
+
if (!wbc->for_reclaim)
need_balance_fs = true;
else if (has_not_enough_free_secs(sbi, 0))
if (to > inode->i_size) {
truncate_pagecache(inode, inode->i_size);
- truncate_blocks(inode, inode->i_size);
+ truncate_blocks(inode, inode->i_size, true);
}
}
f2fs_balance_fs(sbi);
repeat:
- err = f2fs_convert_inline_data(inode, pos + len);
+ err = f2fs_convert_inline_data(inode, pos + len, NULL);
if (err)
goto fail;
struct f2fs_stat_info *si = F2FS_STAT(sbi);
int i;
- /* valid check of the segment numbers */
+ /* validation check of the segment numbers */
si->hit_ext = sbi->read_hit_ext;
si->total_ext = sbi->total_hit_ext;
si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(TOTAL_SEGS(sbi));
si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi));
- /* buld nm */
+ /* build nm */
si->base_mem += sizeof(struct f2fs_nm_info);
si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
/*
* For the most part, it should be a bug when name_len is zero.
- * We stop here for figuring out where the bugs are occurred.
+ * We stop here for figuring out where the bug has occurred.
*/
f2fs_bug_on(!de->name_len);
error:
/* once the failed inode becomes a bad inode, i_mode is S_IFREG */
truncate_inode_pages(&inode->i_data, 0);
- truncate_blocks(inode, 0);
+ truncate_blocks(inode, 0, false);
remove_dirty_dir_inode(inode);
remove_inode_page(inode);
return ERR_PTR(err);
}
/*
- * It only removes the dentry from the dentry page,corresponding name
+ * It only removes the dentry from the dentry page, corresponding name
* entry in name page does not need to be touched during deletion.
*/
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
#define f2fs_bug_on(condition) BUG_ON(condition)
#define f2fs_down_write(x, y) down_write_nest_lock(x, y)
#else
-#define f2fs_bug_on(condition)
+#define f2fs_bug_on(condition) WARN_ON(condition)
#define f2fs_down_write(x, y) down_write(x)
#endif
};
/*
- * The below are the page types of bios used in submti_bio().
+ * The below are the page types of bios used in submit_bio().
* The available types are:
* DATA User data pages. It operates as async mode.
* NODE Node pages. It operates as async mode.
struct list_head dir_inode_list; /* dir inode list */
spinlock_t dir_inode_lock; /* for dir inode list lock */
- /* basic file system units */
+ /* basic filesystem units */
unsigned int log_sectors_per_block; /* log2 sectors per block */
unsigned int log_blocksize; /* log2 block size */
unsigned int blocksize; /* block size */
/*
* odd numbered checkpoint should be at cp segment 0
- * and even segent must be at cp segment 1
+ * and even segment must be at cp segment 1
*/
if (!(ckpt_version & 1))
start_addr += sbi->blocks_per_seg;
return sb->s_flags & MS_RDONLY;
}
+static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
+{
+ return is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+}
+
static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi)
{
set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
*/
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
-int truncate_blocks(struct inode *, u64);
+int truncate_blocks(struct inode *, u64, bool);
void f2fs_truncate(struct inode *);
int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int f2fs_setattr(struct dentry *, struct iattr *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
-void recover_node_page(struct f2fs_sb_info *, struct page *,
- struct f2fs_summary *, struct node_info *, block_t);
void recover_inline_xattr(struct inode *, struct page *);
-bool recover_xattr_data(struct inode *, struct page *, block_t);
+void recover_xattr_data(struct inode *, struct page *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
int restore_node_summary(struct f2fs_sb_info *, unsigned int,
struct f2fs_summary_block *);
void rewrite_data_page(struct page *, block_t, struct f2fs_io_info *);
void recover_data_page(struct f2fs_sb_info *, struct page *,
struct f2fs_summary *, block_t, block_t);
-void rewrite_node_page(struct f2fs_sb_info *, struct page *,
- struct f2fs_summary *, block_t, block_t);
void allocate_data_block(struct f2fs_sb_info *, struct page *,
block_t, block_t *, struct f2fs_summary *, int);
void f2fs_wait_on_page_writeback(struct page *, enum page_type);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
+void release_dirty_inode(struct f2fs_sb_info *);
bool exist_written_data(struct f2fs_sb_info *, nid_t, int);
int acquire_orphan_inode(struct f2fs_sb_info *);
void release_orphan_inode(struct f2fs_sb_info *);
*/
bool f2fs_may_inline(struct inode *);
int f2fs_read_inline_data(struct inode *, struct page *);
-int f2fs_convert_inline_data(struct inode *, pgoff_t);
+int f2fs_convert_inline_data(struct inode *, pgoff_t, struct page *);
int f2fs_write_inline_data(struct inode *, struct page *, unsigned int);
void truncate_inline_data(struct inode *, u64);
-int recover_inline_data(struct inode *, struct page *);
+bool recover_inline_data(struct inode *, struct page *);
#endif
sb_start_pagefault(inode->i_sb);
+ /* force conversion to normal data indices */
+ err = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, page);
+ if (err)
+ goto out;
+
/* block allocation */
f2fs_lock_op(sbi);
set_new_dnode(&dn, inode, NULL, NULL, 0);
return 1;
}
+static inline bool need_do_checkpoint(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ bool need_cp = false;
+
+ if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
+ need_cp = true;
+ else if (file_wrong_pino(inode))
+ need_cp = true;
+ else if (!space_for_roll_forward(sbi))
+ need_cp = true;
+ else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
+ need_cp = true;
+ else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
+ need_cp = true;
+
+ return need_cp;
+}
+
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
struct inode *inode = file->f_mapping->host;
/* guarantee free sections for fsync */
f2fs_balance_fs(sbi);
- down_read(&fi->i_sem);
-
/*
* Both of fdatasync() and fsync() are able to be recovered from
* sudden-power-off.
*/
- if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
- need_cp = true;
- else if (file_wrong_pino(inode))
- need_cp = true;
- else if (!space_for_roll_forward(sbi))
- need_cp = true;
- else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
- need_cp = true;
- else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
- need_cp = true;
-
+ down_read(&fi->i_sem);
+ need_cp = need_do_checkpoint(inode);
up_read(&fi->i_sem);
if (need_cp) {
if (err && err != -ENOENT) {
goto fail;
} else if (err == -ENOENT) {
- /* direct node is not exist */
+ /* direct node does not exist */
if (whence == SEEK_DATA) {
pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
F2FS_I(inode));
f2fs_put_page(page, 1);
}
-int truncate_blocks(struct inode *inode, u64 from)
+int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
unsigned int blocksize = inode->i_sb->s_blocksize;
free_from = (pgoff_t)
((from + blocksize - 1) >> (sbi->log_blocksize));
- f2fs_lock_op(sbi);
+ if (lock)
+ f2fs_lock_op(sbi);
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
if (err) {
if (err == -ENOENT)
goto free_next;
- f2fs_unlock_op(sbi);
+ if (lock)
+ f2fs_unlock_op(sbi);
trace_f2fs_truncate_blocks_exit(inode, err);
return err;
}
f2fs_put_dnode(&dn);
free_next:
err = truncate_inode_blocks(inode, free_from);
- f2fs_unlock_op(sbi);
+ if (lock)
+ f2fs_unlock_op(sbi);
done:
/* lastly zero out the first data page */
truncate_partial_data_page(inode, from);
trace_f2fs_truncate(inode);
- if (!truncate_blocks(inode, i_size_read(inode))) {
+ if (!truncate_blocks(inode, i_size_read(inode), true)) {
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
}
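
The new bool argument lets the caller state whether truncate_blocks() should take f2fs_lock_op() itself; paths where checkpointing is already fenced off by other means (the error and recovery paths above pass false) must not grab it again. Roughly, the two call shapes are:

    /* normal path: no op lock held, let truncate_blocks() take it */
    truncate_blocks(inode, i_size_read(inode), true);

    /* e.g. recovery: checkpointing already excluded, skip the lock */
    truncate_blocks(inode, 0, false);
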
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
- err = f2fs_convert_inline_data(inode, attr->ia_size);
+ err = f2fs_convert_inline_data(inode, attr->ia_size, NULL);
if (err)
return err;
loff_t off_start, off_end;
int ret = 0;
- ret = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1);
+ ret = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, NULL);
if (ret)
return ret;
if (ret)
return ret;
- ret = f2fs_convert_inline_data(inode, offset + len);
+ ret = f2fs_convert_inline_data(inode, offset + len, NULL);
if (ret)
return ret;
* 3. IO subsystem is idle by checking the # of requests in
* bdev's request list.
*
- * Note) We have to avoid triggering GCs too much frequently.
+ * Note) We have to avoid triggering GCs too frequently.
* Because it is possible that some segments can be
* invalidated soon after by user update or deletion.
* So, I'd like to wait some time to collect dirty segments.
u = (vblocks * 100) >> sbi->log_blocks_per_seg;
- /* Handle if the system time is changed by user */
+ /* Handle the case where the system time has been changed by the user */
if (mtime < sit_i->min_mtime)
sit_i->min_mtime = mtime;
if (mtime > sit_i->max_mtime)
if (phase == 2) {
inode = f2fs_iget(sb, dni.ino);
- if (IS_ERR(inode))
+ if (IS_ERR(inode) || is_bad_inode(inode))
continue;
start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
gc_more:
if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
goto stop;
- if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
+ if (unlikely(f2fs_cp_error(sbi)))
goto stop;
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
block_t invalid_user_blocks = sbi->user_block_count -
written_block_count(sbi);
/*
- * Background GC is triggered with the following condition.
+ * Background GC is triggered with the following conditions.
* 1. There are a number of invalid blocks.
* 2. There is not enough free space.
*/
buf[1] += b1;
}
-static void str2hashbuf(const char *msg, size_t len, unsigned int *buf, int num)
+static void str2hashbuf(const unsigned char *msg, size_t len,
+ unsigned int *buf, int num)
{
unsigned pad, val;
int i;
{
__u32 hash;
f2fs_hash_t f2fs_hash;
- const char *p;
+ const unsigned char *p;
__u32 in[8], buf[4];
- const char *name = name_info->name;
+ const unsigned char *name = name_info->name;
size_t len = name_info->len;
if ((len <= 2) && (name[0] == '.') &&
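
The switch from char to unsigned char above is more than style: whether plain char is signed is implementation-defined, so a filename byte of 0x80 or above may sign-extend on one architecture and not another, producing different dentry hashes for the same name. A standalone illustration (not f2fs code):

    #include <stdio.h>

    int main(void)
    {
        char s = (char)0xe9;      /* typically -23 where plain char is signed */
        unsigned char u = 0xe9;   /* always 233 */

        /* Any hash mixing these promoted values diverges across targets. */
        printf("signed: %d  unsigned: %d\n", (int)s, (int)u);
        return 0;
    }
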
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
- int err;
+ int err = 0;
struct page *ipage;
struct dnode_of_data dn;
void *src_addr, *dst_addr;
goto out;
}
+ /* someone else converted inline_data already */
+ if (!f2fs_has_inline_data(inode))
+ goto out;
+
/*
* i_addr[0] is not used for inline data,
* so reserving new block will not destroy inline data
return err;
}
-int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
+int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size,
+ struct page *page)
{
- struct page *page;
+ struct page *new_page = page;
int err;
if (!f2fs_has_inline_data(inode))
else if (to_size <= MAX_INLINE_DATA)
return 0;
- page = grab_cache_page(inode->i_mapping, 0);
- if (!page)
- return -ENOMEM;
+ if (!page || page->index != 0) {
+ new_page = grab_cache_page(inode->i_mapping, 0);
+ if (!new_page)
+ return -ENOMEM;
+ }
- err = __f2fs_convert_inline_data(inode, page);
- f2fs_put_page(page, 1);
+ err = __f2fs_convert_inline_data(inode, new_page);
+ if (!page || page->index != 0)
+ f2fs_put_page(new_page, 1);
return err;
}
int f2fs_write_inline_data(struct inode *inode,
- struct page *page, unsigned size)
+ struct page *page, unsigned size)
{
void *src_addr, *dst_addr;
struct page *ipage;
f2fs_put_page(ipage, 1);
}
-int recover_inline_data(struct inode *inode, struct page *npage)
+bool recover_inline_data(struct inode *inode, struct page *npage)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct f2fs_inode *ri = NULL;
ri = F2FS_INODE(npage);
if (f2fs_has_inline_data(inode) &&
- ri && ri->i_inline & F2FS_INLINE_DATA) {
+ ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
ipage = get_node_page(sbi, inode->i_ino);
f2fs_bug_on(IS_ERR(ipage));
memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
- return -1;
+ return true;
}
if (f2fs_has_inline_data(inode)) {
clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
- } else if (ri && ri->i_inline & F2FS_INLINE_DATA) {
- truncate_blocks(inode, 0);
+ } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
+ truncate_blocks(inode, 0, false);
set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
goto process_inline;
}
- return 0;
+ return false;
}
return 0;
out:
clear_nlink(inode);
- unlock_new_inode(inode);
- make_bad_inode(inode);
- iput(inode);
+ iget_failed(inode);
alloc_nid_failed(sbi, ino);
return err;
}
f2fs_delete_entry(de, page, inode);
f2fs_unlock_op(sbi);
- /* In order to evict this inode, we set it dirty */
+ /* In order to evict this inode, we set it dirty */
mark_inode_dirty(inode);
fail:
trace_f2fs_unlink_exit(inode, err);
return err;
out:
clear_nlink(inode);
- unlock_new_inode(inode);
- make_bad_inode(inode);
- iput(inode);
+ iget_failed(inode);
alloc_nid_failed(sbi, inode->i_ino);
return err;
}
out_fail:
clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
clear_nlink(inode);
- unlock_new_inode(inode);
- make_bad_inode(inode);
- iput(inode);
+ iget_failed(inode);
alloc_nid_failed(sbi, inode->i_ino);
return err;
}
return 0;
out:
clear_nlink(inode);
- unlock_new_inode(inode);
- make_bad_inode(inode);
- iput(inode);
+ iget_failed(inode);
alloc_nid_failed(sbi, inode->i_ino);
return err;
}
out:
f2fs_unlock_op(sbi);
clear_nlink(inode);
- unlock_new_inode(inode);
- make_bad_inode(inode);
- iput(inode);
+ iget_failed(inode);
alloc_nid_failed(sbi, inode->i_ino);
return err;
}
.mkdir = f2fs_mkdir,
.rmdir = f2fs_rmdir,
.mknod = f2fs_mknod,
- .rename = f2fs_rename,
.rename2 = f2fs_rename2,
.tmpfile = f2fs_tmpfile,
.getattr = f2fs_getattr,
nat_get_blkaddr(e) != NULL_ADDR &&
new_blkaddr == NEW_ADDR);
- /* increament version no as node is removed */
+ /* increment version no as node is removed */
if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
unsigned char version = nat_get_version(e);
nat_set_version(e, inc_node_version(version));
}
/*
- * This function returns always success
+ * This function always returns success
*/
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
/* get indirect nodes in the path */
for (i = 0; i < idx + 1; i++) {
- /* refernece count'll be increased */
+ /* reference count will be increased */
pages[i] = get_node_page(sbi, nid[i]);
if (IS_ERR(pages[i])) {
err = PTR_ERR(pages[i]);
*/
void remove_inode_page(struct inode *inode)
{
- struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- struct page *page;
- nid_t ino = inode->i_ino;
struct dnode_of_data dn;
- page = get_node_page(sbi, ino);
- if (IS_ERR(page))
+ set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
+ if (get_dnode_of_data(&dn, 0, LOOKUP_NODE))
return;
- if (truncate_xattr_node(inode, page)) {
- f2fs_put_page(page, 1);
+ if (truncate_xattr_node(inode, dn.inode_page)) {
+ f2fs_put_dnode(&dn);
return;
}
- /* 0 is possible, after f2fs_new_inode() is failed */
+
+ /* remove potential inline_data blocks */
+ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode))
+ truncate_data_blocks_range(&dn, 1);
+
+ /* 0 is possible, after f2fs_new_inode() has failed */
f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
- set_new_dnode(&dn, inode, page, page, ino);
+
+ /* will put inode & node pages */
truncate_node(&dn);
}
set_fsync_mark(page, 0);
set_dentry_mark(page, 0);
}
- NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
- wrote++;
+
+ if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
+ unlock_page(page);
+ else
+ wrote++;
if (--wbc->nr_to_write == 0)
break;
if (unlikely(sbi->por_doing))
goto redirty_out;
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto redirty_out;
f2fs_wait_on_page_writeback(page, NODE);
kmem_cache_free(free_nid_slab, i);
}
-void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
- struct f2fs_summary *sum, struct node_info *ni,
- block_t new_blkaddr)
-{
- rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
- set_node_addr(sbi, ni, new_blkaddr, false);
- clear_node_page_dirty(page);
-}
-
void recover_inline_xattr(struct inode *inode, struct page *page)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct page *ipage;
struct f2fs_inode *ri;
- if (!f2fs_has_inline_xattr(inode))
- return;
-
- if (!IS_INODE(page))
- return;
-
- ri = F2FS_INODE(page);
- if (!(ri->i_inline & F2FS_INLINE_XATTR))
- return;
-
ipage = get_node_page(sbi, inode->i_ino);
f2fs_bug_on(IS_ERR(ipage));
+ ri = F2FS_INODE(page);
+ if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
+ clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
+ goto update_inode;
+ }
+
dst_addr = inline_xattr_addr(ipage);
src_addr = inline_xattr_addr(page);
inline_size = inline_xattr_size(inode);
f2fs_wait_on_page_writeback(ipage, NODE);
memcpy(dst_addr, src_addr, inline_size);
-
+update_inode:
update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
}
-bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
+void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
nid_t new_xnid = nid_of_node(page);
struct node_info ni;
- if (!f2fs_has_xattr_block(ofs_of_node(page)))
- return false;
-
/* 1: invalidate the previous xattr nid */
if (!prev_xnid)
goto recover_xnid;
set_node_addr(sbi, &ni, blkaddr, false);
update_inode_page(inode);
- return true;
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
if (!ipage)
return -ENOMEM;
- /* Should not use this inode from free nid list */
+ /* Should not use this inode from free nid list */
remove_free_nid(NM_I(sbi), ino);
SetPageUptodate(ipage);
dst->i_blocks = cpu_to_le64(1);
dst->i_links = cpu_to_le32(1);
dst->i_xattr_nid = 0;
+ dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;
new_ni = old_ni;
new_ni.ino = ino;
WARN_ON(1);
set_node_addr(sbi, &new_ni, NEW_ADDR, false);
inc_valid_inode_count(sbi);
+ set_page_dirty(ipage);
f2fs_put_page(ipage, 1);
return 0;
}
/*
* ra_sum_pages() merge contiguous pages into one bio and submit.
- * these pre-readed pages are alloced in bd_inode's mapping tree.
+ * these pre-read pages are allocated in bd_inode's mapping tree.
*/
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
int start, int nrpages)
for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
nrpages = min(last_offset - i, bio_blocks);
- /* read ahead node pages */
+ /* readahead node pages */
nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
if (!nrpages)
return -ENOMEM;
nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
/* not used nids: 0, node, meta, (and root counted as valid node) */
- nm_i->available_nids = nm_i->max_nid - 3;
+ nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
nm_i->fcnt = 0;
nm_i->nat_cnt = 0;
nm_i->ram_thresh = DEF_RAM_THRESHOLD;
}
retry:
de = f2fs_find_entry(dir, &name, &page);
- if (de && inode->i_ino == le32_to_cpu(de->ino))
+ if (de && inode->i_ino == le32_to_cpu(de->ino)) {
+ clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
goto out_unmap_put;
+ }
if (de) {
einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
if (IS_ERR(einode)) {
struct node_info ni;
int err = 0, recovered = 0;
- recover_inline_xattr(inode, page);
-
- if (recover_inline_data(inode, page))
+ /* step 1: recover xattr */
+ if (IS_INODE(page)) {
+ recover_inline_xattr(inode, page);
+ } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
+ recover_xattr_data(inode, page, blkaddr);
goto out;
+ }
- if (recover_xattr_data(inode, page, blkaddr))
+ /* step 2: recover inline data */
+ if (recover_inline_data(inode, page))
goto out;
+ /* step 3: recover data indices */
start = start_bidx_of_node(ofs_of_node(page), fi);
end = start + ADDRS_PER_PAGE(page, fi);
fill_node_footer(dn.node_page, dn.nid, ni.ino,
ofs_of_node(page), false);
set_page_dirty(dn.node_page);
-
- recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
err:
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
/* step #1: find fsynced inode numbers */
sbi->por_doing = true;
+ /* prevent checkpoint */
+ mutex_lock(&sbi->cp_mutex);
+
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
err = find_fsync_dnodes(sbi, &inode_list);
/* step #2: recover data */
err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
- f2fs_bug_on(!list_empty(&inode_list));
+ if (!err)
+ f2fs_bug_on(!list_empty(&inode_list));
out:
destroy_fsync_dnodes(&inode_list);
kmem_cache_destroy(fsync_entry_slab);
/* Flush all the NAT/SIT pages */
while (get_pages(sbi, F2FS_DIRTY_META))
sync_meta_pages(sbi, META, LONG_MAX);
+ set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+ mutex_unlock(&sbi->cp_mutex);
} else if (need_writecp) {
+ mutex_unlock(&sbi->cp_mutex);
write_checkpoint(sbi, false);
+ } else {
+ mutex_unlock(&sbi->cp_mutex);
}
return err;
}
}
/*
- * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c becasue
+ * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
* f2fs_set_bit makes MSB and LSB reversed in a byte.
* Example:
* LSB <--> MSB
}
/*
- * This function always allocates a used segment (from dirty seglist) by SSR
+ * This function always allocates a used segment(from dirty seglist) by SSR
* manner, so it should recover the existing segment information of valid blocks
*/
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
mutex_unlock(&curseg->curseg_mutex);
}
-void rewrite_node_page(struct f2fs_sb_info *sbi,
- struct page *page, struct f2fs_summary *sum,
- block_t old_blkaddr, block_t new_blkaddr)
-{
- struct sit_info *sit_i = SIT_I(sbi);
- int type = CURSEG_WARM_NODE;
- struct curseg_info *curseg;
- unsigned int segno, old_cursegno;
- block_t next_blkaddr = next_blkaddr_of_node(page);
- unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
- struct f2fs_io_info fio = {
- .type = NODE,
- .rw = WRITE_SYNC,
- };
-
- curseg = CURSEG_I(sbi, type);
-
- mutex_lock(&curseg->curseg_mutex);
- mutex_lock(&sit_i->sentry_lock);
-
- segno = GET_SEGNO(sbi, new_blkaddr);
- old_cursegno = curseg->segno;
-
- /* change the current segment */
- if (segno != curseg->segno) {
- curseg->next_segno = segno;
- change_curseg(sbi, type, true);
- }
- curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
- __add_sum_entry(sbi, type, sum);
-
- /* change the current log to the next block addr in advance */
- if (next_segno != segno) {
- curseg->next_segno = next_segno;
- change_curseg(sbi, type, true);
- }
- curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, next_blkaddr);
-
- /* rewrite node page */
- set_page_writeback(page);
- f2fs_submit_page_mbio(sbi, page, new_blkaddr, &fio);
- f2fs_submit_merged_bio(sbi, NODE, WRITE);
- refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
- locate_dirty_segment(sbi, old_cursegno);
-
- mutex_unlock(&sit_i->sentry_lock);
- mutex_unlock(&curseg->curseg_mutex);
-}
-
static inline bool is_merged_page(struct f2fs_sb_info *sbi,
struct page *page, enum page_type type)
{
}
/*
- * Summary block is always treated as invalid block
+ * Summary block is always treated as an invalid block
*/
static inline void check_block_count(struct f2fs_sb_info *sbi,
int segno, struct f2fs_sit_entry *raw_sit)
stop_gc_thread(sbi);
/* We don't need to do checkpoint when it's clean */
- if (sbi->s_dirty && get_pages(sbi, F2FS_DIRTY_NODES))
+ if (sbi->s_dirty)
write_checkpoint(sbi, true);
+ /*
+ * normally superblock is clean, so we need to release this.
+ * In addition, EIO will skip do checkpoint, we need this as well.
+ */
+ release_dirty_inode(sbi);
+
iput(sbi->node_inode);
iput(sbi->meta_inode);
trace_f2fs_sync_fs(sb, sync);
- if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
- return 0;
-
if (sync) {
mutex_lock(&sbi->gc_mutex);
write_checkpoint(sbi, false);
buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
buf->f_bavail = user_block_count - valid_user_blocks(sbi);
- buf->f_files = sbi->total_node_count;
- buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi);
+ buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
+ buf->f_ffree = buf->f_files - valid_inode_count(sbi);
buf->f_namelen = F2FS_NAME_LEN;
buf->f_fsid.val[0] = (u32)id;
if (need_restart_gc) {
if (start_gc_thread(sbi))
f2fs_msg(sbi->sb, KERN_WARNING,
- "background gc thread is stop");
+ "background gc thread has stopped");
} else if (need_stop_gc) {
stop_gc_thread(sbi);
}
if (unlikely(fsmeta >= total))
return 1;
- if (unlikely(is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))) {
+ if (unlikely(f2fs_cp_error(sbi))) {
f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
return 1;
}
struct buffer_head *raw_super_buf;
struct inode *root;
long err = -EINVAL;
+ bool retry = true;
int i;
+try_onemore:
/* allocate memory for f2fs-specific super block info */
sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
if (!sbi)
/* recover fsynced data */
if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
err = recover_fsync_data(sbi);
- if (err)
+ if (err) {
f2fs_msg(sb, KERN_ERR,
"Cannot recover all fsync data errno=%ld", err);
+ goto free_kobj;
+ }
}
/*
brelse(raw_super_buf);
free_sbi:
kfree(sbi);
+
+ /* give only one more chance */
+ if (retry) {
+ retry = false;
+ shrink_dcache_sb(sb);
+ goto try_onemore;
+ }
return err;
}
int free;
/*
* If value is NULL, it is remove operation.
- * In case of update operation, we caculate free.
+ * In case of update operation, we calculate free.
*/
free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr);
if (found)
error = make_socks(serv, net);
if (error < 0)
- goto err_socks;
+ goto err_bind;
set_grace_period(net);
dprintk("lockd_up_net: per-net data created; net=%p\n", net);
return 0;
-err_socks:
- svc_rpcb_cleanup(serv, net);
err_bind:
ln->nlmsvc_users--;
return error;
head.first->pprev = &head.first;
INIT_HLIST_HEAD(&unmounted);
+ /* undo decrements we'd done in umount_tree() */
+ hlist_for_each_entry(mnt, &head, mnt_hash)
+ if (mnt->mnt_ex_mountpoint.mnt)
+ mntget(mnt->mnt_ex_mountpoint.mnt);
+
up_write(&namespace_sem);
synchronize_rcu();
hlist_add_head(&p->mnt_hash, &tmp_list);
}
+ hlist_for_each_entry(p, &tmp_list, mnt_hash)
+ list_del_init(&p->mnt_child);
+
if (how)
propagate_umount(&tmp_list);
p->mnt_ns = NULL;
if (how < 2)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
- list_del_init(&p->mnt_child);
if (mnt_has_parent(p)) {
put_mountpoint(p->mnt_mp);
+ mnt_add_count(p->mnt_parent, -1);
/* move the reference to mountpoint into ->mnt_ex_mountpoint */
p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
struct xdr_stream *xdr = cd->xdr;
int start_offset = xdr->buf->len;
int cookie_offset;
+ u32 name_and_cookie;
int entry_bytes;
__be32 nfserr = nfserr_toosmall;
__be64 wire_offset;
cd->rd_maxcount -= entry_bytes;
if (!cd->rd_dircount)
goto fail;
- cd->rd_dircount--;
+ /*
+ * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
+ * let's always let through the first entry, at least:
+ */
+ name_and_cookie = 4 * XDR_QUADLEN(namlen) + 8;
+ if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
+ goto fail;
+ cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
cd->cookie_offset = cookie_offset;
skip_entry:
cd->common.err = nfs_ok;
}
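
The per-entry charge against rd_dircount is the XDR-encoded name rounded up to 4-byte words plus the 8-byte cookie. A userspace sketch of the arithmetic, with XDR_QUADLEN written out the way the sunrpc headers define it:

    #include <stdio.h>

    #define XDR_QUADLEN(l) (((l) + 3) >> 2)  /* bytes -> 4-byte XDR words */

    int main(void)
    {
        unsigned int namlen = 12;  /* hypothetical 12-byte entry name */
        unsigned int charged = 4 * XDR_QUADLEN(namlen) + 8; /* + cookie */

        printf("%u bytes charged against rd_dircount\n", charged); /* 20 */
        return 0;
    }
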
maxcount = min_t(int, maxcount-16, bytes_left);
+ /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
+ if (!readdir->rd_dircount)
+ readdir->rd_dircount = INT_MAX;
+
readdir->xdr = xdr;
readdir->rd_maxcount = maxcount;
readdir->common.err = 0;
{
struct {
struct file_handle handle;
- u8 pad[64];
+ u8 pad[MAX_HANDLE_SZ];
} f;
int size, ret, i;
size = f.handle.handle_bytes >> 2;
ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
- if ((ret == 255) || (ret == -ENOSPC)) {
+ if ((ret == FILEID_INVALID) || (ret < 0)) {
WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
return 0;
}
* other children
*/
if (child && list_empty(&child->mnt_mounts)) {
+ list_del_init(&child->mnt_child);
hlist_del_init_rcu(&child->mnt_hash);
hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
}
return ret;
return __sync_filesystem(sb, 1);
}
-EXPORT_SYMBOL_GPL(sync_filesystem);
+EXPORT_SYMBOL(sync_filesystem);
static void sync_inodes_one_sb(struct super_block *sb, void *arg)
{
udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
}
-struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
+struct inode *udf_new_inode(struct inode *dir, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_inode_info *iinfo;
struct udf_inode_info *dinfo = UDF_I(dir);
struct logicalVolIntegrityDescImpUse *lvidiu;
+ int err;
inode = new_inode(sb);
- if (!inode) {
- *err = -ENOMEM;
- return NULL;
- }
- *err = -ENOSPC;
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
iinfo = UDF_I(inode);
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
}
if (!iinfo->i_ext.i_data) {
iput(inode);
- *err = -ENOMEM;
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
+ err = -ENOSPC;
block = udf_new_block(dir->i_sb, NULL,
dinfo->i_location.partitionReferenceNum,
- start, err);
- if (*err) {
+ start, &err);
+ if (err) {
iput(inode);
- return NULL;
+ return ERR_PTR(err);
}
lvidiu = udf_sb_lvidiu(sb);
if (lvidiu) {
iinfo->i_unique = lvid_get_unique_id(sb);
+ inode->i_generation = iinfo->i_unique;
mutex_lock(&sbi->s_alloc_mutex);
if (S_ISDIR(mode))
le32_add_cpu(&lvidiu->numDirs, 1);
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
inode->i_mtime = inode->i_atime = inode->i_ctime =
iinfo->i_crtime = current_fs_time(inode->i_sb);
- insert_inode_hash(inode);
+ if (unlikely(insert_inode_locked(inode) < 0)) {
+ make_bad_inode(inode);
+ iput(inode);
+ return ERR_PTR(-EIO);
+ }
mark_inode_dirty(inode);
- *err = 0;
return inode;
}
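
With the int *err output parameter gone, callers follow the usual ERR_PTR convention from <linux/err.h>, and since the inode now comes back locked via insert_inode_locked(), they unlock it once set up. A hedged sketch of the calling pattern (the caller body is illustrative, not from the patch):

    #include <linux/err.h>
    #include "udfdecl.h"   /* declares udf_new_inode(), as in fs/udf */

    static int example_caller(struct inode *dir, umode_t mode)
    {
        struct inode *inode = udf_new_inode(dir, mode);

        if (IS_ERR(inode))
            return PTR_ERR(inode);  /* negative errno encoded in the pointer */

        /* ... set up the inode, add the directory entry ... */
        unlock_new_inode(inode);
        return 0;
    }
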
static umode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
-static void udf_fill_inode(struct inode *, struct buffer_head *);
static int udf_sync_inode(struct inode *inode);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
return 0;
}
-static void __udf_read_inode(struct inode *inode)
+/*
+ * Maximum length of linked list formed by ICB hierarchy. The chosen number is
+ * arbitrary - just that we hopefully don't limit any real use of rewritten
+ * inode on write-once media but avoid looping for too long on corrupted media.
+ */
+#define UDF_MAX_ICB_NESTING 1024
+
+static int udf_read_inode(struct inode *inode)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
+ struct extendedFileEntry *efe;
uint16_t ident;
struct udf_inode_info *iinfo = UDF_I(inode);
+ struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
+ struct kernel_lb_addr *iloc = &iinfo->i_location;
+ unsigned int link_count;
+ unsigned int indirections = 0;
+ int ret = -EIO;
+
+reread:
+ if (iloc->logicalBlockNum >=
+ sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
+ udf_debug("block=%d, partition=%d out of range\n",
+ iloc->logicalBlockNum, iloc->partitionReferenceNum);
+ return -EIO;
+ }
/*
* Set defaults, but the inode is still incomplete!
* i_nlink = 1
* i_op = NULL;
*/
- bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident);
+ bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
if (!bh) {
udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
- make_bad_inode(inode);
- return;
+ return -EIO;
}
if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
ident != TAG_IDENT_USE) {
udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
inode->i_ino, ident);
- brelse(bh);
- make_bad_inode(inode);
- return;
+ goto out;
}
fe = (struct fileEntry *)bh->b_data;
+ efe = (struct extendedFileEntry *)bh->b_data;
if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
struct buffer_head *ibh;
- ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
- &ident);
+ ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
if (ident == TAG_IDENT_IE && ibh) {
- struct buffer_head *nbh = NULL;
struct kernel_lb_addr loc;
struct indirectEntry *ie;
ie = (struct indirectEntry *)ibh->b_data;
loc = lelb_to_cpu(ie->indirectICB.extLocation);
- if (ie->indirectICB.extLength &&
- (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
- &ident))) {
- if (ident == TAG_IDENT_FE ||
- ident == TAG_IDENT_EFE) {
- memcpy(&iinfo->i_location,
- &loc,
- sizeof(struct kernel_lb_addr));
- brelse(bh);
- brelse(ibh);
- brelse(nbh);
- __udf_read_inode(inode);
- return;
+ if (ie->indirectICB.extLength) {
+ brelse(ibh);
+ memcpy(&iinfo->i_location, &loc,
+ sizeof(struct kernel_lb_addr));
+ if (++indirections > UDF_MAX_ICB_NESTING) {
+ udf_err(inode->i_sb,
+ "too many ICBs in ICB hierarchy"
+ " (max %d supported)\n",
+ UDF_MAX_ICB_NESTING);
+ goto out;
}
- brelse(nbh);
+ brelse(bh);
+ goto reread;
}
}
brelse(ibh);
} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
udf_err(inode->i_sb, "unsupported strategy type: %d\n",
le16_to_cpu(fe->icbTag.strategyType));
- brelse(bh);
- make_bad_inode(inode);
- return;
+ goto out;
}
- udf_fill_inode(inode, bh);
-
- brelse(bh);
-}
-
-static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
-{
- struct fileEntry *fe;
- struct extendedFileEntry *efe;
- struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
- struct udf_inode_info *iinfo = UDF_I(inode);
- unsigned int link_count;
-
- fe = (struct fileEntry *)bh->b_data;
- efe = (struct extendedFileEntry *)bh->b_data;
-
if (fe->icbTag.strategyType == cpu_to_le16(4))
iinfo->i_strat4096 = 0;
else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
iinfo->i_efe = 1;
iinfo->i_use = 0;
- if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
- sizeof(struct extendedFileEntry))) {
- make_bad_inode(inode);
- return;
- }
+ ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+ sizeof(struct extendedFileEntry));
+ if (ret)
+ goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct extendedFileEntry),
inode->i_sb->s_blocksize -
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
iinfo->i_efe = 0;
iinfo->i_use = 0;
- if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
- sizeof(struct fileEntry))) {
- make_bad_inode(inode);
- return;
- }
+ ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+ sizeof(struct fileEntry));
+ if (ret)
+ goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct fileEntry),
inode->i_sb->s_blocksize - sizeof(struct fileEntry));
iinfo->i_lenAlloc = le32_to_cpu(
((struct unallocSpaceEntry *)bh->b_data)->
lengthAllocDescs);
- if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
- sizeof(struct unallocSpaceEntry))) {
- make_bad_inode(inode);
- return;
- }
+ ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+ sizeof(struct unallocSpaceEntry));
+ if (ret)
+ goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct unallocSpaceEntry),
inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry));
- return;
+ return 0;
}
+ ret = -EIO;
read_lock(&sbi->s_cred_lock);
i_uid_write(inode, le32_to_cpu(fe->uid));
if (!uid_valid(inode->i_uid) ||
read_unlock(&sbi->s_cred_lock);
link_count = le16_to_cpu(fe->fileLinkCount);
- if (!link_count)
- link_count = 1;
+ if (!link_count) {
+ ret = -ESTALE;
+ goto out;
+ }
set_nlink(inode, link_count);
inode->i_size = le64_to_cpu(fe->informationLength);
iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
}
+ inode->i_generation = iinfo->i_unique;
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
default:
udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
inode->i_ino, fe->icbTag.fileType);
- make_bad_inode(inode);
- return;
+ goto out;
}
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
struct deviceSpec *dsea =
le32_to_cpu(dsea->minorDeviceIdent)));
/* Developer ID ??? */
} else
- make_bad_inode(inode);
+ goto out;
}
+ ret = 0;
+out:
+ brelse(bh);
+ return ret;
}
static int udf_alloc_i_data(struct inode *inode, size_t size)
FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
fe->permissions = cpu_to_le32(udfperms);
- if (S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
else
fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
{
unsigned long block = udf_get_lb_pblock(sb, ino, 0);
struct inode *inode = iget_locked(sb, block);
+ int err;
if (!inode)
- return NULL;
-
- if (inode->i_state & I_NEW) {
- memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
- __udf_read_inode(inode);
- unlock_new_inode(inode);
- }
+ return ERR_PTR(-ENOMEM);
- if (is_bad_inode(inode))
- goto out_iput;
+ if (!(inode->i_state & I_NEW))
+ return inode;
- if (ino->logicalBlockNum >= UDF_SB(sb)->
- s_partmaps[ino->partitionReferenceNum].s_partition_len) {
- udf_debug("block=%d, partition=%d out of range\n",
- ino->logicalBlockNum, ino->partitionReferenceNum);
- make_bad_inode(inode);
- goto out_iput;
+ memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
+ err = udf_read_inode(inode);
+ if (err < 0) {
+ iget_failed(inode);
+ return ERR_PTR(err);
}
+ unlock_new_inode(inode);
return inode;
-
- out_iput:
- iput(inode);
- return NULL;
}
int udf_add_aext(struct inode *inode, struct extent_position *epos,
NULL, 0),
};
inode = udf_iget(dir->i_sb, lb);
- if (!inode) {
- return ERR_PTR(-EACCES);
- }
+ if (IS_ERR(inode))
+ return inode;
} else
#endif /* UDF_RECOVERY */
loc = lelb_to_cpu(cfi.icb.extLocation);
inode = udf_iget(dir->i_sb, &loc);
- if (!inode) {
- return ERR_PTR(-EACCES);
- }
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
}
return d_splice_alias(inode, dentry);
return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
}
-static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
{
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ struct inode *dir = dentry->d_parent->d_inode;
struct udf_fileident_bh fibh;
- struct inode *inode;
struct fileIdentDesc cfi, *fi;
int err;
- struct udf_inode_info *iinfo;
-
- inode = udf_new_inode(dir, mode, &err);
- if (!inode) {
- return err;
- }
-
- iinfo = UDF_I(inode);
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
- inode->i_data.a_ops = &udf_adinicb_aops;
- else
- inode->i_data.a_ops = &udf_aops;
- inode->i_op = &udf_file_inode_operations;
- inode->i_fop = &udf_file_operations;
- mark_inode_dirty(inode);
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
- if (!fi) {
+ if (unlikely(!fi)) {
inode_dec_link_count(inode);
+ unlock_new_inode(inode);
iput(inode);
return err;
}
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
+ unlock_new_inode(inode);
d_instantiate(dentry, inode);
return 0;
}
-static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool excl)
{
- struct inode *inode;
- struct udf_inode_info *iinfo;
- int err;
+ struct inode *inode = udf_new_inode(dir, mode);
- inode = udf_new_inode(dir, mode, &err);
- if (!inode)
- return err;
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
- iinfo = UDF_I(inode);
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+ if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
inode->i_data.a_ops = &udf_adinicb_aops;
else
inode->i_data.a_ops = &udf_aops;
inode->i_fop = &udf_file_operations;
mark_inode_dirty(inode);
+ return udf_add_nondir(dentry, inode);
+}
+
+static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ struct inode *inode = udf_new_inode(dir, mode);
+
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+ inode->i_data.a_ops = &udf_adinicb_aops;
+ else
+ inode->i_data.a_ops = &udf_aops;
+ inode->i_op = &udf_file_inode_operations;
+ inode->i_fop = &udf_file_operations;
+ mark_inode_dirty(inode);
d_tmpfile(dentry, inode);
+ unlock_new_inode(inode);
return 0;
}
dev_t rdev)
{
struct inode *inode;
- struct udf_fileident_bh fibh;
- struct fileIdentDesc cfi, *fi;
- int err;
- struct udf_inode_info *iinfo;
if (!old_valid_dev(rdev))
return -EINVAL;
- err = -EIO;
- inode = udf_new_inode(dir, mode, &err);
- if (!inode)
- goto out;
+ inode = udf_new_inode(dir, mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
- iinfo = UDF_I(inode);
init_special_inode(inode, mode, rdev);
- fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
- if (!fi) {
- inode_dec_link_count(inode);
- iput(inode);
- return err;
- }
- cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
- cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
- *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
- cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
- udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
- if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
- mark_inode_dirty(dir);
- mark_inode_dirty(inode);
-
- if (fibh.sbh != fibh.ebh)
- brelse(fibh.ebh);
- brelse(fibh.sbh);
- d_instantiate(dentry, inode);
- err = 0;
-
-out:
- return err;
+ return udf_add_nondir(dentry, inode);
}
static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct udf_inode_info *dinfo = UDF_I(dir);
struct udf_inode_info *iinfo;
- err = -EIO;
- inode = udf_new_inode(dir, S_IFDIR | mode, &err);
- if (!inode)
- goto out;
+ inode = udf_new_inode(dir, S_IFDIR | mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
iinfo = UDF_I(inode);
inode->i_op = &udf_dir_inode_operations;
fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err);
if (!fi) {
inode_dec_link_count(inode);
+ unlock_new_inode(inode);
iput(inode);
goto out;
}
if (!fi) {
clear_nlink(inode);
mark_inode_dirty(inode);
+ unlock_new_inode(inode);
iput(inode);
goto out;
}
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
inc_nlink(dir);
mark_inode_dirty(dir);
+ unlock_new_inode(inode);
d_instantiate(dentry, inode);
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
static int udf_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
- struct inode *inode;
+ struct inode *inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO);
struct pathComponent *pc;
const char *compstart;
- struct udf_fileident_bh fibh;
struct extent_position epos = {};
int eoffset, elen = 0;
- struct fileIdentDesc *fi;
- struct fileIdentDesc cfi;
uint8_t *ea;
int err;
int block;
struct udf_inode_info *iinfo;
struct super_block *sb = dir->i_sb;
- inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err);
- if (!inode)
- goto out;
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
iinfo = UDF_I(inode);
down_write(&iinfo->i_data_sem);
mark_inode_dirty(inode);
up_write(&iinfo->i_data_sem);
- fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
- if (!fi)
- goto out_fail;
- cfi.icb.extLength = cpu_to_le32(sb->s_blocksize);
- cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
- if (UDF_SB(inode->i_sb)->s_lvid_bh) {
- *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
- cpu_to_le32(lvid_get_unique_id(sb));
- }
- udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
- if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
- mark_inode_dirty(dir);
- if (fibh.sbh != fibh.ebh)
- brelse(fibh.ebh);
- brelse(fibh.sbh);
- d_instantiate(dentry, inode);
- err = 0;
-
+ err = udf_add_nondir(dentry, inode);
out:
kfree(name);
return err;
out_no_entry:
up_write(&iinfo->i_data_sem);
-out_fail:
inode_dec_link_count(inode);
+ unlock_new_inode(inode);
iput(inode);
goto out;
}
struct udf_fileident_bh fibh;
if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
- goto out_unlock;
+ return ERR_PTR(-EACCES);
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
tloc = lelb_to_cpu(cfi.icb.extLocation);
inode = udf_iget(child->d_inode->i_sb, &tloc);
- if (!inode)
- goto out_unlock;
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
return d_obtain_alias(inode);
-out_unlock:
- return ERR_PTR(-EACCES);
}
loc.partitionReferenceNum = partref;
inode = udf_iget(sb, &loc);
- if (inode == NULL)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
if (generation && inode->i_generation != generation) {
iput(inode);
metadata_fe = udf_iget(sb, &addr);
- if (metadata_fe == NULL)
+ if (IS_ERR(metadata_fe)) {
udf_warn(sb, "metadata inode efe not found\n");
- else if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
+ return metadata_fe;
+ }
+ if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
iput(metadata_fe);
- metadata_fe = NULL;
+ return ERR_PTR(-EIO);
}
return metadata_fe;
struct udf_part_map *map;
struct udf_meta_data *mdata;
struct kernel_lb_addr addr;
+ struct inode *fe;
map = &sbi->s_partmaps[partition];
mdata = &map->s_type_specific.s_metadata;
udf_debug("Metadata file location: block = %d part = %d\n",
mdata->s_meta_file_loc, map->s_partition_num);
- mdata->s_metadata_fe = udf_find_metadata_inode_efe(sb,
- mdata->s_meta_file_loc, map->s_partition_num);
-
- if (mdata->s_metadata_fe == NULL) {
+ fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
+ map->s_partition_num);
+ if (IS_ERR(fe)) {
/* mirror file entry */
udf_debug("Mirror metadata file location: block = %d part = %d\n",
mdata->s_mirror_file_loc, map->s_partition_num);
- mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
- mdata->s_mirror_file_loc, map->s_partition_num);
+ fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
+ map->s_partition_num);
- if (mdata->s_mirror_fe == NULL) {
+ if (IS_ERR(fe)) {
udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
- return -EIO;
+ return PTR_ERR(fe);
}
- }
+ mdata->s_mirror_fe = fe;
+ } else
+ mdata->s_metadata_fe = fe;
+
/*
* bitmap file entry
udf_debug("Bitmap file location: block = %d part = %d\n",
addr.logicalBlockNum, addr.partitionReferenceNum);
- mdata->s_bitmap_fe = udf_iget(sb, &addr);
- if (mdata->s_bitmap_fe == NULL) {
+ fe = udf_iget(sb, &addr);
+ if (IS_ERR(fe)) {
if (sb->s_flags & MS_RDONLY)
udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
else {
udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
- return -EIO;
+ return PTR_ERR(fe);
}
- }
+ } else
+ mdata->s_bitmap_fe = fe;
}
udf_debug("udf_load_metadata_files Ok\n");
phd->unallocSpaceTable.extPosition),
.partitionReferenceNum = p_index,
};
+ struct inode *inode;
- map->s_uspace.s_table = udf_iget(sb, &loc);
- if (!map->s_uspace.s_table) {
+ inode = udf_iget(sb, &loc);
+ if (IS_ERR(inode)) {
udf_debug("cannot load unallocSpaceTable (part %d)\n",
p_index);
- return -EIO;
+ return PTR_ERR(inode);
}
+ map->s_uspace.s_table = inode;
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
udf_debug("unallocSpaceTable (part %d) @ %ld\n",
p_index, map->s_uspace.s_table->i_ino);
phd->freedSpaceTable.extPosition),
.partitionReferenceNum = p_index,
};
+ struct inode *inode;
- map->s_fspace.s_table = udf_iget(sb, &loc);
- if (!map->s_fspace.s_table) {
+ inode = udf_iget(sb, &loc);
+ if (IS_ERR(inode)) {
udf_debug("cannot load freedSpaceTable (part %d)\n",
p_index);
- return -EIO;
+ return PTR_ERR(inode);
}
-
+ map->s_fspace.s_table = inode;
map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
udf_debug("freedSpaceTable (part %d) @ %ld\n",
p_index, map->s_fspace.s_table->i_ino);
struct udf_part_map *map = &sbi->s_partmaps[p_index];
sector_t vat_block;
struct kernel_lb_addr ino;
+ struct inode *inode;
/*
* VAT file entry is in the last recorded block. Some broken disks have
ino.partitionReferenceNum = type1_index;
for (vat_block = start_block;
vat_block >= map->s_partition_root &&
- vat_block >= start_block - 3 &&
- !sbi->s_vat_inode; vat_block--) {
+ vat_block >= start_block - 3; vat_block--) {
ino.logicalBlockNum = vat_block - map->s_partition_root;
- sbi->s_vat_inode = udf_iget(sb, &ino);
+ inode = udf_iget(sb, &ino);
+ if (!IS_ERR(inode)) {
+ sbi->s_vat_inode = inode;
+ break;
+ }
}
}
/* assign inodes by physical block number */
/* perhaps it's not extensible enough, but for now ... */
inode = udf_iget(sb, &rootdir);
- if (!inode) {
+ if (IS_ERR(inode)) {
udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
- ret = -EIO;
+ ret = PTR_ERR(inode);
goto error_out;
}
extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
extern int udf_setsize(struct inode *, loff_t);
-extern void udf_read_inode(struct inode *);
extern void udf_evict_inode(struct inode *);
extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
extern long udf_block_map(struct inode *, sector_t);
/* ialloc.c */
extern void udf_free_inode(struct inode *);
-extern struct inode *udf_new_inode(struct inode *, umode_t, int *);
+extern struct inode *udf_new_inode(struct inode *, umode_t);
/* truncate.c */
extern void udf_truncate_tail_extent(struct inode *);
invalidate_inode_buffers(inode);
clear_inode(inode);
- if (want_delete) {
- lock_ufs(inode->i_sb);
- ufs_free_inode (inode);
- unlock_ufs(inode->i_sb);
- }
+ if (want_delete)
+ ufs_free_inode(inode);
}
if (l > sb->s_blocksize)
goto out_notlocked;
- lock_ufs(dir->i_sb);
inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
err = PTR_ERR(inode);
if (IS_ERR(inode))
- goto out;
+ goto out_notlocked;
+ lock_ufs(dir->i_sb);
if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
/* slow symlink */
inode->i_op = &ufs_symlink_inode_operations;
struct inode * inode;
int err;
- lock_ufs(dir->i_sb);
- inode_inc_link_count(dir);
-
inode = ufs_new_inode(dir, S_IFDIR|mode);
- err = PTR_ERR(inode);
if (IS_ERR(inode))
- goto out_dir;
+ return PTR_ERR(inode);
inode->i_op = &ufs_dir_inode_operations;
inode->i_fop = &ufs_dir_operations;
inode_inc_link_count(inode);
+ lock_ufs(dir->i_sb);
+ inode_inc_link_count(dir);
+
err = ufs_make_empty(inode, dir);
if (err)
goto out_fail;
inode_dec_link_count(inode);
inode_dec_link_count(inode);
iput (inode);
-out_dir:
inode_dec_link_count(dir);
unlock_ufs(dir->i_sb);
goto out;
struct xfs_bmap_free *flist,
int num_exts)
{
- struct xfs_btree_cur *cur;
+ struct xfs_btree_cur *cur = NULL;
struct xfs_bmbt_rec_host *gotp;
struct xfs_bmbt_irec got;
struct xfs_bmbt_irec left;
int error = 0;
int i;
int whichfork = XFS_DATA_FORK;
- int logflags;
+ int logflags = 0;
xfs_filblks_t blockcount = 0;
int total_extents;
}
}
- /* We are going to change core inode */
- logflags = XFS_ILOG_CORE;
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_private.b.firstblock = *firstblock;
cur->bc_private.b.flist = flist;
cur->bc_private.b.flags = 0;
- } else {
- cur = NULL;
- logflags |= XFS_ILOG_DEXT;
}
/*
blockcount = left.br_blockcount +
got.br_blockcount;
xfs_iext_remove(ip, *current_ext, 1, 0);
+ logflags |= XFS_ILOG_CORE;
if (cur) {
error = xfs_btree_delete(cur, &i);
if (error)
goto del_cursor;
XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
+ } else {
+ logflags |= XFS_ILOG_DEXT;
}
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
got.br_startoff = startoff;
}
+ logflags |= XFS_ILOG_CORE;
if (cur) {
error = xfs_bmbt_update(cur, got.br_startoff,
got.br_startblock,
got.br_state);
if (error)
goto del_cursor;
+ } else {
+ logflags |= XFS_ILOG_DEXT;
}
(*current_ext)++;
xfs_btree_del_cursor(cur,
error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
- xfs_trans_log_inode(tp, ip, logflags);
+ if (logflags)
+ xfs_trans_log_inode(tp, ip, logflags);
return error;
}
return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
+/*
+ * This is basically a copy of __set_page_dirty_buffers() with one
+ * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
+ * dirty, we'll never be able to clean them because we don't write buffers
+ * beyond EOF, and that means we can't invalidate pages that span EOF
+ * that have been marked dirty. Further, the dirty state can leak into
+ * the file interior if the file is extended, resulting in all sorts of
+ * bad things happening as the state does not match the underlying data.
+ *
+ * XXX: this really indicates that bufferheads in XFS need to die. Warts like
+ * this only exist because of bufferheads and how the generic code manages them.
+ */
+STATIC int
+xfs_vm_set_page_dirty(
+ struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode = mapping->host;
+ loff_t end_offset;
+ loff_t offset;
+ int newly_dirty;
+
+ if (unlikely(!mapping))
+ return !TestSetPageDirty(page);
+
+ end_offset = i_size_read(inode);
+ offset = page_offset(page);
+
+ spin_lock(&mapping->private_lock);
+ if (page_has_buffers(page)) {
+ struct buffer_head *head = page_buffers(page);
+ struct buffer_head *bh = head;
+
+ do {
+ if (offset < end_offset)
+ set_buffer_dirty(bh);
+ bh = bh->b_this_page;
+ offset += 1 << inode->i_blkbits;
+ } while (bh != head);
+ }
+ newly_dirty = !TestSetPageDirty(page);
+ spin_unlock(&mapping->private_lock);
+
+ if (newly_dirty) {
+ /* sigh - __set_page_dirty() is static, so copy it here, too */
+ unsigned long flags;
+
+ spin_lock_irqsave(&mapping->tree_lock, flags);
+ if (page->mapping) { /* Race with truncate? */
+ WARN_ON_ONCE(!PageUptodate(page));
+ account_page_dirtied(page, mapping);
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
+ }
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ }
+ return newly_dirty;
+}
+
const struct address_space_operations xfs_address_space_operations = {
.readpage = xfs_vm_readpage,
.readpages = xfs_vm_readpages,
.writepage = xfs_vm_writepage,
.writepages = xfs_vm_writepages,
+ .set_page_dirty = xfs_vm_set_page_dirty,
.releasepage = xfs_vm_releasepage,
.invalidatepage = xfs_vm_invalidatepage,
.write_begin = xfs_vm_write_begin,
start_fsb = XFS_B_TO_FSB(mp, offset + len);
shift_fsb = XFS_B_TO_FSB(mp, len);
+ /*
+ * Writeback the entire file and force remove any post-eof blocks. The
+ * writeback prevents changes to the extent list via concurrent
+ * writeback and the eofblocks trim prevents the extent shift algorithm
+ * from running into a post-eof delalloc extent.
+ *
+ * XXX: This is a temporary fix until the extent shift loop below is
+ * converted to use offsets and lookups within the ILOCK rather than
+ * carrying around the index into the extent list for the next
+ * iteration.
+ */
+ error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+ if (error)
+ return error;
+ if (xfs_can_free_eofblocks(ip, true)) {
+ error = xfs_free_eofblocks(mp, ip, false);
+ if (error)
+ return error;
+ }
+
error = xfs_free_file_space(ip, offset, len);
if (error)
return error;
if (inode->i_mapping->nrpages) {
ret = filemap_write_and_wait_range(
VFS_I(ip)->i_mapping,
- pos, -1);
+ pos, pos + size - 1);
if (ret) {
xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
return ret;
}
- truncate_pagecache_range(VFS_I(ip), pos, -1);
+
+ /*
+ * Invalidate whole pages. This can return an error if
+ * we fail to invalidate a page, but this should never
+ * happen on XFS. Warn if it does fail.
+ */
+ ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
+ pos >> PAGE_CACHE_SHIFT,
+ (pos + size - 1) >> PAGE_CACHE_SHIFT);
+ WARN_ON_ONCE(ret);
+ ret = 0;
}
xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
}
if (mapping->nrpages) {
ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
- pos, -1);
+ pos, pos + count - 1);
if (ret)
goto out;
- truncate_pagecache_range(VFS_I(ip), pos, -1);
+ /*
+ * Invalidate whole pages. This can return an error if
+ * we fail to invalidate a page, but this should never
+ * happen on XFS. Warn if it does fail.
+ */
+ ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
+ pos >> PAGE_CACHE_SHIFT,
+ (pos + count - 1) >> PAGE_CACHE_SHIFT);
+ WARN_ON_ONCE(ret);
+ ret = 0;
}
/*
acpi_device_name device_name; /* Driver-determined */
acpi_device_class device_class; /* " */
union acpi_object *str_obj; /* unicode string for _STR method */
- unsigned long sun; /* _SUN */
};
#define acpi_device_bid(d) ((d)->pnp.bus_id)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
+/* 0, 1(node nid), 2(meta nid) are reserved node ids */
+#define F2FS_RESERVED_NODE_NUM 3
+
#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num)
#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
#define CP_ORPHAN_PRESENT_FLAG 0x00000002
#define CP_UMOUNT_FLAG 0x00000001
+#define F2FS_CP_PACKS 2 /* # of checkpoint packs */
+
struct f2fs_checkpoint {
__le64 checkpoint_ver; /* checkpoint block version number */
__le64 user_block_count; /* # of user blocks */
*/
#define F2FS_ORPHANS_PER_BLOCK 1020
+#define GET_ORPHAN_BLOCKS(n) ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
+ F2FS_ORPHANS_PER_BLOCK)
+
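
GET_ORPHAN_BLOCKS() is plain ceiling division: how many orphan blocks are needed to hold n inode numbers. Copied verbatim into a standalone check (the expected values follow from F2FS_ORPHANS_PER_BLOCK = 1020):

    #include <assert.h>

    #define F2FS_ORPHANS_PER_BLOCK 1020
    #define GET_ORPHAN_BLOCKS(n) ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
                    F2FS_ORPHANS_PER_BLOCK)

    int main(void)
    {
        assert(GET_ORPHAN_BLOCKS(1) == 1);    /* one orphan still needs a block */
        assert(GET_ORPHAN_BLOCKS(1020) == 1); /* exactly one full block */
        assert(GET_ORPHAN_BLOCKS(1021) == 2); /* the next orphan spills over */
        return 0;
    }
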
struct f2fs_orphan_block {
__le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */
__le32 reserved; /* reserved */
#define F2FS_NAME_LEN 255
#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
#define ADDRS_PER_INODE(fi) addrs_per_inode(fi)
#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
F2FS_INLINE_XATTR_ADDRS - 1))
-#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) \
- - sizeof(__le32) * (DEF_ADDRS_PER_INODE + 5 - 1))
+#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\
+ sizeof(__le32) * (DEF_ADDRS_PER_INODE + \
+ DEF_NIDS_PER_INODE - 1))
struct f2fs_inode {
__le16 i_mode; /* file mode */
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
- __le32 i_nid[5]; /* direct(2), indirect(2),
+ __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
double_indirect(1) node id */
} __packed;
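
Naming the constant 5 as DEF_NIDS_PER_INODE also makes the inline-data sizing easy to check: with 4-byte __le32 slots, MAX_INLINE_DATA works out to 4 * (923 - 50 - 1) = 3488 bytes. A standalone sketch using the constants defined above:

    #include <assert.h>

    #define DEF_ADDRS_PER_INODE 923     /* address pointers in an inode */
    #define F2FS_INLINE_XATTR_ADDRS 50  /* slots reserved for inline xattrs */
    #define LE32_SIZE 4                 /* sizeof(__le32) */

    int main(void)
    {
        int max_inline =
            LE32_SIZE * (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS - 1);

        assert(max_inline == 3488); /* bytes of file data an inline inode holds */
        return 0;
    }
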
struct gpio_desc *__must_check __gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
-#define gpiod_get(varargs...) __gpiod_get(varargs, 0)
struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
-#define __gpiod_get_index(dev, con_id, index, flags, ...) \
- __gpiod_get_index(dev, con_id, index, flags)
-#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0)
struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-#define __gpiod_get_optional(dev, con_id, flags, ...) \
- __gpiod_get_optional(dev, con_id, flags)
-#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0)
struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags);
-#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
- __gpiod_get_index_optional(dev, con_id, index, flags)
-#define gpiod_get_index_optional(varargs...) \
- __gpiod_get_index_optional(varargs, 0)
-
void gpiod_put(struct gpio_desc *desc);
struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-#define __devm_gpiod_get(dev, con_id, flags, ...) \
- __devm_gpiod_get(dev, con_id, flags)
-#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0)
struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
-#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
- __devm_gpiod_get_index(dev, con_id, index, flags)
-#define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0)
struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
- __devm_gpiod_get_optional(dev, con_id, flags)
-#define devm_gpiod_get_optional(varargs...) \
- __devm_gpiod_get_optional(varargs, 0)
struct gpio_desc *__must_check
__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
-#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
- __devm_gpiod_get_index_optional(dev, con_id, index, flags)
-#define devm_gpiod_get_index_optional(varargs...) \
- __devm_gpiod_get_index_optional(varargs, 0)
-
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
int gpiod_get_direction(const struct gpio_desc *desc);
#else /* CONFIG_GPIOLIB */
-static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
- const char *con_id)
+static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
- const char *con_id,
- unsigned int idx)
+static inline struct gpio_desc *__must_check
+__gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-gpiod_get_optional(struct device *dev, const char *con_id)
+__gpiod_get_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-gpiod_get_index_optional(struct device *dev, const char *con_id,
- unsigned int index)
+__gpiod_get_index_optional(struct device *dev, const char *con_id,
+ unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
WARN_ON(1);
}
-static inline struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
- const char *con_id)
+static inline struct gpio_desc *__must_check
+__devm_gpiod_get(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline
-struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
- const char *con_id,
- unsigned int idx)
+struct gpio_desc *__must_check
+__devm_gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-devm_gpiod_get_optional(struct device *dev, const char *con_id)
+__devm_gpiod_get_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
- unsigned int index)
+__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+ unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
return -EINVAL;
}
-
#endif /* CONFIG_GPIOLIB */
+/*
+ * Vararg-hacks! This is done to transition the kernel to always pass
+ * the optional flags argument to the functions below. During the
+ * transition phase these vararg macros make both old- and new-style
+ * code compile, but once all callers of the older API are removed, they
+ * should go away and the __gpiod_get() etc. functions above be renamed
+ * to just gpiod_get() etc.
+ */
+#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
+#define gpiod_get(varargs...) __gpiod_get(varargs, 0)
+#define __gpiod_get_index(dev, con_id, index, flags, ...) \
+ __gpiod_get_index(dev, con_id, index, flags)
+#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0)
+#define __gpiod_get_optional(dev, con_id, flags, ...) \
+ __gpiod_get_optional(dev, con_id, flags)
+#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0)
+#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
+ __gpiod_get_index_optional(dev, con_id, index, flags)
+#define gpiod_get_index_optional(varargs...) \
+ __gpiod_get_index_optional(varargs, 0)
+#define __devm_gpiod_get(dev, con_id, flags, ...) \
+ __devm_gpiod_get(dev, con_id, flags)
+#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0)
+#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
+ __devm_gpiod_get_index(dev, con_id, index, flags)
+#define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0)
+#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
+ __devm_gpiod_get_optional(dev, con_id, flags)
+#define devm_gpiod_get_optional(varargs...) \
+ __devm_gpiod_get_optional(varargs, 0)
+#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
+ __devm_gpiod_get_index_optional(dev, con_id, index, flags)
+#define devm_gpiod_get_index_optional(varargs...) \
+ __devm_gpiod_get_index_optional(varargs, 0)
+
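A minimal sketch of what the transition macros above buy (editor illustration; GPIOD_OUT_LOW is one of the gpiod_flags values introduced alongside this change): both calling conventions resolve to the same __gpiod_get(), with legacy callers getting an implicit flags value of 0:

	struct gpio_desc *old_style = gpiod_get(dev, "reset");
	/* expands to __gpiod_get(dev, "reset", 0) */

	struct gpio_desc *new_style = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	/* the trailing 0 appended by gpiod_get() is swallowed by the
	 * __gpiod_get() macro's ... parameter, so this expands to
	 * __gpiod_get(dev, "reset", GPIOD_OUT_LOW) */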
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
+#include <linux/timer.h>
#include <linux/workqueue.h>
struct device;
const char *default_trigger; /* Trigger to use */
unsigned long blink_delay_on, blink_delay_off;
- struct delayed_work blink_work;
+ struct timer_list blink_timer;
int blink_brightness;
struct work_struct set_brightness_work;
enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+ int port, int qpn, u16 prio, u64 *reg_id);
+
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
int i, int val);
: 0;
}
-/**
+/*
* struct nand_sdr_timings - SDR NAND chip timings
*
 * This struct defines the timing requirements of an SDR NAND chip.
}
/**
- * __dev_uc_unsync - Remove synchonized addresses from device
+ * __dev_uc_unsync - Remove synchronized addresses from device
* @dev: device to sync
* @unsync: function to call if address should be removed
*
}
/**
- * __dev_mc_unsync - Remove synchonized addresses from device
+ * __dev_mc_unsync - Remove synchronized addresses from device
* @dev: device to sync
* @unsync: function to call if address should be removed
*
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
+#include <linux/static_key.h>
#include <uapi/linux/netfilter.h>
#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-#if defined(CONFIG_JUMP_LABEL)
-#include <linux/static_key.h>
+#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+
static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
{
if (__builtin_constant_p(pf) &&
struct mutex lock;
struct dev_power_governor *gov;
struct work_struct power_off_work;
- char *name;
+ const char *name;
unsigned int in_progress; /* Number of devices being suspended now */
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
* @linear_min_sel: Minimal selector for starting linear mapping
* @fixed_uV: Fixed voltage of rails.
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @linear_ranges: A constant table of possible voltage ranges.
+ * @n_linear_ranges: Number of entries in the @linear_ranges table.
* @volt_table: Voltage mapping table (if table based mapping)
*
* @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
* bootloader then it will be enabled when the constraints are
* applied.
* @apply_uV: Apply the voltage constraint when initialising.
+ * @ramp_disable: Disable ramp delay when initialising or when setting voltage.
*
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
extern void tick_nohz_init(void);
extern void __tick_nohz_full_check(void);
+extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
-
-static inline void tick_nohz_full_kick(void)
-{
- tick_nohz_full_kick_cpu(smp_processor_id());
-}
-
extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(struct task_struct *tsk);
#else
HCI_AUTO_CONN_ALWAYS,
HCI_AUTO_CONN_LINK_LOSS,
} auto_connect;
+
+ struct hci_conn *conn;
};
extern struct list_head hci_dev_list;
struct netns_ieee802154_lowpan {
struct netns_sysctl_lowpan sysctl;
struct netns_frags frags;
- int max_dsize;
};
#endif
struct ieee80211_regdomain {
struct rcu_head rcu_head;
u32 n_reg_rules;
- char alpha2[2];
+ char alpha2[3];
enum nl80211_dfs_regions dfs_region;
struct ieee80211_reg_rule reg_rules[];
};
return asoc ? asoc->assoc_id : 0;
}
+static inline enum sctp_sstat_state
+sctp_assoc_to_state(const struct sctp_association *asoc)
+{
+ /* SCTP's uapi always had SCTP_EMPTY(=0) as a dummy state, but we
+ * got rid of it in kernel space. Therefore SCTP_CLOSED et al
+ * start at =1 in user space, but at =0 in kernel space. Since we
+ * cannot break user space, and SCTP_EMPTY is exposed there, we need
+ * to fix it up with an ugly offset so as not to break applications. :(
+ */
+ return asoc->state + 1;
+}
+
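To make the off-by-one concrete (editor note; the enum values below follow the uapi of this era and should be double-checked against sctp.h):

	/* kernel space: SCTP_STATE_CLOSED == 0, SCTP_STATE_COOKIE_WAIT == 1, ...
	 * user space:   SCTP_EMPTY == 0 (dummy), SCTP_CLOSED == 1,
	 *               SCTP_COOKIE_WAIT == 2, ...
	 * so sctp_assoc_to_state() simply adds 1 to the kernel value.
	 */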
/* Look up the association by its id. */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
*/
if (sock_flag(sk, SOCK_RCVTSTAMP) ||
(sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
- (kt.tv64 &&
- (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
- skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP)) ||
+ (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
(hwtstamps->hwtstamp.tv64 &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
* This operation has to be synchronous, and return only when the
* reset is complete. In case of having had to resort to bus/cold
* reset implying a device disconnection, the call is allowed to
- * return inmediately.
+ * return immediately.
* NOTE: wimax_dev->mutex is NOT locked when this op is being
* called; however, wimax_dev->mutex_reset IS locked to ensure
* serialization of calls to wimax_reset().
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE | \
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, \
.tlv.c = (snd_soc_bytes_tlv_callback), \
- .info = snd_soc_info_bytes_ext, \
+ .info = snd_soc_bytes_info_ext, \
.private_value = (unsigned long)&(struct soc_bytes_ext) \
{.max = xcount, .get = xhandler_get, .put = xhandler_put, } }
#define SOC_SINGLE_XR_SX(xname, xregbase, xregcount, xnbits, \
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_exit tracepoint
- * we can determine the softirq handler runtine.
+ * we can determine the softirq handler routine.
*/
DEFINE_EVENT(softirq, softirq_entry,
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
- * we can determine the softirq handler runtine.
+ * we can determine the softirq handler routine.
*/
DEFINE_EVENT(softirq, softirq_exit,
header-y += mdio.h
header-y += media.h
header-y += mei.h
+header-y += memfd.h
header-y += mempolicy.h
header-y += meye.h
header-y += mic_common.h
#define INPUT_PROP_BUTTONPAD 0x02 /* has button(s) under pad */
#define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */
#define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */
+#define INPUT_PROP_POINTING_STICK 0x05 /* is a pointing stick */
#define INPUT_PROP_MAX 0x1f
#define INPUT_PROP_CNT (INPUT_PROP_MAX + 1)
css_get(&cgrp->self);
}
+static bool cgroup_tryget(struct cgroup *cgrp)
+{
+ return css_tryget(&cgrp->self);
+}
+
static void cgroup_put(struct cgroup *cgrp)
{
css_put(&cgrp->self);
* protection against removal. Ensure @cgrp stays accessible and
* break the active_ref protection.
*/
- cgroup_get(cgrp);
+ if (!cgroup_tryget(cgrp))
+ return NULL;
kernfs_break_active_protection(kn);
mutex_lock(&cgroup_mutex);
{
struct cftype *cft;
- for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
- cft->flags |= __CFTYPE_NOT_ON_DFL;
+ /*
+ * If legacy_files_on_dfl, we want to show the legacy files on the
+ * dfl hierarchy, but only if the target subsystem hasn't been updated
+ * for the dfl hierarchy yet.
+ */
+ if (!cgroup_legacy_files_on_dfl ||
+ ss->dfl_cftypes != ss->legacy_cftypes) {
+ for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
+ cft->flags |= __CFTYPE_NOT_ON_DFL;
+ }
+
return cgroup_add_cftypes(ss, cfts);
}
/* cgroup release path */
cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
cgrp->id = -1;
+
+ /*
+ * There are two control paths which try to determine
+ * cgroup from dentry without going through kernfs -
+ * cgroupstats_build() and css_tryget_online_from_dir().
+ * Those are supported by RCU protecting clearing of
+ * cgrp->kn->priv backpointer.
+ */
+ RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
}
mutex_unlock(&cgroup_mutex);
struct cftype *base_files;
int ssid, ret;
+ /* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable.
+ */
+ if (strchr(name, '\n'))
+ return -EINVAL;
+
parent = cgroup_kn_lock_live(parent_kn);
if (!parent)
return -ENODEV;
cgroup_kn_unlock(kn);
- /*
- * There are two control paths which try to determine cgroup from
- * dentry without going through kernfs - cgroupstats_build() and
- * css_tryget_online_from_dir(). Those are supported by RCU
- * protecting clearing of cgrp->kn->priv backpointer, which should
- * happen after all files under it have been removed.
- */
- if (!ret)
- RCU_INIT_POINTER(*(void __rcu __force **)&kn->priv, NULL);
-
cgroup_put(cgrp);
return ret;
}
/*
* This path doesn't originate from kernfs and @kn could already
* have been or be removed at any point. @kn->priv is RCU
- * protected for this access. See cgroup_rmdir() for details.
+ * protected for this access. See css_release_work_fn() for details.
*/
cgrp = rcu_dereference(kn->priv);
if (cgrp)
ret = hrtimer_nanosleep_restart(restart);
set_fs(oldfs);
- if (ret) {
+ if (ret == -ERESTART_RESTARTBLOCK) {
rmtp = restart->nanosleep.compat_rmtp;
if (rmtp && compat_put_timespec(&rmt, rmtp))
HRTIMER_MODE_REL, CLOCK_MONOTONIC);
set_fs(oldfs);
- if (ret) {
+ /*
+ * hrtimer_nanosleep() can only return 0 or
+ * -ERESTART_RESTARTBLOCK here because:
+ *
+ * - we call it with HRTIMER_MODE_REL and therefore exclude the
+ * -ERESTARTNOHAND return path.
+ *
+ * - we supply the rmtp argument from the task stack (due to
+ * the necessary compat conversion), so the update cannot
+ * fail, which excludes the -EFAULT return path as well. If
+ * it fails nevertheless we have a bigger problem and won't
+ * reach this place anymore.
+ *
+ * - if the return value is 0, we do not have to update rmtp
+ * because there is no remaining time.
+ *
+ * We check for -ERESTART_RESTARTBLOCK nevertheless, in case the
+ * core implementation decides to return random nonsense.
+ */
+ if (ret == -ERESTART_RESTARTBLOCK) {
struct restart_block *restart
= &current_thread_info()->restart_block;
if (rmtp && compat_put_timespec(&rmt, rmtp))
return -EFAULT;
}
-
return ret;
}
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
+EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
/**
* handle_edge_irq - edge type IRQ handler
*/
static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
{
- long ret;
+ long t1, t2;
- ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
+ t1 = kptr_obfuscate((long)v1, type);
+ t2 = kptr_obfuscate((long)v2, type);
- return (ret < 0) | ((ret > 0) << 1);
+ return (t1 < t2) | ((t1 > t2) << 1);
}
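Why the rewrite above matters (editor note): subtracting two arbitrary longs can overflow and flip the sign, so the old subtraction-based encoding could order pointers wrongly; direct comparison cannot overflow. A sketch of the failure mode:

	/* Old code: if t1 == LONG_MIN and t2 == 1, then t1 - t2 wraps to
	 * a positive value, so kcmp reported t1 > t2 despite t1 < t2.
	 * New code: (t1 < t2) | ((t1 > t2) << 1) yields 0 (equal),
	 * 1 (less than) or 2 (greater than) with no overflow hazard.
	 */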
/* The caller must have pinned the task */
#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
+extern const char *pm_labels[];
extern const char *pm_states[];
extern int suspend_devices_and_enter(suspend_state_t state);
#include "power.h"
-static const char *pm_labels[] = { "mem", "standby", "freeze", };
+const char *pm_labels[] = { "mem", "standby", "freeze", NULL };
const char *pm_states[PM_SUSPEND_MAX];
static const struct platform_suspend_ops *suspend_ops;
* at startup time. They're normally disabled, for faster boot and because
* we can't know which states really work on this particular system.
*/
-static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
+static const char *test_state_label __initdata;
static char warn_bad_state[] __initdata =
KERN_WARNING "PM: can't test '%s' suspend state\n";
static int __init setup_test_suspend(char *value)
{
- suspend_state_t i;
+ int i;
/* "=mem" ==> "mem" */
value++;
- for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
- if (!strcmp(pm_states[i], value)) {
- test_state = i;
+ for (i = 0; pm_labels[i]; i++)
+ if (!strcmp(pm_labels[i], value)) {
+ test_state_label = pm_labels[i];
return 0;
}
struct rtc_device *rtc = NULL;
struct device *dev;
+ suspend_state_t test_state;
/* PM is initialized by now; is that state testable? */
- if (test_state == PM_SUSPEND_ON)
- goto done;
- if (!pm_states[test_state]) {
- printk(warn_bad_state, pm_states[test_state]);
- goto done;
+ if (!test_state_label)
+ return 0;
+
+ for (test_state = PM_SUSPEND_MIN; test_state < PM_SUSPEND_MAX; test_state++) {
+ const char *state_label = pm_states[test_state];
+
+ if (state_label && !strcmp(test_state_label, state_label))
+ break;
+ }
+ if (test_state == PM_SUSPEND_MAX) {
+ printk(warn_bad_state, test_state_label);
+ return 0;
}
/* RTCs have initialized by now too ... can we use one? */
rtc = rtc_class_open(dev_name(dev));
if (!rtc) {
printk(warn_no_rtc);
- goto done;
+ return 0;
}
/* go for it */
test_wakealarm(rtc, test_state);
rtc_class_close(rtc);
-done:
return 0;
}
late_initcall(test_suspend);
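In practice (editor note on usage): booting with test_suspend=mem, test_suspend=standby or test_suspend=freeze still exercises one suspend/resume cycle via an RTC wakealarm; the rework above merely defers resolving the label into a suspend_state_t from early parameter parsing to late_initcall time, once the valid pm_states[] entries have been populated.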
raw_spin_lock(&logbuf_lock);
logbuf_cpu = this_cpu;
- if (recursion_bug) {
+ if (unlikely(recursion_bug)) {
static const char recursion_msg[] =
"BUG: recent printk recursion!";
recursion_bug = 0;
- text_len = strlen(recursion_msg);
/* emit KERN_CRIT message */
printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
- NULL, 0, recursion_msg, text_len);
+ NULL, 0, recursion_msg,
+ strlen(recursion_msg));
}
/*
struct rcu_head **nocb_gp_tail;
long nocb_gp_count;
long nocb_gp_count_lazy;
- bool nocb_leader_wake; /* Is the nocb leader thread awake? */
+ bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */
struct rcu_data *nocb_next_follower;
/* Next follower in wakeup chain. */
if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
return;
- if (!ACCESS_ONCE(rdp_leader->nocb_leader_wake) || force) {
+ if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
/* Prior xchg orders against prior callback enqueue. */
- ACCESS_ONCE(rdp_leader->nocb_leader_wake) = true;
+ ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
wake_up(&rdp_leader->nocb_wq);
}
}
if (!rcu_nocb_poll) {
trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
wait_event_interruptible(my_rdp->nocb_wq,
- ACCESS_ONCE(my_rdp->nocb_leader_wake));
+ !ACCESS_ONCE(my_rdp->nocb_leader_sleep));
/* Memory barrier handled by smp_mb() calls below and repoll. */
} else if (firsttime) {
firsttime = false; /* Don't drown trace log with "Poll"! */
schedule_timeout_interruptible(1);
/* Rescan in case we were a victim of memory ordering. */
- my_rdp->nocb_leader_wake = false;
- smp_mb(); /* Ensure _wake false before scan. */
+ my_rdp->nocb_leader_sleep = true;
+ smp_mb(); /* Ensure _sleep true before scan. */
for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
if (ACCESS_ONCE(rdp->nocb_head)) {
/* Found CB, so short-circuit next wait. */
- my_rdp->nocb_leader_wake = true;
+ my_rdp->nocb_leader_sleep = false;
break;
}
goto wait_again;
rcu_nocb_wait_gp(my_rdp);
/*
- * We left ->nocb_leader_wake set to reduce cache thrashing.
- * We clear it now, but recheck for new callbacks while
+ * We left ->nocb_leader_sleep unset to reduce cache thrashing.
+ * We set it now, but recheck for new callbacks while
* traversing our follower list.
*/
- my_rdp->nocb_leader_wake = false;
- smp_mb(); /* Ensure _wake false before scan of ->nocb_head. */
+ my_rdp->nocb_leader_sleep = true;
+ smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */
/* Each pass through the following loop wakes a follower, if needed. */
for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
if (ACCESS_ONCE(rdp->nocb_head))
- my_rdp->nocb_leader_wake = true; /* No need to wait. */
+ my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
if (!rdp->nocb_gp_head)
continue; /* No CBs, so no need to wake follower. */
.func = nohz_full_kick_work_func,
};
+/*
+ * Kick this CPU if it's full dynticks in order to force it to
+ * re-evaluate its dependency on the tick and restart it if necessary.
+ * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
+ * is NMI safe.
+ */
+void tick_nohz_full_kick(void)
+{
+ if (!tick_nohz_full_cpu(smp_processor_id()))
+ return;
+
+ irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+}
+
/*
* Kick the CPU if it's full dynticks in order to force it to
* re-evaluate its dependency on the tick and restart it if necessary.
tk->ntp_error = 0;
ntp_clear();
}
- update_vsyscall(tk);
- update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
tk_update_ktime_data(tk);
+ update_vsyscall(tk);
+ update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
+
if (action & TK_MIRROR)
memcpy(&shadow_timekeeper, &tk_core.timekeeper,
sizeof(tk_core.timekeeper));
shortcut = assoc_array_ptr_to_shortcut(ptr);
slot = shortcut->parent_slot;
cursor = shortcut->back_pointer;
+ if (!cursor)
+ goto gc_complete;
} else {
slot = node->parent_slot;
cursor = ptr;
}
- BUG_ON(!ptr);
+ BUG_ON(!cursor);
node = assoc_array_ptr_to_node(cursor);
slot++;
goto continue_node;
gc_complete:
edit->set[0].to = new_root;
assoc_array_apply_edit(edit);
- edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
+ array->nr_leaves_on_tree = nr_leaves_on_tree;
return 0;
enomem:
if (nid != NUMA_NO_NODE && nid != m_nid)
continue;
+ /* skip hotpluggable memory regions if needed */
+ if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+ continue;
+
if (!type_b) {
if (out_start)
*out_start = m_start;
unsigned long long size;
int ret = 0;
+ if (mem_cgroup_is_root(memcg))
+ goto done;
retry:
if (consume_stock(memcg, nr_pages))
goto done;
if (!(gfp_mask & __GFP_NOFAIL))
return -ENOMEM;
bypass:
- memcg = root_mem_cgroup;
- ret = -EINTR;
- goto retry;
+ return -EINTR;
done_restock:
if (batch > nr_pages)
{
unsigned long bytes = nr_pages * PAGE_SIZE;
+ if (mem_cgroup_is_root(memcg))
+ return;
+
res_counter_uncharge(&memcg->res, bytes);
if (do_swap_account)
res_counter_uncharge(&memcg->memsw, bytes);
{
unsigned long bytes = nr_pages * PAGE_SIZE;
+ if (mem_cgroup_is_root(memcg))
+ return;
+
res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
if (do_swap_account)
res_counter_uncharge_until(&memcg->memsw,
return retval;
}
+static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx)
+{
+ struct mem_cgroup *iter;
+ long val = 0;
+
+ /* Per-cpu values can be negative, use a signed accumulator */
+ for_each_mem_cgroup_tree(iter, memcg)
+ val += mem_cgroup_read_stat(iter, idx);
+
+ if (val < 0) /* race ? */
+ val = 0;
+ return val;
+}
+
+static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
+{
+ u64 val;
+
+ if (!mem_cgroup_is_root(memcg)) {
+ if (!swap)
+ return res_counter_read_u64(&memcg->res, RES_USAGE);
+ else
+ return res_counter_read_u64(&memcg->memsw, RES_USAGE);
+ }
+
+ /*
+ * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
+ * as well as in MEM_CGROUP_STAT_RSS_HUGE.
+ */
+ val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
+ val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
+
+ if (swap)
+ val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
+
+ return val << PAGE_SHIFT;
+}
+
+
static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
switch (type) {
case _MEM:
+ if (name == RES_USAGE)
+ return mem_cgroup_usage(memcg, false);
return res_counter_read_u64(&memcg->res, name);
case _MEMSWAP:
+ if (name == RES_USAGE)
+ return mem_cgroup_usage(memcg, true);
return res_counter_read_u64(&memcg->memsw, name);
case _KMEM:
return res_counter_read_u64(&memcg->kmem, name);
if (!t)
goto unlock;
- if (!swap)
- usage = res_counter_read_u64(&memcg->res, RES_USAGE);
- else
- usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+ usage = mem_cgroup_usage(memcg, swap);
/*
* current_threshold points to threshold just below or equal to usage.
if (type == _MEM) {
thresholds = &memcg->thresholds;
- usage = res_counter_read_u64(&memcg->res, RES_USAGE);
+ usage = mem_cgroup_usage(memcg, false);
} else if (type == _MEMSWAP) {
thresholds = &memcg->memsw_thresholds;
- usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+ usage = mem_cgroup_usage(memcg, true);
} else
BUG();
if (type == _MEM) {
thresholds = &memcg->thresholds;
- usage = res_counter_read_u64(&memcg->res, RES_USAGE);
+ usage = mem_cgroup_usage(memcg, false);
} else if (type == _MEMSWAP) {
thresholds = &memcg->memsw_thresholds;
- usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+ usage = mem_cgroup_usage(memcg, true);
} else
BUG();
* core guarantees its existence.
*/
} else {
- res_counter_init(&memcg->res, &root_mem_cgroup->res);
- res_counter_init(&memcg->memsw, &root_mem_cgroup->memsw);
- res_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
+ res_counter_init(&memcg->res, NULL);
+ res_counter_init(&memcg->memsw, NULL);
+ res_counter_init(&memcg->kmem, NULL);
/*
 * Deeper hierarchy with use_hierarchy == false doesn't make
 * much sense so let the cgroup subsystem know about this
/* we must fixup refcnts and charges */
if (mc.moved_swap) {
/* uncharge swap account from the old cgroup */
- res_counter_uncharge(&mc.from->memsw,
- PAGE_SIZE * mc.moved_swap);
+ if (!mem_cgroup_is_root(mc.from))
+ res_counter_uncharge(&mc.from->memsw,
+ PAGE_SIZE * mc.moved_swap);
for (i = 0; i < mc.moved_swap; i++)
css_put(&mc.from->css);
* we charged both to->res and to->memsw, so we should
* uncharge to->res.
*/
- res_counter_uncharge(&mc.to->res,
- PAGE_SIZE * mc.moved_swap);
+ if (!mem_cgroup_is_root(mc.to))
+ res_counter_uncharge(&mc.to->res,
+ PAGE_SIZE * mc.moved_swap);
/* we've already done css_get(mc.to) */
mc.moved_swap = 0;
}
rcu_read_lock();
memcg = mem_cgroup_lookup(id);
if (memcg) {
- res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+ if (!mem_cgroup_is_root(memcg))
+ res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
mem_cgroup_swap_statistics(memcg, false);
css_put(&memcg->css);
}
{
unsigned long flags;
- if (nr_mem)
- res_counter_uncharge(&memcg->res, nr_mem * PAGE_SIZE);
- if (nr_memsw)
- res_counter_uncharge(&memcg->memsw, nr_memsw * PAGE_SIZE);
-
- memcg_oom_recover(memcg);
+ if (!mem_cgroup_is_root(memcg)) {
+ if (nr_mem)
+ res_counter_uncharge(&memcg->res,
+ nr_mem * PAGE_SIZE);
+ if (nr_memsw)
+ res_counter_uncharge(&memcg->memsw,
+ nr_memsw * PAGE_SIZE);
+ memcg_oom_recover(memcg);
+ }
local_irq_save(flags);
__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
struct vm_area_struct *vma;
vma = rb_entry(nd, struct vm_area_struct, vm_rb);
if (vma->vm_start < prev) {
- pr_info("vm_start %lx prev %lx\n", vma->vm_start, prev);
+ pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev);
bug = 1;
}
if (vma->vm_start < pend) {
- pr_info("vm_start %lx pend %lx\n", vma->vm_start, pend);
+ pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend);
bug = 1;
}
if (vma->vm_start > vma->vm_end) {
- pr_info("vm_end %lx < vm_start %lx\n",
+ pr_emerg("vm_end %lx < vm_start %lx\n",
vma->vm_end, vma->vm_start);
bug = 1;
}
if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
- pr_info("free gap %lx, correct %lx\n",
+ pr_emerg("free gap %lx, correct %lx\n",
vma->rb_subtree_gap,
vma_compute_subtree_gap(vma));
bug = 1;
for (nd = pn; nd; nd = rb_prev(nd))
j++;
if (i != j) {
- pr_info("backwards %d, forwards %d\n", j, i);
+ pr_emerg("backwards %d, forwards %d\n", j, i);
bug = 1;
}
return bug ? -1 : i;
i++;
}
if (i != mm->map_count) {
- pr_info("map_count %d vm_next %d\n", mm->map_count, i);
+ pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
bug = 1;
}
if (highest_address != mm->highest_vm_end) {
- pr_info("mm->highest_vm_end %lx, found %lx\n",
+ pr_emerg("mm->highest_vm_end %lx, found %lx\n",
mm->highest_vm_end, highest_address);
bug = 1;
}
i = browse_rb(&mm->mm_rb);
if (i != mm->map_count) {
- pr_info("map_count %d rb %d\n", mm->map_count, i);
+ pr_emerg("map_count %d rb %d\n", mm->map_count, i);
bug = 1;
}
BUG_ON(bug);
phys_addr_t start, end;
u64 i;
+ memblock_clear_hotplug(0, -1);
+
for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
count += __free_memory_core(start, end);
int page_start, int page_end)
{
const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
- unsigned int cpu;
+ unsigned int cpu, tcpu;
int i;
for_each_possible_cpu(cpu) {
struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
- if (!*pagep) {
- pcpu_free_pages(chunk, pages, populated,
- page_start, page_end);
- return -ENOMEM;
- }
+ if (!*pagep)
+ goto err;
}
}
return 0;
+
+err:
+ while (--i >= page_start)
+ __free_page(pages[pcpu_page_idx(cpu, i)]);
+
+ for_each_possible_cpu(tcpu) {
+ if (tcpu == cpu)
+ break;
+ for (i = page_start; i < page_end; i++)
+ __free_page(pages[pcpu_page_idx(tcpu, i)]);
+ }
+ return -ENOMEM;
}
/**
__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
page_end - page_start);
}
+ pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
return err;
}
if (pcpu_setup_first_chunk(ai, fc) < 0)
panic("Failed to initialize percpu areas.");
+
+ pcpu_free_alloc_info(ai);
}
#endif /* CONFIG_SMP */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
struct hci_dev *hdev = conn->hdev;
+ struct hci_conn_params *params;
+
+ params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
+ conn->dst_type);
+ if (params && params->conn) {
+ hci_conn_drop(params->conn);
+ params->conn = NULL;
+ }
conn->state = BT_CLOSED;
{
struct hci_conn_params *p;
- list_for_each_entry(p, &hdev->le_conn_params, list)
+ list_for_each_entry(p, &hdev->le_conn_params, list) {
+ if (p->conn) {
+ hci_conn_drop(p->conn);
+ p->conn = NULL;
+ }
list_del_init(&p->action);
+ }
BT_DBG("All LE pending actions cleared");
}
hci_dev_lock(hdev);
hci_inquiry_cache_flush(hdev);
- hci_conn_hash_flush(hdev);
hci_pend_le_actions_clear(hdev);
+ hci_conn_hash_flush(hdev);
hci_dev_unlock(hdev);
hci_notify(hdev, HCI_DEV_DOWN);
if (!params)
return;
+ if (params->conn)
+ hci_conn_drop(params->conn);
+
	list_del(&params->action);
	list_del(&params->list);
kfree(params);
struct hci_conn_params *params, *tmp;
list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
+ if (params->conn)
+ hci_conn_drop(params->conn);
		list_del(&params->action);
		list_del(&params->list);
kfree(params);
hci_proto_connect_cfm(conn, ev->status);
params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
- if (params)
+ if (params) {
list_del_init(¶ms->action);
+ if (params->conn) {
+ hci_conn_drop(params->conn);
+ params->conn = NULL;
+ }
+ }
unlock:
hci_update_background_scan(hdev);
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
- if (!IS_ERR(conn))
+ if (!IS_ERR(conn)) {
+ /* Store the pointer since we don't really have any
+ * other owner of the object besides the params that
+ * triggered it. This way we can abort the connection if
+ * the parameters get removed and keep the reference
+ * count consistent once the connection is established.
+ */
+ params->conn = conn;
return;
+ }
switch (PTR_ERR(conn)) {
case -EBUSY:
#include "auth_x.h"
#include "auth_x_protocol.h"
-#define TEMP_TICKET_BUF_LEN 256
-
static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
}
static int ceph_x_decrypt(struct ceph_crypto_key *secret,
- void **p, void *end, void *obuf, size_t olen)
+ void **p, void *end, void **obuf, size_t olen)
{
struct ceph_x_encrypt_header head;
size_t head_len = sizeof(head);
return -EINVAL;
dout("ceph_x_decrypt len %d\n", len);
- ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
- *p, len);
+ if (*obuf == NULL) {
+ *obuf = kmalloc(len, GFP_NOFS);
+ if (!*obuf)
+ return -ENOMEM;
+ olen = len;
+ }
+
+ ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
if (ret)
return ret;
if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
kfree(th);
}
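The net effect of the new ceph_x_decrypt() contract, sketched from the hunk above (editor illustration): passing a pointer to a NULL buffer asks the function to kmalloc one sized to the on-wire length, lifting the old fixed TEMP_TICKET_BUF_LEN (256-byte) limit:

	void *dbuf = NULL;
	int dlen;

	/* *obuf == NULL, so the callee allocates a right-sized buffer */
	dlen = ceph_x_decrypt(secret, &p, end, &dbuf, 0);
	if (dlen <= 0)
		ret = dlen;	/* fall through to cleanup either way... */
	kfree(dbuf);		/* ...kfree(NULL) is a safe no-op */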
-static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
- struct ceph_crypto_key *secret,
- void *buf, void *end)
+static int process_one_ticket(struct ceph_auth_client *ac,
+ struct ceph_crypto_key *secret,
+ void **p, void *end)
{
struct ceph_x_info *xi = ac->private;
- int num;
- void *p = buf;
+ int type;
+ u8 tkt_struct_v, blob_struct_v;
+ struct ceph_x_ticket_handler *th;
+ void *dbuf = NULL;
+ void *dp, *dend;
+ int dlen;
+ char is_enc;
+ struct timespec validity;
+ struct ceph_crypto_key old_key;
+ void *ticket_buf = NULL;
+ void *tp, *tpend;
+ struct ceph_timespec new_validity;
+ struct ceph_crypto_key new_session_key;
+ struct ceph_buffer *new_ticket_blob;
+ unsigned long new_expires, new_renew_after;
+ u64 new_secret_id;
int ret;
- char *dbuf;
- char *ticket_buf;
- u8 reply_struct_v;
- dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
- if (!dbuf)
- return -ENOMEM;
+ ceph_decode_need(p, end, sizeof(u32) + 1, bad);
- ret = -ENOMEM;
- ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
- if (!ticket_buf)
- goto out_dbuf;
+ type = ceph_decode_32(p);
+ dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
- ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
- reply_struct_v = ceph_decode_8(&p);
- if (reply_struct_v != 1)
+ tkt_struct_v = ceph_decode_8(p);
+ if (tkt_struct_v != 1)
goto bad;
- num = ceph_decode_32(&p);
- dout("%d tickets\n", num);
- while (num--) {
- int type;
- u8 tkt_struct_v, blob_struct_v;
- struct ceph_x_ticket_handler *th;
- void *dp, *dend;
- int dlen;
- char is_enc;
- struct timespec validity;
- struct ceph_crypto_key old_key;
- void *tp, *tpend;
- struct ceph_timespec new_validity;
- struct ceph_crypto_key new_session_key;
- struct ceph_buffer *new_ticket_blob;
- unsigned long new_expires, new_renew_after;
- u64 new_secret_id;
-
- ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
-
- type = ceph_decode_32(&p);
- dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
-
- tkt_struct_v = ceph_decode_8(&p);
- if (tkt_struct_v != 1)
- goto bad;
-
- th = get_ticket_handler(ac, type);
- if (IS_ERR(th)) {
- ret = PTR_ERR(th);
- goto out;
- }
- /* blob for me */
- dlen = ceph_x_decrypt(secret, &p, end, dbuf,
- TEMP_TICKET_BUF_LEN);
- if (dlen <= 0) {
- ret = dlen;
- goto out;
- }
- dout(" decrypted %d bytes\n", dlen);
- dend = dbuf + dlen;
- dp = dbuf;
+ th = get_ticket_handler(ac, type);
+ if (IS_ERR(th)) {
+ ret = PTR_ERR(th);
+ goto out;
+ }
- tkt_struct_v = ceph_decode_8(&dp);
- if (tkt_struct_v != 1)
- goto bad;
+ /* blob for me */
+ dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
+ if (dlen <= 0) {
+ ret = dlen;
+ goto out;
+ }
+ dout(" decrypted %d bytes\n", dlen);
+ dp = dbuf;
+ dend = dp + dlen;
- memcpy(&old_key, &th->session_key, sizeof(old_key));
- ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
- if (ret)
- goto out;
+ tkt_struct_v = ceph_decode_8(&dp);
+ if (tkt_struct_v != 1)
+ goto bad;
- ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
- ceph_decode_timespec(&validity, &new_validity);
- new_expires = get_seconds() + validity.tv_sec;
- new_renew_after = new_expires - (validity.tv_sec / 4);
- dout(" expires=%lu renew_after=%lu\n", new_expires,
- new_renew_after);
+ memcpy(&old_key, &th->session_key, sizeof(old_key));
+ ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
+ if (ret)
+ goto out;
- /* ticket blob for service */
- ceph_decode_8_safe(&p, end, is_enc, bad);
- tp = ticket_buf;
- if (is_enc) {
- /* encrypted */
- dout(" encrypted ticket\n");
- dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
- TEMP_TICKET_BUF_LEN);
- if (dlen < 0) {
- ret = dlen;
- goto out;
- }
- dlen = ceph_decode_32(&tp);
- } else {
- /* unencrypted */
- ceph_decode_32_safe(&p, end, dlen, bad);
- ceph_decode_need(&p, end, dlen, bad);
- ceph_decode_copy(&p, ticket_buf, dlen);
+ ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
+ ceph_decode_timespec(&validity, &new_validity);
+ new_expires = get_seconds() + validity.tv_sec;
+ new_renew_after = new_expires - (validity.tv_sec / 4);
+ dout(" expires=%lu renew_after=%lu\n", new_expires,
+ new_renew_after);
+
+ /* ticket blob for service */
+ ceph_decode_8_safe(p, end, is_enc, bad);
+ if (is_enc) {
+ /* encrypted */
+ dout(" encrypted ticket\n");
+ dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out;
}
- tpend = tp + dlen;
- dout(" ticket blob is %d bytes\n", dlen);
- ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
- blob_struct_v = ceph_decode_8(&tp);
- new_secret_id = ceph_decode_64(&tp);
- ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
- if (ret)
+ tp = ticket_buf;
+ dlen = ceph_decode_32(&tp);
+ } else {
+ /* unencrypted */
+ ceph_decode_32_safe(p, end, dlen, bad);
+ ticket_buf = kmalloc(dlen, GFP_NOFS);
+ if (!ticket_buf) {
+ ret = -ENOMEM;
goto out;
-
- /* all is well, update our ticket */
- ceph_crypto_key_destroy(&th->session_key);
- if (th->ticket_blob)
- ceph_buffer_put(th->ticket_blob);
- th->session_key = new_session_key;
- th->ticket_blob = new_ticket_blob;
- th->validity = new_validity;
- th->secret_id = new_secret_id;
- th->expires = new_expires;
- th->renew_after = new_renew_after;
- dout(" got ticket service %d (%s) secret_id %lld len %d\n",
- type, ceph_entity_type_name(type), th->secret_id,
- (int)th->ticket_blob->vec.iov_len);
- xi->have_keys |= th->service;
+ }
+ tp = ticket_buf;
+ ceph_decode_need(p, end, dlen, bad);
+ ceph_decode_copy(p, ticket_buf, dlen);
}
+ tpend = tp + dlen;
+ dout(" ticket blob is %d bytes\n", dlen);
+ ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
+ blob_struct_v = ceph_decode_8(&tp);
+ new_secret_id = ceph_decode_64(&tp);
+ ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+ if (ret)
+ goto out;
+
+ /* all is well, update our ticket */
+ ceph_crypto_key_destroy(&th->session_key);
+ if (th->ticket_blob)
+ ceph_buffer_put(th->ticket_blob);
+ th->session_key = new_session_key;
+ th->ticket_blob = new_ticket_blob;
+ th->validity = new_validity;
+ th->secret_id = new_secret_id;
+ th->expires = new_expires;
+ th->renew_after = new_renew_after;
+ dout(" got ticket service %d (%s) secret_id %lld len %d\n",
+ type, ceph_entity_type_name(type), th->secret_id,
+ (int)th->ticket_blob->vec.iov_len);
+ xi->have_keys |= th->service;
- ret = 0;
out:
kfree(ticket_buf);
-out_dbuf:
kfree(dbuf);
return ret;
goto out;
}
+static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
+ struct ceph_crypto_key *secret,
+ void *buf, void *end)
+{
+ void *p = buf;
+ u8 reply_struct_v;
+ u32 num;
+ int ret;
+
+ ceph_decode_8_safe(&p, end, reply_struct_v, bad);
+ if (reply_struct_v != 1)
+ return -EINVAL;
+
+ ceph_decode_32_safe(&p, end, num, bad);
+ dout("%d tickets\n", num);
+
+ while (num--) {
+ ret = process_one_ticket(ac, secret, &p, end);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+
+bad:
+ return -EINVAL;
+}
+
static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
struct ceph_x_ticket_handler *th,
struct ceph_x_authorizer *au)
struct ceph_x_ticket_handler *th;
int ret = 0;
struct ceph_x_authorize_reply reply;
+ void *preply = &reply;
void *p = au->reply_buf;
void *end = p + sizeof(au->reply_buf);
th = get_ticket_handler(ac, au->service);
if (IS_ERR(th))
return PTR_ERR(th);
- ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
+ ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
if (ret < 0)
return ret;
if (ret != sizeof(reply))
if (!m) {
pr_info("alloc_msg unknown type %d\n", type);
*skip = 1;
+ } else if (front_len > m->front_alloc_len) {
+ pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
+ front_len, m->front_alloc_len,
+ (unsigned int)con->peer_name.type,
+ le64_to_cpu(con->peer_name.num));
+ ceph_msg_put(m);
+ m = ceph_msg_new(type, front_len, GFP_NOFS, false);
}
+
return m;
}
EXPORT_SYMBOL(__skb_checksum_complete);
/**
- * skb_copy_and_csum_datagram_iovec - Copy and checkum skb to user iovec.
+ * skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
* @skb: skbuff
* @hlen: hardware length
* @iov: io vector
return harmonize_features(skb, features);
}
- features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX);
+ features = netdev_intersect_features(features,
+ skb->dev->vlan_features |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
- features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
- NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
+ features = netdev_intersect_features(features,
+ NETIF_F_SG |
+ NETIF_F_HIGHDMA |
+ NETIF_F_FRAGLIST |
+ NETIF_F_GEN_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
return harmonize_features(skb, features);
}
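For context on why the plain `&=` was insufficient (editor sketch; the real netdev_intersect_features() differs in detail, so treat this body as a simplified assumption): a device advertising only NETIF_F_GEN_CSUM would lose checksum offload when ANDed against a mask carrying a protocol-specific flag such as NETIF_F_IP_CSUM. The helper widens generic checksumming before intersecting:

	/* simplified idea only - not the exact upstream helper */
	static netdev_features_t
	intersect_features_sketch(netdev_features_t f1, netdev_features_t f2)
	{
		if (f1 & NETIF_F_GEN_CSUM)
			f1 |= NETIF_F_ALL_CSUM;
		if (f2 & NETIF_F_GEN_CSUM)
			f2 |= NETIF_F_ALL_CSUM;
		return f1 & f2;
	}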
if (adj->master)
sysfs_remove_link(&(dev->dev.kobj), "master");
- if (netdev_adjacent_is_neigh_list(dev, dev_list))
+ if (netdev_adjacent_is_neigh_list(dev, dev_list) &&
+	    net_eq(dev_net(dev), dev_net(adj_dev)))
netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
list_del_rcu(&adj->list);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
+void netdev_adjacent_add_links(struct net_device *dev)
+{
+ struct netdev_adjacent *iter;
+
+ struct net *net = dev_net(dev);
+
+ list_for_each_entry(iter, &dev->adj_list.upper, list) {
+		if (!net_eq(net, dev_net(iter->dev)))
+ continue;
+ netdev_adjacent_sysfs_add(iter->dev, dev,
+ &iter->dev->adj_list.lower);
+ netdev_adjacent_sysfs_add(dev, iter->dev,
+ &dev->adj_list.upper);
+ }
+
+ list_for_each_entry(iter, &dev->adj_list.lower, list) {
+		if (!net_eq(net, dev_net(iter->dev)))
+ continue;
+ netdev_adjacent_sysfs_add(iter->dev, dev,
+ &iter->dev->adj_list.upper);
+ netdev_adjacent_sysfs_add(dev, iter->dev,
+ &dev->adj_list.lower);
+ }
+}
+
+void netdev_adjacent_del_links(struct net_device *dev)
+{
+ struct netdev_adjacent *iter;
+
+ struct net *net = dev_net(dev);
+
+ list_for_each_entry(iter, &dev->adj_list.upper, list) {
+		if (!net_eq(net, dev_net(iter->dev)))
+ continue;
+ netdev_adjacent_sysfs_del(iter->dev, dev->name,
+ &iter->dev->adj_list.lower);
+ netdev_adjacent_sysfs_del(dev, iter->dev->name,
+ &dev->adj_list.upper);
+ }
+
+ list_for_each_entry(iter, &dev->adj_list.lower, list) {
+		if (!net_eq(net, dev_net(iter->dev)))
+ continue;
+ netdev_adjacent_sysfs_del(iter->dev, dev->name,
+ &iter->dev->adj_list.upper);
+ netdev_adjacent_sysfs_del(dev, iter->dev->name,
+ &dev->adj_list.lower);
+ }
+}
+
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
struct netdev_adjacent *iter;
+ struct net *net = dev_net(dev);
+
list_for_each_entry(iter, &dev->adj_list.upper, list) {
+		if (!net_eq(net, dev_net(iter->dev)))
+ continue;
netdev_adjacent_sysfs_del(iter->dev, oldname,
&iter->dev->adj_list.lower);
netdev_adjacent_sysfs_add(iter->dev, dev,
}
list_for_each_entry(iter, &dev->adj_list.lower, list) {
+		if (!net_eq(net, dev_net(iter->dev)))
+ continue;
netdev_adjacent_sysfs_del(iter->dev, oldname,
&iter->dev->adj_list.upper);
netdev_adjacent_sysfs_add(iter->dev, dev,
/* Send a netdev-removed uevent to the old namespace */
kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
+ netdev_adjacent_del_links(dev);
/* Actually switch the network namespace */
dev_net_set(dev, net);
/* Send a netdev-add uevent to the new namespace */
kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
+ netdev_adjacent_add_links(dev);
/* Fixup kobjects */
err = device_rename(&dev->dev, dev->name);
* as destination. A new timer with the interval specified in the
* configuration TLV is created. Upon each interval, the latest statistics
* will be read from &bstats and the estimated rate will be stored in
- * &rate_est with the statistics lock grabed during this period.
+ * &rate_est with the statistics lock grabbed during this period.
*
* Returns 0 on success or a negative error code.
*
* @st: application specific statistics data
* @len: length of data
*
- * Appends the application sepecific statistics to the top level TLV created by
+ * Appends the application specific statistics to the top level TLV created by
* gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
* handle is in backward compatibility mode.
*
* skb_seq_read() will return the remaining part of the block.
*
* Note 1: The size of each block of data returned can be arbitrary,
- * this limitation is the cost for zerocopy seqeuental
+ * this limitation is the cost for zerocopy sequential
* reads of potentially non linear data.
*
* Note 2: Fragment lists within fragments are not implemented
/**
* skb_append_datato_frags - append the user data to a skb
* @sk: sock structure
- * @skb: skb structure to be appened with user data.
+ * @skb: skb structure to be appended with user data.
* @getfrag: call back function to be used for getting the user data
* @from: pointer to user message iov
* @length: length of the iov message
/**
* sk_capable - Socket global capability test
* @sk: Socket to use a capability on or through
- * @cap: The global capbility to use
+ * @cap: The global capability to use
*
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and the current process has the capability @cap in all user
* @sk: Socket to use a capability on or through
* @cap: The capability to use
*
- * Test to see if the opener of the socket had when the socke was created
+ * Test to see if the opener of the socket had the capability @cap when the socket was created
* and the current process has the capability @cap over the network namespace
* the socket is a member of.
*/
order);
if (page)
goto fill_page;
+ /* Do not retry other high order allocations */
+ order = 1;
+ max_page_order = 0;
}
order--;
}
* no guarantee that allocations succeed. Therefore, @sz MUST be
 * less than or equal to PAGE_SIZE.
*/
-bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{
- int order;
-
if (pfrag->page) {
if (atomic_read(&pfrag->page->_count) == 1) {
pfrag->offset = 0;
put_page(pfrag->page);
}
- order = SKB_FRAG_PAGE_ORDER;
- do {
- gfp_t gfp = prio;
-
- if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
- pfrag->page = alloc_pages(gfp, order);
+ pfrag->offset = 0;
+ if (SKB_FRAG_PAGE_ORDER) {
+ pfrag->page = alloc_pages(gfp | __GFP_COMP |
+ __GFP_NOWARN | __GFP_NORETRY,
+ SKB_FRAG_PAGE_ORDER);
if (likely(pfrag->page)) {
- pfrag->offset = 0;
- pfrag->size = PAGE_SIZE << order;
+ pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
return true;
}
- } while (--order >= 0);
-
+ }
+ pfrag->page = alloc_page(gfp);
+ if (likely(pfrag->page)) {
+ pfrag->size = PAGE_SIZE;
+ return true;
+ }
return false;
}
EXPORT_SYMBOL(skb_page_frag_refill);
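A usage sketch for the refactored allocator (editor illustration): callers keep a struct page_frag across calls; the new body makes a single high-order attempt with __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY and then falls back straight to a single page, instead of walking down through every intermediate order:

	struct page_frag frag = { .page = NULL };

	if (skb_page_frag_refill(len, &frag, GFP_KERNEL)) {
		/* frag.page + frag.offset now has room for 'len' bytes
		 * (len <= PAGE_SIZE, per the comment above); copy in and
		 * advance frag.offset. */
	}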
return ERR_PTR(-rc);
}
} else {
- frag = ERR_PTR(ENOMEM);
+ frag = ERR_PTR(-ENOMEM);
}
return frag;
/* Frame Control + Sequence Number + Address fields + Security Header */
dev->hard_header_len = 2 + 1 + 20 + 14;
dev->needed_tailroom = 2; /* FCS */
- dev->mtu = 1281;
+ dev->mtu = IPV6_MIN_MTU;
dev->tx_queue_len = 0;
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = 0;
struct net *net = dev_net(skb->dev);
struct lowpan_frag_info *frag_info = lowpan_cb(skb);
struct ieee802154_addr source, dest;
- struct netns_ieee802154_lowpan *ieee802154_lowpan =
- net_ieee802154_lowpan(net);
int err;
source = mac_cb(skb)->source;
if (err < 0)
goto err;
- if (frag_info->d_size > ieee802154_lowpan->max_dsize)
+ if (frag_info->d_size > IPV6_MIN_MTU) {
+ net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
goto err;
+ }
fq = fq_find(net, frag_info, &source, &dest);
if (fq != NULL) {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- {
- .procname = "6lowpanfrag_max_datagram_size",
- .data = &init_net.ieee802154_lowpan.max_dsize,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
{ }
};
table[1].data = &ieee802154_lowpan->frags.low_thresh;
table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
table[2].data = &ieee802154_lowpan->frags.timeout;
- table[3].data = &ieee802154_lowpan->max_dsize;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)
ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
- ieee802154_lowpan->max_dsize = 0xFFFF;
inet_frags_init_net(&ieee802154_lowpan->frags);
help
This option enables the ARP support for nf_tables.
+config NF_NAT_IPV4
+ tristate "IPv4 NAT"
+ depends on NF_CONNTRACK_IPV4
+ default m if NETFILTER_ADVANCED=n
+ select NF_NAT
+ help
+ The IPv4 NAT option allows masquerading, port forwarding and other
+ forms of full Network Address Port Translation. This can be
+ controlled by iptables or nft.
+
+if NF_NAT_IPV4
+
+config NF_NAT_SNMP_BASIC
+ tristate "Basic SNMP-ALG support"
+ depends on NF_CONNTRACK_SNMP
+ depends on NETFILTER_ADVANCED
+ default NF_NAT && NF_CONNTRACK_SNMP
+ ---help---
+
+ This module implements an Application Layer Gateway (ALG) for
+ SNMP payloads. In conjunction with NAT, it allows a network
+ management system to access multiple private networks with
+ conflicting addresses. It works by modifying IP addresses
+ inside SNMP payloads to match IP-layer NAT mapping.
+
+ This is the "basic" form of SNMP-ALG, as described in RFC 2962
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NF_NAT_PROTO_GRE
+ tristate
+ depends on NF_CT_PROTO_GRE
+
+config NF_NAT_PPTP
+ tristate
+ depends on NF_CONNTRACK
+ default NF_CONNTRACK_PPTP
+ select NF_NAT_PROTO_GRE
+
+config NF_NAT_H323
+ tristate
+ depends on NF_CONNTRACK
+ default NF_CONNTRACK_H323
+
+endif # NF_NAT_IPV4
+
config IP_NF_IPTABLES
tristate "IP tables support (required for filtering/masq/NAT)"
default m if NETFILTER_ADVANCED=n
To compile it as a module, choose M here. If unsure, say N.
# NAT + specific targets: nf_conntrack
-config NF_NAT_IPV4
- tristate "IPv4 NAT"
+config IP_NF_NAT
+ tristate "iptables NAT support"
depends on NF_CONNTRACK_IPV4
default m if NETFILTER_ADVANCED=n
select NF_NAT
+ select NF_NAT_IPV4
+ select NETFILTER_XT_NAT
help
- The IPv4 NAT option allows masquerading, port forwarding and other
- forms of full Network Address Port Translation. It is controlled by
- the `nat' table in iptables: see the man page for iptables(8).
+ This enables the `nat' table in iptables. This allows masquerading,
+ port forwarding and other forms of full Network Address Port
+ Translation.
To compile it as a module, choose M here. If unsure, say N.
-if NF_NAT_IPV4
+if IP_NF_NAT
config IP_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
(e.g. when running oldconfig). It selects
CONFIG_NETFILTER_XT_TARGET_REDIRECT.
-endif
-
-config NF_NAT_SNMP_BASIC
- tristate "Basic SNMP-ALG support"
- depends on NF_CONNTRACK_SNMP && NF_NAT_IPV4
- depends on NETFILTER_ADVANCED
- default NF_NAT && NF_CONNTRACK_SNMP
- ---help---
-
- This module implements an Application Layer Gateway (ALG) for
- SNMP payloads. In conjunction with NAT, it allows a network
- management system to access multiple private networks with
- conflicting addresses. It works by modifying IP addresses
- inside SNMP payloads to match IP-layer NAT mapping.
-
- This is the "basic" form of SNMP-ALG, as described in RFC 2962
-
- To compile it as a module, choose M here. If unsure, say N.
-
-# If they want FTP, set to $CONFIG_IP_NF_NAT (m or y),
-# or $CONFIG_IP_NF_FTP (m or y), whichever is weaker.
-# From kconfig-language.txt:
-#
-# <expr> '&&' <expr> (6)
-#
-# (6) Returns the result of min(/expr/, /expr/).
-
-config NF_NAT_PROTO_GRE
- tristate
- depends on NF_NAT_IPV4 && NF_CT_PROTO_GRE
-
-config NF_NAT_PPTP
- tristate
- depends on NF_CONNTRACK && NF_NAT_IPV4
- default NF_NAT_IPV4 && NF_CONNTRACK_PPTP
- select NF_NAT_PROTO_GRE
-
-config NF_NAT_H323
- tristate
- depends on NF_CONNTRACK && NF_NAT_IPV4
- default NF_NAT_IPV4 && NF_CONNTRACK_H323
+endif # IP_NF_NAT
# mangle + specific targets
config IP_NF_MANGLE
# the three instances of ip_tables
obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
-obj-$(CONFIG_NF_NAT_IPV4) += iptable_nat.o
+obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
addrconf_mod_dad_work(ifp, 0);
}
-/* Join to solicited addr multicast group. */
-
+/* Join to solicited addr multicast group.
+ * caller must hold RTNL */
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
{
struct in6_addr maddr;
- ASSERT_RTNL();
-
if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;
ipv6_dev_mc_inc(dev, &maddr);
}
+/* caller must hold RTNL */
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct in6_addr maddr;
- ASSERT_RTNL();
-
if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;
__ipv6_dev_mc_dec(idev, &maddr);
}
+/* caller must hold RTNL */
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
- ASSERT_RTNL();
-
if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
ipv6_dev_ac_inc(ifp->idev->dev, &addr);
}
+/* caller must hold RTNL */
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
- ASSERT_RTNL();
-
if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
addrconf_leave_solict(ifp->idev, &ifp->addr);
if (!ipv6_addr_any(&ifp->peer_addr)) {
struct rt6_info *rt;
- struct net_device *dev = ifp->idev->dev;
-
- rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL,
- dev->ifindex, 1);
- if (rt) {
- dst_hold(&rt->dst);
- if (ip6_del_rt(rt))
- dst_free(&rt->dst);
- }
+
+ rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
+ ifp->idev->dev, 0, 0);
+ if (rt && ip6_del_rt(rt))
+ dst_free(&rt->dst);
}
dst_hold(&ifp->rt->dst);
pac->acl_next = NULL;
pac->acl_addr = *addr;
+ rtnl_lock();
rcu_read_lock();
if (ifindex == 0) {
struct rt6_info *rt;
error:
rcu_read_unlock();
+ rtnl_unlock();
if (pac)
sock_kfree_s(sk, pac, sizeof(*pac));
return err;
spin_unlock_bh(&ipv6_sk_ac_lock);
+ rtnl_lock();
rcu_read_lock();
dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
if (dev)
ipv6_dev_ac_dec(dev, &pac->acl_addr);
rcu_read_unlock();
+ rtnl_unlock();
sock_kfree_s(sk, pac, sizeof(*pac));
return 0;
spin_unlock_bh(&ipv6_sk_ac_lock);
prev_index = 0;
+ rtnl_lock();
rcu_read_lock();
while (pac) {
struct ipv6_ac_socklist *next = pac->acl_next;
pac = next;
}
rcu_read_unlock();
+ rtnl_unlock();
}
static void aca_put(struct ifacaddr6 *ac)
struct rt6_info *rt;
int err;
+ ASSERT_RTNL();
+
idev = in6_dev_get(dev);
if (idev == NULL)
{
struct ifacaddr6 *aca, *prev_aca;
+ ASSERT_RTNL();
+
write_lock_bh(&idev->lock);
prev_aca = NULL;
for (aca = idev->ac_list; aca; aca = aca->aca_next) {
mc_lst->next = NULL;
mc_lst->addr = *addr;
+ rtnl_lock();
rcu_read_lock();
if (ifindex == 0) {
struct rt6_info *rt;
if (dev == NULL) {
rcu_read_unlock();
+ rtnl_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return -ENODEV;
}
if (err) {
rcu_read_unlock();
+ rtnl_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return err;
}
spin_unlock(&ipv6_sk_mc_lock);
rcu_read_unlock();
+ rtnl_unlock();
return 0;
}
if (!ipv6_addr_is_multicast(addr))
return -EINVAL;
+ rtnl_lock();
spin_lock(&ipv6_sk_mc_lock);
for (lnk = &np->ipv6_mc_list;
(mc_lst = rcu_dereference_protected(*lnk,
} else
(void) ip6_mc_leave_src(sk, mc_lst, NULL);
rcu_read_unlock();
+ rtnl_unlock();
+
atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
kfree_rcu(mc_lst, rcu);
return 0;
}
}
spin_unlock(&ipv6_sk_mc_lock);
+ rtnl_unlock();
return -EADDRNOTAVAIL;
}
if (!rcu_access_pointer(np->ipv6_mc_list))
return;
+ rtnl_lock();
spin_lock(&ipv6_sk_mc_lock);
while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
spin_lock(&ipv6_sk_mc_lock);
}
spin_unlock(&ipv6_sk_mc_lock);
+ rtnl_unlock();
}
int ip6_mc_source(int add, int omode, struct sock *sk,
struct ifmcaddr6 *mc;
struct inet6_dev *idev;
+ ASSERT_RTNL();
+
/* we need to take a reference on idev */
idev = in6_dev_get(dev);
{
struct ifmcaddr6 *ma, **map;
+ ASSERT_RTNL();
+
write_lock_bh(&idev->lock);
for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
if (ipv6_addr_equal(&ma->mca_addr, addr)) {
config NF_LOG_IPV6
tristate "IPv6 packet logging"
- depends on NETFILTER_ADVANCED
+ default m if NETFILTER_ADVANCED=n
select NF_LOG_COMMON
+config NF_NAT_IPV6
+ tristate "IPv6 NAT"
+ depends on NF_CONNTRACK_IPV6
+ depends on NETFILTER_ADVANCED
+ select NF_NAT
+ help
+ The IPv6 NAT option allows masquerading, port forwarding and other
+ forms of full Network Address Port Translation. This can be
+ controlled by iptables or nft.
+
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering)"
depends on INET && IPV6
If unsure, say N.
-config NF_NAT_IPV6
- tristate "IPv6 NAT"
+config IP6_NF_NAT
+ tristate "ip6tables NAT support"
depends on NF_CONNTRACK_IPV6
depends on NETFILTER_ADVANCED
select NF_NAT
+ select NF_NAT_IPV6
+ select NETFILTER_XT_NAT
help
- The IPv6 NAT option allows masquerading, port forwarding and other
- forms of full Network Address Port Translation. It is controlled by
- the `nat' table in ip6tables, see the man page for ip6tables(8).
+ This enables the `nat' table in ip6tables. This allows masquerading,
+ port forwarding and other forms of full Network Address Port
+ Translation.
To compile it as a module, choose M here. If unsure, say N.
-if NF_NAT_IPV6
+if IP6_NF_NAT
config IP6_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
To compile it as a module, choose M here. If unsure, say N.
-endif # NF_NAT_IPV6
+endif # IP6_NF_NAT
endif # IP6_NF_IPTABLES
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
-obj-$(CONFIG_NF_NAT_IPV6) += ip6table_nat.o
+obj-$(CONFIG_IP6_NF_NAT) += ip6table_nat.o
# objects for l3 independent conntrack
nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
/* If PMTU discovery was enabled, use the MTU that was discovered */
dst = sk_dst_get(tunnel->sock);
if (dst != NULL) {
- u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
+ u32 pmtu = dst_mtu(dst);
+
if (pmtu != 0)
session->mtu = session->mru = pmtu -
PPPOL2TP_HEADER_OVERHEAD;
continue;
if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
continue;
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ continue;
if (!compat)
compat = &sdata->vif.bss_conf.chandef;
p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
sta->ampdu_mlme.dialog_token_allocator + 1);
p += scnprintf(p, sizeof(buf) + buf - p,
- "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
+ "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]);
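
The debugfs hunk above uses a common kernel append idiom:
`p += scnprintf(p, sizeof(buf) + buf - p, ...)`, where
`sizeof(buf) + buf - p` is the space left after p and scnprintf()
returns the number of characters actually stored. A minimal userspace
sketch of why this cannot overflow (scnprintf_sketch is a hypothetical
stand-in for the kernel's scnprintf):

  #include <stdarg.h>
  #include <stdio.h>

  /* Like the kernel's scnprintf(): returns bytes actually stored,
   * never more than size - 1, so repeated appends stop at the end
   * of the buffer instead of running past it. */
  static int scnprintf_sketch(char *buf, size_t size, const char *fmt, ...)
  {
          va_list args;
          int n;

          if (size == 0)
                  return 0;

          va_start(args, fmt);
          n = vsnprintf(buf, size, fmt, args);
          va_end(args);

          if (n < 0)
                  return 0;
          return (size_t)n >= size ? (int)(size - 1) : n;
  }

  int main(void)
  {
          char buf[32];
          char *p = buf;
          int tid;

          for (tid = 0; tid < 16; tid++)
                  p += scnprintf_sketch(p, sizeof(buf) + buf - p,
                                        "TID%d\t", tid);
          puts(buf);
          return 0;
  }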
if (sta) {
u16 last_seq;
- last_seq = le16_to_cpu(
- sta->last_seq_ctrl[rx_agg->tid]);
+ last_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(
+ sta->last_seq_ctrl[rx_agg->tid]));
__ieee80211_start_rx_ba_session(sta,
0, 0,
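
Context for the fix above (field values as defined in the kernel's
<linux/ieee80211.h>): the 802.11 Sequence Control field packs the
fragment number into bits 0-3 and the sequence number into bits 4-15,
so the raw field must be shifted down before it is used as a starting
sequence number. A standalone sketch:

  #include <stdint.h>
  #include <stdio.h>

  #define IEEE80211_SCTL_FRAG 0x000F /* fragment number, bits 0-3 */
  #define IEEE80211_SCTL_SEQ  0xFFF0 /* sequence number, bits 4-15 */
  #define IEEE80211_SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)

  int main(void)
  {
          uint16_t seq_ctrl = 0x0842; /* SN 0x084, fragment number 2 */

          /* Passing seq_ctrl through unshifted would start the BA
           * session at 16 * SN + fragment, not at SN. */
          printf("raw %#06x -> SN %#05x\n", seq_ctrl,
                 (unsigned int)IEEE80211_SEQ_TO_SN(seq_ctrl));
          return 0;
  }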
if (!matches_local)
event = CNF_RJCT;
if (!mesh_plink_free_count(sdata) ||
- (sta->llid != llid || sta->plid != plid))
+ sta->llid != llid ||
+ (sta->plid && sta->plid != plid))
event = CNF_IGNR;
else
event = CNF_ACPT;
goto unlock_rcu;
}
+ /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
+ if (!sta->plid && event == CNF_ACPT)
+ sta->plid = plid;
+
changed |= mesh_plink_fsm(sdata, sta, event);
unlock_rcu:
rcu_read_unlock();
if (bss->wmm_used && bss->uapsd_supported &&
- (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) &&
- sdata->wmm_acm != 0xff) {
+ (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
assoc_data->uapsd = true;
ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
} else {
unsigned long flags;
struct ps_data *ps;
- if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
+ u.ap);
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP)
ps = &sdata->bss->ps;
else if (ieee80211_vif_is_mesh(&sdata->vif))
ps = &sdata->u.mesh.ps;
skb->pkt_type = PACKET_OTHERHOST;
break;
default:
- break;
+ spin_unlock_bh(&sdata->mib_lock);
+ pr_debug("invalid dest mode\n");
+ kfree_skb(skb);
+ return NET_RX_DROP;
}
spin_unlock_bh(&sdata->mib_lock);
ret = mac802154_parse_frame_start(skb, &hdr);
if (ret) {
pr_debug("got invalid frame\n");
+ kfree_skb(skb);
return;
}
config NFT_NAT
depends on NF_TABLES
depends on NF_CONNTRACK
- depends on NF_NAT
+ select NF_NAT
tristate "Netfilter nf_tables nat module"
help
This option adds the "nat" expression that you can use to perform
config NETFILTER_XT_TARGET_LOG
tristate "LOG target support"
- depends on NF_LOG_IPV4 && NF_LOG_IPV6
+ select NF_LOG_COMMON
+ select NF_LOG_IPV4
+ select NF_LOG_IPV6 if IPV6
default m if NETFILTER_ADVANCED=n
help
This option adds a `LOG' target, which allows you to create rules in
(e.g. when running oldconfig). It selects
CONFIG_NETFILTER_XT_MARK (combined mark/MARK module).
+config NETFILTER_XT_NAT
+ tristate '"SNAT and DNAT" targets support'
+ depends on NF_NAT
+ ---help---
+ This option enables the SNAT and DNAT targets.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_NETMAP
tristate '"NETMAP" target support'
depends on NF_NAT
obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
-obj-$(CONFIG_NF_NAT) += xt_nat.o
+obj-$(CONFIG_NETFILTER_XT_NAT) += xt_nat.o
# targets
obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
EXPORT_SYMBOL(nf_hooks);
-#if defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif
}
list_add_rcu(&reg->list, elem->list.prev);
mutex_unlock(&nf_hook_mutex);
-#if defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
return 0;
mutex_lock(&nf_hook_mutex);
list_del_rcu(&reg->list);
mutex_unlock(&nf_hook_mutex);
-#if defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
synchronize_net();
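
Note: HAVE_JUMP_LABEL is defined only when CONFIG_JUMP_LABEL is enabled
and the compiler supports 'asm goto', and it is the guard already used
around the nf_hooks_needed fast path in the netfilter headers; keying
the accounting above on the same symbol keeps both sides consistent.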
{
.hook = ip_vs_local_reply6,
.owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST + 1,
},
#include <net/route.h> /* for ip_route_output */
#include <net/ipv6.h>
#include <net/ip6_route.h>
+#include <net/ip_tunnels.h>
#include <net/addrconf.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
old_iph = ip_hdr(skb);
}
- skb->transport_header = skb->network_header;
-
/* fix old IP header checksum */
ip_send_check(old_iph);
+ skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+ if (IS_ERR(skb))
+ goto tx_error;
+
+ skb->transport_header = skb->network_header;
+
skb_push(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
return NF_STOLEN;
tx_error:
- kfree_skb(skb);
+ if (!IS_ERR(skb))
+ kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
old_iph = ipv6_hdr(skb);
}
+ /* GSO: we need to provide proper SKB_GSO_ value for IPv6 */
+ skb = iptunnel_handle_offloads(skb, false, 0); /* SKB_GSO_SIT/IPV6 */
+ if (IS_ERR(skb))
+ goto tx_error;
+
skb->transport_header = skb->network_header;
skb_push(skb, sizeof(struct ipv6hdr));
return NF_STOLEN;
tx_error:
- kfree_skb(skb);
+ if (!IS_ERR(skb))
+ kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
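
Both tx_error changes above follow from iptunnel_handle_offloads()
returning either a valid skb or an encoded errno. A minimal sketch of
that ERR_PTR convention (userspace stand-ins, hypothetical names; the
real helpers live in <linux/err.h>):

  #include <stdio.h>

  #define MAX_ERRNO 4095
  #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

  static inline void *ERR_PTR(long error) { return (void *)error; }
  static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
  static inline int IS_ERR(const void *ptr)
  {
          return IS_ERR_VALUE((unsigned long)ptr);
  }

  struct sk_buff { int len; };

  /* Stand-in for iptunnel_handle_offloads(): returns the skb on
   * success, ERR_PTR(-errno) on failure. */
  static struct sk_buff *handle_offloads(struct sk_buff *skb, int fail)
  {
          return fail ? ERR_PTR(-12 /* ENOMEM */) : skb;
  }

  int main(void)
  {
          struct sk_buff buf = { 64 }, *skb = &buf;

          skb = handle_offloads(skb, 1);
          if (IS_ERR(skb)) {
                  /* An unconditional kfree_skb() here would pass the
                   * encoded errno to the allocator, hence the
                   * "if (!IS_ERR(skb)) kfree_skb(skb)" guard above. */
                  printf("offload failed: %ld\n", PTR_ERR(skb));
                  return 1;
          }
          return 0;
  }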
if (info->invert & ~1)
return -EINVAL;
- return info->id ? 0 : -EINVAL;
+ return 0;
}
static bool
upcall.key = &key;
upcall.userdata = NULL;
upcall.portid = ovs_vport_find_upcall_portid(p, skb);
- ovs_dp_upcall(dp, skb, &upcall);
- consume_skb(skb);
+ error = ovs_dp_upcall(dp, skb, &upcall);
+ if (unlikely(error))
+ kfree_skb(skb);
+ else
+ consume_skb(skb);
stats_counter = &stats->n_missed;
goto out;
}
{
struct ovs_header *upcall;
struct sk_buff *nskb = NULL;
- struct sk_buff *user_skb; /* to be queued to userspace */
+ struct sk_buff *user_skb = NULL; /* to be queued to userspace */
struct nlattr *nla;
struct genl_info info = {
.dst_sk = ovs_dp_get_net(dp)->genl_sock,
((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
+ user_skb = NULL;
out:
if (err)
skb_tx_error(skb);
+ kfree_skb(user_skb);
kfree_skb(nskb);
return err;
}
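
The user_skb change above is the usual ownership-transfer pattern:
clear the local pointer once the buffer has been handed off, so the
shared cleanup label frees it only on the paths that still own it. A
generic sketch (hypothetical names):

  #include <stdio.h>
  #include <stdlib.h>

  /* Stand-in for genlmsg_unicast(): takes ownership of buf. */
  static int send_buffer(char *buf)
  {
          free(buf);
          return 0;
  }

  int main(void)
  {
          char *user_buf = malloc(64);
          int err = -1;

          if (!user_buf)
                  goto out;

          err = send_buffer(user_buf); /* callee now owns the buffer */
          user_buf = NULL;             /* so 'out' must not free it */
  out:
          free(user_buf); /* no-op when we no longer own it */
          if (err)
                  fprintf(stderr, "send failed\n");
          return err ? 1 : 0;
  }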
{ "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E39", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
+ { "BCM2E64", RFKILL_TYPE_BLUETOOTH },
{ "BCM4752", RFKILL_TYPE_GPS },
{ "LNV4752", RFKILL_TYPE_GPS },
{ },
transport = asoc->peer.primary_path;
status.sstat_assoc_id = sctp_assoc2id(asoc);
- status.sstat_state = asoc->state;
+ status.sstat_state = sctp_assoc_to_state(asoc);
status.sstat_rwnd = asoc->peer.rwnd;
status.sstat_unackdata = asoc->unack_data;
}
memset(&tss, 0, sizeof(tss));
- if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
- skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP) &&
+ if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
ktime_to_timespec_cond(skb->tstamp, tss.ts + 0))
empty = 0;
if (shhwtstamps &&
*
* This function is called by a protocol handler that wants to
* advertise its address family, and have it linked into the
- * socket interface. The value ops->family coresponds to the
+ * socket interface. The value ops->family corresponds to the
* socket system call protocol family.
*/
int sock_register(const struct net_proto_family *ops)
# Check for improperly formed commit descriptions
if ($in_commit_log &&
$line =~ /\bcommit\s+[0-9a-f]{5,}/i &&
- $line !~ /\b[Cc]ommit [0-9a-f]{12,40} \("/) {
+ !($line =~ /\b[Cc]ommit [0-9a-f]{12,40} \("/ ||
+ ($line =~ /\b[Cc]ommit [0-9a-f]{12,40}\s*$/ &&
+ defined $rawlines[$linenr] &&
+ $rawlines[$linenr] =~ /^\s*\("/))) {
$line =~ /\b(c)ommit\s+([0-9a-f]{5,})/i;
my $init_char = $1;
my $orig_commit = lc($2);
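
With this change, a commit description may wrap to the next line.
Using a hypothetical SHA and subject, both of these commit-log forms
now pass:

  commit 0123456789ab ("An example subject line")

  commit 0123456789ab
  ("An example subject line")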
struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);
-unsigned int key_quota_root_maxkeys = 200; /* root's key count quota */
-unsigned int key_quota_root_maxbytes = 20000; /* root's key space quota */
+unsigned int key_quota_root_maxkeys = 1000000; /* root's key count quota */
+unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
unsigned int key_quota_maxkeys = 200; /* general key count quota */
unsigned int key_quota_maxbytes = 20000; /* general key space quota */
static void update_pcm_pointers(struct amdtp_stream *s,
struct snd_pcm_substream *pcm,
unsigned int frames)
-{ unsigned int ptr;
+{
+ unsigned int ptr;
+
+ /*
+ * In IEC 61883-6, one data block represents one event. In ALSA, one
+ * event equals one PCM frame. But Dice has a quirk of transferring
+ * two PCM frames in one data block.
+ */
+ if (s->double_pcm_frames)
+ frames *= 2;
ptr = s->pcm_buffer_pointer + frames;
if (ptr >= pcm->runtime->buffer_size)
unsigned int pcm_buffer_pointer;
unsigned int pcm_period_pointer;
bool pointer_flush;
+ bool double_pcm_frames;
struct snd_rawmidi_substream *midi[AMDTP_MAX_CHANNELS_FOR_MIDI * 8];
return err;
/*
- * At rates above 96 kHz, pretend that the stream runs at half the
- * actual sample rate with twice the number of channels; two samples
- * of a channel are stored consecutively in the packet. Requires
- * blocking mode and PCM buffer size should be aligned to SYT_INTERVAL.
+ * At 176.4/192.0 kHz, Dice has a quirk of transferring two PCM frames
+ * in one data block of an AMDTP packet. The transfer frequency is thus
+ * half the PCM sampling frequency, i.e. PCM frames at 192.0 kHz are
+ * transferred in AMDTP packets at 96 kHz. Two successive samples of a
+ * channel are stored consecutively in the packet. This quirk is called
+ * 'Dual Wire'.
+ * For this quirk, blocking mode is required and the PCM buffer size
+ * should be aligned to SYT_INTERVAL.
*/
channels = params_channels(hw_params);
if (rate_index > 4) {
return err;
}
- for (i = 0; i < channels; i++) {
- dice->stream.pcm_positions[i * 2] = i;
- dice->stream.pcm_positions[i * 2 + 1] = i + channels;
- }
-
rate /= 2;
channels *= 2;
+ dice->stream.double_pcm_frames = true;
+ } else {
+ dice->stream.double_pcm_frames = false;
}
mode = rate_index_to_mode(rate_index);
amdtp_stream_set_parameters(&dice->stream, rate, channels,
dice->rx_midi_ports[mode]);
+ if (rate_index > 4) {
+ channels /= 2;
+
+ for (i = 0; i < channels; i++) {
+ dice->stream.pcm_positions[i] = i * 2;
+ dice->stream.pcm_positions[i + channels] = i * 2 + 1;
+ }
+ }
+
amdtp_stream_set_pcm_format(&dice->stream,
params_format(hw_params));
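
A standalone sketch of the Dual Wire remapping that the hunk above sets
up (hypothetical code, not the driver itself): with rate_index > 4 the
stream runs at rate/2 with channels*2 data channels, and the two PCM
frames of one data block are interleaved per channel.

  #include <stdio.h>

  int main(void)
  {
          unsigned int channels = 4; /* PCM channels before doubling */
          unsigned int positions[8]; /* one slot per doubled channel */
          unsigned int i;

          /* Mirror of the pcm_positions[] setup above: PCM channel i's
           * first frame sits at data channel i*2, its second frame at
           * i*2 + 1. */
          for (i = 0; i < channels; i++) {
                  positions[i] = i * 2;
                  positions[i + channels] = i * 2 + 1;
          }

          for (i = 0; i < channels * 2; i++)
                  printf("stream channel %u <- data channel %u\n",
                         i, positions[i]);
          return 0;
  }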
CXT_FIXUP_HEADPHONE_MIC_PIN,
CXT_FIXUP_HEADPHONE_MIC,
CXT_FIXUP_GPIO1,
+ CXT_FIXUP_ASPIRE_DMIC,
CXT_FIXUP_THINKPAD_ACPI,
CXT_FIXUP_OLPC_XO,
CXT_FIXUP_CAP_MIX_AMP,
{ }
},
},
+ [CXT_FIXUP_ASPIRE_DMIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt_fixup_stereo_dmic,
+ .chained = true,
+ .chain_id = CXT_FIXUP_GPIO1,
+ },
[CXT_FIXUP_THINKPAD_ACPI] = {
.type = HDA_FIXUP_FUNC,
.v.func = hda_fixup_thinkpad_acpi,
static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
- SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1),
+ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
case 0x10ec0885:
case 0x10ec0887:
/*case 0x10ec0889:*/ /* this causes an SPDIF problem */
+ case 0x10ec0900:
alc889_coef_init(codec);
break;
case 0x10ec0888:
switch (codec->vendor_id) {
case 0x10ec0882:
case 0x10ec0885:
+ case 0x10ec0900:
break;
default:
/* ALC883 and variants */
/*64k*/
{8192000, 64000, 1, 0},
- {1228800, 64000, 1, 1},
- {1693440, 64000, 1, 2},
- {2457600, 64000, 1, 3},
- {3276800, 64000, 1, 4},
+ {12288000, 64000, 1, 1},
+ {16934400, 64000, 1, 2},
+ {24576000, 64000, 1, 3},
+ {32768000, 64000, 1, 4},
/* 88.2k */
{11289600, 88200, 1, 0},
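
The old 64 kHz entries were each missing a digit (1228800 Hz is
1.2288 MHz); the corrected values are the standard audio master clocks
(12.288, 16.9344, 24.576 and 32.768 MHz), consistent with the 8192000
and 11289600 entries around them.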
index = cs4265_get_clk_index(cs4265->sysclk, params_rate(params));
if (index >= 0) {
snd_soc_update_bits(codec, CS4265_ADC_CTL,
- CS4265_ADC_FM, clk_map_table[index].fm_mode);
+ CS4265_ADC_FM, clk_map_table[index].fm_mode << 6);
snd_soc_update_bits(codec, CS4265_MCLK_FREQ,
CS4265_MCLK_FREQ_MASK,
- clk_map_table[index].mclkdiv);
+ clk_map_table[index].mclkdiv << 4);
} else {
dev_err(codec->dev, "can't get correct mclk\n");
*/
#ifndef __DA732X_H_
-#define __DA732X_H
+#define __DA732X_H_
#include <sound/soc.h>
static const struct regmap_config rt5640_regmap = {
.reg_bits = 8,
.val_bits = 16,
+ .use_single_rw = true,
.max_register = RT5640_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5640_ranges) *
RT5640_PR_SPACING),
{ "BST2", NULL, "IN2P" },
{ "BST2", NULL, "IN2N" },
- { "IN1P", NULL, "micbias1" },
- { "IN1N", NULL, "micbias1" },
- { "IN2P", NULL, "micbias1" },
- { "IN2N", NULL, "micbias1" },
+ { "IN1P", NULL, "MICBIAS1" },
+ { "IN1N", NULL, "MICBIAS1" },
+ { "IN2P", NULL, "MICBIAS1" },
+ { "IN2N", NULL, "MICBIAS1" },
{ "ADC 1", NULL, "BST1" },
{ "ADC 1", NULL, "ADC 1 power" },
snd_soc_card_set_drvdata(&priv->snd_card, priv);
ret = devm_snd_soc_register_card(&pdev->dev, &priv->snd_card);
+ if (ret >= 0)
+ return ret;
err:
asoc_simple_card_unref(pdev);
return ret;
}
+static int asoc_simple_card_remove(struct platform_device *pdev)
+{
+ return asoc_simple_card_unref(pdev);
+}
+
static const struct of_device_id asoc_simple_of_match[] = {
{ .compatible = "simple-audio-card", },
{},
.of_match_table = asoc_simple_of_match,
},
.probe = asoc_simple_card_probe,
+ .remove = asoc_simple_card_remove,
};
module_platform_driver(asoc_simple_card);
.stream_name = "TWL4030 Voice",
.cpu_dai_name = "omap-mcbsp.3",
.codec_dai_name = "twl4030-voice",
- .platform_name = "omap-mcbsp.2",
+ .platform_name = "omap-mcbsp.3",
.codec_name = "twl4030-codec",
.dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF |
SND_SOC_DAIFMT_CBM_CFM,
};
/* it shouldn't happen */
- if (use_dvc & !use_src)
+ if (use_dvc && !use_src)
dev_err(dev, "DVC is selected without SRC\n");
/* use SSIU or SSI ? */
device_initialize(rtd->dev);
rtd->dev->parent = rtd->card->dev;
rtd->dev->release = rtd_release;
- rtd->dev->init_name = name;
+ dev_set_name(rtd->dev, "%s", name);
dev_set_drvdata(rtd->dev, rtd);
mutex_init(&rtd->pcm_mutex);
INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
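
The init_name change above matters because init_name only stores the
pointer it is given, while dev_set_name() formats and duplicates the
string. A generic sketch of the difference (hypothetical fake_device;
the kernel's dev_set_name() copies via kvasprintf()):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  struct fake_device {
          const char *init_name; /* borrowed pointer, caller keeps alive */
          char *kobj_name;       /* owned copy, as after dev_set_name() */
  };

  static int fake_dev_set_name(struct fake_device *dev, const char *name)
  {
          dev->kobj_name = strdup(name);
          return dev->kobj_name ? 0 : -1;
  }

  int main(void)
  {
          struct fake_device dev = { 0 };
          char tmp[32];

          snprintf(tmp, sizeof(tmp), "rtd-%d", 3);
          dev.init_name = tmp;          /* breaks if tmp is freed or
                                         * reused before the name is
                                         * consumed */
          fake_dev_set_name(&dev, tmp); /* safe: owns its own copy */

          puts(dev.kobj_name);
          free(dev.kobj_name);
          return 0;
  }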
*/
#ifndef __TEGRA_ASOC_UTILS_H__
-#define __TEGRA_ASOC_UTILS_H_
+#define __TEGRA_ASOC_UTILS_H__
struct clk;
struct device;