select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
++ +++ select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT if MMU
select CLONE_BACKWARDS
select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_PERF_EVENTS
+++++ select HAVE_PERF_REGS
+++++ select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
select GENERIC_CLOCKEVENTS
select HAVE_IDE
select ISA
----- select NEED_MACH_GPIO_H
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
help
for (multi-)cluster based systems, such as big.LITTLE based
systems.
+++++config BIG_LITTLE
+++++ bool "big.LITTLE support (Experimental)"
+++++ depends on CPU_V7 && SMP
+++++ select MCPM
+++++ help
+++++ This option enables support selections for the big.LITTLE
+++++ system architecture.
+++++
+++++config BL_SWITCHER
+++++ bool "big.LITTLE switcher support"
+++++ depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
+++++ select CPU_PM
+++++ select ARM_CPU_SUSPEND
+++++ help
+++++ The big.LITTLE "switcher" provides the core functionality to
+++++ transparently handle transition between a cluster of A15's
+++++ and a cluster of A7's in a big.LITTLE system.
+++++
+++++config BL_SWITCHER_DUMMY_IF
+++++ tristate "Simple big.LITTLE switcher user interface"
+++++ depends on BL_SWITCHER && DEBUG_KERNEL
+++++ help
+++++ This is a simple, dummy char dev interface to control
+++++ the big.LITTLE switcher core code. It is meant for
+++++ debugging purposes only.
+++++
choice
prompt "Memory split"
default VMSPLIT_3G
config KERNEL_MODE_NEON
bool "Support for NEON in kernel mode"
---- default n
---- depends on NEON
++++ depends on NEON && AEABI
help
Say Y to include support for NEON in kernel mode.
sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
+++++extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];
+++++
+++++void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+++++ unsigned long poke_phys_addr, unsigned long poke_val)
+++++{
+++++ unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
+++++ poke[0] = poke_phys_addr;
+++++ poke[1] = poke_val;
+++++ __cpuc_flush_dcache_area((void *)poke, 8);
+++++ outer_clean_range(__pa(poke), __pa(poke + 2));
+++++}
+++++
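For context, a platform backend's power-up path would typically queue such a poke right after setting the entry vector. A minimal sketch, assuming a hypothetical power-controller "go" register (PWRCTL_GO_PHYS, PWRCTL_GO_MAGIC and example_power_up() are made up; only the two mcpm_* calls are from this series):

    #include <asm/mcpm.h>

    #define PWRCTL_GO_PHYS	0x2b000024	/* hypothetical controller register */
    #define PWRCTL_GO_MAGIC	0x1

    extern void secondary_startup(void);	/* usual SMP bringup entry */

    static int example_power_up(unsigned int cpu, unsigned int cluster)
    {
            /* give the waking CPU somewhere to go first... */
            mcpm_set_entry_vector(cpu, cluster, secondary_startup);
            /* ...then have the early entry assembly poke the controller
             * before the CPU is ungated (address must be physical; a
             * 0 address disables the poke) */
            mcpm_set_early_poke(cpu, cluster, PWRCTL_GO_PHYS, PWRCTL_GO_MAGIC);
            return 0;
    }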
static const struct mcpm_platform_ops *platform_ops;
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
phys_reset_t phys_reset;
- ---- BUG_ON(!platform_ops);
+ ++++ if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
+ ++++ return;
BUG_ON(!irqs_disabled());
/*
{
phys_reset_t phys_reset;
- ---- BUG_ON(!platform_ops);
+ ++++ if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
+ ++++ return;
BUG_ON(!irqs_disabled());
/* Very similar to mcpm_cpu_power_down() */
generic-y += serial.h
generic-y += shmbuf.h
generic-y += siginfo.h
+++++generic-y += simd.h
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
- ----generic-y += types.h
generic-y += unaligned.h
*/
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
+++++/*
+++++ * This sets an early poke i.e. a value to be poked into some address
+++++ * from very early assembly code before the CPU is ungated. The
+++++ * address must be physical, and if 0 then nothing will happen.
+++++ */
+++++void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+++++ unsigned long poke_phys_addr, unsigned long poke_val);
+++++
/*
* CPU/cluster power operations API for higher subsystems to use.
*/
*
* This must be called with interrupts disabled.
*
- ---- * This does not return. Re-entry in the kernel is expected via
- ---- * mcpm_entry_point.
+ ++++ * On success this does not return. Re-entry in the kernel is expected
+ ++++ * via mcpm_entry_point.
+ ++++ *
+ ++++ * This will return if mcpm_platform_register() has not been called
+ ++++ * previously, in which case the caller should take appropriate action.
*/
void mcpm_cpu_power_down(void);
*
* This must be called with interrupts disabled.
*
- ---- * This does not return. Re-entry in the kernel is expected via
- ---- * mcpm_entry_point.
+ ++++ * On success this does not return. Re-entry in the kernel is expected
+ ++++ * via mcpm_entry_point.
+ ++++ *
+ ++++ * This will return if mcpm_platform_register() has not been called
+ ++++ * previously, in which case the caller should take appropriate action.
*/
void mcpm_cpu_suspend(u64 expected_residency);
#error SMP not supported on pre-ARMv6 CPUs
#endif
-----#include <asm/processor.h>
+++++#include <linux/prefetch.h>
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
*/
-----#define ALT_SMP(smp, up) \
----- "9998: " smp "\n" \
----- " .pushsection \".alt.smp.init\", \"a\"\n" \
----- " .long 9998b\n" \
----- " " up "\n" \
----- " .popsection\n"
-----
#ifdef CONFIG_THUMB2_KERNEL
-----#define SEV ALT_SMP("sev.w", "nop.w")
/*
* For Thumb-2, special care is needed to ensure that the conditional WFE
* instruction really does assemble to exactly 4 bytes (as required by
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
-----#define WFE(cond) ALT_SMP( \
+++++#define WFE(cond) __ALT_SMP_ASM( \
"it " cond "\n\t" \
"wfe" cond ".n", \
\
"nop.w" \
)
#else
-----#define SEV ALT_SMP("sev", "nop")
-----#define WFE(cond) ALT_SMP("wfe" cond, "nop")
+++++#define WFE(cond) __ALT_SMP_ASM("wfe" cond, "nop")
#endif
+++++#define SEV __ALT_SMP_ASM(WASM(sev), WASM(nop))
+++++
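These macros land in spin-wait loops; condensed from the ticket-lock paths below (a sketch, not verbatim kernel code), the waiter parks in WFE until the unlocker's SEV:

    static inline void sketch_spin_lock(arch_spinlock_t *lock)
    {
            arch_spinlock_t lockval;

            /* ... ldrex/strex loop (elided) takes a ticket into lockval ... */
            while (lockval.tickets.next != lockval.tickets.owner) {
                    wfe();	/* patched to a NOP on UP via .alt.smp.init */
                    lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
            }
            smp_mb();
    }

    static inline void sketch_spin_unlock(arch_spinlock_t *lock)
    {
            smp_mb();
            lock->tickets.owner++;
            dsb_sev();	/* DSB to publish the store, SEV to wake waiters */
    }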
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
u32 newval;
arch_spinlock_t lockval;
+++++ prefetchw(&lock->slock);
__asm__ __volatile__(
"1: ldrex %0, [%3]\n"
" add %1, %0, %4\n"
unsigned long contended, res;
u32 slock;
+++++ prefetchw(&lock->slock);
do {
__asm__ __volatile__(
" ldrex %0, [%3]\n"
dsb_sev();
}
++ +++static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
++ +++{
++ +++ return lock.tickets.owner == lock.tickets.next;
++ +++}
++ +++
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
-- --- struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
-- --- return tickets.owner != tickets.next;
++ +++ return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}
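The by-value form exists for the cmpxchg-based lockref code enabled by ARCH_USE_CMPXCHG_LOCKREF above: lib/lockref.c snapshots the combined {lock, count} word and must test the snapshot, not the live lock. A simplified sketch of that pattern (not the actual lib/lockref.c code):

    struct lockref_sketch {
            union {
                    u64 lock_count;		/* updated as one unit by cmpxchg */
                    struct {
                            spinlock_t lock;
                            unsigned int count;
                    };
            };
    };

    static bool sketch_lockref_get(struct lockref_sketch *lr)
    {
            struct lockref_sketch old, new;

            old.lock_count = ACCESS_ONCE(lr->lock_count);
            if (!arch_spin_value_unlocked(old.lock.rlock.raw_lock))
                    return false;	/* contended: take the lock instead */
            new = old;
            new.count++;
            return cmpxchg64(&lr->lock_count, old.lock_count,
                             new.lock_count) == old.lock_count;
    }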
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
unsigned long tmp;
+++++ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
{
unsigned long contended, res;
+++++ prefetchw(&rw->lock);
do {
__asm__ __volatile__(
" ldrex %0, [%2]\n"
}
/* write_can_lock - would write_trylock() succeed? */
-----#define arch_write_can_lock(x) ((x)->lock == 0)
+++++#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
/*
* Read locks are a bit more hairy:
{
unsigned long tmp, tmp2;
+++++ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
smp_mb();
+++++ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
{
unsigned long contended, res;
+++++ prefetchw(&rw->lock);
do {
__asm__ __volatile__(
" ldrex %0, [%2]\n"
}
/* read_can_lock - would read_trylock() succeed? */
-----#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
+++++#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
and r0, r0, #0xc0000000 @ multiprocessing extensions and
teq r0, #0x80000000 @ not part of a uniprocessor system?
- ---- moveq pc, lr @ yes, assume SMP
+ ++++ bne __fixup_smp_on_up @ no, assume UP
+ ++++
+ ++++ @ Core indicates it is SMP. Check for Aegis SOC where a single
+ ++++ @ Cortex-A9 CPU is present but SMP operations fault.
+ ++++ mov r4, #0x41000000
+ ++++ orr r4, r4, #0x0000c000
+ ++++ orr r4, r4, #0x00000090
+ ++++ teq r3, r4 @ Check for ARM Cortex-A9
+ ++++ movne pc, lr @ Not ARM Cortex-A9,
+ ++++
+ ++++ @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
+ ++++ @ below address check will need to be #ifdef'd or equivalent
+ ++++ @ for the Aegis platform.
+ ++++ mrc p15, 4, r0, c15, c0 @ get SCU base address
+ ++++ teq r0, #0x0 @ '0' on actual UP A9 hardware
+ ++++ beq __fixup_smp_on_up @ So it's an A9 UP
+ ++++ ldr r0, [r0, #4] @ read SCU Config
+ ++++ and r0, r0, #0x3 @ number of CPUs
+ ++++ teq r0, #0x0 @ is 1?
+ ++++ movne pc, lr
__fixup_smp_on_up:
adr r0, 1f
ldmfd sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
+++++#ifdef __ARMEB__
+++++#define LOW_OFFSET 0x4
+++++#define HIGH_OFFSET 0x0
+++++#else
+++++#define LOW_OFFSET 0x0
+++++#define HIGH_OFFSET 0x4
+++++#endif
+++++
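__pv_phys_offset and __pv_offset grow to 64 bits further down (.quad), but the assembly still reads and writes them one 32-bit word at a time; these two offsets pick the correct half on either endianness. The equivalent in C (a sketch of the layout only):

    /* How LOW_OFFSET/HIGH_OFFSET map onto a .quad: on little-endian the
     * low half sits at byte offset 0, on big-endian at byte offset 4. */
    static u32 pv_low_word(const u64 *q)
    {
            return *(const u32 *)((const char *)q + LOW_OFFSET);
    }

    static u32 pv_high_word(const u64 *q)
    {
            return *(const u32 *)((const char *)q + HIGH_OFFSET);
    }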
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
/* __fixup_pv_table - patch the stub instructions with the delta between
__HEAD
__fixup_pv_table:
adr r0, 1f
----- ldmia r0, {r3-r5, r7}
----- sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
+++++ ldmia r0, {r3-r7}
+++++ mvn ip, #0
+++++ subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
add r4, r4, r3 @ adjust table start address
add r5, r5, r3 @ adjust table end address
----- add r7, r7, r3 @ adjust __pv_phys_offset address
----- str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset
+++++ add r6, r6, r3 @ adjust __pv_phys_offset address
+++++ add r7, r7, r3 @ adjust __pv_offset address
+++++ str r8, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_offset
+++++ strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
THUMB( it ne @ cross section branch )
bne __error
----- str r6, [r7, #4] @ save to __pv_offset
+++++ str r3, [r7, #LOW_OFFSET] @ save to __pv_offset low bits
b __fixup_a_pv_table
ENDPROC(__fixup_pv_table)
.long __pv_table_begin
.long __pv_table_end
2: .long __pv_phys_offset
+++++ .long __pv_offset
.text
__fixup_a_pv_table:
+++++ adr r0, 3f
+++++ ldr r6, [r0]
+++++ add r6, r6, r3
+++++ ldr r0, [r6, #HIGH_OFFSET] @ pv_offset high word
+++++ ldr r6, [r6, #LOW_OFFSET] @ pv_offset low word
+++++ mov r6, r6, lsr #24
+++++ cmn r0, #1
#ifdef CONFIG_THUMB2_KERNEL
+++++ moveq r0, #0x200000 @ set bit 21, mov to mvn instruction
lsls r6, #24
beq 2f
clz r7, r6
b 2f
1: add r7, r3
ldrh ip, [r7, #2]
----- and ip, 0x8f00
----- orr ip, r6 @ mask in offset bits 31-24
+++++ tst ip, #0x4000
+++++ and ip, #0x8f00
+++++ orrne ip, r6 @ mask in offset bits 31-24
+++++ orreq ip, r0 @ mask in offset bits 7-0
strh ip, [r7, #2]
+++++ ldrheq ip, [r7]
+++++ biceq ip, #0x20
+++++ orreq ip, ip, r0, lsr #16
+++++ strheq ip, [r7]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
bcc 1b
bx lr
#else
+++++ moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
b 2f
1: ldr ip, [r7, r3]
bic ip, ip, #0x000000ff
----- orr ip, ip, r6 @ mask in offset bits 31-24
+++++ tst ip, #0xf00 @ check the rotation field
+++++ orrne ip, ip, r6 @ mask in offset bits 31-24
+++++ biceq ip, ip, #0x400000 @ clear bit 22
+++++ orreq ip, ip, r0 @ mask in offset bits 7-0
str ip, [r7, r3]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
#endif
ENDPROC(__fixup_a_pv_table)
+++++3: .long __pv_offset
+++++
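In C terms, the ARM-encoding loop above works like this: an ARM data-processing immediate is an 8-bit value with a 4-bit rotation, and the __pv stub instructions reserve a rotation selecting bits 31:24, so only the instruction's low byte needs rewriting; mov/mvn stubs (rotation field clear) instead get bit 22 toggled when the 64-bit offset's high word is all-ones. A sketch (r6/r0 named after the registers used above):

    /* r6 = __pv_offset bits 31-24; r0 = 0x00400000 when the offset's
     * high word is ~0 (mov must become mvn), 0 otherwise. */
    static u32 sketch_patch_arm_insn(u32 insn, u32 r6, u32 r0)
    {
            insn &= ~0x000000ff;		/* clear the immediate field */
            if (insn & 0x00000f00) {	/* rotation set: add/sub stub */
                    insn |= r6;		/* mask in offset bits 31-24 */
            } else {			/* mov/mvn stub */
                    insn &= ~0x00400000;	/* clear bit 22 (mvn -> mov) */
                    insn |= r0;		/* set bit 22 again for mvn */
            }
            return insn;
    }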
ENTRY(fixup_pv_table)
stmfd sp!, {r4 - r7, lr}
----- ldr r2, 2f @ get address of __pv_phys_offset
mov r3, #0 @ no offset
mov r4, r0 @ r0 = table start
add r5, r0, r1 @ r1 = table size
----- ldr r6, [r2, #4] @ get __pv_offset
bl __fixup_a_pv_table
ldmfd sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)
----- .align
-----2: .long __pv_phys_offset
-----
.data
.globl __pv_phys_offset
.type __pv_phys_offset, %object
__pv_phys_offset:
----- .long 0
----- .size __pv_phys_offset, . - __pv_phys_offset
+++++ .quad 0
+++++ .size __pv_phys_offset, . -__pv_phys_offset
+++++
+++++ .globl __pv_offset
+++++ .type __pv_offset, %object
__pv_offset:
----- .long 0
+++++ .quad 0
+++++ .size __pv_offset, . -__pv_offset
#endif
#include "head-common.S"
#endif
extern void paging_init(const struct machine_desc *desc);
+++++extern void early_paging_init(const struct machine_desc *,
+++++ struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);
elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
++ +++ erratum_a15_798181_init();
++ +++
feat_v6_fixup();
cacheid_init();
parse_early_param();
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+++++
+++++ early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
sanity_check_meminfo();
arm_memblock_init(&meminfo, mdesc);
/* needed to ensure proper operation of coherent allocations
* later, in case driver doesn't set it explicitly */
---- - dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
---- - dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
++++ + dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
}
/* register with generic device framework */
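This is the first of this digest's many conversions (vio, edma, bnx2x, be2net, the Intel NIC probes, imx-drm, chipidea) to dma_set_mask_and_coherent(), which sets the streaming and coherent DMA masks in one call instead of paired dma_set_mask()/dma_set_coherent_mask() calls. The idiomatic 64-bit probe pattern with a 32-bit fallback, as a free-standing sketch:

    #include <linux/dma-mapping.h>

    static int sketch_set_dma_masks(struct device *dev)
    {
            /* try 64-bit DMA first, fall back to 32-bit, else give up */
            if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                    return 0;
            if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                    return 0;
            dev_err(dev, "No usable DMA configuration, aborting\n");
            return -EIO;
    }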
const char *cp;
dn = dev->of_node;
----- if (!dn)
----- return -ENODEV;
+++++ if (!dn) {
+++++ strcat(buf, "\n");
+++++ return strlen(buf);
+++++ }
cp = of_get_property(dn, "compatible", NULL);
----- if (!cp)
----- return -ENODEV;
+++++ if (!cp) {
+++++ strcat(buf, "\n");
+++++ return strlen(buf);
+++++ }
return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
struct edma_cc *ecc;
int ret;
++++ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++++ + if (ret)
++++ + return ret;
++++ +
ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
if (!ecc) {
dev_err(&pdev->dev, "Can't allocate controller\n");
static const struct platform_device_info edma_dev_info0 = {
.name = "edma-dma-engine",
.id = 0,
++++ + .dma_mask = DMA_BIT_MASK(32),
};
static const struct platform_device_info edma_dev_info1 = {
.name = "edma-dma-engine",
.id = 1,
++++ + .dma_mask = DMA_BIT_MASK(32),
};
static int edma_init(void)
ret = PTR_ERR(pdev0);
goto out;
}
---- - pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
---- - pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
if (EDMA_CTLRS == 2) {
platform_device_unregister(pdev0);
ret = PTR_ERR(pdev1);
}
---- - pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
---- - pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
out:
}
module_exit(edma_exit);
-----MODULE_AUTHOR("Matt Porter <mporter@ti.com>");
+++++MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");
attn.sig[3] = REG_RD(bp,
MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
port*4);
+++++ /* Since MCP attentions can't be disabled inside the block, we need to
+++++ * read AEU registers to see whether they're currently disabled
+++++ */
+++++ attn.sig[3] &= ((REG_RD(bp,
+++++ !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
+++++ : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
+++++ MISC_AEU_ENABLE_MCP_PRTY_BITS) |
+++++ ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
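The masking expression keeps every non-MCP bit and clears exactly those MCP parity bits whose AEU enable is off: ANDing with ((enable & MCP_PRTY_BITS) | ~MCP_PRTY_BITS) passes bits outside the MCP parity mask through unchanged. Spelled out as a sketch:

    /* filter MCP parity attention bits by their AEU enables */
    static u32 sketch_filter_mcp(u32 sig, u32 aeu_enable, u32 mcp_prty_bits)
    {
            u32 keep = (aeu_enable & mcp_prty_bits)	/* enabled MCP bits */
                     | ~mcp_prty_bits;			/* all non-MCP bits */
            return sig & keep;
    }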
if (!CHIP_IS_E1x(bp))
attn.sig[4] = REG_RD(bp,
if (IS_PF(bp) &&
!BP_NOMCP(bp)) {
int mb_idx = BP_FW_MB_IDX(bp);
----- u32 drv_pulse;
----- u32 mcp_pulse;
+++++ u16 drv_pulse;
+++++ u16 mcp_pulse;
++bp->fw_drv_pulse_wr_seq;
bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
----- /* TBD - add SYSTEM_TIME */
drv_pulse = bp->fw_drv_pulse_wr_seq;
bnx2x_drv_pulse(bp);
mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
MCP_PULSE_SEQ_MASK);
/* The delta between driver pulse and mcp response
----- * should be 1 (before mcp response) or 0 (after mcp response)
+++++ * should not get too big. If the MFW is more than 5 pulses
+++++ * behind, we should worry about it enough to generate an error
+++++ * log.
*/
----- if ((drv_pulse != mcp_pulse) &&
----- (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
----- /* someone lost a heartbeat... */
----- BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+++++ if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
+++++ BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
drv_pulse, mcp_pulse);
----- }
}
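The masked subtraction makes the check wraparound-safe: with MCP_PULSE_SEQ_MASK being 0x7fff, drv_pulse = 0x0002 and mcp_pulse = 0x7ffe yield (0x0002 - 0x7ffe) & 0x7fff = 4, i.e. the MFW is four pulses behind even though mcp_pulse is numerically larger; the old equality test would have misreported that case. A standalone demonstration:

    #include <stdio.h>

    #define MCP_PULSE_SEQ_MASK 0x7fff	/* 15-bit sequence counter */

    int main(void)
    {
            unsigned int drv_pulse = 0x0002, mcp_pulse = 0x7ffe;
            unsigned int delta = (drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK;

            printf("delta = %u\n", delta);	/* prints: delta = 4 */
            return 0;
    }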
if (bp->state == BNX2X_STATE_OPEN)
else if (bp->wol) {
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u8 *mac_addr = bp->dev->dev_addr;
+++++ struct pci_dev *pdev = bp->pdev;
u32 val;
u16 pmc;
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
/* Enable the PME and clear the status */
----- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
+++++ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
----- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
+++++ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
break;
}
----- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+++++ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
BNX2X_DEV_INFO("%sWoL capable\n",
{
struct device *dev = &bp->pdev->dev;
---- - if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
++++ + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) == 0) {
bp->flags |= USING_DAC_FLAG;
---- - if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
---- - dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
---- - return -EIO;
---- - }
---- - } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
++++ + } else if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
dev_err(dev, "System does not support DMA, aborting\n");
return -EIO;
}
}
if (IS_PF(bp)) {
----- bp->pm_cap = pdev->pm_cap;
----- if (bp->pm_cap == 0) {
+++++ if (!pdev->pm_cap) {
dev_err(&bp->pdev->dev,
"Cannot find power management capability, aborting\n");
rc = -EIO;
cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+++++ DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
+++++ BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
+++++ cp->iscsi_l2_cid);
+++++
if (NO_ISCSI_OOO(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}
unsigned int eth_hdr_len;
struct iphdr *ip;
----- /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
+++++ /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or less
* may cause a transmit stall on that port. So the work-around is to
----- * pad such packets to a 36-byte length.
+++++ * pad short packets (<= 32 bytes) to a 36-byte length.
*/
----- if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
+++++ if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
if (skb_padto(skb, 36))
goto tx_drop;
skb->len = 36;
status = be_cmd_vlan_config(adapter, adapter->if_handle,
vids, num, 1, 0);
----- /* Set to VLAN promisc mode as setting VLAN filter failed */
if (status) {
----- dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
----- dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
----- goto set_vlan_promisc;
+++++ /* Set to VLAN promisc mode as setting VLAN filter failed */
+++++ if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+++++ goto set_vlan_promisc;
+++++ dev_err(&adapter->pdev->dev,
+++++ "Setting HW VLAN filtering failed.\n");
+++++ } else {
+++++ if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
+++++ /* hw VLAN filtering re-enabled. */
+++++ status = be_cmd_rx_filter(adapter,
+++++ BE_FLAGS_VLAN_PROMISC, OFF);
+++++ if (!status) {
+++++ dev_info(&adapter->pdev->dev,
+++++ "Disabling VLAN Promiscuous mode.\n");
+++++ adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+++++ dev_info(&adapter->pdev->dev,
+++++ "Re-Enabling HW VLAN filtering\n");
+++++ }
+++++ }
}
return status;
set_vlan_promisc:
----- status = be_cmd_vlan_config(adapter, adapter->if_handle,
----- NULL, 0, 1, 1);
+++++ dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
+++++
+++++ status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
+++++ if (!status) {
+++++ dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
+++++ dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
+++++ adapter->flags |= BE_FLAGS_VLAN_PROMISC;
+++++ } else
+++++ dev_err(&adapter->pdev->dev,
+++++ "Failed to enable VLAN Promiscuous mode.\n");
return status;
}
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
----- if (!lancer_chip(adapter) && !be_physfn(adapter)) {
----- status = -EINVAL;
----- goto ret;
----- }
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
----- if (!lancer_chip(adapter) && !be_physfn(adapter)) {
----- status = -EINVAL;
----- goto ret;
----- }
-----
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
goto ret;
vi->vf = vf;
vi->tx_rate = vf_cfg->tx_rate;
----- vi->vlan = vf_cfg->vlan_tag;
----- vi->qos = 0;
+++++ vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
+++++ vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
return 0;
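vf_cfg->vlan_tag holds a full 802.1Q TCI, so VID and priority are unpacked with VLAN_VID_MASK (low 12 bits) and VLAN_PRIO_SHIFT (13), mirroring how the set path below packs them. In isolation:

    #include <linux/if_vlan.h>	/* VLAN_VID_MASK, VLAN_PRIO_SHIFT */

    /* pack/unpack sketch for the TCI stored in vf_cfg->vlan_tag */
    static u16 sketch_pack_tci(u16 vid, u8 qos)
    {
            return (qos << VLAN_PRIO_SHIFT) | (vid & VLAN_VID_MASK);
    }

    static void sketch_unpack_tci(u16 tci, u16 *vid, u8 *qos)
    {
            *vid = tci & VLAN_VID_MASK;
            *qos = tci >> VLAN_PRIO_SHIFT;
    }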
int vf, u16 vlan, u8 qos)
{
struct be_adapter *adapter = netdev_priv(netdev);
+++++ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status = 0;
if (!sriov_enabled(adapter))
return -EPERM;
----- if (vf >= adapter->num_vfs || vlan > 4095)
+++++ if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
return -EINVAL;
----- if (vlan) {
----- if (adapter->vf_cfg[vf].vlan_tag != vlan) {
+++++ if (vlan || qos) {
+++++ vlan |= qos << VLAN_PRIO_SHIFT;
+++++ if (vf_cfg->vlan_tag != vlan) {
/* If this is new value, program it. Else skip. */
----- adapter->vf_cfg[vf].vlan_tag = vlan;
-----
----- status = be_cmd_set_hsw_config(adapter, vlan,
----- vf + 1, adapter->vf_cfg[vf].if_handle, 0);
+++++ vf_cfg->vlan_tag = vlan;
+++++ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
+++++ vf_cfg->if_handle, 0);
}
} else {
/* Reset Transparent Vlan Tagging. */
----- adapter->vf_cfg[vf].vlan_tag = 0;
----- vlan = adapter->vf_cfg[vf].def_vid;
+++++ vf_cfg->vlan_tag = 0;
+++++ vlan = vf_cfg->def_vid;
status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
----- adapter->vf_cfg[vf].if_handle, 0);
+++++ vf_cfg->if_handle, 0);
}
struct be_resources res = {0};
struct be_vf_cfg *vf_cfg;
u32 cap_flags, en_flags, vf;
----- int status;
+++++ int status = 0;
cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST;
if (adapter->function_mode & FLEX10_MODE)
res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+++++ else if (adapter->function_mode & UMC_ENABLED)
+++++ res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
else
res->max_vlans = BE_NUM_VLANS_SUPPORTED;
res->max_mcast_mac = BE_MAX_MC;
adapter->netdev = netdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
---- - status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
++++ + status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!status) {
---- - status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
---- - if (status < 0) {
---- - dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
---- - goto free_netdev;
---- - }
netdev->features |= NETIF_F_HIGHDMA;
} else {
---- - status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
---- - if (!status)
---- - status = dma_set_coherent_mask(&pdev->dev,
---- - DMA_BIT_MASK(32));
++++ + status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (status) {
dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
goto free_netdev;
*/
if ((hw->phy.type == e1000_phy_igp_3 ||
hw->phy.type == e1000_phy_bm) &&
----- (hw->mac.autoneg == true) &&
+++++ hw->mac.autoneg &&
(adapter->link_speed == SPEED_10 ||
adapter->link_speed == SPEED_100) &&
(adapter->link_duplex == HALF_DUPLEX)) {
return err;
pci_using_dac = 0;
---- - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
++++ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
---- - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
---- - if (!err)
---- - pci_using_dac = 1;
++++ + pci_using_dac = 1;
} else {
---- - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
++++ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
---- - err = dma_set_coherent_mask(&pdev->dev,
---- - DMA_BIT_MASK(32));
---- - if (err) {
---- - dev_err(&pdev->dev,
---- - "No usable DMA configuration, aborting\n");
---- - goto err_dma;
---- - }
++++ + dev_err(&pdev->dev,
++++ + "No usable DMA configuration, aborting\n");
++++ + goto err_dma;
}
}
{
struct ixgbe_hw *hw = &adapter->hw;
int i;
----- u32 rxctrl;
+++++ u32 rxctrl, rfctl;
/* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
ixgbe_setup_psrtype(adapter);
ixgbe_setup_rdrxctl(adapter);
+++++ /* RSC Setup */
+++++ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+++++ rfctl &= ~IXGBE_RFCTL_RSC_DIS;
+++++ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+++++ rfctl |= IXGBE_RFCTL_RSC_DIS;
+++++ IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
+++++
/* Program registers for the distribution of queues */
ixgbe_setup_mrqc(adapter);
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
speed = hw->phy.autoneg_advertised;
----- if ((!speed) && (hw->mac.ops.get_link_capabilities))
+++++ if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+++++
+++++ /* setup the highest link when no autoneg */
+++++ if (!autoneg) {
+++++ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+++++ speed = IXGBE_LINK_SPEED_10GB_FULL;
+++++ }
+++++ }
+++++
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, speed, true);
if (err)
return err;
---- - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
---- - !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
++++ + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
---- - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
++++ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
---- - err = dma_set_coherent_mask(&pdev->dev,
---- - DMA_BIT_MASK(32));
---- - if (err) {
---- - dev_err(&pdev->dev,
---- - "No usable DMA configuration, aborting\n");
---- - goto err_dma;
---- - }
++++ + dev_err(&pdev->dev,
++++ + "No usable DMA configuration, aborting\n");
++++ + goto err_dma;
}
pci_using_dac = 0;
}
struct list_head encoder_list;
struct list_head connector_list;
struct mutex mutex;
----- int references;
int pipes;
struct drm_fbdev_cma *fbhelper;
};
}
}
----- imxdrm->references++;
-----
return imxdrm->drm;
unwind_crtc:
list_for_each_entry(enc, &imxdrm->encoder_list, list)
module_put(enc->owner);
----- imxdrm->references--;
-----
mutex_unlock(&imxdrm->mutex);
}
EXPORT_SYMBOL_GPL(imx_drm_device_put);
mutex_lock(&imxdrm->mutex);
----- if (imxdrm->references) {
+++++ if (imxdrm->drm->open_count) {
ret = -EBUSY;
goto err_busy;
}
mutex_lock(&imxdrm->mutex);
----- if (imxdrm->references) {
+++++ if (imxdrm->drm->open_count) {
ret = -EBUSY;
goto err_busy;
}
mutex_lock(&imxdrm->mutex);
----- if (imxdrm->references) {
+++++ if (imxdrm->drm->open_count) {
ret = -EBUSY;
goto err_busy;
}
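These three identical hunks replace the driver-private reference counter with the DRM core's own open count as the busy test: components may not be added or removed while userspace holds the device open. The shape of the check, as a sketch (sketch_add_component() is made up):

    static int sketch_add_component(struct imx_drm_device *imxdrm)
    {
            int ret = 0;

            mutex_lock(&imxdrm->mutex);
            if (imxdrm->drm->open_count) {
                    /* userspace holds the device open: refuse hotplug */
                    ret = -EBUSY;
                    goto out;
            }
            /* ... register the new crtc/encoder/connector here ... */
    out:
            mutex_unlock(&imxdrm->mutex);
            return ret;
    }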
static int imx_drm_platform_probe(struct platform_device *pdev)
{
++++ + int ret;
++++ +
++++ + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
++++ + if (ret)
++++ + return ret;
++++ +
imx_drm_device->dev = &pdev->dev;
return drm_platform_init(&imx_drm_driver, pdev);
goto err_pdev;
}
---- - imx_drm_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32),
---- -
ret = platform_driver_register(&imx_drm_pdrv);
if (ret)
goto err_pdrv;
pdata.phy = data->phy;
---- - if (!pdev->dev.dma_mask)
---- - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
---- - if (!pdev->dev.coherent_dma_mask)
---- - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
++++ + ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++++ + if (ret)
++++ + goto err_clk;
if (data->usbmisc_data) {
ret = imx_usbmisc_init(data->usbmisc_data);
if (ret) {
dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n",
ret);
----- goto err_clk;
+++++ goto err_phy;
}
}
dev_err(&pdev->dev,
"Can't register ci_hdrc platform device, err=%d\n",
ret);
----- goto err_clk;
+++++ goto err_phy;
}
if (data->usbmisc_data) {
disable_device:
ci_hdrc_remove_device(data->ci_pdev);
+++++err_phy:
+++++ if (data->phy)
+++++ usb_phy_shutdown(data->phy);
err_clk:
clk_disable_unprepare(data->clk);
return ret;
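The new err_phy label keeps teardown the mirror image of setup: the PHY comes up after the clock, so any failure past PHY init must jump to err_phy (shutting the PHY down) before falling through to err_clk. The shape of the pattern, sketched with a hypothetical later step:

    static int sketch_probe(struct clk *clk, struct usb_phy *phy)
    {
            int ret;

            ret = clk_prepare_enable(clk);
            if (ret)
                    return ret;

            ret = usb_phy_init(phy);
            if (ret)
                    goto err_clk;

            ret = register_the_device();	/* hypothetical later step */
            if (ret)
                    goto err_phy;

            return 0;

    err_phy:
            usb_phy_shutdown(phy);
    err_clk:
            clk_disable_unprepare(clk);
            return ret;
    }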
* generic hardware linkage
*/
.irq = ehci_irq,
----- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+++++ .flags = HCD_MEMORY | HCD_USB2,
/*
* basic lifecycle operations
* We can DMA from anywhere. But the descriptors must be in
* the lower 4GB.
*/
---- - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &ehci_octeon_dma_mask;
++++ + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
++++ + if (ret)
++++ + return ret;
hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
if (!hcd)