net: remove netdev_alloc_page and use __GFP_COLD
author     Eric Dumazet <eric.dumazet@gmail.com>
           Tue, 22 Nov 2011 10:57:41 +0000 (10:57 +0000)
committer  David S. Miller <davem@davemloft.net>
           Tue, 22 Nov 2011 21:43:32 +0000 (16:43 -0500)
Given we no longer use the struct net_device *dev argument, and this
interface brings little benefit, remove netdev_{alloc|free}_page() to
debloat include/linux/skbuff.h a bit.

(Some drivers used a mix of these interfaces and alloc_pages().)

When allocating a page handed to a device for a DMA transfer (device to
memory), it makes sense to use a cache-cold one (__GFP_COLD).

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
CC: Dimitris Michailidis <dm@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
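For context, the conversion applied throughout the diffs below is mechanical:
__netdev_alloc_page()/netdev_alloc_page() becomes a direct alloc_page() with
__GFP_COLD, and netdev_free_page() becomes put_page(). A minimal sketch of a
typical RX refill path after this change follows; struct my_ring, my_rx_refill
and the ring->dev field are illustrative placeholders, not code from any
driver touched by this patch.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

struct my_ring {
	struct device *dev;	/* device used for DMA mapping */
};

/* Illustrative sketch (hypothetical driver) of the pattern this patch
 * converts every caller to: allocate a cache-cold page directly and
 * release it with put_page() on error.
 */
static int my_rx_refill(struct my_ring *ring, gfp_t gfp)
{
	struct page *page;
	dma_addr_t mapping;

	/* __GFP_COLD: the page is handed straight to the device for DMA
	 * (device to memory), so a cache-cold page is preferable.
	 * __GFP_NOWARN: RX refill failures are expected and handled.
	 */
	page = alloc_page(gfp | __GFP_COLD | __GFP_NOWARN);
	if (unlikely(!page))
		return -ENOMEM;

	mapping = dma_map_page(ring->dev, page, 0, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ring->dev, mapping))) {
		put_page(page);		/* was netdev_free_page(netdev, page) */
		return -ENOMEM;
	}

	/* ... hand the mapped page to the hardware RX ring ... */
	return 0;
}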
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/usb/cdc-phonet.c
drivers/usb/gadget/f_phonet.c
include/linux/skbuff.h

diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 140254c7cba900aea3c80336f7033db651c957ba..2dae7959f00082c46c9f00be8b26c80675ae4c22 100644
@@ -491,7 +491,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
        __be64 *d = &q->desc[q->pidx];
        struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 
-       gfp |= __GFP_NOWARN;         /* failures are expected */
+       gfp |= __GFP_NOWARN | __GFP_COLD;
 
 #if FL_PG_ORDER > 0
        /*
@@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 #endif
 
        while (n--) {
-               pg = __netdev_alloc_page(adap->port[0], gfp);
+               pg = alloc_page(gfp);
                if (unlikely(!pg)) {
                        q->alloc_failed++;
                        break;
@@ -537,7 +537,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
                mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
                                       PCI_DMA_FROMDEVICE);
                if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
-                       netdev_free_page(adap->port[0], pg);
+                       put_page(pg);
                        goto out;
                }
                *d++ = cpu_to_be64(mapping);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8d5d55ad102d57a0ee2d5ff3f68f5c1861099019..c381db23e71365cedf0ffe7d6c3e518fcb45190e 100644
@@ -653,8 +653,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 
 alloc_small_pages:
        while (n--) {
-               page = __netdev_alloc_page(adapter->port[0],
-                                          gfp | __GFP_NOWARN);
+               page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD);
                if (unlikely(!page)) {
                        fl->alloc_failed++;
                        break;
@@ -664,7 +663,7 @@ alloc_small_pages:
                dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
                                       PCI_DMA_FROMDEVICE);
                if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
-                       netdev_free_page(adapter->port[0], page);
+                       put_page(page);
                        break;
                }
                *d++ = cpu_to_be64(dma_addr);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index bd9b30e6ae9d0bdb0bc74e18c3f109465c14d368..b66b8aa751e725d939574bc5fe947768ce55392e 100644
@@ -6135,7 +6135,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
                return true;
 
        if (!page) {
-               page = netdev_alloc_page(rx_ring->netdev);
+               page = alloc_page(GFP_ATOMIC | __GFP_COLD);
                bi->page = page;
                if (unlikely(!page)) {
                        rx_ring->rx_stats.alloc_failed++;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 820fc040c241580229e10085fd76ef8f846fef17..1b28ed9d8cc12f289dbfcc46cb4c3e21de9a78dc 100644
@@ -1140,7 +1140,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 
                if (ring_is_ps_enabled(rx_ring)) {
                        if (!bi->page) {
-                               bi->page = netdev_alloc_page(rx_ring->netdev);
+                               bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
                                if (!bi->page) {
                                        rx_ring->rx_stats.alloc_rx_page_failed++;
                                        goto no_buffers;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0c39bb1ac3bb4462274779b0803eec5badbd60dc..5d1a64398169494b9498efaa4b6a911bd64bc08f 100644
@@ -366,7 +366,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
                if (!bi->page_dma &&
                    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
                        if (!bi->page) {
-                               bi->page = netdev_alloc_page(adapter->netdev);
+                               bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
                                if (!bi->page) {
                                        adapter->alloc_rx_page_failed++;
                                        goto no_buffers;
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index a60d0069cc458c0f48e006ec7648e8a2daa24d2f..331e44056f5ae7fa05bf6c7661960eabe737d943 100644
@@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
        struct page *page;
        int err;
 
-       page = __netdev_alloc_page(dev, gfp_flags);
+       page = alloc_page(gfp_flags);
        if (!page)
                return -ENOMEM;
 
@@ -140,7 +140,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
        err = usb_submit_urb(req, gfp_flags);
        if (unlikely(err)) {
                dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
-               netdev_free_page(dev, page);
+               put_page(page);
        }
        return err;
 }
@@ -208,9 +208,9 @@ static void rx_complete(struct urb *req)
        dev->stats.rx_errors++;
 resubmit:
        if (page)
-               netdev_free_page(dev, page);
+               put_page(page);
        if (req)
-               rx_submit(pnd, req, GFP_ATOMIC);
+               rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD);
 }
 
 static int usbpn_close(struct net_device *dev);
@@ -229,7 +229,7 @@ static int usbpn_open(struct net_device *dev)
        for (i = 0; i < rxq_size; i++) {
                struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
 
-               if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
+               if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
                        usbpn_close(dev);
                        return -ENOMEM;
                }
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 16a509ae517b6b5a317422aa2553ce1316da46c5..7cdcb63b21ff6b3605591d87de5d413bc470c61e 100644
@@ -298,11 +298,10 @@ static void pn_net_setup(struct net_device *dev)
 static int
 pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
 {
-       struct net_device *dev = fp->dev;
        struct page *page;
        int err;
 
-       page = __netdev_alloc_page(dev, gfp_flags);
+       page = alloc_page(gfp_flags);
        if (!page)
                return -ENOMEM;
 
@@ -312,7 +311,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
 
        err = usb_ep_queue(fp->out_ep, req, gfp_flags);
        if (unlikely(err))
-               netdev_free_page(dev, page);
+               put_page(page);
        return err;
 }
 
@@ -374,9 +373,9 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
        }
 
        if (page)
-               netdev_free_page(dev, page);
+               put_page(page);
        if (req)
-               pn_rx_submit(fp, req, GFP_ATOMIC);
+               pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -436,7 +435,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 
                        netif_carrier_on(dev);
                        for (i = 0; i < phonet_rxq_size; i++)
-                               pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
+                               pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD);
                }
                spin_unlock(&port->lock);
                return 0;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 09b7ea566d666506fb37af4bb5623134fe09987b..cec0657d0d32e35dd96694a1e9050109e5b8febd 100644
@@ -1668,38 +1668,6 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
        return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
 }
 
-/**
- *     __netdev_alloc_page - allocate a page for ps-rx on a specific device
- *     @dev: network device to receive on
- *     @gfp_mask: alloc_pages_node mask
- *
- *     Allocate a new page. dev currently unused.
- *
- *     %NULL is returned if there is no free memory.
- */
-static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
-{
-       return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
-}
-
-/**
- *     netdev_alloc_page - allocate a page for ps-rx on a specific device
- *     @dev: network device to receive on
- *
- *     Allocate a new page. dev currently unused.
- *
- *     %NULL is returned if there is no free memory.
- */
-static inline struct page *netdev_alloc_page(struct net_device *dev)
-{
-       return __netdev_alloc_page(dev, GFP_ATOMIC);
-}
-
-static inline void netdev_free_page(struct net_device *dev, struct page *page)
-{
-       __free_page(page);
-}
-
 /**
  * skb_frag_page - retrieve the page refered to by a paged fragment
  * @frag: the paged fragment