/* allocate descriptors */
dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
- &dd->dd_desc_paddr, GFP_ATOMIC);
+ &dd->dd_desc_paddr, GFP_KERNEL);
if (dd->dd_desc == NULL) {
error = -ENOMEM;
goto fail;
}
/* allocate buffers */
bsize = sizeof(struct ath_buf) * nbuf;
- bf = kmalloc(bsize, GFP_KERNEL);
+ bf = kzalloc(bsize, GFP_KERNEL);
if (bf == NULL) {
error = -ENOMEM;
goto fail2;
}
- memset(bf, 0, bsize);
dd->dd_bufptr = bf;
INIT_LIST_HEAD(head);
return (tsf & ~0x7fff) | rstamp;
}
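
The descriptor/buffer setup above (ath_descdma_setup()) runs in process context at device setup time, so the DMA-coherent descriptor memory can be allocated with GFP_KERNEL instead of GFP_ATOMIC, and the kmalloc() + memset() pair for the buffer array collapses into a single kzalloc(). A minimal sketch of the same pattern, with made-up names (my_descdma_setup, my_buf) and none of the driver state; it is not part of the patch itself:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct my_buf {
	struct list_head list;
	dma_addr_t daddr;
};

static int my_descdma_setup(struct device *dev, void **desc,
			    dma_addr_t *desc_paddr, size_t desc_len,
			    struct my_buf **bufs, int nbuf)
{
	/* Process context at init time: a sleeping GFP_KERNEL allocation
	 * is allowed and far less likely to fail than GFP_ATOMIC. */
	*desc = dma_alloc_coherent(dev, desc_len, desc_paddr, GFP_KERNEL);
	if (*desc == NULL)
		return -ENOMEM;

	/* kzalloc() replaces the kmalloc() + memset(0) pair. */
	*bufs = kzalloc(sizeof(**bufs) * nbuf, GFP_KERNEL);
	if (*bufs == NULL) {
		dma_free_coherent(dev, desc_len, *desc, *desc_paddr);
		return -ENOMEM;
	}
	return 0;
}
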
-static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
+static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len, gfp_t gfp_mask)
{
struct sk_buff *skb;
u32 off;
* Unfortunately this means we may get 8 KB here from the
* kernel... and that is actually what is observed on some
* systems :( */
- skb = dev_alloc_skb(len + sc->cachelsz - 1);
+ skb = __dev_alloc_skb(len + sc->cachelsz - 1, gfp_mask);
if (skb != NULL) {
off = ((unsigned long) skb->data) % sc->cachelsz;
if (off != 0)
	skb_reserve(skb, sc->cachelsz - off);
}
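
dev_alloc_skb() always allocates with GFP_ATOMIC; giving ath_rxbuf_alloc() a gfp_t argument and switching to __dev_alloc_skb() lets the RX init loop below pass GFP_KERNEL, while the requeue path in the RX tasklet, which runs in softirq context and must not sleep, keeps GFP_ATOMIC. A rough sketch of the same helper shape, simplified to take the cache line size as a parameter (the name rxbuf_alloc_sketch is illustrative, not the driver's API):

#include <linux/skbuff.h>

static struct sk_buff *rxbuf_alloc_sketch(u32 len, u32 cachelsz,
					  gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u32 off;

	/* Over-allocate by cachelsz - 1 so skb->data can be pushed up
	 * to a cache line boundary below. */
	skb = __dev_alloc_skb(len + cachelsz - 1, gfp_mask);
	if (!skb)
		return NULL;

	/* Align skb->data to the cache line size. */
	off = ((unsigned long)skb->data) % cachelsz;
	if (off != 0)
		skb_reserve(skb, cachelsz - off);

	return skb;
}
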
list_for_each_entry(bf, &sc->rx.rxbuf, list) {
- skb = ath_rxbuf_alloc(sc, sc->rx.bufsize);
+ skb = ath_rxbuf_alloc(sc, sc->rx.bufsize, GFP_KERNEL);
if (skb == NULL) {
error = -ENOMEM;
break;
/* Ensure we always have an skb to requeue once we are done
* processing the current buffer's skb */
- requeue_skb = ath_rxbuf_alloc(sc, sc->rx.bufsize);
+ requeue_skb = ath_rxbuf_alloc(sc, sc->rx.bufsize, GFP_ATOMIC);
/* If there is no memory we ignore the current RX'd frame,
* tell hardware it can give us a new frame using the old