unsigned long plen;
 
        plen = res->end - res->start + 1;
-       if ((plen & (PAGE_SIZE-1)) != 0) BUG();
+       BUG_ON((plen & (PAGE_SIZE-1)) != 0);
        sparc_unmapiorange(res->start, plen);
        release_resource(res);
 }
 dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
     int direction)
 {
-       if (direction == PCI_DMA_NONE)
-               BUG();
+       /* Mapping with PCI_DMA_NONE is a caller bug; trap it loudly. */
+       BUG_ON(direction == PCI_DMA_NONE);
        /* IIep is write-through, not flushing. */
        return virt_to_phys(ptr);
 }
 void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
     int direction)
 {
-       if (direction == PCI_DMA_NONE)
-               BUG();
+       BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
                mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
                    (size + PAGE_SIZE-1) & PAGE_MASK);
 dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
                        unsigned long offset, size_t size, int direction)
 {
-       if (direction == PCI_DMA_NONE)
-               BUG();
+       /* Mapping with PCI_DMA_NONE is a caller bug; trap it loudly. */
+       BUG_ON(direction == PCI_DMA_NONE);
        /* IIep is write-through, not flushing. */
        return page_to_phys(page) + offset;
 }
 void pci_unmap_page(struct pci_dev *hwdev,
                        dma_addr_t dma_address, size_t size, int direction)
 {
+       /* Unmapping with PCI_DMA_NONE is a caller bug; trap it loudly. */
+       BUG_ON(direction == PCI_DMA_NONE);
-       if (direction == PCI_DMA_NONE)
-               BUG();
        /* mmu_inval_dma_area XXX */
 }
 
 {
        int n;
 
-       if (direction == PCI_DMA_NONE)
-               BUG();
+       BUG_ON(direction == PCI_DMA_NONE);
        /* IIep is write-through, not flushing. */
        for (n = 0; n < nents; n++) {
-               if (page_address(sg->page) == NULL) BUG();
+               BUG_ON(page_address(sg->page) == NULL);
                sg->dvma_address = virt_to_phys(page_address(sg->page));
                sg->dvma_length = sg->length;
                sg++;
 {
        int n;
 
-       if (direction == PCI_DMA_NONE)
-               BUG();
+       BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
                for (n = 0; n < nents; n++) {
-                       if (page_address(sg->page) == NULL) BUG();
+                       BUG_ON(page_address(sg->page) == NULL);
                        mmu_inval_dma_area(
                            (unsigned long) page_address(sg->page),
                            (sg->length + PAGE_SIZE-1) & PAGE_MASK);
  */
 void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
 {
-       if (direction == PCI_DMA_NONE)
-               BUG();
+       BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
                mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
                    (size + PAGE_SIZE-1) & PAGE_MASK);
 
 void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
 {
-       if (direction == PCI_DMA_NONE)
-               BUG();
+       BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
                mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
                    (size + PAGE_SIZE-1) & PAGE_MASK);
 {
        int n;
 
-       if (direction == PCI_DMA_NONE)
-               BUG();
+       BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
                for (n = 0; n < nents; n++) {
-                       if (page_address(sg->page) == NULL) BUG();
+                       BUG_ON(page_address(sg->page) == NULL);
                        mmu_inval_dma_area(
                            (unsigned long) page_address(sg->page),
                            (sg->length + PAGE_SIZE-1) & PAGE_MASK);
 {
        int n;
 
-       if (direction == PCI_DMA_NONE)
-               BUG();
+       BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
                for (n = 0; n < nents; n++) {
-                       if (page_address(sg->page) == NULL) BUG();
+                       BUG_ON(page_address(sg->page) == NULL);
                        mmu_inval_dma_area(
                            (unsigned long) page_address(sg->page),
                            (sg->length + PAGE_SIZE-1) & PAGE_MASK);