#define SGE_PAGE_SHIFT                 PAGE_SHIFT
 #define SGE_PAGE_ALIGN(addr)           PAGE_ALIGN(addr)
 
-#define BCM_RX_ETH_PAYLOAD_ALIGN       64
-
 /* SGE ring related macros */
 #define NUM_RX_SGE_PAGES               2
 #define RX_SGE_CNT             (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
 #define ETH_MAX_PACKET_SIZE            1500
 #define ETH_MAX_JUMBO_PACKET_SIZE      9600
 
+/* Max supported alignment is 256 (8 shift) */
+#define BNX2X_RX_ALIGN_SHIFT           ((L1_CACHE_SHIFT < 8) ? \
+                                        L1_CACHE_SHIFT : 8)
+#define BNX2X_RX_ALIGN                 (1 << BNX2X_RX_ALIGN_SHIFT)
+
        struct host_def_status_block *def_status_blk;
 #define DEF_SB_ID                      16
        u16                     def_c_idx;
 
        u16 ring_prod, cqe_ring_prod;
        int i, j;
 
-       bp->rx_buf_size = bp->dev->mtu;
-       bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
-               BCM_RX_ETH_PAYLOAD_ALIGN;
+       bp->rx_buf_size = bp->dev->mtu + bp->rx_offset + ETH_OVREHEAD +
+               BNX2X_RX_ALIGN;
+       DP(NETIF_MSG_IFUP,
+          "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
 
        if (bp->flags & TPA_ENABLE_FLAG) {
-               DP(NETIF_MSG_IFUP,
-                  "rx_buf_size %d  effective_mtu %d\n",
-                  bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
 
                for_each_rx_queue(bp, j) {
                        struct bnx2x_fastpath *fp = &bp->fp[j];
                context->ustorm_st_context.common.flags =
                        USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
                context->ustorm_st_context.common.mc_alignment_log_size =
-                       6 /*BCM_RX_ETH_PAYLOAD_ALIGN*/;
+                                               BNX2X_RX_ALIGN_SHIFT;
                context->ustorm_st_context.common.bd_buff_size =
                                                bp->rx_buf_size;
                context->ustorm_st_context.common.bd_page_base_hi =