/* We don't do dynamic PCI IRQ allocation */
}
+#define HAVE_PCI_MMAP
+
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
/*
* Dynamic DMA mapping stuff.
* MIPS has everything mapped statically.
EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
#endif
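+/*
+ * Backs HAVE_PCI_MMAP: maps PCI memory space uncached into user space.
+ * I/O-space requests are rejected.
+ */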
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ unsigned long prot;
+
+ /*
+ * I/O space can be accessed via normal processor loads and stores on
+ * this platform, but for now we choose not to support that, and
+ * portable drivers should not rely on it anyway.
+ */
+ if (mmap_state == pci_mmap_io)
+ return -EINVAL;
+
+ /*
+ * Ignore write-combine; for now only return uncached mappings.
+ */
+ prot = pgprot_val(vma->vm_page_prot);
+ prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
+ vma->vm_page_prot = __pgprot(prot);
+
+ return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
char * (*pcibios_plat_setup)(char *str) __devinitdata;
char *__devinit pcibios_setup(char *str)
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
+#include <linux/socket.h>
+#include <linux/un.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
/* long to avoid size mismatch warnings from gcc */
long sock;
int err;
- char file[256];
+ char file[UNIX_PATH_MAX];
if (umid_file_name("mconsole", file, sizeof(file)))
return -1;
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
if (class_pktcdvd) {
- pd->dev = device_create(class_pktcdvd, NULL, pd->pkt_dev, NULL,
+ pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
"%s", pd->name);
if (IS_ERR(pd->dev))
pd->dev = NULL;
packet->ack = RCODE_SEND_ERROR;
return -1;
}
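+ /* Record the mapping so the completion and cancel paths can unmap it. */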
+ packet->payload_bus = payload_bus;
d[2].req_count = cpu_to_le16(packet->payload_length);
d[2].data_address = cpu_to_le32(payload_bus);
struct driver_data *driver_data;
struct fw_packet *packet;
struct fw_ohci *ohci = context->ohci;
- dma_addr_t payload_bus;
int evt;
if (last->transfer_status == 0)
/* This packet was cancelled, just continue. */
return 1;
- payload_bus = le32_to_cpu(last->data_address);
- if (payload_bus != 0)
- dma_unmap_single(ohci->card.device, payload_bus,
+ if (packet->payload_bus)
+ dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
evt = le16_to_cpu(last->transfer_status) & 0x1f;
if (packet->ack != 0)
goto out;
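+ /* The packet was never handled, so its payload is still DMA-mapped. */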
+ if (packet->payload_bus)
+ dma_unmap_single(ohci->card.device, packet->payload_bus,
+ packet->payload_length, DMA_TO_DEVICE);
+
log_ar_at_event('T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
packet->speed = speed;
packet->generation = generation;
packet->ack = 0;
+ packet->payload_bus = 0;
}
/**
BUG();
return;
}
+
+ response->payload_bus = 0;
}
EXPORT_SYMBOL(fw_fill_response);
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/timer.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
size_t header_length;
void *payload;
size_t payload_length;
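+ /* nonzero while the payload is mapped for DMA */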
+ dma_addr_t payload_bus;
u32 timestamp;
/*
g = get_hpsb_generation(host);
for (i = 0; i < 4 ; i++) {
msleep_interruptible(63);
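+ /* cooperate with the freezer so suspend is not held up by this thread */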
+ try_to_freeze();
if (kthread_should_stop())
goto exit;
/* Sleep 3 seconds */
for (i = 3000/200; i; i--) {
msleep_interruptible(200);
+ try_to_freeze();
if (kthread_should_stop())
goto exit;
return err;
}
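+/* Device-name match table for new-style i2c binding. */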
+static const struct i2c_device_id ds1672_id[] = {
+ { "ds1672", 0 },
+ { }
+};
+
static struct i2c_driver ds1672_driver = {
.driver = {
.name = "rtc-ds1672",
},
.probe = &ds1672_probe,
.remove = &ds1672_remove,
+ .id_table = ds1672_id,
};
static int __init ds1672_init(void)
return 0;
}
+static const struct i2c_device_id max6900_id[] = {
+ { "max6900", 0 },
+ { }
+};
+
static struct i2c_driver max6900_driver = {
.driver = {
.name = "rtc-max6900",
},
.probe = max6900_probe,
.remove = max6900_remove,
+ .id_table = max6900_id,
};
static int __init max6900_init(void)
}
#else
-#define omap_rtc_ioctl NULL
+#define twl4030_rtc_ioctl NULL
#endif
static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPLi v2");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:s3c2440-uart");
softback_buf = 0UL;
for (i = 0; i < FB_MAX; i++) {
+ int pending;
+
mapped = 0;
info = registered_fb[i];
if (info == NULL)
continue;
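+ /* Make sure no queued cursor-blink work runs once the console is gone. */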
+ pending = cancel_work_sync(&info->queue);
+ DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
+ "no"));
+
for (j = first_fb_vc; j <= last_fb_vc; j++) {
if (con2fb_map[j] == i)
mapped = 1;
}
dev_dbg(dev, "fb phys 0x%llx 0x%lx\n",
- (u64)par->fb_base_phys, (ulong)par->mapped_vram);
+ (unsigned long long)par->fb_base_phys, (ulong)par->mapped_vram);
dev_dbg(dev, "mmio phys 0x%llx 0x%lx\n",
- (u64)par->mmio_base_phys, (ulong)par->mmio_len);
+ (unsigned long long)par->mmio_base_phys, (ulong)par->mmio_len);
if (mb862xx_pci_gdc_init(par))
goto io_unmap;
if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
EXT4_FREEBLOCKS_WATERMARK) {
- free_blocks = percpu_counter_sum(fbc);
- dirty_blocks = percpu_counter_sum(dbc);
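+ /* Near the watermark: take the exact sums instead of the cheap reads. */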
+ free_blocks = percpu_counter_sum_positive(fbc);
+ dirty_blocks = percpu_counter_sum_positive(dbc);
if (dirty_blocks < 0) {
printk(KERN_CRIT "Dirty block accounting "
"went wrong %lld\n",
watches = &inode->inotify_watches;
list_for_each_entry_safe(watch, next_w, watches, i_list) {
struct inotify_handle *ih= watch->ih;
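+ /* Pin the watch: the handler or the removal may drop the last reference. */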
+ get_inotify_watch(watch);
mutex_lock(&ih->mutex);
ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
NULL, NULL);
inotify_remove_watch_locked(ih, watch);
mutex_unlock(&ih->mutex);
+ put_inotify_watch(watch);
}
mutex_unlock(&inode->inotify_mutex);
iput(inode);
task->latency_record[i].time,
task->latency_record[i].max);
for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
- char sym[KSYM_NAME_LEN];
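+ /* sprint_symbol() also emits offset, size and module name */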
+ char sym[KSYM_SYMBOL_LEN];
char *c;
if (!task->latency_record[i].backtrace[q])
break;
return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}
-static unsigned long pte_to_pagemap_entry(pte_t pte)
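+/* pagemap entries are fixed 64-bit records, even on 32-bit kernels */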
+static u64 pte_to_pagemap_entry(pte_t pte)
{
- unsigned long pme = 0;
+ u64 pme = 0;
if (is_swap_pte(pte))
pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
#define atomic_long_cmpxchg(l, old, new) \
(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
- (atomic_xchg((atomic_t *)(l), (new)))
+ (atomic_xchg((atomic_t *)(v), (new)))
#endif /* BITS_PER_LONG == 64 */
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/module.h>
#include <linux/kallsyms.h>
#ifdef CONFIG_FUNCTION_TRACER
struct boot_trace {
pid_t caller;
- char func[KSYM_NAME_LEN];
+ char func[KSYM_SYMBOL_LEN];
int result;
unsigned long long duration; /* usecs */
ktime_t calltime;
latency_record[i].time,
latency_record[i].max);
for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
- char sym[KSYM_NAME_LEN];
+ char sym[KSYM_SYMBOL_LEN];
char *c;
if (!latency_record[i].backtrace[q])
break;
if (ret < 0)
break;
else if (!ret) {
- if (spliced)
- break;
- if (flags & SPLICE_F_NONBLOCK) {
+ if (flags & SPLICE_F_NONBLOCK)
ret = -EAGAIN;
- break;
- }
+ break;
}
*ppos += ret;
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
- *pcount = 0;
}
- fbc->count = ret;
-
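+ /* The sum is read-only now; per-CPU deltas are left in place. */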
spin_unlock(&fbc->lock);
return ret;
}
if (!fbc->counters)
return;
- free_percpu(fbc->counters);
- fbc->counters = NULL;
#ifdef CONFIG_HOTPLUG_CPU
mutex_lock(&percpu_counters_lock);
list_del(&fbc->list);
mutex_unlock(&percpu_counters_lock);
#endif
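+ /* Free only now that the counter is off the hotplug list. */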
+ free_percpu(fbc->counters);
+ fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
int ret = 0;
struct device *dev;
- if (WARN_ON(bdi->dev))
+ if (bdi->dev) /* The driver needs to use separate queues per device */
goto exit;
va_start(args, fmt);
/*
* Determine the nodes of an array of pages and store it in an array of status.
*/
-static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
- const void __user * __user *pages,
- int __user *status)
+static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
+ const void __user **pages, int *status)
{
unsigned long i;
- int err;
down_read(&mm->mmap_sem);
for (i = 0; i < nr_pages; i++) {
- const void __user *p;
- unsigned long addr;
+ unsigned long addr = (unsigned long)(*pages);
struct vm_area_struct *vma;
struct page *page;
-
- err = -EFAULT;
- if (get_user(p, pages+i))
- goto out;
- addr = (unsigned long) p;
+ int err;
vma = find_vma(mm, addr);
if (!vma)
err = page_to_nid(page);
set_status:
- put_user(err, status+i);
+ *status = err;
+
+ pages++;
+ status++;
+ }
+
+ up_read(&mm->mmap_sem);
+}
+
+/*
+ * Determine the nodes of a user array of pages and store them in
+ * a user array of status values.
+ */
+static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
+ const void __user * __user *pages,
+ int __user *status)
+{
+#define DO_PAGES_STAT_CHUNK_NR 16
+ const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
+ int chunk_status[DO_PAGES_STAT_CHUNK_NR];
+ unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
+ int err;
+
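+ /*
+ * Work in small on-stack chunks rather than allocating a buffer
+ * sized by the user-supplied nr_pages.
+ */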
+ for (i = 0; i < nr_pages; i += chunk_nr) {
+ if (chunk_nr + i > nr_pages)
+ chunk_nr = nr_pages - i;
+
+ err = copy_from_user(chunk_pages, &pages[i],
+ chunk_nr * sizeof(*chunk_pages));
+ if (err) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
+
+ err = copy_to_user(&status[i], chunk_status,
+ chunk_nr * sizeof(*chunk_status));
+ if (err) {
+ err = -EFAULT;
+ goto out;
+ }
}
err = 0;
out:
- up_read(&mm->mmap_sem);
return err;
}
start_pfn = NODE_DATA(nid)->node_start_pfn;
nr_pages = NODE_DATA(nid)->node_spanned_pages;
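+ /* skip memoryless nodes */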
+ if (!nr_pages)
+ return 0;
+
table_size = sizeof(struct page_cgroup) * nr_pages;
base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
for (i = 0; i < t.count; i++) {
struct location *l = &t.loc[i];
- if (len > PAGE_SIZE - 100)
+ if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
break;
len += sprintf(buf + len, "%7ld ", l->count);
put_cpu();
}
-#if defined(CONFIG_NUMA) || defined(CONFIG_UNEVICTABLE_LRU)
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
lru_add_drain();
return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
-#else
-
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
-{
- lru_add_drain();
- return 0;
-}
-#endif
-
/*
* Batched page_cache_release(). Decrement the reference count on all the
* passed pages. If it fell to zero then remove the page from the LRU and
v->addr, v->addr + v->size, v->size);
if (v->caller) {
- char buff[2 * KSYM_NAME_LEN];
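+ /* KSYM_SYMBOL_LEN bounds sprint_symbol() output */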
+ char buff[KSYM_SYMBOL_LEN];
seq_putc(m, ' ');
sprint_symbol(buff, (unsigned long)v->caller);