/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
- gru_dbg(grudev, "gru %p\n", gru);
+ gru_dbg(grudev, "gid %d\n", gru->gs_gid);
STAT(asid_wrap);
gru->gs_asid_gen++;
- gru_flush_all_tlb(gru);
return MIN_ASID;
}
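
gru_wrap_asid() pairs the wrap with a bump of gs_asid_gen, and gru_load_mm_tracker() below compares that generation against the per-mm tracker, so stale ASIDs are detected lazily instead of being cleared eagerly on every wrap. A minimal standalone sketch of that invariant follows; all names and the MIN_ASID/MAX_ASID values here are hypothetical, not the driver's:

/*
 * Standalone sketch (not driver code): a wrapping ASID counter plus a
 * generation number lets stale per-mm ASIDs be detected lazily.
 */
#include <stdio.h>

#define MIN_ASID  8
#define MAX_ASID  16

struct sketch_gru {
	int asid;	/* next ASID to hand out */
	int asid_gen;	/* bumped on every wrap */
};

struct sketch_tracker {
	int mt_asid;
	int mt_asid_gen;
};

static int sketch_assign(struct sketch_gru *gru)
{
	gru->asid++;
	if (gru->asid >= MAX_ASID) {	/* hit the limit: start over */
		gru->asid_gen++;
		gru->asid = MIN_ASID;
	}
	return gru->asid;
}

/* A tracker entry is stale unless its generation matches the GRU's. */
static int sketch_asid_valid(struct sketch_gru *gru, struct sketch_tracker *t)
{
	return t->mt_asid != 0 && t->mt_asid_gen == gru->asid_gen;
}

int main(void)
{
	struct sketch_gru gru = { .asid = MIN_ASID, .asid_gen = 1 };
	struct sketch_tracker t = { 0, 0 };

	t.mt_asid = sketch_assign(&gru);
	t.mt_asid_gen = gru.asid_gen;
	for (int i = 0; i < MAX_ASID; i++)	/* force a wrap */
		sketch_assign(&gru);
	printf("valid after wrap: %d\n", sketch_asid_valid(&gru, &t));
	return 0;
}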
{
int i, gid, inuse_asid, limit;
- gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid);
+ gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
STAT(asid_next);
limit = MAX_ASID;
if (asid >= limit)
asid = gru_wrap_asid(gru);
+ gru_flush_all_tlb(gru);
gid = gru->gs_gid;
again:
for (i = 0; i < GRU_NUM_CCH; i++) {
if (!gru->gs_gts[i])
continue;
inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
- gru_dbg(grudev, "gru %p, inuse_asid 0x%x, cxtnum %d, gts %p\n",
- gru, inuse_asid, i, gru->gs_gts[i]);
+ gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
+ gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
+ inuse_asid, i);
if (inuse_asid == asid) {
asid += ASID_INC;
if (asid >= limit) {
}
gru->gs_asid_limit = limit;
gru->gs_asid = asid;
- gru_dbg(grudev, "gru %p, new asid 0x%x, new_limit 0x%x\n", gru, asid,
- limit);
+ gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
+ asid, limit);
return asid;
}
{
int asid;
- spin_lock(&gru->gs_asid_lock);
gru->gs_asid += ASID_INC;
asid = gru->gs_asid;
if (asid >= gru->gs_asid_limit)
asid = gru_reset_asid_limit(gru, asid);
- spin_unlock(&gru->gs_asid_lock);
- gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid);
+ gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
return asid;
}
* TLB management requires tracking all GRU chiplets that have loaded a GSEG
* context.
*/
-static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms,
- int ctxnum)
+static int gru_load_mm_tracker(struct gru_state *gru,
+ struct gru_thread_state *gts)
{
+ struct gru_mm_struct *gms = gts->ts_gms;
struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
- unsigned short ctxbitmap = (1 << ctxnum);
+ unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
int asid;
spin_lock(&gms->ms_asid_lock);
asid = asids->mt_asid;
- if (asid == 0 || asids->mt_asid_gen != gru->gs_asid_gen) {
+ spin_lock(&gru->gs_asid_lock);
+ if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
+ gru->gs_asid_gen)) {
asid = gru_assign_asid(gru);
asids->mt_asid = asid;
asids->mt_asid_gen = gru->gs_asid_gen;
} else {
STAT(asid_reuse);
}
+ spin_unlock(&gru->gs_asid_lock);
BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
asids->mt_ctxbitmap |= ctxbitmap;
spin_unlock(&gms->ms_asid_lock);
gru_dbg(grudev,
- "gru %x, gms %p, ctxnum 0x%d, asid 0x%x, asidmap 0x%lx\n",
- gru->gs_gid, gms, ctxnum, asid, gms->ms_asidmap[0]);
+ "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
+ gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
+ gms->ms_asidmap[0]);
return asid;
}
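
Two things about this rework are easy to miss. First, gs_asid_lock now nests inside ms_asid_lock, on both the load and unload paths. Second, the reuse test is narrowed: a stale generation alone no longer forces a new ASID; the old one is kept as long as any context on the chiplet still holds it (mt_ctxbitmap != 0), which lines up with gru_reset_asid_limit() above skipping ASIDs that loaded contexts still use. A standalone sketch of just the decision, with a hypothetical type and names:

/* Standalone sketch of the reuse test; sk_tracker is hypothetical. */
struct sk_tracker {
	unsigned short mt_ctxbitmap;	/* contexts still using the ASID */
	int mt_asid;			/* 0 = never assigned */
	int mt_asid_gen;		/* generation when assigned */
};

static int sk_needs_new_asid(const struct sk_tracker *t, int cur_gen)
{
	/* Assign a fresh ASID only if none was ever assigned, or if the
	 * old one is both unreferenced and from a stale generation. */
	return t->mt_asid == 0 ||
	       (t->mt_ctxbitmap == 0 && t->mt_asid_gen != cur_gen);
}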
static void gru_unload_mm_tracker(struct gru_state *gru,
- struct gru_mm_struct *gms, int ctxnum)
+ struct gru_thread_state *gts)
{
+ struct gru_mm_struct *gms = gts->ts_gms;
struct gru_mm_tracker *asids;
unsigned short ctxbitmap;
asids = &gms->ms_asids[gru->gs_gid];
- ctxbitmap = (1 << ctxnum);
+ ctxbitmap = (1 << gts->ts_ctxnum);
spin_lock(&gms->ms_asid_lock);
+ spin_lock(&gru->gs_asid_lock);
BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
asids->mt_ctxbitmap ^= ctxbitmap;
- gru_dbg(grudev, "gru %x, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
- gru->gs_gid, gms, ctxnum, gms->ms_asidmap[0]);
+ gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
+ gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
+ spin_unlock(&gru->gs_asid_lock);
spin_unlock(&gms->ms_asid_lock);
}
gts->ts_vma = vma;
gts->ts_tlb_int_select = -1;
gts->ts_gms = gru_register_mmu_notifier();
+ gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
if (!gts->ts_gms)
goto err;
struct gru_state *gru;
gru = gts->ts_gru;
- gru_dbg(grudev, "gts %p, gru %p\n", gts, gru);
+ gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);
spin_lock(&gru->gs_lock);
gru->gs_gts[gts->ts_ctxnum] = NULL;
__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
gts->ts_ctxnum = NULLCTX;
gts->ts_gru = NULL;
+ gts->ts_blade = -1;
spin_unlock(&gru->gs_lock);
gts_drop(gts);
return GRU_HANDLE_BYTES;
}
-static void gru_prefetch_context(void *gseg, void *cb, void *cbe, unsigned long cbrmap,
- unsigned long length)
+static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
+ unsigned long cbrmap, unsigned long length)
{
int i, scr;
zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
+ gru_dbg(grudev, "gts %p\n", gts);
lock_cch_handle(cch);
if (cch_interrupt_sync(cch))
BUG();
- gru_dbg(grudev, "gts %p\n", gts);
- gru_unload_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum);
+ gru_unload_mm_tracker(gru, gts);
if (savestate)
gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
ctxnum, gts->ts_cbr_map,
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
lock_cch_handle(cch);
- asid = gru_load_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum);
+ asid = gru_load_mm_tracker(gru, gts);
cch->tfm_fault_bit_enable =
(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
|| gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
cch->tlb_int_select = gts->ts_tlb_int_select;
}
cch->tfm_done_bit_enable = 0;
- err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map);
+ err = cch_allocate(cch, asid, gts->ts_sizeavail, gts->ts_cbr_map,
+ gts->ts_dsr_map);
if (err) {
gru_dbg(grudev,
"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
/*
* Update fields in an active CCH:
* - retarget interrupts on local blade
+ * - update sizeavail mask
* - force a delayed context unload by clearing the CCH asids. This
* forces TLB misses for new GRU instructions. The context is unloaded
* when the next TLB miss occurs.
*/
-static int gru_update_cch(struct gru_thread_state *gts, int int_select)
+int gru_update_cch(struct gru_thread_state *gts, int force_unload)
{
struct gru_context_configuration_handle *cch;
struct gru_state *gru = gts->ts_gru;
goto exit;
if (cch_interrupt(cch))
BUG();
- if (int_select >= 0) {
- gts->ts_tlb_int_select = int_select;
- cch->tlb_int_select = int_select;
+ if (!force_unload) {
+ for (i = 0; i < 8; i++)
+ cch->sizeavail[i] = gts->ts_sizeavail;
+ gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+ cch->tlb_int_select = gru_cpu_fault_map_id();
} else {
for (i = 0; i < 8; i++)
cch->asid[i] = 0;
gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
gru_cpu_fault_map_id());
- return gru_update_cch(gts, gru_cpu_fault_map_id());
+ return gru_update_cch(gts, 0);
}
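
The comment on gru_update_cch() above describes both modes, and this hunk shows the non-forced mode replacing the old int_select parameter. A compilable standalone sketch of the contract, with stub types and all names hypothetical:

/*
 * Standalone sketch of the force_unload contract (stubbed so it
 * compiles outside the kernel; sk_cch and sk_update_cch are hypothetical).
 */
#include <string.h>

struct sk_cch {
	int sizeavail[8];
	int asid[8];
	int tlb_int_select;
};

/* Mirrors the patched logic: refresh in place, or force a delayed unload. */
static void sk_update_cch(struct sk_cch *cch, int sizeavail, int cpu_map_id,
			  int force_unload)
{
	if (!force_unload) {
		for (int i = 0; i < 8; i++)
			cch->sizeavail[i] = sizeavail;	/* new page-size mask */
		cch->tlb_int_select = cpu_map_id;	/* retarget interrupt */
	} else {
		/* Zeroed ASIDs make every new instruction TLB-miss; the
		 * miss handler then unloads the context. */
		memset(cch->asid, 0, sizeof(cch->asid));
	}
}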
STAT(steal_context_failed);
}
gru_dbg(grudev,
- "stole gru %x, ctxnum %d from gts %p. Need cb %d, ds %d;"
+ "stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
" avail cb %ld, ds %ld\n",
gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
hweight64(gru->gs_dsr_map));
}
reserve_gru_resources(gru, gts);
gts->ts_gru = gru;
+ gts->ts_blade = gru->gs_blade_id;
gts->ts_ctxnum =
find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
STAT(assign_context);
gru_dbg(grudev,
- "gseg %p, gts %p, gru %x, ctx %d, cbr %d, dsr %d\n",
+ "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
gts->ts_gru->gs_gid, gts->ts_ctxnum,
gts->ts_cbr_au_count, gts->ts_dsr_au_count);
return VM_FAULT_SIGBUS;
again:
- preempt_disable();
mutex_lock(&gts->ts_ctxlock);
+ preempt_disable();
if (gts->ts_gru) {
if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) {
STAT(migrated_nopfn_unload);
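
One ordering note on the last hunk: mutex_lock() may sleep, so it must be taken before preempt_disable(), never after; the swap fixes a sleep-in-atomic bug. A userspace sketch of the same discipline, with pthreads standing in for the kernel primitives and hypothetical names:

/*
 * Userspace sketch of the ordering rule (pthread stand-ins): anything
 * that can block must be acquired before entering the region where
 * blocking is forbidden.
 */
#include <pthread.h>

static pthread_mutex_t sk_ctxlock = PTHREAD_MUTEX_INITIALIZER;

static void sk_locked_section(void (*atomic_body)(void))
{
	pthread_mutex_lock(&sk_ctxlock);  /* may block: take it first */
	/* -- start of "no blocking" region (preempt_disable() here) -- */
	atomic_body();
	/* -- end of region (preempt_enable()) -- */
	pthread_mutex_unlock(&sk_ctxlock);
}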