Age | Commit message | Author | Files | Lines
2017-09-28 | Create test suite for Euphrates release | chenjiankun | 3 | -0/+138
2017-09-28 | task: fixup s/exe_info/exc_info | Ross Brattain | 1 | -1/+1
2017-09-25 | tg_rfc2544_ixia: log exceptions and fix list iter | Ross Brattain | 2 | -14/+20
2017-09-25 | Add service in kubernetes context | chenjiankun | 5 | -21/+181
2017-09-25 | bugfix: tc025 should use ipmi to poweroff | root | 3 | -32/+37
2017-09-25 | Introduced timeout to post method of HttpClient | Maciej Skrocki | 2 | -11/+15
2017-09-25 | tg_rfc2544_ixia: use traffic id from JSON to update mac | Ross Brattain | 3 | -42/+55
2017-09-25 | Bumped samplevnfs flavor disk size to 6G. | Maciej Skrocki | 20 | -29/+28
2017-09-25 | ixia: renamed execute to execute_traffic | Ross Brattain | 1 | -2/+2
2017-09-25 | nsb_setup.sh: update to Ansible 2.3.2 | Ross Brattain | 1 | -1/+1
2017-09-25 | cgnapt: mock time.sleep in unitests | Ross Brattain | 1 | -2/+4
2017-09-25 | requirements: bump Ansible version to 2.3 | Ross Brattain | 1 | -1/+1
2017-09-25 | Tweaked nsb_setup to always re-download latest container | Maciej Skrocki | 1 | -2/+4
2017-09-25 | prox: fix TG KPIs | Ross Brattain | 4 | -12/+21
2017-09-25 | Addition of PROX NSB tests to yardstick | DanielMartinBuckley | 36 | -125/+11531
2017-09-25 | prox: use find_relative_file when uploading | Ross Brattain | 2 | -2/+3
2017-09-25 | add opnfv_os-odl-fdio-dvr-ha_daily.yaml scenario | Ross Brattain | 1 | -0/+38
2017-09-25 | prox: pass prox_config_dict between Processes using queue | Ross Brattain | 9 | -93/+86
2017-09-25 | Fixed few gaps when running on a minimal version of system | Maciej Skrocki | 2 | -1/+2
2017-09-25 | prox testcases: private -> uplink,public -> downlink, vnf_0, tg_0 | Ross Brattain | 22 | -258/+346
2017-09-25 | UDP_Replay: enable collectd KPIs | Ross Brattain | 1 | -1/+1
2017-09-25 | Heat: use pkey from string instead of key_filename | Ross Brattain | 3 | -16/+30
2017-09-25 | Generate pod.yaml from current context | Ross Brattain | 5 | -3/+49
2017-09-25 | Add host&targer in scenario['options']['server_name'] support | chenjiankun | 1 | -31/+37
2017-09-25 | Log each test case status in a task | chenjiankun | 1 | -2/+5
2017-09-25 | ansible: resize VNF image | Ross Brattain | 1 | -0/+18
2017-09-19 | add scenario opnfv_os-nosdn-bar-ha_daily.yaml for Barometer | Ross Brattain | 2 | -0/+223
2017-09-19 | NSB: count from 0, vnf_0, tg_9, private->uplink, public->downlink | Ross Brattain | 151 | -1062/+1088
2017-09-19 | apiserver: mock socket.gethostbyname | Ross Brattain | 1 | -0/+6
2017-09-18 | NSB test: remove vld_id it is not needed | Ross Brattain | 8 | -16/+0
2017-09-18 | Fixing vPE config creation anf bring up issues. | Deepak S | 1 | -2/+0
2017-09-18 | Fix all the exception while genereating the latency traffic | Deepak S | 2 | -23/+19
2017-09-18 | Terminate the ixia traffic generator at the end of the test run | Deepak S | 1 | -19/+22
2017-09-18 | Added multi-port testcases for vFW | Deepak S | 5 | -104/+204
2017-09-18 | rename odl_l3 tests to just odl | Ross Brattain | 3 | -0/+166
2017-09-18 | Fix the offset bug for UDP dst port in the traffic profile | Martin Banszel | 2 | -11/+14
2017-09-18 | vnf_generic: adjust ssh timeout for number of VNFs | Ross Brattain | 1 | -4/+9
2017-09-18 | NSB: fix port topology | Martin Banszel | 75 | -1688/+2360
2017-09-15 | Merge "add tc083 rst and tc073/tc083 netperf local dashboard" | Rex Lee | 3 | -0/+713
2017-09-15 | Merge "bugfix: show testcase api return type error" | Rex Lee | 1 | -1/+7
2017-09-15 | Merge "tc006: fio job_file.ini the volume should be configed" | Rex Lee | 1 | -1/+2
2017-09-15 | Merge "KVMFORNFV: Update Grafana config for Packet Forwarding test cases." | Rex Lee | 1 | -51/+2811
2017-09-15 | Merge "improve logging, clear using print" | Rex Lee | 9 | -43/+40
2017-09-15 | Merge "[DRAFT] add yardstick cli info in userguide" | Rex Lee | 1 | -2/+136
2017-09-15 | Merge "tc063: make node parameter can be pass" | Rex Lee | 1 | -2/+2
2017-09-15 | add tc083 rst and tc073/tc083 netperf local dashboard | rexlee8776 | 3 | -0/+713
2017-09-15 | Merge "Create opnfv_k8-ovn-lb-noha_daily.yaml test suite file" | Jing Lu | 1 | -0/+18
2017-09-15 | Merge "tc011: make parameters to be able to config" | Rex Lee | 1 | -4/+8
2017-09-14 | Merge "ssh fix, always wait" | Ross Brattain | 3 | -2/+5
2017-09-14 | KVMFORNFV: Update Grafana config for Packet Forwarding test cases. | kalyanreddy | 1 | -51/+2811
static int no_free_segments(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	return super->s_free_list.count;
}

/* journal has distance -1, top-most ifile layer distance 0 */
static u8 root_distance(struct super_block *sb, gc_level_t __gc_level)
{
	struct logfs_super *super = logfs_super(sb);
	u8 gc_level = (__force u8)__gc_level;

	switch (gc_level) {
	case 0: /* fall through */
	case 1: /* fall through */
	case 2: /* fall through */
	case 3:
		/* file data or indirect blocks */
		return super->s_ifile_levels + super->s_iblock_levels - gc_level;
	case 6: /* fall through */
	case 7: /* fall through */
	case 8: /* fall through */
	case 9:
		/* inode file data or indirect blocks */
		return super->s_ifile_levels - (gc_level - 6);
	default:
		printk(KERN_ERR"LOGFS: segment of unknown level %x found\n",
				gc_level);
		WARN_ON(1);
		return super->s_ifile_levels + super->s_iblock_levels;
	}
}

static int segment_is_reserved(struct super_block *sb, u32 segno)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_area *area;
	void *reserved;
	int i;

	/* Some segments are reserved.  Just pretend they were all valid */
	reserved = btree_lookup32(&super->s_reserved_segments, segno);
	if (reserved)
		return 1;

	/* Currently open segments */
	for_each_area(i) {
		area = super->s_area[i];
		if (area->a_is_open && area->a_segno == segno)
			return 1;
	}

	return 0;
}

static void logfs_mark_segment_bad(struct super_block *sb, u32 segno)
{
	BUG();
}

/*
 * Returns the bytes consumed by valid objects in this segment.  Object headers
 * are counted, the segment header is not.
 */
static u32 logfs_valid_bytes(struct super_block *sb, u32 segno, u32 *ec,
		gc_level_t *gc_level)
{
	struct logfs_segment_entry se;
	u32 ec_level;

	logfs_get_segment_entry(sb, segno, &se);
	if (se.ec_level == cpu_to_be32(BADSEG) ||
			se.valid == cpu_to_be32(RESERVED))
		return RESERVED;

	ec_level = be32_to_cpu(se.ec_level);
	*ec = ec_level >> 4;
	*gc_level = GC_LEVEL(ec_level & 0xf);

	return be32_to_cpu(se.valid);
}

static void logfs_cleanse_block(struct super_block *sb, u64 ofs, u64 ino,
		u64 bix, gc_level_t gc_level)
{
	struct inode *inode;
	int err, cookie;

	inode = logfs_safe_iget(sb, ino, &cookie);
	err = logfs_rewrite_block(inode, bix, ofs, gc_level, 0);
	BUG_ON(err);
	logfs_safe_iput(inode, cookie);
}

static u32 logfs_gc_segment(struct super_block *sb, u32 segno)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_segment_header sh;
	struct logfs_object_header oh;
	u64 ofs, ino, bix;
	u32 seg_ofs, logical_segno, cleaned = 0;
	int err, len, valid;
	gc_level_t gc_level;

	LOGFS_BUG_ON(segment_is_reserved(sb, segno), sb);

	btree_insert32(&super->s_reserved_segments, segno, (void *)1, GFP_NOFS);
	err = wbuf_read(sb, dev_ofs(sb, segno, 0), sizeof(sh), &sh);
	BUG_ON(err);
	gc_level = GC_LEVEL(sh.level);
	logical_segno = be32_to_cpu(sh.segno);
	if (sh.crc != logfs_crc32(&sh, sizeof(sh), 4)) {
		logfs_mark_segment_bad(sb, segno);
		cleaned = -1;
		goto out;
	}

	for (seg_ofs = LOGFS_SEGMENT_HEADERSIZE;
			seg_ofs + sizeof(oh) < super->s_segsize; ) {
		ofs = dev_ofs(sb, logical_segno, seg_ofs);
		err = wbuf_read(sb, dev_ofs(sb, segno, seg_ofs), sizeof(oh),
				&oh);
		BUG_ON(err);

		if (!memchr_inv(&oh, 0xff, sizeof(oh)))
			break;

		if (oh.crc != logfs_crc32(&oh, sizeof(oh) - 4, 4)) {
			logfs_mark_segment_bad(sb, segno);
			cleaned = super->s_segsize - 1;
			goto out;
		}

		ino = be64_to_cpu(oh.ino);
		bix = be64_to_cpu(oh.bix);
		len = sizeof(oh) + be16_to_cpu(oh.len);
		valid = logfs_is_valid_block(sb, ofs, ino, bix, gc_level);
		if (valid == 1) {
			logfs_cleanse_block(sb, ofs, ino, bix, gc_level);
			cleaned += len;
		} else if (valid == 2) {
			/* Will be invalid upon journal
			 * commit */
			cleaned += len;
		}
		seg_ofs += len;
	}
out:
	btree_remove32(&super->s_reserved_segments, segno);
	return cleaned;
}

static struct gc_candidate *add_list(struct gc_candidate *cand,
		struct candidate_list *list)
{
	struct rb_node **p = &list->rb_tree.rb_node;
	struct rb_node *parent = NULL;
	struct gc_candidate *cur;
	int comp;

	cand->list = list;
	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct gc_candidate, rb_node);
		if (list->sort_by_ec)
			comp = cand->erase_count < cur->erase_count;
		else
			comp = cand->valid < cur->valid;

		if (comp)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&cand->rb_node, parent, p);
	rb_insert_color(&cand->rb_node, &list->rb_tree);

	if (list->count <= list->maxcount) {
		list->count++;
		return NULL;
	}
	cand = rb_entry(rb_last(&list->rb_tree), struct gc_candidate, rb_node);
	rb_erase(&cand->rb_node, &list->rb_tree);
	cand->list = NULL;
	return cand;
}

static void remove_from_list(struct gc_candidate *cand)
{
	struct candidate_list *list = cand->list;

	rb_erase(&cand->rb_node, &list->rb_tree);
	list->count--;
}

static void free_candidate(struct super_block *sb, struct gc_candidate *cand)
{
	struct logfs_super *super = logfs_super(sb);

	btree_remove32(&super->s_cand_tree, cand->segno);
	kfree(cand);
}

u32 get_best_cand(struct super_block *sb, struct candidate_list *list, u32 *ec)
{
	struct gc_candidate *cand;
	u32 segno;

	BUG_ON(list->count == 0);

	cand = rb_entry(rb_first(&list->rb_tree), struct gc_candidate, rb_node);
	remove_from_list(cand);
	segno = cand->segno;
	if (ec)
		*ec = cand->erase_count;
	free_candidate(sb, cand);
	return segno;
}

/*
 * We have several lists to manage segments with.  The reserve_list is used to
 * deal with bad blocks.  We try to keep the best (lowest ec) segments on this
 * list.
 * The free_list contains free segments for normal usage.  It usually gets the
 * second pick after the reserve_list.  But when the free_list is running short
 * it is more important to keep the free_list full than to keep a reserve.
 *
 * Segments that are not free are put onto a per-level low_list.  If we have
 * to run garbage collection, we pick a candidate from there.  All segments on
 * those lists should have at least some free space so GC will make progress.
 *
 * And last we have the ec_list, which is used to pick segments for wear
 * leveling.
 *
 * If all appropriate lists are full, we simply free the candidate and forget
 * about that segment for a while.  We have better candidates for each purpose.
 */
static void __add_candidate(struct super_block *sb, struct gc_candidate *cand)
{
	struct logfs_super *super = logfs_super(sb);
	u32 full = super->s_segsize - LOGFS_SEGMENT_RESERVE;

	if (cand->valid == 0) {
		/* 100% free segments */
		log_gc_noisy("add reserve segment %x (ec %x) at %llx\n",
				cand->segno, cand->erase_count,
				dev_ofs(sb, cand->segno, 0));
		cand = add_list(cand, &super->s_reserve_list);
		if (cand) {
			log_gc_noisy("add free segment %x (ec %x) at %llx\n",
					cand->segno, cand->erase_count,
					dev_ofs(sb, cand->segno, 0));
			cand = add_list(cand, &super->s_free_list);
		}
	} else {
		/* good candidates for Garbage Collection */
		if (cand->valid < full)
			cand = add_list(cand, &super->s_low_list[cand->dist]);
		/* good candidates for wear leveling,
		 * segments that were recently written get ignored */
		if (cand)
			cand = add_list(cand, &super->s_ec_list);
	}
	if (cand)
		free_candidate(sb, cand);
}

static int add_candidate(struct super_block *sb, u32 segno, u32 valid, u32 ec,
		u8 dist)
{
	struct logfs_super *super = logfs_super(sb);
	struct gc_candidate *cand;

	cand = kmalloc(sizeof(*cand), GFP_NOFS);
	if (!cand)
		return -ENOMEM;

	cand->segno = segno;
	cand->valid = valid;
	cand->erase_count = ec;
	cand->dist = dist;

	btree_insert32(&super->s_cand_tree, segno, cand, GFP_NOFS);
	__add_candidate(sb, cand);
	return 0;
}

static void remove_segment_from_lists(struct super_block *sb, u32 segno)
{
	struct logfs_super *super = logfs_super(sb);
	struct gc_candidate *cand;

	cand = btree_lookup32(&super->s_cand_tree, segno);
	if (cand) {
		remove_from_list(cand);
		free_candidate(sb, cand);
	}
}

static void scan_segment(struct super_block *sb, u32 segno)
{
	u32 valid, ec = 0;
	gc_level_t gc_level = 0;
	u8 dist;

	if (segment_is_reserved(sb, segno))
		return;

	remove_segment_from_lists(sb, segno);
	valid = logfs_valid_bytes(sb, segno, &ec, &gc_level);
	if (valid == RESERVED)
		return;

	dist = root_distance(sb, gc_level);
	add_candidate(sb, segno, valid, ec, dist);
}

static struct gc_candidate *first_in_list(struct candidate_list *list)
{
	if (list->count == 0)
		return NULL;
	return rb_entry(rb_first(&list->rb_tree), struct gc_candidate, rb_node);
}

/*
 * Find the best segment for garbage collection.  Main criterion is
 * the segment requiring the least effort to clean.  Secondary
 * criterion is to GC on the lowest level available.
 *
 * So we search the least effort segment on the lowest level first,
 * then move up and pick another segment iff it requires significantly
 * less effort.  Hence the LOGFS_MAX_OBJECTSIZE in the comparison.
 */
static struct gc_candidate *get_candidate(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	int i, max_dist;
	struct gc_candidate *cand = NULL, *this;

	max_dist = min(no_free_segments(sb), LOGFS_NO_AREAS - 1);

	for (i = max_dist; i >= 0; i--) {
		this = first_in_list(&super->s_low_list[i]);
		if (!this)
			continue;
		if (!cand)
			cand = this;
		if (this->valid + LOGFS_MAX_OBJECTSIZE <= cand->valid)
			cand = this;
	}
	return cand;
}

static int __logfs_gc_once(struct super_block *sb, struct gc_candidate *cand)
{
	struct logfs_super *super = logfs_super(sb);
	gc_level_t gc_level;
	u32 cleaned, valid, segno, ec;
	u8 dist;

	if (!cand) {
		log_gc("GC attempted, but no candidate found\n");
		return 0;
	}

	segno = cand->segno;
	dist = cand->dist;
	valid = logfs_valid_bytes(sb, segno, &ec, &gc_level);
	free_candidate(sb, cand);
	log_gc("GC segment #%02x at %llx, %x required, %x free, %x valid, %llx free\n",
			segno, (u64)segno << super->s_segshift,
			dist, no_free_segments(sb), valid,
			super->s_free_bytes);
	cleaned = logfs_gc_segment(sb, segno);
	log_gc("GC segment #%02x complete - now %x valid\n", segno,
			valid - cleaned);
	BUG_ON(cleaned != valid);
	return 1;
}

static int logfs_gc_once(struct super_block *sb)
{
	struct gc_candidate *cand;

	cand = get_candidate(sb);
	if (cand)
		remove_from_list(cand);
	return __logfs_gc_once(sb, cand);
}

/* returns 1 if a wrap occurs, 0 otherwise */
static int logfs_scan_some(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	u32 segno;
	int i, ret = 0;

	segno = super->s_sweeper;
	for (i = SCAN_RATIO; i > 0; i--) {
		segno++;
		if (segno >= super->s_no_segs) {
			segno = 0;
			ret = 1;
			/* Break out of the loop.  We want to read a single
			 * block from the segment size on next invocation if
			 * SCAN_RATIO is set to match block size
			 */
			break;
		}

		scan_segment(sb, segno);
	}
	super->s_sweeper = segno;
	return ret;
}

/*
 * In principle, this function should loop forever, looking for GC candidates
 * and moving data.  LogFS is designed in such a way that this loop is
 * guaranteed to terminate.
 *
 * Limiting the loop to some iterations serves purely to catch cases when
 * these guarantees have failed.  An actual endless loop is an obvious bug
 * and should be reported as such.
 */
static void __logfs_gc_pass(struct super_block *sb, int target)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_block *block;
	int round, progress, last_progress = 0;

	/*
	 * Doing too many changes to the segfile at once would result
	 * in a large number of aliases.  Write the journal before
	 * things get out of hand.
	 */
	if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES)
		logfs_write_anchor(sb);

	if (no_free_segments(sb) >= target &&
			super->s_no_object_aliases < MAX_OBJ_ALIASES)
		return;

	log_gc("__logfs_gc_pass(%x)\n", target);
	for (round = 0; round < SCAN_ROUNDS; ) {
		if (no_free_segments(sb) >= target)
			goto write_alias;

		/* Sync in-memory state with on-medium state in case they
		 * diverged */
		logfs_write_anchor(sb);
		round += logfs_scan_some(sb);
		if (no_free_segments(sb) >= target)
			goto write_alias;
		progress = logfs_gc_once(sb);
		if (progress)
			last_progress = round;
		else if (round - last_progress > 2)
			break;
		continue;

		/*
		 * The goto logic is nasty, I just don't know a better way to
		 * code it.  GC is supposed to ensure two things:
		 * 1. Enough free segments are available.
		 * 2. The number of aliases is bounded.
		 * When 1. is achieved, we take a look at 2. and write back
		 * some alias-containing blocks, if necessary.  However, after
		 * each such write we need to go back to 1., as writes can
		 * consume free segments.
		 */
write_alias:
		if (super->s_no_object_aliases < MAX_OBJ_ALIASES)
			return;
		if (list_empty(&super->s_object_alias)) {
			/* All aliases are still in btree */
			return;
		}
		log_gc("Write back one alias\n");
		block = list_entry(super->s_object_alias.next,
				struct logfs_block, alias_list);
		block->ops->write_block(block);
		/*
		 * To round off the nasty goto logic, we reset round here.  It
		 * is a safety-net for GC not making any progress and limited
		 * to something reasonably small.  If we incremented it for
		 * every single alias, the loop could terminate rather quickly.
		 */
		round = 0;
	}
	LOGFS_BUG(sb);
}

static int wl_ratelimit(struct super_block *sb, u64 *next_event)
{
	struct logfs_super *super = logfs_super(sb);

	if (*next_event < super->s_gec) {
		*next_event = super->s_gec + WL_RATELIMIT;
		return 0;
	}
	return 1;
}

static void logfs_wl_pass(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	struct gc_candidate *wl_cand, *free_cand;

	if (wl_ratelimit(sb, &super->s_wl_gec_ostore))
		return;

	wl_cand = first_in_list(&super->s_ec_list);
	if (!wl_cand)
		return;
	free_cand = first_in_list(&super->s_free_list);
	if (!free_cand)
		return;

	if (wl_cand->erase_count < free_cand->erase_count + WL_DELTA) {
		remove_from_list(wl_cand);
		__logfs_gc_once(sb, wl_cand);
	}
}

/*
 * The journal needs wear leveling as well.  But moving the journal is an
 * expensive operation so we try to avoid it as much as possible.  And if we
 * have to do it, we move the whole journal, not individual segments.
 *
 * Ratelimiting is not strictly necessary here, it mainly serves to avoid the
 * calculations.  First we check whether moving the journal would be a
 * significant improvement.  That means that a) the current journal segments
 * have more wear than the future journal segments and b) the current journal
 * segments have more wear than normal ostore segments.
 * Rationale for b) is that we don't have to move the journal if it is aging
 * less than the ostore, even if the reserve segments age even less (they are
 * excluded from wear leveling, after all).
 * Next we check that the superblocks have less wear than the journal.  Since
 * moving the journal requires writing the superblocks, we have to protect the
 * superblocks even more than the journal.
 *
 * Also we double the acceptable wear difference, compared to ostore wear
 * leveling.  Journal data is read and rewritten rapidly, comparatively.  So
 * soft errors have much less time to accumulate and we allow the journal to
 * be a bit worse than the ostore.
 */
static void logfs_journal_wl_pass(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	struct gc_candidate *cand;
	u32 min_journal_ec = -1, max_reserve_ec = 0;
	int i;

	if (wl_ratelimit(sb, &super->s_wl_gec_journal))
		return;

	if (super->s_reserve_list.count < super->s_no_journal_segs) {
		/* Reserve is not full enough to move complete journal */
		return;
	}

	journal_for_each(i)
		if (super->s_journal_seg[i])
			min_journal_ec = min(min_journal_ec,
					super->s_journal_ec[i]);
	cand = rb_entry(rb_first(&super->s_free_list.rb_tree),
			struct gc_candidate, rb_node);
	max_reserve_ec = cand->erase_count;
	for (i = 0; i < 2; i++) {
		struct logfs_segment_entry se;
		u32 segno = seg_no(sb, super->s_sb_ofs[i]);
		u32 ec;

		logfs_get_segment_entry(sb, segno, &se);
		ec = be32_to_cpu(se.ec_level) >> 4;
		max_reserve_ec = max(max_reserve_ec, ec);
	}

	if (min_journal_ec > max_reserve_ec + 2 * WL_DELTA) {
		do_logfs_journal_wl_pass(sb);
	}
}

void logfs_gc_pass(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	//BUG_ON(mutex_trylock(&logfs_super(sb)->s_w_mutex));
	/* Write journal before free space is getting saturated with dirty
	 * objects.
	 */
	if (super->s_dirty_used_bytes + super->s_dirty_free_bytes
			+ LOGFS_MAX_OBJECTSIZE >= super->s_free_bytes)
		logfs_write_anchor(sb);
	__logfs_gc_pass(sb, super->s_total_levels);
	logfs_wl_pass(sb);
	logfs_journal_wl_pass(sb);
}

static int check_area(struct super_block *sb, int i)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_area *area = super->s_area[i];
	gc_level_t gc_level;
	u32 cleaned, valid, ec;
	u32 segno = area->a_segno;
	u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);

	if (!area->a_is_open)
		return 0;

	if (super->s_devops->can_write_buf(sb, ofs) == 0)
		return 0;

	printk(KERN_INFO"LogFS: Possibly incomplete write at %llx\n", ofs);
	/*
	 * The device cannot write back the write buffer.  Most likely the
	 * wbuf was already written out and the system crashed at some point
	 * before the journal commit happened.  In that case we wouldn't have
	 * to do anything.  But if the crash happened before the wbuf was
	 * written out correctly, we must GC this segment.  So assume the
	 * worst and always do the GC run.
	 */
	area->a_is_open = 0;
	valid = logfs_valid_bytes(sb, segno, &ec, &gc_level);
	cleaned = logfs_gc_segment(sb, segno);
	if (cleaned != valid)
		return -EIO;
	return 0;
}

int logfs_check_areas(struct super_block *sb)
{
	int i, err;

	for_each_area(i) {
		err = check_area(sb, i);
		if (err)
			return err;
	}
	return 0;
}

static void logfs_init_candlist(struct candidate_list *list, int maxcount,
		int sort_by_ec)
{
	list->count = 0;
	list->maxcount = maxcount;
	list->sort_by_ec = sort_by_ec;
	list->rb_tree = RB_ROOT;
}

int logfs_init_gc(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	int i;

	btree_init_mempool32(&super->s_cand_tree, super->s_btree_pool);
	logfs_init_candlist(&super->s_free_list, LIST_SIZE + SCAN_RATIO, 1);
	logfs_init_candlist(&super->s_reserve_list,
			super->s_bad_seg_reserve, 1);
	for_each_area(i)
		logfs_init_candlist(&super->s_low_list[i], LIST_SIZE, 0);
	logfs_init_candlist(&super->s_ec_list, LIST_SIZE, 1);
	return 0;
}

static void logfs_cleanup_list(struct super_block *sb,
		struct candidate_list *list)
{
	struct gc_candidate *cand;

	while (list->count) {
		cand = rb_entry(list->rb_tree.rb_node, struct gc_candidate,
				rb_node);
		remove_from_list(cand);
		free_candidate(sb, cand);
	}
	BUG_ON(list->rb_tree.rb_node);
}

void logfs_cleanup_gc(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	int i;

	if (!super->s_free_list.count)
		return;

	/*
	 * FIXME: The btree may still contain a single empty node.  So we
	 * call the grim visitor to clean up that mess.  Btree code should
	 * do it for us, really.
	 */
	btree_grim_visitor32(&super->s_cand_tree, 0, NULL);
	logfs_cleanup_list(sb, &super->s_free_list);
	logfs_cleanup_list(sb, &super->s_reserve_list);
	for_each_area(i)
		logfs_cleanup_list(sb, &super->s_low_list[i]);
	logfs_cleanup_list(sb, &super->s_ec_list);
}