Open minzu0306 opened 8 years ago
/* NOTE(review): call signatures under discussion — pseudocode, not
 * compilable. The unlink call below has a dangling comma where the
 * elem_delete_cause argument should go. */
do_set_node_unlink(engine, info, node, hidx)
do_set_elem_unlink(engine, info, node, hidx, NULL, elem, )
do_set_elem_traverse_dfs(engine, info, info->root, count, true, NULL)
내 생각으로는 do_set_elem_traverse_dfs()에서 `if (child_node->tot_hash_cnt == 0 && child_node->tot_elem_cnt < (SET_MAX_HASHCHAIN_SIZE/2))` 조건을 `if (child_node->tot_hash_cnt == 0 && child_node->tot_elem_cnt == 0)`으로 바꾸면 될 것 같다. 하지만 그러면 elem이 없는 node들까지 쭉 훑게 되므로 오버헤드가 생기게 된다.
/* Sketch (pseudocode): the fast-delete entry point runs the fast
 * traversal from the root, deleting a batch of 30 elements per call. */
do_set_elem_delete_fast(engine, info, 30){
do_set_elem_traverse_fast(engine, info, info->root, count)
}
/* Sketch of the fast traversal (incomplete pseudocode):
 *  - first recurse into child hash nodes, unlinking any child that
 *    becomes completely empty (tot_hash_cnt == 0 && tot_elem_cnt == 0);
 *  - then walk this node's own element chains.
 * NOTE(review): the element loop below is unfinished — the inner
 * "if (" has no condition and the braces do not balance. */
do_set_elem_traverse_fast(engine, info, info->root, count){
if(node->tot_hash_cnt >0){
for (hidx = 0; hidx <node->tot_hash_cnt; hidx++){
set_hash_node *child_node
fcnt += do_set_elem_traverse_dfs();
if ( child_node->tot_hash_cnt == 0 && child_node->tot_elem_cnt == 0)
do_set_node_unlink(engine, info, node, hidx);
}
}
for (hidx=0; hidx<node->tot_elem_cnt; hidx++){
if ( count < tot_elem_cnt){
if (
}
if(node->hcnt[hidx] >0){
set_elem_item *elem = node->htab[hidx];
}
/* Sketch of the collection-delete thread body: fetch the set's metadata
 * from the item and delete elements in batches of 30.
 * NOTE(review): 'node' should be declared as a set_hash_node * (it is
 * assigned info->root and a pointer return value), and 'default_node'
 * is undeclared — presumably 'node' (i.e. info->root) was meant. */
collection_delete_thread()
{
info = (set_meta_info *)item_get_meta(it);
set_hash_node node = info->root;
node = do_set_elem_delete(engine, info, default_node, 30 ,ELEM_DELETE_COLL);
}
/* Delete up to 'count' elements from the set (merging deletion path).
 * Runs one depth-first traversal batch starting at 'node', then unlinks
 * the root node once it has become completely empty.
 *
 * Returns the node where the traversal stopped, so the caller can
 * resume the next batch from there.
 *
 * Fixes vs. the draft:
 *  - 'node' and the return type must be set_hash_node * (the draft
 *    compared a struct against the pointer info->root and assigned a
 *    pointer return value into a struct);
 *  - one branch called the misspelled do_set_elem_traver_dfs(); since
 *    both branches then make the identical call (node == info->root in
 *    the first), they collapse into a single call. */
static set_hash_node *do_set_elem_delete(struct default_engine *engine,
                                         set_meta_info *info, set_hash_node *node,
                                         const uint32_t count,
                                         enum elem_delete_cause cause)
{
    assert(cause == ELEM_DELETE_COLL); /* only collection delete is supported */
    if (info->root != NULL) {
        node = do_set_elem_traverse_dfs(engine, info, node, count, true, NULL);
        /* the root itself is unlinked only when the whole set is empty */
        if (info->root->tot_hash_cnt == 0 && info->root->tot_elem_cnt == 0) {
            do_set_node_unlink(engine, info, NULL, 0);
        }
    }
    return node;
}
/* NOTE(review): truncated fragment — a loop over the hash-table slots;
 * the inner "if (node->" condition was cut off mid-line. */
for (hidx = 0 ; hidx <SET_HASHTAB_SIZE; hidx++){
if (node->
}
/* Caller-side sketch: select the fast (no-merge) delete path when
 * DELETE_NO_MERGE is defined, otherwise the merging delete path.
 * NOTE(review): the #else branch omits the 'node' argument that the
 * do_set_elem_delete() draft above takes — verify the final signature. */
#ifdef DELETE_NO_MERGE
set_hash_node *node = info->root;
#endif
#ifdef DELETE_NO_MERGE
node = do_set_elem_delete_fast(engine, info, node, 30);
#else
(void)do_set_elem_delete(engine, info, 30, ELEM_DELETE_COLL);
#endif
/* Fast (no-merge) delete entry point: run one traversal batch deleting
 * up to 'count' elements starting at 'node', then unlink the root once
 * the set has become completely empty.
 *
 * Returns the node where the next batch should resume, or NULL when
 * the set is empty (root unlinked) or there is no root at all.
 *
 * Fixes vs. the draft:
 *  - invalid parameter declaration "count uint32_t count";
 *  - return type must be set_hash_node * (the draft returned NULL from
 *    a function declared to return a struct by value);
 *  - return_node was read uninitialized when info->root == NULL;
 *  - the traversal call was misspelled "do_set_traverse_fast". */
static set_hash_node *do_set_elem_delete_fast(struct default_engine *engine,
                                              set_meta_info *info,
                                              set_hash_node *node,
                                              const uint32_t count)
{
    set_hash_node *return_node = NULL; /* fix: was uninitialized */
    if (info->root != NULL) {
        return_node = do_set_elem_traverse_fast(engine, info, node, count);
        if (info->root->tot_hash_cnt == 0 && info->root->tot_elem_cnt == 0) {
            do_set_node_unlink(engine, info, NULL, 0);
            return NULL; /* whole set gone: nothing to resume from */
        }
    }
    return return_node;
}
처음 설계한 함수
#ifdef DELETE_NO_MERGE
static *set_hash_node do_set_elem_traverse_fast(struct default_engine *engine, set_meta_info *info,
set_hash_node *node, const uint32_t *count)
{
int hidx;
const bool delete = true;
set_hash_node *tempnode;
if(node->tot_hash_cnt > 0){
for (hidx = 0; hidx < SET_HASHTAB_SIZE; hidx++) {
if (node->hcnt[hidx] == -1) {
set_hash_node *child_node = (set_hash_node *)node->htab[hidx];
tempnode = do_set_elem_traverse_fast(engine, info, child_node, count);
if(child_node->tot_hash_cnt == 0 && child_node->tot_elem_cnt == 0){
do_set_node_unlink(engine, info, node, hidx);
} else {
return tempnode;
}
}
if (count == 0){
if (node->tot_hash_cnt > 0 || node->tot_elem_cnt > 10){
return node;
}else {
count = node->tot_elem_cnt;
}
}
}
}
for (hidx = 0; hidx < SET_HASHTAB_SIZE; hidx++) {
if (node->hcnt[hidx] > 0) {
set_elem_item *elem = node->htab[hidx];
while (elem != NULL) {
/* delete element */
if (delete) do_set_elem_unlink(engine, info, node, hidx, NULL, elem,ELEM_DELETE_NORMAL);
count -= 1;
if(count == 0){
if (node->tot_elem_cnt > 10 || node->tot_elem_cnt == 0){
return &node;
} else{
count = node->tot_elem_cnt;
}
}
elem = (delete ? node->htab[hidx] : elem->next);
}
}
}
}
/* Entry point for the no-merge fast delete: run one traversal batch of
 * 'count' deletions, then unlink the root once the set is empty.
 *
 * Fixes vs. the draft:
 *  - the traversal decrements the batch counter through the pointer it
 *    receives, so it must point at a WRITABLE local copy; the draft
 *    took the address of the const parameter (writing through it is
 *    undefined behavior);
 *  - 'cur_node' was assigned but never used — the result is discarded
 *    explicitly instead. */
static void do_set_elem_delete_fast(struct default_engine *engine, set_meta_info *info,
                                    set_hash_node *node, const uint32_t count)
{
    if (info->root != NULL) {
        uint32_t remain = count; /* writable batch counter for the traversal */
        (void)do_set_elem_traverse_fast(engine, info, node, &remain);
        if (info->root->tot_hash_cnt == 0 && info->root->tot_elem_cnt == 0) {
            do_set_node_unlink(engine, info, NULL, 0);
        }
    }
}
#endif
btree collection 성능개선과 동일하게 set collection도 성능 개선 작업 진행