Open fantas1a opened 2 days ago
I have same error when in local mode:
05:19:43 [ INFO] starting local_fs_reader<bytes>
job
thread_id = 140685963821056
finish pre_touching
local_scheduler serialize task time: 0.0030098890000000004 s
local_scheduler deserialize task time: 8.0407e-5 s
tid: 11, at the begining of secure execution
thread '...' panicked at 'called `Option::unwrap()` on a `None` value', src/lib.rs:244:72
Illegal instruction
Illegal instruction
And the hosts.conf file I used when in local mode:
master = "10.0.0.6:3000" slaves = [""]
I executed the ~/flare/bin/set_env_distri.sh in advance.
@AmbitionXiang Hearty thanks if you could take a look at this : )
And if I test with se1_sec()?; se2_sec()?; se3_sec()?; in app/src/main.rs, it prompted:
09:56:41 [ INFO] starting
unknown
job thread_id = 139655171670016 finish pre_touching local_scheduler serialize task time: 0.0030862180000000004 s local_scheduler serialize task time: 4.7784e-5 s local_scheduler deserialize task time: 0.00013935800000000002 s in denepdency, key = (16961387396503108670, 0, 0), ops = [OpId { h: 14338286665493845171 }, OpId { h: 17013284107314768422 }, OpId { h: 17639518093728688273 }, OpId { h: 10378376576142600703 }, OpId { h: 11054832435461403850 }] thread 'tokio-runtime-worker' panicked at 'called `Result::unwrap()` on an `Err` value: InputRead(Os { code: 2, kind: NotFound, message: "No such file or directory" })', /home/dbucket/flare/flare-core-untrusted/src/dependency.rs:294:74 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
local_scheduler deserialize task time: 5.2827e-5 s in denepdency, key = (15998552245887240225, 0, 1), ops = [OpId { h: 14338286665493845171 }, OpId { h: 7045621555540763815 }, OpId { h: 8622552163589584583 }, OpId { h: 12031491541542741121 }, OpId { h: 10809193829318009398 }] thread 'tokio-runtime-worker' panicked at 'called `Result::unwrap()` on an `Err` value: InputRead(Os { code: 2, kind: NotFound, message: "No such file or directory" })', /home/dbucket/flare/flare-core-untrusted/src/dependency.rs:294:74
Here is the main function I used:
fn main() -> Result<()> { //Fn! will make the closures serializable. It is necessary. use serde_closure version 0.1.3. / dijkstra / //dijkstra_sec_0()?; //dijkstra_unsec_0()?;
/* map */ //map_sec_0()?; //map_sec_1()?; //map_unsec_0()?; //map_unsec_1()?; /* filter */ //filter_sec_0()?; //filter_unsec_0()?; /* group_by */ //group_by_sec_0()?; //group_by_sec_1()?; /* join */ //join_sec_0()?; //join_sec_1()?; //join_sec_2()?; //join_unsec_2()?; /* distinct */ //distinct_sec_0()?; //distinct_unsec_0()?; /* local file reader */ //file_read_sec_0()?; //file_read_unsec_0()?; /* partition_wise_sample */ //part_wise_sample_sec_0()?; //part_wise_sample_unsec_0()?; //part_wise_sample_unsec_1()?; //part_wise_sample_unsec_2()?; /* take */ //take_sec_0()?; //take_unsec_0()?; /* reduce */ //reduce_sec_0()?; /* count */ //count_sec_0()?; //count_unsec_0()?; /* union */ //union_sec_0()?; //union_unsec_0()?; /* zip */ //zip_sec_0()?; /* big data bench */ // aggregate_sec()?; // filter_sec()?; // cross_project_sec()?; // aggregate_unsec()?; // filter_unsec()?; // cross_project_unsec()?; /* tpc-h */ // te1_sec()?; // te2_sec()?; // te3_sec()?; // te1_unsec()?; // te2_unsec()?; // te3_unsec()?; /* social graph */ se1_sec()?; se2_sec()?; se3_sec()?; // se1_unsec()?; // se2_unsec()?; // se3_unsec()?; /* kmeans */ //kmeans_sec_0()?; //kmeans_unsec_0()?; /* linear regression */ //lr_sec()?; //lr_unsec()?; /* matrix multipilication */ //mm_sec_0()?; //mm_unsec_0()?; /* page rank */ //pagerank_sec_0()?; //pagerank_unsec_0()?; /* pearson correlation algorithm */ //pearson_sec_0()?; //pearson_unsec_0()?; /* transitive_closure */ //transitive_closure_sec_0()?; //transitive_closure_sec_1()?; //transitive_closure_unsec_0()?; //transitive_closure_unsec_1()?; /* triangle counting */ //triangle_counting_sec_0()?; //triangle_counting_unsec_0()?; // test the speculative execution in loop //test0_sec_0()?; //topk //topk_sec_0()?; //topk_unsec_0()?; Ok(())
}
Hi, I wonder if anyone can help with the following errors. I run SODA with one master and two slave nodes, and all with scalable SGX.
Here is my node configuration:
My machine:
And following is what I did: