Open drewkerrigan opened 8 years ago
@drewkerrigan A couple of comments and questions as I start making progress on this.
I've pushed what I've done so far to https://github.com/travisbhartwell/riak_explorer/tree/add-integration-tests-109
test/integration/re_wm_explorer_test.erl
, I found a test at the bottom of src/riak_explorer.erl
: https://github.com/travisbhartwell/riak_explorer/blob/add-integration-tests-109/src/riak_explorer.erl#L73-L79
This was duplicating a test that was in the integration tests. Do we want to delete this one and only put tests in test/integration/
?get_results_json_field/2
(https://github.com/travisbhartwell/riak_explorer/blob/add-integration-tests-109/test/integration/re_wm_explore_test.erl#L50-L53), but unfortunately failed at making it generic. I think such a function would be useful, but I noticed there is an inconsistency in how results are returned. For example (results show using curl):From /explore/ping
:
{"ping":{"id":"ping","message":"pong"},"links":{"self":"/explore/ping"}}
From '/control/nodes/riak@127.0.0.1/status':
{"status":{"nodes":[{"id":"riak@127.0.0.1","status":"valid","ring_percentage":100.0,"pending_percentage":null}],"valid":1,"leaving":0,"exiting":0,"joining":0,"down":0},"links":{"self":"/control/nodes/riak@127.0.0.1/status"}}
From /explore/clusters/default/nodes
:
{"nodes":[{"id":"riak@127.0.0.1"}],"links":{"self":"/explore/clusters/default/nodes"}}
From /explore/clusters/default/nodes/riak@127.0.0.1/config
:
{"config":{"config":{"vnode_management_timer":"10s","transfer_limit":"2","tls_protocols.tlsv1.2":"on","tls_protocols.tlsv1.1":"off","tls_protocols.tlsv1":"off","tls_protocols.sslv3":"off","strong_consistency":"off","storage_backend":"bitcask","snmp.traps.replication":"off","snmp.refresh_frequency":"1m","snmp.nodePutTimeMedianThreshold":"off","snmp.nodePutTimeMeanThreshold":"off","snmp.nodePutTime99Threshold":"off","snmp.nodePutTime95Threshold":"off","snmp.nodePutTime100Threshold":"off","snmp.nodeGetTimeMedianThreshold":"off","snmp.nodeGetTimeMeanThreshold":"off","snmp.nodeGetTime99Threshold":"off","snmp.nodeGetTime95Threshold":"off","snmp.nodeGetTime100Threshold":"off","snmp.force_reload":"on","snmp.database_dir":"/var/lib/riak/snmp/agent/db","secure_referer_check":"on","search.solr.start_timeout":"30s","search.solr.port":"8093","search.solr.jvm_options":"-d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops","search.solr.jmx_port":"8985","search.root_dir":"$(platform_data_dir)/yz","search.anti_entropy.data_dir":"$(platform_data_dir)/yz_anti_entropy","search":"off","sasl":"off","runtime_health.triggers.process.long_schedule":"off","runtime_health.triggers.process.heap_size":"160444000","runtime_health.triggers.process.garbage_collection":"off","runtime_health.triggers.port":"on","runtime_health.triggers.distribution_port":"on","runtime_health.thresholds.busy_processes":"30","runtime_health.thresholds.busy_ports":"2","ring_size":"64","ring.state_dir":"$(platform_data_dir)/ring","riak_control.auth.mode":"off","riak_control":"off","retry_put_coordinator_failure":"on","protobuf.nagle":"off","protobuf.backlog":"128","platform_log_dir":"/var/log/riak","platform_lib_dir":"/usr/lib/riak/lib","platform_etc_dir":"/etc/riak","platform_data_dir":"/var/lib/riak","platform_bin_dir":"/usr/sbin","object.size.warning_threshold":"5MB","object.size.maximum":"50MB","object.siblings.warning_threshold":"25","object.siblings.maximum":"100","object.format":"1","nodename":"riak@127.
0.0.1","metadata_cache_size":"off","max_concurrent_requests":"50000","log.syslog.level":"info","log.syslog.ident":"riak","log.syslog.facility":"daemon","log.syslog":"off","log.error.redirect":"on","log.error.messages_per_second":"100","log.error.file":"$(platform_log_dir)/error.log","log.crash.size":"10MB","log.crash.rotation.keep":"5","log.crash.rotation":"$D0","log.crash.maximum_message_size":"64KB","log.crash.file":"$(platform_log_dir)/crash.log","log.crash":"on","log.console.level":"info","log.console.file":"$(platform_log_dir)/console.log","log.console":"file","listener.protobuf.internal":"127.0.0.1:8087","listener.http.internal":"127.0.0.1:8098","leveldb.write_buffer_size_min":"30MB","leveldb.write_buffer_size_max":"60MB","leveldb.verify_compaction":"on","leveldb.verify_checksums":"on","leveldb.tiered":"off","leveldb.threads":"71","leveldb.sync_on_write":"off","leveldb.maximum_memory.percent":"70","leveldb.limited_developer_mem":"off","leveldb.fadvise_willneed":"false","leveldb.data_root":"$(platform_data_dir)/leveldb","leveldb.compression":"on","leveldb.compaction.trigger.tombstone_count":"1000","leveldb.bloomfilter":"on","leveldb.block_cache_threshold":"32MB","leveldb.block.size_steps":"16","leveldb.block.size":"4KB","leveldb.block.restart_interval":"16","jmx.restart_check":"10m","jmx.refresh_rate":"30s","jmx.port":"41110","jmx":"off","javascript.reduce_pool_size":"6","javascript.maximum_stack_size":"16MB","javascript.maximum_heap_size":"8MB","javascript.map_pool_size":"8","javascript.hook_pool_size":"2","honor_cipher_order":"on","handoff.use_background_manager":"off","handoff.port":"8099","handoff.outbound":"on","handoff.max_rejects":"6","handoff.ip":"0.0.0.0","handoff.inbound":"on","erlang.smp":"enable","erlang.schedulers.force_wakeup_interval":"500","erlang.schedulers.compaction_of_load":"false","erlang.process_limit":"256000","erlang.max_ports":"65536","erlang.max_ets_tables":"256000","erlang.fullsweep_after":"0","erlang.distribution_buffer_size":"32MB",
"erlang.crash_dump":"/var/log/riak/erl_crash.dump","erlang.async_threads":"64","erlang.W":"w","erlang.K":"on","dtrace":"off","distributed_cookie":"riak","datatypes.compression_level":"1","check_crl":"on","buckets.default.w":"quorum","buckets.default.rw":"quorum","buckets.default.r":"quorum","buckets.default.pw":"0","buckets.default.pr":"0","buckets.default.notfound_ok":"true","buckets.default.n_val":"3","buckets.default.merge_strategy":"1","buckets.default.last_write_wins":"false","buckets.default.dw":"quorum","buckets.default.basic_quorum":"false","buckets.default.allow_mult":"false","bitcask.sync.strategy":"none","bitcask.open_timeout":"4s","bitcask.merge_check_jitter":"30%","bitcask.merge_check_interval":"3m","bitcask.merge.window.start":"0","bitcask.merge.window.end":"23","bitcask.merge.triggers.fragmentation":"60","bitcask.merge.triggers.dead_bytes":"512MB","bitcask.merge.thresholds.small_file":"10MB","bitcask.merge.thresholds.fragmentation":"40","bitcask.merge.thresholds.dead_bytes":"128MB","bitcask.merge.policy":"always","bitcask.max_merge_size":"100GB","bitcask.max_file_size":"2GB","bitcask.io_mode":"erlang","bitcask.hintfile_checksums":"strict","bitcask.fold.max_puts":"0","bitcask.fold.max_age":"unlimited","bitcask.expiry.grace_time":"0","bitcask.expiry":"off","bitcask.data_root":"$(platform_data_dir)/bitcask","background_manager":"off","anti_entropy.write_buffer_size":"4MB","anti_entropy.use_background_manager":"off","anti_entropy.trigger_interval":"15s","anti_entropy.tree.expiry":"1w","anti_entropy.tree.build_limit.per_timespan":"1h","anti_entropy.tree.build_limit.number":"1","anti_entropy.throttle":"on","anti_entropy.max_open_files":"20","anti_entropy.data_dir":"$(platform_data_dir)/anti_entropy","anti_entropy.concurrency_limit":"2","anti_entropy.bloomfilter":"on","anti_entropy":"active"},"advanced_config":["{riak_core,[{cluster_mgr,{\"0.0.0.0\",9080}}]}","{riak_repl,[{data_root,\"/var/lib/riak/riak_repl/\"},\n {max_fssource_cluster,5},\n 
{max_fssource_node,1},\n {max_fssink_node,1},\n {fullsync_on_connect,true},\n {fullsync_interval,30},\n {rtq_max_bytes,104857600},\n {proxy_get,disabled},\n {rt_heartbeat_interval,15},\n {rt_heartbeat_timeout,15},\n {fullsync_use_background_manager,true}]}"]},"links":{"self":"/explore/clusters/default/nodes/riak@127.0.0.1/config"}}
These results aren't consistent. For example, with the ping
and status
endpoints the results are part of an object's definition (if I'm using the JSON terminology right), with status
being a level nested. But, in the nodes
and config
endpoints (and many others), the results are in an array. It would seem it would make sense and make things easier for testing (and probably on the Explorer GUI side, too) if this were consistent. I could just use an extension of get_result_json_field/2
to pull out the particular field that I'm comparing instead of writing slightly similar code over and over.
Hey, re: the test at the bottom of riak_explorer.erl
- the test itself is kind of pointless as it is just verifying a hardcoded result that the function will always return... However, the fact that it verifies the same data as the integration test is irrelevant because this one is a unit test. There should also be comprehensive unit tests in each of the files to test the accuracy of code that can be executed without requiring integration with other pieces of the stack (like Riak or relying on a webserver running). So tl;dr, I'm fine with deleting the test, but not with the reason you're suggesting. The primary value that the integration tests provides is that it tests a much larger code path (client -> RiEx webmachine -> re_riak.erl -> riak database -> re_riak.erl -> webmachine -> client). The individual unit tests are meant to only test an individual function's or small group of functions' validity ( re_riak:ping() in this example).
re: inconsistency in endpoint results, I actually only see one endpoint that is inconsistent, which is the /explore/clusters/default/nodes/riak@127.0.0.1/config
endpoint. I think it should just be {"config": {"id": "riak@127.0.0.1", "key1": "value1", "key2": "value2", ...}}
.
The other ones make sense to me. For example, the ping
model is defined in this case by having:
ping.id
ping.message
The status
model is defined:
status.nodes -> array/list of node objects, each with an id attribute
status.nodes.0.id
status.nodes.1.id
status.valid
status.leaving
status.exiting
status.joining
The nodes
endpoint is also an array/list of nodes
, but the context is different. Even though the context differs and the attributes will be different, the id
attribute should remain the same, so we end up with the node
minimum model like this:
node.id
And the nodes
endpoint giving this type of object
nodes.0.id
nodes.1.id
...
All that being said, I can think of a couple helper functions that might make things a bit easier for you despite the same data being in different parts of the data structure depending on the model, for instance:
%% @doc Decode a JSON payload with mochijson2 and extract the value found
%% by walking the given attribute path. Path elements may be binaries or
%% atoms (object keys; atoms are converted, since mochijson2 decodes keys
%% as binaries) or integers (1-based list indices, matching lists:nth/2).
get_value(Payload, Attributes) ->
    get_inner_value(mochijson2:decode(Payload), Attributes).

%% @doc Walk a mochijson2-decoded term along a path of keys.
%% Crashes with function_clause/badarg when the path does not exist,
%% which is the desired assertive behavior for test helpers.
get_inner_value(Value, []) ->
    %% Path exhausted: return whatever term we are positioned at.
    Value;
get_inner_value({struct, Proplist}, Attributes) ->
    %% Unwrap mochijson2's {struct, Proplist} object representation.
    get_inner_value(Proplist, Attributes);
get_inner_value(Value, [Key | Rest]) when is_integer(Key) ->
    %% Integer key: 1-based index into a decoded JSON array.
    get_inner_value(lists:nth(Key, Value), Rest);
get_inner_value(Value, [Key | Rest]) when is_atom(Key) ->
    %% Atom keys are a caller convenience; normalize to binary and retry.
    get_inner_value(Value, [atom_to_binary(Key, utf8) | Rest]);
get_inner_value(Value, [Key | Rest]) ->
    %% Binary key: look up in the object's property list.
    get_inner_value(proplists:get_value(Key, Value), Rest).
Would allow you to do the following types of tests:
%% Extracts a nested object attribute from the /explore/ping payload.
test1() ->
    Json = "{\"ping\":{\"id\":\"ping\",\"message\":\"pong\"},\"links\":{\"self\":\"/explore/ping\"}}",
    Expected = <<"pong">>,
    Expected = get_value(Json, [ping, message]).
%% Indexes into the nodes array of a status payload (1-based index).
test2() ->
    Json = "{\"status\":{\"nodes\":[{\"id\":\"riak@127.0.0.1\",\"status\":\"valid\",\"ring_percentage\":100.0,\"pending_percentage\":null}],\"valid\":1,\"leaving\":0,\"exiting\":0,\"joining\":0,\"down\":0},\"links\":{\"self\":\"/control/nodes/riak@127.0.0.1/status\"}}",
    Expected = <<"riak@127.0.0.1">>,
    Expected = get_value(Json, [status, nodes, 1, id]).
%% Reads the id of the first node from a top-level nodes array.
test3() ->
    Json = "{\"nodes\":[{\"id\":\"riak@127.0.0.1\"}],\"links\":{\"self\":\"/explore/clusters/default/nodes\"}}",
    Expected = <<"riak@127.0.0.1">>,
    Expected = get_value(Json, [nodes, 1, id]).
%% Reaches two object levels deep into the doubly-nested config payload.
test4() ->
    Json = "{\"config\":{\"config\":{\"vnode_management_timer\":\"10s\",\"transfer_limit\":\"2\",\"tls_protocols.tlsv1.2\":\"on\",\"tls_protocols.tlsv1.1\":\"off\"}}}",
    Expected = <<"10s">>,
    Expected = get_value(Json, [config, config, vnode_management_timer]).
New tests should verify return codes and expected content / headers for each of the
re_wm_*
resources.