@pytest.mark.parametrize("test_name,test_params", TEST_PARAMS.items(), ids=list(TEST_PARAMS.keys()))
def test_workflow(caplog, test_name, test_params):
    response = login()
    ai_service = create_ai_service()
    assert response
    assert len(ai_service) > 0
    problem_type = ''
    exec_log.info(f"testing started for {test_name}")
    for stage in test_params:
        for test_key, test_val in stage.items():
            if test_key == 'dataImport':
                test_upload_and_import_data(test_val['input'], test_val['expectedOutput'])
            elif test_key == 'featureEngineering':
                problem_type = test_feature_eng(test_val['input'], test_val['expectedOutput'])
            elif test_key == 'training':
                test_training(test_val['input'], test_val['expectedOutput'], problem_type)
            elif test_key == 'askAI':
                test_get_prediction(test_val['input'], test_val['expectedOutput'])
            elif test_key == 'askAI_Range_Query':
                test_prediction_range_query(test_val['input'], test_val['expectedOutput'])
    delete_ai_service()
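For context, each file under test_data/tests/ is assumed to hold one workflow's stage list, roughly shaped like the sketch below (inferred from the loop above; file names and values are illustrative, not from the original post):

# hypothetical shape of one test-parameter file, shown as a Python literal
example_test_params = [
    {'dataImport': {'input': {'test_filename': 'iris.csv'},
                    'expectedOutput': {'result': 'success'}}},
    {'featureEngineering': {'input': {'problemType': 'auto'},
                            'expectedOutput': {'result': 'success'}}},
    {'training': {'input': {'launchMode': 'automatic'},
                  'expectedOutput': {'result': 'success'}}},
]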
@allure.title("test upload and import data")
@allure.step
def test_upload_and_import_data(input_params, output_params):
exec_log.info("Testing started for uploading and importing data")
import local file
test_file_name = input_params['test_filename']
file_with_path = filePath + test_file_name
df = pd.read_csv(file_with_path)
assert len(df.index.values) > 0
json_str = df.to_json()
#upload local file and import dataset to the service
response = aiservice.uploadJSONData(jsonData=json_str, fileName=file_with_path, cloud='AWS')
assert response['result'] == output_params['result']
@allure.title("test feature engineering")
@allure.step
def test_feature_eng(input_params, output_params):
    exec_log.info("Testing for feature engineering started")
    response = aiservice.launchFE(input_params)
    assert response['status'] == output_params['result']
    response = aiservice.getFE_Report()
    report = response[aiservice._fe_id]['result']
    json_report = json.loads(report)
    problem_type = json_report['identifyProblem']['Entries'][0]['Summary']
    # assert if the user-specified problem type differs from the one selected by Navigator
    if input_params['problemType'] != 'auto':
        assert input_params['problemType'] in problem_type
    return problem_type
@allure.step
def test_training(input_params, output_params, problem_type):
    exec_log.info("Testing started for training")
    if input_params['launchMode'] == 'semi-automatic':
        train_params = {
            'engine': input_params['engine'],
        }
        response = aiservice.fetchTrainParams(train_params)
        hyper_params = []
        for algo, algo_params in response.items():
            if algo in input_params['algorithm'].keys():
                # override the default hyperparameter values with the user-specified ones
                hyper_params_value_list = algo_params['HyperParameters']
                for key, value in hyper_params_value_list.items():
                    if key in input_params['algorithm'][algo].keys():
                        new_value = input_params['algorithm'][algo][key]
                        hyper_params_value_list[key] = new_value
                hyper_params.append(algo_params)
        train_params['params'] = hyper_params
        train_params['launchMode'] = 'semi-automatic'
        train_params['waitTime'] = 60
        response = aiservice.launchTrain(train_params)
        assert response['status'] == output_params['result']
    elif input_params['launchMode'] == 'automatic':
        response = aiservice.launchTrain(input_params)
        assert response['status'] == output_params['result']
    # run these as nested steps, so we get an organized report
    test_confusion_matrix(response, problem_type, output_params)
    test_hyper_parameters(response, input_params)  # engine: aws-sklearn-serverless / aws-sagemaker
This is my code. When I run test discovery through VS Code, it gives this error: no tests are discovered.
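A hedged guess at the cause: VS Code reports "no tests discovered" whenever the module raises during collection, so any import-time error hides every test (for example, if load_test_params() returned nothing, TEST_PARAMS.items() in the parametrize decorator would raise AttributeError before pytest could collect anything); note also that the step helpers named test_* will themselves be collected as tests and fail for missing fixtures unless renamed. Discovery additionally needs pytest enabled in .vscode/settings.json; a minimal sketch, assuming the suite lives under Client/python as in the filepath comment below:

{
    // enable pytest-based discovery and point it at the assumed test folder
    "python.testing.pytestEnabled": true,
    "python.testing.unittestEnabled": false,
    "python.testing.pytestArgs": ["Client/python"]
}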
import os
import random
import string
import json
import logging

import pandas as pd
import pytest
import allure
from pytest_cases import fixture, parametrize_with_cases

import aimlflow
from aimlflow import AiService
exec_log = logging.getLogger('ML_workflow')
logs_dir = 'test_data/logs'
aiservice = AiService.AiService()
filePath = 'test_data/'  # 'Client/python/test_data/' when running from VS Code; 'test_data/' when running from the terminal.
@fixture(autouse=True)
def configure_logging(request, caplog):
    log_file = filePath + 'log'
    request.config.pluginmanager.get_plugin("logging-plugin").set_log_path(log_file)
    caplog.set_level(logging.INFO)
def randomword(length):
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(length))
def login():
    # username and password to login to aiclub
    ...
def create_ai_service():
    ai_service_name = randomword(5)
    response = aiservice.createService(ai_service_name)
    assert len(response) > 0
    return response
def delete_ai_service():
    response = aiservice.deleteService()
    assert response == 'success'
    return response
def load_test_params():
    json_path = filePath + 'tests/'
    json_list = [f for f in os.listdir(json_path) if f.endswith('.json')]
    # assumed completion: each JSON file holds one test's stage list, keyed by its file name
    test_params = {}
    for json_file in json_list:
        with open(json_path + json_file) as fp:
            test_params[os.path.splitext(json_file)[0]] = json.load(fp)
    return test_params

TEST_PARAMS = load_test_params()
@allure.title("test upload and import data") @allure.step def test_upload_and_import_data(input_params, output_params): exec_log.info("Testing started for uploading and importing data")
import local file
@allure.title("test feature engineering") @allure.step def test_feature_eng(input_params, output_params): exec_log.info("Testing for feature engineering started") response = aiservice.launchFE(input_params) assert response['status'] == output_params['result']
@allure.step
def test_confusion_matrix(response, problem_type, output_params):
    # test whether we have a matrix with correct dimensions
    ...

@allure.step
def test_hyper_parameters(response, input_params):
    ...

@allure.step
def test_get_prediction(input_params, output_params):
    exec_log.info("Testing started for prediction")
    response = aiservice.get_prediction(input_params)
    assert response['predicted_label'] is not None
    test_insight(response)

def test_prediction_range_query(input_params, output_params):
    exec_log.info("Testing started for range based prediction")
    response = aiservice.get_prediction(input_params)
    assert response['xAxisLabel'] == output_params['xAxisLabel']
    assert response['yAxisLabel'] == output_params['yAxisLabel']
    assert len(response['xAxis']) == output_params['xAxisSize']
    assert len(response['yAxis']) == output_params['yAxisSize']

@allure.step
def test_insight(response):
    ...
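For completeness, a typical terminal invocation under these assumptions (the file name test_ml_workflow.py and the results directory are hypothetical; --alluredir comes from the allure-pytest plugin):

pytest test_ml_workflow.py -v --alluredir=allure-results
allure serve allure-results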