Trax-air / swagger-stub

Generate a stub from a swagger file
MIT License
22 stars 6 forks source link

AttributeError: 'generator' object has no attribute 'append' #12

Open shelomi123 opened 3 years ago

shelomi123 commented 3 years ago

This is my code. When I run the tests through VS Code it gives this error, and no tests are discovered.

import os import aimlflow from aimlflow import AiService import random, string import pandas as pd import pytest import json import logging from pytest_cases import fixture, parametrize_with_cases import allure

# Shared module-level state for the whole test suite.
exec_log = logging.getLogger('ML_workflow')  # workflow-level execution log
logs_dir = 'test_data/logs'  # NOTE(review): not referenced below — confirm it is still needed
aiservice = AiService.AiService()  # single AiService client shared by every test
# filepath = 'Client/python/test_data/' when running from vscode. filepath = 'test_data/' when running from terminal.
filePath = 'test_data/'

@fixture(autouse=True)
def configure_logging(request, caplog):
    """Route the pytest logging plugin's output to a file and capture INFO logs.

    Autouse fixture: runs for every test without being requested explicitly.
    """
    log_file = filePath + 'log'
    # Redirect the built-in logging plugin's log file to our test-data folder.
    request.config.pluginmanager.get_plugin("logging-plugin").set_log_path(log_file)
    caplog.set_level(logging.INFO)

def randomword(length):
    """Return a random string of ``length`` lowercase ASCII letters."""
    alphabet = string.ascii_lowercase
    picked = [random.choice(alphabet) for _ in range(length)]
    return ''.join(picked)

def login():
    """Authenticate against AIClub and return the auth response.

    Username and password to login to aiclub are taken from environment
    variables set here.
    """
    # NOTE(review): credentials are blank in the pasted code — presumably
    # filled in locally; confirm before running.
    os.environ['username'] = ''
    os.environ['password'] = ''
    auth_result = aiservice.auth()
    assert auth_result == True
    return auth_result

def create_ai_service():
    """Create an AI service under a random 5-letter name and return the response."""
    service_name = randomword(5)
    created = aiservice.createService(service_name)
    assert len(created) > 0
    return created

def delete_ai_service():
    """Delete the current AI service; asserts the backend reports 'success'."""
    deletion = aiservice.deleteService()
    assert deletion == 'success'
    return deletion

def load_test_params():
    """Load every ``*.json`` file under ``<filePath>/tests/`` into one dict.

    Each JSON file maps a test name to its workflow stages. The original
    returned inside the loop, so only the first JSON file was ever loaded
    (and ``None`` when no files existed, crashing the later ``.items()``
    call). All files are now merged; later files win on duplicate names.

    Returns:
        dict: merged test parameters; empty dict when no JSON files exist.
    """
    json_path = filePath + '/tests/'
    test_params = {}
    for file_name in os.listdir(json_path):
        if not file_name.endswith('.json'):
            continue
        with open(json_path + file_name) as json_file:
            test_params.update(json.load(json_file))
    return test_params

# Build the parametrization table once at import time so pytest can generate
# one test_workflow case per entry.
TEST_PARAMS = load_test_params()

@pytest.mark.parametrize("test_name,test_params", TEST_PARAMS.items(), ids=list(TEST_PARAMS.keys()))
def test_workflow(caplog, test_name, test_params):
    """Drive one end-to-end ML workflow described by a JSON test case.

    Logs in, creates a service, dispatches each workflow stage in order
    (import -> feature engineering -> training -> prediction), then deletes
    the service.
    """
    logged_in = login()
    service = create_ai_service()
    assert logged_in == True
    assert len(service) > 0

    # Feature engineering fills this in; training consumes it afterwards.
    problem_type = ''
    exec_log.info("testing started for " + test_name)
    for step in test_params:
        for stage_name, spec in step.items():
            if stage_name == 'dataImport':
                test_upload_and_import_data(spec['input'], spec['expectedOutput'])
            elif stage_name == 'featureEngineering':
                problem_type = test_feature_eng(spec['input'], spec['expectedOutput'])
            elif stage_name == 'training':
                test_training(spec['input'], spec['expectedOutput'], problem_type)
            elif stage_name == "askAI":
                test_get_prediction(spec['input'], spec['expectedOutput'])
            elif stage_name == "askAI_Range_Query":
                test_prediction_range_query(spec['input'], spec['expectedOutput'])

    delete_ai_service()

@allure.title("test upload and import data")
@allure.step
def test_upload_and_import_data(input_params, output_params):
    """Read a local CSV, upload it as JSON, and check the import result."""
    exec_log.info("Testing started for uploading and importing data")

    # import local file
    csv_path = filePath + input_params['test_filename']
    frame = pd.read_csv(csv_path)
    assert len(frame.index.values) > 0

    # upload local file and import dataset to the service
    payload = frame.to_json()
    upload_result = aiservice.uploadJSONData(jsonData=payload, fileName=csv_path, cloud='AWS')
    assert upload_result['result'] == output_params['result']

@allure.title("test feature engineering")
@allure.step
def test_feature_eng(input_params, output_params):
    """Launch feature engineering, verify status, and return the problem type.

    Returns the problem type that Navigator identified (e.g. classifier /
    regressor) so training can branch on it.
    """
    exec_log.info("Testing for feature engineering started")
    launch_resp = aiservice.launchFE(input_params)
    assert launch_resp['status'] == output_params['result']

    fe_report = aiservice.getFE_Report()
    raw_report = fe_report[aiservice._fe_id]['result']
    parsed = json.loads(raw_report)
    problem_type = parsed['identifyProblem']['Entries'][0]['Summary']

    # assert if the user specified problem type is different from the one selected by Navigator
    if input_params['problemType'] != 'auto':
        assert input_params['problemType'] in problem_type

    return problem_type

@allure.step
def test_training(input_params, output_params, problem_type):
    """Launch training in semi-automatic or automatic mode and verify it.

    Semi-automatic: fetch the backend's default hyper-parameters, overlay the
    test case's requested values, and launch with the merged set.
    Automatic: launch directly, then validate the confusion matrix and the
    reported hyper-parameters as nested steps.
    """
    exec_log.info("Testing started for training")

    if input_params['launchMode'] == 'semi-automatic':
        train_params = {
            'engine': input_params['engine'],
        }
        fetched = aiservice.fetchTrainParams(train_params)
        train_params['params'] = _merge_hyper_params(fetched, input_params['algorithm'])
        train_params['launchMode'] = 'semi-automatic'
        train_params['waitTime'] = 60
        response = aiservice.launchTrain(train_params)
        assert response['status'] == output_params['result']
    elif input_params['launchMode'] == 'automatic':
        response = aiservice.launchTrain(input_params)
        assert response['status'] == output_params['result']
        # test these as nested steps, so we can get an organized report
        test_confusion_matrix(response, problem_type, output_params)
        test_hyper_parameters(response, input_params)  # engine: aws-sklearn-serverless/aws-sagemaker


def _merge_hyper_params(fetched_params, algorithms):
    """Overlay requested hyper-parameter values onto the fetched defaults.

    Only algorithms named in ``algorithms`` (algo -> {param: value}) are kept;
    for each, parameter values present in both dicts are overwritten with the
    requested value (mutating the fetched entry in place, as before).
    """
    merged = []
    for algo, algo_params in fetched_params.items():
        if algo in algorithms:
            hyper_params = algo_params['HyperParameters']
            for key, new_value in algorithms[algo].items():
                if key in hyper_params:
                    hyper_params[key] = new_value
            merged.append(algo_params)
    return merged

@allure.step
def test_confusion_matrix(response, problem_type, output_params):
    """Validate per-model metrics and the confusion matrix where applicable."""
    for model_name, metrics in response['result'].items():
        assert metrics['metricName'] == output_params['metric']
        # Classifiers must carry a non-empty confusion matrix.
        if 'classifier' in problem_type:
            exec_log.info(model_name)
            assert metrics['confusionMatrix'] != None
            matrix = metrics['confusionMatrix']['matrix']
            assert len(matrix) > 0
            exec_log.info('confusion matrix')
        # Regressors must carry a metric value instead.
        if 'regressor' in problem_type:
            exec_log.info(model_name)
            assert metrics['value'] != None

@allure.step
def test_hyper_parameters(response, input_params):
    """Validate the hyper-parameters reported back by training.

    Semi-automatic mode: each value the test case requested must match the
    value the backend reports (where the backend reports that parameter).
    Otherwise: each algorithm must report a non-empty parameter set.
    """
    if input_params['launchMode'] == 'semi-automatic':
        for algo_name, reported_params in response['hyper_params'].items():
            exec_log.info("Hyper params:")
            exec_log.info(algo_name)
            # TODO(review): in test_training, 'algorithm' is a dict of
            # algo -> params; a substring check only works when it is a
            # plain string here — confirm which schema this path receives.
            assert input_params['algorithm'] in algo_name
            requested = input_params['HyperParameters']
            # Bug fix: the original iterated ``requested.items`` (the bound
            # method, not the view) and read ``.key``/``.value`` attributes
            # that (key, value) tuples do not have — it could never run.
            for key, value in requested.items():
                if key in reported_params:
                    assert value == reported_params[key]
    else:
        for algo_name, reported_params in response['hyper_params'].items():
            exec_log.info("Hyper params:")
            exec_log.info(algo_name)
            exec_log.info(reported_params)
            assert len(reported_params) > 0

@allure.step
def test_get_prediction(input_params, output_params):
    """Request a prediction and validate that a label and insight came back."""
    exec_log.info("Testing started for prediction")
    prediction = aiservice.get_prediction(input_params)
    assert prediction['predicted_label'] != None
    test_insight(prediction)

def test_prediction_range_query(input_params, output_params):
    """Run a range-based prediction and check axis labels, sizes, and matrix shape."""
    exec_log.info("Testing started for range based prediction")
    prediction = aiservice.get_prediction(input_params)
    assert prediction['xAxisLabel'] == output_params['xAxisLabel']
    assert prediction['yAxisLabel'] == output_params['yAxisLabel']
    assert len(prediction['xAxis']) == output_params['xAxisSize']
    assert len(prediction['yAxis']) == output_params['yAxisSize']

    # test whether we have a matrix with correct dimensions
    matrix_rows = prediction['matrix']
    assert len(matrix_rows) == output_params['yAxisSize']
    for row in matrix_rows:
        assert len(row) == output_params['xAxisSize']

@allure.step
def test_insight(response):
    """Validate the Insight section of a prediction response.

    Checks whichever known insight kind is present: featureImportance,
    nearestNeighbors, or modelCoefficients.
    """
    insight = response['Insight']
    if 'featureImportance' in insight:
        data_table = insight['featureImportance']['data']
        assert len(data_table) > 0
    elif 'nearestNeighbors' in insight:
        # Bug fix: the original compared the whole Insight dict to the string
        # 'nearestNeighbors', which is never true, so this branch was dead.
        data_dict = insight['nearestNeighbors']
        assert len(data_dict) > 0
    elif 'modelCoefficients' in insight:
        # Bug fix: the lookup key had a trailing space ('modelCoefficients ')
        # and would always raise KeyError.
        data_table = insight['modelCoefficients']['data']
        assert len(data_table) > 0
shelomi123 commented 3 years ago

AttributeError: 'generator' object has no attribute 'next' #8

@cyprieng