prasanmgc opened this issue 1 month ago
Hi! Can you include a snippet of your hooks / environment.py, please? Or if you can provide a minimal reproducible example repo, that would be even better. Thanks!
@iamkenos Please find the environment.py as requested.
"""
Python file containing Behave Hooks
"""
import os
import numpy as np
import yaml
import time
import glob
from behavex_images import image_attachments
from behavex_images.image_attachments import AttachmentsCondition
from features.models.utils.constants import *
from features.models.utils.driverutils import SeleniumDriverFactory
from features.models.utils import loggerutils
from time import strftime
from features.models.utils.actions import PageAction

def before_all(context) -> None:
    context.configs = None
    with open(os.path.join(os.getcwd(), config_file), 'r') as yaml_file:
        context.configs = yaml.safe_load(yaml_file)
    loggerutils.setup_logging()
    loggerutils.setup_logging_context(context)
    context.browser = context.configs['env']['browser']
    context.headless = context.configs['env']['headless']
    context.element_fetch_timeout = context.configs['env']['element_fetch_timeout']
    context.application_url = context.configs['env']['application_url']
    context.username = context.configs['env']['username']
    context.password = os.environ.get('pwd')
    context.passed_scenarios = []
    context.failed_scenarios = []
    context.skipped_scenarios = []

def before_feature(context, feature) -> None:
    context.logger.info('-' * 93)
    context.logger.info('STARTED EXECUTION OF FEATURE: ' + str(feature.name))
    context.logger.info('Tags: ' + str([str(item) for item in feature.tags]))
    context.logger.info('Filename: ' + str(feature.filename))
    context.logger.info('Line: ' + str(feature.line))
    context.logger.info('-' * 93)
    now = time.time()
    retention_days = int(context.configs['env']['number_of_days_to_keep_log_files'])
    retention_time = now - retention_days * 86400

    def delete_old_files(directory):
        # Remove files older than the configured retention period
        if os.path.exists(directory):
            try:
                for file in os.listdir(directory):
                    file_path = os.path.join(directory, file)
                    if os.path.isfile(file_path) and os.stat(file_path).st_mtime < retention_time \
                            and os.stat(file_path).st_mtime < now - 300:
                        os.remove(file_path)
                context.logger.info(f'Deleted old files in {directory}')
            except Exception as ex:
                context.logger.error(f'Unable to delete files in {directory}! Error: {ex}')

    delete_old_files(os.path.normpath(os.path.join(os.getcwd(), log_dir)))
    try:
        # Note: a bare map(os.remove, ...) is lazy in Python 3 and never runs,
        # so iterate explicitly to actually delete the files.
        for xml_report in glob.glob(os.path.join(os.getcwd(), 'reports', '*.xml')):
            os.remove(xml_report)
        context.logger.info('Deleted old XML report files')
    except Exception as e:
        context.logger.error(f'Unable to delete XML report files! Error: {e}')
    context.logger.info('=' * 93)
    context.logger.info('TESTING STARTED AT : ' + strftime('%Y-%m-%d %H:%M:%S'))
    context.logger.info('=' * 93)
    # Get the appropriate driver for the browser specified in the config file
    driver_factory = SeleniumDriverFactory(context)
    context.driver = driver_factory.get_driver()
    image_attachments.set_attachments_condition(context, AttachmentsCondition.ALWAYS)
    # Set the driver implicit timeout. The web driver will keep polling for an element
    # for the specified timeout period.
    timeout = context.configs['env']['implicit_timeout']
    context.driver.implicitly_wait(timeout)
    context.logger.info("Driver implicit timeout is set to '" + str(timeout) + "' seconds")
    # Reset application state before executing the next feature
    reset_application_state(context)

def reset_application_state(context) -> None:
    context.driver.get(context.application_url)  # Navigate to the application URL
    # context.driver.delete_all_cookies()  # Clear cookies to reset the session
    # context.driver.refresh()  # Refresh the page to ensure a clean state

def before_scenario(context, scenario) -> None:
    context.logger.info('STARTED EXECUTION OF SCENARIO: ' + str(scenario.name))
    context.logger.info('Tags: ' + str([str(item) for item in scenario.tags]))
    context.logger.info('Filename: ' + str(scenario.filename))
    context.logger.info('Line: ' + str(scenario.line))
    context.logger.info('-' * 93)

def before_step(context, step) -> None:
    # Log the current step's progress
    context.logger.info(f'{step.name} : in Progress, Line: {step.line}')
    context.scenario_name = context.scenario.name.split('--')[0].strip()
    context.name = context.scenario_name

def after_step(context, step) -> None:
    if step.status == 'failed':
        context.logger.error(f'{step.name} : FAILED')
    else:
        context.logger.info(f'{step.name} : PASSED')
    if step.status == 'failed':
        try:
            page_actions = PageAction(context)
            if page_actions.wait_for_browser_to_load_completely():
                page_actions.switch_to_default_content()
                screenshot_file = os.path.normpath(os.path.join(
                    context.evidence_path,
                    f'{context.scenario_name}_{step.name[0:15]}_{get_current_date_timestamp()}.png'))
                context.driver.save_screenshot(screenshot_file)
                image_attachments.attach_image_file(context, screenshot_file, 'Screenshot')
                context.logger.info(f"Screenshot captured in '{screenshot_file}'")
        except Exception as e:
            context.logger.error(f'Unable to take screenshot! Error: {e}', exc_info=True)

def after_scenario(context, scenario) -> None:
    context.logger.info('FINISHED EXECUTION OF SCENARIO: ' + str(scenario.name))
    if scenario.status == 'failed':
        context.logger.error(f'{scenario.name} : FAILED')
    else:
        context.logger.info(f'{scenario.name} : PASSED')
    context.logger.info('-' * 93)

def after_feature(context, feature) -> None:
    context.logger.info('FINISHED EXECUTION OF FEATURE: ' + str(feature.name))
    if feature.status == 'failed':
        context.logger.error(f'{feature.name} : FAILED')
    else:
        context.logger.info(f'{feature.name} : PASSED')
    if context.driver is not None:
        try:
            context.driver.close()
        except Exception as e:
            context.logger.error('Unable to close browser window! Error: %s' % e, exc_info=True)
        try:
            context.driver.quit()
        except Exception as e:
            context.logger.error('Unable to quit driver! Error: %s' % e, exc_info=True)

def after_all(context) -> None:
    context.logger.info('=' * 93)
    context.logger.info('TESTING FINISHED AT : ' + strftime('%Y-%m-%d %H:%M:%S'))
    context.logger.info('=' * 93)
Hello @prasanmgc
I ran into the same issue, but it was resolved after upgrading to the latest behavex-images version. Please refer to https://github.com/hrcorval/behavex/issues/166.
A small doubt: is it capturing images for the failed steps, and are they shown in the HTML report?
@Yaswanthbobbu Yes, it is capturing screenshots, and I can see them in the report. In my case, screenshots are captured for both passed and failed steps; that's the framework requirement.
I went through the comments on your issue. It looks like you are mixing two different things (Allure and behavex). You don't need Allure, as behavex reports can cover your needs; simply use behavex for your framework. behavex currently doesn't support Allure out of the box. There are a couple of issues and discussions about this raised by others, please go through those.
Let me know if you need any help.
@prasanmgc
Thank you. Below is the method I'm using, but I'm unable to see the screenshots in the HTML report. Would you be able to help me out? (We can discuss in the #166 chat.)
def after_step(context, step):
    screenshot_name = getattr(step, 'name', 'Unnamed Step')
    screenshot_file = os.path.normpath(os.path.join(context.evidence_path, f'{screenshot_name}.png'))
    context.driver.save_screenshot(screenshot_file)
    image_attachments.attach_image_file(context, screenshot_file, 'Screenshot')
@Yaswanthbobbu Have you added the line below in your before hook?
image_attachments.set_attachments_condition(context, AttachmentsCondition.ALWAYS)
In your case, you can change it to the one below:
image_attachments.set_attachments_condition(context, AttachmentsCondition.ONLY_ON_FAILURE)
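For reference, a minimal pairing of the two hooks could look like the sketch below. It only uses the behavex_images calls already shown in this thread; context.evidence_path and the file naming are assumptions about your project, so adjust them to your layout.

# Minimal sketch, assuming context.evidence_path is set elsewhere in your hooks.
import os

from behavex_images import image_attachments
from behavex_images.image_attachments import AttachmentsCondition


def before_all(context):
    # Only include attached images in the report when something fails
    image_attachments.set_attachments_condition(context, AttachmentsCondition.ONLY_ON_FAILURE)


def after_step(context, step):
    if step.status == 'failed':
        # Save the screenshot to the evidence folder and attach it to the report
        screenshot_file = os.path.normpath(
            os.path.join(context.evidence_path, f'{step.name[:30]}.png'))
        context.driver.save_screenshot(screenshot_file)
        image_attachments.attach_image_file(context, screenshot_file, 'Screenshot')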
Me too, I'm facing the same issue with behavex==3.0.7 (the latest version). It looks like when we run test scenarios in parallel, there is a concurrency issue accessing the outputs\logs\image_attachments_utils directory, which is created by default when execution starts, even before "before_all" runs. How do we control this? A solution would be to append the process id to the directory name, something like outputs\logs\image_attachments_utils_{process_id}, but I'm not sure how to control this. Can we do it using any behavex config file? Any suggestions or help would be appreciated.
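Something along these lines is what I mean, purely as an illustration of the per-process suffix idea; as far as I can tell behavex doesn't expose such a path through its configuration, so the directory name here is hypothetical:

import os

# Hypothetical per-process results directory, so parallel workers don't collide
# on the same folder. behavex itself would have to honour this path for it to
# help, so this only sketches the naming scheme.
attachments_dir = os.path.join('outputs', 'logs', f'image_attachments_utils_{os.getpid()}')
os.makedirs(attachments_dir, exist_ok=True)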
Hi @prasanmgc, I just wanted to ask you to check with the latest behavex-images version, as it fixes a critical compatibility issue with numpy. I believe the reported issue should be fixed with behavex-images 3.0.8. Please keep us posted. Thanks!
Describe the bug
When running scenarios in parallel, I am getting the below error from behavex.
ERROR:root:Unexpected error in 'before_all (behavex-images)' function: ERROR:root:EOF when reading a line
Another program is using the files in path Z:\Prasanna\Repos\pyselenium_poc\reports\outputs\logs\image_attachments_utils. Please, close the program and press 'c' to continue or 'a' for abort. Retry number 2
.[WinError 183] Cannot create a file when that file already exists: 'Z:\Prasanna\Repos\pyselenium_poc\reports\outputs\logs\image_attachments_utils'
To Reproduce
Steps to reproduce the behavior:
Expected behavior
The error shouldn't appear: the first process that is started creates the relevant results directory, and subsequent processes should check whether the directory already exists and proceed.
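To illustrate that expectation (this is not behavex's actual code, just the standard race-safe directory creation pattern that avoids the [WinError 183] error shown above):

import os

def ensure_results_dir(path: str) -> None:
    # exist_ok=True lets the first worker create the directory and makes the
    # call a no-op for every other worker, instead of raising FileExistsError
    # ([WinError 183] on Windows) when processes race each other.
    os.makedirs(path, exist_ok=True)

ensure_results_dir(os.path.join('reports', 'outputs', 'logs', 'image_attachments_utils'))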
Screenshots
NA
Additional context
Directory creation for report generation by behavex should be enhanced for parallel execution. The above error doesn't have any impact on the execution, though it turns the console log output red.