Closed — matthewjhayhurst closed this issue 3 years ago.
This is my wdio.conf.js:
// Config-file dependencies.
// FIX: the original mixed CommonJS require() with an ES-module `import`
// statement in the same file; that is a syntax error when Node loads this
// config directly (the @babel/register hook in mochaOpts only transpiles
// spec files, not this file). Use require() throughout.
const config = require('./Lib/config');
const moment = require('moment');
const { ReportAggregator, HtmlReporter } = require('@rpii/wdio-html-reporter');

// Run Chrome headless (no GPU) when the suite is launched with `--headless`.
const chromeOptions = {};
if (process.argv.includes('--headless')) {
    chromeOptions.args = ['--headless', '--disable-gpu'];
}
exports.config = {
//
// ====================
// Runner Configuration
// ====================
//
// WebdriverIO allows it to run your tests in arbitrary locations (e.g. locally or
// on a remote machine).
runner: 'local',
//
// =====================
// Server Configurations
// =====================
// Host address of the running Selenium server. This information is usually obsolete as
// WebdriverIO automatically connects to localhost. Also, if you are using one of the
// supported cloud services like Sauce Labs, Browserstack, or Testing Bot you don't
// need to define host and port information because WebdriverIO can figure that out
// according to your user and key information. However, if you are using a private Selenium
// backend you should define the host address, port, and path here.
//
hostname: 'hub-cloud.browserstack.com',
port: 80,
protocol: 'http',
//
// =================
// Service Providers
// =================
// WebdriverIO supports Sauce Labs, Browserstack, and Testing Bot (other cloud providers
// should work too though). These services define specific user and key (or access key)
// values you need to put in here in order to connect to these services.
//
user: config.BROWSERSTACK_USER,
key: config.BROWSERSTACK_KEY,
//
// If you run your tests on Sauce Labs you can specify the region you want to run your tests
// in via the region
property. Available short handles for regions are us
(default) and eu
.
// These regions are used for the Sauce Labs VM cloud and the Sauce Labs Real Device Cloud.
// If you don't provide the region it will default for the us
//
// ==================
// Specify Test Files
// ==================
// Define which test specs should run. The pattern is relative to the directory
// from which `wdio` was called. Notice that, if you are calling `wdio` from an
// NPM script (see https://docs.npmjs.com/cli/run-script) then the current working
// directory is where your package.json resides, so `wdio` will be called from there.
//
specs: [
],
// Patterns to exclude.
exclude: [
'./Tests/DEV_tests/Services/devChangeEmailErrors.js',
'./Tests/DEV_tests/Services/devChangeEmailFlow.js',
'./Tests/DEV_tests/Services/devCreateAccount.js',
'./Tests/DEV_tests/Services/devCreateAccountErrors.js',
'./Tests/DEV_tests/Services/devInviteUser.js',
'./Tests/DEV_tests/Services/devForgottenPasswordErrors.js',
'./Tests/DEV_tests/Services/devForgottenPasswordFlow.js',
],
//
// ============
// Capabilities
// ============
// Define your capabilities here. WebdriverIO can run multiple capabilities at the same
// time. Depending on the number of capabilities, WebdriverIO launches several test
// sessions. Within your capabilities you can overwrite the spec and exclude options in
// order to group specific specs to a specific capability.
//
// First, you can define how many instances should be started at the same time. Let's
// say you have 3 different capabilities (Chrome, Firefox, and Safari) and you have
// set maxInstances to 1; wdio will spawn 3 processes. Therefore, if you have 10 spec
// files and you set maxInstances to 10, all spec files will get tested at the same time
// and 30 processes will get spawned. The property handles how many capabilities
// from the same test should run tests.
//
maxInstances: 8,
//
// If you have trouble getting all important capabilities together, check out the
// Sauce Labs platform configurator - a great tool to configure your capabilities:
// https://docs.saucelabs.com/reference/platforms-configurator
//
capabilities: [{
// "os" : "OS X",
// "os_version" : "Catalina",
// "browserName" : "Firefox",
// "browser_version" : "79.0 beta",
// 'resolution': '1024x768',
// 'buildName': Date.now().toString(),
// maxInstances can get overwritten per capability. So if you have an in-house Selenium
// grid with only 5 firefox instances available you can make sure that not more than
// 5 instances get started at a time.
maxInstances: config.maxInstance,
//
browserName: 'chrome',
'goog:chromeOptions': chromeOptions,
// If outputDir is provided WebdriverIO can capture driver session logs
// it is possible to configure which logTypes to include/exclude.
// excludeDriverLogs: ['*'], // pass '*' to exclude all driver session logs
// excludeDriverLogs: ['bugreport', 'server'],
}],
//
// ===================
// Test Configurations
// ===================
// Define all options that are relevant for the WebdriverIO instance here
//
// Level of logging verbosity: trace | debug | info | warn | error | silent
logLevel: config.logLevel,
//
// Set specific log levels per logger
// loggers:
// - webdriver, webdriverio
// - @wdio/applitools-service, @wdio/browserstack-service, @wdio/devtools-service, @wdio/sauce-service
// - @wdio/mocha-framework, @wdio/jasmine-framework
// - @wdio/local-runner, @wdio/lambda-runner
// - @wdio/sumologic-reporter
// - @wdio/cli, @wdio/config, @wdio/sync, @wdio/utils
// Level of logging verbosity: trace | debug | info | warn | error | silent
// logLevels: {
// webdriver: 'info',
// '@wdio/applitools-service': 'info'
// },
//
// If you only want to run your tests until a specific amount of tests have failed use
// bail (default is 0 - don't bail, run all tests).
bail: config.bail,
//
// Set a base URL in order to shorten url command calls. If your `url` parameter starts
// with `/`, the base url gets prepended, not including the path portion of your baseUrl.
// If your `url` parameter starts without a scheme or `/` (like `some/path`), the base url
// gets prepended directly.
baseUrl: 'http://localhost',
//
// Default timeout for all waitFor* commands.
waitforTimeout: 15000,
//
// Default timeout in milliseconds for request
// if browser driver or grid doesn't send response
connectionRetryTimeout: 120000,
//
// Default request retries count
connectionRetryCount: 3,
//
// Test runner services
// Services take over a specific job you don't want to take care of. They enhance
// your test setup with almost no effort. Unlike plugins, they don't add new
// commands. Instead, they hook themselves up into the test process.
// NOTE(review): only 'chromedriver' is active here even though hostname/user/key
// above target BrowserStack — confirm which backend is intended.
services: [ 'chromedriver'
// ['browserstack', {
// browserstackLocal: true
// }]
],
// Framework you want to run your specs with.
// The following are supported: Mocha, Jasmine, and Cucumber
// see also: https://webdriver.io/docs/frameworks.html
//
// Make sure you have the wdio adapter package for the specific framework installed
// before running any tests.
framework: 'mocha',
//
// The number of times to retry the entire specfile when it fails as a whole
// NOTE(review): specFileRetries combined with custom reporters has been reported
// to trigger ERR_STREAM_WRITE_AFTER_END in parallel runs (the retried spec writes
// to an already-closed runner stream) — suspect this before blaming the reporter.
specFileRetries: 1,
//
// Whether or not retried specfiles should be retried immediately or deferred to the end of the queue
// specFileRetriesDeferred: false,
//
// Test reporters for stdout and HTML output.
// The only one supported by default is 'dot'
// see also: https://webdriver.io/docs/dot-reporter.html
reporters: ['spec',
[HtmlReporter, {
debug: true,
// per-worker report fragments land here; aggregated by ReportAggregator in onPrepare/onComplete
outputDir: './reports/html-reports/',
filename: 'report.html',
reportTitle: 'Report for Individual Test',
//to show the report in a browser when done
showInBrowser: false,
//to turn on screenshots after every test
useOnAfterCommandForScreenshot: false,
// to use the template override option, can point to your own file in the test project:
// templateFilename: path.resolve(__dirname, '../template/wdio-html-reporter-alt-template.hbs'),
// to add custom template functions for your custom template:
// templateFuncs: {
// addOne: (v) => {
// return v+1;
// },
// },
}
]
],
//debug: true,
//execArgv: ['--inspect-brk=127.0.0.1:5859'],
//
// Options to be passed to Mocha.
// See the full list at http://mochajs.org/
mochaOpts: {
// Babel setup — transpiles spec files only, not this config file
require: ['@babel/register'],
ui: 'bdd',
timeout: 60000,
bail: true
},
//
// =====
// Hooks
// =====
// WebdriverIO provides several hooks you can use to interfere with the test process in order to enhance
// it and to build services around it. You can either apply a single function or an array of
// methods to it. If one of them returns with a promise, WebdriverIO will wait until that promise got
// resolved to continue.
/**
 * Gets executed once before all workers get launched.
 * @param {Object} config wdio configuration object
 * @param {Array.<Object>} capabilities list of capabilities details
 */
onPrepare: function (config, capabilities) {
const timestamp = moment().format('DDMMYYYY_HHmm');
let reportAggregator = new ReportAggregator({
outputDir: './reports/html-reports/',
filename: 'Test_Automation_Report_'+timestamp+'.html',
reportTitle: 'Test Automation Report_'+timestamp,
browserName : 'Chrome',
// to use the template override option, can point to your own file in the test project:
// templateFilename: path.resolve(__dirname, '../template/wdio-html-reporter-alt-template.hbs')
});
reportAggregator.clean() ;
global.reportAggregator = reportAggregator;
},
/**
 * Gets executed before a worker process is spawned and can be used to initialise specific service
 * for that worker as well as modify runtime environments in an async fashion.
 * @param {String} cid capability id (e.g 0-0)
 * @param {[type]} caps object containing capabilities for session that will be spawned in the worker
 * @param {[type]} specs specs to be run in the worker process
 * @param {[type]} args object that will be merged with the main configuration once worker is initialised
 * @param {[type]} execArgv list of string arguments passed to the worker process
 */
// onWorkerStart: function (cid, caps, specs, args, execArgv) {
// },
/**
 * Gets executed just before initialising the webdriver session and test framework. It allows you
 * to manipulate configurations depending on the capability or spec.
 * @param {Object} config wdio configuration object
 * @param {Array.<Object>} capabilities list of capabilities details
 * @param {Array.<String>} specs List of spec file paths that are to be run
 */
// beforeSession: function (config, capabilities, specs) {
// },
/**
 * Gets executed before test execution begins. At this point you can access to all global
 * variables like `browser`. It is the perfect place to define custom commands.
 * @param {Array.<Object>} capabilities list of capabilities details
 * @param {Array.<String>} specs List of spec file paths that are to be run
 */
// before: function (capabilities, specs) {
// },
/**
 * Runs before a WebdriverIO command gets executed.
 * @param {String} commandName hook command name
 * @param {Array} args arguments that command would receive
 */
// beforeCommand: function (commandName, args) {
// },
/**
 * Hook that gets executed before the suite starts
 * @param {Object} suite suite details
 */
// beforeSuite: function (suite) {
// },
/**
 * Function to be executed before a test (in Mocha/Jasmine) starts.
 */
// beforeTest: function (test, context) {
// },
/**
 * Hook that gets executed _before_ a hook within the suite starts (e.g. runs before calling
 * beforeEach in Mocha)
 */
// beforeHook: function (test, context) {
// },
/**
 * Hook that gets executed _after_ a hook within the suite starts (e.g. runs after calling
 * afterEach in Mocha)
 */
// afterHook: function (test, context, { error, result, duration, passed, retries }) {
// },
/**
 * Function to be executed after a test (in Mocha/Jasmine).
 */
afterTest: function (test, context, { passed }) {
const path = require('path');
if(!passed){
const timestamp = moment().format('DDMMYYYY_HHmm');
let filepathArray = (test.file).split('\\');
let filename = filepathArray[filepathArray.length - 1].split('.js')[0];
let screenshotName = filename + "_" + test.title + "_FAILED_"+timestamp;
const filepath = path.join('reports/html-reports/screenshots/', screenshotName + '.png');
browser.saveScreenshot(filepath);
process.emit('test:screenshot', filepath);
}
},
/**
 * Hook that gets executed after the suite has ended
 * @param {Object} suite suite details
 */
// NOTE(review): the afterSuite hook documented above has no stub below it —
// add `// afterSuite: function (suite) { },` if it is ever needed.
/**
 * Runs after a WebdriverIO command gets executed
 * @param {String} commandName hook command name
 * @param {Array} args arguments that command would receive
 * @param {Number} result 0 - command success, 1 - command error
 * @param {Object} error error object if any
 */
// afterCommand: function (commandName, args, result, error) {
// },
/**
 * Gets executed after all tests are done. You still have access to all global variables from
 * the test.
 * @param {Number} result 0 - test pass, 1 - test fail
 * @param {Array.<Object>} capabilities list of capabilities details
 * @param {Array.<String>} specs List of spec file paths that ran
 */
// after: function (result, capabilities, specs) {
// },
/**
 * Gets executed right after terminating the webdriver session.
 * @param {Object} config wdio configuration object
 * @param {Array.<Object>} capabilities list of capabilities details
 * @param {Array.<String>} specs List of spec file paths that ran
 */
// afterSession: function (config, capabilities, specs) {
// },
/**
 * Gets executed after all workers got shut down and the process is about to exit. An error
 * thrown in the onComplete hook will result in the test run failing.
 * @param {Object} exitCode 0 - success, 1 - fail
 * @param {Object} config wdio configuration object
 * @param {Array.<Object>} capabilities list of capabilities details
 * @param {<Object>} results object containing test results
 */
onComplete: function(exitCode, config, capabilities, results) {
(async () => {
await global.reportAggregator.createReport();
})();
},
/**
* Gets executed when a refresh happens.
* @param {String} oldSessionId session ID of the old session
* @param {String} newSessionId session ID of the new session
*/
//onReload: function(oldSessionId, newSessionId) {
//}
}
Sorry, I don't think this issue is being caused by wdio-html-reporter... closing.
Hi, HtmlReporter/ReportAggregator work fine when I run the tests one at a time.
However, when I run in parallel I get the following error:
Error [ERR_STREAM_WRITE_AFTER_END]: write after end at writeAfterEnd (_stream_writable.js:266:14) at RunnerStream.Writable.write (_stream_writable.js:315:5) at RunnerTransformStream.ondata (_stream_readable.js:719:22) at RunnerTransformStream.emit (events.js:314:20) at RunnerTransformStream.EventEmitter.emit (domain.js:483:12) at addChunk (_stream_readable.js:298:12) at readableAddChunk (_stream_readable.js:273:9) at RunnerTransformStream.Readable.push (_stream_readable.js:214:10) at RunnerTransformStream.Transform.push (_stream_transform.js:152:32) at RunnerTransformStream._transform (C:\dev\repositories\login.dfe.ui-tests\node_modules\@wdio\local-runner\build\transformStream.js:15:14)
If I remove the HtmlReporter/ReportAggregator code - the tests run fine in parallel.