UNPKG

tui-grid

Version:

TOAST UI Grid: Powerful data grid control supported by TOAST UI

288 lines (276 loc) 10.9 kB
'use strict'; var path = require('path'); var VisualRegressionCompare = require('wdio-visual-regression-service/compare'); /** * Returns the screenshot name * @param {String} basePath - base path * @returns {String} */ function getScreenshotName(basePath) { return function(context) { var testName = context.test.title.replace(/\s+/g, '-'); var browserName = context.browser.name; var browserVersion = parseInt(context.browser.version, 10); var subDir = browserName + '_v' + browserVersion; var fileName = testName + '.png'; return path.join(basePath, subDir, fileName); }; } exports.config = { // // ===================== // Server Configurations // ===================== // Host address of the running Selenium server. This information is usually obsolete as // WebdriverIO automatically connects to localhost. Also, if you are using one of the // supported cloud services like Sauce Labs, Browserstack, or Testing Bot you don't // need to define host and port information because WebdriverIO can figure that out // according to your user and key information. However, if you are using a private Selenium // backend you should define the host address, port, and path here. // host: 'fe.nhnent.com', port: 4444, path: '/wd/hub', // // ================== // Specify Test Files // ================== // Define which test specs should run. The pattern is relative to the directory // from which `wdio` was called. Notice that, if you are calling `wdio` from an // NPM script (see https://docs.npmjs.com/cli/run-script) then the current working // directory is where your package.json resides, so `wdio` will be called from there. // specs: [ './test/e2e/js/**/*.spec.js' ], // Patterns to exclude. exclude: [ // 'path/to/excluded/files' ], // // ============ // Capabilities // ============ // Define your capabilities here. WebdriverIO can run multiple capabilities at the same // time. Depending on the number of capabilities, WebdriverIO launches several test // sessions. 
Within your capabilities you can overwrite the spec and exclude options in // order to group specific specs to a specific capability. // // First, you can define how many instances should be started at the same time. Let's // say you have 3 different capabilities (Chrome, Firefox, and Safari) and you have // set maxInstances to 1; wdio will spawn 3 processes. Therefore, if you have 10 spec // files and you set maxInstances to 10, all spec files will get tested at the same time // and 30 processes will get spawned. The property handles how many capabilities // from the same test should run tests. // maxInstances: 2, // // If you have trouble getting all important capabilities together, check out the // Sauce Labs platform configurator - a great tool to configure your capabilities: // https://docs.saucelabs.com/reference/platforms-configurator // capabilities: [ // { // browserName: 'internet explorer', // version: 9 // }, // { // browserName: 'internet explorer', // version: 10 // }, { // maxInstances can get overwritten per capability. So if you have an in-house Selenium // grid with only 5 firefox instances available you can make sure that not more than // 5 instances get started at a time. maxInstances: 1, browserName: 'internet explorer', version: 11 }, { maxInstances: 1, browserName: 'chrome' } ], // // =================== // Test Configurations // =================== // Define all options that are relevant for the WebdriverIO instance here // // By default WebdriverIO commands are executed in a synchronous way using // the wdio-sync package. If you still want to run your tests in an async way // e.g. using promises you can set the sync option to false. sync: true, // // Level of logging verbosity: silent | verbose | command | data | result | error logLevel: 'error', // // Enables colors for log output. coloredLogs: true, // // If you only want to run your tests until a specific amount of tests have failed use // bail (default is 0 - don't bail, run all tests). 
bail: 0, // // Saves a screenshot to a given path if a command fails. screenshotPath: './screenshots/error/', // // Set a base URL in order to shorten url command calls. If your url parameter starts // with "/", then the base url gets prepended. baseUrl: 'http://fe.nhnent.com:4567', // baseUrl: 'http://10.77.95.186:4567', // // Default timeout for all waitFor* commands. waitforTimeout: 10000, // // Default timeout in milliseconds for request // if Selenium Grid doesn't send response connectionRetryTimeout: 90000, // // Default request retries count connectionRetryCount: 3, // // Initialize the browser instance with a WebdriverIO plugin. The object should have the // plugin name as key and the desired plugin options as properties. Make sure you have // the plugin installed before running any tests. The following plugins are currently // available: // WebdriverCSS: https://github.com/webdriverio/webdrivercss // WebdriverRTC: https://github.com/webdriverio/webdriverrtc // Browserevent: https://github.com/webdriverio/browserevent // plugins: { // webdrivercss: { // screenshotRoot: 'my-shots', // failedComparisonsRoot: 'diffs', // misMatchTolerance: 0.05, // screenWidth: [320,480,640,1024] // }, // webdriverrtc: {}, // browserevent: {} // }, // // Test runner services // Services take over a specific job you don't want to take care of. They enhance // your test setup with almost no effort. Unlike plugins, they don't add new // commands. Instead, they hook themselves up into the test process. 
services: ['static-server', 'visual-regression'], staticServerFolders: [ { mount: '/examples', path: './examples' }, { mount: '/dist', path: './dist' }, { mount: '/lib', path: './lib' } ], staticServerPort: 4567, staticServerLog: true, visualRegression: { compare: new VisualRegressionCompare.LocalCompare({ referenceName: getScreenshotName(path.join(process.cwd(), 'screenshots/reference')), screenshotName: getScreenshotName(path.join(process.cwd(), 'screenshots/screen')), diffName: getScreenshotName(path.join(process.cwd(), 'screenshots/diff')) }), misMatchTolerance: 0.1 }, // Framework you want to run your specs with. // The following are supported: Mocha, Jasmine, and Cucumber // see also: http://webdriver.io/guide/testrunner/frameworks.html // // Make sure you have the wdio adapter package for the specific framework installed // before running any tests. framework: 'jasmine', // // Test reporter for stdout. // The only one supported by default is 'dot' // see also: http://webdriver.io/guide/testrunner/reporters.html // reporters: ['dot'], // // Options to be passed to Jasmine. jasmineNodeOpts: { // // Jasmine default timeout defaultTimeoutInterval: 30000, // // The Jasmine framework allows interception of each assertion in order to log the state of the application // or website depending on the result. For example, it is pretty handy to take a screenshot every time // an assertion fails. expectationResultHandler: function(passed, assertion) { // eslint-disable-line no-unused-vars // do something } } // // ===== // Hooks // ===== // WebdriverIO provides several hooks you can use to interfere with the test process in order to enhance // it and to build services around it. You can either apply a single function or an array of // methods to it. If one of them returns with a promise, WebdriverIO will wait until that promise got // resolved to continue. // // Gets executed once before all workers get launched. 
// onPrepare: function (config, capabilities) { // }, // // Gets executed just before initialising the webdriver session and test framework. It allows you // to manipulate configurations depending on the capability or spec. // beforeSession: function (config, capabilities, specs) { // }, // // Gets executed before test execution begins. At this point you can access all global // variables, such as `browser`. It is the perfect place to define custom commands. // before: function (capabilities, specs) { // }, // // Hook that gets executed before the suite starts // beforeSuite: function (suite) { // }, // // Hook that gets executed _before_ a hook within the suite starts (e.g. runs before calling // beforeEach in Mocha) // beforeHook: function () { // }, // // Hook that gets executed _after_ a hook within the suite starts (e.g. runs after calling // afterEach in Mocha) // afterHook: function () { // }, // // Function to be executed before a test (in Mocha/Jasmine) or a step (in Cucumber) starts. // beforeTest: function (test) { // }, // // Runs before a WebdriverIO command gets executed. // beforeCommand: function (commandName, args) { // }, // // Runs after a WebdriverIO command gets executed // afterCommand: function (commandName, args, result, error) { // }, // // Function to be executed after a test (in Mocha/Jasmine) or a step (in Cucumber) starts. // afterTest: function (test) { // }, // // Hook that gets executed after the suite has ended // afterSuite: function (suite) { // }, // // Gets executed after all tests are done. You still have access to all global variables from // the test. // after: function (result, capabilities, specs) { // }, // // Gets executed right after terminating the webdriver session. // afterSession: function (config, capabilities, specs) { // }, // // Gets executed after all workers got shut down and the process is about to exit. It is not // possible to defer the end of the process using a promise. 
// onComplete: function(exitCode) { // } };