Merge branch 'master' into MM-67140-session-validation-on-logout

commit 9cd44a8f6e
Mattermost Build committed 2026-02-03 00:19:21 +02:00 (via GitHub)
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
250 changed files with 65426 additions and 2777 deletions


@ -0,0 +1,2 @@
node_modules/
.env


@ -0,0 +1,48 @@
name: Calculate Cypress Results
description: Calculate Cypress test results with optional merge of retest results
author: Mattermost
inputs:
original-results-path:
description: Path to the original Cypress results directory (e.g., e2e-tests/cypress/results)
required: true
retest-results-path:
description: Path to the retest Cypress results directory (optional - if not provided, only calculates from original)
required: false
write-merged:
description: Whether to write merged results back to the original directory (default true)
required: false
default: "true"
outputs:
# Merge outputs
merged:
description: Whether merge was performed (true/false)
# Calculation outputs (same as calculate-cypress-test-results)
passed:
description: Number of passed tests
failed:
description: Number of failed tests
pending:
description: Number of pending/skipped tests
total_specs:
description: Total number of spec files
commit_status_message:
description: Message for commit status (e.g., "X failed, Y passed in Z spec files")
failed_specs:
description: Comma-separated list of failed spec files (for retest)
failed_specs_count:
description: Number of failed spec files
failed_tests:
description: Markdown table rows of failed tests (for GitHub summary)
total:
description: Total number of tests (passed + failed)
pass_rate:
description: Pass rate percentage (e.g., "100.00")
color:
description: Color for webhook based on pass rate (green=100%, yellow=99%+, orange=98%+, red=<98%)
runs:
using: node24
main: dist/index.js

File diff suppressed because one or more lines are too long


@ -0,0 +1,15 @@
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
preset: "ts-jest",
testEnvironment: "node",
testMatch: ["**/*.test.ts"],
moduleFileExtensions: ["ts", "js"],
transform: {
"^.+\\.ts$": [
"ts-jest",
{
useESM: false,
},
],
},
};

File diff suppressed because it is too large


@ -0,0 +1,27 @@
{
"name": "calculate-cypress-results",
"private": true,
"version": "0.1.0",
"main": "dist/index.js",
"scripts": {
"build": "tsup",
"prettier": "npx prettier --write \"src/**/*.ts\"",
"local-action": "local-action . src/main.ts .env",
"test": "jest --verbose",
"test:watch": "jest --watch --verbose",
"test:silent": "jest --silent",
"tsc": "tsc -b"
},
"dependencies": {
"@actions/core": "3.0.0"
},
"devDependencies": {
"@github/local-action": "7.0.0",
"@types/jest": "30.0.0",
"@types/node": "25.2.0",
"jest": "30.2.0",
"ts-jest": "29.4.6",
"tsup": "8.5.1",
"typescript": "5.9.3"
}
}


@ -0,0 +1,3 @@
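// Action entrypoint: the bundled dist/index.js starts here and invokes run() immediately.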
import { run } from "./main";
run();


@ -0,0 +1,99 @@
import * as core from "@actions/core";
import {
loadSpecFiles,
mergeResults,
writeMergedResults,
calculateResultsFromSpecs,
} from "./merge";
export async function run(): Promise<void> {
const originalPath = core.getInput("original-results-path", {
required: true,
});
const retestPath = core.getInput("retest-results-path"); // Optional
const shouldWriteMerged = core.getInput("write-merged") !== "false"; // Default true
core.info(`Original results: ${originalPath}`);
core.info(`Retest results: ${retestPath || "(not provided)"}`);
let merged = false;
let specs;
if (retestPath) {
// Check if retest path has results
const retestSpecs = await loadSpecFiles(retestPath);
if (retestSpecs.length > 0) {
core.info(`Found ${retestSpecs.length} retest spec files`);
// Merge results
core.info("Merging results...");
const mergeResult = await mergeResults(originalPath, retestPath);
specs = mergeResult.specs;
merged = true;
core.info(`Retested specs: ${mergeResult.retestFiles.join(", ")}`);
core.info(`Total merged specs: ${specs.length}`);
// Write merged results back to original directory
if (shouldWriteMerged) {
core.info("Writing merged results to original directory...");
const writeResult = await writeMergedResults(
originalPath,
retestPath,
);
core.info(`Updated files: ${writeResult.updatedFiles.length}`);
core.info(
`Removed duplicates: ${writeResult.removedFiles.length}`,
);
}
} else {
core.warning(
`No retest results found at ${retestPath}, using original only`,
);
specs = await loadSpecFiles(originalPath);
}
} else {
core.info("No retest path provided, using original results only");
specs = await loadSpecFiles(originalPath);
}
core.info(`Calculating results from ${specs.length} spec files...`);
// Handle case where no results found
if (specs.length === 0) {
core.setFailed("No Cypress test results found");
return;
}
// Calculate all outputs from final results
const calc = calculateResultsFromSpecs(specs);
// Log results
core.startGroup("Final Results");
core.info(`Passed: ${calc.passed}`);
core.info(`Failed: ${calc.failed}`);
core.info(`Pending: ${calc.pending}`);
core.info(`Total: ${calc.total}`);
core.info(`Pass Rate: ${calc.passRate}%`);
core.info(`Color: ${calc.color}`);
core.info(`Spec Files: ${calc.totalSpecs}`);
core.info(`Failed Specs Count: ${calc.failedSpecsCount}`);
core.info(`Commit Status Message: ${calc.commitStatusMessage}`);
core.info(`Failed Specs: ${calc.failedSpecs || "none"}`);
core.endGroup();
// Set all outputs
core.setOutput("merged", merged.toString());
core.setOutput("passed", calc.passed);
core.setOutput("failed", calc.failed);
core.setOutput("pending", calc.pending);
core.setOutput("total_specs", calc.totalSpecs);
core.setOutput("commit_status_message", calc.commitStatusMessage);
core.setOutput("failed_specs", calc.failedSpecs);
core.setOutput("failed_specs_count", calc.failedSpecsCount);
core.setOutput("failed_tests", calc.failedTests);
core.setOutput("total", calc.total);
core.setOutput("pass_rate", calc.passRate);
core.setOutput("color", calc.color);
}


@ -0,0 +1,271 @@
import { calculateResultsFromSpecs } from "./merge";
import type { ParsedSpecFile, MochawesomeResult } from "./types";
/**
* Helper to create a mochawesome result for testing
*/
function createMochawesomeResult(
specFile: string,
tests: { title: string; state: "passed" | "failed" | "pending" }[],
): MochawesomeResult {
return {
stats: {
suites: 1,
tests: tests.length,
passes: tests.filter((t) => t.state === "passed").length,
pending: tests.filter((t) => t.state === "pending").length,
failures: tests.filter((t) => t.state === "failed").length,
start: new Date().toISOString(),
end: new Date().toISOString(),
duration: 1000,
testsRegistered: tests.length,
passPercent: 0,
pendingPercent: 0,
other: 0,
hasOther: false,
skipped: 0,
hasSkipped: false,
},
results: [
{
uuid: "uuid-1",
title: specFile,
fullFile: `/app/e2e-tests/cypress/tests/integration/${specFile}`,
file: `tests/integration/${specFile}`,
beforeHooks: [],
afterHooks: [],
tests: tests.map((t, i) => ({
title: t.title,
fullTitle: `${specFile} > ${t.title}`,
timedOut: null,
duration: 500,
state: t.state,
speed: "fast",
pass: t.state === "passed",
fail: t.state === "failed",
pending: t.state === "pending",
context: null,
code: "",
err: t.state === "failed" ? { message: "Test failed" } : {},
uuid: `test-uuid-${i}`,
parentUUID: "uuid-1",
isHook: false,
skipped: false,
})),
suites: [],
// Map each state to the uuids assigned above (test-uuid-<original index>),
// keeping the original index rather than the filtered one.
passes: tests
.map((t, i) => (t.state === "passed" ? `test-uuid-${i}` : null))
.filter((id): id is string => id !== null),
failures: tests
.map((t, i) => (t.state === "failed" ? `test-uuid-${i}` : null))
.filter((id): id is string => id !== null),
pending: tests
.map((t, i) => (t.state === "pending" ? `test-uuid-${i}` : null))
.filter((id): id is string => id !== null),
skipped: [],
duration: 1000,
root: true,
rootEmpty: false,
_timeout: 60000,
},
],
};
}
function createParsedSpecFile(
specFile: string,
tests: { title: string; state: "passed" | "failed" | "pending" }[],
): ParsedSpecFile {
return {
filePath: `/path/to/${specFile}.json`,
specPath: `tests/integration/${specFile}`,
result: createMochawesomeResult(specFile, tests),
};
}
describe("calculateResultsFromSpecs", () => {
it("should calculate all outputs correctly for passing results", () => {
const specs: ParsedSpecFile[] = [
createParsedSpecFile("login.spec.ts", [
{
title: "should login with valid credentials",
state: "passed",
},
]),
createParsedSpecFile("messaging.spec.ts", [
{ title: "should send a message", state: "passed" },
]),
];
const calc = calculateResultsFromSpecs(specs);
expect(calc.passed).toBe(2);
expect(calc.failed).toBe(0);
expect(calc.pending).toBe(0);
expect(calc.total).toBe(2);
expect(calc.passRate).toBe("100.00");
expect(calc.color).toBe("#43A047"); // green
expect(calc.totalSpecs).toBe(2);
expect(calc.failedSpecs).toBe("");
expect(calc.failedSpecsCount).toBe(0);
expect(calc.commitStatusMessage).toBe("2 passed in 2 spec files");
});
it("should calculate all outputs correctly for results with failures", () => {
const specs: ParsedSpecFile[] = [
createParsedSpecFile("login.spec.ts", [
{
title: "should login with valid credentials",
state: "passed",
},
]),
createParsedSpecFile("channels.spec.ts", [
{ title: "should create a channel", state: "failed" },
]),
];
const calc = calculateResultsFromSpecs(specs);
expect(calc.passed).toBe(1);
expect(calc.failed).toBe(1);
expect(calc.pending).toBe(0);
expect(calc.total).toBe(2);
expect(calc.passRate).toBe("50.00");
expect(calc.color).toBe("#F44336"); // red
expect(calc.totalSpecs).toBe(2);
expect(calc.failedSpecs).toBe("tests/integration/channels.spec.ts");
expect(calc.failedSpecsCount).toBe(1);
expect(calc.commitStatusMessage).toBe(
"1 failed, 1 passed in 2 spec files",
);
expect(calc.failedTests).toContain("should create a channel");
});
it("should handle pending tests correctly", () => {
const specs: ParsedSpecFile[] = [
createParsedSpecFile("login.spec.ts", [
{ title: "should login", state: "passed" },
{ title: "should logout", state: "pending" },
]),
];
const calc = calculateResultsFromSpecs(specs);
expect(calc.passed).toBe(1);
expect(calc.failed).toBe(0);
expect(calc.pending).toBe(1);
expect(calc.total).toBe(1); // Total excludes pending
expect(calc.passRate).toBe("100.00");
});
it("should limit failed tests to 10 entries", () => {
const specs: ParsedSpecFile[] = [
createParsedSpecFile("big-test.spec.ts", [
{ title: "test 1", state: "failed" },
{ title: "test 2", state: "failed" },
{ title: "test 3", state: "failed" },
{ title: "test 4", state: "failed" },
{ title: "test 5", state: "failed" },
{ title: "test 6", state: "failed" },
{ title: "test 7", state: "failed" },
{ title: "test 8", state: "failed" },
{ title: "test 9", state: "failed" },
{ title: "test 10", state: "failed" },
{ title: "test 11", state: "failed" },
{ title: "test 12", state: "failed" },
]),
];
const calc = calculateResultsFromSpecs(specs);
expect(calc.failed).toBe(12);
expect(calc.failedTests).toContain("...and 2 more failed tests");
});
});
describe("merge simulation", () => {
it("should produce correct results when merging original with retest", () => {
// Simulate original: 2 passed, 1 failed
const originalSpecs: ParsedSpecFile[] = [
createParsedSpecFile("login.spec.ts", [
{ title: "should login", state: "passed" },
]),
createParsedSpecFile("messaging.spec.ts", [
{ title: "should send message", state: "passed" },
]),
createParsedSpecFile("channels.spec.ts", [
{ title: "should create channel", state: "failed" },
]),
];
// Verify original has failure
const originalCalc = calculateResultsFromSpecs(originalSpecs);
expect(originalCalc.passed).toBe(2);
expect(originalCalc.failed).toBe(1);
expect(originalCalc.passRate).toBe("66.67");
// Simulate retest: channels.spec.ts now passes
const retestSpec = createParsedSpecFile("channels.spec.ts", [
{ title: "should create channel", state: "passed" },
]);
// Simulate merge: replace original channels.spec.ts with retest
const specMap = new Map<string, ParsedSpecFile>();
for (const spec of originalSpecs) {
specMap.set(spec.specPath, spec);
}
specMap.set(retestSpec.specPath, retestSpec);
const mergedSpecs = Array.from(specMap.values());
// Calculate final results
const finalCalc = calculateResultsFromSpecs(mergedSpecs);
expect(finalCalc.passed).toBe(3);
expect(finalCalc.failed).toBe(0);
expect(finalCalc.pending).toBe(0);
expect(finalCalc.total).toBe(3);
expect(finalCalc.passRate).toBe("100.00");
expect(finalCalc.color).toBe("#43A047"); // green
expect(finalCalc.totalSpecs).toBe(3);
expect(finalCalc.failedSpecs).toBe("");
expect(finalCalc.failedSpecsCount).toBe(0);
expect(finalCalc.commitStatusMessage).toBe("3 passed in 3 spec files");
});
it("should handle case where retest still fails", () => {
// Original: 1 passed, 1 failed
const originalSpecs: ParsedSpecFile[] = [
createParsedSpecFile("login.spec.ts", [
{ title: "should login", state: "passed" },
]),
createParsedSpecFile("channels.spec.ts", [
{ title: "should create channel", state: "failed" },
]),
];
// Retest: channels.spec.ts still fails
const retestSpec = createParsedSpecFile("channels.spec.ts", [
{ title: "should create channel", state: "failed" },
]);
// Merge
const specMap = new Map<string, ParsedSpecFile>();
for (const spec of originalSpecs) {
specMap.set(spec.specPath, spec);
}
specMap.set(retestSpec.specPath, retestSpec);
const mergedSpecs = Array.from(specMap.values());
const finalCalc = calculateResultsFromSpecs(mergedSpecs);
expect(finalCalc.passed).toBe(1);
expect(finalCalc.failed).toBe(1);
expect(finalCalc.passRate).toBe("50.00");
expect(finalCalc.color).toBe("#F44336"); // red
expect(finalCalc.failedSpecs).toBe(
"tests/integration/channels.spec.ts",
);
expect(finalCalc.failedSpecsCount).toBe(1);
});
});


@ -0,0 +1,321 @@
import * as fs from "fs/promises";
import * as path from "path";
import type {
MochawesomeResult,
ParsedSpecFile,
CalculationResult,
FailedTest,
TestItem,
SuiteItem,
ResultItem,
} from "./types";
/**
* Find all JSON files in a directory recursively
*/
async function findJsonFiles(dir: string): Promise<string[]> {
const files: string[] = [];
try {
const entries = await fs.readdir(dir, { withFileTypes: true });
for (const entry of entries) {
const fullPath = path.join(dir, entry.name);
if (entry.isDirectory()) {
const subFiles = await findJsonFiles(fullPath);
files.push(...subFiles);
} else if (entry.isFile() && entry.name.endsWith(".json")) {
files.push(fullPath);
}
}
} catch {
// Directory doesn't exist or not accessible
}
return files;
}
/**
* Parse a mochawesome JSON file
*/
async function parseSpecFile(filePath: string): Promise<ParsedSpecFile | null> {
try {
const content = await fs.readFile(filePath, "utf8");
const result: MochawesomeResult = JSON.parse(content);
// Extract spec path from results[0].file
const specPath = result.results?.[0]?.file;
if (!specPath) {
return null;
}
return {
filePath,
specPath,
result,
};
} catch {
return null;
}
}
/**
* Extract all tests from a result recursively
*/
function getAllTests(result: MochawesomeResult): TestItem[] {
const tests: TestItem[] = [];
function extractFromSuite(suite: SuiteItem | ResultItem) {
tests.push(...(suite.tests || []));
for (const nestedSuite of suite.suites || []) {
extractFromSuite(nestedSuite);
}
}
for (const resultItem of result.results || []) {
extractFromSuite(resultItem);
}
return tests;
}
/**
* Get color based on pass rate
*/
function getColor(passRate: number): string {
if (passRate === 100) {
return "#43A047"; // green
} else if (passRate >= 99) {
return "#FFEB3B"; // yellow
} else if (passRate >= 98) {
return "#FF9800"; // orange
} else {
return "#F44336"; // red
}
}
/**
* Calculate results from parsed spec files
*/
export function calculateResultsFromSpecs(
specs: ParsedSpecFile[],
): CalculationResult {
let passed = 0;
let failed = 0;
let pending = 0;
const failedSpecsSet = new Set<string>();
const failedTestsList: FailedTest[] = [];
for (const spec of specs) {
const tests = getAllTests(spec.result);
for (const test of tests) {
if (test.state === "passed") {
passed++;
} else if (test.state === "failed") {
failed++;
failedSpecsSet.add(spec.specPath);
failedTestsList.push({
title: test.title,
file: spec.specPath,
});
} else if (test.state === "pending") {
pending++;
}
}
}
const totalSpecs = specs.length;
const failedSpecs = Array.from(failedSpecsSet).join(",");
const failedSpecsCount = failedSpecsSet.size;
// Build failed tests markdown table (limit to 10)
let failedTests = "";
const uniqueFailedTests = failedTestsList.filter(
(test, index, self) =>
index ===
self.findIndex(
(t) => t.title === test.title && t.file === test.file,
),
);
if (uniqueFailedTests.length > 0) {
const limitedTests = uniqueFailedTests.slice(0, 10);
failedTests = limitedTests
.map((t) => {
const escapedTitle = t.title
.replace(/`/g, "\\`")
.replace(/\|/g, "\\|");
return `| ${escapedTitle} | ${t.file} |`;
})
.join("\n");
if (uniqueFailedTests.length > 10) {
const remaining = uniqueFailedTests.length - 10;
failedTests += `\n| _...and ${remaining} more failed tests_ | |`;
}
} else if (failed > 0) {
failedTests = "| Unable to parse failed tests | - |";
}
// Calculate totals and pass rate
// Pass rate = passed / (passed + failed), excluding pending
const total = passed + failed;
const passRate = total > 0 ? ((passed * 100) / total).toFixed(2) : "0.00";
const color = getColor(parseFloat(passRate));
// Build commit status message
const specSuffix = totalSpecs > 0 ? ` in ${totalSpecs} spec files` : "";
const commitStatusMessage =
failed === 0
? `${passed} passed${specSuffix}`
: `${failed} failed, ${passed} passed${specSuffix}`;
return {
passed,
failed,
pending,
totalSpecs,
commitStatusMessage,
failedSpecs,
failedSpecsCount,
failedTests,
total,
passRate,
color,
};
}
/**
* Load all spec files from a mochawesome results directory
*/
export async function loadSpecFiles(
resultsPath: string,
): Promise<ParsedSpecFile[]> {
// Mochawesome results are at: results/mochawesome-report/json/tests/
const mochawesomeDir = path.join(
resultsPath,
"mochawesome-report",
"json",
"tests",
);
const jsonFiles = await findJsonFiles(mochawesomeDir);
const specs: ParsedSpecFile[] = [];
for (const file of jsonFiles) {
const parsed = await parseSpecFile(file);
if (parsed) {
specs.push(parsed);
}
}
return specs;
}
/**
* Merge original and retest results
* - For each spec in retest, replace the matching spec in original
* - Keep original specs that are not in retest
*/
export async function mergeResults(
originalPath: string,
retestPath: string,
): Promise<{
specs: ParsedSpecFile[];
retestFiles: string[];
mergedCount: number;
}> {
const originalSpecs = await loadSpecFiles(originalPath);
const retestSpecs = await loadSpecFiles(retestPath);
// Build a map of original specs by spec path
const specMap = new Map<string, ParsedSpecFile>();
for (const spec of originalSpecs) {
specMap.set(spec.specPath, spec);
}
// Replace with retest results
const retestFiles: string[] = [];
for (const retestSpec of retestSpecs) {
specMap.set(retestSpec.specPath, retestSpec);
retestFiles.push(retestSpec.specPath);
}
return {
specs: Array.from(specMap.values()),
retestFiles,
mergedCount: retestSpecs.length,
};
}
/**
* Write merged results back to the original directory
* This updates the original JSON files with retest results
*/
export async function writeMergedResults(
originalPath: string,
retestPath: string,
): Promise<{ updatedFiles: string[]; removedFiles: string[] }> {
const mochawesomeDir = path.join(
originalPath,
"mochawesome-report",
"json",
"tests",
);
const retestMochawesomeDir = path.join(
retestPath,
"mochawesome-report",
"json",
"tests",
);
const originalJsonFiles = await findJsonFiles(mochawesomeDir);
const retestJsonFiles = await findJsonFiles(retestMochawesomeDir);
const updatedFiles: string[] = [];
const removedFiles: string[] = [];
// For each retest file, find and replace the original
for (const retestFile of retestJsonFiles) {
const retestSpec = await parseSpecFile(retestFile);
if (!retestSpec) continue;
const specPath = retestSpec.specPath;
// Find all original files with matching spec path
// Prefer nested path (under integration/), remove flat duplicates
let nestedFile: string | null = null;
const flatFiles: string[] = [];
for (const origFile of originalJsonFiles) {
const origSpec = await parseSpecFile(origFile);
if (origSpec && origSpec.specPath === specPath) {
if (origFile.includes("/integration/")) {
nestedFile = origFile;
} else {
flatFiles.push(origFile);
}
}
}
// Update the nested file (proper location) or first flat file if no nested
const retestContent = await fs.readFile(retestFile, "utf8");
if (nestedFile) {
await fs.writeFile(nestedFile, retestContent);
updatedFiles.push(nestedFile);
// Remove flat duplicates
for (const flatFile of flatFiles) {
await fs.unlink(flatFile);
removedFiles.push(flatFile);
}
} else if (flatFiles.length > 0) {
await fs.writeFile(flatFiles[0], retestContent);
updatedFiles.push(flatFiles[0]);
}
}
return { updatedFiles, removedFiles };
}


@ -0,0 +1,138 @@
/**
* Mochawesome result structure for a single spec file
*/
export interface MochawesomeResult {
stats: MochawesomeStats;
results: ResultItem[];
}
export interface MochawesomeStats {
suites: number;
tests: number;
passes: number;
pending: number;
failures: number;
start: string;
end: string;
duration: number;
testsRegistered: number;
passPercent: number;
pendingPercent: number;
other: number;
hasOther: boolean;
skipped: number;
hasSkipped: boolean;
}
export interface ResultItem {
uuid: string;
title: string;
fullFile: string;
file: string;
beforeHooks: Hook[];
afterHooks: Hook[];
tests: TestItem[];
suites: SuiteItem[];
passes: string[];
failures: string[];
pending: string[];
skipped: string[];
duration: number;
root: boolean;
rootEmpty: boolean;
_timeout: number;
}
export interface SuiteItem {
uuid: string;
title: string;
fullFile: string;
file: string;
beforeHooks: Hook[];
afterHooks: Hook[];
tests: TestItem[];
suites: SuiteItem[];
passes: string[];
failures: string[];
pending: string[];
skipped: string[];
duration: number;
root: boolean;
rootEmpty: boolean;
_timeout: number;
}
export interface TestItem {
title: string;
fullTitle: string;
timedOut: boolean | null;
duration: number;
state: "passed" | "failed" | "pending";
speed: string | null;
pass: boolean;
fail: boolean;
pending: boolean;
context: string | null;
code: string;
err: TestError;
uuid: string;
parentUUID: string;
isHook: boolean;
skipped: boolean;
}
export interface TestError {
message?: string;
estack?: string;
diff?: string | null;
}
export interface Hook {
title: string;
fullTitle: string;
timedOut: boolean | null;
duration: number;
state: string | null;
speed: string | null;
pass: boolean;
fail: boolean;
pending: boolean;
context: string | null;
code: string;
err: TestError;
uuid: string;
parentUUID: string;
isHook: boolean;
skipped: boolean;
}
/**
* Parsed spec file with its path and results
*/
export interface ParsedSpecFile {
filePath: string;
specPath: string;
result: MochawesomeResult;
}
/**
* Calculation result outputs
*/
export interface CalculationResult {
passed: number;
failed: number;
pending: number;
totalSpecs: number;
commitStatusMessage: string;
failedSpecs: string;
failedSpecsCount: number;
failedTests: string;
total: number;
passRate: string;
color: string;
}
export interface FailedTest {
title: string;
file: string;
}


@ -0,0 +1,17 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "CommonJS",
"moduleResolution": "Node",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"outDir": "./dist",
"rootDir": "./src",
"declaration": true,
"isolatedModules": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "**/*.test.ts"]
}


@ -0,0 +1 @@
{"root":["./src/index.ts","./src/main.ts","./src/merge.ts","./src/types.ts"],"version":"5.9.3"}


@ -0,0 +1,13 @@
import { defineConfig } from "tsup";
export default defineConfig({
entry: ["src/index.ts"],
format: ["cjs"],
target: "node24",
clean: true,
minify: false,
sourcemap: false,
splitting: false,
bundle: true,
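// Bundle all dependencies into dist/index.js so the action runs without node_modules.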
noExternal: [/.*/],
});


@ -0,0 +1,2 @@
node_modules/
.env


@ -0,0 +1,51 @@
name: Calculate Playwright Results
description: Calculate Playwright test results with optional merge of retest results
author: Mattermost
inputs:
original-results-path:
description: Path to the original Playwright results.json file
required: true
retest-results-path:
description: Path to the retest Playwright results.json file (optional - if not provided, only calculates from original)
required: false
output-path:
description: Path to write the merged results.json file (defaults to original-results-path)
required: false
outputs:
# Merge outputs
merged:
description: Whether merge was performed (true/false)
# Calculation outputs (same as calculate-playwright-test-results)
passed:
description: Number of passed tests (not including flaky)
failed:
description: Number of failed tests
flaky:
description: Number of flaky tests (failed initially but passed on retry)
skipped:
description: Number of skipped tests
total_specs:
description: Total number of spec files
commit_status_message:
description: Message for commit status (e.g., "X failed, Y passed in Z spec files")
failed_specs:
description: Comma-separated list of failed spec files (for retest)
failed_specs_count:
description: Number of failed spec files
failed_tests:
description: Markdown table rows of failed tests (for GitHub summary)
total:
description: Total number of tests (passed + flaky + failed)
pass_rate:
description: Pass rate percentage (e.g., "100.00")
passing:
description: Number of passing tests (passed + flaky)
color:
description: Color for webhook based on pass rate (green=100%, yellow=99%+, orange=98%+, red=<98%)
runs:
using: node24
main: dist/index.js

File diff suppressed because one or more lines are too long


@ -0,0 +1,6 @@
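/** @type {import('ts-jest').JestConfigWithTsJest} */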
module.exports = {
preset: "ts-jest",
testEnvironment: "node",
testMatch: ["**/*.test.ts"],
moduleFileExtensions: ["ts", "js"],
};

File diff suppressed because it is too large


@ -0,0 +1,27 @@
{
"name": "calculate-playwright-results",
"private": true,
"version": "0.1.0",
"main": "dist/index.js",
"scripts": {
"build": "tsup",
"prettier": "npx prettier --write \"src/**/*.ts\"",
"local-action": "local-action . src/main.ts .env",
"test": "jest --verbose",
"test:watch": "jest --watch --verbose",
"test:silent": "jest --silent",
"tsc": "tsc -b"
},
"dependencies": {
"@actions/core": "3.0.0"
},
"devDependencies": {
"@github/local-action": "7.0.0",
"@types/jest": "30.0.0",
"@types/node": "25.2.0",
"jest": "30.2.0",
"ts-jest": "29.4.6",
"tsup": "8.5.1",
"typescript": "5.9.3"
}
}


@ -0,0 +1,3 @@
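// Action entrypoint: invokes run() as soon as the bundled action loads.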
import { run } from "./main";
run();


@ -0,0 +1,121 @@
import * as core from "@actions/core";
import * as fs from "fs/promises";
import type { PlaywrightResults } from "./types";
import { mergeResults, calculateResults } from "./merge";
export async function run(): Promise<void> {
const originalPath = core.getInput("original-results-path", {
required: true,
});
const retestPath = core.getInput("retest-results-path"); // Optional
const outputPath = core.getInput("output-path") || originalPath;
core.info(`Original results: ${originalPath}`);
core.info(`Retest results: ${retestPath || "(not provided)"}`);
core.info(`Output path: ${outputPath}`);
// Check if original file exists
const originalExists = await fs
.access(originalPath)
.then(() => true)
.catch(() => false);
if (!originalExists) {
core.setFailed(`Original results not found at ${originalPath}`);
return;
}
// Read original file
core.info("Reading original results...");
const originalContent = await fs.readFile(originalPath, "utf8");
const original: PlaywrightResults = JSON.parse(originalContent);
core.info(
`Original: ${original.suites.length} suites, stats: ${JSON.stringify(original.stats)}`,
);
// Check if retest path is provided and exists
let finalResults: PlaywrightResults;
let merged = false;
if (retestPath) {
const retestExists = await fs
.access(retestPath)
.then(() => true)
.catch(() => false);
if (retestExists) {
// Read retest file and merge
core.info("Reading retest results...");
const retestContent = await fs.readFile(retestPath, "utf8");
const retest: PlaywrightResults = JSON.parse(retestContent);
core.info(
`Retest: ${retest.suites.length} suites, stats: ${JSON.stringify(retest.stats)}`,
);
// Merge results
core.info("Merging results at suite level...");
const mergeResult = mergeResults(original, retest);
finalResults = mergeResult.merged;
merged = true;
core.info(`Retested specs: ${mergeResult.retestFiles.join(", ")}`);
core.info(
`Kept ${original.suites.length - mergeResult.retestFiles.length} original suites`,
);
core.info(`Added ${retest.suites.length} retest suites`);
core.info(`Total merged suites: ${mergeResult.totalSuites}`);
// Write merged results
core.info(`Writing merged results to ${outputPath}...`);
await fs.writeFile(
outputPath,
JSON.stringify(finalResults, null, 2),
);
} else {
core.warning(
`Retest results not found at ${retestPath}, using original only`,
);
finalResults = original;
}
} else {
core.info("No retest path provided, using original results only");
finalResults = original;
}
// Calculate all outputs from final results
const calc = calculateResults(finalResults);
// Log results
core.startGroup("Final Results");
core.info(`Passed: ${calc.passed}`);
core.info(`Failed: ${calc.failed}`);
core.info(`Flaky: ${calc.flaky}`);
core.info(`Skipped: ${calc.skipped}`);
core.info(`Passing (passed + flaky): ${calc.passing}`);
core.info(`Total: ${calc.total}`);
core.info(`Pass Rate: ${calc.passRate}%`);
core.info(`Color: ${calc.color}`);
core.info(`Spec Files: ${calc.totalSpecs}`);
core.info(`Failed Specs Count: ${calc.failedSpecsCount}`);
core.info(`Commit Status Message: ${calc.commitStatusMessage}`);
core.info(`Failed Specs: ${calc.failedSpecs || "none"}`);
core.endGroup();
// Set all outputs
core.setOutput("merged", merged.toString());
core.setOutput("passed", calc.passed);
core.setOutput("failed", calc.failed);
core.setOutput("flaky", calc.flaky);
core.setOutput("skipped", calc.skipped);
core.setOutput("total_specs", calc.totalSpecs);
core.setOutput("commit_status_message", calc.commitStatusMessage);
core.setOutput("failed_specs", calc.failedSpecs);
core.setOutput("failed_specs_count", calc.failedSpecsCount);
core.setOutput("failed_tests", calc.failedTests);
core.setOutput("total", calc.total);
core.setOutput("pass_rate", calc.passRate);
core.setOutput("passing", calc.passing);
core.setOutput("color", calc.color);
}


@ -0,0 +1,509 @@
import { mergeResults, computeStats, calculateResults } from "./merge";
import type { PlaywrightResults, Suite } from "./types";
describe("mergeResults", () => {
const createSuite = (file: string, tests: { status: string }[]): Suite => ({
title: file,
file,
column: 0,
line: 0,
specs: [
{
title: "test spec",
ok: true,
tags: [],
tests: tests.map((t) => ({
timeout: 60000,
annotations: [],
expectedStatus: "passed",
projectId: "chrome",
projectName: "chrome",
results: [
{
workerIndex: 0,
parallelIndex: 0,
status: t.status,
duration: 1000,
errors: [],
stdout: [],
stderr: [],
retry: 0,
startTime: new Date().toISOString(),
annotations: [],
},
],
})),
},
],
});
it("should keep original suites not in retest", () => {
const original: PlaywrightResults = {
config: {},
suites: [
createSuite("spec1.ts", [{ status: "passed" }]),
createSuite("spec2.ts", [{ status: "failed" }]),
createSuite("spec3.ts", [{ status: "passed" }]),
],
stats: {
startTime: new Date().toISOString(),
duration: 10000,
expected: 2,
unexpected: 1,
skipped: 0,
flaky: 0,
},
};
const retest: PlaywrightResults = {
config: {},
suites: [createSuite("spec2.ts", [{ status: "passed" }])],
stats: {
startTime: new Date().toISOString(),
duration: 5000,
expected: 1,
unexpected: 0,
skipped: 0,
flaky: 0,
},
};
const result = mergeResults(original, retest);
expect(result.totalSuites).toBe(3);
expect(result.retestFiles).toEqual(["spec2.ts"]);
expect(result.merged.suites.map((s) => s.file)).toEqual([
"spec1.ts",
"spec3.ts",
"spec2.ts",
]);
});
it("should compute correct stats from merged suites", () => {
const original: PlaywrightResults = {
config: {},
suites: [
createSuite("spec1.ts", [{ status: "passed" }]),
createSuite("spec2.ts", [{ status: "failed" }]),
],
stats: {
startTime: new Date().toISOString(),
duration: 10000,
expected: 1,
unexpected: 1,
skipped: 0,
flaky: 0,
},
};
const retest: PlaywrightResults = {
config: {},
suites: [createSuite("spec2.ts", [{ status: "passed" }])],
stats: {
startTime: new Date().toISOString(),
duration: 5000,
expected: 1,
unexpected: 0,
skipped: 0,
flaky: 0,
},
};
const result = mergeResults(original, retest);
expect(result.stats.expected).toBe(2);
expect(result.stats.unexpected).toBe(0);
expect(result.stats.duration).toBe(15000);
});
});
describe("computeStats", () => {
it("should count flaky tests correctly", () => {
const suites: Suite[] = [
{
title: "spec1.ts",
file: "spec1.ts",
column: 0,
line: 0,
specs: [
{
title: "flaky test",
ok: true,
tags: [],
tests: [
{
timeout: 60000,
annotations: [],
expectedStatus: "passed",
projectId: "chrome",
projectName: "chrome",
results: [
{
workerIndex: 0,
parallelIndex: 0,
status: "failed",
duration: 1000,
errors: [],
stdout: [],
stderr: [],
retry: 0,
startTime: new Date().toISOString(),
annotations: [],
},
{
workerIndex: 0,
parallelIndex: 0,
status: "passed",
duration: 1000,
errors: [],
stdout: [],
stderr: [],
retry: 1,
startTime: new Date().toISOString(),
annotations: [],
},
],
},
],
},
],
},
];
const stats = computeStats(suites);
expect(stats.expected).toBe(0);
expect(stats.flaky).toBe(1);
expect(stats.unexpected).toBe(0);
});
});
describe("calculateResults", () => {
const createSuiteWithSpec = (
file: string,
specTitle: string,
testResults: { status: string; retry: number }[],
): Suite => ({
title: file,
file,
column: 0,
line: 0,
specs: [
{
title: specTitle,
ok: testResults[testResults.length - 1].status === "passed",
tags: [],
tests: [
{
timeout: 60000,
annotations: [],
expectedStatus: "passed",
projectId: "chrome",
projectName: "chrome",
results: testResults.map((r) => ({
workerIndex: 0,
parallelIndex: 0,
status: r.status,
duration: 1000,
errors:
r.status === "failed"
? [{ message: "error" }]
: [],
stdout: [],
stderr: [],
retry: r.retry,
startTime: new Date().toISOString(),
annotations: [],
})),
location: {
file,
line: 10,
column: 5,
},
},
],
},
],
});
it("should calculate all outputs correctly for passing results", () => {
const results: PlaywrightResults = {
config: {},
suites: [
createSuiteWithSpec("login.spec.ts", "should login", [
{ status: "passed", retry: 0 },
]),
createSuiteWithSpec(
"messaging.spec.ts",
"should send message",
[{ status: "passed", retry: 0 }],
),
],
stats: {
startTime: new Date().toISOString(),
duration: 5000,
expected: 2,
unexpected: 0,
skipped: 0,
flaky: 0,
},
};
const calc = calculateResults(results);
expect(calc.passed).toBe(2);
expect(calc.failed).toBe(0);
expect(calc.flaky).toBe(0);
expect(calc.skipped).toBe(0);
expect(calc.total).toBe(2);
expect(calc.passing).toBe(2);
expect(calc.passRate).toBe("100.00");
expect(calc.color).toBe("#43A047"); // green
expect(calc.totalSpecs).toBe(2);
expect(calc.failedSpecs).toBe("");
expect(calc.failedSpecsCount).toBe(0);
expect(calc.commitStatusMessage).toBe("2 passed in 2 spec files");
});
it("should calculate all outputs correctly for results with failures", () => {
const results: PlaywrightResults = {
config: {},
suites: [
createSuiteWithSpec("login.spec.ts", "should login", [
{ status: "passed", retry: 0 },
]),
createSuiteWithSpec(
"channels.spec.ts",
"should create channel",
[
{ status: "failed", retry: 0 },
{ status: "failed", retry: 1 },
{ status: "failed", retry: 2 },
],
),
],
stats: {
startTime: new Date().toISOString(),
duration: 10000,
expected: 1,
unexpected: 1,
skipped: 0,
flaky: 0,
},
};
const calc = calculateResults(results);
expect(calc.passed).toBe(1);
expect(calc.failed).toBe(1);
expect(calc.flaky).toBe(0);
expect(calc.total).toBe(2);
expect(calc.passing).toBe(1);
expect(calc.passRate).toBe("50.00");
expect(calc.color).toBe("#F44336"); // red
expect(calc.totalSpecs).toBe(2);
expect(calc.failedSpecs).toBe("channels.spec.ts");
expect(calc.failedSpecsCount).toBe(1);
expect(calc.commitStatusMessage).toBe(
"1 failed, 1 passed in 2 spec files",
);
expect(calc.failedTests).toContain("should create channel");
});
});
describe("full integration: original with failure, retest passes", () => {
const createSuiteWithSpec = (
file: string,
specTitle: string,
testResults: { status: string; retry: number }[],
): Suite => ({
title: file,
file,
column: 0,
line: 0,
specs: [
{
title: specTitle,
ok: testResults[testResults.length - 1].status === "passed",
tags: [],
tests: [
{
timeout: 60000,
annotations: [],
expectedStatus: "passed",
projectId: "chrome",
projectName: "chrome",
results: testResults.map((r) => ({
workerIndex: 0,
parallelIndex: 0,
status: r.status,
duration: 1000,
errors:
r.status === "failed"
? [{ message: "error" }]
: [],
stdout: [],
stderr: [],
retry: r.retry,
startTime: new Date().toISOString(),
annotations: [],
})),
location: {
file,
line: 10,
column: 5,
},
},
],
},
],
});
it("should merge and calculate correctly when failed test passes on retest", () => {
// Original: 2 passed, 1 failed (channels.spec.ts)
const original: PlaywrightResults = {
config: {},
suites: [
createSuiteWithSpec("login.spec.ts", "should login", [
{ status: "passed", retry: 0 },
]),
createSuiteWithSpec(
"messaging.spec.ts",
"should send message",
[{ status: "passed", retry: 0 }],
),
createSuiteWithSpec(
"channels.spec.ts",
"should create channel",
[
{ status: "failed", retry: 0 },
{ status: "failed", retry: 1 },
{ status: "failed", retry: 2 },
],
),
],
stats: {
startTime: new Date().toISOString(),
duration: 18000,
expected: 2,
unexpected: 1,
skipped: 0,
flaky: 0,
},
};
// Retest: channels.spec.ts now passes
const retest: PlaywrightResults = {
config: {},
suites: [
createSuiteWithSpec(
"channels.spec.ts",
"should create channel",
[{ status: "passed", retry: 0 }],
),
],
stats: {
startTime: new Date().toISOString(),
duration: 3000,
expected: 1,
unexpected: 0,
skipped: 0,
flaky: 0,
},
};
// Step 1: Verify original has failure
const originalCalc = calculateResults(original);
expect(originalCalc.passed).toBe(2);
expect(originalCalc.failed).toBe(1);
expect(originalCalc.passRate).toBe("66.67");
// Step 2: Merge results
const mergeResult = mergeResults(original, retest);
// Step 3: Verify merge structure
expect(mergeResult.totalSuites).toBe(3);
expect(mergeResult.retestFiles).toEqual(["channels.spec.ts"]);
expect(mergeResult.merged.suites.map((s) => s.file)).toEqual([
"login.spec.ts",
"messaging.spec.ts",
"channels.spec.ts",
]);
// Step 4: Calculate final results
const finalCalc = calculateResults(mergeResult.merged);
// Step 5: Verify all outputs
expect(finalCalc.passed).toBe(3);
expect(finalCalc.failed).toBe(0);
expect(finalCalc.flaky).toBe(0);
expect(finalCalc.skipped).toBe(0);
expect(finalCalc.total).toBe(3);
expect(finalCalc.passing).toBe(3);
expect(finalCalc.passRate).toBe("100.00");
expect(finalCalc.color).toBe("#43A047"); // green
expect(finalCalc.totalSpecs).toBe(3);
expect(finalCalc.failedSpecs).toBe("");
expect(finalCalc.failedSpecsCount).toBe(0);
expect(finalCalc.commitStatusMessage).toBe("3 passed in 3 spec files");
expect(finalCalc.failedTests).toBe("");
});
it("should handle case where retest still fails", () => {
// Original: 2 passed, 1 failed
const original: PlaywrightResults = {
config: {},
suites: [
createSuiteWithSpec("login.spec.ts", "should login", [
{ status: "passed", retry: 0 },
]),
createSuiteWithSpec(
"channels.spec.ts",
"should create channel",
[{ status: "failed", retry: 0 }],
),
],
stats: {
startTime: new Date().toISOString(),
duration: 10000,
expected: 1,
unexpected: 1,
skipped: 0,
flaky: 0,
},
};
// Retest: channels.spec.ts still fails
const retest: PlaywrightResults = {
config: {},
suites: [
createSuiteWithSpec(
"channels.spec.ts",
"should create channel",
[
{ status: "failed", retry: 0 },
{ status: "failed", retry: 1 },
],
),
],
stats: {
startTime: new Date().toISOString(),
duration: 5000,
expected: 0,
unexpected: 1,
skipped: 0,
flaky: 0,
},
};
const mergeResult = mergeResults(original, retest);
const finalCalc = calculateResults(mergeResult.merged);
expect(finalCalc.passed).toBe(1);
expect(finalCalc.failed).toBe(1);
expect(finalCalc.passRate).toBe("50.00");
expect(finalCalc.color).toBe("#F44336"); // red
expect(finalCalc.failedSpecs).toBe("channels.spec.ts");
expect(finalCalc.failedSpecsCount).toBe(1);
});
});


@ -0,0 +1,289 @@
import type {
PlaywrightResults,
Suite,
Test,
Stats,
MergeResult,
CalculationResult,
FailedTest,
} from "./types";
interface TestInfo {
title: string;
file: string;
finalStatus: string;
hadFailure: boolean;
}
/**
* Extract all tests from suites recursively with their info
*/
function getAllTestsWithInfo(suites: Suite[]): TestInfo[] {
const tests: TestInfo[] = [];
function extractFromSuite(suite: Suite) {
for (const spec of suite.specs || []) {
for (const test of spec.tests || []) {
if (!test.results || test.results.length === 0) {
continue;
}
const finalResult = test.results[test.results.length - 1];
const hadFailure = test.results.some(
(r) => r.status === "failed" || r.status === "timedOut",
);
tests.push({
title: spec.title || test.projectName,
file: test.location?.file || suite.file,
finalStatus: finalResult.status,
hadFailure,
});
}
}
for (const nestedSuite of suite.suites || []) {
extractFromSuite(nestedSuite);
}
}
for (const suite of suites) {
extractFromSuite(suite);
}
return tests;
}
/**
* Extract all tests from suites recursively
*/
function getAllTests(suites: Suite[]): Test[] {
const tests: Test[] = [];
function extractFromSuite(suite: Suite) {
for (const spec of suite.specs || []) {
tests.push(...spec.tests);
}
for (const nestedSuite of suite.suites || []) {
extractFromSuite(nestedSuite);
}
}
for (const suite of suites) {
extractFromSuite(suite);
}
return tests;
}
/**
* Compute stats from suites
*/
export function computeStats(
suites: Suite[],
originalStats?: Stats,
retestStats?: Stats,
): Stats {
const tests = getAllTests(suites);
let expected = 0;
let unexpected = 0;
let skipped = 0;
let flaky = 0;
for (const test of tests) {
if (!test.results || test.results.length === 0) {
continue;
}
const finalResult = test.results[test.results.length - 1];
const finalStatus = finalResult.status;
// Check if any result was a failure
const hadFailure = test.results.some(
(r) => r.status === "failed" || r.status === "timedOut",
);
if (finalStatus === "skipped") {
skipped++;
} else if (finalStatus === "failed" || finalStatus === "timedOut") {
unexpected++;
} else if (finalStatus === "passed") {
if (hadFailure) {
flaky++;
} else {
expected++;
}
}
}
// Compute duration as sum of both runs
const duration =
(originalStats?.duration || 0) + (retestStats?.duration || 0);
return {
startTime: originalStats?.startTime || new Date().toISOString(),
duration,
expected,
unexpected,
skipped,
flaky,
};
}
/**
* Get color based on pass rate
*/
function getColor(passRate: number): string {
if (passRate === 100) {
return "#43A047"; // green
} else if (passRate >= 99) {
return "#FFEB3B"; // yellow
} else if (passRate >= 98) {
return "#FF9800"; // orange
} else {
return "#F44336"; // red
}
}
/**
* Calculate all outputs from results
*/
export function calculateResults(
results: PlaywrightResults,
): CalculationResult {
const stats = results.stats || {
expected: 0,
unexpected: 0,
skipped: 0,
flaky: 0,
startTime: new Date().toISOString(),
duration: 0,
};
const passed = stats.expected;
const failed = stats.unexpected;
const flaky = stats.flaky;
const skipped = stats.skipped;
// Count unique spec files
const specFiles = new Set<string>();
for (const suite of results.suites) {
specFiles.add(suite.file);
}
const totalSpecs = specFiles.size;
// Get all tests with info for failed tests extraction
const testsInfo = getAllTestsWithInfo(results.suites);
// Extract failed specs
const failedSpecsSet = new Set<string>();
const failedTestsList: FailedTest[] = [];
for (const test of testsInfo) {
if (test.finalStatus === "failed" || test.finalStatus === "timedOut") {
failedSpecsSet.add(test.file);
failedTestsList.push({
title: test.title,
file: test.file,
});
}
}
const failedSpecs = Array.from(failedSpecsSet).join(",");
const failedSpecsCount = failedSpecsSet.size;
// Build failed tests markdown table (limit to 10)
let failedTests = "";
const uniqueFailedTests = failedTestsList.filter(
(test, index, self) =>
index ===
self.findIndex(
(t) => t.title === test.title && t.file === test.file,
),
);
if (uniqueFailedTests.length > 0) {
const limitedTests = uniqueFailedTests.slice(0, 10);
failedTests = limitedTests
.map((t) => {
const escapedTitle = t.title
.replace(/`/g, "\\`")
.replace(/\|/g, "\\|");
return `| ${escapedTitle} | ${t.file} |`;
})
.join("\n");
if (uniqueFailedTests.length > 10) {
const remaining = uniqueFailedTests.length - 10;
failedTests += `\n| _...and ${remaining} more failed tests_ | |`;
}
} else if (failed > 0) {
failedTests = "| Unable to parse failed tests | - |";
}
// Calculate totals and pass rate
const passing = passed + flaky;
const total = passing + failed;
const passRate = total > 0 ? ((passing * 100) / total).toFixed(2) : "0.00";
const color = getColor(parseFloat(passRate));
// Build commit status message
const specSuffix = totalSpecs > 0 ? ` in ${totalSpecs} spec files` : "";
const commitStatusMessage =
failed === 0
? `${passed} passed${specSuffix}`
: `${failed} failed, ${passed} passed${specSuffix}`;
return {
passed,
failed,
flaky,
skipped,
totalSpecs,
commitStatusMessage,
failedSpecs,
failedSpecsCount,
failedTests,
total,
passRate,
passing,
color,
};
}
/**
* Merge original and retest results at suite level
* - Keep original suites that are NOT in retest
* - Add all retest suites (replacing matching originals)
*/
export function mergeResults(
original: PlaywrightResults,
retest: PlaywrightResults,
): MergeResult {
// Get list of retested spec files
const retestFiles = retest.suites.map((s) => s.file);
// Filter original suites - keep only those NOT in retest
const keptOriginalSuites = original.suites.filter(
(suite) => !retestFiles.includes(suite.file),
);
// Merge: kept original suites + all retest suites
const mergedSuites = [...keptOriginalSuites, ...retest.suites];
// Compute stats from merged suites
const stats = computeStats(mergedSuites, original.stats, retest.stats);
const merged: PlaywrightResults = {
config: original.config,
suites: mergedSuites,
stats,
};
return {
merged,
stats,
totalSuites: mergedSuites.length,
retestFiles,
};
}


@ -0,0 +1,88 @@
export interface PlaywrightResults {
config: Record<string, unknown>;
suites: Suite[];
stats?: Stats;
}
export interface Suite {
title: string;
file: string;
column: number;
line: number;
specs: Spec[];
suites?: Suite[];
}
export interface Spec {
title: string;
ok: boolean;
tags: string[];
tests: Test[];
}
export interface Test {
timeout: number;
annotations: unknown[];
expectedStatus: string;
projectId: string;
projectName: string;
results: TestResult[];
location?: TestLocation;
}
export interface TestResult {
workerIndex: number;
parallelIndex: number;
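// Raw Playwright status string, e.g. "passed", "failed", "timedOut", or "skipped".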
status: string;
duration: number;
errors: unknown[];
stdout: unknown[];
stderr: unknown[];
retry: number;
startTime: string;
annotations: unknown[];
attachments?: unknown[];
}
export interface TestLocation {
file: string;
line: number;
column: number;
}
export interface Stats {
startTime: string;
duration: number;
expected: number;
unexpected: number;
skipped: number;
flaky: number;
}
export interface MergeResult {
merged: PlaywrightResults;
stats: Stats;
totalSuites: number;
retestFiles: string[];
}
export interface CalculationResult {
passed: number;
failed: number;
flaky: number;
skipped: number;
totalSpecs: number;
commitStatusMessage: string;
failedSpecs: string;
failedSpecsCount: number;
failedTests: string;
total: number;
passRate: string;
passing: number;
color: string;
}
export interface FailedTest {
title: string;
file: string;
}


@ -0,0 +1,17 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "CommonJS",
"moduleResolution": "Node",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"outDir": "dist",
"rootDir": "./src",
"declaration": true,
"isolatedModules": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "**/*.test.ts"]
}


@ -0,0 +1 @@
{"root":["./src/index.ts","./src/main.ts","./src/merge.ts","./src/types.ts"],"version":"5.9.3"}


@ -0,0 +1,12 @@
import { defineConfig } from "tsup";
export default defineConfig({
entry: ["src/index.ts"],
format: ["cjs"],
outDir: "dist",
clean: true,
noExternal: [/.*/], // Bundle all dependencies
minify: false,
sourcemap: false,
target: "node24",
});


@ -0,0 +1,91 @@
---
name: Check E2E Test Only
description: Check if PR contains only E2E test changes and determine the appropriate docker image tag
inputs:
base_sha:
description: Base commit SHA (PR base)
required: false
head_sha:
description: Head commit SHA (PR head)
required: false
pr_number:
description: PR number (used to fetch SHAs via API if base_sha/head_sha not provided)
required: false
outputs:
e2e_test_only:
description: Whether the PR contains only E2E test changes (true/false)
value: ${{ steps.check.outputs.e2e_test_only }}
image_tag:
description: Docker image tag to use (master for E2E-only, short SHA for mixed)
value: ${{ steps.check.outputs.image_tag }}
runs:
using: composite
steps:
- name: ci/check-e2e-test-only
id: check
shell: bash
env:
GH_TOKEN: ${{ github.token }}
INPUT_BASE_SHA: ${{ inputs.base_sha }}
INPUT_HEAD_SHA: ${{ inputs.head_sha }}
INPUT_PR_NUMBER: ${{ inputs.pr_number }}
run: |
# Resolve SHAs from PR number if not provided
if [ -z "$INPUT_BASE_SHA" ] || [ -z "$INPUT_HEAD_SHA" ]; then
if [ -z "$INPUT_PR_NUMBER" ]; then
echo "::error::Either base_sha/head_sha or pr_number must be provided"
exit 1
fi
echo "Resolving SHAs from PR #${INPUT_PR_NUMBER}"
PR_DATA=$(gh api "repos/${{ github.repository }}/pulls/${INPUT_PR_NUMBER}")
INPUT_BASE_SHA=$(echo "$PR_DATA" | jq -r '.base.sha')
INPUT_HEAD_SHA=$(echo "$PR_DATA" | jq -r '.head.sha')
if [ -z "$INPUT_BASE_SHA" ] || [ "$INPUT_BASE_SHA" = "null" ] || \
[ -z "$INPUT_HEAD_SHA" ] || [ "$INPUT_HEAD_SHA" = "null" ]; then
echo "::error::Could not resolve SHAs for PR #${INPUT_PR_NUMBER}"
exit 1
fi
fi
SHORT_SHA="${INPUT_HEAD_SHA::7}"
# Get changed files - try git first, fall back to API
CHANGED_FILES=$(git diff --name-only "$INPUT_BASE_SHA"..."$INPUT_HEAD_SHA" 2>/dev/null || \
gh api "repos/${{ github.repository }}/pulls/${INPUT_PR_NUMBER}/files" --jq '.[].filename' 2>/dev/null || echo "")
if [ -z "$CHANGED_FILES" ]; then
echo "::warning::Could not determine changed files, assuming not E2E-only"
echo "e2e_test_only=false" >> $GITHUB_OUTPUT
echo "image_tag=${SHORT_SHA}" >> $GITHUB_OUTPUT
exit 0
fi
echo "Changed files:"
echo "$CHANGED_FILES"
# Check if all files are E2E-related
E2E_TEST_ONLY="true"
while IFS= read -r file; do
[ -z "$file" ] && continue
if [[ ! "$file" =~ ^e2e-tests/ ]] && \
[[ ! "$file" =~ ^\.github/workflows/e2e- ]]; then
echo "Non-E2E file found: $file"
E2E_TEST_ONLY="false"
break
fi
done <<< "$CHANGED_FILES"
echo "E2E test only: ${E2E_TEST_ONLY}"
# Set outputs
echo "e2e_test_only=${E2E_TEST_ONLY}" >> $GITHUB_OUTPUT
if [ "$E2E_TEST_ONLY" = "true" ]; then
echo "image_tag=master" >> $GITHUB_OUTPUT
else
echo "image_tag=${SHORT_SHA}" >> $GITHUB_OUTPUT
fi

.github/e2e-test-workflow-for-pr.md

@ -0,0 +1,320 @@
# E2E Test Workflow For PR
This document describes the E2E test workflow for Pull Requests in Mattermost.
## Overview
This is an **automated workflow** that runs smoke-then-full E2E tests automatically for every PR commit. Smoke tests run first as a gate—if they fail, full tests are skipped to save CI resources and provide fast feedback.
Both Cypress and Playwright test suites run **in parallel** with independent status checks.
**Note**: This workflow is designed for **Pull Requests only**. It will fail if the commit SHA is not associated with an open PR.
### On-Demand Testing
For on-demand E2E testing, the existing triggers still work:
- **Comment triggers**: `/e2e-test`, `/e2e-test fips`, or with `MM_ENV` parameters
- **Label trigger**: `E2E/Run`
These manual triggers are separate from this automated workflow and can be used for custom test configurations or re-runs.
## Workflow Files
```
.github/workflows/
├── e2e-tests-ci.yml # Main orchestrator (resolves PR, triggers both)
├── e2e-tests-cypress.yml # Cypress: smoke → full
└── e2e-tests-playwright.yml # Playwright: smoke → full
```
## Architecture Diagram
```
┌─────────────────────────────────────────────────────────────────────────────────┐
│ MAIN ORCHESTRATOR: e2e-tests-ci.yml │
└─────────────────────────────────────────────────────────────────────────────────┘
┌─────────────────────┐
│ workflow_dispatch │
│ (commit_sha) │
└──────────┬──────────┘
┌──────────▼──────────┐
│ resolve-pr │
│ (GitHub API call) │
│ │
│ Fails if no PR │
│ found for commit │
└──────────┬──────────┘
┌──────────────────┴──────────────────┐
│ (parallel) │
▼ ▼
┌─────────────────────────────────┐ ┌─────────────────────────────────┐
│ e2e-tests-cypress.yml │ │ e2e-tests-playwright.yml │
│ (reusable workflow) │ │ (reusable workflow) │
│ │ │ │
│ Inputs: │ │ Inputs: │
│ • commit_sha │ │ • commit_sha │
│ • workers_number: "20" │ │ • workers_number: "1" (default)│
│ • server: "onprem" │ │ • server: "onprem" │
│ • enable_reporting: true │ │ • enable_reporting: true │
│ • report_type: "PR" │ │ • report_type: "PR" │
│ • pr_number │ │ • pr_number (required for full)│
└─────────────────────────────────┘ └─────────────────────────────────┘
```
## Per-Framework Workflow Flow
Each framework (Cypress/Playwright) follows the same pattern:
```
┌──────────────────────────────────────────────────────────────────┐
│ PREFLIGHT CHECKS │
└──────────────────────────────────────────────────────────────────┘
┌─────────────────────────┼─────────────────────────┐
│ │ │
▼ ▼ ▼
┌────────────┐ ┌─────────────┐ ┌─────────────┐
│ lint/tsc │ │ shell-check │ │ update- │
│ check │ │ │ │ status │
└─────┬──────┘ └──────┬──────┘ │ (pending) │
│ │ └──────┬──────┘
└──────────────────────┴────────────────────────┘
┌──────────────────────────────────────────────────────────────────┐
│ GENERATE BUILD VARIABLES │
│ (branch, build_id, server_image) │
│ │
│ Server image generated from commit SHA: │
│ mattermostdevelopment/mattermost-enterprise-edition:<sha7> │
└─────────────────────────────┬────────────────────────────────────┘
┌──────────────────────────────────────────────────────────────────┐
│ SMOKE TESTS │
│ ┌────────────────────────────────────────────────────────────┐ │
│ │ generate-test-cycle (smoke) [Cypress only] │ │
│ └─────────────────────────┬──────────────────────────────────┘ │
│ │ │
│ ▼ │
│ ┌────────────────────────────────────────────────────────────┐ │
│ │ smoke-test │ │
│ │ • Cypress: TEST_FILTER: --stage=@prod --group=@smoke │ │
│ │ • Playwright: TEST_FILTER: --grep @smoke │ │
│ │ • Fail fast if any smoke test fails │ │
│ └─────────────────────────┬──────────────────────────────────┘ │
│ │ │
│ ▼ │
│ ┌────────────────────────────────────────────────────────────┐ │
│ │ smoke-report │ │
│ │ • Assert 0 failures │ │
│ │ • Upload results to S3 (Playwright) │ │
│ │ • Update commit status │ │
│ └────────────────────────────────────────────────────────────┘ │
└─────────────────────────────┬────────────────────────────────────┘
│ (only if smoke passes)
│ (Playwright: also requires pr_number)
┌──────────────────────────────────────────────────────────────────┐
│ FULL TESTS │
│ ┌────────────────────────────────────────────────────────────┐ │
│ │ generate-test-cycle (full) [Cypress only] │ │
│ └─────────────────────────┬──────────────────────────────────┘ │
│ │ │
│ ▼ │
│ ┌────────────────────────────────────────────────────────────┐ │
│ │ full-test (matrix: workers) │ │
│ │ • Cypress: TEST_FILTER: --stage='@prod' │ │
│ │ --exclude-group='@smoke' │ │
│ │ • Playwright: TEST_FILTER: --grep-invert "@smoke|@visual" │ │
│ │ • Multiple workers for parallelism │ │
│ └─────────────────────────┬──────────────────────────────────┘ │
│ │ │
│ ▼ │
│ ┌────────────────────────────────────────────────────────────┐ │
│ │ full-report │ │
│ │ • Aggregate results from all workers │ │
│ │ • Upload results to S3 (Playwright) │ │
│ │ • Publish report (if reporting enabled) │ │
│ │ • Update final commit status │ │
│ └────────────────────────────────────────────────────────────┘ │
└──────────────────────────────────────────────────────────────────┘
```
## Commit Status Checks
Each workflow phase creates its own GitHub commit status check:
```
GitHub Commit Status Checks:
═══════════════════════════
┌─────────────────────────────────────────────────────────────────────────────┐
│ E2E Tests/cypress-smoke ●────────●────────● │
│ pending running ✓ passed / ✗ failed │
│ │
│ E2E Tests/cypress-full ○ ○ ●────────●────────● │
│ (skip) (skip) pending running ✓/✗ │
│ │ │
│ └── Only runs if smoke passes │
└─────────────────────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────────────────┐
│ E2E Tests/playwright-smoke ●────────●────────● │
│ pending running ✓ passed / ✗ failed │
│ │
│ E2E Tests/playwright-full ○ ○ ●────────●────────● │
│ (skip) (skip) pending running ✓/✗ │
│ │ │
│ └── Only runs if smoke passes │
│ AND pr_number is provided │
└─────────────────────────────────────────────────────────────────────────────┘
```
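Each status is posted with the pinned `mattermost/actions/delivery/update-commit-status` action. A minimal sketch of the initial pending step, with the shape taken from the templates below (the pinned action SHA is elided here):
```yaml
- name: ci/set-initial-status
  uses: mattermost/actions/delivery/update-commit-status@<pinned-sha>
  env:
    GITHUB_TOKEN: ${{ github.token }}
  with:
    repository_full_name: ${{ github.repository }}
    commit_sha: ${{ inputs.commit_sha }}
    context: "E2E Tests/cypress-smoke"  # one context per phase
    status: pending
```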
## Timeline
```
Timeline:
─────────────────────────────────────────────────────────────────────────────►
T0 T1 T2 T3 T4
│ │ │ │ │
│ Start │ Preflight │ Smoke Tests │ Full Tests │ Done
│ resolve │ Checks │ (both parallel) │ (both parallel) │
│ PR │ │ │ (if smoke pass) │
```
## Test Filtering
| Framework | Smoke Tests | Full Tests |
|-----------|-------------|------------|
| **Cypress** | `--stage=@prod --group=@smoke` | See below |
| **Playwright** | `--grep @smoke` | `--grep-invert "@smoke\|@visual"` |
### Cypress Full Test Filter
```
--stage="@prod"
--excludeGroup="@smoke,@te_only,@cloud_only,@high_availability"
--sortFirst="@compliance_export,@elasticsearch,@ldap_group,@ldap"
--sortLast="@saml,@keycloak,@plugin,@plugins_uninstall,@mfa,@license_removal"
```
- **excludeGroup**: Skips smoke tests (already run), TE-only, cloud-only, and HA tests
- **sortFirst**: Runs long-running test groups early for better parallelization
- **sortLast**: Runs tests that may affect system state at the end
## Tagging Smoke Tests
### Cypress
Add `@smoke` to the Group comment at the top of spec files:
```javascript
// Stage: @prod
// Group: @channels @messaging @smoke
```
### Playwright
Add `@smoke` to the test tag option:
```typescript
test('critical login flow', {tag: ['@smoke', '@login']}, async ({pw}) => {
// ...
});
```
## Worker Configuration
| Framework | Smoke Workers | Full Workers |
|-----------|---------------|--------------|
| **Cypress** | 1 | 20 |
| **Playwright** | 1 | 1 (uses internal parallelism via `PW_WORKERS`) |
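As a sketch, callers pick the worker count per phase via the template's `workers` input (the value below matches the Cypress caller); how `PW_WORKERS` maps to Playwright's internal worker pool is an assumption about the Makefile:
```yaml
cypress-full:
  uses: ./.github/workflows/e2e-tests-cypress-template.yml
  with:
    test_type: full
    workers: 20  # fans out across a 20-job matrix
# Playwright stays at a single CI job and parallelizes inside the run,
# e.g. via a PW_WORKERS environment variable (assumed).
```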
## Docker Services
Different test phases enable different Docker services based on test requirements:
| Test Phase | Docker Services |
|------------|-----------------|
| Smoke Tests | `postgres inbucket` |
| Full Tests | `postgres inbucket minio openldap elasticsearch keycloak` |
Full tests enable additional services to support tests requiring LDAP, Elasticsearch, S3-compatible storage (Minio), and SAML/OAuth (Keycloak).
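The service list is passed through to the templates as the `enabled_docker_services` input; a condensed sketch of the full-test call:
```yaml
with:
  test_type: full
  enabled_docker_services: "postgres inbucket minio openldap elasticsearch keycloak"
```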
## Failure Behavior
1. **Smoke test fails**: Full tests are skipped, only smoke commit status shows failure (no full test status created)
2. **Full test fails**: Full commit status shows failure with details
3. **Both pass**: Both smoke and full commit statuses show success
4. **No PR found**: Workflow fails immediately with error message
**Note**: Full test status updates use explicit job result checks (`needs.full-report.result == 'success'` / `'failure'`) rather than global `success()` / `failure()` functions. This ensures full test status is only updated when full tests actually run, not when smoke tests fail upstream.
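A sketch of the pattern, assuming a `full-report` job feeding a status-update job:
```yaml
update-full-success-status:
  # Fires only when full tests actually ran and produced a result;
  # if smoke fails upstream, full-report is skipped and this stays silent.
  if: always() && needs.full-report.result == 'success'
  needs:
    - full-report
```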
## Manual Trigger
The workflow can be triggered manually via `workflow_dispatch` for PR commits:
```bash
# Run E2E tests for a PR (commit SHA is resolved from the PR head)
gh workflow run e2e-tests-ci.yml -f pr_number=<PR_NUMBER>

# Or pin a specific PR commit
gh workflow run e2e-tests-ci.yml -f commit_sha=<PR_COMMIT_SHA>
```
**Note**: When triggering by `commit_sha`, the commit must be associated with an open PR; the workflow will fail otherwise.
## Automated Trigger (Argo Events)
The workflow is automatically triggered by Argo Events when the `Enterprise CI/docker-image` status check succeeds on a commit.
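The `workflow_dispatch` payload the sensor sends has this shape (taken from the comments in the workflow; values are illustrative):
```yaml
ref: master            # branch whose workflow files are used
inputs:
  commit_sha: abc1234  # the commit under test
```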
### Fork PR Handling
For PRs from forked repositories:
- `body.branches` may be empty (commit doesn't exist in base repo branches)
- Falls back to `master` branch for workflow files (trusted code)
- The `commit_sha` still points to the fork's commit for testing
- PR number is resolved via GitHub API (works for fork PRs)
### Flow
```
Enterprise CI/docker-image succeeds
                │
                ▼
       Argo Events Sensor
                │
                ▼
       workflow_dispatch
       (ref, commit_sha)
                │
                ▼
       e2e-tests-ci.yml
                │
                ▼
    resolve-pr (GitHub API)
                │
                ▼
 Cypress + Playwright (parallel)
```
## S3 Report Storage
Playwright test results are uploaded to S3:
| Test Phase | S3 Path |
|------------|---------|
| Smoke (with PR) | `server-pr-{PR_NUMBER}/e2e-reports/playwright-smoke/{RUN_ID}/` |
| Smoke (no PR) | `server-commit-{SHA7}/e2e-reports/playwright-smoke/{RUN_ID}/` |
| Full | `server-pr-{PR_NUMBER}/e2e-reports/playwright-full/{RUN_ID}/` |
**Note**: Full tests require a PR number, so there's no commit-based fallback for full test reports.
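Path selection mirrors the shell in the Playwright template's upload step; a condensed sketch (bucket and paths as used there):
```yaml
- name: ci/upload-to-s3
  run: |
    if [ -n "$PR_NUMBER" ]; then
      S3_PATH="server-pr-${PR_NUMBER}/e2e-reports/playwright-${TEST_TYPE}/${RUN_ID}"
    else
      S3_PATH="server-commit-${COMMIT_SHA::7}/e2e-reports/playwright-${TEST_TYPE}/${RUN_ID}"
    fi
    aws s3 sync playwright/results/ "s3://${AWS_S3_BUCKET}/${S3_PATH}/results/"
```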
## Related Files
- `e2e-tests/cypress/` - Cypress test suite
- `e2e-tests/playwright/` - Playwright test suite
- `e2e-tests/.ci/` - CI configuration and environment files
- `e2e-tests/Makefile` - Main Makefile with targets for running tests, generating cycles, and reporting

View file

@ -263,7 +263,6 @@ jobs:
status_check_context: "${{ needs.generate-test-variables.outputs.status_check_context }}"
workers_number: "${{ needs.generate-test-variables.outputs.workers_number }}"
testcase_failure_fatal: "${{ needs.generate-test-variables.outputs.TESTCASE_FAILURE_FATAL == 'true' }}"
run_preflight_checks: false
enable_reporting: true
SERVER: "${{ needs.generate-test-variables.outputs.SERVER }}"
SERVER_IMAGE: "${{ needs.generate-test-variables.outputs.SERVER_IMAGE }}"
@ -300,7 +299,6 @@ jobs:
status_check_context: "${{ needs.generate-test-variables.outputs.status_check_context }}-playwright"
workers_number: "1"
testcase_failure_fatal: "${{ needs.generate-test-variables.outputs.TESTCASE_FAILURE_FATAL == 'true' }}"
run_preflight_checks: false
enable_reporting: true
SERVER: "${{ needs.generate-test-variables.outputs.SERVER }}"
SERVER_IMAGE: "${{ needs.generate-test-variables.outputs.SERVER_IMAGE }}"

.github/workflows/e2e-tests-check.yml vendored Normal file
View file

@ -0,0 +1,69 @@
---
name: E2E Tests Check
on:
pull_request:
paths:
- "e2e-tests/**"
- "webapp/platform/client/**"
- "webapp/platform/types/**"
- ".github/workflows/e2e-*.yml"
jobs:
check:
runs-on: ubuntu-24.04
steps:
- name: ci/checkout-repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: ci/setup-node
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version-file: ".nvmrc"
cache: npm
cache-dependency-path: |
e2e-tests/cypress/package-lock.json
e2e-tests/playwright/package-lock.json
# Cypress check
- name: ci/cypress/npm-install
working-directory: e2e-tests/cypress
run: npm ci
- name: ci/cypress/npm-check
working-directory: e2e-tests/cypress
run: npm run check
# Playwright check
- name: ci/get-webapp-node-modules
working-directory: webapp
run: make node_modules
- name: ci/playwright/npm-install
working-directory: e2e-tests/playwright
run: npm ci
- name: ci/playwright/npm-check
working-directory: e2e-tests/playwright
run: npm run check
# Shell check
- name: ci/shell-check
working-directory: e2e-tests
run: make check-shell
# E2E-only check and trigger
- name: ci/check-e2e-test-only
id: check
uses: ./.github/actions/check-e2e-test-only
with:
base_sha: ${{ github.event.pull_request.base.sha }}
head_sha: ${{ github.event.pull_request.head.sha }}
- name: ci/trigger-e2e-with-master-image
if: steps.check.outputs.e2e_test_only == 'true'
env:
GH_TOKEN: ${{ github.token }}
PR_NUMBER: ${{ github.event.pull_request.number }}
IMAGE_TAG: ${{ steps.check.outputs.image_tag }}
run: |
echo "Triggering E2E tests for PR #${PR_NUMBER} with mattermostdevelopment/mattermost-enterprise-edition:${IMAGE_TAG}"
gh workflow run e2e-tests-ci.yml --field pr_number="${PR_NUMBER}"

View file

@ -20,12 +20,6 @@ on:
type: boolean
required: false
default: true
# NB: the following toggles will skip individual steps, rather than the whole jobs,
# to let the dependent jobs run even if these are false
run_preflight_checks:
type: boolean
required: false
default: true
enable_reporting:
type: boolean
required: false
@ -107,7 +101,7 @@ jobs:
update-initial-status:
runs-on: ubuntu-24.04
steps:
- uses: mattermost/actions/delivery/update-commit-status@main
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
env:
GITHUB_TOKEN: ${{ github.token }}
with:
@ -117,92 +111,6 @@ jobs:
description: E2E tests for mattermost server app
status: pending
cypress-check:
runs-on: ubuntu-24.04
needs:
- update-initial-status
defaults:
run:
working-directory: e2e-tests/cypress
steps:
- name: ci/checkout-repo
if: "${{ inputs.run_preflight_checks }}"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit_sha }}
fetch-depth: 0
- name: ci/setup-node
if: "${{ inputs.run_preflight_checks }}"
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
id: setup_node
with:
node-version-file: ".nvmrc"
cache: npm
cache-dependency-path: "e2e-tests/cypress/package-lock.json"
- name: ci/cypress/npm-install
if: "${{ inputs.run_preflight_checks }}"
run: |
npm ci
- name: ci/cypress/npm-check
if: "${{ inputs.run_preflight_checks }}"
run: |
npm run check
playwright-check:
runs-on: ubuntu-24.04
needs:
- update-initial-status
defaults:
run:
working-directory: e2e-tests/playwright
steps:
- name: ci/checkout-repo
if: "${{ inputs.run_preflight_checks }}"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit_sha }}
fetch-depth: 0
- name: ci/setup-node
if: "${{ inputs.run_preflight_checks }}"
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
id: setup_node
with:
node-version-file: ".nvmrc"
cache: npm
cache-dependency-path: "e2e-tests/playwright/package-lock.json"
- name: ci/get-webapp-node-modules
if: "${{ inputs.run_preflight_checks }}"
working-directory: webapp
# requires build of client and types
run: |
make node_modules
- name: ci/playwright/npm-install
if: "${{ inputs.run_preflight_checks }}"
run: |
npm ci
- name: ci/playwright/npm-check
if: "${{ inputs.run_preflight_checks }}"
run: |
npm run check
shell-check:
runs-on: ubuntu-24.04
needs:
- update-initial-status
defaults:
run:
working-directory: e2e-tests
steps:
- name: ci/checkout-repo
if: "${{ inputs.run_preflight_checks }}"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit_sha }}
fetch-depth: 0
- name: ci/shell-check
if: "${{ inputs.run_preflight_checks }}"
run: make check-shell
generate-build-variables:
runs-on: ubuntu-24.04
needs:
@ -290,9 +198,6 @@ jobs:
runs-on: "${{ matrix.os }}"
timeout-minutes: 120
needs:
- cypress-check
- playwright-check
- shell-check
- generate-build-variables
- generate-test-cycle
defaults:
@ -364,7 +269,9 @@ jobs:
echo "RollingRelease: smoketest completed. Starting full E2E tests."
fi
make
make cloud-teardown
- name: ci/cloud-teardown
if: always()
run: make cloud-teardown
- name: ci/e2e-test-store-results
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: always()
@ -427,12 +334,14 @@ jobs:
SERVER_IMAGE: "${{ inputs.SERVER_IMAGE }}"
AUTOMATION_DASHBOARD_URL: "${{ secrets.AUTOMATION_DASHBOARD_URL }}"
WEBHOOK_URL: "${{ secrets.REPORT_WEBHOOK_URL }}"
PR_NUMBER: "${{ inputs.PR_NUMBER }}"
BRANCH: "${{ inputs.BRANCH }}"
BUILD_ID: "${{ inputs.BUILD_ID }}"
MM_ENV: "${{ inputs.MM_ENV }}"
TM4J_API_KEY: "${{ secrets.REPORT_TM4J_API_KEY }}"
TEST_CYCLE_LINK_PREFIX: "${{ secrets.REPORT_TM4J_TEST_CYCLE_LINK_PREFIX }}"
run: |
echo "DEBUG: TYPE=${TYPE}, PR_NUMBER=${PR_NUMBER:-<not set>}"
make report
# The results dir may have been modified as part of the reporting: re-upload
- name: ci/upload-report-global
@ -469,12 +378,6 @@ jobs:
echo "📤 Uploading to s3://${AWS_S3_BUCKET}/${S3_PATH}/"
if [[ -d "$LOCAL_LOGS_PATH" ]]; then
aws s3 sync "$LOCAL_LOGS_PATH" "s3://${AWS_S3_BUCKET}/${S3_PATH}/logs/" \
--acl public-read \
--cache-control "no-cache"
fi
if [[ -d "$LOCAL_RESULTS_PATH" ]]; then
aws s3 sync "$LOCAL_RESULTS_PATH" "s3://${AWS_S3_BUCKET}/${S3_PATH}/results/" \
--acl public-read \
@ -534,7 +437,7 @@ jobs:
- test
- report
steps:
- uses: mattermost/actions/delivery/update-commit-status@main
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
env:
GITHUB_TOKEN: ${{ github.token }}
with:
@ -557,7 +460,7 @@ jobs:
- test
- report
steps:
- uses: mattermost/actions/delivery/update-commit-status@main
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
env:
GITHUB_TOKEN: ${{ github.token }}
with:

View file

@ -1,49 +1,207 @@
---
name: E2E Smoketests
name: E2E Tests
on:
# For PRs, this workflow gets triggered from the Argo Events platform.
# Check the following repo for details: https://github.com/mattermost/delivery-platform
# Argo Events Trigger (automated):
# - Triggered by: Enterprise CI/docker-image status check (success)
# - Payload: { ref: "<branch>", inputs: { commit_sha: "<sha>" } }
# - Uses commit-specific docker image
# - Checks for relevant file changes before running tests
#
# Manual Trigger:
# - Enter PR number only - commit SHA is resolved automatically from PR head
# - Uses commit-specific docker image
# - E2E tests always run (no file change check)
#
workflow_dispatch:
inputs:
commit_sha:
pr_number:
description: "PR number to test (for manual triggers)"
type: string
required: true
required: false
commit_sha:
description: "Commit SHA to test (for Argo Events)"
type: string
required: false
jobs:
generate-test-variables:
resolve-pr:
runs-on: ubuntu-24.04
outputs:
BRANCH: "${{ steps.generate.outputs.BRANCH }}"
BUILD_ID: "${{ steps.generate.outputs.BUILD_ID }}"
SERVER_IMAGE: "${{ steps.generate.outputs.SERVER_IMAGE }}"
PR_NUMBER: "${{ steps.resolve.outputs.PR_NUMBER }}"
COMMIT_SHA: "${{ steps.resolve.outputs.COMMIT_SHA }}"
SERVER_IMAGE_TAG: "${{ steps.e2e-check.outputs.image_tag }}"
steps:
- name: ci/smoke/generate-test-variables
id: generate
run: |
### Populate support variables
COMMIT_SHA=${{ inputs.commit_sha }}
SERVER_IMAGE_TAG="${COMMIT_SHA::7}"
- name: ci/checkout-repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
# BUILD_ID format: $pipelineID-$imageTag-$testType-$serverType-$serverEdition
# Reference on BUILD_ID parsing: https://github.com/saturninoabril/automation-dashboard/blob/175891781bf1072c162c58c6ec0abfc5bcb3520e/lib/common_utils.ts#L3-L23
BUILD_ID="${{ github.run_id }}_${{ github.run_attempt }}-${SERVER_IMAGE_TAG}-smoketest-onprem-ent"
echo "BRANCH=server-smoketest-${COMMIT_SHA::7}" >> $GITHUB_OUTPUT
echo "BUILD_ID=${BUILD_ID}" >> $GITHUB_OUTPUT
echo "SERVER_IMAGE=mattermostdevelopment/mattermost-enterprise-edition:${SERVER_IMAGE_TAG}" >> $GITHUB_OUTPUT
e2e-smoketest:
- name: ci/resolve-pr-and-commit
id: resolve
env:
GH_TOKEN: ${{ github.token }}
INPUT_PR_NUMBER: ${{ inputs.pr_number }}
INPUT_COMMIT_SHA: ${{ inputs.commit_sha }}
run: |
# Validate inputs
if [ -n "$INPUT_PR_NUMBER" ] && ! [[ "$INPUT_PR_NUMBER" =~ ^[0-9]+$ ]]; then
echo "::error::Invalid PR number format. Must be numeric."
exit 1
fi
if [ -n "$INPUT_COMMIT_SHA" ] && ! [[ "$INPUT_COMMIT_SHA" =~ ^[a-f0-9]{7,40}$ ]]; then
echo "::error::Invalid commit SHA format. Must be 7-40 hex characters."
exit 1
fi
# Manual trigger: PR number provided, resolve commit SHA from PR head
if [ -n "$INPUT_PR_NUMBER" ]; then
echo "Manual trigger: resolving commit SHA from PR #${INPUT_PR_NUMBER}"
PR_DATA=$(gh api "repos/${{ github.repository }}/pulls/${INPUT_PR_NUMBER}")
COMMIT_SHA=$(echo "$PR_DATA" | jq -r '.head.sha')
if [ -z "$COMMIT_SHA" ] || [ "$COMMIT_SHA" = "null" ]; then
echo "::error::Could not resolve commit SHA for PR #${INPUT_PR_NUMBER}"
exit 1
fi
echo "PR_NUMBER=${INPUT_PR_NUMBER}" >> $GITHUB_OUTPUT
echo "COMMIT_SHA=${COMMIT_SHA}" >> $GITHUB_OUTPUT
exit 0
fi
# Argo Events trigger: commit SHA provided, resolve PR number
if [ -n "$INPUT_COMMIT_SHA" ]; then
echo "Automated trigger: resolving PR number from commit ${INPUT_COMMIT_SHA}"
PR_NUMBER=$(gh api "repos/${{ github.repository }}/commits/${INPUT_COMMIT_SHA}/pulls" \
--jq '.[0].number // empty' 2>/dev/null || echo "")
if [ -n "$PR_NUMBER" ]; then
echo "Found PR #${PR_NUMBER} for commit ${INPUT_COMMIT_SHA}"
echo "PR_NUMBER=${PR_NUMBER}" >> $GITHUB_OUTPUT
echo "COMMIT_SHA=${INPUT_COMMIT_SHA}" >> $GITHUB_OUTPUT
else
echo "::error::No PR found for commit ${INPUT_COMMIT_SHA}. This workflow is for PRs only."
exit 1
fi
exit 0
fi
# Neither provided
echo "::error::Either pr_number or commit_sha must be provided"
exit 1
- name: ci/check-e2e-test-only
id: e2e-check
uses: ./.github/actions/check-e2e-test-only
with:
pr_number: ${{ steps.resolve.outputs.PR_NUMBER }}
check-changes:
needs: resolve-pr
runs-on: ubuntu-24.04
outputs:
should_run: "${{ steps.check.outputs.should_run }}"
steps:
- name: ci/checkout-repo
if: inputs.commit_sha != ''
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ needs.resolve-pr.outputs.COMMIT_SHA }}
fetch-depth: 0
- name: ci/check-relevant-changes
id: check
env:
GH_TOKEN: ${{ github.token }}
PR_NUMBER: ${{ needs.resolve-pr.outputs.PR_NUMBER }}
COMMIT_SHA: ${{ needs.resolve-pr.outputs.COMMIT_SHA }}
INPUT_PR_NUMBER: ${{ inputs.pr_number }}
run: |
# Manual trigger (pr_number provided): always run E2E tests
if [ -n "$INPUT_PR_NUMBER" ]; then
echo "Manual trigger detected - skipping file change check"
echo "should_run=true" >> $GITHUB_OUTPUT
exit 0
fi
# Automated trigger (commit_sha provided): check for relevant file changes
echo "Automated trigger detected - checking for relevant file changes"
# Get the base branch of the PR
BASE_SHA=$(gh api "repos/${{ github.repository }}/pulls/${PR_NUMBER}" --jq '.base.sha')
# Get changed files between base and head
CHANGED_FILES=$(git diff --name-only "${BASE_SHA}...${COMMIT_SHA}")
echo "Changed files:"
echo "$CHANGED_FILES"
# Check for relevant changes
SHOULD_RUN="false"
# Check for server Go files
if echo "$CHANGED_FILES" | grep -qE '^server/.*\.go$'; then
echo "Found server Go file changes"
SHOULD_RUN="true"
fi
# Check for webapp ts/js/tsx/jsx files
if echo "$CHANGED_FILES" | grep -qE '^webapp/.*\.(ts|tsx|js|jsx)$'; then
echo "Found webapp TypeScript/JavaScript file changes"
SHOULD_RUN="true"
fi
# Check for e2e-tests ts/js/tsx/jsx files
if echo "$CHANGED_FILES" | grep -qE '^e2e-tests/.*\.(ts|tsx|js|jsx)$'; then
echo "Found e2e-tests TypeScript/JavaScript file changes"
SHOULD_RUN="true"
fi
# Check for E2E-related CI workflow files
if echo "$CHANGED_FILES" | grep -qE '^\.github/workflows/e2e-.*\.yml$'; then
echo "Found E2E CI workflow file changes"
SHOULD_RUN="true"
fi
echo "should_run=${SHOULD_RUN}" >> $GITHUB_OUTPUT
echo "Should run E2E tests: ${SHOULD_RUN}"
e2e-cypress:
needs:
- generate-test-variables
uses: ./.github/workflows/e2e-tests-ci-template.yml
- resolve-pr
- check-changes
if: needs.check-changes.outputs.should_run == 'true'
uses: ./.github/workflows/e2e-tests-cypress.yml
with:
commit_sha: "${{ inputs.commit_sha }}"
status_check_context: "E2E Tests/smoketests"
TEST: cypress
REPORT_TYPE: none
SERVER: onprem
BRANCH: "${{ needs.generate-test-variables.outputs.BRANCH }}"
BUILD_ID: "${{ needs.generate-test-variables.outputs.BUILD_ID }}"
SERVER_IMAGE: "${{ needs.generate-test-variables.outputs.SERVER_IMAGE }}"
commit_sha: "${{ needs.resolve-pr.outputs.COMMIT_SHA }}"
server: "onprem"
server_image_tag: "${{ needs.resolve-pr.outputs.SERVER_IMAGE_TAG }}"
enable_reporting: true
report_type: "PR"
pr_number: "${{ needs.resolve-pr.outputs.PR_NUMBER }}"
secrets:
MM_LICENSE: "${{ secrets.MM_E2E_TEST_LICENSE_ONPREM_ENT }}"
AUTOMATION_DASHBOARD_URL: "${{ secrets.MM_E2E_AUTOMATION_DASHBOARD_URL }}"
AUTOMATION_DASHBOARD_TOKEN: "${{ secrets.MM_E2E_AUTOMATION_DASHBOARD_TOKEN }}"
PUSH_NOTIFICATION_SERVER: "${{ secrets.MM_E2E_PUSH_NOTIFICATION_SERVER }}"
REPORT_WEBHOOK_URL: "${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}"
CWS_URL: "${{ secrets.MM_E2E_CWS_URL }}"
CWS_EXTRA_HTTP_HEADERS: "${{ secrets.MM_E2E_CWS_EXTRA_HTTP_HEADERS }}"
e2e-playwright:
needs:
- resolve-pr
- check-changes
if: needs.check-changes.outputs.should_run == 'true'
uses: ./.github/workflows/e2e-tests-playwright.yml
with:
commit_sha: "${{ needs.resolve-pr.outputs.COMMIT_SHA }}"
server: "onprem"
server_image_tag: "${{ needs.resolve-pr.outputs.SERVER_IMAGE_TAG }}"
enable_reporting: true
report_type: "PR"
pr_number: "${{ needs.resolve-pr.outputs.PR_NUMBER }}"
secrets:
MM_LICENSE: "${{ secrets.MM_E2E_TEST_LICENSE_ONPREM_ENT }}"
AWS_ACCESS_KEY_ID: "${{ secrets.CYPRESS_AWS_ACCESS_KEY_ID }}"
AWS_SECRET_ACCESS_KEY: "${{ secrets.CYPRESS_AWS_SECRET_ACCESS_KEY }}"
REPORT_WEBHOOK_URL: "${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}"

View file

@ -0,0 +1,552 @@
---
name: E2E Tests - Cypress Template
on:
workflow_call:
inputs:
# Test configuration
test_type:
description: "Type of test run (smoke or full)"
type: string
required: true
test_filter:
description: "Test filter arguments"
type: string
required: true
workers:
description: "Number of parallel workers"
type: number
required: false
default: 1
timeout_minutes:
description: "Job timeout in minutes"
type: number
required: false
default: 30
enabled_docker_services:
description: "Space-separated list of docker services to enable"
type: string
required: false
default: "postgres inbucket"
# Common build variables
commit_sha:
type: string
required: true
branch:
type: string
required: true
build_id:
type: string
required: true
server_image_tag:
description: "Server image tag (e.g., master or short SHA)"
type: string
required: true
server:
type: string
required: false
default: onprem
# Reporting options
enable_reporting:
type: boolean
required: false
default: false
report_type:
type: string
required: false
pr_number:
type: string
required: false
# Commit status configuration
context_name:
description: "GitHub commit status context name"
type: string
required: true
outputs:
passed:
description: "Number of passed tests"
value: ${{ jobs.report.outputs.passed }}
failed:
description: "Number of failed tests"
value: ${{ jobs.report.outputs.failed }}
status_check_url:
description: "URL to test results"
value: ${{ jobs.generate-test-cycle.outputs.status_check_url }}
secrets:
MM_LICENSE:
required: false
AUTOMATION_DASHBOARD_URL:
required: false
AUTOMATION_DASHBOARD_TOKEN:
required: false
PUSH_NOTIFICATION_SERVER:
required: false
REPORT_WEBHOOK_URL:
required: false
CWS_URL:
required: false
CWS_EXTRA_HTTP_HEADERS:
required: false
env:
SERVER_IMAGE: "mattermostdevelopment/mattermost-enterprise-edition:${{ inputs.server_image_tag }}"
jobs:
update-initial-status:
runs-on: ubuntu-24.04
steps:
- name: ci/set-initial-status
uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
env:
GITHUB_TOKEN: ${{ github.token }}
with:
repository_full_name: ${{ github.repository }}
commit_sha: ${{ inputs.commit_sha }}
context: ${{ inputs.context_name }}
description: "with image tag: ${{ inputs.server_image_tag }}"
status: pending
generate-test-cycle:
runs-on: ubuntu-24.04
outputs:
status_check_url: "${{ steps.generate-cycle.outputs.status_check_url }}"
workers: "${{ steps.generate-workers.outputs.workers }}"
steps:
- name: ci/generate-workers
id: generate-workers
run: |
echo "workers=$(jq -nc '[range(${{ inputs.workers }})]')" >> $GITHUB_OUTPUT
- name: ci/checkout-repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit_sha }}
fetch-depth: 0
- name: ci/setup-node
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version-file: ".nvmrc"
cache: npm
cache-dependency-path: "e2e-tests/cypress/package-lock.json"
- name: ci/generate-test-cycle
id: generate-cycle
working-directory: e2e-tests
env:
AUTOMATION_DASHBOARD_URL: "${{ secrets.AUTOMATION_DASHBOARD_URL }}"
AUTOMATION_DASHBOARD_TOKEN: "${{ secrets.AUTOMATION_DASHBOARD_TOKEN }}"
BRANCH: "${{ inputs.branch }}-${{ inputs.test_type }}"
BUILD_ID: "${{ inputs.build_id }}"
TEST: cypress
TEST_FILTER: "${{ inputs.test_filter }}"
run: |
set -e -o pipefail
make generate-test-cycle | tee generate-test-cycle.out
TEST_CYCLE_ID=$(sed -nE "s/^.*id: '([^']+)'.*$/\1/p" <generate-test-cycle.out)
if [ -n "$TEST_CYCLE_ID" ]; then
echo "status_check_url=https://automation-dashboard.vercel.app/cycles/${TEST_CYCLE_ID}" >> $GITHUB_OUTPUT
else
echo "status_check_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" >> $GITHUB_OUTPUT
fi
run-tests:
runs-on: ubuntu-24.04
timeout-minutes: ${{ fromJSON(inputs.timeout_minutes) }}
continue-on-error: ${{ inputs.workers > 1 }}
needs:
- generate-test-cycle
if: needs.generate-test-cycle.result == 'success'
strategy:
fail-fast: false
matrix:
worker_index: ${{ fromJSON(needs.generate-test-cycle.outputs.workers) }}
defaults:
run:
working-directory: e2e-tests
env:
AUTOMATION_DASHBOARD_URL: "${{ secrets.AUTOMATION_DASHBOARD_URL }}"
AUTOMATION_DASHBOARD_TOKEN: "${{ secrets.AUTOMATION_DASHBOARD_TOKEN }}"
SERVER: "${{ inputs.server }}"
MM_LICENSE: "${{ secrets.MM_LICENSE }}"
ENABLED_DOCKER_SERVICES: "${{ inputs.enabled_docker_services }}"
TEST: cypress
TEST_FILTER: "${{ inputs.test_filter }}"
BRANCH: "${{ inputs.branch }}-${{ inputs.test_type }}"
BUILD_ID: "${{ inputs.build_id }}"
CI_BASE_URL: "${{ inputs.test_type }}-test-${{ matrix.worker_index }}"
CYPRESS_pushNotificationServer: "${{ secrets.PUSH_NOTIFICATION_SERVER }}"
CWS_URL: "${{ secrets.CWS_URL }}"
CWS_EXTRA_HTTP_HEADERS: "${{ secrets.CWS_EXTRA_HTTP_HEADERS }}"
steps:
- name: ci/checkout-repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit_sha }}
fetch-depth: 0
- name: ci/setup-node
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version-file: ".nvmrc"
cache: npm
cache-dependency-path: "e2e-tests/cypress/package-lock.json"
- name: ci/run-tests
run: |
make cloud-init
make
- name: ci/cloud-teardown
if: always()
run: make cloud-teardown
- name: ci/upload-results
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: always()
with:
name: cypress-${{ inputs.test_type }}-results-${{ matrix.worker_index }}
path: |
e2e-tests/cypress/logs/
e2e-tests/cypress/results/
retention-days: 5
calculate-results:
runs-on: ubuntu-24.04
needs:
- generate-test-cycle
- run-tests
if: always() && needs.generate-test-cycle.result == 'success'
outputs:
passed: ${{ steps.calculate.outputs.passed }}
failed: ${{ steps.calculate.outputs.failed }}
pending: ${{ steps.calculate.outputs.pending }}
total_specs: ${{ steps.calculate.outputs.total_specs }}
failed_specs: ${{ steps.calculate.outputs.failed_specs }}
failed_specs_count: ${{ steps.calculate.outputs.failed_specs_count }}
failed_tests: ${{ steps.calculate.outputs.failed_tests }}
commit_status_message: ${{ steps.calculate.outputs.commit_status_message }}
total: ${{ steps.calculate.outputs.total }}
pass_rate: ${{ steps.calculate.outputs.pass_rate }}
color: ${{ steps.calculate.outputs.color }}
steps:
- name: ci/checkout-repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit_sha }}
fetch-depth: 0
- name: ci/download-results
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
pattern: cypress-${{ inputs.test_type }}-results-*
path: e2e-tests/cypress/
merge-multiple: true
- name: ci/calculate
id: calculate
uses: ./.github/actions/calculate-cypress-results
with:
original-results-path: e2e-tests/cypress/results
run-failed-tests:
runs-on: ubuntu-24.04
timeout-minutes: ${{ fromJSON(inputs.timeout_minutes) }}
needs:
- generate-test-cycle
- run-tests
- calculate-results
if: >-
always() &&
needs.calculate-results.result == 'success' &&
needs.calculate-results.outputs.failed != '0' &&
fromJSON(needs.calculate-results.outputs.failed_specs_count) <= 20
defaults:
run:
working-directory: e2e-tests
env:
AUTOMATION_DASHBOARD_URL: "${{ secrets.AUTOMATION_DASHBOARD_URL }}"
AUTOMATION_DASHBOARD_TOKEN: "${{ secrets.AUTOMATION_DASHBOARD_TOKEN }}"
SERVER: "${{ inputs.server }}"
MM_LICENSE: "${{ secrets.MM_LICENSE }}"
ENABLED_DOCKER_SERVICES: "${{ inputs.enabled_docker_services }}"
TEST: cypress
BRANCH: "${{ inputs.branch }}-${{ inputs.test_type }}-retest"
BUILD_ID: "${{ inputs.build_id }}-retest"
CYPRESS_pushNotificationServer: "${{ secrets.PUSH_NOTIFICATION_SERVER }}"
CWS_URL: "${{ secrets.CWS_URL }}"
CWS_EXTRA_HTTP_HEADERS: "${{ secrets.CWS_EXTRA_HTTP_HEADERS }}"
steps:
- name: ci/checkout-repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit_sha }}
fetch-depth: 0
- name: ci/setup-node
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version-file: ".nvmrc"
cache: npm
cache-dependency-path: "e2e-tests/cypress/package-lock.json"
- name: ci/run-failed-specs
env:
SPEC_FILES: ${{ needs.calculate-results.outputs.failed_specs }}
run: |
echo "Retesting failed specs: $SPEC_FILES"
make cloud-init
make start-server run-specs
- name: ci/cloud-teardown
if: always()
run: make cloud-teardown
- name: ci/upload-retest-results
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: always()
with:
name: cypress-${{ inputs.test_type }}-retest-results
path: |
e2e-tests/cypress/logs/
e2e-tests/cypress/results/
retention-days: 5
report:
runs-on: ubuntu-24.04
needs:
- generate-test-cycle
- run-tests
- calculate-results
- run-failed-tests
if: always() && needs.calculate-results.result == 'success'
outputs:
passed: "${{ steps.final-results.outputs.passed }}"
failed: "${{ steps.final-results.outputs.failed }}"
commit_status_message: "${{ steps.final-results.outputs.commit_status_message }}"
defaults:
run:
working-directory: e2e-tests
steps:
- name: ci/checkout-repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit_sha }}
fetch-depth: 0
- name: ci/setup-node
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version-file: ".nvmrc"
cache: npm
cache-dependency-path: "e2e-tests/cypress/package-lock.json"
# PATH A: run-failed-tests was skipped (no failures to retest)
- name: ci/download-results-path-a
if: needs.run-failed-tests.result == 'skipped'
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
pattern: cypress-${{ inputs.test_type }}-results-*
path: e2e-tests/cypress/
merge-multiple: true
- name: ci/use-previous-calculation
if: needs.run-failed-tests.result == 'skipped'
id: use-previous
run: |
echo "passed=${{ needs.calculate-results.outputs.passed }}" >> $GITHUB_OUTPUT
echo "failed=${{ needs.calculate-results.outputs.failed }}" >> $GITHUB_OUTPUT
echo "pending=${{ needs.calculate-results.outputs.pending }}" >> $GITHUB_OUTPUT
echo "total_specs=${{ needs.calculate-results.outputs.total_specs }}" >> $GITHUB_OUTPUT
echo "failed_specs=${{ needs.calculate-results.outputs.failed_specs }}" >> $GITHUB_OUTPUT
echo "failed_specs_count=${{ needs.calculate-results.outputs.failed_specs_count }}" >> $GITHUB_OUTPUT
echo "commit_status_message=${{ needs.calculate-results.outputs.commit_status_message }}" >> $GITHUB_OUTPUT
echo "total=${{ needs.calculate-results.outputs.total }}" >> $GITHUB_OUTPUT
echo "pass_rate=${{ needs.calculate-results.outputs.pass_rate }}" >> $GITHUB_OUTPUT
echo "color=${{ needs.calculate-results.outputs.color }}" >> $GITHUB_OUTPUT
{
echo "failed_tests<<EOF"
echo "${{ needs.calculate-results.outputs.failed_tests }}"
echo "EOF"
} >> $GITHUB_OUTPUT
# PATH B: run-failed-tests ran, need to merge and recalculate
- name: ci/download-original-results
if: needs.run-failed-tests.result != 'skipped'
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
pattern: cypress-${{ inputs.test_type }}-results-*
path: e2e-tests/cypress/
merge-multiple: true
- name: ci/download-retest-results
if: needs.run-failed-tests.result != 'skipped'
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: cypress-${{ inputs.test_type }}-retest-results
path: e2e-tests/cypress/retest-results/
- name: ci/calculate-results
if: needs.run-failed-tests.result != 'skipped'
id: recalculate
uses: ./.github/actions/calculate-cypress-results
with:
original-results-path: e2e-tests/cypress/results
retest-results-path: e2e-tests/cypress/retest-results/results
# Set final outputs from either path
- name: ci/set-final-results
id: final-results
env:
USE_PREVIOUS_FAILED_TESTS: ${{ steps.use-previous.outputs.failed_tests }}
RECALCULATE_FAILED_TESTS: ${{ steps.recalculate.outputs.failed_tests }}
run: |
if [ "${{ needs.run-failed-tests.result }}" == "skipped" ]; then
echo "passed=${{ steps.use-previous.outputs.passed }}" >> $GITHUB_OUTPUT
echo "failed=${{ steps.use-previous.outputs.failed }}" >> $GITHUB_OUTPUT
echo "pending=${{ steps.use-previous.outputs.pending }}" >> $GITHUB_OUTPUT
echo "total_specs=${{ steps.use-previous.outputs.total_specs }}" >> $GITHUB_OUTPUT
echo "failed_specs=${{ steps.use-previous.outputs.failed_specs }}" >> $GITHUB_OUTPUT
echo "failed_specs_count=${{ steps.use-previous.outputs.failed_specs_count }}" >> $GITHUB_OUTPUT
echo "commit_status_message=${{ steps.use-previous.outputs.commit_status_message }}" >> $GITHUB_OUTPUT
echo "total=${{ steps.use-previous.outputs.total }}" >> $GITHUB_OUTPUT
echo "pass_rate=${{ steps.use-previous.outputs.pass_rate }}" >> $GITHUB_OUTPUT
echo "color=${{ steps.use-previous.outputs.color }}" >> $GITHUB_OUTPUT
{
echo "failed_tests<<EOF"
echo "$USE_PREVIOUS_FAILED_TESTS"
echo "EOF"
} >> $GITHUB_OUTPUT
else
echo "passed=${{ steps.recalculate.outputs.passed }}" >> $GITHUB_OUTPUT
echo "failed=${{ steps.recalculate.outputs.failed }}" >> $GITHUB_OUTPUT
echo "pending=${{ steps.recalculate.outputs.pending }}" >> $GITHUB_OUTPUT
echo "total_specs=${{ steps.recalculate.outputs.total_specs }}" >> $GITHUB_OUTPUT
echo "failed_specs=${{ steps.recalculate.outputs.failed_specs }}" >> $GITHUB_OUTPUT
echo "failed_specs_count=${{ steps.recalculate.outputs.failed_specs_count }}" >> $GITHUB_OUTPUT
echo "commit_status_message=${{ steps.recalculate.outputs.commit_status_message }}" >> $GITHUB_OUTPUT
echo "total=${{ steps.recalculate.outputs.total }}" >> $GITHUB_OUTPUT
echo "pass_rate=${{ steps.recalculate.outputs.pass_rate }}" >> $GITHUB_OUTPUT
echo "color=${{ steps.recalculate.outputs.color }}" >> $GITHUB_OUTPUT
{
echo "failed_tests<<EOF"
echo "$RECALCULATE_FAILED_TESTS"
echo "EOF"
} >> $GITHUB_OUTPUT
fi
- name: ci/upload-combined-results
if: inputs.workers > 1
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: cypress-${{ inputs.test_type }}-results
path: |
e2e-tests/cypress/logs/
e2e-tests/cypress/results/
- name: ci/publish-report
if: inputs.enable_reporting && env.REPORT_WEBHOOK_URL != ''
env:
REPORT_WEBHOOK_URL: ${{ secrets.REPORT_WEBHOOK_URL }}
PASS_RATE: ${{ steps.final-results.outputs.pass_rate }}
PASSED: ${{ steps.final-results.outputs.passed }}
TOTAL: ${{ steps.final-results.outputs.total }}
TOTAL_SPECS: ${{ steps.final-results.outputs.total_specs }}
COLOR: ${{ steps.final-results.outputs.color }}
REPORT_URL: ${{ needs.generate-test-cycle.outputs.status_check_url }}
TEST_TYPE: ${{ inputs.test_type }}
PR_NUMBER: ${{ inputs.pr_number }}
run: |
# Capitalize test type
TEST_TYPE_CAP=$(echo "$TEST_TYPE" | sed 's/.*/\u&/')
# Build payload with attachments
PAYLOAD=$(cat <<EOF
{
"username": "E2E Test",
"icon_url": "https://mattermost.com/wp-content/uploads/2022/02/icon_WS.png",
"attachments": [{
"color": "${COLOR}",
"text": "**Results - Cypress ${TEST_TYPE_CAP} Tests**\n\n:open-pull-request: [mattermost-pr-${PR_NUMBER}](https://github.com/${{ github.repository }}/pull/${PR_NUMBER})\n:docker: \`${{ env.SERVER_IMAGE }}\`\n${PASS_RATE}% (${PASSED}/${TOTAL}) in ${TOTAL_SPECS} spec files | [full report](${REPORT_URL})"
}]
}
EOF
)
# Send to webhook
curl -X POST -H "Content-Type: application/json" -d "$PAYLOAD" "$REPORT_WEBHOOK_URL"
- name: ci/write-job-summary
if: always()
env:
STATUS_CHECK_URL: ${{ needs.generate-test-cycle.outputs.status_check_url }}
TEST_TYPE: ${{ inputs.test_type }}
PASSED: ${{ steps.final-results.outputs.passed }}
FAILED: ${{ steps.final-results.outputs.failed }}
PENDING: ${{ steps.final-results.outputs.pending }}
TOTAL_SPECS: ${{ steps.final-results.outputs.total_specs }}
FAILED_SPECS_COUNT: ${{ steps.final-results.outputs.failed_specs_count }}
FAILED_SPECS: ${{ steps.final-results.outputs.failed_specs }}
COMMIT_STATUS_MESSAGE: ${{ steps.final-results.outputs.commit_status_message }}
FAILED_TESTS: ${{ steps.final-results.outputs.failed_tests }}
run: |
{
echo "## E2E Test Results - Cypress ${TEST_TYPE}"
echo ""
if [ "$FAILED" = "0" ]; then
echo "All tests passed: **${PASSED} passed**"
else
echo "<details>"
echo "<summary>${FAILED} failed, ${PASSED} passed</summary>"
echo ""
echo "| Test | File |"
echo "|------|------|"
echo "${FAILED_TESTS}"
echo "</details>"
fi
echo ""
echo "### Calculation Outputs"
echo ""
echo "| Output | Value |"
echo "|--------|-------|"
echo "| passed | ${PASSED} |"
echo "| failed | ${FAILED} |"
echo "| pending | ${PENDING} |"
echo "| total_specs | ${TOTAL_SPECS} |"
echo "| failed_specs_count | ${FAILED_SPECS_COUNT} |"
echo "| commit_status_message | ${COMMIT_STATUS_MESSAGE} |"
echo "| failed_specs | ${FAILED_SPECS:-none} |"
echo ""
echo "---"
echo "[View Full Report](${STATUS_CHECK_URL})"
} >> $GITHUB_STEP_SUMMARY
- name: ci/assert-results
run: |
[ "${{ steps.final-results.outputs.failed }}" = "0" ]
update-success-status:
runs-on: ubuntu-24.04
if: always() && needs.report.result == 'success' && needs.calculate-results.result == 'success'
needs:
- generate-test-cycle
- calculate-results
- report
steps:
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
env:
GITHUB_TOKEN: ${{ github.token }}
with:
repository_full_name: ${{ github.repository }}
commit_sha: ${{ inputs.commit_sha }}
context: ${{ inputs.context_name }}
description: "${{ needs.report.outputs.commit_status_message }} with image tag: ${{ inputs.server_image_tag }}"
status: success
target_url: ${{ needs.generate-test-cycle.outputs.status_check_url }}
update-failure-status:
runs-on: ubuntu-24.04
if: always() && (needs.report.result != 'success' || needs.calculate-results.result != 'success')
needs:
- generate-test-cycle
- calculate-results
- report
steps:
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
env:
GITHUB_TOKEN: ${{ github.token }}
with:
repository_full_name: ${{ github.repository }}
commit_sha: ${{ inputs.commit_sha }}
context: ${{ inputs.context_name }}
description: "${{ needs.report.outputs.commit_status_message }} with image tag: ${{ inputs.server_image_tag }}"
status: failure
target_url: ${{ needs.generate-test-cycle.outputs.status_check_url }}

.github/workflows/e2e-tests-cypress.yml vendored Normal file
View file

@ -0,0 +1,130 @@
---
name: E2E Tests - Cypress
on:
workflow_call:
inputs:
commit_sha:
type: string
required: true
enable_reporting:
type: boolean
required: false
default: false
server:
type: string
required: false
default: onprem
report_type:
type: string
required: false
pr_number:
type: string
required: false
server_image_tag:
type: string
required: false
description: "Server image tag (e.g., master or short SHA)"
secrets:
MM_LICENSE:
required: false
AUTOMATION_DASHBOARD_URL:
required: false
AUTOMATION_DASHBOARD_TOKEN:
required: false
PUSH_NOTIFICATION_SERVER:
required: false
REPORT_WEBHOOK_URL:
required: false
CWS_URL:
required: false
CWS_EXTRA_HTTP_HEADERS:
required: false
jobs:
generate-build-variables:
runs-on: ubuntu-24.04
outputs:
branch: "${{ steps.build-vars.outputs.branch }}"
build_id: "${{ steps.build-vars.outputs.build_id }}"
server_image_tag: "${{ steps.build-vars.outputs.server_image_tag }}"
steps:
- name: ci/generate-build-variables
id: build-vars
env:
COMMIT_SHA: ${{ inputs.commit_sha }}
PR_NUMBER: ${{ inputs.pr_number }}
INPUT_SERVER_IMAGE_TAG: ${{ inputs.server_image_tag }}
RUN_ID: ${{ github.run_id }}
RUN_ATTEMPT: ${{ github.run_attempt }}
run: |
# Use provided server_image_tag or derive from commit SHA
if [ -n "$INPUT_SERVER_IMAGE_TAG" ]; then
SERVER_IMAGE_TAG="$INPUT_SERVER_IMAGE_TAG"
else
SERVER_IMAGE_TAG="${COMMIT_SHA::7}"
fi
echo "server_image_tag=${SERVER_IMAGE_TAG}" >> $GITHUB_OUTPUT
# Generate branch name
if [ -n "$PR_NUMBER" ]; then
echo "branch=server-pr-${PR_NUMBER}" >> $GITHUB_OUTPUT
else
echo "branch=server-commit-${SERVER_IMAGE_TAG}" >> $GITHUB_OUTPUT
fi
# Generate build ID
echo "build_id=${RUN_ID}_${RUN_ATTEMPT}-${SERVER_IMAGE_TAG}-cypress-onprem-ent" >> $GITHUB_OUTPUT
cypress-smoke:
needs:
- generate-build-variables
uses: ./.github/workflows/e2e-tests-cypress-template.yml
with:
test_type: smoke
test_filter: "--stage=@prod --group=@smoke"
workers: 1
timeout_minutes: 30
enabled_docker_services: "postgres inbucket"
commit_sha: ${{ inputs.commit_sha }}
branch: ${{ needs.generate-build-variables.outputs.branch }}
build_id: ${{ needs.generate-build-variables.outputs.build_id }}
server_image_tag: ${{ needs.generate-build-variables.outputs.server_image_tag }}
server: ${{ inputs.server }}
context_name: "E2E Tests / cypress-smoke"
secrets:
MM_LICENSE: ${{ secrets.MM_LICENSE }}
AUTOMATION_DASHBOARD_URL: ${{ secrets.AUTOMATION_DASHBOARD_URL }}
AUTOMATION_DASHBOARD_TOKEN: ${{ secrets.AUTOMATION_DASHBOARD_TOKEN }}
PUSH_NOTIFICATION_SERVER: ${{ secrets.PUSH_NOTIFICATION_SERVER }}
CWS_URL: ${{ secrets.CWS_URL }}
CWS_EXTRA_HTTP_HEADERS: ${{ secrets.CWS_EXTRA_HTTP_HEADERS }}
cypress-full:
needs:
- cypress-smoke
- generate-build-variables
if: needs.cypress-smoke.outputs.failed == '0'
uses: ./.github/workflows/e2e-tests-cypress-template.yml
with:
test_type: full
test_filter: '--stage="@prod" --excludeGroup="@smoke,@te_only,@cloud_only,@high_availability" --sortFirst="@compliance_export,@elasticsearch,@ldap_group,@ldap" --sortLast="@saml,@keycloak,@plugin,@plugins_uninstall,@mfa,@license_removal"'
workers: 20
timeout_minutes: 60
enabled_docker_services: "postgres inbucket minio openldap elasticsearch keycloak"
commit_sha: ${{ inputs.commit_sha }}
branch: ${{ needs.generate-build-variables.outputs.branch }}
build_id: ${{ needs.generate-build-variables.outputs.build_id }}
server_image_tag: ${{ needs.generate-build-variables.outputs.server_image_tag }}
server: ${{ inputs.server }}
enable_reporting: ${{ inputs.enable_reporting }}
report_type: ${{ inputs.report_type }}
pr_number: ${{ inputs.pr_number }}
context_name: "E2E Tests / cypress-full"
secrets:
MM_LICENSE: ${{ secrets.MM_LICENSE }}
AUTOMATION_DASHBOARD_URL: ${{ secrets.AUTOMATION_DASHBOARD_URL }}
AUTOMATION_DASHBOARD_TOKEN: ${{ secrets.AUTOMATION_DASHBOARD_TOKEN }}
PUSH_NOTIFICATION_SERVER: ${{ secrets.PUSH_NOTIFICATION_SERVER }}
REPORT_WEBHOOK_URL: ${{ secrets.REPORT_WEBHOOK_URL }}
CWS_URL: ${{ secrets.CWS_URL }}
CWS_EXTRA_HTTP_HEADERS: ${{ secrets.CWS_EXTRA_HTTP_HEADERS }}

View file

@ -0,0 +1,89 @@
---
name: E2E Tests - Override Status
on:
workflow_dispatch:
inputs:
pr_number:
description: "PR number to update status for"
required: true
type: string
jobs:
override-status:
runs-on: ubuntu-24.04
steps:
- name: Validate inputs
env:
PR_NUMBER: ${{ inputs.pr_number }}
run: |
if ! [[ "$PR_NUMBER" =~ ^[0-9]+$ ]]; then
echo "::error::Invalid PR number format. Must be numeric."
exit 1
fi
- name: Get PR head SHA
id: pr-info
env:
GH_TOKEN: ${{ github.token }}
PR_NUMBER: ${{ inputs.pr_number }}
run: |
PR_DATA=$(gh api repos/${{ github.repository }}/pulls/${PR_NUMBER})
HEAD_SHA=$(echo "$PR_DATA" | jq -r '.head.sha')
echo "head_sha=$HEAD_SHA" >> $GITHUB_OUTPUT
- name: Override failed full test statuses
env:
GH_TOKEN: ${{ github.token }}
COMMIT_SHA: ${{ steps.pr-info.outputs.head_sha }}
run: |
# Only full tests can be overridden (smoke tests must pass)
FULL_TEST_CONTEXTS=("E2E Tests/playwright-full" "E2E Tests/cypress-full")
for CONTEXT_NAME in "${FULL_TEST_CONTEXTS[@]}"; do
echo "Checking: $CONTEXT_NAME"
# Get current status
STATUS_JSON=$(gh api repos/${{ github.repository }}/commits/${COMMIT_SHA}/statuses \
--jq "[.[] | select(.context == \"$CONTEXT_NAME\")] | first // empty")
if [ -z "$STATUS_JSON" ]; then
echo " No status found, skipping"
continue
fi
CURRENT_DESC=$(echo "$STATUS_JSON" | jq -r '.description // ""')
CURRENT_URL=$(echo "$STATUS_JSON" | jq -r '.target_url // ""')
CURRENT_STATE=$(echo "$STATUS_JSON" | jq -r '.state // ""')
echo " Current: $CURRENT_DESC ($CURRENT_STATE)"
# Only override if status is failure
if [ "$CURRENT_STATE" != "failure" ]; then
echo " Not failed, skipping"
continue
fi
# Parse and construct new message
if [[ "$CURRENT_DESC" =~ ^([0-9]+)\ failed,\ ([0-9]+)\ passed$ ]]; then
FAILED="${BASH_REMATCH[1]}"
PASSED="${BASH_REMATCH[2]}"
NEW_MSG="${FAILED} failed (verified), ${PASSED} passed"
elif [[ "$CURRENT_DESC" =~ ^([0-9]+)\ failed\ \([^)]+\),\ ([0-9]+)\ passed$ ]]; then
FAILED="${BASH_REMATCH[1]}"
PASSED="${BASH_REMATCH[2]}"
NEW_MSG="${FAILED} failed (verified), ${PASSED} passed"
else
NEW_MSG="${CURRENT_DESC} (verified)"
fi
echo " New: $NEW_MSG"
# Update status via GitHub API
gh api repos/${{ github.repository }}/statuses/${COMMIT_SHA} \
-f state=success \
-f context="$CONTEXT_NAME" \
-f description="$NEW_MSG" \
-f target_url="$CURRENT_URL"
echo " Updated to success"
done

View file

@ -0,0 +1,442 @@
---
name: E2E Tests - Playwright Template
on:
workflow_call:
inputs:
# Test configuration
test_type:
description: "Type of test run (smoke or full)"
type: string
required: true
test_filter:
description: "Test filter arguments (e.g., --grep @smoke)"
type: string
required: true
timeout_minutes:
description: "Job timeout in minutes"
type: number
required: false
default: 60
enabled_docker_services:
description: "Space-separated list of docker services to enable"
type: string
required: false
default: "postgres inbucket"
# Common build variables
commit_sha:
type: string
required: true
branch:
type: string
required: true
build_id:
type: string
required: true
server_image_tag:
description: "Server image tag (e.g., master or short SHA)"
type: string
required: true
server:
type: string
required: false
default: onprem
# Reporting options
enable_reporting:
type: boolean
required: false
default: false
report_type:
type: string
required: false
pr_number:
type: string
required: false
# Commit status configuration
context_name:
description: "GitHub commit status context name"
type: string
required: true
outputs:
passed:
description: "Number of passed tests"
value: ${{ jobs.report.outputs.passed }}
failed:
description: "Number of failed tests"
value: ${{ jobs.report.outputs.failed }}
report_url:
description: "URL to test report on S3"
value: ${{ jobs.report.outputs.report_url }}
secrets:
MM_LICENSE:
required: false
REPORT_WEBHOOK_URL:
required: false
AWS_ACCESS_KEY_ID:
required: true
AWS_SECRET_ACCESS_KEY:
required: true
env:
SERVER_IMAGE: "mattermostdevelopment/mattermost-enterprise-edition:${{ inputs.server_image_tag }}"
jobs:
update-initial-status:
runs-on: ubuntu-24.04
steps:
- name: ci/set-initial-status
uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
env:
GITHUB_TOKEN: ${{ github.token }}
with:
repository_full_name: ${{ github.repository }}
commit_sha: ${{ inputs.commit_sha }}
context: ${{ inputs.context_name }}
description: "with image tag: ${{ inputs.server_image_tag }}"
status: pending
run-tests:
runs-on: ubuntu-24.04
timeout-minutes: ${{ fromJSON(inputs.timeout_minutes) }}
defaults:
run:
working-directory: e2e-tests
env:
SERVER: "${{ inputs.server }}"
MM_LICENSE: "${{ secrets.MM_LICENSE }}"
ENABLED_DOCKER_SERVICES: "${{ inputs.enabled_docker_services }}"
TEST: playwright
TEST_FILTER: "${{ inputs.test_filter }}"
BRANCH: "${{ inputs.branch }}-${{ inputs.test_type }}"
BUILD_ID: "${{ inputs.build_id }}"
steps:
- name: ci/checkout-repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit_sha }}
fetch-depth: 0
- name: ci/setup-node
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version-file: ".nvmrc"
cache: npm
cache-dependency-path: "e2e-tests/playwright/package-lock.json"
- name: ci/get-webapp-node-modules
working-directory: webapp
run: make node_modules
- name: ci/run-tests
run: |
make cloud-init
make
- name: ci/cloud-teardown
if: always()
run: make cloud-teardown
- name: ci/upload-results
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: always()
with:
name: playwright-${{ inputs.test_type }}-results
path: |
e2e-tests/playwright/logs/
e2e-tests/playwright/results/
retention-days: 5
calculate-results:
runs-on: ubuntu-24.04
needs:
- run-tests
if: always()
outputs:
passed: ${{ steps.calculate.outputs.passed }}
failed: ${{ steps.calculate.outputs.failed }}
flaky: ${{ steps.calculate.outputs.flaky }}
skipped: ${{ steps.calculate.outputs.skipped }}
total_specs: ${{ steps.calculate.outputs.total_specs }}
failed_specs: ${{ steps.calculate.outputs.failed_specs }}
failed_specs_count: ${{ steps.calculate.outputs.failed_specs_count }}
failed_tests: ${{ steps.calculate.outputs.failed_tests }}
commit_status_message: ${{ steps.calculate.outputs.commit_status_message }}
total: ${{ steps.calculate.outputs.total }}
pass_rate: ${{ steps.calculate.outputs.pass_rate }}
passing: ${{ steps.calculate.outputs.passing }}
color: ${{ steps.calculate.outputs.color }}
steps:
- name: ci/checkout-repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit_sha }}
fetch-depth: 0
- name: ci/download-results
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: playwright-${{ inputs.test_type }}-results
path: e2e-tests/playwright/
- name: ci/calculate
id: calculate
uses: ./.github/actions/calculate-playwright-results
with:
original-results-path: e2e-tests/playwright/results/reporter/results.json
run-failed-tests:
runs-on: ubuntu-24.04
timeout-minutes: ${{ fromJSON(inputs.timeout_minutes) }}
needs:
- run-tests
- calculate-results
if: >-
always() &&
needs.calculate-results.result == 'success' &&
needs.calculate-results.outputs.failed != '0' &&
fromJSON(needs.calculate-results.outputs.failed_specs_count) <= 20
defaults:
run:
working-directory: e2e-tests
env:
SERVER: "${{ inputs.server }}"
MM_LICENSE: "${{ secrets.MM_LICENSE }}"
ENABLED_DOCKER_SERVICES: "${{ inputs.enabled_docker_services }}"
TEST: playwright
BRANCH: "${{ inputs.branch }}-${{ inputs.test_type }}-retest"
BUILD_ID: "${{ inputs.build_id }}-retest"
steps:
- name: ci/checkout-repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit_sha }}
fetch-depth: 0
- name: ci/setup-node
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version-file: ".nvmrc"
cache: npm
cache-dependency-path: "e2e-tests/playwright/package-lock.json"
- name: ci/get-webapp-node-modules
working-directory: webapp
run: make node_modules
- name: ci/run-failed-specs
env:
SPEC_FILES: ${{ needs.calculate-results.outputs.failed_specs }}
run: |
echo "Retesting failed specs: $SPEC_FILES"
make cloud-init
make start-server run-specs
- name: ci/cloud-teardown
if: always()
run: make cloud-teardown
- name: ci/upload-retest-results
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: always()
with:
name: playwright-${{ inputs.test_type }}-retest-results
path: |
e2e-tests/playwright/logs/
e2e-tests/playwright/results/
retention-days: 5
report:
runs-on: ubuntu-24.04
needs:
- run-tests
- calculate-results
- run-failed-tests
if: always() && needs.calculate-results.result == 'success'
outputs:
passed: "${{ steps.final-results.outputs.passed }}"
failed: "${{ steps.final-results.outputs.failed }}"
commit_status_message: "${{ steps.final-results.outputs.commit_status_message }}"
report_url: "${{ steps.upload-to-s3.outputs.report_url }}"
defaults:
run:
working-directory: e2e-tests
steps:
- name: ci/checkout-repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit_sha }}
fetch-depth: 0
- name: ci/setup-node
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version-file: ".nvmrc"
cache: npm
cache-dependency-path: "e2e-tests/playwright/package-lock.json"
# Download original results (always needed)
- name: ci/download-results
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: playwright-${{ inputs.test_type }}-results
path: e2e-tests/playwright/
# Download retest results (only if retest ran)
- name: ci/download-retest-results
if: needs.run-failed-tests.result != 'skipped'
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: playwright-${{ inputs.test_type }}-retest-results
path: e2e-tests/playwright/retest-results/
# Calculate results (with optional merge of retest results)
- name: ci/calculate-results
id: final-results
uses: ./.github/actions/calculate-playwright-results
with:
original-results-path: e2e-tests/playwright/results/reporter/results.json
retest-results-path: ${{ needs.run-failed-tests.result != 'skipped' && 'e2e-tests/playwright/retest-results/results/reporter/results.json' || '' }}
- name: ci/aws-configure
uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1
with:
aws-region: us-east-1
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- name: ci/upload-to-s3
id: upload-to-s3
env:
AWS_REGION: us-east-1
AWS_S3_BUCKET: mattermost-cypress-report
PR_NUMBER: "${{ inputs.pr_number }}"
RUN_ID: "${{ github.run_id }}"
COMMIT_SHA: "${{ inputs.commit_sha }}"
TEST_TYPE: "${{ inputs.test_type }}"
run: |
LOCAL_RESULTS_PATH="playwright/results/"
LOCAL_LOGS_PATH="playwright/logs/"
# Use PR number if available, otherwise use commit SHA prefix
if [ -n "$PR_NUMBER" ]; then
S3_PATH="server-pr-${PR_NUMBER}/e2e-reports/playwright-${TEST_TYPE}/${RUN_ID}"
else
S3_PATH="server-commit-${COMMIT_SHA::7}/e2e-reports/playwright-${TEST_TYPE}/${RUN_ID}"
fi
if [[ -d "$LOCAL_RESULTS_PATH" ]]; then
aws s3 sync "$LOCAL_RESULTS_PATH" "s3://${AWS_S3_BUCKET}/${S3_PATH}/results/" \
--acl public-read --cache-control "no-cache"
fi
REPORT_URL="https://${AWS_S3_BUCKET}.s3.amazonaws.com/${S3_PATH}/results/reporter/index.html"
echo "report_url=$REPORT_URL" >> "$GITHUB_OUTPUT"
- name: ci/publish-report
if: inputs.enable_reporting && env.REPORT_WEBHOOK_URL != ''
env:
REPORT_WEBHOOK_URL: ${{ secrets.REPORT_WEBHOOK_URL }}
PASS_RATE: ${{ steps.final-results.outputs.pass_rate }}
PASSING: ${{ steps.final-results.outputs.passing }}
TOTAL: ${{ steps.final-results.outputs.total }}
TOTAL_SPECS: ${{ steps.final-results.outputs.total_specs }}
COLOR: ${{ steps.final-results.outputs.color }}
REPORT_URL: ${{ steps.upload-to-s3.outputs.report_url }}
TEST_TYPE: ${{ inputs.test_type }}
PR_NUMBER: ${{ inputs.pr_number }}
run: |
# Capitalize test type
TEST_TYPE_CAP=$(echo "$TEST_TYPE" | sed 's/.*/\u&/')
# Build payload with attachments
PAYLOAD=$(cat <<EOF
{
"username": "E2E Test",
"icon_url": "https://mattermost.com/wp-content/uploads/2022/02/icon_WS.png",
"attachments": [{
"color": "${COLOR}",
"text": "**Results - Playwright ${TEST_TYPE_CAP} Tests**\n\n:open-pull-request: [mattermost-pr-${PR_NUMBER}](https://github.com/${{ github.repository }}/pull/${PR_NUMBER})\n:docker: \`${{ env.SERVER_IMAGE }}\`\n${PASS_RATE}% (${PASSING}/${TOTAL}) in ${TOTAL_SPECS} spec files | [full report](${REPORT_URL})"
}]
}
EOF
)
# Send to webhook
curl -X POST -H "Content-Type: application/json" -d "$PAYLOAD" "$REPORT_WEBHOOK_URL"
- name: ci/write-job-summary
if: always()
env:
REPORT_URL: ${{ steps.upload-to-s3.outputs.report_url }}
TEST_TYPE: ${{ inputs.test_type }}
PASSED: ${{ steps.final-results.outputs.passed }}
FAILED: ${{ steps.final-results.outputs.failed }}
FLAKY: ${{ steps.final-results.outputs.flaky }}
SKIPPED: ${{ steps.final-results.outputs.skipped }}
TOTAL_SPECS: ${{ steps.final-results.outputs.total_specs }}
FAILED_SPECS_COUNT: ${{ steps.final-results.outputs.failed_specs_count }}
FAILED_SPECS: ${{ steps.final-results.outputs.failed_specs }}
COMMIT_STATUS_MESSAGE: ${{ steps.final-results.outputs.commit_status_message }}
FAILED_TESTS: ${{ steps.final-results.outputs.failed_tests }}
run: |
{
echo "## E2E Test Results - Playwright ${TEST_TYPE}"
echo ""
if [ "$FAILED" = "0" ]; then
echo "All tests passed: **${PASSED} passed**"
else
echo "<details>"
echo "<summary>${FAILED} failed, ${PASSED} passed</summary>"
echo ""
echo "| Test | File |"
echo "|------|------|"
echo "${FAILED_TESTS}"
echo "</details>"
fi
echo ""
echo "### Calculation Outputs"
echo ""
echo "| Output | Value |"
echo "|--------|-------|"
echo "| passed | ${PASSED} |"
echo "| failed | ${FAILED} |"
echo "| flaky | ${FLAKY} |"
echo "| skipped | ${SKIPPED} |"
echo "| total_specs | ${TOTAL_SPECS} |"
echo "| failed_specs_count | ${FAILED_SPECS_COUNT} |"
echo "| commit_status_message | ${COMMIT_STATUS_MESSAGE} |"
echo "| failed_specs | ${FAILED_SPECS:-none} |"
echo ""
echo "---"
echo "[View Full Report](${REPORT_URL})"
} >> "$GITHUB_STEP_SUMMARY"
- name: ci/assert-results
run: |
[ "${{ steps.final-results.outputs.failed }}" = "0" ]
update-success-status:
runs-on: ubuntu-24.04
if: always() && needs.report.result == 'success' && needs.calculate-results.result == 'success'
needs:
- calculate-results
- report
steps:
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
env:
GITHUB_TOKEN: ${{ github.token }}
with:
repository_full_name: ${{ github.repository }}
commit_sha: ${{ inputs.commit_sha }}
context: ${{ inputs.context_name }}
description: "${{ needs.report.outputs.commit_status_message }} with image tag: ${{ inputs.server_image_tag }}"
status: success
target_url: ${{ needs.report.outputs.report_url }}
update-failure-status:
runs-on: ubuntu-24.04
if: always() && (needs.report.result != 'success' || needs.calculate-results.result != 'success')
needs:
- calculate-results
- report
steps:
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
env:
GITHUB_TOKEN: ${{ github.token }}
with:
repository_full_name: ${{ github.repository }}
commit_sha: ${{ inputs.commit_sha }}
context: ${{ inputs.context_name }}
description: "${{ needs.report.outputs.commit_status_message }} with image tag: ${{ inputs.server_image_tag }}"
status: failure
target_url: ${{ needs.report.outputs.report_url }}
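
Note: the upload step in this template derives the report location from the PR number (or commit SHA) and the run ID. A minimal worked sketch, assuming hypothetical values PR_NUMBER=12345, RUN_ID=67890, TEST_TYPE=smoke:

S3_PATH="server-pr-12345/e2e-reports/playwright-smoke/67890"
REPORT_URL="https://mattermost-cypress-report.s3.amazonaws.com/${S3_PATH}/results/reporter/index.html"

Without a PR number, the prefix becomes server-commit-<first 7 chars of the SHA> instead.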

View file

@ -0,0 +1,120 @@
---
name: E2E Tests - Playwright
on:
workflow_call:
inputs:
commit_sha:
type: string
required: true
enable_reporting:
type: boolean
required: false
default: false
server:
type: string
required: false
default: onprem
report_type:
type: string
required: false
pr_number:
type: string
required: false
server_image_tag:
type: string
required: false
description: "Server image tag (e.g., master or short SHA)"
secrets:
MM_LICENSE:
required: false
REPORT_WEBHOOK_URL:
required: false
AWS_ACCESS_KEY_ID:
required: true
AWS_SECRET_ACCESS_KEY:
required: true
jobs:
generate-build-variables:
runs-on: ubuntu-24.04
outputs:
branch: "${{ steps.build-vars.outputs.branch }}"
build_id: "${{ steps.build-vars.outputs.build_id }}"
server_image_tag: "${{ steps.build-vars.outputs.server_image_tag }}"
steps:
- name: ci/generate-build-variables
id: build-vars
env:
COMMIT_SHA: ${{ inputs.commit_sha }}
PR_NUMBER: ${{ inputs.pr_number }}
INPUT_SERVER_IMAGE_TAG: ${{ inputs.server_image_tag }}
RUN_ID: ${{ github.run_id }}
RUN_ATTEMPT: ${{ github.run_attempt }}
run: |
# Use provided server_image_tag or derive from commit SHA
if [ -n "$INPUT_SERVER_IMAGE_TAG" ]; then
SERVER_IMAGE_TAG="$INPUT_SERVER_IMAGE_TAG"
else
SERVER_IMAGE_TAG="${COMMIT_SHA::7}"
fi
echo "server_image_tag=${SERVER_IMAGE_TAG}" >> $GITHUB_OUTPUT
# Generate branch name
if [ -n "$PR_NUMBER" ]; then
echo "branch=server-pr-${PR_NUMBER}" >> $GITHUB_OUTPUT
else
echo "branch=server-commit-${SERVER_IMAGE_TAG}" >> $GITHUB_OUTPUT
fi
# Generate build ID
echo "build_id=${RUN_ID}_${RUN_ATTEMPT}-${SERVER_IMAGE_TAG}-playwright-onprem-ent" >> $GITHUB_OUTPUT
playwright-smoke:
needs:
- generate-build-variables
uses: ./.github/workflows/e2e-tests-playwright-template.yml
with:
test_type: smoke
test_filter: "--grep @smoke"
timeout_minutes: 30
enabled_docker_services: "postgres inbucket"
commit_sha: ${{ inputs.commit_sha }}
branch: ${{ needs.generate-build-variables.outputs.branch }}
build_id: ${{ needs.generate-build-variables.outputs.build_id }}
server_image_tag: ${{ needs.generate-build-variables.outputs.server_image_tag }}
server: ${{ inputs.server }}
context_name: "E2E Tests / playwright-smoke"
pr_number: ${{ inputs.pr_number }}
secrets:
MM_LICENSE: ${{ secrets.MM_LICENSE }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# ════════════════════════════════════════════════════════════════════════════
# FULL TESTS (only if smoke passes and pr_number is provided)
# ════════════════════════════════════════════════════════════════════════════
playwright-full:
needs:
- playwright-smoke
- generate-build-variables
if: needs.playwright-smoke.outputs.failed == '0' && inputs.pr_number != ''
uses: ./.github/workflows/e2e-tests-playwright-template.yml
with:
test_type: full
test_filter: '--grep-invert "@smoke|@visual"'
timeout_minutes: 120
enabled_docker_services: "postgres inbucket minio openldap elasticsearch keycloak"
commit_sha: ${{ inputs.commit_sha }}
branch: ${{ needs.generate-build-variables.outputs.branch }}
build_id: ${{ needs.generate-build-variables.outputs.build_id }}
server_image_tag: ${{ needs.generate-build-variables.outputs.server_image_tag }}
server: ${{ inputs.server }}
enable_reporting: ${{ inputs.enable_reporting }}
report_type: ${{ inputs.report_type }}
pr_number: ${{ inputs.pr_number }}
context_name: "E2E Tests / playwright-full"
secrets:
MM_LICENSE: ${{ secrets.MM_LICENSE }}
REPORT_WEBHOOK_URL: ${{ secrets.REPORT_WEBHOOK_URL }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
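
Note: a worked example of the generate-build-variables job above, assuming a hypothetical push (no pr_number, no server_image_tag input) with COMMIT_SHA=abc1234deadbeef, RUN_ID=100, RUN_ATTEMPT=1:

SERVER_IMAGE_TAG=abc1234                       # ${COMMIT_SHA::7}
branch=server-commit-abc1234
build_id=100_1-abc1234-playwright-onprem-ent

With pr_number=12345, branch would instead be server-pr-12345.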

View file

@ -0,0 +1,156 @@
---
name: "E2E Tests/verified"
on:
pull_request:
types: [labeled]
env:
REPORT_WEBHOOK_URL: ${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}
jobs:
approve-e2e:
if: github.event.label.name == 'E2E Tests/verified'
runs-on: ubuntu-24.04
steps:
- name: ci/check-user-permission
id: check-permission
env:
GH_TOKEN: ${{ github.token }}
LABEL_AUTHOR: ${{ github.event.sender.login }}
run: |
# Check if user is a member of mattermost org
HTTP_STATUS=$(gh api orgs/mattermost/members/${LABEL_AUTHOR} --silent -i 2>/dev/null | head -1 | awk '{print $2}')
if [[ "$HTTP_STATUS" != "204" ]]; then
echo "User ${LABEL_AUTHOR} is not a member of mattermost org (status: ${HTTP_STATUS})"
exit 1
fi
echo "User ${LABEL_AUTHOR} is a member of mattermost org"
- name: ci/override-failed-statuses
id: override
env:
GH_TOKEN: ${{ github.token }}
COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
run: |
# Only full tests can be overridden (smoke tests must pass)
FULL_TEST_CONTEXTS=("E2E Tests / playwright-full" "E2E Tests / cypress-full")
OVERRIDDEN=""
WEBHOOK_DATA="[]"
for CONTEXT_NAME in "${FULL_TEST_CONTEXTS[@]}"; do
echo "Checking: $CONTEXT_NAME"
# Get current status
STATUS_JSON=$(gh api repos/${{ github.repository }}/commits/${COMMIT_SHA}/statuses \
--jq "[.[] | select(.context == \"$CONTEXT_NAME\")] | first // empty")
if [ -z "$STATUS_JSON" ]; then
echo " No status found, skipping"
continue
fi
CURRENT_DESC=$(echo "$STATUS_JSON" | jq -r '.description // ""')
CURRENT_URL=$(echo "$STATUS_JSON" | jq -r '.target_url // ""')
CURRENT_STATE=$(echo "$STATUS_JSON" | jq -r '.state // ""')
echo " Current: $CURRENT_DESC ($CURRENT_STATE)"
# Only override if status is failure
if [ "$CURRENT_STATE" != "failure" ]; then
echo " Not failed, skipping"
continue
fi
# Parse and construct new message
if [[ "$CURRENT_DESC" =~ ^([0-9]+)\ failed,\ ([0-9]+)\ passed ]]; then
FAILED="${BASH_REMATCH[1]}"
PASSED="${BASH_REMATCH[2]}"
NEW_MSG="${FAILED} failed (verified), ${PASSED} passed"
elif [[ "$CURRENT_DESC" =~ ^([0-9]+)\ failed\ \([^)]+\),\ ([0-9]+)\ passed ]]; then
FAILED="${BASH_REMATCH[1]}"
PASSED="${BASH_REMATCH[2]}"
NEW_MSG="${FAILED} failed (verified), ${PASSED} passed"
else
NEW_MSG="${CURRENT_DESC} (verified)"
fi
echo " New: $NEW_MSG"
# Update status via GitHub API
gh api repos/${{ github.repository }}/statuses/${COMMIT_SHA} \
-f state=success \
-f context="$CONTEXT_NAME" \
-f description="$NEW_MSG" \
-f target_url="$CURRENT_URL"
echo " Updated to success"
OVERRIDDEN="${OVERRIDDEN}- ${CONTEXT_NAME}\n"
# Collect data for webhook
TEST_TYPE="unknown"
if [[ "$CONTEXT_NAME" == *"playwright"* ]]; then
TEST_TYPE="playwright"
elif [[ "$CONTEXT_NAME" == *"cypress"* ]]; then
TEST_TYPE="cypress"
fi
WEBHOOK_DATA=$(echo "$WEBHOOK_DATA" | jq \
--arg context "$CONTEXT_NAME" \
--arg test_type "$TEST_TYPE" \
--arg description "$CURRENT_DESC" \
--arg report_url "$CURRENT_URL" \
'. + [{context: $context, test_type: $test_type, description: $description, report_url: $report_url}]')
done
echo "overridden<<EOF" >> $GITHUB_OUTPUT
echo -e "$OVERRIDDEN" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
echo "webhook_data<<EOF" >> $GITHUB_OUTPUT
echo "$WEBHOOK_DATA" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
- name: ci/build-webhook-message
if: env.REPORT_WEBHOOK_URL != '' && steps.override.outputs.overridden != ''
id: webhook-message
env:
WEBHOOK_DATA: ${{ steps.override.outputs.webhook_data }}
run: |
MESSAGE_TEXT=""
while IFS= read -r item; do
[ -z "$item" ] && continue
CONTEXT=$(echo "$item" | jq -r '.context')
DESCRIPTION=$(echo "$item" | jq -r '.description')
REPORT_URL=$(echo "$item" | jq -r '.report_url')
MESSAGE_TEXT="${MESSAGE_TEXT}- **${CONTEXT}**: ${DESCRIPTION}, [view report](${REPORT_URL})\n"
done < <(echo "$WEBHOOK_DATA" | jq -c '.[]')
{
echo "message_text<<EOF"
echo -e "$MESSAGE_TEXT"
echo "EOF"
} >> $GITHUB_OUTPUT
- name: ci/send-webhook-notification
if: env.REPORT_WEBHOOK_URL != '' && steps.override.outputs.overridden != ''
env:
REPORT_WEBHOOK_URL: ${{ env.REPORT_WEBHOOK_URL }}
MESSAGE_TEXT: ${{ steps.webhook-message.outputs.message_text }}
PR_NUMBER: ${{ github.event.pull_request.number }}
PR_URL: ${{ github.event.pull_request.html_url }}
COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
SENDER: ${{ github.event.sender.login }}
run: |
PAYLOAD=$(cat <<EOF
{
"username": "E2E Test",
"icon_url": "https://mattermost.com/wp-content/uploads/2022/02/icon_WS.png",
"text": "**:white_check_mark: E2E Tests Verified**\n\nBy: \`@${SENDER}\` via \`E2E Tests/verified\` trigger-label\n:open-pull-request: [mattermost-pr-${PR_NUMBER}](${PR_URL}), commit: \`${COMMIT_SHA:0:7}\`\n\n${MESSAGE_TEXT}"
}
EOF
)
curl -X POST -H "Content-Type: application/json" -d "$PAYLOAD" "$REPORT_WEBHOOK_URL"
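
Note: a worked sketch of the status override above, using a hypothetical failed status. The description "3 failed, 24 passed (12 spec files)" matches the first pattern, so the commit status is re-posted as success with an amended message:

DESC="3 failed, 24 passed (12 spec files)"
if [[ "$DESC" =~ ^([0-9]+)\ failed,\ ([0-9]+)\ passed ]]; then
  echo "${BASH_REMATCH[1]} failed (verified), ${BASH_REMATCH[2]} passed"
fi
# => 3 failed (verified), 24 passed

A description matching neither pattern is simply suffixed with "(verified)".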

View file

@ -17,7 +17,7 @@ jobs:
if: github.repository_owner == 'mattermost' && github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'success'
runs-on: ubuntu-22.04
steps:
- uses: mattermost/actions/delivery/update-commit-status@d5174b860704729f4c14ef8489ae075742bfa08a
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
env:
GITHUB_TOKEN: ${{ github.token }}
with:
@ -155,7 +155,7 @@ jobs:
needs:
- build-docker
steps:
- uses: mattermost/actions/delivery/update-commit-status@d5174b860704729f4c14ef8489ae075742bfa08a
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
env:
GITHUB_TOKEN: ${{ github.token }}
with:
@ -171,7 +171,7 @@ jobs:
needs:
- build-docker
steps:
- uses: mattermost/actions/delivery/update-commit-status@d5174b860704729f4c14ef8489ae075742bfa08a
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
env:
GITHUB_TOKEN: ${{ github.token }}
with:

View file

@ -12,7 +12,6 @@ on:
pull_request:
paths:
- "server/**"
- "e2e-tests/**"
- ".github/workflows/server-ci.yml"
- ".github/workflows/server-test-template.yml"
- ".github/workflows/mmctl-test-template.yml"

View file

@ -7,7 +7,6 @@ on:
pull_request:
paths:
- "webapp/**"
- "e2e-tests/**"
- ".github/workflows/webapp-ci.yml"
- ".github/actions/webapp-setup/**"

View file

@ -12880,6 +12880,48 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---
## x/sys
This product contains 'x/sys' by Go.
[mirror] Go packages for low-level interaction with the operating system
* HOMEPAGE:
* https://golang.org/x/sys
* LICENSE: BSD 3-Clause "New" or "Revised" License
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---
## x/term

View file

@ -360,6 +360,36 @@
$ref: "#/components/responses/Forbidden"
"501":
$ref: "#/components/responses/NotImplemented"
/api/v4/cloud/check-cws-connection:
get:
tags:
- cloud
summary: Check CWS connection
description: >
Checks whether the Customer Web Server (CWS) is reachable from this instance.
Used to detect if the deployment is air-gapped.
##### Permissions
No permissions required.
__Minimum server version__: 5.28
__Note:__ This is intended for internal use and is subject to change.
operationId: CheckCWSConnection
responses:
"200":
description: CWS connection status returned successfully
content:
application/json:
schema:
type: object
properties:
status:
type: string
description: Connection status - "available" if CWS is reachable, "unavailable" if not
enum:
- available
- unavailable
/api/v4/cloud/webhook:
post:
tags:

View file

@ -98,7 +98,7 @@ case "${TEST:-$TEST_DEFAULT}" in
cypress )
export TEST_FILTER_DEFAULT='--stage=@prod --group=@smoke' ;;
playwright )
export TEST_FILTER_DEFAULT='functional/system_console/system_users/actions.spec.ts' ;;
export TEST_FILTER_DEFAULT='--grep @smoke' ;;
* )
export TEST_FILTER_DEFAULT='' ;;
esac

View file

@ -14,13 +14,16 @@ cd "$(dirname "$0")"
: ${WEBHOOK_URL:-} # Optional. Mattermost webhook to post the report back to
: ${RELEASE_DATE:-} # Optional. If set, its value will be included in the report as the release date of the tested artifact
if [ "$TYPE" = "PR" ]; then
# In this case, we expect the PR number to be present in the BRANCH variable
BRANCH_REGEX='^server-pr-[0-9]+$'
if ! grep -qE "${BRANCH_REGEX}" <<<"$BRANCH"; then
mme2e_log "Error: when using TYPE=PR, the BRANCH variable should respect regex '$BRANCH_REGEX'. Aborting." >&2
exit 1
# Try to determine PR number: first from PR_NUMBER, then from BRANCH (server-pr-XXXX format)
if [ -n "${PR_NUMBER:-}" ]; then
export PULL_REQUEST="https://github.com/mattermost/mattermost/pull/${PR_NUMBER}"
elif grep -qE '^server-pr-[0-9]+$' <<<"${BRANCH:-}"; then
PR_NUMBER="${BRANCH##*-}"
export PULL_REQUEST="https://github.com/mattermost/mattermost/pull/${PR_NUMBER}"
else
mme2e_log "Warning: TYPE=PR but cannot determine PR number from PR_NUMBER or BRANCH. Falling back to TYPE=NONE."
TYPE=NONE
fi
export PULL_REQUEST="https://github.com/mattermost/mattermost/pull/${BRANCH##*-}"
fi
# Env vars used during the test. Their values will be included in the report
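
Note: a worked example of the fallback above, assuming PR_NUMBER is unset and BRANCH=server-pr-12345:

BRANCH=server-pr-12345
PR_NUMBER="${BRANCH##*-}"   # => 12345
PULL_REQUEST="https://github.com/mattermost/mattermost/pull/12345"

If neither PR_NUMBER nor a server-pr-* branch is available, the script now degrades to TYPE=NONE instead of aborting.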

View file

@ -0,0 +1,77 @@
#!/bin/bash
# shellcheck disable=SC2038
# Run specific spec files
# Usage: SPEC_FILES="path/to/spec1.ts,path/to/spec2.ts" make start-server run-specs
set -e -u -o pipefail
cd "$(dirname "$0")"
. .e2erc
if [ -z "${SPEC_FILES:-}" ]; then
mme2e_log "Error: SPEC_FILES environment variable is required"
mme2e_log "Usage: SPEC_FILES=\"path/to/spec.ts\" make start-server run-specs"
exit 1
fi
mme2e_log "Running spec files: $SPEC_FILES"
case $TEST in
cypress)
mme2e_log "Running Cypress with specified specs"
# Initialize cypress report directory
${MME2E_DC_SERVER} exec -T -u "$MME2E_UID" -- cypress bash <<EOF
rm -rf logs results
mkdir -p logs
mkdir -p results/junit
mkdir -p results/mochawesome-report/json/tests
echo '<?xml version="1.0" encoding="UTF-8"?>' > results/junit/empty.xml
EOF
# Run cypress with specific spec files and mochawesome reporter
LOGFILE_SUFFIX="${CI_BASE_URL//\//_}_specs"
${MME2E_DC_SERVER} exec -T -u "$MME2E_UID" -- cypress npx cypress run \
--spec "$SPEC_FILES" \
--reporter cypress-multi-reporters \
--reporter-options configFile=reporter-config.json \
| tee "../cypress/logs/${LOGFILE_SUFFIX}_cypress.log" || true
# Collect run results
if [ -d ../cypress/results/mochawesome-report/json/tests/ ]; then
cat >../cypress/results/summary.json <<EOF
{
"passed": $(find ../cypress/results/mochawesome-report/json/tests/ -name '*.json' | xargs -l jq -r '.stats.passes' | jq -s add),
"failed": $(find ../cypress/results/mochawesome-report/json/tests/ -name '*.json' | xargs -l jq -r '.stats.failures' | jq -s add),
"failed_expected": 0
}
EOF
fi
# Collect server logs
${MME2E_DC_SERVER} logs --no-log-prefix -- server >"../cypress/logs/${LOGFILE_SUFFIX}_mattermost.log" 2>&1
;;
playwright)
mme2e_log "Running Playwright with specified specs"
# Convert comma-separated to space-separated for playwright
SPEC_ARGS=$(echo "$SPEC_FILES" | tr ',' ' ')
# Run playwright with specific spec files
LOGFILE_SUFFIX="${CI_BASE_URL//\//_}_specs"
# shellcheck disable=SC2086 # SPEC_ARGS is intentionally unquoted so each spec becomes a separate argument
${MME2E_DC_SERVER} exec -T -u "$MME2E_UID" -- playwright npm run test:ci -- $SPEC_ARGS | tee "../playwright/logs/${LOGFILE_SUFFIX}_playwright.log" || true
# Collect run results (if results.json exists)
if [ -f ../playwright/results/results.json ]; then
mme2e_log "Results file found at ../playwright/results/results.json"
fi
# Collect server logs
${MME2E_DC_SERVER} logs --no-log-prefix -- server >"../playwright/logs/${LOGFILE_SUFFIX}_mattermost.log" 2>&1
;;
*)
mme2e_log "Error, unsupported value for TEST: $TEST" >&2
mme2e_log "Aborting" >&2
exit 1
;;
esac
mme2e_log "Spec run complete"

View file

@ -9,7 +9,7 @@ clean:
rm -fv .ci/server.yml
rm -fv .ci/.env.{server,dashboard,cypress,playwright}
.PHONY: generate-server start-server run-test stop-server restart-server
.PHONY: generate-server start-server run-test run-specs stop-server restart-server
generate-server:
bash ./.ci/server.generate.sh
start-server: generate-server
@ -17,6 +17,8 @@ start-server: generate-server
bash ./.ci/server.prepare.sh
run-test:
bash ./.ci/server.run_test.sh
run-specs:
bash ./.ci/server.run_specs.sh
stop-server: generate-server
bash ./.ci/server.stop.sh
restart-server: stop-server start-server

View file

@ -100,6 +100,7 @@
"start:webhook": "node webhook_serve.js",
"pretest": "npm run clean",
"test": "cross-env TZ=Etc/UTC cypress run",
"test:smoke": "node run_tests.js --stage='@prod' --group='@smoke'",
"test:ci": "node run_tests.js",
"uniq-meta": "grep -r \"^// $META:\" cypress | grep -ow '@\\w*' | sort | uniq",
"check": "eslint .",

View file

@ -0,0 +1,15 @@
{
"reporterEnabled": "mocha-junit-reporter, mochawesome",
"mochaJunitReporterReporterOptions": {
"mochaFile": "results/junit/test_results[hash].xml",
"toConsole": false
},
"mochawesomeReporterOptions": {
"reportDir": "results/mochawesome-report",
"reportFilename": "json/tests/[name]",
"quiet": true,
"overwrite": false,
"html": false,
"json": true
}
}

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @accessibility
import {getRandomId} from '../../../utils';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @account_setting
import * as TIMEOUTS from '../../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @account_setting
describe('Account Settings', () => {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @channel
import * as TIMEOUTS from '../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @channel
import {getAdminAccount} from '../../../support/env';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @messaging @benchmark
import {reportBenchmarkResults} from '../../../utils/benchmark';

View file

@ -227,6 +227,39 @@ describe('Channel Info RHS', () => {
cy.uiGetRHS().findByText('header for the tests').should('be.visible');
});
});
it('should be able to rename channel from About area', () => {
// # Create a dedicated channel for renaming to avoid affecting other tests
cy.apiCreateChannel(testTeam.id, 'channel-to-rename', 'Channel To Rename', 'O').then(({channel}) => {
cy.apiAddUserToChannel(channel.id, admin.id);
// # Go to the channel
cy.visit(`/${testTeam.name}/channels/${channel.name}`);
// # Open Channel Info RHS
cy.get('#channel-info-btn').click();
// # Click edit on channel name (first Edit in About)
cy.uiGetRHS().findAllByLabelText('Edit').first().click({force: true});
// * Rename Channel modal appears
cy.findByRole('heading', {name: /rename channel/i}).should('be.visible');
// # Fill display name and URL
cy.findByPlaceholderText(/enter display name/i).clear().type('Renamed Channel');
cy.get('.url-input-button').click();
cy.get('.url-input-container input').clear().type('renamed-channel');
cy.get('.url-input-container button.url-input-button').click();
// # Save
cy.findByRole('button', {name: /save/i}).click();
// * URL updated
cy.location('pathname').should('include', `/${testTeam.name}/channels/renamed-channel`);
// * Header shows new name
cy.get('#channelHeaderTitle').should('contain', 'Renamed Channel');
});
});
});
describe('bottom menu', () => {
it('should be able to manage notifications', () => {
@ -237,11 +270,29 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();
// # Click on "Notification Preferences"
cy.uiGetRHS().findByText('Notification Preferences').should('be.visible').click();
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Notification Preferences').scrollIntoView().should('be.visible').click();
// * Ensures the modal is there
cy.get('.ChannelNotificationModal').should('be.visible');
});
it('should open Channel Settings from RHS menu', () => {
// # Go to test channel
cy.visit(`/${testTeam.name}/channels/${testChannel.name}`);
// # Close RHS if it's open, then click on the channel info button
cy.get('body').then(($body) => {
if ($body.find('#rhsCloseButton').length > 0) {
cy.get('#rhsCloseButton').click();
}
cy.get('#channel-info-btn').should('be.visible').click();
});
// * Channel Settings item is visible in RHS menu
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Channel Settings').scrollIntoView().should('be.visible').click();
// * Channel Settings modal opens
cy.get('.ChannelSettingsModal').should('be.visible');
});
it('should be able to view files and come back', () => {
// # Go to test channel
cy.visit(`/${testTeam.name}/channels/${testChannel.name}`);
@ -250,7 +301,7 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();
// # Click on "Files"
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Files').should('be.visible').click();
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Files').scrollIntoView().should('be.visible').click();
// * Ensure we see the files RHS
cy.uiGetRHS().findByText('No files yet').should('be.visible');
@ -277,10 +328,10 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();
// # Click on "Pinned Messages"
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Pinned messages').should('be.visible').click();
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Pinned messages').scrollIntoView().should('be.visible').click();
// * Ensure we see the Pinned Post RHS
cy.uiGetRHS().findByText('Hello channel info rhs spec').should('be.visible');
cy.uiGetRHS().findAllByText('Hello channel info rhs spec').first().should('be.visible');
// # Click the Back Icon
cy.uiGetRHS().get('[aria-label="Back Icon"]').click();
@ -296,7 +347,7 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();
// # Click on "Members"
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Members').should('be.visible').click();
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Members').scrollIntoView().should('be.visible').click();
// * Ensure we see the members
cy.uiGetRHS().contains('sysadmin').should('be.visible');
@ -399,7 +450,7 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();
// # Click on "Notification Preferences"
cy.uiGetRHS().findByText('Notification Preferences').should('be.visible').click();
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Notification Preferences').scrollIntoView().should('be.visible').click();
// * Ensures the modal is there
cy.get('.ChannelNotificationModal').should('be.visible');
@ -489,6 +540,6 @@ describe('Channel Info RHS', () => {
function ensureRHSIsOpenOnChannelInfo(testChannel) {
cy.get('#rhsContainer').then((rhsContainer) => {
cy.wrap(rhsContainer).findByText('Info').should('be.visible');
cy.wrap(rhsContainer).findByText(testChannel.display_name).should('be.visible');
cy.wrap(rhsContainer).find('.sidebar--right__title__subtitle').should('contain', testChannel.display_name);
});
}

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @channel @rhs
import * as TIMEOUTS from '../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @dm_category
import * as MESSAGES from '../../../fixtures/messages';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @channel_sidebar
import {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @collapsed_reply_threads
import {Channel} from '@mattermost/types/channels';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @custom_status
import moment from 'moment-timezone';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @custom_status
describe('Custom Status - Setting a Custom Status', () => {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @enterprise @not_cloud @system_console
describe('MM-T2574 Session Lengths', () => {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @enterprise @guest_account
/**

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @enterprise @ldap
import {Channel} from '@mattermost/types/channels';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @enterprise @ldap
import {UserProfile} from '@mattermost/types/users';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @enterprise @ldap_group
describe('LDAP Group Sync - Test channel public/private toggle', () => {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @enterprise @profile_popover
import * as TIMEOUTS from '../../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @enterprise @not_cloud @system_console @license_removal
import * as TIMEOUTS from '../../../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @enterprise @system_console @mfa
import ldapUsers from '../../../../fixtures/ldap_users.json';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @enterprise @system_console
import * as TIMEOUTS from '../../../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @enterprise @e20_only @not_cloud @system_console
import * as TIMEOUTS from '../../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @enterprise @system_console
import {getAdminAccount} from '../../../../support/env';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @enterprise @system_console
import {getAdminAccount} from '../../../../support/env';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @files_and_attachments
describe('Image Link Preview', () => {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @integrations
import * as MESSAGES from '../../../../fixtures/messages';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @incoming_webhook
describe('Incoming webhook', () => {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @incoming_webhook
import {enableUsernameAndIconOverride} from './helpers';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @incoming_webhook
import {getRandomId} from '../../../../utils';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @integrations
describe('Integrations', () => {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @incoming_webhook
import {getRandomId} from '../../../../utils';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @integrations
import {getRandomId} from '../../../utils';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @integrations
import {getRandomId} from '../../../utils';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
import * as TIMEOUTS from '../../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @interactive_menu
/**

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @keyboard_shortcuts
import * as TIMEOUTS from '../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @markdown @not_cloud
import * as TIMEOUTS from '../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @messaging
import * as TIMEOUTS from '../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @messaging
import * as TIMEOUTS from '../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @messaging
function emojiVerification(postId) {

View file

@ -7,7 +7,6 @@
// Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @messaging
describe('Messaging', () => {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @messaging
describe('Messaging', () => {

View file

@ -7,7 +7,6 @@
// Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @messaging
describe('Messaging', () => {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @messaging
import * as TIMEOUTS from '../../../fixtures/timeouts';

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @messaging
describe('Messaging', () => {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @messaging
describe('Image attachment', () => {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @messaging
describe('Message', () => {

View file

@ -7,7 +7,6 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************
// Stage: @prod
// Group: @channels @not_cloud @messaging @plugin
import * as TIMEOUTS from '../../../fixtures/timeouts';

Some files were not shown because too many files have changed in this diff.