Mirror of https://github.com/mattermost/mattermost.git (synced 2026-02-11 23:03:45 -05:00)

Compare commits: master...v11.3.1-rc (45 commits)

Commits (SHA1): 7d4b795e32, e8c9f34e69, 666644d003, 59272d115b, 1fe1049198, a296621e3b, 6edc7d4bc8, 71f013648a, 21220ccfec, 44bb4c2db6, 2588c47b20, 523c566727, d27a219506, 3051ead599, c70e184b65, 4356b092dd, 2d6b2ae112, 6145da452c, 69b019d53e, cd33bc9067, e260ac346f, 6f9b21c68e, 8567622504, 378804d0df, 95b9160472, 2a83ca9646, d5529dcc9a, 3bb469082e, 7cd53beea6, a912d1177b, a5a0e18064, c7bebc1c81, 9cf621f640, a73f912669, f5385514df, 636486dc56, 9dbe20f9ab, 682534dea1, 60c65c95b9, 0a2f0c4e81, fcc81d962b, 5eb7b7acd8, b3b1005f09, b65dbf4434, b7bf0ec9f6
1880 changed files with 76504 additions and 168213 deletions
@@ -1,2 +0,0 @@
node_modules/
.env
@@ -1,48 +0,0 @@
name: Calculate Cypress Results
description: Calculate Cypress test results with optional merge of retest results
author: Mattermost

inputs:
  original-results-path:
    description: Path to the original Cypress results directory (e.g., e2e-tests/cypress/results)
    required: true
  retest-results-path:
    description: Path to the retest Cypress results directory (optional - if not provided, only calculates from original)
    required: false
  write-merged:
    description: Whether to write merged results back to the original directory (default true)
    required: false
    default: "true"

outputs:
  # Merge outputs
  merged:
    description: Whether merge was performed (true/false)

  # Calculation outputs (same as calculate-cypress-test-results)
  passed:
    description: Number of passed tests
  failed:
    description: Number of failed tests
  pending:
    description: Number of pending/skipped tests
  total_specs:
    description: Total number of spec files
  commit_status_message:
    description: Message for commit status (e.g., "X failed, Y passed (Z spec files)")
  failed_specs:
    description: Comma-separated list of failed spec files (for retest)
  failed_specs_count:
    description: Number of failed spec files
  failed_tests:
    description: Markdown table rows of failed tests (for GitHub summary)
  total:
    description: Total number of tests (passed + failed)
  pass_rate:
    description: Pass rate percentage (e.g., "100.00")
  color:
    description: Color for webhook based on pass rate (green=100%, yellow=99%+, orange=98%+, red=<98%)

runs:
  using: node24
  main: dist/index.js
19319  .github/actions/calculate-cypress-results/dist/index.js (vendored)
File diff suppressed because one or more lines are too long
@@ -1,15 +0,0 @@
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
    preset: "ts-jest",
    testEnvironment: "node",
    testMatch: ["**/*.test.ts"],
    moduleFileExtensions: ["ts", "js"],
    transform: {
        "^.+\\.ts$": [
            "ts-jest",
            {
                useESM: false,
            },
        ],
    },
};
9136  .github/actions/calculate-cypress-results/package-lock.json (generated, vendored)
File diff suppressed because it is too large
@@ -1,27 +0,0 @@
{
    "name": "calculate-cypress-results",
    "private": true,
    "version": "0.1.0",
    "main": "dist/index.js",
    "scripts": {
        "build": "tsup",
        "prettier": "npx prettier --write \"src/**/*.ts\"",
        "local-action": "local-action . src/main.ts .env",
        "test": "jest --verbose",
        "test:watch": "jest --watch --verbose",
        "test:silent": "jest --silent",
        "tsc": "tsc -b"
    },
    "dependencies": {
        "@actions/core": "3.0.0"
    },
    "devDependencies": {
        "@github/local-action": "7.0.0",
        "@types/jest": "30.0.0",
        "@types/node": "25.2.0",
        "jest": "30.2.0",
        "ts-jest": "29.4.6",
        "tsup": "8.5.1",
        "typescript": "5.9.3"
    }
}
@@ -1,3 +0,0 @@
import { run } from "./main";

run();
@@ -1,99 +0,0 @@
import * as core from "@actions/core";
import {
    loadSpecFiles,
    mergeResults,
    writeMergedResults,
    calculateResultsFromSpecs,
} from "./merge";

export async function run(): Promise<void> {
    const originalPath = core.getInput("original-results-path", {
        required: true,
    });
    const retestPath = core.getInput("retest-results-path"); // Optional
    const shouldWriteMerged = core.getInput("write-merged") !== "false"; // Default true

    core.info(`Original results: ${originalPath}`);
    core.info(`Retest results: ${retestPath || "(not provided)"}`);

    let merged = false;
    let specs;

    if (retestPath) {
        // Check if retest path has results
        const retestSpecs = await loadSpecFiles(retestPath);

        if (retestSpecs.length > 0) {
            core.info(`Found ${retestSpecs.length} retest spec files`);

            // Merge results
            core.info("Merging results...");
            const mergeResult = await mergeResults(originalPath, retestPath);
            specs = mergeResult.specs;
            merged = true;

            core.info(`Retested specs: ${mergeResult.retestFiles.join(", ")}`);
            core.info(`Total merged specs: ${specs.length}`);

            // Write merged results back to original directory
            if (shouldWriteMerged) {
                core.info("Writing merged results to original directory...");
                const writeResult = await writeMergedResults(
                    originalPath,
                    retestPath,
                );
                core.info(`Updated files: ${writeResult.updatedFiles.length}`);
                core.info(
                    `Removed duplicates: ${writeResult.removedFiles.length}`,
                );
            }
        } else {
            core.warning(
                `No retest results found at ${retestPath}, using original only`,
            );
            specs = await loadSpecFiles(originalPath);
        }
    } else {
        core.info("No retest path provided, using original results only");
        specs = await loadSpecFiles(originalPath);
    }

    core.info(`Calculating results from ${specs.length} spec files...`);

    // Handle case where no results found
    if (specs.length === 0) {
        core.setFailed("No Cypress test results found");
        return;
    }

    // Calculate all outputs from final results
    const calc = calculateResultsFromSpecs(specs);

    // Log results
    core.startGroup("Final Results");
    core.info(`Passed: ${calc.passed}`);
    core.info(`Failed: ${calc.failed}`);
    core.info(`Pending: ${calc.pending}`);
    core.info(`Total: ${calc.total}`);
    core.info(`Pass Rate: ${calc.passRate}%`);
    core.info(`Color: ${calc.color}`);
    core.info(`Spec Files: ${calc.totalSpecs}`);
    core.info(`Failed Specs Count: ${calc.failedSpecsCount}`);
    core.info(`Commit Status Message: ${calc.commitStatusMessage}`);
    core.info(`Failed Specs: ${calc.failedSpecs || "none"}`);
    core.endGroup();

    // Set all outputs
    core.setOutput("merged", merged.toString());
    core.setOutput("passed", calc.passed);
    core.setOutput("failed", calc.failed);
    core.setOutput("pending", calc.pending);
    core.setOutput("total_specs", calc.totalSpecs);
    core.setOutput("commit_status_message", calc.commitStatusMessage);
    core.setOutput("failed_specs", calc.failedSpecs);
    core.setOutput("failed_specs_count", calc.failedSpecsCount);
    core.setOutput("failed_tests", calc.failedTests);
    core.setOutput("total", calc.total);
    core.setOutput("pass_rate", calc.passRate);
    core.setOutput("color", calc.color);
}
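The entry point above is a thin wrapper: it reads the action inputs, optionally merges retest results, and republishes everything calculateResultsFromSpecs computes as action outputs. A minimal sketch of driving the same helpers from a plain Node script for local debugging (not part of this diff; the two result paths are hypothetical placeholders):

// Illustrative local driver, assuming it runs next to the action's src/ files.
import { loadSpecFiles, mergeResults, calculateResultsFromSpecs } from "./merge";

async function debugLocally(): Promise<void> {
    // Hypothetical paths; substitute real Cypress results directories.
    const originalPath = "e2e-tests/cypress/results";
    const retestPath = "e2e-tests/cypress/results-retest";

    const retestSpecs = await loadSpecFiles(retestPath);
    const specs =
        retestSpecs.length > 0
            ? (await mergeResults(originalPath, retestPath)).specs
            : await loadSpecFiles(originalPath);

    const calc = calculateResultsFromSpecs(specs);
    console.log(calc.commitStatusMessage, calc.color);
}

debugLocally().catch((err) => {
    console.error(err);
    process.exit(1);
});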
@@ -1,271 +0,0 @@
import { calculateResultsFromSpecs } from "./merge";
import type { ParsedSpecFile, MochawesomeResult } from "./types";

/**
 * Helper to create a mochawesome result for testing
 */
function createMochawesomeResult(
    specFile: string,
    tests: { title: string; state: "passed" | "failed" | "pending" }[],
): MochawesomeResult {
    return {
        stats: {
            suites: 1,
            tests: tests.length,
            passes: tests.filter((t) => t.state === "passed").length,
            pending: tests.filter((t) => t.state === "pending").length,
            failures: tests.filter((t) => t.state === "failed").length,
            start: new Date().toISOString(),
            end: new Date().toISOString(),
            duration: 1000,
            testsRegistered: tests.length,
            passPercent: 0,
            pendingPercent: 0,
            other: 0,
            hasOther: false,
            skipped: 0,
            hasSkipped: false,
        },
        results: [
            {
                uuid: "uuid-1",
                title: specFile,
                fullFile: `/app/e2e-tests/cypress/tests/integration/${specFile}`,
                file: `tests/integration/${specFile}`,
                beforeHooks: [],
                afterHooks: [],
                tests: tests.map((t, i) => ({
                    title: t.title,
                    fullTitle: `${specFile} > ${t.title}`,
                    timedOut: null,
                    duration: 500,
                    state: t.state,
                    speed: "fast",
                    pass: t.state === "passed",
                    fail: t.state === "failed",
                    pending: t.state === "pending",
                    context: null,
                    code: "",
                    err: t.state === "failed" ? { message: "Test failed" } : {},
                    uuid: `test-uuid-${i}`,
                    parentUUID: "uuid-1",
                    isHook: false,
                    skipped: false,
                })),
                suites: [],
                passes: tests
                    .filter((t) => t.state === "passed")
                    .map((_, i) => `test-uuid-${i}`),
                failures: tests
                    .filter((t) => t.state === "failed")
                    .map((_, i) => `test-uuid-${i}`),
                pending: tests
                    .filter((t) => t.state === "pending")
                    .map((_, i) => `test-uuid-${i}`),
                skipped: [],
                duration: 1000,
                root: true,
                rootEmpty: false,
                _timeout: 60000,
            },
        ],
    };
}

function createParsedSpecFile(
    specFile: string,
    tests: { title: string; state: "passed" | "failed" | "pending" }[],
): ParsedSpecFile {
    return {
        filePath: `/path/to/${specFile}.json`,
        specPath: `tests/integration/${specFile}`,
        result: createMochawesomeResult(specFile, tests),
    };
}

describe("calculateResultsFromSpecs", () => {
    it("should calculate all outputs correctly for passing results", () => {
        const specs: ParsedSpecFile[] = [
            createParsedSpecFile("login.spec.ts", [
                {
                    title: "should login with valid credentials",
                    state: "passed",
                },
            ]),
            createParsedSpecFile("messaging.spec.ts", [
                { title: "should send a message", state: "passed" },
            ]),
        ];

        const calc = calculateResultsFromSpecs(specs);

        expect(calc.passed).toBe(2);
        expect(calc.failed).toBe(0);
        expect(calc.pending).toBe(0);
        expect(calc.total).toBe(2);
        expect(calc.passRate).toBe("100.00");
        expect(calc.color).toBe("#43A047"); // green
        expect(calc.totalSpecs).toBe(2);
        expect(calc.failedSpecs).toBe("");
        expect(calc.failedSpecsCount).toBe(0);
        expect(calc.commitStatusMessage).toBe("100% passed (2), 2 specs");
    });

    it("should calculate all outputs correctly for results with failures", () => {
        const specs: ParsedSpecFile[] = [
            createParsedSpecFile("login.spec.ts", [
                {
                    title: "should login with valid credentials",
                    state: "passed",
                },
            ]),
            createParsedSpecFile("channels.spec.ts", [
                { title: "should create a channel", state: "failed" },
            ]),
        ];

        const calc = calculateResultsFromSpecs(specs);

        expect(calc.passed).toBe(1);
        expect(calc.failed).toBe(1);
        expect(calc.pending).toBe(0);
        expect(calc.total).toBe(2);
        expect(calc.passRate).toBe("50.00");
        expect(calc.color).toBe("#F44336"); // red
        expect(calc.totalSpecs).toBe(2);
        expect(calc.failedSpecs).toBe("tests/integration/channels.spec.ts");
        expect(calc.failedSpecsCount).toBe(1);
        expect(calc.commitStatusMessage).toBe(
            "50.0% passed (1/2), 1 failed, 2 specs",
        );
        expect(calc.failedTests).toContain("should create a channel");
    });

    it("should handle pending tests correctly", () => {
        const specs: ParsedSpecFile[] = [
            createParsedSpecFile("login.spec.ts", [
                { title: "should login", state: "passed" },
                { title: "should logout", state: "pending" },
            ]),
        ];

        const calc = calculateResultsFromSpecs(specs);

        expect(calc.passed).toBe(1);
        expect(calc.failed).toBe(0);
        expect(calc.pending).toBe(1);
        expect(calc.total).toBe(1); // Total excludes pending
        expect(calc.passRate).toBe("100.00");
    });

    it("should limit failed tests to 10 entries", () => {
        const specs: ParsedSpecFile[] = [
            createParsedSpecFile("big-test.spec.ts", [
                { title: "test 1", state: "failed" },
                { title: "test 2", state: "failed" },
                { title: "test 3", state: "failed" },
                { title: "test 4", state: "failed" },
                { title: "test 5", state: "failed" },
                { title: "test 6", state: "failed" },
                { title: "test 7", state: "failed" },
                { title: "test 8", state: "failed" },
                { title: "test 9", state: "failed" },
                { title: "test 10", state: "failed" },
                { title: "test 11", state: "failed" },
                { title: "test 12", state: "failed" },
            ]),
        ];

        const calc = calculateResultsFromSpecs(specs);

        expect(calc.failed).toBe(12);
        expect(calc.failedTests).toContain("...and 2 more failed tests");
    });
});

describe("merge simulation", () => {
    it("should produce correct results when merging original with retest", () => {
        // Simulate original: 2 passed, 1 failed
        const originalSpecs: ParsedSpecFile[] = [
            createParsedSpecFile("login.spec.ts", [
                { title: "should login", state: "passed" },
            ]),
            createParsedSpecFile("messaging.spec.ts", [
                { title: "should send message", state: "passed" },
            ]),
            createParsedSpecFile("channels.spec.ts", [
                { title: "should create channel", state: "failed" },
            ]),
        ];

        // Verify original has failure
        const originalCalc = calculateResultsFromSpecs(originalSpecs);
        expect(originalCalc.passed).toBe(2);
        expect(originalCalc.failed).toBe(1);
        expect(originalCalc.passRate).toBe("66.67");

        // Simulate retest: channels.spec.ts now passes
        const retestSpec = createParsedSpecFile("channels.spec.ts", [
            { title: "should create channel", state: "passed" },
        ]);

        // Simulate merge: replace original channels.spec.ts with retest
        const specMap = new Map<string, ParsedSpecFile>();
        for (const spec of originalSpecs) {
            specMap.set(spec.specPath, spec);
        }
        specMap.set(retestSpec.specPath, retestSpec);

        const mergedSpecs = Array.from(specMap.values());

        // Calculate final results
        const finalCalc = calculateResultsFromSpecs(mergedSpecs);

        expect(finalCalc.passed).toBe(3);
        expect(finalCalc.failed).toBe(0);
        expect(finalCalc.pending).toBe(0);
        expect(finalCalc.total).toBe(3);
        expect(finalCalc.passRate).toBe("100.00");
        expect(finalCalc.color).toBe("#43A047"); // green
        expect(finalCalc.totalSpecs).toBe(3);
        expect(finalCalc.failedSpecs).toBe("");
        expect(finalCalc.failedSpecsCount).toBe(0);
        expect(finalCalc.commitStatusMessage).toBe("100% passed (3), 3 specs");
    });

    it("should handle case where retest still fails", () => {
        // Original: 1 passed, 1 failed
        const originalSpecs: ParsedSpecFile[] = [
            createParsedSpecFile("login.spec.ts", [
                { title: "should login", state: "passed" },
            ]),
            createParsedSpecFile("channels.spec.ts", [
                { title: "should create channel", state: "failed" },
            ]),
        ];

        // Retest: channels.spec.ts still fails
        const retestSpec = createParsedSpecFile("channels.spec.ts", [
            { title: "should create channel", state: "failed" },
        ]);

        // Merge
        const specMap = new Map<string, ParsedSpecFile>();
        for (const spec of originalSpecs) {
            specMap.set(spec.specPath, spec);
        }
        specMap.set(retestSpec.specPath, retestSpec);

        const mergedSpecs = Array.from(specMap.values());
        const finalCalc = calculateResultsFromSpecs(mergedSpecs);

        expect(finalCalc.passed).toBe(1);
        expect(finalCalc.failed).toBe(1);
        expect(finalCalc.passRate).toBe("50.00");
        expect(finalCalc.color).toBe("#F44336"); // red
        expect(finalCalc.failedSpecs).toBe(
            "tests/integration/channels.spec.ts",
        );
        expect(finalCalc.failedSpecsCount).toBe(1);
    });
});
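The last test above pins the 10-row cap and the "...and N more failed tests" suffix used for the failed_tests output. For orientation, a small sketch of the markdown row a single failure turns into (values invented; the escaping mirrors the implementation in the next file):

// Illustrative only: how one failed test becomes a markdown table row.
const failedTest = {
    title: "should create a `channel` | fast",
    file: "tests/integration/channels.spec.ts",
};
const escapedTitle = failedTest.title.replace(/`/g, "\\`").replace(/\|/g, "\\|");
const row = `| ${escapedTitle} | ${failedTest.file} |`;
// row renders as: | should create a \`channel\` \| fast | tests/integration/channels.spec.ts |
console.log(row);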
@@ -1,323 +0,0 @@
import * as fs from "fs/promises";
import * as path from "path";
import type {
    MochawesomeResult,
    ParsedSpecFile,
    CalculationResult,
    FailedTest,
    TestItem,
    SuiteItem,
    ResultItem,
} from "./types";

/**
 * Find all JSON files in a directory recursively
 */
async function findJsonFiles(dir: string): Promise<string[]> {
    const files: string[] = [];

    try {
        const entries = await fs.readdir(dir, { withFileTypes: true });

        for (const entry of entries) {
            const fullPath = path.join(dir, entry.name);
            if (entry.isDirectory()) {
                const subFiles = await findJsonFiles(fullPath);
                files.push(...subFiles);
            } else if (entry.isFile() && entry.name.endsWith(".json")) {
                files.push(fullPath);
            }
        }
    } catch {
        // Directory doesn't exist or not accessible
    }

    return files;
}

/**
 * Parse a mochawesome JSON file
 */
async function parseSpecFile(filePath: string): Promise<ParsedSpecFile | null> {
    try {
        const content = await fs.readFile(filePath, "utf8");
        const result: MochawesomeResult = JSON.parse(content);

        // Extract spec path from results[0].file
        const specPath = result.results?.[0]?.file;
        if (!specPath) {
            return null;
        }

        return {
            filePath,
            specPath,
            result,
        };
    } catch {
        return null;
    }
}

/**
 * Extract all tests from a result recursively
 */
function getAllTests(result: MochawesomeResult): TestItem[] {
    const tests: TestItem[] = [];

    function extractFromSuite(suite: SuiteItem | ResultItem) {
        tests.push(...(suite.tests || []));
        for (const nestedSuite of suite.suites || []) {
            extractFromSuite(nestedSuite);
        }
    }

    for (const resultItem of result.results || []) {
        extractFromSuite(resultItem);
    }

    return tests;
}

/**
 * Get color based on pass rate
 */
function getColor(passRate: number): string {
    if (passRate === 100) {
        return "#43A047"; // green
    } else if (passRate >= 99) {
        return "#FFEB3B"; // yellow
    } else if (passRate >= 98) {
        return "#FF9800"; // orange
    } else {
        return "#F44336"; // red
    }
}

/**
 * Calculate results from parsed spec files
 */
export function calculateResultsFromSpecs(
    specs: ParsedSpecFile[],
): CalculationResult {
    let passed = 0;
    let failed = 0;
    let pending = 0;
    const failedSpecsSet = new Set<string>();
    const failedTestsList: FailedTest[] = [];

    for (const spec of specs) {
        const tests = getAllTests(spec.result);

        for (const test of tests) {
            if (test.state === "passed") {
                passed++;
            } else if (test.state === "failed") {
                failed++;
                failedSpecsSet.add(spec.specPath);
                failedTestsList.push({
                    title: test.title,
                    file: spec.specPath,
                });
            } else if (test.state === "pending") {
                pending++;
            }
        }
    }

    const totalSpecs = specs.length;
    const failedSpecs = Array.from(failedSpecsSet).join(",");
    const failedSpecsCount = failedSpecsSet.size;

    // Build failed tests markdown table (limit to 10)
    let failedTests = "";
    const uniqueFailedTests = failedTestsList.filter(
        (test, index, self) =>
            index ===
            self.findIndex(
                (t) => t.title === test.title && t.file === test.file,
            ),
    );

    if (uniqueFailedTests.length > 0) {
        const limitedTests = uniqueFailedTests.slice(0, 10);
        failedTests = limitedTests
            .map((t) => {
                const escapedTitle = t.title
                    .replace(/`/g, "\\`")
                    .replace(/\|/g, "\\|");
                return `| ${escapedTitle} | ${t.file} |`;
            })
            .join("\n");

        if (uniqueFailedTests.length > 10) {
            const remaining = uniqueFailedTests.length - 10;
            failedTests += `\n| _...and ${remaining} more failed tests_ | |`;
        }
    } else if (failed > 0) {
        failedTests = "| Unable to parse failed tests | - |";
    }

    // Calculate totals and pass rate
    // Pass rate = passed / (passed + failed), excluding pending
    const total = passed + failed;
    const passRate = total > 0 ? ((passed * 100) / total).toFixed(2) : "0.00";
    const color = getColor(parseFloat(passRate));

    // Build commit status message
    const rate = total > 0 ? (passed * 100) / total : 0;
    const rateStr = rate === 100 ? "100%" : `${rate.toFixed(1)}%`;
    const specSuffix = totalSpecs > 0 ? `, ${totalSpecs} specs` : "";
    const commitStatusMessage =
        rate === 100
            ? `${rateStr} passed (${passed})${specSuffix}`
            : `${rateStr} passed (${passed}/${total}), ${failed} failed${specSuffix}`;

    return {
        passed,
        failed,
        pending,
        totalSpecs,
        commitStatusMessage,
        failedSpecs,
        failedSpecsCount,
        failedTests,
        total,
        passRate,
        color,
    };
}

/**
 * Load all spec files from a mochawesome results directory
 */
export async function loadSpecFiles(
    resultsPath: string,
): Promise<ParsedSpecFile[]> {
    // Mochawesome results are at: results/mochawesome-report/json/tests/
    const mochawesomeDir = path.join(
        resultsPath,
        "mochawesome-report",
        "json",
        "tests",
    );

    const jsonFiles = await findJsonFiles(mochawesomeDir);
    const specs: ParsedSpecFile[] = [];

    for (const file of jsonFiles) {
        const parsed = await parseSpecFile(file);
        if (parsed) {
            specs.push(parsed);
        }
    }

    return specs;
}

/**
 * Merge original and retest results
 * - For each spec in retest, replace the matching spec in original
 * - Keep original specs that are not in retest
 */
export async function mergeResults(
    originalPath: string,
    retestPath: string,
): Promise<{
    specs: ParsedSpecFile[];
    retestFiles: string[];
    mergedCount: number;
}> {
    const originalSpecs = await loadSpecFiles(originalPath);
    const retestSpecs = await loadSpecFiles(retestPath);

    // Build a map of original specs by spec path
    const specMap = new Map<string, ParsedSpecFile>();
    for (const spec of originalSpecs) {
        specMap.set(spec.specPath, spec);
    }

    // Replace with retest results
    const retestFiles: string[] = [];
    for (const retestSpec of retestSpecs) {
        specMap.set(retestSpec.specPath, retestSpec);
        retestFiles.push(retestSpec.specPath);
    }

    return {
        specs: Array.from(specMap.values()),
        retestFiles,
        mergedCount: retestSpecs.length,
    };
}

/**
 * Write merged results back to the original directory
 * This updates the original JSON files with retest results
 */
export async function writeMergedResults(
    originalPath: string,
    retestPath: string,
): Promise<{ updatedFiles: string[]; removedFiles: string[] }> {
    const mochawesomeDir = path.join(
        originalPath,
        "mochawesome-report",
        "json",
        "tests",
    );
    const retestMochawesomeDir = path.join(
        retestPath,
        "mochawesome-report",
        "json",
        "tests",
    );

    const originalJsonFiles = await findJsonFiles(mochawesomeDir);
    const retestJsonFiles = await findJsonFiles(retestMochawesomeDir);

    const updatedFiles: string[] = [];
    const removedFiles: string[] = [];

    // For each retest file, find and replace the original
    for (const retestFile of retestJsonFiles) {
        const retestSpec = await parseSpecFile(retestFile);
        if (!retestSpec) continue;

        const specPath = retestSpec.specPath;

        // Find all original files with matching spec path
        // Prefer nested path (under integration/), remove flat duplicates
        let nestedFile: string | null = null;
        const flatFiles: string[] = [];

        for (const origFile of originalJsonFiles) {
            const origSpec = await parseSpecFile(origFile);
            if (origSpec && origSpec.specPath === specPath) {
                if (origFile.includes("/integration/")) {
                    nestedFile = origFile;
                } else {
                    flatFiles.push(origFile);
                }
            }
        }

        // Update the nested file (proper location) or first flat file if no nested
        const retestContent = await fs.readFile(retestFile, "utf8");

        if (nestedFile) {
            await fs.writeFile(nestedFile, retestContent);
            updatedFiles.push(nestedFile);

            // Remove flat duplicates
            for (const flatFile of flatFiles) {
                await fs.unlink(flatFile);
                removedFiles.push(flatFile);
            }
        } else if (flatFiles.length > 0) {
            await fs.writeFile(flatFiles[0], retestContent);
            updatedFiles.push(flatFiles[0]);
        }
    }

    return { updatedFiles, removedFiles };
}
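To make the pass-rate, color, and commit-status arithmetic above concrete, a short worked example with invented numbers:

// Worked example of the arithmetic above (numbers invented for illustration).
const passed = 197;
const failed = 3;
const totalSpecs = 120;

const total = passed + failed; // 200
const passRate = ((passed * 100) / total).toFixed(2); // "98.50" -> getColor gives "#FF9800" (orange)
const rate = (passed * 100) / total;
const message = `${rate.toFixed(1)}% passed (${passed}/${total}), ${failed} failed, ${totalSpecs} specs`;
// message: "98.5% passed (197/200), 3 failed, 120 specs"
console.log(passRate, message);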
@@ -1,138 +0,0 @@
/**
 * Mochawesome result structure for a single spec file
 */
export interface MochawesomeResult {
    stats: MochawesomeStats;
    results: ResultItem[];
}

export interface MochawesomeStats {
    suites: number;
    tests: number;
    passes: number;
    pending: number;
    failures: number;
    start: string;
    end: string;
    duration: number;
    testsRegistered: number;
    passPercent: number;
    pendingPercent: number;
    other: number;
    hasOther: boolean;
    skipped: number;
    hasSkipped: boolean;
}

export interface ResultItem {
    uuid: string;
    title: string;
    fullFile: string;
    file: string;
    beforeHooks: Hook[];
    afterHooks: Hook[];
    tests: TestItem[];
    suites: SuiteItem[];
    passes: string[];
    failures: string[];
    pending: string[];
    skipped: string[];
    duration: number;
    root: boolean;
    rootEmpty: boolean;
    _timeout: number;
}

export interface SuiteItem {
    uuid: string;
    title: string;
    fullFile: string;
    file: string;
    beforeHooks: Hook[];
    afterHooks: Hook[];
    tests: TestItem[];
    suites: SuiteItem[];
    passes: string[];
    failures: string[];
    pending: string[];
    skipped: string[];
    duration: number;
    root: boolean;
    rootEmpty: boolean;
    _timeout: number;
}

export interface TestItem {
    title: string;
    fullTitle: string;
    timedOut: boolean | null;
    duration: number;
    state: "passed" | "failed" | "pending";
    speed: string | null;
    pass: boolean;
    fail: boolean;
    pending: boolean;
    context: string | null;
    code: string;
    err: TestError;
    uuid: string;
    parentUUID: string;
    isHook: boolean;
    skipped: boolean;
}

export interface TestError {
    message?: string;
    estack?: string;
    diff?: string | null;
}

export interface Hook {
    title: string;
    fullTitle: string;
    timedOut: boolean | null;
    duration: number;
    state: string | null;
    speed: string | null;
    pass: boolean;
    fail: boolean;
    pending: boolean;
    context: string | null;
    code: string;
    err: TestError;
    uuid: string;
    parentUUID: string;
    isHook: boolean;
    skipped: boolean;
}

/**
 * Parsed spec file with its path and results
 */
export interface ParsedSpecFile {
    filePath: string;
    specPath: string;
    result: MochawesomeResult;
}

/**
 * Calculation result outputs
 */
export interface CalculationResult {
    passed: number;
    failed: number;
    pending: number;
    totalSpecs: number;
    commitStatusMessage: string;
    failedSpecs: string;
    failedSpecsCount: number;
    failedTests: string;
    total: number;
    passRate: string;
    color: string;
}

export interface FailedTest {
    title: string;
    file: string;
}
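These interfaces describe the JSON that mochawesome writes for each spec file. A minimal, hypothetical sketch of loading one such file against the types (the path is a placeholder; parseSpecFile in the merge module above does the same thing with error handling):

// Hypothetical reader for a single mochawesome JSON file.
import * as fs from "fs/promises";
import type { MochawesomeResult } from "./types";

async function inspect(filePath: string): Promise<void> {
    const raw = await fs.readFile(filePath, "utf8");
    const result: MochawesomeResult = JSON.parse(raw);
    // results[0].file is the relative spec path that the merge logic keys on.
    console.log(result.results[0]?.file, result.stats.passes, result.stats.failures);
}

// Placeholder path following the layout loadSpecFiles expects.
inspect("results/mochawesome-report/json/tests/integration/login_spec.json").catch(console.error);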
@@ -1,17 +0,0 @@
{
    "compilerOptions": {
        "target": "ES2022",
        "module": "CommonJS",
        "moduleResolution": "Node",
        "strict": true,
        "esModuleInterop": true,
        "skipLibCheck": true,
        "forceConsistentCasingInFileNames": true,
        "outDir": "./dist",
        "rootDir": "./src",
        "declaration": true,
        "isolatedModules": true
    },
    "include": ["src/**/*"],
    "exclude": ["node_modules", "dist", "**/*.test.ts"]
}
@@ -1 +0,0 @@
{"root":["./src/index.ts","./src/main.ts","./src/merge.ts","./src/types.ts"],"version":"5.9.3"}
@@ -1,13 +0,0 @@
import { defineConfig } from "tsup";

export default defineConfig({
    entry: ["src/index.ts"],
    format: ["cjs"],
    target: "node24",
    clean: true,
    minify: false,
    sourcemap: false,
    splitting: false,
    bundle: true,
    noExternal: [/.*/],
});
@@ -1,2 +0,0 @@
node_modules/
.env
@@ -1,51 +0,0 @@
name: Calculate Playwright Results
description: Calculate Playwright test results with optional merge of retest results
author: Mattermost

inputs:
  original-results-path:
    description: Path to the original Playwright results.json file
    required: true
  retest-results-path:
    description: Path to the retest Playwright results.json file (optional - if not provided, only calculates from original)
    required: false
  output-path:
    description: Path to write the merged results.json file (defaults to original-results-path)
    required: false

outputs:
  # Merge outputs
  merged:
    description: Whether merge was performed (true/false)

  # Calculation outputs (same as calculate-playwright-test-results)
  passed:
    description: Number of passed tests (not including flaky)
  failed:
    description: Number of failed tests
  flaky:
    description: Number of flaky tests (failed initially but passed on retry)
  skipped:
    description: Number of skipped tests
  total_specs:
    description: Total number of spec files
  commit_status_message:
    description: Message for commit status (e.g., "X failed, Y passed (Z spec files)")
  failed_specs:
    description: Comma-separated list of failed spec files (for retest)
  failed_specs_count:
    description: Number of failed spec files
  failed_tests:
    description: Markdown table rows of failed tests (for GitHub summary)
  total:
    description: Total number of tests (passed + flaky + failed)
  pass_rate:
    description: Pass rate percentage (e.g., "100.00")
  passing:
    description: Number of passing tests (passed + flaky)
  color:
    description: Color for webhook based on pass rate (green=100%, yellow=99%+, orange=98%+, red=<98%)

runs:
  using: node24
  main: dist/index.js
19313  .github/actions/calculate-playwright-results/dist/index.js (vendored)
File diff suppressed because one or more lines are too long
@@ -1,6 +0,0 @@
module.exports = {
    preset: "ts-jest",
    testEnvironment: "node",
    testMatch: ["**/*.test.ts"],
    moduleFileExtensions: ["ts", "js"],
};
9136  .github/actions/calculate-playwright-results/package-lock.json (generated, vendored)
File diff suppressed because it is too large
@@ -1,27 +0,0 @@
{
    "name": "calculate-playwright-results",
    "private": true,
    "version": "0.1.0",
    "main": "dist/index.js",
    "scripts": {
        "build": "tsup",
        "prettier": "npx prettier --write \"src/**/*.ts\"",
        "local-action": "local-action . src/main.ts .env",
        "test": "jest --verbose",
        "test:watch": "jest --watch --verbose",
        "test:silent": "jest --silent",
        "tsc": "tsc -b"
    },
    "dependencies": {
        "@actions/core": "3.0.0"
    },
    "devDependencies": {
        "@github/local-action": "7.0.0",
        "@types/jest": "30.0.0",
        "@types/node": "25.2.0",
        "jest": "30.2.0",
        "ts-jest": "29.4.6",
        "tsup": "8.5.1",
        "typescript": "5.9.3"
    }
}
@@ -1,3 +0,0 @@
import { run } from "./main";

run();
@@ -1,121 +0,0 @@
import * as core from "@actions/core";
import * as fs from "fs/promises";
import type { PlaywrightResults } from "./types";
import { mergeResults, calculateResults } from "./merge";

export async function run(): Promise<void> {
    const originalPath = core.getInput("original-results-path", {
        required: true,
    });
    const retestPath = core.getInput("retest-results-path"); // Optional
    const outputPath = core.getInput("output-path") || originalPath;

    core.info(`Original results: ${originalPath}`);
    core.info(`Retest results: ${retestPath || "(not provided)"}`);
    core.info(`Output path: ${outputPath}`);

    // Check if original file exists
    const originalExists = await fs
        .access(originalPath)
        .then(() => true)
        .catch(() => false);

    if (!originalExists) {
        core.setFailed(`Original results not found at ${originalPath}`);
        return;
    }

    // Read original file
    core.info("Reading original results...");
    const originalContent = await fs.readFile(originalPath, "utf8");
    const original: PlaywrightResults = JSON.parse(originalContent);

    core.info(
        `Original: ${original.suites.length} suites, stats: ${JSON.stringify(original.stats)}`,
    );

    // Check if retest path is provided and exists
    let finalResults: PlaywrightResults;
    let merged = false;

    if (retestPath) {
        const retestExists = await fs
            .access(retestPath)
            .then(() => true)
            .catch(() => false);

        if (retestExists) {
            // Read retest file and merge
            core.info("Reading retest results...");
            const retestContent = await fs.readFile(retestPath, "utf8");
            const retest: PlaywrightResults = JSON.parse(retestContent);

            core.info(
                `Retest: ${retest.suites.length} suites, stats: ${JSON.stringify(retest.stats)}`,
            );

            // Merge results
            core.info("Merging results at suite level...");
            const mergeResult = mergeResults(original, retest);
            finalResults = mergeResult.merged;
            merged = true;

            core.info(`Retested specs: ${mergeResult.retestFiles.join(", ")}`);
            core.info(
                `Kept ${original.suites.length - mergeResult.retestFiles.length} original suites`,
            );
            core.info(`Added ${retest.suites.length} retest suites`);
            core.info(`Total merged suites: ${mergeResult.totalSuites}`);

            // Write merged results
            core.info(`Writing merged results to ${outputPath}...`);
            await fs.writeFile(
                outputPath,
                JSON.stringify(finalResults, null, 2),
            );
        } else {
            core.warning(
                `Retest results not found at ${retestPath}, using original only`,
            );
            finalResults = original;
        }
    } else {
        core.info("No retest path provided, using original results only");
        finalResults = original;
    }

    // Calculate all outputs from final results
    const calc = calculateResults(finalResults);

    // Log results
    core.startGroup("Final Results");
    core.info(`Passed: ${calc.passed}`);
    core.info(`Failed: ${calc.failed}`);
    core.info(`Flaky: ${calc.flaky}`);
    core.info(`Skipped: ${calc.skipped}`);
    core.info(`Passing (passed + flaky): ${calc.passing}`);
    core.info(`Total: ${calc.total}`);
    core.info(`Pass Rate: ${calc.passRate}%`);
    core.info(`Color: ${calc.color}`);
    core.info(`Spec Files: ${calc.totalSpecs}`);
    core.info(`Failed Specs Count: ${calc.failedSpecsCount}`);
    core.info(`Commit Status Message: ${calc.commitStatusMessage}`);
    core.info(`Failed Specs: ${calc.failedSpecs || "none"}`);
    core.endGroup();

    // Set all outputs
    core.setOutput("merged", merged.toString());
    core.setOutput("passed", calc.passed);
    core.setOutput("failed", calc.failed);
    core.setOutput("flaky", calc.flaky);
    core.setOutput("skipped", calc.skipped);
    core.setOutput("total_specs", calc.totalSpecs);
    core.setOutput("commit_status_message", calc.commitStatusMessage);
    core.setOutput("failed_specs", calc.failedSpecs);
    core.setOutput("failed_specs_count", calc.failedSpecsCount);
    core.setOutput("failed_tests", calc.failedTests);
    core.setOutput("total", calc.total);
    core.setOutput("pass_rate", calc.passRate);
    core.setOutput("passing", calc.passing);
    core.setOutput("color", calc.color);
}
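As with the Cypress variant, run() only wires action inputs and outputs around the merge helpers. A rough sketch of using calculateResults directly on a Playwright JSON report for local inspection (not part of this diff; the report path is a placeholder):

// Illustrative local usage of the exported helpers.
import * as fs from "fs/promises";
import { calculateResults } from "./merge";
import type { PlaywrightResults } from "./types";

async function summarize(reportPath: string): Promise<void> {
    const report: PlaywrightResults = JSON.parse(await fs.readFile(reportPath, "utf8"));
    const calc = calculateResults(report);
    console.log(
        `${calc.passRate}% pass rate (${calc.passing}/${calc.total}), ` +
            `${calc.flaky} flaky, ${calc.failed} failed, ${calc.totalSpecs} specs`,
    );
}

// Hypothetical path to a Playwright JSON reporter output.
summarize("e2e-tests/playwright/results.json").catch(console.error);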
@@ -1,509 +0,0 @@
import { mergeResults, computeStats, calculateResults } from "./merge";
import type { PlaywrightResults, Suite } from "./types";

describe("mergeResults", () => {
    const createSuite = (file: string, tests: { status: string }[]): Suite => ({
        title: file,
        file,
        column: 0,
        line: 0,
        specs: [
            {
                title: "test spec",
                ok: true,
                tags: [],
                tests: tests.map((t) => ({
                    timeout: 60000,
                    annotations: [],
                    expectedStatus: "passed",
                    projectId: "chrome",
                    projectName: "chrome",
                    results: [
                        {
                            workerIndex: 0,
                            parallelIndex: 0,
                            status: t.status,
                            duration: 1000,
                            errors: [],
                            stdout: [],
                            stderr: [],
                            retry: 0,
                            startTime: new Date().toISOString(),
                            annotations: [],
                        },
                    ],
                })),
            },
        ],
    });

    it("should keep original suites not in retest", () => {
        const original: PlaywrightResults = {
            config: {},
            suites: [
                createSuite("spec1.ts", [{ status: "passed" }]),
                createSuite("spec2.ts", [{ status: "failed" }]),
                createSuite("spec3.ts", [{ status: "passed" }]),
            ],
            stats: {
                startTime: new Date().toISOString(),
                duration: 10000,
                expected: 2,
                unexpected: 1,
                skipped: 0,
                flaky: 0,
            },
        };

        const retest: PlaywrightResults = {
            config: {},
            suites: [createSuite("spec2.ts", [{ status: "passed" }])],
            stats: {
                startTime: new Date().toISOString(),
                duration: 5000,
                expected: 1,
                unexpected: 0,
                skipped: 0,
                flaky: 0,
            },
        };

        const result = mergeResults(original, retest);

        expect(result.totalSuites).toBe(3);
        expect(result.retestFiles).toEqual(["spec2.ts"]);
        expect(result.merged.suites.map((s) => s.file)).toEqual([
            "spec1.ts",
            "spec3.ts",
            "spec2.ts",
        ]);
    });

    it("should compute correct stats from merged suites", () => {
        const original: PlaywrightResults = {
            config: {},
            suites: [
                createSuite("spec1.ts", [{ status: "passed" }]),
                createSuite("spec2.ts", [{ status: "failed" }]),
            ],
            stats: {
                startTime: new Date().toISOString(),
                duration: 10000,
                expected: 1,
                unexpected: 1,
                skipped: 0,
                flaky: 0,
            },
        };

        const retest: PlaywrightResults = {
            config: {},
            suites: [createSuite("spec2.ts", [{ status: "passed" }])],
            stats: {
                startTime: new Date().toISOString(),
                duration: 5000,
                expected: 1,
                unexpected: 0,
                skipped: 0,
                flaky: 0,
            },
        };

        const result = mergeResults(original, retest);

        expect(result.stats.expected).toBe(2);
        expect(result.stats.unexpected).toBe(0);
        expect(result.stats.duration).toBe(15000);
    });
});

describe("computeStats", () => {
    it("should count flaky tests correctly", () => {
        const suites: Suite[] = [
            {
                title: "spec1.ts",
                file: "spec1.ts",
                column: 0,
                line: 0,
                specs: [
                    {
                        title: "flaky test",
                        ok: true,
                        tags: [],
                        tests: [
                            {
                                timeout: 60000,
                                annotations: [],
                                expectedStatus: "passed",
                                projectId: "chrome",
                                projectName: "chrome",
                                results: [
                                    {
                                        workerIndex: 0,
                                        parallelIndex: 0,
                                        status: "failed",
                                        duration: 1000,
                                        errors: [],
                                        stdout: [],
                                        stderr: [],
                                        retry: 0,
                                        startTime: new Date().toISOString(),
                                        annotations: [],
                                    },
                                    {
                                        workerIndex: 0,
                                        parallelIndex: 0,
                                        status: "passed",
                                        duration: 1000,
                                        errors: [],
                                        stdout: [],
                                        stderr: [],
                                        retry: 1,
                                        startTime: new Date().toISOString(),
                                        annotations: [],
                                    },
                                ],
                            },
                        ],
                    },
                ],
            },
        ];

        const stats = computeStats(suites);

        expect(stats.expected).toBe(0);
        expect(stats.flaky).toBe(1);
        expect(stats.unexpected).toBe(0);
    });
});

describe("calculateResults", () => {
    const createSuiteWithSpec = (
        file: string,
        specTitle: string,
        testResults: { status: string; retry: number }[],
    ): Suite => ({
        title: file,
        file,
        column: 0,
        line: 0,
        specs: [
            {
                title: specTitle,
                ok: testResults[testResults.length - 1].status === "passed",
                tags: [],
                tests: [
                    {
                        timeout: 60000,
                        annotations: [],
                        expectedStatus: "passed",
                        projectId: "chrome",
                        projectName: "chrome",
                        results: testResults.map((r) => ({
                            workerIndex: 0,
                            parallelIndex: 0,
                            status: r.status,
                            duration: 1000,
                            errors:
                                r.status === "failed"
                                    ? [{ message: "error" }]
                                    : [],
                            stdout: [],
                            stderr: [],
                            retry: r.retry,
                            startTime: new Date().toISOString(),
                            annotations: [],
                        })),
                        location: {
                            file,
                            line: 10,
                            column: 5,
                        },
                    },
                ],
            },
        ],
    });

    it("should calculate all outputs correctly for passing results", () => {
        const results: PlaywrightResults = {
            config: {},
            suites: [
                createSuiteWithSpec("login.spec.ts", "should login", [
                    { status: "passed", retry: 0 },
                ]),
                createSuiteWithSpec(
                    "messaging.spec.ts",
                    "should send message",
                    [{ status: "passed", retry: 0 }],
                ),
            ],
            stats: {
                startTime: new Date().toISOString(),
                duration: 5000,
                expected: 2,
                unexpected: 0,
                skipped: 0,
                flaky: 0,
            },
        };

        const calc = calculateResults(results);

        expect(calc.passed).toBe(2);
        expect(calc.failed).toBe(0);
        expect(calc.flaky).toBe(0);
        expect(calc.skipped).toBe(0);
        expect(calc.total).toBe(2);
        expect(calc.passing).toBe(2);
        expect(calc.passRate).toBe("100.00");
        expect(calc.color).toBe("#43A047"); // green
        expect(calc.totalSpecs).toBe(2);
        expect(calc.failedSpecs).toBe("");
        expect(calc.failedSpecsCount).toBe(0);
        expect(calc.commitStatusMessage).toBe("100% passed (2), 2 specs");
    });

    it("should calculate all outputs correctly for results with failures", () => {
        const results: PlaywrightResults = {
            config: {},
            suites: [
                createSuiteWithSpec("login.spec.ts", "should login", [
                    { status: "passed", retry: 0 },
                ]),
                createSuiteWithSpec(
                    "channels.spec.ts",
                    "should create channel",
                    [
                        { status: "failed", retry: 0 },
                        { status: "failed", retry: 1 },
                        { status: "failed", retry: 2 },
                    ],
                ),
            ],
            stats: {
                startTime: new Date().toISOString(),
                duration: 10000,
                expected: 1,
                unexpected: 1,
                skipped: 0,
                flaky: 0,
            },
        };

        const calc = calculateResults(results);

        expect(calc.passed).toBe(1);
        expect(calc.failed).toBe(1);
        expect(calc.flaky).toBe(0);
        expect(calc.total).toBe(2);
        expect(calc.passing).toBe(1);
        expect(calc.passRate).toBe("50.00");
        expect(calc.color).toBe("#F44336"); // red
        expect(calc.totalSpecs).toBe(2);
        expect(calc.failedSpecs).toBe("channels.spec.ts");
        expect(calc.failedSpecsCount).toBe(1);
        expect(calc.commitStatusMessage).toBe(
            "50.0% passed (1/2), 1 failed, 2 specs",
        );
        expect(calc.failedTests).toContain("should create channel");
    });
});

describe("full integration: original with failure, retest passes", () => {
    const createSuiteWithSpec = (
        file: string,
        specTitle: string,
        testResults: { status: string; retry: number }[],
    ): Suite => ({
        title: file,
        file,
        column: 0,
        line: 0,
        specs: [
            {
                title: specTitle,
                ok: testResults[testResults.length - 1].status === "passed",
                tags: [],
                tests: [
                    {
                        timeout: 60000,
                        annotations: [],
                        expectedStatus: "passed",
                        projectId: "chrome",
                        projectName: "chrome",
                        results: testResults.map((r) => ({
                            workerIndex: 0,
                            parallelIndex: 0,
                            status: r.status,
                            duration: 1000,
                            errors:
                                r.status === "failed"
                                    ? [{ message: "error" }]
                                    : [],
                            stdout: [],
                            stderr: [],
                            retry: r.retry,
                            startTime: new Date().toISOString(),
                            annotations: [],
                        })),
                        location: {
                            file,
                            line: 10,
                            column: 5,
                        },
                    },
                ],
            },
        ],
    });

    it("should merge and calculate correctly when failed test passes on retest", () => {
        // Original: 2 passed, 1 failed (channels.spec.ts)
        const original: PlaywrightResults = {
            config: {},
            suites: [
                createSuiteWithSpec("login.spec.ts", "should login", [
                    { status: "passed", retry: 0 },
                ]),
                createSuiteWithSpec(
                    "messaging.spec.ts",
                    "should send message",
                    [{ status: "passed", retry: 0 }],
                ),
                createSuiteWithSpec(
                    "channels.spec.ts",
                    "should create channel",
                    [
                        { status: "failed", retry: 0 },
                        { status: "failed", retry: 1 },
                        { status: "failed", retry: 2 },
                    ],
                ),
            ],
            stats: {
                startTime: new Date().toISOString(),
                duration: 18000,
                expected: 2,
                unexpected: 1,
                skipped: 0,
                flaky: 0,
            },
        };

        // Retest: channels.spec.ts now passes
        const retest: PlaywrightResults = {
            config: {},
            suites: [
                createSuiteWithSpec(
                    "channels.spec.ts",
                    "should create channel",
                    [{ status: "passed", retry: 0 }],
                ),
            ],
            stats: {
                startTime: new Date().toISOString(),
                duration: 3000,
                expected: 1,
                unexpected: 0,
                skipped: 0,
                flaky: 0,
            },
        };

        // Step 1: Verify original has failure
        const originalCalc = calculateResults(original);
        expect(originalCalc.passed).toBe(2);
        expect(originalCalc.failed).toBe(1);
        expect(originalCalc.passRate).toBe("66.67");

        // Step 2: Merge results
        const mergeResult = mergeResults(original, retest);

        // Step 3: Verify merge structure
        expect(mergeResult.totalSuites).toBe(3);
        expect(mergeResult.retestFiles).toEqual(["channels.spec.ts"]);
        expect(mergeResult.merged.suites.map((s) => s.file)).toEqual([
            "login.spec.ts",
            "messaging.spec.ts",
            "channels.spec.ts",
        ]);

        // Step 4: Calculate final results
        const finalCalc = calculateResults(mergeResult.merged);

        // Step 5: Verify all outputs
        expect(finalCalc.passed).toBe(3);
        expect(finalCalc.failed).toBe(0);
        expect(finalCalc.flaky).toBe(0);
        expect(finalCalc.skipped).toBe(0);
        expect(finalCalc.total).toBe(3);
        expect(finalCalc.passing).toBe(3);
        expect(finalCalc.passRate).toBe("100.00");
        expect(finalCalc.color).toBe("#43A047"); // green
        expect(finalCalc.totalSpecs).toBe(3);
        expect(finalCalc.failedSpecs).toBe("");
        expect(finalCalc.failedSpecsCount).toBe(0);
        expect(finalCalc.commitStatusMessage).toBe("100% passed (3), 3 specs");
        expect(finalCalc.failedTests).toBe("");
    });

    it("should handle case where retest still fails", () => {
        // Original: 1 passed, 1 failed
        const original: PlaywrightResults = {
            config: {},
            suites: [
                createSuiteWithSpec("login.spec.ts", "should login", [
                    { status: "passed", retry: 0 },
                ]),
                createSuiteWithSpec(
                    "channels.spec.ts",
                    "should create channel",
                    [{ status: "failed", retry: 0 }],
                ),
            ],
            stats: {
                startTime: new Date().toISOString(),
                duration: 10000,
                expected: 1,
                unexpected: 1,
                skipped: 0,
                flaky: 0,
            },
        };

        // Retest: channels.spec.ts still fails
        const retest: PlaywrightResults = {
            config: {},
            suites: [
                createSuiteWithSpec(
                    "channels.spec.ts",
                    "should create channel",
                    [
                        { status: "failed", retry: 0 },
                        { status: "failed", retry: 1 },
                    ],
                ),
            ],
            stats: {
                startTime: new Date().toISOString(),
                duration: 5000,
                expected: 0,
                unexpected: 1,
                skipped: 0,
                flaky: 0,
            },
        };

        const mergeResult = mergeResults(original, retest);
        const finalCalc = calculateResults(mergeResult.merged);

        expect(finalCalc.passed).toBe(1);
        expect(finalCalc.failed).toBe(1);
        expect(finalCalc.passRate).toBe("50.00");
        expect(finalCalc.color).toBe("#F44336"); // red
        expect(finalCalc.failedSpecs).toBe("channels.spec.ts");
        expect(finalCalc.failedSpecsCount).toBe(1);
    });
});
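The tests above pin the Playwright accounting: a test whose final retry passed after an earlier failure counts as flaky, passing is passed + flaky, and total is passing + failed (skipped tests are excluded). A short worked example with invented numbers:

// Worked example of the accounting exercised above (numbers invented for illustration).
const passed = 95; // expected: passed with no failed attempt
const flaky = 4; // failed at least once, passed on a later retry
const failed = 1; // unexpected: still failing on the final retry

const passing = passed + flaky; // 99
const total = passing + failed; // 100
const passRate = ((passing * 100) / total).toFixed(2); // "99.00" -> "#FFEB3B" (yellow)
console.log(passing, total, passRate);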
|
@ -1,291 +0,0 @@
import type {
    PlaywrightResults,
    Suite,
    Test,
    Stats,
    MergeResult,
    CalculationResult,
    FailedTest,
} from "./types";

interface TestInfo {
    title: string;
    file: string;
    finalStatus: string;
    hadFailure: boolean;
}

/**
 * Extract all tests from suites recursively with their info
 */
function getAllTestsWithInfo(suites: Suite[]): TestInfo[] {
    const tests: TestInfo[] = [];

    function extractFromSuite(suite: Suite) {
        for (const spec of suite.specs || []) {
            for (const test of spec.tests || []) {
                if (!test.results || test.results.length === 0) {
                    continue;
                }

                const finalResult = test.results[test.results.length - 1];
                const hadFailure = test.results.some(
                    (r) => r.status === "failed" || r.status === "timedOut",
                );

                tests.push({
                    title: spec.title || test.projectName,
                    file: test.location?.file || suite.file,
                    finalStatus: finalResult.status,
                    hadFailure,
                });
            }
        }
        for (const nestedSuite of suite.suites || []) {
            extractFromSuite(nestedSuite);
        }
    }

    for (const suite of suites) {
        extractFromSuite(suite);
    }

    return tests;
}

/**
 * Extract all tests from suites recursively
 */
function getAllTests(suites: Suite[]): Test[] {
    const tests: Test[] = [];

    function extractFromSuite(suite: Suite) {
        for (const spec of suite.specs || []) {
            tests.push(...spec.tests);
        }
        for (const nestedSuite of suite.suites || []) {
            extractFromSuite(nestedSuite);
        }
    }

    for (const suite of suites) {
        extractFromSuite(suite);
    }

    return tests;
}

/**
 * Compute stats from suites
 */
export function computeStats(
    suites: Suite[],
    originalStats?: Stats,
    retestStats?: Stats,
): Stats {
    const tests = getAllTests(suites);

    let expected = 0;
    let unexpected = 0;
    let skipped = 0;
    let flaky = 0;

    for (const test of tests) {
        if (!test.results || test.results.length === 0) {
            continue;
        }

        const finalResult = test.results[test.results.length - 1];
        const finalStatus = finalResult.status;

        // Check if any result was a failure
        const hadFailure = test.results.some(
            (r) => r.status === "failed" || r.status === "timedOut",
        );

        if (finalStatus === "skipped") {
            skipped++;
        } else if (finalStatus === "failed" || finalStatus === "timedOut") {
            unexpected++;
        } else if (finalStatus === "passed") {
            if (hadFailure) {
                flaky++;
            } else {
                expected++;
            }
        }
    }

    // Compute duration as sum of both runs
    const duration =
        (originalStats?.duration || 0) + (retestStats?.duration || 0);

    return {
        startTime: originalStats?.startTime || new Date().toISOString(),
        duration,
        expected,
        unexpected,
        skipped,
        flaky,
    };
}

/**
 * Get color based on pass rate
 */
function getColor(passRate: number): string {
    if (passRate === 100) {
        return "#43A047"; // green
    } else if (passRate >= 99) {
        return "#FFEB3B"; // yellow
    } else if (passRate >= 98) {
        return "#FF9800"; // orange
    } else {
        return "#F44336"; // red
    }
}

/**
 * Calculate all outputs from results
 */
export function calculateResults(
    results: PlaywrightResults,
): CalculationResult {
    const stats = results.stats || {
        expected: 0,
        unexpected: 0,
        skipped: 0,
        flaky: 0,
        startTime: new Date().toISOString(),
        duration: 0,
    };

    const passed = stats.expected;
    const failed = stats.unexpected;
    const flaky = stats.flaky;
    const skipped = stats.skipped;

    // Count unique spec files
    const specFiles = new Set<string>();
    for (const suite of results.suites) {
        specFiles.add(suite.file);
    }
    const totalSpecs = specFiles.size;

    // Get all tests with info for failed tests extraction
    const testsInfo = getAllTestsWithInfo(results.suites);

    // Extract failed specs
    const failedSpecsSet = new Set<string>();
    const failedTestsList: FailedTest[] = [];

    for (const test of testsInfo) {
        if (test.finalStatus === "failed" || test.finalStatus === "timedOut") {
            failedSpecsSet.add(test.file);
            failedTestsList.push({
                title: test.title,
                file: test.file,
            });
        }
    }

    const failedSpecs = Array.from(failedSpecsSet).join(",");
    const failedSpecsCount = failedSpecsSet.size;

    // Build failed tests markdown table (limit to 10)
    let failedTests = "";
    const uniqueFailedTests = failedTestsList.filter(
        (test, index, self) =>
            index ===
            self.findIndex(
                (t) => t.title === test.title && t.file === test.file,
            ),
    );

    if (uniqueFailedTests.length > 0) {
        const limitedTests = uniqueFailedTests.slice(0, 10);
        failedTests = limitedTests
            .map((t) => {
                const escapedTitle = t.title
                    .replace(/`/g, "\\`")
                    .replace(/\|/g, "\\|");
                return `| ${escapedTitle} | ${t.file} |`;
            })
            .join("\n");

        if (uniqueFailedTests.length > 10) {
            const remaining = uniqueFailedTests.length - 10;
            failedTests += `\n| _...and ${remaining} more failed tests_ | |`;
        }
    } else if (failed > 0) {
        failedTests = "| Unable to parse failed tests | - |";
    }

    // Calculate totals and pass rate
    const passing = passed + flaky;
    const total = passing + failed;
    const passRate = total > 0 ? ((passing * 100) / total).toFixed(2) : "0.00";
    const color = getColor(parseFloat(passRate));

    // Build commit status message
    const rate = total > 0 ? (passing * 100) / total : 0;
    const rateStr = rate === 100 ? "100%" : `${rate.toFixed(1)}%`;
    const specSuffix = totalSpecs > 0 ? `, ${totalSpecs} specs` : "";
    const commitStatusMessage =
        rate === 100
            ? `${rateStr} passed (${passing})${specSuffix}`
            : `${rateStr} passed (${passing}/${total}), ${failed} failed${specSuffix}`;

    return {
        passed,
        failed,
        flaky,
        skipped,
        totalSpecs,
        commitStatusMessage,
        failedSpecs,
        failedSpecsCount,
        failedTests,
        total,
        passRate,
        passing,
        color,
    };
}

/**
 * Merge original and retest results at suite level
 * - Keep original suites that are NOT in retest
 * - Add all retest suites (replacing matching originals)
 */
export function mergeResults(
    original: PlaywrightResults,
    retest: PlaywrightResults,
): MergeResult {
    // Get list of retested spec files
    const retestFiles = retest.suites.map((s) => s.file);

    // Filter original suites - keep only those NOT in retest
    const keptOriginalSuites = original.suites.filter(
        (suite) => !retestFiles.includes(suite.file),
    );

    // Merge: kept original suites + all retest suites
    const mergedSuites = [...keptOriginalSuites, ...retest.suites];

    // Compute stats from merged suites
    const stats = computeStats(mergedSuites, original.stats, retest.stats);

    const merged: PlaywrightResults = {
        config: original.config,
        suites: mergedSuites,
        stats,
    };

    return {
        merged,
        stats,
        totalSuites: mergedSuites.length,
        retestFiles,
    };
}
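Editor's note: the two exported helpers above are meant to compose — merge a retest report into the original report, then compute the summary outputs from the merged result. The following is a minimal usage sketch, not part of the removed file; it assumes both runs were produced by Playwright's JSON reporter and that the module above lives at src/merge.ts (as the build info later in this diff suggests). The file paths are hypothetical.

// Editorial usage sketch: merge an original Playwright JSON report with a
// retest report, then derive the summary outputs. Paths are hypothetical.
import { readFileSync } from "node:fs";
import { mergeResults, calculateResults } from "./merge";
import type { PlaywrightResults } from "./types";

const original: PlaywrightResults = JSON.parse(
    readFileSync("results/original.json", "utf8"),
);
const retest: PlaywrightResults = JSON.parse(
    readFileSync("results/retest.json", "utf8"),
);

// Suites present in the retest replace the matching original suites;
// stats are recomputed over the merged suites.
const { merged, retestFiles } = mergeResults(original, retest);
const calc = calculateResults(merged);

console.log(`retested specs: ${retestFiles.join(", ")}`);
console.log(`${calc.commitStatusMessage} (color ${calc.color})`);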
@ -1,88 +0,0 @@
export interface PlaywrightResults {
    config: Record<string, unknown>;
    suites: Suite[];
    stats?: Stats;
}

export interface Suite {
    title: string;
    file: string;
    column: number;
    line: number;
    specs: Spec[];
    suites?: Suite[];
}

export interface Spec {
    title: string;
    ok: boolean;
    tags: string[];
    tests: Test[];
}

export interface Test {
    timeout: number;
    annotations: unknown[];
    expectedStatus: string;
    projectId: string;
    projectName: string;
    results: TestResult[];
    location?: TestLocation;
}

export interface TestResult {
    workerIndex: number;
    parallelIndex: number;
    status: string;
    duration: number;
    errors: unknown[];
    stdout: unknown[];
    stderr: unknown[];
    retry: number;
    startTime: string;
    annotations: unknown[];
    attachments?: unknown[];
}

export interface TestLocation {
    file: string;
    line: number;
    column: number;
}

export interface Stats {
    startTime: string;
    duration: number;
    expected: number;
    unexpected: number;
    skipped: number;
    flaky: number;
}

export interface MergeResult {
    merged: PlaywrightResults;
    stats: Stats;
    totalSuites: number;
    retestFiles: string[];
}

export interface CalculationResult {
    passed: number;
    failed: number;
    flaky: number;
    skipped: number;
    totalSpecs: number;
    commitStatusMessage: string;
    failedSpecs: string;
    failedSpecsCount: number;
    failedTests: string;
    total: number;
    passRate: string;
    passing: number;
    color: string;
}

export interface FailedTest {
    title: string;
    file: string;
}
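Editor's note: for reference, a small example of data satisfying the interfaces above. This is an editorial sketch with invented values; real Playwright JSON reports carry more data behind the optional and unknown-typed members.

// Editorial example only: minimal object conforming to the interfaces above.
import type { PlaywrightResults } from "./types";

const example: PlaywrightResults = {
    config: {},
    suites: [
        {
            title: "channels.spec.ts",
            file: "channels.spec.ts",
            column: 0,
            line: 0,
            specs: [
                {
                    title: "posts a message",
                    ok: true,
                    tags: ["@smoke"],
                    tests: [
                        {
                            timeout: 30000,
                            annotations: [],
                            expectedStatus: "passed",
                            projectId: "chrome",
                            projectName: "chrome",
                            results: [
                                {
                                    workerIndex: 0,
                                    parallelIndex: 0,
                                    status: "passed",
                                    duration: 1200,
                                    errors: [],
                                    stdout: [],
                                    stderr: [],
                                    retry: 0,
                                    startTime: "2025-01-01T00:00:00.000Z",
                                    annotations: [],
                                },
                            ],
                        },
                    ],
                },
            ],
        },
    ],
    stats: {
        startTime: "2025-01-01T00:00:00.000Z",
        duration: 1200,
        expected: 1,
        unexpected: 0,
        skipped: 0,
        flaky: 0,
    },
};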
@ -1,17 +0,0 @@
{
    "compilerOptions": {
        "target": "ES2022",
        "module": "CommonJS",
        "moduleResolution": "Node",
        "strict": true,
        "esModuleInterop": true,
        "skipLibCheck": true,
        "forceConsistentCasingInFileNames": true,
        "outDir": "dist",
        "rootDir": "./src",
        "declaration": true,
        "isolatedModules": true
    },
    "include": ["src/**/*"],
    "exclude": ["node_modules", "dist", "**/*.test.ts"]
}
@ -1 +0,0 @@
{"root":["./src/index.ts","./src/main.ts","./src/merge.ts","./src/types.ts"],"version":"5.9.3"}
@ -1,12 +0,0 @@
import { defineConfig } from "tsup";

export default defineConfig({
    entry: ["src/index.ts"],
    format: ["cjs"],
    outDir: "dist",
    clean: true,
    noExternal: [/.*/], // Bundle all dependencies
    minify: false,
    sourcemap: false,
    target: "node24",
});
91 .github/actions/check-e2e-test-only/action.yml
@ -1,91 +0,0 @@
---
name: Check E2E Test Only
description: Check if PR contains only E2E test changes and determine the appropriate docker image tag

inputs:
  base_sha:
    description: Base commit SHA (PR base)
    required: false
  head_sha:
    description: Head commit SHA (PR head)
    required: false
  pr_number:
    description: PR number (used to fetch SHAs via API if base_sha/head_sha not provided)
    required: false

outputs:
  e2e_test_only:
    description: Whether the PR contains only E2E test changes (true/false)
    value: ${{ steps.check.outputs.e2e_test_only }}
  image_tag:
    description: Docker image tag to use (master for E2E-only, short SHA for mixed)
    value: ${{ steps.check.outputs.image_tag }}

runs:
  using: composite
  steps:
    - name: ci/check-e2e-test-only
      id: check
      shell: bash
      env:
        GH_TOKEN: ${{ github.token }}
        INPUT_BASE_SHA: ${{ inputs.base_sha }}
        INPUT_HEAD_SHA: ${{ inputs.head_sha }}
        INPUT_PR_NUMBER: ${{ inputs.pr_number }}
      run: |
        # Resolve SHAs from PR number if not provided
        if [ -z "$INPUT_BASE_SHA" ] || [ -z "$INPUT_HEAD_SHA" ]; then
          if [ -z "$INPUT_PR_NUMBER" ]; then
            echo "::error::Either base_sha/head_sha or pr_number must be provided"
            exit 1
          fi

          echo "Resolving SHAs from PR #${INPUT_PR_NUMBER}"
          PR_DATA=$(gh api "repos/${{ github.repository }}/pulls/${INPUT_PR_NUMBER}")
          INPUT_BASE_SHA=$(echo "$PR_DATA" | jq -r '.base.sha')
          INPUT_HEAD_SHA=$(echo "$PR_DATA" | jq -r '.head.sha')

          if [ -z "$INPUT_BASE_SHA" ] || [ "$INPUT_BASE_SHA" = "null" ] || \
             [ -z "$INPUT_HEAD_SHA" ] || [ "$INPUT_HEAD_SHA" = "null" ]; then
            echo "::error::Could not resolve SHAs for PR #${INPUT_PR_NUMBER}"
            exit 1
          fi
        fi

        SHORT_SHA="${INPUT_HEAD_SHA::7}"

        # Get changed files - try git first, fall back to API
        CHANGED_FILES=$(git diff --name-only "$INPUT_BASE_SHA"..."$INPUT_HEAD_SHA" 2>/dev/null || \
          gh api "repos/${{ github.repository }}/pulls/${INPUT_PR_NUMBER}/files" --jq '.[].filename' 2>/dev/null || echo "")

        if [ -z "$CHANGED_FILES" ]; then
          echo "::warning::Could not determine changed files, assuming not E2E-only"
          echo "e2e_test_only=false" >> $GITHUB_OUTPUT
          echo "image_tag=${SHORT_SHA}" >> $GITHUB_OUTPUT
          exit 0
        fi

        echo "Changed files:"
        echo "$CHANGED_FILES"

        # Check if all files are E2E-related
        E2E_TEST_ONLY="true"
        while IFS= read -r file; do
          [ -z "$file" ] && continue
          if [[ ! "$file" =~ ^e2e-tests/ ]] && \
             [[ ! "$file" =~ ^\.github/workflows/e2e- ]]; then
            echo "Non-E2E file found: $file"
            E2E_TEST_ONLY="false"
            break
          fi
        done <<< "$CHANGED_FILES"

        echo "E2E test only: ${E2E_TEST_ONLY}"

        # Set outputs
        echo "e2e_test_only=${E2E_TEST_ONLY}" >> $GITHUB_OUTPUT
        if [ "$E2E_TEST_ONLY" = "true" ]; then
          echo "image_tag=master" >> $GITHUB_OUTPUT
        else
          echo "image_tag=${SHORT_SHA}" >> $GITHUB_OUTPUT
        fi
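Editor's note: for readers skimming the shell script above, its classification rule reduces to two path prefixes. An equivalent TypeScript sketch, purely illustrative and not used by the action:

// Illustrative restatement of the rule implemented in bash above: a PR is
// "E2E test only" when every changed file is under e2e-tests/ or is an
// .github/workflows/e2e-* workflow file.
function isE2eTestOnly(changedFiles: string[]): boolean {
    return changedFiles
        .filter((file) => file.length > 0)
        .every(
            (file) =>
                file.startsWith("e2e-tests/") ||
                file.startsWith(".github/workflows/e2e-"),
        );
}

// isE2eTestOnly(["e2e-tests/cypress/tests/foo_spec.js"]) -> true  (image_tag: master)
// isE2eTestOnly(["server/app/post.go"])                  -> false (image_tag: short SHA)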
17 .github/actions/webapp-setup/action.yml
@ -15,28 +15,13 @@ runs:
          path: |
            webapp/node_modules
            webapp/channels/node_modules
            webapp/platform/client/node_modules
            webapp/platform/components/node_modules
            webapp/platform/types/node_modules
          key: node-modules-${{ runner.os }}-${{ hashFiles('webapp/package-lock.json') }}
    - name: ci/cache-platform-builds
      uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
      id: cache-platform-builds
      with:
        path: |
          webapp/platform/types/lib
          webapp/platform/client/lib
          webapp/platform/components/dist
        key: platform-builds-${{ runner.os }}-${{ hashFiles('webapp/platform/types/src/**', 'webapp/platform/client/src/**', 'webapp/platform/components/src/**') }}
        key: node-modules-${{ runner.os }}-${{ hashFiles('webapp/package-lock.json') }}
    - name: ci/get-node-modules
      if: steps.cache-node-modules.outputs.cache-hit != 'true'
      shell: bash
      working-directory: webapp
      run: |
        make node_modules
    - name: ci/build-platform-packages
      if: steps.cache-platform-builds.outputs.cache-hit != 'true'
      shell: bash
      working-directory: webapp
      run: |
        npm run build --workspace=platform/types --workspace=platform/client --workspace=platform/components
352 .github/e2e-tests-workflows.md
@ -1,352 +0,0 @@
# E2E Test Pipelines

Three automated E2E test pipelines cover different stages of the development lifecycle.

## Pipelines

| Pipeline | Trigger | Editions Tested | Image Source |
|----------|---------|----------------|--------------|
| **PR** (`e2e-tests-ci.yml`) | Argo Events on `Enterprise CI/docker-image` status | enterprise | `mattermostdevelopment/**` |
| **Merge to master/release** (`e2e-tests-on-merge.yml`) | Platform delivery after docker build (`delivery-platform/.github/workflows/mattermost-platform-delivery.yaml`) | enterprise, fips | `mattermostdevelopment/**` |
| **Release cut** (`e2e-tests-on-release.yml`) | Platform release after docker build (`delivery-platform/.github/workflows/release-mattermost-platform.yml`) | enterprise, fips, team (future) | `mattermost/**` |

All pipelines follow the **smoke-then-full** pattern: smoke tests run first, full tests only run if smoke passes.

## Workflow Files

```
.github/workflows/
├── e2e-tests-ci.yml                  # PR orchestrator
├── e2e-tests-on-merge.yml            # Merge orchestrator (master/release branches)
├── e2e-tests-on-release.yml          # Release cut orchestrator
├── e2e-tests-cypress.yml             # Shared wrapper: cypress smoke -> full
├── e2e-tests-playwright.yml          # Shared wrapper: playwright smoke -> full
├── e2e-tests-cypress-template.yml    # Template: actual cypress test execution
└── e2e-tests-playwright-template.yml # Template: actual playwright test execution
```

### Call hierarchy

```
e2e-tests-ci.yml ─────────────────┐
e2e-tests-on-merge.yml ───────────┤──► e2e-tests-cypress.yml    ──► e2e-tests-cypress-template.yml
e2e-tests-on-release.yml ─────────┘    e2e-tests-playwright.yml ──► e2e-tests-playwright-template.yml
```

---

## Pipeline 1: PR (`e2e-tests-ci.yml`)

Runs E2E tests for every PR commit after the enterprise docker image is built. Fails if the commit is not associated with an open PR.

**Trigger chain:**
```
PR commit ─► Enterprise CI builds docker image
          ─► Argo Events detects "Enterprise CI/docker-image" status
          ─► dispatches e2e-tests-ci.yml
```

For PRs from forks, `body.branches` may be empty so the workflow falls back to `master` for workflow files (trusted code), while `commit_sha` still points to the fork's commit.

**Jobs:** 2 (cypress + playwright), each does smoke -> full

**Commit statuses (4 total):**

| Context | Description (pending) | Description (result) |
|---------|----------------------|---------------------|
| `e2e-test/cypress-smoke\|enterprise` | `tests running, image_tag:abc1234` | `100% passed (1313), 440 specs, image_tag:abc1234` |
| `e2e-test/cypress-full\|enterprise` | `tests running, image_tag:abc1234` | `100% passed (1313), 440 specs, image_tag:abc1234` |
| `e2e-test/playwright-smoke\|enterprise` | `tests running, image_tag:abc1234` | `100% passed (200), 50 specs, image_tag:abc1234` |
| `e2e-test/playwright-full\|enterprise` | `tests running, image_tag:abc1234` | `99.5% passed (199/200), 1 failed, 50 specs, image_tag:abc1234` |

**Manual trigger (CLI):**
```bash
gh workflow run e2e-tests-ci.yml \
  --repo mattermost/mattermost \
  --field pr_number="35171"
```

**Manual trigger (GitHub UI):**
1. Go to **Actions** > **E2E Tests (smoke-then-full)**
2. Click **Run workflow**
3. Fill in `pr_number` (e.g., `35171`)
4. Click **Run workflow**

### On-demand testing

For on-demand E2E testing, the existing triggers still work:
- **Comment triggers**: `/e2e-test`, `/e2e-test fips`, or with `MM_ENV` parameters
- **Label trigger**: `E2E/Run`

These are separate from the automated workflow and can be used for custom test configurations or re-runs.
---

## Pipeline 2: Merge (`e2e-tests-on-merge.yml`)

Runs E2E tests after every push/merge to `master` or `release-*` branches.

**Trigger chain:**
```
Push to master/release-*
  ─► Argo Events (mattermost-platform-package sensor)
  ─► delivery-platform/.github/workflows/mattermost-platform-delivery.yaml
  ─► builds docker images (enterprise + fips)
  ─► trigger-e2e-tests job dispatches e2e-tests-on-merge.yml
```

**Jobs:** 4 (cypress + playwright) x (enterprise + fips), smoke skipped, full tests only

**Commit statuses (4 total):**

| Context | Description example |
|---------|-------------------|
| `e2e-test/cypress-full\|enterprise` | `100% passed (1313), 440 specs, image_tag:abc1234_def5678` |
| `e2e-test/cypress-full\|fips` | `100% passed (1313), 440 specs, image_tag:abc1234_def5678` |
| `e2e-test/playwright-full\|enterprise` | `100% passed (200), 50 specs, image_tag:abc1234_def5678` |
| `e2e-test/playwright-full\|fips` | `100% passed (200), 50 specs, image_tag:abc1234_def5678` |

**Manual trigger (CLI):**
```bash
# For master
gh workflow run e2e-tests-on-merge.yml \
  --repo mattermost/mattermost \
  --field branch="master" \
  --field commit_sha="<full_commit_sha>" \
  --field server_image_tag="<image_tag>"

# For release branch
gh workflow run e2e-tests-on-merge.yml \
  --repo mattermost/mattermost \
  --field branch="release-11.4" \
  --field commit_sha="<full_commit_sha>" \
  --field server_image_tag="<image_tag>"
```

**Manual trigger (GitHub UI):**
1. Go to **Actions** > **E2E Tests (master/release - merge)**
2. Click **Run workflow**
3. Fill in:
   - `branch`: `master` or `release-11.4`
   - `commit_sha`: full 40-char SHA
   - `server_image_tag`: e.g., `abc1234_def5678`
4. Click **Run workflow**

---

## Pipeline 3: Release Cut (`e2e-tests-on-release.yml`)

Runs E2E tests after a release cut against the published release images.

**Trigger chain:**
```
Manual release cut
  ─► delivery-platform/.github/workflows/release-mattermost-platform.yml
  ─► builds and publishes release docker images
  ─► trigger-e2e-tests job dispatches e2e-tests-on-release.yml
```

**Jobs:** 4 (cypress + playwright) x (enterprise + fips), smoke skipped, full tests only. Team edition planned for future.

**Commit statuses (4 total, 6 when team is enabled):**

Descriptions include alias tags showing which rolling docker tags point to the same image.

RC example (11.4.0-rc3):

| Context | Description example |
|---------|-------------------|
| `e2e-test/cypress-full\|enterprise` | `100% passed (1313), 440 specs, image_tag:11.4.0-rc3 (release-11.4, release-11)` |
| `e2e-test/cypress-full\|fips` | `100% passed (1313), 440 specs, image_tag:11.4.0-rc3 (release-11.4, release-11)` |
| `e2e-test/cypress-full\|team` (future) | `100% passed (1313), 440 specs, image_tag:11.4.0-rc3 (release-11.4, release-11)` |

Stable example (11.4.0), which includes the `MAJOR.MINOR` alias:

| Context | Description example |
|---------|-------------------|
| `e2e-test/cypress-full\|enterprise` | `100% passed (1313), 440 specs, image_tag:11.4.0 (release-11.4, release-11, 11.4)` |
| `e2e-test/cypress-full\|fips` | `100% passed (1313), 440 specs, image_tag:11.4.0 (release-11.4, release-11, 11.4)` |
| `e2e-test/cypress-full\|team` (future) | `100% passed (1313), 440 specs, image_tag:11.4.0 (release-11.4, release-11, 11.4)` |

**Manual trigger (CLI):**
```bash
gh workflow run e2e-tests-on-release.yml \
  --repo mattermost/mattermost \
  --field branch="release-11.4" \
  --field commit_sha="<full_commit_sha>" \
  --field server_image_tag="11.4.0" \
  --field server_image_aliases="release-11.4, release-11, 11.4"
```

**Manual trigger (GitHub UI):**
1. Go to **Actions** > **E2E Tests (release cut)**
2. Click **Run workflow**
3. Fill in:
   - `branch`: `release-11.4`
   - `commit_sha`: full 40-char SHA
   - `server_image_tag`: e.g., `11.4.0` or `11.4.0-rc3`
   - `server_image_aliases`: e.g., `release-11.4, release-11, 11.4` (optional)
4. Click **Run workflow**

---

## Commit Status Format

**Context name:** `e2e-test/<phase>|<edition>`

Where `<phase>` is `cypress-smoke`, `cypress-full`, `playwright-smoke`, or `playwright-full`.

**Description format:**
- All passed: `100% passed (<count>), <specs> specs, image_tag:<tag>[ (<aliases>)]`
- With failures: `<rate>% passed (<passed>/<total>), <failed> failed, <specs> specs, image_tag:<tag>[ (<aliases>)]`
- Pending: `tests running, image_tag:<tag>[ (<aliases>)]`

- Pass rate: `100%` if all pass, otherwise one decimal (e.g., `99.5%`)
- Aliases only present for release cuts (see the sketch after this list)
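A minimal TypeScript sketch of how a description in the format above can be assembled. This is an editorial illustration rather than the pipeline's actual code; the function name and inputs are invented:

```typescript
// Editorial illustration: assembles a commit status description following
// the documented format. Not the pipeline's actual code.
function statusDescription(
    passing: number,
    failed: number,
    specs: number,
    imageTag: string,
    aliases?: string,
): string {
    const total = passing + failed;
    const rate = total > 0 ? (passing * 100) / total : 0;
    const rateStr = rate === 100 ? "100%" : `${rate.toFixed(1)}%`;
    const aliasSuffix = aliases ? ` (${aliases})` : "";
    return rate === 100
        ? `${rateStr} passed (${passing}), ${specs} specs, image_tag:${imageTag}${aliasSuffix}`
        : `${rateStr} passed (${passing}/${total}), ${failed} failed, ${specs} specs, image_tag:${imageTag}${aliasSuffix}`;
}

// statusDescription(199, 1, 50, "abc1234")
// -> "99.5% passed (199/200), 1 failed, 50 specs, image_tag:abc1234"
```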
### Failure behavior

1. **Smoke test fails**: Full tests are skipped, only smoke commit status shows failure
2. **Full test fails**: Full commit status shows failure with pass rate
3. **Both pass**: Both smoke and full commit statuses show success
4. **No PR found** (PR pipeline only): Workflow fails immediately

---

## Smoke-then-Full Pattern

Each wrapper (Cypress/Playwright) follows this flow:

```
generate-build-variables (branch, build_id, server_image)
  ─► smoke tests (1 worker, minimal docker services)
  ─► if smoke passes ─► full tests (20 workers cypress / 1 worker playwright, all docker services)
  ─► report (aggregate results, update commit status)
```

### Test filtering

| Framework | Smoke | Full |
|-----------|-------|------|
| **Cypress** | `--stage=@prod --group=@smoke` | `--stage="@prod" --excludeGroup="@te_only,@cloud_only,@high_availability" --sortFirst=... --sortLast=...` |
| **Playwright** | `--grep @smoke` | `--grep-invert "@smoke\|@visual"` |

### Worker configuration

| Framework | Smoke Workers | Full Workers |
|-----------|---------------|--------------|
| **Cypress** | 1 | 20 |
| **Playwright** | 1 | 1 (uses internal parallelism via `PW_WORKERS`) |

### Docker services

| Test Phase | Docker Services |
|------------|-----------------|
| Smoke | `postgres inbucket` |
| Full | `postgres inbucket minio openldap elasticsearch keycloak` |
---

## Tagging Smoke Tests

### Cypress

Add `@smoke` to the Group comment at the top of spec files:

```javascript
// Stage: @prod
// Group: @channels @messaging @smoke
```

### Playwright

Add `@smoke` to the test tag option:

```typescript
test('critical login flow', {tag: ['@smoke', '@login']}, async ({pw}) => {
    // ...
});
```

---

## Shared Wrapper Inputs

The wrappers (`e2e-tests-cypress.yml`, `e2e-tests-playwright.yml`) accept these inputs:

| Input | Default | Description |
|-------|---------|-------------|
| `server_edition` | `enterprise` | Edition: `enterprise`, `fips`, or `team` |
| `server_image_repo` | `mattermostdevelopment` | Docker namespace: `mattermostdevelopment` or `mattermost` |
| `server_image_tag` | derived from `commit_sha` | Docker image tag |
| `server_image_aliases` | _(empty)_ | Alias tags shown in commit status description |
| `ref_branch` | _(empty)_ | Source branch name for webhook messages (e.g., `master` or `release-11.4`) |

The automation dashboard branch name is derived from context:
- PR: `server-pr-<pr_number>` (e.g., `server-pr-35205`)
- Master merge: `server-master-<image_tag>` (e.g., `server-master-abc1234_def5678`)
- Release merge: `server-release-<version>-<image_tag>` (e.g., `server-release-11.4-abc1234_def5678`)
- Fallback: `server-commit-<image_tag>`

The test type suffix (`-smoke` or `-full`) is appended by the template.

The server image is derived as:
```
{server_image_repo}/{edition_image_name}:{server_image_tag}
```

Where `edition_image_name` maps to (see the sketch after this list):
- `enterprise` -> `mattermost-enterprise-edition`
- `fips` -> `mattermost-enterprise-fips-edition`
- `team` -> `mattermost-team-edition`
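A small TypeScript sketch combining the image derivation and dashboard branch naming rules above. This is illustrative only, not the workflow code; the function names are invented:

```typescript
// Illustrative only: mirrors the mapping and naming rules documented above.
type Edition = "enterprise" | "fips" | "team";

const EDITION_IMAGE: Record<Edition, string> = {
    enterprise: "mattermost-enterprise-edition",
    fips: "mattermost-enterprise-fips-edition",
    team: "mattermost-team-edition",
};

function serverImage(repo: string, edition: Edition, tag: string): string {
    return `${repo}/${EDITION_IMAGE[edition]}:${tag}`;
}

function dashboardBranch(ctx: {
    prNumber?: string;
    refBranch?: string;
    imageTag: string;
}): string {
    if (ctx.prNumber) return `server-pr-${ctx.prNumber}`;
    if (ctx.refBranch === "master") return `server-master-${ctx.imageTag}`;
    if (ctx.refBranch?.startsWith("release-")) {
        const version = ctx.refBranch.replace("release-", "");
        return `server-release-${version}-${ctx.imageTag}`;
    }
    return `server-commit-${ctx.imageTag}`;
}

// serverImage("mattermostdevelopment", "enterprise", "abc1234")
// -> "mattermostdevelopment/mattermost-enterprise-edition:abc1234"
// dashboardBranch({prNumber: "35205", imageTag: "abc1234"})
// -> "server-pr-35205"
```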
---

## Webhook Message Format

After full tests complete, a webhook notification is sent to the configured `REPORT_WEBHOOK_URL`. The results line uses the same `commit_status_message` as the GitHub commit status. The source line varies by pipeline using `report_type` and `ref_branch`.

**Report types:** `PR`, `MASTER`, `RELEASE`, `RELEASE_CUT`

### PR

```
:open-pull-request: mattermost-pr-35205
:docker: mattermostdevelopment/mattermost-enterprise-edition:abc1234
100% passed (1313), 440 specs | full report
```

### Merge to master

```
:git_merge: abc1234 on master
:docker: mattermostdevelopment/mattermost-enterprise-edition:abc1234_def5678
100% passed (1313), 440 specs | full report
```

### Merge to release branch

```
:git_merge: abc1234 on release-11.4
:docker: mattermostdevelopment/mattermost-enterprise-edition:abc1234_def5678
100% passed (1313), 440 specs | full report
```

### Release cut

```
:github_round: abc1234 on release-11.4
:docker: mattermost/mattermost-enterprise-edition:11.4.0-rc3
100% passed (1313), 440 specs | full report
```

The commit short SHA links to the commit on GitHub. The PR number links to the pull request.
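A TypeScript sketch of how the source line could be selected from the report type, mirroring the example messages above. Illustrative only; the function name and signature are invented:

```typescript
// Illustrative only: picks the webhook "source" line prefix from the
// report type, matching the example messages above.
function sourceLine(
    reportType: "PR" | "MASTER" | "RELEASE" | "RELEASE_CUT",
    shortSha: string,
    refBranch: string,
    prNumber?: string,
): string {
    switch (reportType) {
        case "PR":
            return `:open-pull-request: mattermost-pr-${prNumber}`;
        case "MASTER":
            return `:git_merge: ${shortSha} on master`;
        case "RELEASE":
            return `:git_merge: ${shortSha} on ${refBranch}`;
        case "RELEASE_CUT":
            return `:github_round: ${shortSha} on ${refBranch}`;
    }
}
```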
---

## Related Files

- `e2e-tests/cypress/` - Cypress test suite
- `e2e-tests/playwright/` - Playwright test suite
- `e2e-tests/.ci/` - CI configuration and environment files
- `e2e-tests/Makefile` - Makefile with targets for running tests, generating cycles, and reporting
2 .github/workflows/api.yml
@ -20,7 +20,7 @@ jobs:
      - name: Checkout code
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
      - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
        with:
          node-version-file: .nvmrc
          cache: "npm"
24 .github/workflows/build-server-image.yml
@ -33,8 +33,8 @@ jobs:
|
      - name: buildenv/docker-login
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
          username: ${{ secrets.DOCKERHUB_DEV_USERNAME }}
          password: ${{ secrets.DOCKERHUB_DEV_TOKEN }}

      - name: buildenv/build
        uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
@ -44,16 +44,16 @@ jobs:
          load: true
          push: false
          pull: false
          tags: mattermost/mattermost-build-server:test
          tags: mattermostdevelopment/mattermost-build-server:test

      - name: buildenv/test
        run: |
          docker run --rm mattermost/mattermost-build-server:test /bin/sh -c "go version && node --version"
          docker run --rm mattermostdevelopment/mattermost-build-server:test /bin/sh -c "go version && node --version"

      - name: buildenv/calculate-golang-version
        id: go
        run: |
          GO_VERSION=$(docker run --rm mattermost/mattermost-build-server:test go version | awk '{print $3}' | sed 's/go//')
          GO_VERSION=$(docker run --rm mattermostdevelopment/mattermost-build-server:test go version | awk '{print $3}' | sed 's/go//')
          echo "GO_VERSION=${GO_VERSION}" >> "${GITHUB_OUTPUT}"

      - name: buildenv/push
@ -65,7 +65,7 @@ jobs:
          load: false
          push: true
          pull: true
          tags: mattermost/mattermost-build-server:${{ steps.go.outputs.GO_VERSION }}
          tags: mattermostdevelopment/mattermost-build-server:${{ steps.go.outputs.GO_VERSION }}

  build-image-fips:
    runs-on: ubuntu-22.04
@ -79,8 +79,8 @@ jobs:
      - name: buildenv/docker-login
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
          username: ${{ secrets.DOCKERHUB_DEV_USERNAME }}
          password: ${{ secrets.DOCKERHUB_DEV_TOKEN }}

      - name: buildenv/build
        uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
@ -90,16 +90,16 @@ jobs:
          load: true
          push: false
          pull: false
          tags: mattermost/mattermost-build-server-fips:test
          tags: mattermostdevelopment/mattermost-build-server-fips:test

      - name: buildenv/test
        run: |
          docker run --rm --entrypoint bash mattermost/mattermost-build-server-fips:test -c "go version && node --version"
          docker run --rm --entrypoint bash mattermostdevelopment/mattermost-build-server-fips:test -c "go version && node --version"

      - name: buildenv/calculate-golang-version
        id: go
        run: |
          GO_VERSION=$(docker run --rm --entrypoint bash mattermost/mattermost-build-server-fips:test -c "go version" | awk '{print $3}' | sed 's/go//')
          GO_VERSION=$(docker run --rm --entrypoint bash mattermostdevelopment/mattermost-build-server-fips:test -c "go version" | awk '{print $3}' | sed 's/go//')
          echo "GO_VERSION=${GO_VERSION}" >> "${GITHUB_OUTPUT}"

      - name: buildenv/push
@ -111,4 +111,4 @@ jobs:
          load: false
          push: true
          pull: true
          tags: mattermost/mattermost-build-server-fips:${{ steps.go.outputs.GO_VERSION }}
          tags: mattermostdevelopment/mattermost-build-server-fips:${{ steps.go.outputs.GO_VERSION }}
2 .github/workflows/e2e-fulltests-ci.yml
@ -263,6 +263,7 @@ jobs:
      status_check_context: "${{ needs.generate-test-variables.outputs.status_check_context }}"
      workers_number: "${{ needs.generate-test-variables.outputs.workers_number }}"
      testcase_failure_fatal: "${{ needs.generate-test-variables.outputs.TESTCASE_FAILURE_FATAL == 'true' }}"
      run_preflight_checks: false
      enable_reporting: true
      SERVER: "${{ needs.generate-test-variables.outputs.SERVER }}"
      SERVER_IMAGE: "${{ needs.generate-test-variables.outputs.SERVER_IMAGE }}"
@ -299,6 +300,7 @@ jobs:
      status_check_context: "${{ needs.generate-test-variables.outputs.status_check_context }}-playwright"
      workers_number: "1"
      testcase_failure_fatal: "${{ needs.generate-test-variables.outputs.TESTCASE_FAILURE_FATAL == 'true' }}"
      run_preflight_checks: false
      enable_reporting: true
      SERVER: "${{ needs.generate-test-variables.outputs.SERVER }}"
      SERVER_IMAGE: "${{ needs.generate-test-variables.outputs.SERVER_IMAGE }}"
69 .github/workflows/e2e-tests-check.yml
@ -1,69 +0,0 @@
---
name: E2E Tests Check
on:
  pull_request:
    paths:
      - "e2e-tests/**"
      - "webapp/platform/client/**"
      - "webapp/platform/types/**"
      - ".github/workflows/e2e-*.yml"

jobs:
  check:
    runs-on: ubuntu-24.04
    steps:
      - name: ci/checkout-repo
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0

      - name: ci/setup-node
        uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
        with:
          node-version-file: ".nvmrc"
          cache: npm
          cache-dependency-path: |
            e2e-tests/cypress/package-lock.json
            e2e-tests/playwright/package-lock.json

      # Cypress check
      - name: ci/cypress/npm-install
        working-directory: e2e-tests/cypress
        run: npm ci
      - name: ci/cypress/npm-check
        working-directory: e2e-tests/cypress
        run: npm run check

      # Playwright check
      - name: ci/get-webapp-node-modules
        working-directory: webapp
        run: make node_modules
      - name: ci/playwright/npm-install
        working-directory: e2e-tests/playwright
        run: npm ci
      - name: ci/playwright/npm-check
        working-directory: e2e-tests/playwright
        run: npm run check

      # Shell check
      - name: ci/shell-check
        working-directory: e2e-tests
        run: make check-shell

      # E2E-only check and trigger
      - name: ci/check-e2e-test-only
        id: check
        uses: ./.github/actions/check-e2e-test-only
        with:
          base_sha: ${{ github.event.pull_request.base.sha }}
          head_sha: ${{ github.event.pull_request.head.sha }}

      - name: ci/trigger-e2e-with-master-image
        if: steps.check.outputs.e2e_test_only == 'true'
        env:
          GH_TOKEN: ${{ github.token }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          IMAGE_TAG: ${{ steps.check.outputs.image_tag }}
        run: |
          echo "Triggering E2E tests for PR #${PR_NUMBER} with mattermostdevelopment/mattermost-enterprise-edition:${IMAGE_TAG}"
          gh workflow run e2e-tests-ci.yml --field pr_number="${PR_NUMBER}"
119 .github/workflows/e2e-tests-ci-template.yml
@ -20,6 +20,12 @@ on:
|
|||
type: boolean
|
||||
required: false
|
||||
default: true
|
||||
# NB: the following toggles will skip individual steps, rather than the whole jobs,
|
||||
# to let the dependent jobs run even if these are false
|
||||
run_preflight_checks:
|
||||
type: boolean
|
||||
required: false
|
||||
default: true
|
||||
enable_reporting:
|
||||
type: boolean
|
||||
required: false
|
||||
|
|
@ -101,7 +107,7 @@ jobs:
|
|||
update-initial-status:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
|
||||
- uses: mattermost/actions/delivery/update-commit-status@main
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
with:
|
||||
|
|
@ -111,6 +117,92 @@ jobs:
|
|||
description: E2E tests for mattermost server app
|
||||
status: pending
|
||||
|
||||
cypress-check:
|
||||
runs-on: ubuntu-24.04
|
||||
needs:
|
||||
- update-initial-status
|
||||
defaults:
|
||||
run:
|
||||
working-directory: e2e-tests/cypress
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
if: "${{ inputs.run_preflight_checks }}"
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit_sha }}
|
||||
fetch-depth: 0
|
||||
- name: ci/setup-node
|
||||
if: "${{ inputs.run_preflight_checks }}"
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
|
||||
id: setup_node
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: npm
|
||||
cache-dependency-path: "e2e-tests/cypress/package-lock.json"
|
||||
- name: ci/cypress/npm-install
|
||||
if: "${{ inputs.run_preflight_checks }}"
|
||||
run: |
|
||||
npm ci
|
||||
- name: ci/cypress/npm-check
|
||||
if: "${{ inputs.run_preflight_checks }}"
|
||||
run: |
|
||||
npm run check
|
||||
|
||||
playwright-check:
|
||||
runs-on: ubuntu-24.04
|
||||
needs:
|
||||
- update-initial-status
|
||||
defaults:
|
||||
run:
|
||||
working-directory: e2e-tests/playwright
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
if: "${{ inputs.run_preflight_checks }}"
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit_sha }}
|
||||
fetch-depth: 0
|
||||
- name: ci/setup-node
|
||||
if: "${{ inputs.run_preflight_checks }}"
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
|
||||
id: setup_node
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: npm
|
||||
cache-dependency-path: "e2e-tests/playwright/package-lock.json"
|
||||
- name: ci/get-webapp-node-modules
|
||||
if: "${{ inputs.run_preflight_checks }}"
|
||||
working-directory: webapp
|
||||
# requires build of client and types
|
||||
run: |
|
||||
make node_modules
|
||||
- name: ci/playwright/npm-install
|
||||
if: "${{ inputs.run_preflight_checks }}"
|
||||
run: |
|
||||
npm ci
|
||||
- name: ci/playwright/npm-check
|
||||
if: "${{ inputs.run_preflight_checks }}"
|
||||
run: |
|
||||
npm run check
|
||||
|
||||
shell-check:
|
||||
runs-on: ubuntu-24.04
|
||||
needs:
|
||||
- update-initial-status
|
||||
defaults:
|
||||
run:
|
||||
working-directory: e2e-tests
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
if: "${{ inputs.run_preflight_checks }}"
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit_sha }}
|
||||
fetch-depth: 0
|
||||
- name: ci/shell-check
|
||||
if: "${{ inputs.run_preflight_checks }}"
|
||||
run: make check-shell
|
||||
|
||||
generate-build-variables:
|
||||
runs-on: ubuntu-24.04
|
||||
needs:
|
||||
|
|
@ -154,7 +246,7 @@ jobs:
|
|||
ref: ${{ inputs.commit_sha }}
|
||||
fetch-depth: 0
|
||||
- name: ci/setup-node
|
||||
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
|
||||
id: setup_node
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
|
|
@ -198,6 +290,9 @@ jobs:
|
|||
runs-on: "${{ matrix.os }}"
|
||||
timeout-minutes: 120
|
||||
needs:
|
||||
- cypress-check
|
||||
- playwright-check
|
||||
- shell-check
|
||||
- generate-build-variables
|
||||
- generate-test-cycle
|
||||
defaults:
|
||||
|
|
@ -238,7 +333,7 @@ jobs:
|
|||
ln -sfn /usr/local/opt/docker-compose/bin/docker-compose ~/.docker/cli-plugins/docker-compose
|
||||
sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock
|
||||
- name: ci/setup-node
|
||||
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
|
||||
id: setup_node
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
|
|
@ -269,9 +364,7 @@ jobs:
|
|||
echo "RollingRelease: smoketest completed. Starting full E2E tests."
|
||||
fi
|
||||
make
|
||||
- name: ci/cloud-teardown
|
||||
if: always()
|
||||
run: make cloud-teardown
|
||||
make cloud-teardown
|
||||
- name: ci/e2e-test-store-results
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: always()
|
||||
|
|
@ -319,7 +412,7 @@ jobs:
|
|||
e2e-tests/${{ inputs.TEST }}/results/
|
||||
- name: ci/setup-node
|
||||
if: "${{ inputs.enable_reporting }}"
|
||||
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
|
||||
id: setup_node
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
|
|
@ -334,14 +427,12 @@ jobs:
|
|||
SERVER_IMAGE: "${{ inputs.SERVER_IMAGE }}"
|
||||
AUTOMATION_DASHBOARD_URL: "${{ secrets.AUTOMATION_DASHBOARD_URL }}"
|
||||
WEBHOOK_URL: "${{ secrets.REPORT_WEBHOOK_URL }}"
|
||||
PR_NUMBER: "${{ inputs.PR_NUMBER }}"
|
||||
BRANCH: "${{ inputs.BRANCH }}"
|
||||
BUILD_ID: "${{ inputs.BUILD_ID }}"
|
||||
MM_ENV: "${{ inputs.MM_ENV }}"
|
||||
TM4J_API_KEY: "${{ secrets.REPORT_TM4J_API_KEY }}"
|
||||
TEST_CYCLE_LINK_PREFIX: "${{ secrets.REPORT_TM4J_TEST_CYCLE_LINK_PREFIX }}"
|
||||
run: |
|
||||
echo "DEBUG: TYPE=${TYPE}, PR_NUMBER=${PR_NUMBER:-<not set>}"
|
||||
make report
|
||||
# The results dir may have been modified as part of the reporting: re-upload
|
||||
- name: ci/upload-report-global
|
||||
|
|
@ -378,6 +469,12 @@ jobs:
|
|||
|
||||
echo "📤 Uploading to s3://${AWS_S3_BUCKET}/${S3_PATH}/"
|
||||
|
||||
if [[ -d "$LOCAL_LOGS_PATH" ]]; then
|
||||
aws s3 sync "$LOCAL_LOGS_PATH" "s3://${AWS_S3_BUCKET}/${S3_PATH}/logs/" \
|
||||
--acl public-read \
|
||||
--cache-control "no-cache"
|
||||
fi
|
||||
|
||||
if [[ -d "$LOCAL_RESULTS_PATH" ]]; then
|
||||
aws s3 sync "$LOCAL_RESULTS_PATH" "s3://${AWS_S3_BUCKET}/${S3_PATH}/results/" \
|
||||
--acl public-read \
|
||||
|
|
@ -437,7 +534,7 @@ jobs:
|
|||
- test
|
||||
- report
|
||||
steps:
|
||||
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
|
||||
- uses: mattermost/actions/delivery/update-commit-status@main
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
with:
|
||||
|
|
@ -460,7 +557,7 @@ jobs:
|
|||
- test
|
||||
- report
|
||||
steps:
|
||||
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
|
||||
- uses: mattermost/actions/delivery/update-commit-status@main
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
with:
|
||||
|
|
|
|||
218 .github/workflows/e2e-tests-ci.yml
@ -1,207 +1,49 @@
|
|||
---
|
||||
name: E2E Tests (smoke-then-full)
|
||||
name: E2E Smoketests
|
||||
on:
|
||||
# Argo Events Trigger (automated):
|
||||
# - Triggered by: Enterprise CI/docker-image status check (success)
|
||||
# - Payload: { ref: "<branch>", inputs: { commit_sha: "<sha>" } }
|
||||
# - Uses commit-specific docker image
|
||||
# - Checks for relevant file changes before running tests
|
||||
#
|
||||
# Manual Trigger:
|
||||
# - Enter PR number only - commit SHA is resolved automatically from PR head
|
||||
# - Uses commit-specific docker image
|
||||
# - E2E tests always run (no file change check)
|
||||
#
|
||||
# For PRs, this workflow gets triggered from the Argo Events platform.
|
||||
# Check the following repo for details: https://github.com/mattermost/delivery-platform
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
pr_number:
|
||||
description: "PR number to test (for manual triggers)"
|
||||
type: string
|
||||
required: false
|
||||
commit_sha:
|
||||
description: "Commit SHA to test (for Argo Events)"
|
||||
type: string
|
||||
required: false
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
resolve-pr:
|
||||
generate-test-variables:
|
||||
runs-on: ubuntu-24.04
|
||||
outputs:
|
||||
PR_NUMBER: "${{ steps.resolve.outputs.PR_NUMBER }}"
|
||||
COMMIT_SHA: "${{ steps.resolve.outputs.COMMIT_SHA }}"
|
||||
SERVER_IMAGE_TAG: "${{ steps.e2e-check.outputs.image_tag }}"
|
||||
BRANCH: "${{ steps.generate.outputs.BRANCH }}"
|
||||
BUILD_ID: "${{ steps.generate.outputs.BUILD_ID }}"
|
||||
SERVER_IMAGE: "${{ steps.generate.outputs.SERVER_IMAGE }}"
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: ci/resolve-pr-and-commit
|
||||
id: resolve
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
INPUT_PR_NUMBER: ${{ inputs.pr_number }}
|
||||
INPUT_COMMIT_SHA: ${{ inputs.commit_sha }}
|
||||
- name: ci/smoke/generate-test-variables
|
||||
id: generate
|
||||
run: |
|
||||
# Validate inputs
|
||||
if [ -n "$INPUT_PR_NUMBER" ] && ! [[ "$INPUT_PR_NUMBER" =~ ^[0-9]+$ ]]; then
|
||||
echo "::error::Invalid PR number format. Must be numeric."
|
||||
exit 1
|
||||
fi
|
||||
if [ -n "$INPUT_COMMIT_SHA" ] && ! [[ "$INPUT_COMMIT_SHA" =~ ^[a-f0-9]{7,40}$ ]]; then
|
||||
echo "::error::Invalid commit SHA format. Must be 7-40 hex characters."
|
||||
exit 1
|
||||
fi
|
||||
### Populate support variables
|
||||
COMMIT_SHA=${{ inputs.commit_sha }}
|
||||
SERVER_IMAGE_TAG="${COMMIT_SHA::7}"
|
||||
|
||||
# Manual trigger: PR number provided, resolve commit SHA from PR head
|
||||
if [ -n "$INPUT_PR_NUMBER" ]; then
|
||||
echo "Manual trigger: resolving commit SHA from PR #${INPUT_PR_NUMBER}"
|
||||
PR_DATA=$(gh api "repos/${{ github.repository }}/pulls/${INPUT_PR_NUMBER}")
|
||||
COMMIT_SHA=$(echo "$PR_DATA" | jq -r '.head.sha')
|
||||
|
||||
if [ -z "$COMMIT_SHA" ] || [ "$COMMIT_SHA" = "null" ]; then
|
||||
echo "::error::Could not resolve commit SHA for PR #${INPUT_PR_NUMBER}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "PR_NUMBER=${INPUT_PR_NUMBER}" >> $GITHUB_OUTPUT
|
||||
echo "COMMIT_SHA=${COMMIT_SHA}" >> $GITHUB_OUTPUT
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Argo Events trigger: commit SHA provided, resolve PR number
|
||||
if [ -n "$INPUT_COMMIT_SHA" ]; then
|
||||
echo "Automated trigger: resolving PR number from commit ${INPUT_COMMIT_SHA}"
|
||||
PR_NUMBER=$(gh api "repos/${{ github.repository }}/commits/${INPUT_COMMIT_SHA}/pulls" \
|
||||
--jq '.[0].number // empty' 2>/dev/null || echo "")
|
||||
if [ -n "$PR_NUMBER" ]; then
|
||||
echo "Found PR #${PR_NUMBER} for commit ${INPUT_COMMIT_SHA}"
|
||||
echo "PR_NUMBER=${PR_NUMBER}" >> $GITHUB_OUTPUT
|
||||
echo "COMMIT_SHA=${INPUT_COMMIT_SHA}" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "::error::No PR found for commit ${INPUT_COMMIT_SHA}. This workflow is for PRs only."
|
||||
exit 1
|
||||
fi
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Neither provided
|
||||
echo "::error::Either pr_number or commit_sha must be provided"
|
||||
exit 1
|
||||
|
||||
- name: ci/check-e2e-test-only
|
||||
id: e2e-check
|
||||
uses: ./.github/actions/check-e2e-test-only
|
||||
with:
|
||||
pr_number: ${{ steps.resolve.outputs.PR_NUMBER }}
|
||||
|
||||
|
||||
check-changes:
|
||||
needs: resolve-pr
|
||||
runs-on: ubuntu-24.04
|
||||
outputs:
|
||||
should_run: "${{ steps.check.outputs.should_run }}"
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
if: inputs.commit_sha != ''
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ needs.resolve-pr.outputs.COMMIT_SHA }}
|
||||
fetch-depth: 0
|
||||
- name: ci/check-relevant-changes
|
||||
id: check
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
PR_NUMBER: ${{ needs.resolve-pr.outputs.PR_NUMBER }}
|
||||
COMMIT_SHA: ${{ needs.resolve-pr.outputs.COMMIT_SHA }}
|
||||
INPUT_PR_NUMBER: ${{ inputs.pr_number }}
|
||||
run: |
|
||||
# Manual trigger (pr_number provided): always run E2E tests
|
||||
if [ -n "$INPUT_PR_NUMBER" ]; then
|
||||
echo "Manual trigger detected - skipping file change check"
|
||||
echo "should_run=true" >> $GITHUB_OUTPUT
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Automated trigger (commit_sha provided): check for relevant file changes
|
||||
echo "Automated trigger detected - checking for relevant file changes"
|
||||
|
||||
# Get the base branch of the PR
|
||||
BASE_SHA=$(gh api "repos/${{ github.repository }}/pulls/${PR_NUMBER}" --jq '.base.sha')
|
||||
|
||||
# Get changed files between base and head
|
||||
CHANGED_FILES=$(git diff --name-only "${BASE_SHA}...${COMMIT_SHA}")
|
||||
|
||||
echo "Changed files:"
|
||||
echo "$CHANGED_FILES"
|
||||
|
||||
# Check for relevant changes
|
||||
SHOULD_RUN="false"
|
||||
|
||||
# Check for server Go files
|
||||
if echo "$CHANGED_FILES" | grep -qE '^server/.*\.go$'; then
|
||||
echo "Found server Go file changes"
|
||||
SHOULD_RUN="true"
|
||||
fi
|
||||
|
||||
# Check for webapp ts/js/tsx/jsx files
|
||||
if echo "$CHANGED_FILES" | grep -qE '^webapp/.*\.(ts|tsx|js|jsx)$'; then
|
||||
echo "Found webapp TypeScript/JavaScript file changes"
|
||||
SHOULD_RUN="true"
|
||||
fi
|
||||
|
||||
# Check for e2e-tests ts/js/tsx/jsx files
|
||||
if echo "$CHANGED_FILES" | grep -qE '^e2e-tests/.*\.(ts|tsx|js|jsx)$'; then
|
||||
echo "Found e2e-tests TypeScript/JavaScript file changes"
|
||||
SHOULD_RUN="true"
|
||||
fi
|
||||
|
||||
# Check for E2E-related CI workflow files
|
||||
if echo "$CHANGED_FILES" | grep -qE '^\.github/workflows/e2e-.*\.yml$'; then
|
||||
echo "Found E2E CI workflow file changes"
|
||||
SHOULD_RUN="true"
|
||||
fi
|
||||
|
||||
echo "should_run=${SHOULD_RUN}" >> $GITHUB_OUTPUT
|
||||
echo "Should run E2E tests: ${SHOULD_RUN}"
|
||||
|
||||
e2e-cypress:
|
||||
# BUILD_ID format: $pipelineID-$imageTag-$testType-$serverType-$serverEdition
|
||||
# Reference on BUILD_ID parsing: https://github.com/saturninoabril/automation-dashboard/blob/175891781bf1072c162c58c6ec0abfc5bcb3520e/lib/common_utils.ts#L3-L23
|
||||
BUILD_ID="${{ github.run_id }}_${{ github.run_attempt }}-${SERVER_IMAGE_TAG}-smoketest-onprem-ent"
|
||||
echo "BRANCH=server-smoketest-${COMMIT_SHA::7}" >> $GITHUB_OUTPUT
|
||||
echo "BUILD_ID=${BUILD_ID}" >> $GITHUB_OUTPUT
|
||||
echo "SERVER_IMAGE=mattermostdevelopment/mattermost-enterprise-edition:${SERVER_IMAGE_TAG}" >> $GITHUB_OUTPUT
|
||||
e2e-smoketest:
|
||||
needs:
|
||||
- resolve-pr
|
||||
- check-changes
|
||||
if: needs.check-changes.outputs.should_run == 'true'
|
||||
uses: ./.github/workflows/e2e-tests-cypress.yml
|
||||
- generate-test-variables
|
||||
uses: ./.github/workflows/e2e-tests-ci-template.yml
|
||||
with:
|
||||
commit_sha: "${{ needs.resolve-pr.outputs.COMMIT_SHA }}"
|
||||
server: "onprem"
|
||||
server_image_tag: "${{ needs.resolve-pr.outputs.SERVER_IMAGE_TAG }}"
|
||||
enable_reporting: true
|
||||
report_type: "PR"
|
||||
pr_number: "${{ needs.resolve-pr.outputs.PR_NUMBER }}"
|
||||
commit_sha: "${{ inputs.commit_sha }}"
|
||||
status_check_context: "E2E Tests/smoketests"
|
||||
TEST: cypress
|
||||
REPORT_TYPE: none
|
||||
SERVER: onprem
|
||||
BRANCH: "${{ needs.generate-test-variables.outputs.BRANCH }}"
|
||||
BUILD_ID: "${{ needs.generate-test-variables.outputs.BUILD_ID }}"
|
||||
SERVER_IMAGE: "${{ needs.generate-test-variables.outputs.SERVER_IMAGE }}"
|
||||
secrets:
|
||||
MM_LICENSE: "${{ secrets.MM_E2E_TEST_LICENSE_ONPREM_ENT }}"
|
||||
AUTOMATION_DASHBOARD_URL: "${{ secrets.MM_E2E_AUTOMATION_DASHBOARD_URL }}"
|
||||
AUTOMATION_DASHBOARD_TOKEN: "${{ secrets.MM_E2E_AUTOMATION_DASHBOARD_TOKEN }}"
|
||||
PUSH_NOTIFICATION_SERVER: "${{ secrets.MM_E2E_PUSH_NOTIFICATION_SERVER }}"
|
||||
REPORT_WEBHOOK_URL: "${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}"
|
||||
CWS_URL: "${{ secrets.MM_E2E_CWS_URL }}"
|
||||
CWS_EXTRA_HTTP_HEADERS: "${{ secrets.MM_E2E_CWS_EXTRA_HTTP_HEADERS }}"
|
||||
|
||||
e2e-playwright:
|
||||
needs:
|
||||
- resolve-pr
|
||||
- check-changes
|
||||
if: needs.check-changes.outputs.should_run == 'true'
|
||||
uses: ./.github/workflows/e2e-tests-playwright.yml
|
||||
with:
|
||||
commit_sha: "${{ needs.resolve-pr.outputs.COMMIT_SHA }}"
|
||||
server: "onprem"
|
||||
server_image_tag: "${{ needs.resolve-pr.outputs.SERVER_IMAGE_TAG }}"
|
||||
enable_reporting: true
|
||||
report_type: "PR"
|
||||
pr_number: "${{ needs.resolve-pr.outputs.PR_NUMBER }}"
|
||||
secrets:
|
||||
MM_LICENSE: "${{ secrets.MM_E2E_TEST_LICENSE_ONPREM_ENT }}"
|
||||
AWS_ACCESS_KEY_ID: "${{ secrets.CYPRESS_AWS_ACCESS_KEY_ID }}"
|
||||
AWS_SECRET_ACCESS_KEY: "${{ secrets.CYPRESS_AWS_SECRET_ACCESS_KEY }}"
|
||||
REPORT_WEBHOOK_URL: "${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}"
|
||||
|
|
|
|||
575 .github/workflows/e2e-tests-cypress-template.yml
@ -1,575 +0,0 @@
|
|||
---
|
||||
name: E2E Tests - Cypress Template
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
# Test configuration
|
||||
test_type:
|
||||
description: "Type of test run (smoke or full)"
|
||||
type: string
|
||||
required: true
|
||||
test_filter:
|
||||
description: "Test filter arguments"
|
||||
type: string
|
||||
required: true
|
||||
workers:
|
||||
description: "Number of parallel workers"
|
||||
type: number
|
||||
required: false
|
||||
default: 1
|
||||
timeout_minutes:
|
||||
description: "Job timeout in minutes"
|
||||
type: number
|
||||
required: false
|
||||
default: 30
|
||||
enabled_docker_services:
|
||||
description: "Space-separated list of docker services to enable"
|
||||
type: string
|
||||
required: false
|
||||
default: "postgres inbucket"
|
||||
|
||||
# Common build variables
|
||||
commit_sha:
|
||||
type: string
|
||||
required: true
|
||||
branch:
|
||||
type: string
|
||||
required: true
|
||||
build_id:
|
||||
type: string
|
||||
required: true
|
||||
server_image_tag:
|
||||
description: "Server image tag (e.g., master or short SHA)"
|
||||
type: string
|
||||
required: true
|
||||
server:
|
||||
type: string
|
||||
required: false
|
||||
default: onprem
|
||||
server_edition:
|
||||
description: "Server edition: enterprise (default), fips, or team"
|
||||
type: string
|
||||
required: false
|
||||
default: enterprise
|
||||
server_image_repo:
|
||||
description: "Docker registry: mattermostdevelopment (default) or mattermost"
|
||||
type: string
|
||||
required: false
|
||||
default: mattermostdevelopment
|
||||
server_image_aliases:
|
||||
description: "Comma-separated alias tags for description (e.g., 'release-11.4, release-11')"
|
||||
type: string
|
||||
required: false
|
||||
|
||||
# Reporting options
|
||||
enable_reporting:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
report_type:
|
||||
type: string
|
||||
required: false
|
||||
ref_branch:
|
||||
description: "Source branch name for webhook messages (e.g., 'master' or 'release-11.4')"
|
||||
type: string
|
||||
required: false
|
||||
pr_number:
|
||||
type: string
|
||||
required: false
|
||||
# Commit status configuration
|
||||
context_name:
|
||||
description: "GitHub commit status context name"
|
||||
type: string
|
||||
required: true
|
||||
|
||||
outputs:
|
||||
passed:
|
||||
description: "Number of passed tests"
|
||||
value: ${{ jobs.report.outputs.passed }}
|
||||
failed:
|
||||
description: "Number of failed tests"
|
||||
value: ${{ jobs.report.outputs.failed }}
|
||||
status_check_url:
|
||||
description: "URL to test results"
|
||||
value: ${{ jobs.generate-test-cycle.outputs.status_check_url }}
|
||||
|
||||
secrets:
|
||||
MM_LICENSE:
|
||||
required: false
|
||||
AUTOMATION_DASHBOARD_URL:
|
||||
required: false
|
||||
AUTOMATION_DASHBOARD_TOKEN:
|
||||
required: false
|
||||
PUSH_NOTIFICATION_SERVER:
|
||||
required: false
|
||||
REPORT_WEBHOOK_URL:
|
||||
required: false
|
||||
CWS_URL:
|
||||
required: false
|
||||
CWS_EXTRA_HTTP_HEADERS:
|
||||
required: false
|
||||
|
||||
env:
|
||||
SERVER_IMAGE: "${{ inputs.server_image_repo }}/${{ inputs.server_edition == 'fips' && 'mattermost-enterprise-fips-edition' || inputs.server_edition == 'team' && 'mattermost-team-edition' || 'mattermost-enterprise-edition' }}:${{ inputs.server_image_tag }}"
|
||||
|
||||
jobs:
|
||||
update-initial-status:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: ci/set-initial-status
|
||||
uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
with:
|
||||
repository_full_name: ${{ github.repository }}
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
context: ${{ inputs.context_name }}
|
||||
description: "tests running, image_tag:${{ inputs.server_image_tag }}${{ inputs.server_image_aliases && format(' ({0})', inputs.server_image_aliases) || '' }}"
|
||||
status: pending
|
||||
|
||||
generate-test-cycle:
|
||||
runs-on: ubuntu-24.04
|
||||
outputs:
|
||||
status_check_url: "${{ steps.generate-cycle.outputs.status_check_url }}"
|
||||
workers: "${{ steps.generate-workers.outputs.workers }}"
|
||||
steps:
|
||||
      - name: ci/generate-workers
        id: generate-workers
        run: |
          echo "workers=$(jq -nc '[range(${{ inputs.workers }})]')" >> $GITHUB_OUTPUT

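      # Note (illustrative): with workers=3 the jq expression above emits workers=[0,1,2];
      # the run-tests job consumes this array as its matrix of worker_index values.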
- name: ci/checkout-repo
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
ref: ${{ inputs.commit_sha }}
|
||||
fetch-depth: 0
|
||||
- name: ci/setup-node
|
||||
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: npm
|
||||
cache-dependency-path: "e2e-tests/cypress/package-lock.json"
|
||||
|
||||
- name: ci/generate-test-cycle
|
||||
id: generate-cycle
|
||||
working-directory: e2e-tests
|
||||
env:
|
||||
AUTOMATION_DASHBOARD_URL: "${{ secrets.AUTOMATION_DASHBOARD_URL }}"
|
||||
AUTOMATION_DASHBOARD_TOKEN: "${{ secrets.AUTOMATION_DASHBOARD_TOKEN }}"
|
||||
BRANCH: "${{ inputs.branch }}-${{ inputs.test_type }}"
|
||||
BUILD_ID: "${{ inputs.build_id }}"
|
||||
TEST: cypress
|
||||
TEST_FILTER: "${{ inputs.test_filter }}"
|
||||
        run: |
          set -e -o pipefail
          make generate-test-cycle | tee generate-test-cycle.out
          TEST_CYCLE_ID=$(sed -nE "s/^.*id: '([^']+)'.*$/\1/p" <generate-test-cycle.out)
          if [ -n "$TEST_CYCLE_ID" ]; then
            echo "status_check_url=https://automation-dashboard.vercel.app/cycles/${TEST_CYCLE_ID}" >> $GITHUB_OUTPUT
          else
            echo "status_check_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" >> $GITHUB_OUTPUT
          fi

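  # Note: the sed call scrapes the cycle id from the generate-test-cycle output; when no id is
  # printed (e.g. the automation dashboard secrets are unset), status_check_url falls back to
  # this workflow run's URL instead of a dashboard cycle page.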
run-tests:
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: ${{ fromJSON(inputs.timeout_minutes) }}
|
||||
continue-on-error: ${{ inputs.workers > 1 }}
|
||||
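    # continue-on-error is enabled only for multi-worker runs, so a single failing shard does
    # not fail the workflow on its own; the authoritative verdict comes from the
    # calculate-results and report jobs below.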
needs:
|
||||
- generate-test-cycle
|
||||
if: needs.generate-test-cycle.result == 'success'
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
worker_index: ${{ fromJSON(needs.generate-test-cycle.outputs.workers) }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: e2e-tests
|
||||
env:
|
||||
AUTOMATION_DASHBOARD_URL: "${{ secrets.AUTOMATION_DASHBOARD_URL }}"
|
||||
AUTOMATION_DASHBOARD_TOKEN: "${{ secrets.AUTOMATION_DASHBOARD_TOKEN }}"
|
||||
SERVER: "${{ inputs.server }}"
|
||||
MM_LICENSE: "${{ secrets.MM_LICENSE }}"
|
||||
ENABLED_DOCKER_SERVICES: "${{ inputs.enabled_docker_services }}"
|
||||
TEST: cypress
|
||||
TEST_FILTER: "${{ inputs.test_filter }}"
|
||||
BRANCH: "${{ inputs.branch }}-${{ inputs.test_type }}"
|
||||
BUILD_ID: "${{ inputs.build_id }}"
|
||||
CI_BASE_URL: "${{ inputs.test_type }}-test-${{ matrix.worker_index }}"
|
||||
CYPRESS_pushNotificationServer: "${{ secrets.PUSH_NOTIFICATION_SERVER }}"
|
||||
CWS_URL: "${{ secrets.CWS_URL }}"
|
||||
CWS_EXTRA_HTTP_HEADERS: "${{ secrets.CWS_EXTRA_HTTP_HEADERS }}"
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit_sha }}
|
||||
fetch-depth: 0
|
||||
- name: ci/setup-node
|
||||
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: npm
|
||||
cache-dependency-path: "e2e-tests/cypress/package-lock.json"
|
||||
- name: ci/run-tests
|
||||
run: |
|
||||
make cloud-init
|
||||
make
|
||||
- name: ci/cloud-teardown
|
||||
if: always()
|
||||
run: make cloud-teardown
|
||||
- name: ci/upload-results
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: always()
|
||||
with:
|
||||
name: cypress-${{ inputs.test_type }}-${{ inputs.server_edition }}-results-${{ matrix.worker_index }}
|
||||
path: |
|
||||
e2e-tests/cypress/logs/
|
||||
e2e-tests/cypress/results/
|
||||
retention-days: 5
|
||||
|
||||
calculate-results:
|
||||
runs-on: ubuntu-24.04
|
||||
needs:
|
||||
- generate-test-cycle
|
||||
- run-tests
|
||||
if: always() && needs.generate-test-cycle.result == 'success'
|
||||
outputs:
|
||||
passed: ${{ steps.calculate.outputs.passed }}
|
||||
failed: ${{ steps.calculate.outputs.failed }}
|
||||
pending: ${{ steps.calculate.outputs.pending }}
|
||||
total_specs: ${{ steps.calculate.outputs.total_specs }}
|
||||
failed_specs: ${{ steps.calculate.outputs.failed_specs }}
|
||||
failed_specs_count: ${{ steps.calculate.outputs.failed_specs_count }}
|
||||
failed_tests: ${{ steps.calculate.outputs.failed_tests }}
|
||||
commit_status_message: ${{ steps.calculate.outputs.commit_status_message }}
|
||||
total: ${{ steps.calculate.outputs.total }}
|
||||
pass_rate: ${{ steps.calculate.outputs.pass_rate }}
|
||||
color: ${{ steps.calculate.outputs.color }}
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- name: ci/download-results
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
pattern: cypress-${{ inputs.test_type }}-${{ inputs.server_edition }}-results-*
|
||||
path: e2e-tests/cypress/
|
||||
merge-multiple: true
|
||||
- name: ci/calculate
|
||||
id: calculate
|
||||
uses: ./.github/actions/calculate-cypress-results
|
||||
with:
|
||||
original-results-path: e2e-tests/cypress/results
|
||||
|
||||
run-failed-tests:
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: ${{ fromJSON(inputs.timeout_minutes) }}
|
||||
needs:
|
||||
- generate-test-cycle
|
||||
- run-tests
|
||||
- calculate-results
|
||||
    if: >-
      always() &&
      needs.calculate-results.result == 'success' &&
      needs.calculate-results.outputs.failed != '0' &&
      fromJSON(needs.calculate-results.outputs.failed_specs_count) <= 20
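    # The retest pass only runs when something failed and at most 20 spec files are involved;
    # above that threshold the retest is skipped and the original results stand (the cap is the
    # workflow's own guard, its rationale is not otherwise documented here).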
defaults:
|
||||
run:
|
||||
working-directory: e2e-tests
|
||||
env:
|
||||
AUTOMATION_DASHBOARD_URL: "${{ secrets.AUTOMATION_DASHBOARD_URL }}"
|
||||
AUTOMATION_DASHBOARD_TOKEN: "${{ secrets.AUTOMATION_DASHBOARD_TOKEN }}"
|
||||
SERVER: "${{ inputs.server }}"
|
||||
MM_LICENSE: "${{ secrets.MM_LICENSE }}"
|
||||
ENABLED_DOCKER_SERVICES: "${{ inputs.enabled_docker_services }}"
|
||||
TEST: cypress
|
||||
BRANCH: "${{ inputs.branch }}-${{ inputs.test_type }}-retest"
|
||||
BUILD_ID: "${{ inputs.build_id }}-retest"
|
||||
CYPRESS_pushNotificationServer: "${{ secrets.PUSH_NOTIFICATION_SERVER }}"
|
||||
CWS_URL: "${{ secrets.CWS_URL }}"
|
||||
CWS_EXTRA_HTTP_HEADERS: "${{ secrets.CWS_EXTRA_HTTP_HEADERS }}"
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit_sha }}
|
||||
fetch-depth: 0
|
||||
- name: ci/setup-node
|
||||
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: npm
|
||||
cache-dependency-path: "e2e-tests/cypress/package-lock.json"
|
||||
- name: ci/run-failed-specs
|
||||
env:
|
||||
SPEC_FILES: ${{ needs.calculate-results.outputs.failed_specs }}
|
||||
run: |
|
||||
echo "Retesting failed specs: $SPEC_FILES"
|
||||
make cloud-init
|
||||
make start-server run-specs
|
||||
- name: ci/cloud-teardown
|
||||
if: always()
|
||||
run: make cloud-teardown
|
||||
- name: ci/upload-retest-results
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: always()
|
||||
with:
|
||||
name: cypress-${{ inputs.test_type }}-${{ inputs.server_edition }}-retest-results
|
||||
path: |
|
||||
e2e-tests/cypress/logs/
|
||||
e2e-tests/cypress/results/
|
||||
retention-days: 5
|
||||
|
||||
report:
|
||||
runs-on: ubuntu-24.04
|
||||
needs:
|
||||
- generate-test-cycle
|
||||
- run-tests
|
||||
- calculate-results
|
||||
- run-failed-tests
|
||||
if: always() && needs.calculate-results.result == 'success'
|
||||
outputs:
|
||||
passed: "${{ steps.final-results.outputs.passed }}"
|
||||
failed: "${{ steps.final-results.outputs.failed }}"
|
||||
commit_status_message: "${{ steps.final-results.outputs.commit_status_message }}"
|
||||
defaults:
|
||||
run:
|
||||
working-directory: e2e-tests
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- name: ci/setup-node
|
||||
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: npm
|
||||
cache-dependency-path: "e2e-tests/cypress/package-lock.json"
|
||||
|
||||
# PATH A: run-failed-tests was skipped (no failures to retest)
|
||||
- name: ci/download-results-path-a
|
||||
if: needs.run-failed-tests.result == 'skipped'
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
pattern: cypress-${{ inputs.test_type }}-${{ inputs.server_edition }}-results-*
|
||||
path: e2e-tests/cypress/
|
||||
merge-multiple: true
|
||||
- name: ci/use-previous-calculation
|
||||
if: needs.run-failed-tests.result == 'skipped'
|
||||
id: use-previous
|
||||
run: |
|
||||
echo "passed=${{ needs.calculate-results.outputs.passed }}" >> $GITHUB_OUTPUT
|
||||
echo "failed=${{ needs.calculate-results.outputs.failed }}" >> $GITHUB_OUTPUT
|
||||
echo "pending=${{ needs.calculate-results.outputs.pending }}" >> $GITHUB_OUTPUT
|
||||
echo "total_specs=${{ needs.calculate-results.outputs.total_specs }}" >> $GITHUB_OUTPUT
|
||||
echo "failed_specs=${{ needs.calculate-results.outputs.failed_specs }}" >> $GITHUB_OUTPUT
|
||||
echo "failed_specs_count=${{ needs.calculate-results.outputs.failed_specs_count }}" >> $GITHUB_OUTPUT
|
||||
echo "commit_status_message=${{ needs.calculate-results.outputs.commit_status_message }}" >> $GITHUB_OUTPUT
|
||||
echo "total=${{ needs.calculate-results.outputs.total }}" >> $GITHUB_OUTPUT
|
||||
echo "pass_rate=${{ needs.calculate-results.outputs.pass_rate }}" >> $GITHUB_OUTPUT
|
||||
echo "color=${{ needs.calculate-results.outputs.color }}" >> $GITHUB_OUTPUT
|
||||
          {
            echo "failed_tests<<EOF"
            echo "${{ needs.calculate-results.outputs.failed_tests }}"
            echo "EOF"
          } >> $GITHUB_OUTPUT
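          # failed_tests can contain multiple markdown table rows, so it is written with the
          # <<EOF multiline syntax for GITHUB_OUTPUT instead of a plain key=value line.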
|
||||
# PATH B: run-failed-tests ran, need to merge and recalculate
|
||||
- name: ci/download-original-results
|
||||
if: needs.run-failed-tests.result != 'skipped'
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
pattern: cypress-${{ inputs.test_type }}-${{ inputs.server_edition }}-results-*
|
||||
path: e2e-tests/cypress/
|
||||
merge-multiple: true
|
||||
- name: ci/download-retest-results
|
||||
if: needs.run-failed-tests.result != 'skipped'
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: cypress-${{ inputs.test_type }}-${{ inputs.server_edition }}-retest-results
|
||||
path: e2e-tests/cypress/retest-results/
|
||||
- name: ci/calculate-results
|
||||
if: needs.run-failed-tests.result != 'skipped'
|
||||
id: recalculate
|
||||
uses: ./.github/actions/calculate-cypress-results
|
||||
with:
|
||||
original-results-path: e2e-tests/cypress/results
|
||||
retest-results-path: e2e-tests/cypress/retest-results/results
|
||||
|
||||
# Set final outputs from either path
|
||||
- name: ci/set-final-results
|
||||
id: final-results
|
||||
env:
|
||||
USE_PREVIOUS_FAILED_TESTS: ${{ steps.use-previous.outputs.failed_tests }}
|
||||
RECALCULATE_FAILED_TESTS: ${{ steps.recalculate.outputs.failed_tests }}
|
||||
run: |
|
||||
if [ "${{ needs.run-failed-tests.result }}" == "skipped" ]; then
|
||||
echo "passed=${{ steps.use-previous.outputs.passed }}" >> $GITHUB_OUTPUT
|
||||
echo "failed=${{ steps.use-previous.outputs.failed }}" >> $GITHUB_OUTPUT
|
||||
echo "pending=${{ steps.use-previous.outputs.pending }}" >> $GITHUB_OUTPUT
|
||||
echo "total_specs=${{ steps.use-previous.outputs.total_specs }}" >> $GITHUB_OUTPUT
|
||||
echo "failed_specs=${{ steps.use-previous.outputs.failed_specs }}" >> $GITHUB_OUTPUT
|
||||
echo "failed_specs_count=${{ steps.use-previous.outputs.failed_specs_count }}" >> $GITHUB_OUTPUT
|
||||
echo "commit_status_message=${{ steps.use-previous.outputs.commit_status_message }}" >> $GITHUB_OUTPUT
|
||||
echo "total=${{ steps.use-previous.outputs.total }}" >> $GITHUB_OUTPUT
|
||||
echo "pass_rate=${{ steps.use-previous.outputs.pass_rate }}" >> $GITHUB_OUTPUT
|
||||
echo "color=${{ steps.use-previous.outputs.color }}" >> $GITHUB_OUTPUT
|
||||
{
|
||||
echo "failed_tests<<EOF"
|
||||
echo "$USE_PREVIOUS_FAILED_TESTS"
|
||||
echo "EOF"
|
||||
} >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "passed=${{ steps.recalculate.outputs.passed }}" >> $GITHUB_OUTPUT
|
||||
echo "failed=${{ steps.recalculate.outputs.failed }}" >> $GITHUB_OUTPUT
|
||||
echo "pending=${{ steps.recalculate.outputs.pending }}" >> $GITHUB_OUTPUT
|
||||
echo "total_specs=${{ steps.recalculate.outputs.total_specs }}" >> $GITHUB_OUTPUT
|
||||
echo "failed_specs=${{ steps.recalculate.outputs.failed_specs }}" >> $GITHUB_OUTPUT
|
||||
echo "failed_specs_count=${{ steps.recalculate.outputs.failed_specs_count }}" >> $GITHUB_OUTPUT
|
||||
echo "commit_status_message=${{ steps.recalculate.outputs.commit_status_message }}" >> $GITHUB_OUTPUT
|
||||
echo "total=${{ steps.recalculate.outputs.total }}" >> $GITHUB_OUTPUT
|
||||
echo "pass_rate=${{ steps.recalculate.outputs.pass_rate }}" >> $GITHUB_OUTPUT
|
||||
echo "color=${{ steps.recalculate.outputs.color }}" >> $GITHUB_OUTPUT
|
||||
{
|
||||
echo "failed_tests<<EOF"
|
||||
echo "$RECALCULATE_FAILED_TESTS"
|
||||
echo "EOF"
|
||||
} >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: ci/upload-combined-results
|
||||
if: inputs.workers > 1
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: cypress-${{ inputs.test_type }}-${{ inputs.server_edition }}-results
|
||||
path: |
|
||||
e2e-tests/cypress/logs/
|
||||
e2e-tests/cypress/results/
|
||||
- name: ci/publish-report
|
||||
if: inputs.enable_reporting && env.REPORT_WEBHOOK_URL != ''
|
||||
env:
|
||||
REPORT_WEBHOOK_URL: ${{ secrets.REPORT_WEBHOOK_URL }}
|
||||
COMMIT_STATUS_MESSAGE: ${{ steps.final-results.outputs.commit_status_message }}
|
||||
COLOR: ${{ steps.final-results.outputs.color }}
|
||||
REPORT_URL: ${{ needs.generate-test-cycle.outputs.status_check_url }}
|
||||
TEST_TYPE: ${{ inputs.test_type }}
|
||||
REPORT_TYPE: ${{ inputs.report_type }}
|
||||
COMMIT_SHA: ${{ inputs.commit_sha }}
|
||||
REF_BRANCH: ${{ inputs.ref_branch }}
|
||||
PR_NUMBER: ${{ inputs.pr_number }}
|
||||
run: |
|
||||
# Capitalize test type
|
||||
TEST_TYPE_CAP=$(echo "$TEST_TYPE" | sed 's/.*/\u&/')
|
||||
|
||||
# Build source line based on report type
|
||||
COMMIT_SHORT="${COMMIT_SHA::7}"
|
||||
COMMIT_URL="https://github.com/${{ github.repository }}/commit/${COMMIT_SHA}"
|
||||
if [ "$REPORT_TYPE" = "RELEASE_CUT" ]; then
|
||||
SOURCE_LINE=":github_round: [${COMMIT_SHORT}](${COMMIT_URL}) on \`${REF_BRANCH}\`"
|
||||
elif [ "$REPORT_TYPE" = "MASTER" ] || [ "$REPORT_TYPE" = "RELEASE" ]; then
|
||||
SOURCE_LINE=":git_merge: [${COMMIT_SHORT}](${COMMIT_URL}) on \`${REF_BRANCH}\`"
|
||||
else
|
||||
SOURCE_LINE=":open-pull-request: [mattermost-pr-${PR_NUMBER}](https://github.com/${{ github.repository }}/pull/${PR_NUMBER})"
|
||||
fi
|
||||
|
||||
          # Build payload with attachments
          PAYLOAD=$(cat <<EOF
          {
            "username": "E2E Test",
            "icon_url": "https://mattermost.com/wp-content/uploads/2022/02/icon_WS.png",
            "attachments": [{
              "color": "${COLOR}",
              "text": "**Results - Cypress ${TEST_TYPE_CAP} Tests**\n\n${SOURCE_LINE}\n:docker: \`${{ env.SERVER_IMAGE }}\`\n${COMMIT_STATUS_MESSAGE} | [full report](${REPORT_URL})"
            }]
          }
          EOF
          )

          # Send to webhook
          curl -X POST -H "Content-Type: application/json" -d "$PAYLOAD" "$REPORT_WEBHOOK_URL"
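          # Note: the payload uses the Mattermost incoming-webhook attachment format; "color"
          # drives the attachment strip and the markdown in "text" renders in whichever channel
          # sits behind REPORT_WEBHOOK_URL.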
- name: ci/write-job-summary
|
||||
if: always()
|
||||
env:
|
||||
STATUS_CHECK_URL: ${{ needs.generate-test-cycle.outputs.status_check_url }}
|
||||
TEST_TYPE: ${{ inputs.test_type }}
|
||||
PASSED: ${{ steps.final-results.outputs.passed }}
|
||||
FAILED: ${{ steps.final-results.outputs.failed }}
|
||||
PENDING: ${{ steps.final-results.outputs.pending }}
|
||||
TOTAL_SPECS: ${{ steps.final-results.outputs.total_specs }}
|
||||
FAILED_SPECS_COUNT: ${{ steps.final-results.outputs.failed_specs_count }}
|
||||
FAILED_SPECS: ${{ steps.final-results.outputs.failed_specs }}
|
||||
COMMIT_STATUS_MESSAGE: ${{ steps.final-results.outputs.commit_status_message }}
|
||||
FAILED_TESTS: ${{ steps.final-results.outputs.failed_tests }}
|
||||
run: |
|
||||
{
|
||||
echo "## E2E Test Results - Cypress ${TEST_TYPE}"
|
||||
echo ""
|
||||
|
||||
if [ "$FAILED" = "0" ]; then
|
||||
echo "All tests passed: **${PASSED} passed**"
|
||||
else
|
||||
echo "<details>"
|
||||
echo "<summary>${FAILED} failed, ${PASSED} passed</summary>"
|
||||
echo ""
|
||||
echo "| Test | File |"
|
||||
echo "|------|------|"
|
||||
echo "${FAILED_TESTS}"
|
||||
echo "</details>"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "### Calculation Outputs"
|
||||
echo ""
|
||||
echo "| Output | Value |"
|
||||
echo "|--------|-------|"
|
||||
echo "| passed | ${PASSED} |"
|
||||
echo "| failed | ${FAILED} |"
|
||||
echo "| pending | ${PENDING} |"
|
||||
echo "| total_specs | ${TOTAL_SPECS} |"
|
||||
echo "| failed_specs_count | ${FAILED_SPECS_COUNT} |"
|
||||
echo "| commit_status_message | ${COMMIT_STATUS_MESSAGE} |"
|
||||
echo "| failed_specs | ${FAILED_SPECS:-none} |"
|
||||
|
||||
echo ""
|
||||
echo "---"
|
||||
echo "[View Full Report](${STATUS_CHECK_URL})"
|
||||
} >> $GITHUB_STEP_SUMMARY
|
||||
- name: ci/assert-results
|
||||
run: |
|
||||
[ "${{ steps.final-results.outputs.failed }}" = "0" ]
|
||||
|
||||
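  # The bare test command in ci/assert-results is what turns any remaining failure into a red
  # report job (and therefore a failing commit status): `[ ... = "0" ]` exits non-zero unless
  # the final failed count is exactly "0".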
update-success-status:
|
||||
runs-on: ubuntu-24.04
|
||||
if: always() && needs.report.result == 'success' && needs.calculate-results.result == 'success'
|
||||
needs:
|
||||
- generate-test-cycle
|
||||
- calculate-results
|
||||
- report
|
||||
steps:
|
||||
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
with:
|
||||
repository_full_name: ${{ github.repository }}
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
context: ${{ inputs.context_name }}
|
||||
description: "${{ needs.report.outputs.commit_status_message }}, image_tag:${{ inputs.server_image_tag }}${{ inputs.server_image_aliases && format(' ({0})', inputs.server_image_aliases) || '' }}"
|
||||
status: success
|
||||
target_url: ${{ needs.generate-test-cycle.outputs.status_check_url }}
|
||||
|
||||
update-failure-status:
|
||||
runs-on: ubuntu-24.04
|
||||
if: always() && (needs.report.result != 'success' || needs.calculate-results.result != 'success')
|
||||
needs:
|
||||
- generate-test-cycle
|
||||
- calculate-results
|
||||
- report
|
||||
steps:
|
||||
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
with:
|
||||
repository_full_name: ${{ github.repository }}
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
context: ${{ inputs.context_name }}
|
||||
description: "${{ needs.report.outputs.commit_status_message }}, image_tag:${{ inputs.server_image_tag }}${{ inputs.server_image_aliases && format(' ({0})', inputs.server_image_aliases) || '' }}"
|
||||
status: failure
|
||||
target_url: ${{ needs.generate-test-cycle.outputs.status_check_url }}
|
||||
.github/workflows/e2e-tests-cypress.yml (vendored), 194 lines
@@ -1,194 +0,0 @@
---
|
||||
name: E2E Tests - Cypress
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
commit_sha:
|
||||
type: string
|
||||
required: true
|
||||
enable_reporting:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
server:
|
||||
type: string
|
||||
required: false
|
||||
default: onprem
|
||||
report_type:
|
||||
type: string
|
||||
required: false
|
||||
pr_number:
|
||||
type: string
|
||||
required: false
|
||||
server_image_tag:
|
||||
type: string
|
||||
required: false
|
||||
description: "Server image tag (e.g., master or short SHA)"
|
||||
server_edition:
|
||||
type: string
|
||||
required: false
|
||||
description: "Server edition: enterprise (default), fips, or team"
|
||||
server_image_repo:
|
||||
type: string
|
||||
required: false
|
||||
default: mattermostdevelopment
|
||||
description: "Docker registry: mattermostdevelopment (default) or mattermost"
|
||||
skip_smoke:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
description: "Skip smoke tests and run full tests directly"
|
||||
server_image_aliases:
|
||||
type: string
|
||||
required: false
|
||||
description: "Comma-separated alias tags for context name (e.g., 'release-11.4, release-11')"
|
||||
ref_branch:
|
||||
type: string
|
||||
required: false
|
||||
description: "Source branch name for webhook messages (e.g., 'master' or 'release-11.4')"
|
||||
secrets:
|
||||
MM_LICENSE:
|
||||
required: false
|
||||
AUTOMATION_DASHBOARD_URL:
|
||||
required: false
|
||||
AUTOMATION_DASHBOARD_TOKEN:
|
||||
required: false
|
||||
PUSH_NOTIFICATION_SERVER:
|
||||
required: false
|
||||
REPORT_WEBHOOK_URL:
|
||||
required: false
|
||||
CWS_URL:
|
||||
required: false
|
||||
CWS_EXTRA_HTTP_HEADERS:
|
||||
required: false
|
||||
|
||||
jobs:
|
||||
generate-build-variables:
|
||||
runs-on: ubuntu-24.04
|
||||
outputs:
|
||||
branch: "${{ steps.build-vars.outputs.branch }}"
|
||||
build_id: "${{ steps.build-vars.outputs.build_id }}"
|
||||
server_image_tag: "${{ steps.build-vars.outputs.server_image_tag }}"
|
||||
server_image: "${{ steps.build-vars.outputs.server_image }}"
|
||||
steps:
|
||||
- name: ci/generate-build-variables
|
||||
id: build-vars
|
||||
env:
|
||||
COMMIT_SHA: ${{ inputs.commit_sha }}
|
||||
PR_NUMBER: ${{ inputs.pr_number }}
|
||||
INPUT_SERVER_IMAGE_TAG: ${{ inputs.server_image_tag }}
|
||||
RUN_ID: ${{ github.run_id }}
|
||||
RUN_ATTEMPT: ${{ github.run_attempt }}
|
||||
        run: |
          # Use provided server_image_tag or derive from commit SHA
          if [ -n "$INPUT_SERVER_IMAGE_TAG" ]; then
            SERVER_IMAGE_TAG="$INPUT_SERVER_IMAGE_TAG"
          else
            SERVER_IMAGE_TAG="${COMMIT_SHA::7}"
          fi

          # Validate server_image_tag format (alphanumeric, dots, hyphens, underscores)
          if ! [[ "$SERVER_IMAGE_TAG" =~ ^[a-zA-Z0-9._-]+$ ]]; then
            echo "::error::Invalid server_image_tag format: ${SERVER_IMAGE_TAG}"
            exit 1
          fi
          echo "server_image_tag=${SERVER_IMAGE_TAG}" >> $GITHUB_OUTPUT

          # Generate branch name
          REF_BRANCH="${{ inputs.ref_branch }}"
          if [ -n "$PR_NUMBER" ]; then
            echo "branch=server-pr-${PR_NUMBER}" >> $GITHUB_OUTPUT
          elif [ -n "$REF_BRANCH" ]; then
            echo "branch=server-${REF_BRANCH}-${SERVER_IMAGE_TAG}" >> $GITHUB_OUTPUT
          else
            echo "branch=server-commit-${SERVER_IMAGE_TAG}" >> $GITHUB_OUTPUT
          fi

          # Determine server image name
          EDITION="${{ inputs.server_edition }}"
          REPO="${{ inputs.server_image_repo }}"
          REPO="${REPO:-mattermostdevelopment}"
          case "$EDITION" in
            fips) IMAGE_NAME="mattermost-enterprise-fips-edition" ;;
            team) IMAGE_NAME="mattermost-team-edition" ;;
            *) IMAGE_NAME="mattermost-enterprise-edition" ;;
          esac
          SERVER_IMAGE="${REPO}/${IMAGE_NAME}:${SERVER_IMAGE_TAG}"
          echo "server_image=${SERVER_IMAGE}" >> $GITHUB_OUTPUT

          # Validate server_image_aliases format if provided
          ALIASES="${{ inputs.server_image_aliases }}"
          if [ -n "$ALIASES" ] && ! [[ "$ALIASES" =~ ^[a-zA-Z0-9._,\ -]+$ ]]; then
            echo "::error::Invalid server_image_aliases format: ${ALIASES}"
            exit 1
          fi

          # Generate build ID
          if [ -n "$EDITION" ] && [ "$EDITION" != "enterprise" ]; then
            echo "build_id=${RUN_ID}_${RUN_ATTEMPT}-${SERVER_IMAGE_TAG}-cypress-onprem-${EDITION}" >> $GITHUB_OUTPUT
          else
            echo "build_id=${RUN_ID}_${RUN_ATTEMPT}-${SERVER_IMAGE_TAG}-cypress-onprem-ent" >> $GITHUB_OUTPUT
          fi

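  # Example (illustrative): for PR 12345 at commit abc1234def with the default enterprise
  # edition, this step emits branch=server-pr-12345, server_image_tag=abc1234 and
  # build_id=<run_id>_<run_attempt>-abc1234-cypress-onprem-ent.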
cypress-smoke:
|
||||
if: ${{ !inputs.skip_smoke }}
|
||||
needs:
|
||||
- generate-build-variables
|
||||
uses: ./.github/workflows/e2e-tests-cypress-template.yml
|
||||
with:
|
||||
test_type: smoke
|
||||
test_filter: "--stage=@prod --group=@smoke"
|
||||
workers: 1
|
||||
timeout_minutes: 30
|
||||
enabled_docker_services: "postgres inbucket"
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
branch: ${{ needs.generate-build-variables.outputs.branch }}
|
||||
build_id: ${{ needs.generate-build-variables.outputs.build_id }}
|
||||
server_image_tag: ${{ needs.generate-build-variables.outputs.server_image_tag }}
|
||||
server_edition: ${{ inputs.server_edition }}
|
||||
server_image_repo: ${{ inputs.server_image_repo }}
|
||||
server_image_aliases: ${{ inputs.server_image_aliases }}
|
||||
server: ${{ inputs.server }}
|
||||
context_name: "e2e-test/cypress-smoke/${{ inputs.server_edition || 'enterprise' }}"
|
||||
secrets:
|
||||
MM_LICENSE: ${{ secrets.MM_LICENSE }}
|
||||
AUTOMATION_DASHBOARD_URL: ${{ secrets.AUTOMATION_DASHBOARD_URL }}
|
||||
AUTOMATION_DASHBOARD_TOKEN: ${{ secrets.AUTOMATION_DASHBOARD_TOKEN }}
|
||||
PUSH_NOTIFICATION_SERVER: ${{ secrets.PUSH_NOTIFICATION_SERVER }}
|
||||
CWS_URL: ${{ secrets.CWS_URL }}
|
||||
CWS_EXTRA_HTTP_HEADERS: ${{ secrets.CWS_EXTRA_HTTP_HEADERS }}
|
||||
|
||||
# Full Tests (runs if smoke passed or skipped)
|
||||
cypress-full:
|
||||
needs:
|
||||
- cypress-smoke
|
||||
- generate-build-variables
|
||||
if: always() && (needs.cypress-smoke.result == 'skipped' || needs.cypress-smoke.outputs.failed == '0')
|
||||
uses: ./.github/workflows/e2e-tests-cypress-template.yml
|
||||
with:
|
||||
test_type: full
|
||||
test_filter: '--stage="@prod" --excludeGroup="@te_only,@cloud_only,@high_availability" --sortFirst="@compliance_export,@elasticsearch,@ldap_group,@ldap" --sortLast="@saml,@keycloak,@plugin,@plugins_uninstall,@mfa,@license_removal"'
|
||||
workers: 20
|
||||
timeout_minutes: 60
|
||||
enabled_docker_services: "postgres inbucket minio openldap elasticsearch keycloak"
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
branch: ${{ needs.generate-build-variables.outputs.branch }}
|
||||
build_id: ${{ needs.generate-build-variables.outputs.build_id }}
|
||||
server_image_tag: ${{ needs.generate-build-variables.outputs.server_image_tag }}
|
||||
server_edition: ${{ inputs.server_edition }}
|
||||
server_image_repo: ${{ inputs.server_image_repo }}
|
||||
server_image_aliases: ${{ inputs.server_image_aliases }}
|
||||
server: ${{ inputs.server }}
|
||||
enable_reporting: ${{ inputs.enable_reporting }}
|
||||
report_type: ${{ inputs.report_type }}
|
||||
ref_branch: ${{ inputs.ref_branch }}
|
||||
pr_number: ${{ inputs.pr_number }}
|
||||
context_name: "e2e-test/cypress-full/${{ inputs.server_edition || 'enterprise' }}"
|
||||
secrets:
|
||||
MM_LICENSE: ${{ secrets.MM_LICENSE }}
|
||||
AUTOMATION_DASHBOARD_URL: ${{ secrets.AUTOMATION_DASHBOARD_URL }}
|
||||
AUTOMATION_DASHBOARD_TOKEN: ${{ secrets.AUTOMATION_DASHBOARD_TOKEN }}
|
||||
PUSH_NOTIFICATION_SERVER: ${{ secrets.PUSH_NOTIFICATION_SERVER }}
|
||||
REPORT_WEBHOOK_URL: ${{ secrets.REPORT_WEBHOOK_URL }}
|
||||
CWS_URL: ${{ secrets.CWS_URL }}
|
||||
CWS_EXTRA_HTTP_HEADERS: ${{ secrets.CWS_EXTRA_HTTP_HEADERS }}
|
||||
.github/workflows/e2e-tests-on-merge.yml (vendored), 134 lines
@@ -1,134 +0,0 @@
---
|
||||
name: E2E Tests (master/release - merge)
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
branch:
|
||||
type: string
|
||||
required: true
|
||||
description: "Branch name (e.g., 'master' or 'release-11.4')"
|
||||
commit_sha:
|
||||
type: string
|
||||
required: true
|
||||
description: "Commit SHA to test"
|
||||
server_image_tag:
|
||||
type: string
|
||||
required: true
|
||||
description: "Docker image tag (e.g., 'abc1234_def5678' or 'master')"
|
||||
|
||||
jobs:
|
||||
generate-build-variables:
|
||||
runs-on: ubuntu-24.04
|
||||
outputs:
|
||||
report_type: "${{ steps.vars.outputs.report_type }}"
|
||||
ref_branch: "${{ steps.vars.outputs.ref_branch }}"
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
ref: ${{ inputs.branch }}
|
||||
fetch-depth: 50
|
||||
- name: ci/generate-variables
|
||||
id: vars
|
||||
env:
|
||||
BRANCH: ${{ inputs.branch }}
|
||||
COMMIT_SHA: ${{ inputs.commit_sha }}
|
||||
        run: |
          # Strip refs/heads/ prefix if present
          BRANCH="${BRANCH#refs/heads/}"

          # Validate branch is master or release-X.Y
          if [[ "$BRANCH" == "master" ]]; then
            echo "report_type=MASTER" >> $GITHUB_OUTPUT
          elif [[ "$BRANCH" =~ ^release-[0-9]+\.[0-9]+$ ]]; then
            echo "report_type=RELEASE" >> $GITHUB_OUTPUT
          else
            echo "::error::Branch ${BRANCH} must be 'master' or 'release-X.Y' format."
            exit 1
          fi

          echo "ref_branch=${BRANCH}" >> $GITHUB_OUTPUT

          # Validate commit exists on the branch
          if ! git merge-base --is-ancestor "$COMMIT_SHA" HEAD; then
            echo "::error::Commit ${COMMIT_SHA} is not on branch ${BRANCH}."
            exit 1
          fi

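  # Note: `git merge-base --is-ancestor` exits non-zero when COMMIT_SHA is not reachable from
  # the checked-out branch head, so the guard above rejects a commit/branch mismatch; with
  # fetch-depth: 50 only reasonably recent commits can be validated this way.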
# Enterprise Edition
|
||||
e2e-cypress:
|
||||
needs: generate-build-variables
|
||||
uses: ./.github/workflows/e2e-tests-cypress.yml
|
||||
with:
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
server_image_tag: ${{ inputs.server_image_tag }}
|
||||
skip_smoke: true
|
||||
server: onprem
|
||||
enable_reporting: true
|
||||
report_type: ${{ needs.generate-build-variables.outputs.report_type }}
|
||||
ref_branch: ${{ needs.generate-build-variables.outputs.ref_branch }}
|
||||
secrets:
|
||||
MM_LICENSE: "${{ secrets.MM_E2E_TEST_LICENSE_ONPREM_ENT }}"
|
||||
AUTOMATION_DASHBOARD_URL: "${{ secrets.MM_E2E_AUTOMATION_DASHBOARD_URL }}"
|
||||
AUTOMATION_DASHBOARD_TOKEN: "${{ secrets.MM_E2E_AUTOMATION_DASHBOARD_TOKEN }}"
|
||||
PUSH_NOTIFICATION_SERVER: "${{ secrets.MM_E2E_PUSH_NOTIFICATION_SERVER }}"
|
||||
REPORT_WEBHOOK_URL: "${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}"
|
||||
CWS_URL: "${{ secrets.MM_E2E_CWS_URL }}"
|
||||
CWS_EXTRA_HTTP_HEADERS: "${{ secrets.MM_E2E_CWS_EXTRA_HTTP_HEADERS }}"
|
||||
|
||||
e2e-playwright:
|
||||
needs: generate-build-variables
|
||||
uses: ./.github/workflows/e2e-tests-playwright.yml
|
||||
with:
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
server_image_tag: ${{ inputs.server_image_tag }}
|
||||
skip_smoke: true
|
||||
server: onprem
|
||||
enable_reporting: true
|
||||
report_type: ${{ needs.generate-build-variables.outputs.report_type }}
|
||||
ref_branch: ${{ needs.generate-build-variables.outputs.ref_branch }}
|
||||
secrets:
|
||||
MM_LICENSE: "${{ secrets.MM_E2E_TEST_LICENSE_ONPREM_ENT }}"
|
||||
AWS_ACCESS_KEY_ID: "${{ secrets.CYPRESS_AWS_ACCESS_KEY_ID }}"
|
||||
AWS_SECRET_ACCESS_KEY: "${{ secrets.CYPRESS_AWS_SECRET_ACCESS_KEY }}"
|
||||
REPORT_WEBHOOK_URL: "${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}"
|
||||
|
||||
# Enterprise FIPS Edition
|
||||
e2e-cypress-fips:
|
||||
needs: generate-build-variables
|
||||
uses: ./.github/workflows/e2e-tests-cypress.yml
|
||||
with:
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
server_image_tag: ${{ inputs.server_image_tag }}
|
||||
server_edition: fips
|
||||
skip_smoke: true
|
||||
server: onprem
|
||||
enable_reporting: true
|
||||
report_type: ${{ needs.generate-build-variables.outputs.report_type }}
|
||||
ref_branch: ${{ needs.generate-build-variables.outputs.ref_branch }}
|
||||
secrets:
|
||||
MM_LICENSE: "${{ secrets.MM_E2E_TEST_LICENSE_ONPREM_ENT }}"
|
||||
AUTOMATION_DASHBOARD_URL: "${{ secrets.MM_E2E_AUTOMATION_DASHBOARD_URL }}"
|
||||
AUTOMATION_DASHBOARD_TOKEN: "${{ secrets.MM_E2E_AUTOMATION_DASHBOARD_TOKEN }}"
|
||||
PUSH_NOTIFICATION_SERVER: "${{ secrets.MM_E2E_PUSH_NOTIFICATION_SERVER }}"
|
||||
REPORT_WEBHOOK_URL: "${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}"
|
||||
CWS_URL: "${{ secrets.MM_E2E_CWS_URL }}"
|
||||
CWS_EXTRA_HTTP_HEADERS: "${{ secrets.MM_E2E_CWS_EXTRA_HTTP_HEADERS }}"
|
||||
|
||||
e2e-playwright-fips:
|
||||
needs: generate-build-variables
|
||||
uses: ./.github/workflows/e2e-tests-playwright.yml
|
||||
with:
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
server_image_tag: ${{ inputs.server_image_tag }}
|
||||
server_edition: fips
|
||||
skip_smoke: true
|
||||
server: onprem
|
||||
enable_reporting: true
|
||||
report_type: ${{ needs.generate-build-variables.outputs.report_type }}
|
||||
ref_branch: ${{ needs.generate-build-variables.outputs.ref_branch }}
|
||||
secrets:
|
||||
MM_LICENSE: "${{ secrets.MM_E2E_TEST_LICENSE_ONPREM_ENT }}"
|
||||
AWS_ACCESS_KEY_ID: "${{ secrets.CYPRESS_AWS_ACCESS_KEY_ID }}"
|
||||
AWS_SECRET_ACCESS_KEY: "${{ secrets.CYPRESS_AWS_SECRET_ACCESS_KEY }}"
|
||||
REPORT_WEBHOOK_URL: "${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}"
|
||||
.github/workflows/e2e-tests-on-release.yml (vendored), 137 lines
@@ -1,137 +0,0 @@
---
|
||||
name: E2E Tests (release cut)
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
branch:
|
||||
type: string
|
||||
required: true
|
||||
description: "Release branch (e.g., 'release-11.4')"
|
||||
commit_sha:
|
||||
type: string
|
||||
required: true
|
||||
description: "Commit SHA to test"
|
||||
server_image_tag:
|
||||
type: string
|
||||
required: true
|
||||
description: "Docker image tag (e.g., '11.4.0', '11.4.0-rc3', or 'release-11.4')"
|
||||
server_image_aliases:
|
||||
type: string
|
||||
required: false
|
||||
description: "Comma-separated alias tags (e.g., 'release-11.4, release-11')"
|
||||
|
||||
jobs:
|
||||
validate:
|
||||
runs-on: ubuntu-24.04
|
||||
outputs:
|
||||
ref_branch: "${{ steps.check.outputs.ref_branch }}"
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
ref: ${{ inputs.branch }}
|
||||
fetch-depth: 50
|
||||
- name: ci/validate-inputs
|
||||
id: check
|
||||
env:
|
||||
BRANCH: ${{ inputs.branch }}
|
||||
COMMIT_SHA: ${{ inputs.commit_sha }}
|
||||
        run: |
          # Strip refs/heads/ prefix if present
          BRANCH="${BRANCH#refs/heads/}"

          if ! [[ "$BRANCH" =~ ^release-[0-9]+\.[0-9]+$ ]]; then
            echo "::error::Branch ${BRANCH} must be 'release-X.Y' format."
            exit 1
          elif ! git merge-base --is-ancestor "$COMMIT_SHA" HEAD; then
            echo "::error::Commit ${COMMIT_SHA} is not on branch ${BRANCH}."
            exit 1
          fi

          echo "ref_branch=${BRANCH}" >> $GITHUB_OUTPUT

# Enterprise Edition
|
||||
e2e-cypress:
|
||||
needs: validate
|
||||
uses: ./.github/workflows/e2e-tests-cypress.yml
|
||||
with:
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
server_image_tag: ${{ inputs.server_image_tag }}
|
||||
server_image_repo: mattermost
|
||||
server_image_aliases: ${{ inputs.server_image_aliases }}
|
||||
skip_smoke: true
|
||||
server: onprem
|
||||
enable_reporting: true
|
||||
report_type: RELEASE_CUT
|
||||
ref_branch: ${{ needs.validate.outputs.ref_branch }}
|
||||
secrets:
|
||||
MM_LICENSE: "${{ secrets.MM_E2E_TEST_LICENSE_ONPREM_ENT }}"
|
||||
AUTOMATION_DASHBOARD_URL: "${{ secrets.MM_E2E_AUTOMATION_DASHBOARD_URL }}"
|
||||
AUTOMATION_DASHBOARD_TOKEN: "${{ secrets.MM_E2E_AUTOMATION_DASHBOARD_TOKEN }}"
|
||||
PUSH_NOTIFICATION_SERVER: "${{ secrets.MM_E2E_PUSH_NOTIFICATION_SERVER }}"
|
||||
REPORT_WEBHOOK_URL: "${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}"
|
||||
CWS_URL: "${{ secrets.MM_E2E_CWS_URL }}"
|
||||
CWS_EXTRA_HTTP_HEADERS: "${{ secrets.MM_E2E_CWS_EXTRA_HTTP_HEADERS }}"
|
||||
|
||||
e2e-playwright:
|
||||
needs: validate
|
||||
uses: ./.github/workflows/e2e-tests-playwright.yml
|
||||
with:
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
server_image_tag: ${{ inputs.server_image_tag }}
|
||||
server_image_repo: mattermost
|
||||
server_image_aliases: ${{ inputs.server_image_aliases }}
|
||||
skip_smoke: true
|
||||
server: onprem
|
||||
enable_reporting: true
|
||||
report_type: RELEASE_CUT
|
||||
ref_branch: ${{ needs.validate.outputs.ref_branch }}
|
||||
secrets:
|
||||
MM_LICENSE: "${{ secrets.MM_E2E_TEST_LICENSE_ONPREM_ENT }}"
|
||||
AWS_ACCESS_KEY_ID: "${{ secrets.CYPRESS_AWS_ACCESS_KEY_ID }}"
|
||||
AWS_SECRET_ACCESS_KEY: "${{ secrets.CYPRESS_AWS_SECRET_ACCESS_KEY }}"
|
||||
REPORT_WEBHOOK_URL: "${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}"
|
||||
|
||||
# Enterprise FIPS Edition
|
||||
e2e-cypress-fips:
|
||||
needs: validate
|
||||
uses: ./.github/workflows/e2e-tests-cypress.yml
|
||||
with:
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
server_image_tag: ${{ inputs.server_image_tag }}
|
||||
server_edition: fips
|
||||
server_image_repo: mattermost
|
||||
server_image_aliases: ${{ inputs.server_image_aliases }}
|
||||
skip_smoke: true
|
||||
server: onprem
|
||||
enable_reporting: true
|
||||
report_type: RELEASE_CUT
|
||||
ref_branch: ${{ needs.validate.outputs.ref_branch }}
|
||||
secrets:
|
||||
MM_LICENSE: "${{ secrets.MM_E2E_TEST_LICENSE_ONPREM_ENT }}"
|
||||
AUTOMATION_DASHBOARD_URL: "${{ secrets.MM_E2E_AUTOMATION_DASHBOARD_URL }}"
|
||||
AUTOMATION_DASHBOARD_TOKEN: "${{ secrets.MM_E2E_AUTOMATION_DASHBOARD_TOKEN }}"
|
||||
PUSH_NOTIFICATION_SERVER: "${{ secrets.MM_E2E_PUSH_NOTIFICATION_SERVER }}"
|
||||
REPORT_WEBHOOK_URL: "${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}"
|
||||
CWS_URL: "${{ secrets.MM_E2E_CWS_URL }}"
|
||||
CWS_EXTRA_HTTP_HEADERS: "${{ secrets.MM_E2E_CWS_EXTRA_HTTP_HEADERS }}"
|
||||
|
||||
e2e-playwright-fips:
|
||||
needs: validate
|
||||
uses: ./.github/workflows/e2e-tests-playwright.yml
|
||||
with:
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
server_image_tag: ${{ inputs.server_image_tag }}
|
||||
server_edition: fips
|
||||
server_image_repo: mattermost
|
||||
server_image_aliases: ${{ inputs.server_image_aliases }}
|
||||
skip_smoke: true
|
||||
server: onprem
|
||||
enable_reporting: true
|
||||
report_type: RELEASE_CUT
|
||||
ref_branch: ${{ needs.validate.outputs.ref_branch }}
|
||||
secrets:
|
||||
MM_LICENSE: "${{ secrets.MM_E2E_TEST_LICENSE_ONPREM_ENT }}"
|
||||
AWS_ACCESS_KEY_ID: "${{ secrets.CYPRESS_AWS_ACCESS_KEY_ID }}"
|
||||
AWS_SECRET_ACCESS_KEY: "${{ secrets.CYPRESS_AWS_SECRET_ACCESS_KEY }}"
|
||||
REPORT_WEBHOOK_URL: "${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}"
|
||||
.github/workflows/e2e-tests-override-status.yml (vendored), 89 lines
@@ -1,89 +0,0 @@
---
|
||||
name: E2E Tests - Override Status
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
pr_number:
|
||||
description: "PR number to update status for"
|
||||
required: true
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
override-status:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: Validate inputs
|
||||
env:
|
||||
PR_NUMBER: ${{ inputs.pr_number }}
|
||||
run: |
|
||||
if ! [[ "$PR_NUMBER" =~ ^[0-9]+$ ]]; then
|
||||
echo "::error::Invalid PR number format. Must be numeric."
|
||||
exit 1
|
||||
fi
|
||||
- name: Get PR head SHA
|
||||
id: pr-info
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
PR_NUMBER: ${{ inputs.pr_number }}
|
||||
run: |
|
||||
PR_DATA=$(gh api repos/${{ github.repository }}/pulls/${PR_NUMBER})
|
||||
HEAD_SHA=$(echo "$PR_DATA" | jq -r '.head.sha')
|
||||
echo "head_sha=$HEAD_SHA" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Override failed full test statuses
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
COMMIT_SHA: ${{ steps.pr-info.outputs.head_sha }}
|
||||
run: |
|
||||
# Only full tests can be overridden (smoke tests must pass)
|
||||
FULL_TEST_CONTEXTS=("e2e-test/playwright-full/enterprise" "e2e-test/cypress-full/enterprise")
|
||||
|
||||
for CONTEXT_NAME in "${FULL_TEST_CONTEXTS[@]}"; do
|
||||
echo "Checking: $CONTEXT_NAME"
|
||||
|
||||
# Get current status
|
||||
STATUS_JSON=$(gh api repos/${{ github.repository }}/commits/${COMMIT_SHA}/statuses \
|
||||
--jq "[.[] | select(.context == \"$CONTEXT_NAME\")] | first // empty")
|
||||
|
||||
if [ -z "$STATUS_JSON" ]; then
|
||||
echo " No status found, skipping"
|
||||
continue
|
||||
fi
|
||||
|
||||
CURRENT_DESC=$(echo "$STATUS_JSON" | jq -r '.description // ""')
|
||||
CURRENT_URL=$(echo "$STATUS_JSON" | jq -r '.target_url // ""')
|
||||
CURRENT_STATE=$(echo "$STATUS_JSON" | jq -r '.state // ""')
|
||||
|
||||
echo " Current: $CURRENT_DESC ($CURRENT_STATE)"
|
||||
|
||||
# Only override if status is failure
|
||||
if [ "$CURRENT_STATE" != "failure" ]; then
|
||||
echo " Not failed, skipping"
|
||||
continue
|
||||
fi
|
||||
|
||||
            # Parse and construct new message
            if [[ "$CURRENT_DESC" =~ ^([0-9]+)\ failed,\ ([0-9]+)\ passed$ ]]; then
              FAILED="${BASH_REMATCH[1]}"
              PASSED="${BASH_REMATCH[2]}"
              NEW_MSG="${FAILED} failed (verified), ${PASSED} passed"
            elif [[ "$CURRENT_DESC" =~ ^([0-9]+)\ failed\ \([^)]+\),\ ([0-9]+)\ passed$ ]]; then
              FAILED="${BASH_REMATCH[1]}"
              PASSED="${BASH_REMATCH[2]}"
              NEW_MSG="${FAILED} failed (verified), ${PASSED} passed"
            else
              NEW_MSG="${CURRENT_DESC} (verified)"
            fi

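            # Example (illustrative): a description of "3 failed (2 spec files), 870 passed"
            # matches the second pattern and becomes "3 failed (verified), 870 passed".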
echo " New: $NEW_MSG"
|
||||
|
||||
# Update status via GitHub API
|
||||
gh api repos/${{ github.repository }}/statuses/${COMMIT_SHA} \
|
||||
-f state=success \
|
||||
-f context="$CONTEXT_NAME" \
|
||||
-f description="$NEW_MSG" \
|
||||
-f target_url="$CURRENT_URL"
|
||||
|
||||
echo " Updated to success"
|
||||
done
|
||||
.github/workflows/e2e-tests-playwright-template.yml (vendored), 465 lines
@@ -1,465 +0,0 @@
---
|
||||
name: E2E Tests - Playwright Template
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
# Test configuration
|
||||
test_type:
|
||||
description: "Type of test run (smoke or full)"
|
||||
type: string
|
||||
required: true
|
||||
test_filter:
|
||||
description: "Test filter arguments (e.g., --grep @smoke)"
|
||||
type: string
|
||||
required: true
|
||||
timeout_minutes:
|
||||
description: "Job timeout in minutes"
|
||||
type: number
|
||||
required: false
|
||||
default: 60
|
||||
enabled_docker_services:
|
||||
description: "Space-separated list of docker services to enable"
|
||||
type: string
|
||||
required: false
|
||||
default: "postgres inbucket"
|
||||
|
||||
# Common build variables
|
||||
commit_sha:
|
||||
type: string
|
||||
required: true
|
||||
branch:
|
||||
type: string
|
||||
required: true
|
||||
build_id:
|
||||
type: string
|
||||
required: true
|
||||
server_image_tag:
|
||||
description: "Server image tag (e.g., master or short SHA)"
|
||||
type: string
|
||||
required: true
|
||||
server:
|
||||
type: string
|
||||
required: false
|
||||
default: onprem
|
||||
server_edition:
|
||||
description: "Server edition: enterprise (default), fips, or team"
|
||||
type: string
|
||||
required: false
|
||||
default: enterprise
|
||||
server_image_repo:
|
||||
description: "Docker registry: mattermostdevelopment (default) or mattermost"
|
||||
type: string
|
||||
required: false
|
||||
default: mattermostdevelopment
|
||||
server_image_aliases:
|
||||
description: "Comma-separated alias tags for description (e.g., 'release-11.4, release-11')"
|
||||
type: string
|
||||
required: false
|
||||
|
||||
# Reporting options
|
||||
enable_reporting:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
report_type:
|
||||
type: string
|
||||
required: false
|
||||
ref_branch:
|
||||
description: "Source branch name for webhook messages (e.g., 'master' or 'release-11.4')"
|
||||
type: string
|
||||
required: false
|
||||
pr_number:
|
||||
type: string
|
||||
required: false
|
||||
|
||||
# Commit status configuration
|
||||
context_name:
|
||||
description: "GitHub commit status context name"
|
||||
type: string
|
||||
required: true
|
||||
|
||||
outputs:
|
||||
passed:
|
||||
description: "Number of passed tests"
|
||||
value: ${{ jobs.report.outputs.passed }}
|
||||
failed:
|
||||
description: "Number of failed tests"
|
||||
value: ${{ jobs.report.outputs.failed }}
|
||||
report_url:
|
||||
description: "URL to test report on S3"
|
||||
value: ${{ jobs.report.outputs.report_url }}
|
||||
|
||||
secrets:
|
||||
MM_LICENSE:
|
||||
required: false
|
||||
REPORT_WEBHOOK_URL:
|
||||
required: false
|
||||
AWS_ACCESS_KEY_ID:
|
||||
required: true
|
||||
AWS_SECRET_ACCESS_KEY:
|
||||
required: true
|
||||
|
||||
env:
|
||||
SERVER_IMAGE: "${{ inputs.server_image_repo }}/${{ inputs.server_edition == 'fips' && 'mattermost-enterprise-fips-edition' || inputs.server_edition == 'team' && 'mattermost-team-edition' || 'mattermost-enterprise-edition' }}:${{ inputs.server_image_tag }}"
|
||||
|
||||
jobs:
|
||||
update-initial-status:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: ci/set-initial-status
|
||||
uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
with:
|
||||
repository_full_name: ${{ github.repository }}
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
context: ${{ inputs.context_name }}
|
||||
description: "tests running, image_tag:${{ inputs.server_image_tag }}${{ inputs.server_image_aliases && format(' ({0})', inputs.server_image_aliases) || '' }}"
|
||||
status: pending
|
||||
|
||||
run-tests:
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: ${{ fromJSON(inputs.timeout_minutes) }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: e2e-tests
|
||||
env:
|
||||
SERVER: "${{ inputs.server }}"
|
||||
MM_LICENSE: "${{ secrets.MM_LICENSE }}"
|
||||
ENABLED_DOCKER_SERVICES: "${{ inputs.enabled_docker_services }}"
|
||||
TEST: playwright
|
||||
TEST_FILTER: "${{ inputs.test_filter }}"
|
||||
BRANCH: "${{ inputs.branch }}-${{ inputs.test_type }}"
|
||||
BUILD_ID: "${{ inputs.build_id }}"
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
ref: ${{ inputs.commit_sha }}
|
||||
fetch-depth: 0
|
||||
- name: ci/setup-node
|
||||
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: npm
|
||||
cache-dependency-path: "e2e-tests/playwright/package-lock.json"
|
||||
- name: ci/get-webapp-node-modules
|
||||
working-directory: webapp
|
||||
run: make node_modules
|
||||
- name: ci/run-tests
|
||||
run: |
|
||||
make cloud-init
|
||||
make
|
||||
- name: ci/cloud-teardown
|
||||
if: always()
|
||||
run: make cloud-teardown
|
||||
- name: ci/upload-results
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: always()
|
||||
with:
|
||||
name: playwright-${{ inputs.test_type }}-${{ inputs.server_edition }}-results
|
||||
path: |
|
||||
e2e-tests/playwright/logs/
|
||||
e2e-tests/playwright/results/
|
||||
retention-days: 5
|
||||
|
||||
calculate-results:
|
||||
runs-on: ubuntu-24.04
|
||||
needs:
|
||||
- run-tests
|
||||
if: always()
|
||||
outputs:
|
||||
passed: ${{ steps.calculate.outputs.passed }}
|
||||
failed: ${{ steps.calculate.outputs.failed }}
|
||||
flaky: ${{ steps.calculate.outputs.flaky }}
|
||||
skipped: ${{ steps.calculate.outputs.skipped }}
|
||||
total_specs: ${{ steps.calculate.outputs.total_specs }}
|
||||
failed_specs: ${{ steps.calculate.outputs.failed_specs }}
|
||||
failed_specs_count: ${{ steps.calculate.outputs.failed_specs_count }}
|
||||
failed_tests: ${{ steps.calculate.outputs.failed_tests }}
|
||||
commit_status_message: ${{ steps.calculate.outputs.commit_status_message }}
|
||||
total: ${{ steps.calculate.outputs.total }}
|
||||
pass_rate: ${{ steps.calculate.outputs.pass_rate }}
|
||||
passing: ${{ steps.calculate.outputs.passing }}
|
||||
color: ${{ steps.calculate.outputs.color }}
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- name: ci/download-results
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: playwright-${{ inputs.test_type }}-${{ inputs.server_edition }}-results
|
||||
path: e2e-tests/playwright/
|
||||
- name: ci/calculate
|
||||
id: calculate
|
||||
uses: ./.github/actions/calculate-playwright-results
|
||||
with:
|
||||
original-results-path: e2e-tests/playwright/results/reporter/results.json
|
||||
|
||||
run-failed-tests:
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: ${{ fromJSON(inputs.timeout_minutes) }}
|
||||
needs:
|
||||
- run-tests
|
||||
- calculate-results
|
||||
if: >-
|
||||
always() &&
|
||||
needs.calculate-results.result == 'success' &&
|
||||
needs.calculate-results.outputs.failed != '0' &&
|
||||
fromJSON(needs.calculate-results.outputs.failed_specs_count) <= 20
|
||||
defaults:
|
||||
run:
|
||||
working-directory: e2e-tests
|
||||
env:
|
||||
SERVER: "${{ inputs.server }}"
|
||||
MM_LICENSE: "${{ secrets.MM_LICENSE }}"
|
||||
ENABLED_DOCKER_SERVICES: "${{ inputs.enabled_docker_services }}"
|
||||
TEST: playwright
|
||||
BRANCH: "${{ inputs.branch }}-${{ inputs.test_type }}-retest"
|
||||
BUILD_ID: "${{ inputs.build_id }}-retest"
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
ref: ${{ inputs.commit_sha }}
|
||||
fetch-depth: 0
|
||||
- name: ci/setup-node
|
||||
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: npm
|
||||
cache-dependency-path: "e2e-tests/playwright/package-lock.json"
|
||||
- name: ci/get-webapp-node-modules
|
||||
working-directory: webapp
|
||||
run: make node_modules
|
||||
- name: ci/run-failed-specs
|
||||
env:
|
||||
SPEC_FILES: ${{ needs.calculate-results.outputs.failed_specs }}
|
||||
run: |
|
||||
echo "Retesting failed specs: $SPEC_FILES"
|
||||
make cloud-init
|
||||
make start-server run-specs
|
||||
- name: ci/cloud-teardown
|
||||
if: always()
|
||||
run: make cloud-teardown
|
||||
- name: ci/upload-retest-results
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: always()
|
||||
with:
|
||||
name: playwright-${{ inputs.test_type }}-${{ inputs.server_edition }}-retest-results
|
||||
path: |
|
||||
e2e-tests/playwright/logs/
|
||||
e2e-tests/playwright/results/
|
||||
retention-days: 5
|
||||
|
||||
report:
|
||||
runs-on: ubuntu-24.04
|
||||
needs:
|
||||
- run-tests
|
||||
- calculate-results
|
||||
- run-failed-tests
|
||||
if: always() && needs.calculate-results.result == 'success'
|
||||
outputs:
|
||||
passed: "${{ steps.final-results.outputs.passed }}"
|
||||
failed: "${{ steps.final-results.outputs.failed }}"
|
||||
commit_status_message: "${{ steps.final-results.outputs.commit_status_message }}"
|
||||
report_url: "${{ steps.upload-to-s3.outputs.report_url }}"
|
||||
defaults:
|
||||
run:
|
||||
working-directory: e2e-tests
|
||||
steps:
|
||||
- name: ci/checkout-repo
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- name: ci/setup-node
|
||||
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: npm
|
||||
cache-dependency-path: "e2e-tests/playwright/package-lock.json"
|
||||
|
||||
# Download original results (always needed)
|
||||
- name: ci/download-results
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: playwright-${{ inputs.test_type }}-${{ inputs.server_edition }}-results
|
||||
path: e2e-tests/playwright/
|
||||
|
||||
# Download retest results (only if retest ran)
|
||||
- name: ci/download-retest-results
|
||||
if: needs.run-failed-tests.result != 'skipped'
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: playwright-${{ inputs.test_type }}-${{ inputs.server_edition }}-retest-results
|
||||
path: e2e-tests/playwright/retest-results/
|
||||
|
||||
# Calculate results (with optional merge of retest results)
|
||||
- name: ci/calculate-results
|
||||
id: final-results
|
||||
uses: ./.github/actions/calculate-playwright-results
|
||||
with:
|
||||
original-results-path: e2e-tests/playwright/results/reporter/results.json
|
||||
retest-results-path: ${{ needs.run-failed-tests.result != 'skipped' && 'e2e-tests/playwright/retest-results/results/reporter/results.json' || '' }}
|
||||
|
||||
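      # Note: when run-failed-tests was skipped, an empty retest-results-path is passed and the
      # composite action presumably calculates from the original results alone, so this single
      # step covers both cases (unlike the Cypress template's PATH A / PATH B split above).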
- name: ci/aws-configure
|
||||
uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
- name: ci/upload-to-s3
|
||||
id: upload-to-s3
|
||||
env:
|
||||
AWS_REGION: us-east-1
|
||||
AWS_S3_BUCKET: mattermost-cypress-report
|
||||
PR_NUMBER: "${{ inputs.pr_number }}"
|
||||
RUN_ID: "${{ github.run_id }}"
|
||||
COMMIT_SHA: "${{ inputs.commit_sha }}"
|
||||
TEST_TYPE: "${{ inputs.test_type }}"
|
||||
        run: |
          LOCAL_RESULTS_PATH="playwright/results/"
          LOCAL_LOGS_PATH="playwright/logs/"

          # Use PR number if available, otherwise use commit SHA prefix
          if [ -n "$PR_NUMBER" ]; then
            S3_PATH="server-pr-${PR_NUMBER}/e2e-reports/playwright-${TEST_TYPE}/${RUN_ID}"
          else
            S3_PATH="server-commit-${COMMIT_SHA::7}/e2e-reports/playwright-${TEST_TYPE}/${RUN_ID}"
          fi

          if [[ -d "$LOCAL_RESULTS_PATH" ]]; then
            aws s3 sync "$LOCAL_RESULTS_PATH" "s3://${AWS_S3_BUCKET}/${S3_PATH}/results/" \
              --acl public-read --cache-control "no-cache"
          fi

          REPORT_URL="https://${AWS_S3_BUCKET}.s3.amazonaws.com/${S3_PATH}/results/reporter/index.html"
          echo "report_url=$REPORT_URL" >> "$GITHUB_OUTPUT"
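          # Example (illustrative): for PR 12345 on run 987654 the report ends up at
          # https://mattermost-cypress-report.s3.amazonaws.com/server-pr-12345/e2e-reports/playwright-full/987654/results/reporter/index.html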
- name: ci/publish-report
|
||||
if: inputs.enable_reporting && env.REPORT_WEBHOOK_URL != ''
|
||||
env:
|
||||
REPORT_WEBHOOK_URL: ${{ secrets.REPORT_WEBHOOK_URL }}
|
||||
COMMIT_STATUS_MESSAGE: ${{ steps.final-results.outputs.commit_status_message }}
|
||||
COLOR: ${{ steps.final-results.outputs.color }}
|
||||
REPORT_URL: ${{ steps.upload-to-s3.outputs.report_url }}
|
||||
TEST_TYPE: ${{ inputs.test_type }}
|
||||
REPORT_TYPE: ${{ inputs.report_type }}
|
||||
COMMIT_SHA: ${{ inputs.commit_sha }}
|
||||
REF_BRANCH: ${{ inputs.ref_branch }}
|
||||
PR_NUMBER: ${{ inputs.pr_number }}
|
||||
run: |
|
||||
# Capitalize test type
|
||||
TEST_TYPE_CAP=$(echo "$TEST_TYPE" | sed 's/.*/\u&/')
|
||||
|
||||
# Build source line based on report type
|
||||
COMMIT_SHORT="${COMMIT_SHA::7}"
|
||||
COMMIT_URL="https://github.com/${{ github.repository }}/commit/${COMMIT_SHA}"
|
||||
if [ "$REPORT_TYPE" = "RELEASE_CUT" ]; then
|
||||
SOURCE_LINE=":github_round: [${COMMIT_SHORT}](${COMMIT_URL}) on \`${REF_BRANCH}\`"
|
||||
elif [ "$REPORT_TYPE" = "MASTER" ] || [ "$REPORT_TYPE" = "RELEASE" ]; then
|
||||
SOURCE_LINE=":git_merge: [${COMMIT_SHORT}](${COMMIT_URL}) on \`${REF_BRANCH}\`"
|
||||
else
|
||||
SOURCE_LINE=":open-pull-request: [mattermost-pr-${PR_NUMBER}](https://github.com/${{ github.repository }}/pull/${PR_NUMBER})"
|
||||
fi
|
||||
|
||||
# Build payload with attachments
|
||||
PAYLOAD=$(cat <<EOF
|
||||
{
|
||||
"username": "E2E Test",
|
||||
"icon_url": "https://mattermost.com/wp-content/uploads/2022/02/icon_WS.png",
|
||||
"attachments": [{
|
||||
"color": "${COLOR}",
|
||||
"text": "**Results - Playwright ${TEST_TYPE_CAP} Tests**\n\n${SOURCE_LINE}\n:docker: \`${{ env.SERVER_IMAGE }}\`\n${COMMIT_STATUS_MESSAGE} | [full report](${REPORT_URL})"
|
||||
}]
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
# Send to webhook
|
||||
curl -X POST -H "Content-Type: application/json" -d "$PAYLOAD" "$REPORT_WEBHOOK_URL"
|
||||
      - name: ci/write-job-summary
        if: always()
        env:
          REPORT_URL: ${{ steps.upload-to-s3.outputs.report_url }}
          TEST_TYPE: ${{ inputs.test_type }}
          PASSED: ${{ steps.final-results.outputs.passed }}
          FAILED: ${{ steps.final-results.outputs.failed }}
          FLAKY: ${{ steps.final-results.outputs.flaky }}
          SKIPPED: ${{ steps.final-results.outputs.skipped }}
          TOTAL_SPECS: ${{ steps.final-results.outputs.total_specs }}
          FAILED_SPECS_COUNT: ${{ steps.final-results.outputs.failed_specs_count }}
          FAILED_SPECS: ${{ steps.final-results.outputs.failed_specs }}
          COMMIT_STATUS_MESSAGE: ${{ steps.final-results.outputs.commit_status_message }}
          FAILED_TESTS: ${{ steps.final-results.outputs.failed_tests }}
        run: |
          {
            echo "## E2E Test Results - Playwright ${TEST_TYPE}"
            echo ""

            if [ "$FAILED" = "0" ]; then
              echo "All tests passed: **${PASSED} passed**"
            else
              echo "<details>"
              echo "<summary>${FAILED} failed, ${PASSED} passed</summary>"
              echo ""
              echo "| Test | File |"
              echo "|------|------|"
              echo "${FAILED_TESTS}"
              echo "</details>"
            fi

            echo ""
            echo "### Calculation Outputs"
            echo ""
            echo "| Output | Value |"
            echo "|--------|-------|"
            echo "| passed | ${PASSED} |"
            echo "| failed | ${FAILED} |"
            echo "| flaky | ${FLAKY} |"
            echo "| skipped | ${SKIPPED} |"
            echo "| total_specs | ${TOTAL_SPECS} |"
            echo "| failed_specs_count | ${FAILED_SPECS_COUNT} |"
            echo "| commit_status_message | ${COMMIT_STATUS_MESSAGE} |"
            echo "| failed_specs | ${FAILED_SPECS:-none} |"

            echo ""
            echo "---"
            echo "[View Full Report](${REPORT_URL})"
          } >> $GITHUB_STEP_SUMMARY
      - name: ci/assert-results
        run: |
          [ "${{ steps.final-results.outputs.failed }}" = "0" ]

  update-success-status:
    runs-on: ubuntu-24.04
    if: always() && needs.report.result == 'success' && needs.calculate-results.result == 'success'
    needs:
      - calculate-results
      - report
    steps:
      - uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          repository_full_name: ${{ github.repository }}
          commit_sha: ${{ inputs.commit_sha }}
          context: ${{ inputs.context_name }}
          description: "${{ needs.report.outputs.commit_status_message }}, image_tag:${{ inputs.server_image_tag }}${{ inputs.server_image_aliases && format(' ({0})', inputs.server_image_aliases) || '' }}"
          status: success
          target_url: ${{ needs.report.outputs.report_url }}

  update-failure-status:
    runs-on: ubuntu-24.04
    if: always() && (needs.report.result != 'success' || needs.calculate-results.result != 'success')
    needs:
      - calculate-results
      - report
    steps:
      - uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          repository_full_name: ${{ github.repository }}
          commit_sha: ${{ inputs.commit_sha }}
          context: ${{ inputs.context_name }}
          description: "${{ needs.report.outputs.commit_status_message }}, image_tag:${{ inputs.server_image_tag }}${{ inputs.server_image_aliases && format(' ({0})', inputs.server_image_aliases) || '' }}"
          status: failure
          target_url: ${{ needs.report.outputs.report_url }}
.github/workflows/e2e-tests-playwright.yml (vendored, 181 changed lines)
@@ -1,181 +0,0 @@
---
|
||||
name: E2E Tests - Playwright
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
commit_sha:
|
||||
type: string
|
||||
required: true
|
||||
enable_reporting:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
server:
|
||||
type: string
|
||||
required: false
|
||||
default: onprem
|
||||
report_type:
|
||||
type: string
|
||||
required: false
|
||||
pr_number:
|
||||
type: string
|
||||
required: false
|
||||
server_image_tag:
|
||||
type: string
|
||||
required: false
|
||||
description: "Server image tag (e.g., master or short SHA)"
|
||||
server_edition:
|
||||
type: string
|
||||
required: false
|
||||
description: "Server edition: enterprise (default), fips, or team"
|
||||
server_image_repo:
|
||||
type: string
|
||||
required: false
|
||||
default: mattermostdevelopment
|
||||
description: "Docker registry: mattermostdevelopment (default) or mattermost"
|
||||
skip_smoke:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
description: "Skip smoke tests and run full tests directly"
|
||||
server_image_aliases:
|
||||
type: string
|
||||
required: false
|
||||
description: "Comma-separated alias tags for context name (e.g., 'release-11.4, release-11')"
|
||||
ref_branch:
|
||||
type: string
|
||||
required: false
|
||||
description: "Source branch name for webhook messages (e.g., 'master' or 'release-11.4')"
|
||||
secrets:
|
||||
MM_LICENSE:
|
||||
required: false
|
||||
REPORT_WEBHOOK_URL:
|
||||
required: false
|
||||
AWS_ACCESS_KEY_ID:
|
||||
required: true
|
||||
AWS_SECRET_ACCESS_KEY:
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
generate-build-variables:
|
||||
runs-on: ubuntu-24.04
|
||||
outputs:
|
||||
branch: "${{ steps.build-vars.outputs.branch }}"
|
||||
build_id: "${{ steps.build-vars.outputs.build_id }}"
|
||||
server_image_tag: "${{ steps.build-vars.outputs.server_image_tag }}"
|
||||
server_image: "${{ steps.build-vars.outputs.server_image }}"
|
||||
steps:
|
||||
- name: ci/generate-build-variables
|
||||
id: build-vars
|
||||
env:
|
||||
COMMIT_SHA: ${{ inputs.commit_sha }}
|
||||
PR_NUMBER: ${{ inputs.pr_number }}
|
||||
INPUT_SERVER_IMAGE_TAG: ${{ inputs.server_image_tag }}
|
||||
RUN_ID: ${{ github.run_id }}
|
||||
RUN_ATTEMPT: ${{ github.run_attempt }}
|
||||
run: |
|
||||
# Use provided server_image_tag or derive from commit SHA
|
||||
if [ -n "$INPUT_SERVER_IMAGE_TAG" ]; then
|
||||
SERVER_IMAGE_TAG="$INPUT_SERVER_IMAGE_TAG"
|
||||
else
|
||||
SERVER_IMAGE_TAG="${COMMIT_SHA::7}"
|
||||
fi
|
||||
|
||||
# Validate server_image_tag format (alphanumeric, dots, hyphens, underscores)
|
||||
if ! [[ "$SERVER_IMAGE_TAG" =~ ^[a-zA-Z0-9._-]+$ ]]; then
|
||||
echo "::error::Invalid server_image_tag format: ${SERVER_IMAGE_TAG}"
|
||||
exit 1
|
||||
fi
|
||||
echo "server_image_tag=${SERVER_IMAGE_TAG}" >> $GITHUB_OUTPUT
|
||||
|
||||
# Generate branch name
|
||||
REF_BRANCH="${{ inputs.ref_branch }}"
|
||||
if [ -n "$PR_NUMBER" ]; then
|
||||
echo "branch=server-pr-${PR_NUMBER}" >> $GITHUB_OUTPUT
|
||||
elif [ -n "$REF_BRANCH" ]; then
|
||||
echo "branch=server-${REF_BRANCH}-${SERVER_IMAGE_TAG}" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "branch=server-commit-${SERVER_IMAGE_TAG}" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
# Determine server image name
|
||||
EDITION="${{ inputs.server_edition }}"
|
||||
REPO="${{ inputs.server_image_repo }}"
|
||||
REPO="${REPO:-mattermostdevelopment}"
|
||||
case "$EDITION" in
|
||||
fips) IMAGE_NAME="mattermost-enterprise-fips-edition" ;;
|
||||
team) IMAGE_NAME="mattermost-team-edition" ;;
|
||||
*) IMAGE_NAME="mattermost-enterprise-edition" ;;
|
||||
esac
|
||||
SERVER_IMAGE="${REPO}/${IMAGE_NAME}:${SERVER_IMAGE_TAG}"
|
||||
echo "server_image=${SERVER_IMAGE}" >> $GITHUB_OUTPUT
|
||||
|
||||
# Validate server_image_aliases format if provided
|
||||
ALIASES="${{ inputs.server_image_aliases }}"
|
||||
if [ -n "$ALIASES" ] && ! [[ "$ALIASES" =~ ^[a-zA-Z0-9._,\ -]+$ ]]; then
|
||||
echo "::error::Invalid server_image_aliases format: ${ALIASES}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Generate build ID
|
||||
if [ -n "$EDITION" ] && [ "$EDITION" != "enterprise" ]; then
|
||||
echo "build_id=${RUN_ID}_${RUN_ATTEMPT}-${SERVER_IMAGE_TAG}-playwright-onprem-${EDITION}" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "build_id=${RUN_ID}_${RUN_ATTEMPT}-${SERVER_IMAGE_TAG}-playwright-onprem-ent" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
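For reference, the case statement above maps the edition input to an image name before the tag is appended. A quick sketch of the resulting image references, using an example tag (illustrative only):

# Illustrative only: same mapping as the step above, with an example tag.
REPO="mattermostdevelopment"; SERVER_IMAGE_TAG="abc1234"
for EDITION in enterprise fips team; do
  case "$EDITION" in
    fips) IMAGE_NAME="mattermost-enterprise-fips-edition" ;;
    team) IMAGE_NAME="mattermost-team-edition" ;;
    *)    IMAGE_NAME="mattermost-enterprise-edition" ;;
  esac
  echo "${EDITION}: ${REPO}/${IMAGE_NAME}:${SERVER_IMAGE_TAG}"
done
# enterprise: mattermostdevelopment/mattermost-enterprise-edition:abc1234
# fips:       mattermostdevelopment/mattermost-enterprise-fips-edition:abc1234
# team:       mattermostdevelopment/mattermost-team-edition:abc1234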
playwright-smoke:
|
||||
if: ${{ !inputs.skip_smoke }}
|
||||
needs:
|
||||
- generate-build-variables
|
||||
uses: ./.github/workflows/e2e-tests-playwright-template.yml
|
||||
with:
|
||||
test_type: smoke
|
||||
test_filter: "--grep @smoke"
|
||||
timeout_minutes: 30
|
||||
enabled_docker_services: "postgres inbucket"
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
branch: ${{ needs.generate-build-variables.outputs.branch }}
|
||||
build_id: ${{ needs.generate-build-variables.outputs.build_id }}
|
||||
server_image_tag: ${{ needs.generate-build-variables.outputs.server_image_tag }}
|
||||
server_edition: ${{ inputs.server_edition }}
|
||||
server_image_repo: ${{ inputs.server_image_repo }}
|
||||
server_image_aliases: ${{ inputs.server_image_aliases }}
|
||||
server: ${{ inputs.server }}
|
||||
context_name: "e2e-test/playwright-smoke/${{ inputs.server_edition || 'enterprise' }}"
|
||||
pr_number: ${{ inputs.pr_number }}
|
||||
secrets:
|
||||
MM_LICENSE: ${{ secrets.MM_LICENSE }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
|
||||
# Full Tests (runs if smoke passed or skipped)
|
||||
playwright-full:
|
||||
needs:
|
||||
- playwright-smoke
|
||||
- generate-build-variables
|
||||
if: always() && (needs.playwright-smoke.result == 'skipped' || needs.playwright-smoke.outputs.failed == '0')
|
||||
uses: ./.github/workflows/e2e-tests-playwright-template.yml
|
||||
with:
|
||||
test_type: full
|
||||
test_filter: '--grep-invert "@visual"'
|
||||
timeout_minutes: 120
|
||||
enabled_docker_services: "postgres inbucket minio openldap elasticsearch keycloak"
|
||||
commit_sha: ${{ inputs.commit_sha }}
|
||||
branch: ${{ needs.generate-build-variables.outputs.branch }}
|
||||
build_id: ${{ needs.generate-build-variables.outputs.build_id }}
|
||||
server_image_tag: ${{ needs.generate-build-variables.outputs.server_image_tag }}
|
||||
server_edition: ${{ inputs.server_edition }}
|
||||
server_image_repo: ${{ inputs.server_image_repo }}
|
||||
server_image_aliases: ${{ inputs.server_image_aliases }}
|
||||
server: ${{ inputs.server }}
|
||||
enable_reporting: ${{ inputs.enable_reporting }}
|
||||
report_type: ${{ inputs.report_type }}
|
||||
ref_branch: ${{ inputs.ref_branch }}
|
||||
pr_number: ${{ inputs.pr_number }}
|
||||
context_name: "e2e-test/playwright-full/${{ inputs.server_edition || 'enterprise' }}"
|
||||
secrets:
|
||||
MM_LICENSE: ${{ secrets.MM_LICENSE }}
|
||||
REPORT_WEBHOOK_URL: ${{ secrets.REPORT_WEBHOOK_URL }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
.github/workflows/e2e-tests-verified-label.yml (vendored, 150 changed lines)
@@ -1,150 +0,0 @@
---
|
||||
name: "E2E Tests/verified"
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [labeled]
|
||||
|
||||
env:
|
||||
REPORT_WEBHOOK_URL: ${{ secrets.MM_E2E_REPORT_WEBHOOK_URL }}
|
||||
|
||||
jobs:
|
||||
approve-e2e:
|
||||
if: github.event.label.name == 'E2E Tests/verified'
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: ci/check-user-permission
|
||||
id: check-permission
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
LABEL_AUTHOR: ${{ github.event.sender.login }}
|
||||
run: |
|
||||
# Check if user has write permission to the repository
|
||||
PERMISSION=$(gh api repos/${{ github.repository }}/collaborators/${LABEL_AUTHOR}/permission --jq '.permission' 2>/dev/null || echo "none")
|
||||
if [[ "$PERMISSION" != "admin" && "$PERMISSION" != "write" ]]; then
|
||||
echo "User ${LABEL_AUTHOR} doesn't have write permission to the repository (permission: ${PERMISSION})"
|
||||
exit 1
|
||||
fi
|
||||
echo "User ${LABEL_AUTHOR} has ${PERMISSION} permission to the repository"
|
||||
|
||||
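The permission gate above relies on the GitHub collaborators permission API through the gh CLI. A standalone sketch of the same check; the repository and username are placeholders and GH_TOKEN must already be set:

# Placeholders for illustration only.
REPO="someorg/somerepo"; LABEL_AUTHOR="someuser"
PERMISSION=$(gh api "repos/${REPO}/collaborators/${LABEL_AUTHOR}/permission" --jq '.permission' 2>/dev/null || echo "none")
if [[ "$PERMISSION" != "admin" && "$PERMISSION" != "write" ]]; then
  echo "User ${LABEL_AUTHOR} lacks write access (permission: ${PERMISSION})" >&2
  exit 1
fi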
- name: ci/override-failed-statuses
|
||||
id: override
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
|
||||
run: |
|
||||
# Only full tests can be overridden (smoke tests must pass)
|
||||
FULL_TEST_CONTEXTS=("e2e-test/playwright-full/enterprise" "e2e-test/cypress-full/enterprise")
|
||||
OVERRIDDEN=""
|
||||
WEBHOOK_DATA="[]"
|
||||
|
||||
for CONTEXT_NAME in "${FULL_TEST_CONTEXTS[@]}"; do
|
||||
echo "Checking: $CONTEXT_NAME"
|
||||
|
||||
# Get current status
|
||||
STATUS_JSON=$(gh api repos/${{ github.repository }}/commits/${COMMIT_SHA}/statuses \
|
||||
--jq "[.[] | select(.context == \"$CONTEXT_NAME\")] | first // empty")
|
||||
|
||||
if [ -z "$STATUS_JSON" ]; then
|
||||
echo " No status found, skipping"
|
||||
continue
|
||||
fi
|
||||
|
||||
CURRENT_DESC=$(echo "$STATUS_JSON" | jq -r '.description // ""')
|
||||
CURRENT_URL=$(echo "$STATUS_JSON" | jq -r '.target_url // ""')
|
||||
CURRENT_STATE=$(echo "$STATUS_JSON" | jq -r '.state // ""')
|
||||
|
||||
echo " Current: $CURRENT_DESC ($CURRENT_STATE)"
|
||||
|
||||
# Only override if status is failure
|
||||
if [ "$CURRENT_STATE" != "failure" ]; then
|
||||
echo " Not failed, skipping"
|
||||
continue
|
||||
fi
|
||||
|
||||
# Prefix existing description
|
||||
if [ -n "$CURRENT_DESC" ]; then
|
||||
NEW_MSG="(verified) ${CURRENT_DESC}"
|
||||
else
|
||||
NEW_MSG="(verified)"
|
||||
fi
|
||||
|
||||
echo " New: $NEW_MSG"
|
||||
|
||||
# Update status via GitHub API
|
||||
gh api repos/${{ github.repository }}/statuses/${COMMIT_SHA} \
|
||||
-f state=success \
|
||||
-f context="$CONTEXT_NAME" \
|
||||
-f description="$NEW_MSG" \
|
||||
-f target_url="$CURRENT_URL"
|
||||
|
||||
echo " Updated to success"
|
||||
OVERRIDDEN="${OVERRIDDEN}- ${CONTEXT_NAME}\n"
|
||||
|
||||
# Collect data for webhook
|
||||
TEST_TYPE="unknown"
|
||||
if [[ "$CONTEXT_NAME" == *"playwright"* ]]; then
|
||||
TEST_TYPE="playwright"
|
||||
elif [[ "$CONTEXT_NAME" == *"cypress"* ]]; then
|
||||
TEST_TYPE="cypress"
|
||||
fi
|
||||
|
||||
WEBHOOK_DATA=$(echo "$WEBHOOK_DATA" | jq \
|
||||
--arg context "$CONTEXT_NAME" \
|
||||
--arg test_type "$TEST_TYPE" \
|
||||
--arg description "$CURRENT_DESC" \
|
||||
--arg report_url "$CURRENT_URL" \
|
||||
'. + [{context: $context, test_type: $test_type, description: $description, report_url: $report_url}]')
|
||||
done
|
||||
|
||||
echo "overridden<<EOF" >> $GITHUB_OUTPUT
|
||||
echo -e "$OVERRIDDEN" >> $GITHUB_OUTPUT
|
||||
echo "EOF" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "webhook_data<<EOF" >> $GITHUB_OUTPUT
|
||||
echo "$WEBHOOK_DATA" >> $GITHUB_OUTPUT
|
||||
echo "EOF" >> $GITHUB_OUTPUT
|
||||
|
||||
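The name<<EOF pattern used above is the documented way to write multi-line values to GITHUB_OUTPUT. A minimal sketch of the idiom on its own (my_output is just an example name):

# Minimal sketch of the multi-line output idiom used above.
VALUE=$'line one\nline two'
{
  echo "my_output<<EOF"
  echo "$VALUE"
  echo "EOF"
} >> "$GITHUB_OUTPUT"
# A later step can then read it as ${{ steps.<id>.outputs.my_output }}.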
- name: ci/build-webhook-message
|
||||
if: env.REPORT_WEBHOOK_URL != '' && steps.override.outputs.overridden != ''
|
||||
id: webhook-message
|
||||
env:
|
||||
WEBHOOK_DATA: ${{ steps.override.outputs.webhook_data }}
|
||||
run: |
|
||||
MESSAGE_TEXT=""
|
||||
|
||||
while IFS= read -r item; do
|
||||
[ -z "$item" ] && continue
|
||||
CONTEXT=$(echo "$item" | jq -r '.context')
|
||||
DESCRIPTION=$(echo "$item" | jq -r '.description')
|
||||
REPORT_URL=$(echo "$item" | jq -r '.report_url')
|
||||
|
||||
MESSAGE_TEXT="${MESSAGE_TEXT}- **${CONTEXT}**: ${DESCRIPTION}, [view report](${REPORT_URL})\n"
|
||||
done < <(echo "$WEBHOOK_DATA" | jq -c '.[]')
|
||||
|
||||
{
|
||||
echo "message_text<<EOF"
|
||||
echo -e "$MESSAGE_TEXT"
|
||||
echo "EOF"
|
||||
} >> $GITHUB_OUTPUT
|
||||
|
||||
- name: ci/send-webhook-notification
|
||||
if: env.REPORT_WEBHOOK_URL != '' && steps.override.outputs.overridden != ''
|
||||
env:
|
||||
REPORT_WEBHOOK_URL: ${{ env.REPORT_WEBHOOK_URL }}
|
||||
MESSAGE_TEXT: ${{ steps.webhook-message.outputs.message_text }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
PR_URL: ${{ github.event.pull_request.html_url }}
|
||||
COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
|
||||
SENDER: ${{ github.event.sender.login }}
|
||||
run: |
|
||||
PAYLOAD=$(cat <<EOF
|
||||
{
|
||||
"username": "E2E Test",
|
||||
"icon_url": "https://mattermost.com/wp-content/uploads/2022/02/icon_WS.png",
|
||||
"text": "**:white_check_mark: E2E Tests Verified**\n\nBy: \`@${SENDER}\` via \`E2E Tests/verified\` trigger-label\n:open-pull-request: [mattermost-pr-${PR_NUMBER}](${PR_URL}), commit: \`${COMMIT_SHA:0:7}\`\n\n${MESSAGE_TEXT}"
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
curl -X POST -H "Content-Type: application/json" -d "$PAYLOAD" "$REPORT_WEBHOOK_URL"
|
||||
.github/workflows/mmctl-test-template.yml (vendored, 2 changed lines)
@@ -46,7 +46,7 @@ jobs:
echo "BUILD_IMAGE=mattermost/mattermost-build-server-fips:${{ inputs.go-version }}" >> "${GITHUB_OUTPUT}"
|
||||
echo "LOG_ARTIFACT_NAME=${{ inputs.logsartifact }}-fips" >> "${GITHUB_OUTPUT}"
|
||||
else
|
||||
echo "BUILD_IMAGE=mattermost/mattermost-build-server:${{ inputs.go-version }}" >> "${GITHUB_OUTPUT}"
|
||||
echo "BUILD_IMAGE=mattermostdevelopment/mattermost-build-server:${{ inputs.go-version }}" >> "${GITHUB_OUTPUT}"
|
||||
echo "LOG_ARTIFACT_NAME=${{ inputs.logsartifact }}" >> "${GITHUB_OUTPUT}"
|
||||
fi
|
||||
|
||||
|
|
|
|||
.github/workflows/server-ci-artifacts.yml (vendored, 6 changed lines)
@@ -17,7 +17,7 @@ jobs:
if: github.repository_owner == 'mattermost' && github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'success'
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
|
||||
- uses: mattermost/actions/delivery/update-commit-status@d5174b860704729f4c14ef8489ae075742bfa08a
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
with:
|
||||
|
|
@ -155,7 +155,7 @@ jobs:
|
|||
needs:
|
||||
- build-docker
|
||||
steps:
|
||||
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
|
||||
- uses: mattermost/actions/delivery/update-commit-status@d5174b860704729f4c14ef8489ae075742bfa08a
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
with:
|
||||
|
|
@ -171,7 +171,7 @@ jobs:
|
|||
needs:
|
||||
- build-docker
|
||||
steps:
|
||||
- uses: mattermost/actions/delivery/update-commit-status@f324ac89b05cc3511cb06e60642ac2fb829f0a63
|
||||
- uses: mattermost/actions/delivery/update-commit-status@d5174b860704729f4c14ef8489ae075742bfa08a
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
with:
|
||||
|
|
|
|||
.github/workflows/server-ci.yml (vendored, 32 changed lines)
@@ -12,6 +12,7 @@ on:
pull_request:
|
||||
paths:
|
||||
- "server/**"
|
||||
- "e2e-tests/**"
|
||||
- ".github/workflows/server-ci.yml"
|
||||
- ".github/workflows/server-test-template.yml"
|
||||
- ".github/workflows/mmctl-test-template.yml"
|
||||
|
|
@ -39,7 +40,7 @@ jobs:
|
|||
name: Check mocks
|
||||
needs: go
|
||||
runs-on: ubuntu-22.04
|
||||
container: mattermost/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
container: mattermostdevelopment/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: server
|
||||
|
|
@ -56,7 +57,7 @@ jobs:
|
|||
name: Check go mod tidy
|
||||
needs: go
|
||||
runs-on: ubuntu-22.04
|
||||
container: mattermost/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
container: mattermostdevelopment/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: server
|
||||
|
|
@ -73,7 +74,7 @@ jobs:
|
|||
name: check-style
|
||||
needs: go
|
||||
runs-on: ubuntu-22.04
|
||||
container: mattermost/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
container: mattermostdevelopment/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: server
|
||||
|
|
@ -90,7 +91,7 @@ jobs:
|
|||
name: Check serialization methods for hot structs
|
||||
needs: go
|
||||
runs-on: ubuntu-22.04
|
||||
container: mattermost/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
container: mattermostdevelopment/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: server
|
||||
|
|
@ -107,7 +108,7 @@ jobs:
|
|||
name: Vet API
|
||||
needs: go
|
||||
runs-on: ubuntu-22.04
|
||||
container: mattermost/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
container: mattermostdevelopment/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: server
|
||||
|
|
@ -122,7 +123,7 @@ jobs:
|
|||
name: Check migration files
|
||||
needs: go
|
||||
runs-on: ubuntu-22.04
|
||||
container: mattermost/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
container: mattermostdevelopment/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: server
|
||||
|
|
@ -137,7 +138,7 @@ jobs:
|
|||
name: Generate email templates
|
||||
needs: go
|
||||
runs-on: ubuntu-22.04
|
||||
container: mattermost/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
container: mattermostdevelopment/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: server
|
||||
|
|
@ -154,7 +155,7 @@ jobs:
|
|||
name: Check store layers
|
||||
needs: go
|
||||
runs-on: ubuntu-22.04
|
||||
container: mattermost/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
container: mattermostdevelopment/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: server
|
||||
|
|
@ -171,7 +172,7 @@ jobs:
|
|||
name: Check mmctl docs
|
||||
needs: go
|
||||
runs-on: ubuntu-22.04
|
||||
container: mattermost/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
container: mattermostdevelopment/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: server
|
||||
|
|
@ -226,9 +227,8 @@ jobs:
|
|||
fips-enabled: true
|
||||
test-coverage:
|
||||
name: Generate Test Coverage
|
||||
# Disabled: Running out of memory and causing spurious failures.
|
||||
# Old condition: ${{ github.event_name != 'pull_request' || !startsWith(github.event.pull_request.base.ref, 'release-') }}
|
||||
if: false
|
||||
# Skip coverage generation for cherry-pick PRs into release branches.
|
||||
if: ${{ github.event_name != 'pull_request' || !startsWith(github.event.pull_request.base.ref, 'release-') }}
|
||||
needs: go
|
||||
uses: ./.github/workflows/server-test-template.yml
|
||||
secrets: inherit
|
||||
|
|
@ -270,7 +270,7 @@ jobs:
|
|||
name: Build mattermost server app
|
||||
needs: go
|
||||
runs-on: ubuntu-22.04
|
||||
container: mattermost/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
container: mattermostdevelopment/mattermost-build-server:${{ needs.go.outputs.version }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: server
|
||||
|
|
@ -281,12 +281,6 @@ jobs:
|
|||
steps:
|
||||
- name: Checkout mattermost project
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: ci/setup-node
|
||||
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
|
||||
with:
|
||||
node-version-file: ".nvmrc"
|
||||
cache: "npm"
|
||||
cache-dependency-path: "webapp/package-lock.json"
|
||||
- name: Run setup-go-work
|
||||
run: make setup-go-work
|
||||
- name: Build
|
||||
|
|
|
|||
.github/workflows/server-test-template.yml (vendored, 2 changed lines)
@@ -59,7 +59,7 @@ jobs:
echo "BUILD_IMAGE=mattermost/mattermost-build-server-fips:${{ inputs.go-version }}" >> "${GITHUB_OUTPUT}"
|
||||
echo "LOG_ARTIFACT_NAME=${{ inputs.logsartifact }}-fips" >> "${GITHUB_OUTPUT}"
|
||||
else
|
||||
echo "BUILD_IMAGE=mattermost/mattermost-build-server:${{ inputs.go-version }}" >> "${GITHUB_OUTPUT}"
|
||||
echo "BUILD_IMAGE=mattermostdevelopment/mattermost-build-server:${{ inputs.go-version }}" >> "${GITHUB_OUTPUT}"
|
||||
echo "LOG_ARTIFACT_NAME=${{ inputs.logsartifact }}" >> "${GITHUB_OUTPUT}"
|
||||
fi
|
||||
|
||||
|
|
|
|||
.github/workflows/webapp-ci.yml (vendored, 13 changed lines)
@@ -7,6 +7,7 @@ on:
pull_request:
|
||||
paths:
|
||||
- "webapp/**"
|
||||
- "e2e-tests/**"
|
||||
- ".github/workflows/webapp-ci.yml"
|
||||
- ".github/actions/webapp-setup/**"
|
||||
|
||||
|
|
@ -40,10 +41,18 @@ jobs:
|
|||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: ci/setup
|
||||
uses: ./.github/actions/webapp-setup
|
||||
- name: ci/i18n-extract
|
||||
- name: ci/lint
|
||||
working-directory: webapp/channels
|
||||
run: |
|
||||
npm run i18n-extract:check
|
||||
cp src/i18n/en.json /tmp/en.json
|
||||
mkdir -p /tmp/fake-mobile-dir/assets/base/i18n/
|
||||
echo '{}' > /tmp/fake-mobile-dir/assets/base/i18n/en.json
|
||||
npm run mmjstool -- i18n extract-webapp --webapp-dir ./src --mobile-dir /tmp/fake-mobile-dir
|
||||
diff /tmp/en.json src/i18n/en.json
|
||||
# Address weblate behavior which does not remove whole translation item when translation string is set to empty
|
||||
npm run mmjstool -- i18n clean-empty --webapp-dir ./src --mobile-dir /tmp/fake-mobile-dir --check
|
||||
npm run mmjstool -- i18n check-empty-src --webapp-dir ./src --mobile-dir /tmp/fake-mobile-dir
|
||||
rm -rf tmp
|
||||
|
||||
check-types:
|
||||
needs: check-lint
|
||||
|
|
|
|||
.gitignore (vendored, 3 changed lines)
@@ -161,6 +161,5 @@ docker-compose.override.yaml
.env
|
||||
|
||||
**/CLAUDE.local.md
|
||||
**/CLAUDE.md
|
||||
CLAUDE.md
|
||||
.cursorrules
|
||||
.cursor/
|
||||
|
|
|
|||
.nvmrc (2 changed lines)
@@ -1 +1 @@
24.11
20.11
NOTICE.txt (202 changed lines)
@@ -2527,16 +2527,83 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
---
|
||||
|
||||
## LumenResearch/uasurfer
|
||||
## Masterminds/semver
|
||||
|
||||
This product contains 'LumenResearch/uasurfer' by Lumen Research.
|
||||
This product contains 'Masterminds/semver' by Masterminds.
|
||||
|
||||
Work with Semantic Versions in Go
|
||||
|
||||
* LICENSE: MIT License
|
||||
|
||||
Copyright (C) 2014-2019, Matt Butcher and Matt Farina
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## anthonynsimon/bild
|
||||
|
||||
This product contains 'anthonynsimon/bild' by anthonynsimon.
|
||||
|
||||
Image processing algorithms in pure Go
|
||||
|
||||
* HOMEPAGE:
|
||||
* https://github.com/anthonynsimon/bild
|
||||
|
||||
* LICENSE: MIT License
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2016-2024 Anthony Simon
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## avct/uasurfer
|
||||
|
||||
This product contains 'uasurfer' by Avocet.
|
||||
|
||||
Go package for fast and reliable abstraction of browser user agent strings.
|
||||
|
||||
* HOMEPAGE:
|
||||
* https://github.com/LumenResearch/uasurfer
|
||||
* https://github.com/avct/uasurfer
|
||||
|
||||
* LICENSE: Other
|
||||
* LICENSE: Apache-2.0
|
||||
|
||||
|
||||
Apache License
|
||||
|
|
@ -2731,73 +2798,6 @@ Go package for fast and reliable abstraction of browser user agent strings.
|
|||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
---
|
||||
|
||||
## Masterminds/semver
|
||||
|
||||
This product contains 'Masterminds/semver' by Masterminds.
|
||||
|
||||
Work with Semantic Versions in Go
|
||||
|
||||
* LICENSE: MIT License
|
||||
|
||||
Copyright (C) 2014-2019, Matt Butcher and Matt Farina
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## anthonynsimon/bild
|
||||
|
||||
This product contains 'anthonynsimon/bild' by anthonynsimon.
|
||||
|
||||
Image processing algorithms in pure Go
|
||||
|
||||
* HOMEPAGE:
|
||||
* https://github.com/anthonynsimon/bild
|
||||
|
||||
* LICENSE: MIT License
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2016-2024 Anthony Simon
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## aws/aws-sdk-go-v2
|
||||
|
|
@ -5918,9 +5918,6 @@ This product contains 'h2non/go-is-svg' by Tom.
|
|||
|
||||
Check if a given buffer is a valid SVG image in Go (golang)
|
||||
|
||||
* HOMEPAGE:
|
||||
* https://github.com/h2non/go-is-svg
|
||||
|
||||
* LICENSE: MIT License
|
||||
|
||||
The MIT License
|
||||
|
|
@ -10861,21 +10858,6 @@ Internationalize React apps. This library provides React components and an API t
|
|||
|
||||
|
||||
|
||||
---
|
||||
|
||||
## react-intl
|
||||
|
||||
This product contains 'react-intl' by Eric Ferraiuolo.
|
||||
|
||||
Internationalize React apps. This library provides React components and an API to format dates, numbers, and strings, including pluralization and handling translations.
|
||||
|
||||
* HOMEPAGE:
|
||||
* https://formatjs.github.io/docs/react-intl
|
||||
|
||||
* LICENSE: BSD-3-Clause
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
## react-is
|
||||
|
|
@ -12880,48 +12862,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## x/sys
|
||||
|
||||
This product contains 'x/sys' by Go.
|
||||
|
||||
[mirror] Go packages for low-level interaction with the operating system
|
||||
|
||||
* HOMEPAGE:
|
||||
* https://golang.org/x/sys
|
||||
|
||||
* LICENSE: BSD 3-Clause "New" or "Revised" License
|
||||
|
||||
Copyright 2009 The Go Authors.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google LLC nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## x/term
|
||||
|
|
|
|||
|
|
@ -20,8 +20,6 @@ build-v4: node_modules playbooks
|
|||
@cat $(V4_SRC)/posts.yaml >> $(V4_YAML)
|
||||
@cat $(V4_SRC)/preferences.yaml >> $(V4_YAML)
|
||||
@cat $(V4_SRC)/files.yaml >> $(V4_YAML)
|
||||
@cat $(V4_SRC)/recaps.yaml >> $(V4_YAML)
|
||||
@cat $(V4_SRC)/ai.yaml >> $(V4_YAML)
|
||||
@cat $(V4_SRC)/uploads.yaml >> $(V4_YAML)
|
||||
@cat $(V4_SRC)/jobs.yaml >> $(V4_YAML)
|
||||
@cat $(V4_SRC)/system.yaml >> $(V4_YAML)
|
||||
|
|
|
|||
|
|
@ -283,16 +283,11 @@
|
|||
$ref: "#/components/responses/InternalServerError"
|
||||
"/api/v4/access_control_policies/{policy_id}/activate":
|
||||
get:
|
||||
deprecated: true
|
||||
tags:
|
||||
- access control
|
||||
summary: Activate or deactivate an access control policy
|
||||
description: |
|
||||
Updates the active status of an access control policy.
|
||||
|
||||
**Deprecated:** This endpoint will be removed in a future release. Use the dedicated access control policy update endpoint instead.
|
||||
Link: </api/v4/access_control_policies/activate>; rel="successor-version"
|
||||
|
||||
##### Permissions
|
||||
Must have the `manage_system` permission.
|
||||
operationId: UpdateAccessControlPolicyActiveStatus
|
||||
|
|
|
|||
|
|
@ -24,32 +24,6 @@
|
|||
$ref: "#/components/responses/Unauthorized"
|
||||
"500":
|
||||
$ref: "#/components/responses/InternalServerError"
|
||||
/api/v4/agents/status:
|
||||
get:
|
||||
tags:
|
||||
- agents
|
||||
summary: Get agents bridge status
|
||||
description: >
|
||||
Retrieve the status of the AI plugin bridge.
|
||||
Returns availability boolean and a reason code if unavailable.
|
||||
|
||||
##### Permissions
|
||||
|
||||
Must be authenticated.
|
||||
|
||||
__Minimum server version__: 11.2
|
||||
operationId: GetAgentsStatus
|
||||
responses:
|
||||
"200":
|
||||
description: Status retrieved successfully
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/AgentsIntegrityResponse"
|
||||
"401":
|
||||
$ref: "#/components/responses/Unauthorized"
|
||||
"500":
|
||||
$ref: "#/components/responses/InternalServerError"
|
||||
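For reference, the endpoint described above can be exercised with a plain authenticated GET. A hedged example; the server URL and token are placeholders:

# Placeholders only: substitute your server URL and a valid access token.
curl -s -H "Authorization: Bearer ${MM_TOKEN}" \
  "https://your-mattermost-server.example.com/api/v4/agents/status"
# Expected shape per the schema above: {"available": true} or {"available": false, "reason": "..."}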
/api/v4/llmservices:
|
||||
get:
|
||||
tags:
|
||||
|
|
|
|||
|
|
@ -1,54 +0,0 @@
|
|||
/api/v4/ai/agents:
|
||||
get:
|
||||
tags:
|
||||
- ai
|
||||
summary: Get available AI agents
|
||||
description: >
|
||||
Retrieve all available AI agents from the AI plugin's bridge API.
|
||||
If a user ID is provided, only agents accessible to that user are returned.
|
||||
|
||||
##### Permissions
|
||||
|
||||
Must be authenticated.
|
||||
|
||||
__Minimum server version__: 11.2
|
||||
operationId: GetAIAgents
|
||||
responses:
|
||||
"200":
|
||||
description: AI agents retrieved successfully
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/AgentsResponse"
|
||||
"401":
|
||||
$ref: "#/components/responses/Unauthorized"
|
||||
"500":
|
||||
$ref: "#/components/responses/InternalServerError"
|
||||
/api/v4/ai/services:
|
||||
get:
|
||||
tags:
|
||||
- ai
|
||||
summary: Get available AI services
|
||||
description: >
|
||||
Retrieve all available AI services from the AI plugin's bridge API.
|
||||
If a user ID is provided, only services accessible to that user
|
||||
(via their permitted bots) are returned.
|
||||
|
||||
##### Permissions
|
||||
|
||||
Must be authenticated.
|
||||
|
||||
__Minimum server version__: 11.2
|
||||
operationId: GetAIServices
|
||||
responses:
|
||||
"200":
|
||||
description: AI services retrieved successfully
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ServicesResponse"
|
||||
"401":
|
||||
$ref: "#/components/responses/Unauthorized"
|
||||
"500":
|
||||
$ref: "#/components/responses/InternalServerError"
|
||||
|
||||
|
|
@ -606,36 +606,18 @@
|
|||
summary: Patch a channel
|
||||
description: >
|
||||
Partially update a channel by providing only the fields you want to
|
||||
update. Omitted fields will not be updated. At least one of the allowed
|
||||
fields must be provided.
|
||||
|
||||
**Public and private channels:** Can update `name`, `display_name`,
|
||||
`purpose`, `header`, `group_constrained`, `autotranslation`, and
|
||||
`banner_info` (subject to permissions and channel type).
|
||||
|
||||
**Direct and group message channels:** Only `header` and (when not
|
||||
restricted by config) `autotranslation` can be updated; the caller
|
||||
must be a channel member. Updating `name`, `display_name`, or `purpose`
|
||||
is not allowed.
|
||||
|
||||
The default channel (e.g. Town Square) cannot have its `name` changed.
|
||||
update. Omitted fields will not be updated. The fields that can be
|
||||
updated are defined in the request body, all other provided fields will
|
||||
be ignored.
|
||||
|
||||
##### Permissions
|
||||
|
||||
- **Public channel:** For property updates (name, display_name, purpose, header, group_constrained),
|
||||
`manage_public_channel_properties` is required. For `autotranslation`, `manage_public_channel_auto_translation`
|
||||
is required. For `banner_info`, `manage_public_channel_banner` is required (Channel Banner feature and
|
||||
Enterprise license required).
|
||||
- **Private channel:** For property updates, `manage_private_channel_properties` is required. For
|
||||
`autotranslation`, `manage_private_channel_auto_translation` is required. For `banner_info`,
|
||||
`manage_private_channel_banner` is required (Channel Banner feature and Enterprise license required).
|
||||
- **Direct or group message channel:** Must be a member of the channel; only `header` and (when allowed)
|
||||
`autotranslation` can be updated.
|
||||
If updating a public channel, `manage_public_channel_members` permission is required. If updating a private channel, `manage_private_channel_members` permission is required.
|
||||
operationId: PatchChannel
|
||||
parameters:
|
||||
- name: channel_id
|
||||
in: path
|
||||
description: Channel ID
|
||||
description: Channel GUID
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
|
|
@ -648,34 +630,20 @@
|
|||
name:
|
||||
type: string
|
||||
description: The unique handle for the channel, will be present in the
|
||||
channel URL. Cannot be updated for direct or group message channels.
|
||||
Cannot be changed for the default channel (e.g. Town Square).
|
||||
channel URL
|
||||
display_name:
|
||||
type: string
|
||||
description: The non-unique UI name for the channel. Cannot be updated
|
||||
for direct or group message channels.
|
||||
description: The non-unique UI name for the channel
|
||||
purpose:
|
||||
type: string
|
||||
description: A short description of the purpose of the channel. Cannot
|
||||
be updated for direct or group message channels.
|
||||
description: A short description of the purpose of the channel
|
||||
header:
|
||||
type: string
|
||||
description: Markdown-formatted text to display in the header of the
|
||||
channel
|
||||
group_constrained:
|
||||
type: boolean
|
||||
description: When true, only members of the linked LDAP groups can join
|
||||
the channel. Only applicable to public and private channels.
|
||||
autotranslation:
|
||||
type: boolean
|
||||
description: Enable or disable automatic message translation in the
|
||||
channel. Requires the auto-translation feature and appropriate
|
||||
channel permission. May be restricted for direct and group message
|
||||
channels by server configuration.
|
||||
banner_info:
|
||||
$ref: "#/components/schemas/ChannelBanner"
|
||||
description: Channel patch object; include only the fields to update. At least
|
||||
one field must be provided.
|
||||
description: Channel object to be updated
|
||||
required: true
|
||||
responses:
|
||||
"200":
|
||||
|
|
@ -1632,61 +1600,6 @@
|
|||
$ref: "#/components/responses/Forbidden"
|
||||
"404":
|
||||
$ref: "#/components/responses/NotFound"
|
||||
"/api/v4/channels/{channel_id}/members/{user_id}/autotranslation":
|
||||
put:
|
||||
tags:
|
||||
- channels
|
||||
summary: Update channel member autotranslation setting
|
||||
description: >
|
||||
Update a user's autotranslation setting for a channel. This controls whether
|
||||
messages in the channel should not be automatically translated for the user.
|
||||
By default, autotranslations are enabled for all users if the channel is enabled
|
||||
for autotranslation.
|
||||
|
||||
##### Permissions
|
||||
|
||||
Must be logged in as the user or have `edit_other_users` permission.
|
||||
operationId: UpdateChannelMemberAutotranslation
|
||||
parameters:
|
||||
- name: channel_id
|
||||
in: path
|
||||
description: Channel GUID
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: user_id
|
||||
in: path
|
||||
description: User GUID
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
required:
|
||||
- autotranslation_disabled
|
||||
properties:
|
||||
autotranslation_disabled:
|
||||
type: boolean
|
||||
description: Whether to disable autotranslation for the user in this channel
|
||||
required: true
|
||||
responses:
|
||||
"200":
|
||||
description: Channel member autotranslation setting update successful
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/StatusOK"
|
||||
"400":
|
||||
$ref: "#/components/responses/BadRequest"
|
||||
"401":
|
||||
$ref: "#/components/responses/Unauthorized"
|
||||
"403":
|
||||
$ref: "#/components/responses/Forbidden"
|
||||
"404":
|
||||
$ref: "#/components/responses/NotFound"
|
||||
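A hedged example of calling the endpoint above; the IDs, server URL, and token are placeholders:

# Placeholders only; requires being the user in question or having edit_other_users.
curl -s -X PUT -H "Authorization: Bearer ${MM_TOKEN}" \
  -H "Content-Type: application/json" \
  -d '{"autotranslation_disabled": true}' \
  "https://your-mattermost-server.example.com/api/v4/channels/${CHANNEL_ID}/members/${USER_ID}/autotranslation"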
"/api/v4/channels/members/{user_id}/view":
|
||||
post:
|
||||
tags:
|
||||
|
|
|
|||
|
|
@ -360,36 +360,6 @@
|
|||
$ref: "#/components/responses/Forbidden"
|
||||
"501":
|
||||
$ref: "#/components/responses/NotImplemented"
|
||||
/api/v4/cloud/check-cws-connection:
|
||||
get:
|
||||
tags:
|
||||
- cloud
|
||||
summary: Check CWS connection
|
||||
description: >
|
||||
Checks whether the Customer Web Server (CWS) is reachable from this instance.
|
||||
Used to detect if the deployment is air-gapped.
|
||||
|
||||
##### Permissions
|
||||
|
||||
No permissions required.
|
||||
|
||||
__Minimum server version__: 5.28
|
||||
__Note:__ This is intended for internal use and is subject to change.
|
||||
operationId: CheckCWSConnection
|
||||
responses:
|
||||
"200":
|
||||
description: CWS connection status returned successfully
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
description: Connection status - "available" if CWS is reachable, "unavailable" if not
|
||||
enum:
|
||||
- available
|
||||
- unavailable
|
||||
/api/v4/cloud/webhook:
|
||||
post:
|
||||
tags:
|
||||
|
|
|
|||
|
|
@ -83,17 +83,6 @@
|
|||
saml:
|
||||
type: string
|
||||
description: "SAML attribute for syncing"
|
||||
protected:
|
||||
type: boolean
|
||||
description: "If true, the field is read-only and cannot be modified."
|
||||
source_plugin_id:
|
||||
type: string
|
||||
description: "The ID of the plugin that created this field. This attribute cannot be changed."
|
||||
access_mode:
|
||||
type: string
|
||||
description: "Access mode of the field"
|
||||
enum: ["", "source_only", "shared_only"]
|
||||
default: ""
|
||||
responses:
|
||||
"201":
|
||||
description: Custom Profile Attribute field creation successful
|
||||
|
|
@ -119,9 +108,6 @@
|
|||
updated. The fields that can be updated are defined in the
|
||||
request body, all other provided fields will be ignored.
|
||||
|
||||
**Note:** Fields with `attrs.protected = true` cannot be
|
||||
modified and will return an error.
|
||||
|
||||
_This endpoint is experimental._
|
||||
|
||||
__Minimum server version__: 10.5
|
||||
|
|
@ -181,17 +167,6 @@
|
|||
saml:
|
||||
type: string
|
||||
description: "SAML attribute for syncing"
|
||||
protected:
|
||||
type: boolean
|
||||
description: "If true, the field is read-only and cannot be modified."
|
||||
source_plugin_id:
|
||||
type: string
|
||||
description: "The ID of the plugin that created this field. This attribute cannot be changed."
|
||||
access_mode:
|
||||
type: string
|
||||
description: "Access mode of the field"
|
||||
enum: ["", "source_only", "shared_only"]
|
||||
default: ""
|
||||
responses:
|
||||
"200":
|
||||
description: Custom Profile Attribute field patch successful
|
||||
|
|
@ -254,9 +229,6 @@
|
|||
that can be updated are defined in the request body, all other
|
||||
provided fields will be ignored.
|
||||
|
||||
**Note:** Values for fields with `attrs.protected = true` cannot be
|
||||
updated and will return an error.
|
||||
|
||||
_This endpoint is experimental._
|
||||
|
||||
__Minimum server version__: 10.5
|
||||
|
|
@ -384,9 +356,6 @@
|
|||
description: |
|
||||
Update Custom Profile Attribute field values for a specific user.
|
||||
|
||||
**Note:** Values for fields with `attrs.protected = true` cannot be
|
||||
updated and will return an error.
|
||||
|
||||
_This endpoint is experimental._
|
||||
|
||||
__Minimum server version__: 11
|
||||
|
|
|
|||
|
|
@ -3704,7 +3704,7 @@ components:
|
|||
type: array
|
||||
description: list of users participating in this thread. only includes IDs unless 'extended' was set to 'true'
|
||||
items:
|
||||
$ref: "#/components/schemas/User"
|
||||
$ref: "#/components/schemas/Post"
|
||||
post:
|
||||
$ref: "#/components/schemas/Post"
|
||||
RelationalIntegrityCheckData:
|
||||
|
|
@ -4048,15 +4048,6 @@ components:
|
|||
items:
|
||||
$ref: "#/components/schemas/BridgeServiceInfo"
|
||||
description: List of available LLM services
|
||||
AgentsIntegrityResponse:
|
||||
type: object
|
||||
properties:
|
||||
available:
|
||||
type: boolean
|
||||
description: Whether the AI plugin bridge is available
|
||||
reason:
|
||||
type: string
|
||||
description: Reason code if not available (translation ID)
|
||||
PostAcknowledgement:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -4642,83 +4633,6 @@ components:
|
|||
active:
|
||||
type: boolean
|
||||
description: The active status of the policy.
|
||||
Recap:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: Unique identifier for the recap
|
||||
user_id:
|
||||
type: string
|
||||
description: ID of the user who created the recap
|
||||
title:
|
||||
type: string
|
||||
description: AI-generated title for the recap (max 5 words)
|
||||
create_at:
|
||||
type: integer
|
||||
format: int64
|
||||
description: The time in milliseconds the recap was created
|
||||
update_at:
|
||||
type: integer
|
||||
format: int64
|
||||
description: The time in milliseconds the recap was last updated
|
||||
delete_at:
|
||||
type: integer
|
||||
format: int64
|
||||
description: The time in milliseconds the recap was deleted
|
||||
read_at:
|
||||
type: integer
|
||||
format: int64
|
||||
description: The time in milliseconds the recap was marked as read
|
||||
total_message_count:
|
||||
type: integer
|
||||
description: Total number of messages summarized across all channels
|
||||
status:
|
||||
type: string
|
||||
enum: [pending, processing, completed, failed]
|
||||
description: Current status of the recap job
|
||||
bot_id:
|
||||
type: string
|
||||
description: ID of the AI agent/bot used to generate this recap
|
||||
channels:
|
||||
type: array
|
||||
items:
|
||||
$ref: "#/components/schemas/RecapChannel"
|
||||
description: List of channel summaries included in this recap
|
||||
RecapChannel:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: Unique identifier for the recap channel
|
||||
recap_id:
|
||||
type: string
|
||||
description: ID of the parent recap
|
||||
channel_id:
|
||||
type: string
|
||||
description: ID of the channel that was summarized
|
||||
channel_name:
|
||||
type: string
|
||||
description: Display name of the channel
|
||||
highlights:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Key discussion points and important information from the channel
|
||||
action_items:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Tasks, todos, and action items mentioned in the channel
|
||||
source_post_ids:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: IDs of the posts used to generate this summary
|
||||
create_at:
|
||||
type: integer
|
||||
format: int64
|
||||
description: The time in milliseconds the recap channel was created
|
||||
externalDocs:
|
||||
description: Find out more about Mattermost
|
||||
url: 'https://about.mattermost.com'
|
||||
|
|
|
|||
|
|
@ -462,10 +462,8 @@ tags:
|
|||
description: Endpoints related to metrics, including the Client Performance Monitoring feature.
|
||||
- name: audit_logs
|
||||
description: Endpoints for managing audit log certificates and configuration.
|
||||
- name: recaps
|
||||
description: Endpoints for creating and managing AI-powered channel recaps that summarize unread messages.
|
||||
- name: agents
|
||||
description: Endpoints for interacting with AI agents and LLM services.
|
||||
- name: ai
|
||||
description: Endpoints for interacting with AI agents and services.
|
||||
servers:
|
||||
- url: "{your-mattermost-url}"
|
||||
variables:
|
||||
|
|
|
|||
|
|
@ -1,240 +0,0 @@
|
|||
"/api/v4/recaps":
|
||||
post:
|
||||
tags:
|
||||
- recaps
|
||||
- ai
|
||||
summary: Create a channel recap
|
||||
description: >
|
||||
Create a new AI-powered recap for the specified channels. The recap will
|
||||
summarize unread messages in the selected channels, extracting highlights
|
||||
and action items. This creates a background job that processes the recap
|
||||
        asynchronously. The recap is created for the authenticated user.


        ##### Permissions

        Must be authenticated. User must be a member of all specified channels.

        __Minimum server version__: 11.2
      operationId: CreateRecap
      requestBody:
        content:
          application/json:
            schema:
              type: object
              required:
                - channel_ids
                - title
                - agent_id
              properties:
                title:
                  type: string
                  description: Title for the recap
                channel_ids:
                  type: array
                  items:
                    type: string
                  description: List of channel IDs to include in the recap
                  minItems: 1
                agent_id:
                  type: string
                  description: ID of the AI agent to use for generating the recap
        description: Recap creation request
        required: true
      responses:
        "201":
          description: Recap creation successful. The recap will be processed asynchronously.
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/Recap"
        "400":
          $ref: "#/components/responses/BadRequest"
        "401":
          $ref: "#/components/responses/Unauthorized"
        "403":
          $ref: "#/components/responses/Forbidden"
    get:
      tags:
        - recaps
        - ai
      summary: Get current user's recaps
      description: >
        Get a paginated list of recaps created by the authenticated user.

        ##### Permissions

        Must be authenticated.

        __Minimum server version__: 11.2
      operationId: GetRecapsForUser
      parameters:
        - name: page
          in: query
          description: The page to select.
          schema:
            type: integer
            default: 0
        - name: per_page
          in: query
          description: The number of recaps per page.
          schema:
            type: integer
            default: 60
      responses:
        "200":
          description: Recaps retrieval successful
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: "#/components/schemas/Recap"
        "400":
          $ref: "#/components/responses/BadRequest"
        "401":
          $ref: "#/components/responses/Unauthorized"
  "/api/v4/recaps/{recap_id}":
    get:
      tags:
        - recaps
        - ai
      summary: Get a specific recap
      description: >
        Get a recap by its ID, including all channel summaries. Only the authenticated
        user who created the recap can retrieve it.

        ##### Permissions

        Must be authenticated. Can only retrieve recaps created by the current user.

        __Minimum server version__: 11.2
      operationId: GetRecap
      parameters:
        - name: recap_id
          in: path
          description: Recap GUID
          required: true
          schema:
            type: string
      responses:
        "200":
          description: Recap retrieval successful
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/Recap"
        "401":
          $ref: "#/components/responses/Unauthorized"
        "403":
          $ref: "#/components/responses/Forbidden"
        "404":
          $ref: "#/components/responses/NotFound"
    delete:
      tags:
        - recaps
        - ai
      summary: Delete a recap
      description: >
        Delete a recap by its ID. Only the authenticated user who created the recap
        can delete it.

        ##### Permissions

        Must be authenticated. Can only delete recaps created by the current user.

        __Minimum server version__: 11.2
      operationId: DeleteRecap
      parameters:
        - name: recap_id
          in: path
          description: Recap GUID
          required: true
          schema:
            type: string
      responses:
        "200":
          description: Recap deletion successful
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/StatusOK"
        "401":
          $ref: "#/components/responses/Unauthorized"
        "403":
          $ref: "#/components/responses/Forbidden"
        "404":
          $ref: "#/components/responses/NotFound"
  "/api/v4/recaps/{recap_id}/read":
    post:
      tags:
        - recaps
        - ai
      summary: Mark a recap as read
      description: >
        Mark a recap as read by the authenticated user. This updates the recap's
        read status and timestamp.

        ##### Permissions

        Must be authenticated. Can only mark recaps created by the current user as read.

        __Minimum server version__: 11.2
      operationId: MarkRecapAsRead
      parameters:
        - name: recap_id
          in: path
          description: Recap GUID
          required: true
          schema:
            type: string
      responses:
        "200":
          description: Recap marked as read successfully
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/Recap"
        "401":
          $ref: "#/components/responses/Unauthorized"
        "403":
          $ref: "#/components/responses/Forbidden"
        "404":
          $ref: "#/components/responses/NotFound"
  "/api/v4/recaps/{recap_id}/regenerate":
    post:
      tags:
        - recaps
        - ai
      summary: Regenerate a recap
      description: >
        Regenerate a recap by its ID. This creates a new background job to
        regenerate the AI-powered recap with the latest messages from the
        specified channels.

        ##### Permissions

        Must be authenticated. Can only regenerate recaps created by the current user.

        __Minimum server version__: 11.2
      operationId: RegenerateRecap
      parameters:
        - name: recap_id
          in: path
          description: Recap GUID
          required: true
          schema:
            type: string
      responses:
        "200":
          description: Recap regeneration initiated successfully
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/Recap"
        "401":
          $ref: "#/components/responses/Unauthorized"
        "403":
          $ref: "#/components/responses/Forbidden"
        "404":
          $ref: "#/components/responses/NotFound"
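A minimal sketch of how a client might exercise the recap endpoints above with curl. Everything here is an assumption for illustration: the base URL, token, channel and agent IDs are placeholders, and the Recap schema is not shown in this excerpt, so the ".id" field on the response is assumed.

MM_URL="https://your-mattermost.example.com"   # assumed base URL
MM_TOKEN="<personal-access-token>"             # assumed token

# Create a recap for two channels (returns 201; processing happens asynchronously)
RECAP_ID=$(curl -s -X POST "$MM_URL/api/v4/recaps" \
  -H "Authorization: Bearer $MM_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"title":"Weekly recap","channel_ids":["<channel-id-1>","<channel-id-2>"],"agent_id":"<agent-id>"}' \
  | jq -r '.id')                               # assumes the Recap object carries an "id"

# Fetch the recap once the summaries are ready, then mark it as read
curl -s -H "Authorization: Bearer $MM_TOKEN" "$MM_URL/api/v4/recaps/$RECAP_ID" | jq '.'
curl -s -X POST -H "Authorization: Bearer $MM_TOKEN" "$MM_URL/api/v4/recaps/$RECAP_ID/read"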
@@ -1366,18 +1366,6 @@
           required: true
           schema:
             type: string
-        - name: graceful
-          in: query
-          description: If true, returns an array with both successful invites and errors instead of aborting on first error.
-          required: false
-          schema:
-            type: boolean
-        - name: guest_magic_link
-          in: query
-          description: If true, invites guests with magic link (passwordless) authentication. Requires guest magic link feature to be enabled.
-          required: false
-          schema:
-            type: boolean
       requestBody:
         content:
           application/json:
@@ -27,9 +27,6 @@
                 password:
                   description: The password used for email authentication.
                   type: string
-                magic_link_token:
-                  description: Magic link token for passwordless guest authentication. When provided, authenticates the user using the magic link token instead of password. Requires guest magic link feature to be enabled.
-                  type: string
         description: User authentication object
         required: true
       responses:
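For reference, a hedged sketch of what the login request documented in the hunk above looks like, assuming the standard /api/v4/users/login endpoint. The magic_link_token variant only applies on servers where the guest magic link feature is enabled, and the token value is a placeholder.

# Password login (login_id + password)
curl -si -X POST "$MM_URL/api/v4/users/login" \
  -H "Content-Type: application/json" \
  -d '{"login_id":"guest@example.com","password":"<password>"}'

# Passwordless guest login using the magic_link_token field shown above (assumed flow)
curl -si -X POST "$MM_URL/api/v4/users/login" \
  -H "Content-Type: application/json" \
  -d '{"login_id":"guest@example.com","magic_link_token":"<token-from-invite-email>"}'
# The session token is returned in the Token response header on success.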
@@ -58,7 +58,7 @@ mme2e_wait_image () {
   IMAGE_NAME=${1?}
   RETRIES_LEFT=${2:-1}
   RETRIES_INTERVAL=${3:-10}
-  mme2e_wait_command_success "docker pull --platform linux/amd64 $IMAGE_NAME" "Waiting for docker image ${IMAGE_NAME} to be available" "$RETRIES_LEFT" "$RETRIES_INTERVAL"
+  mme2e_wait_command_success "docker pull $IMAGE_NAME" "Waiting for docker image ${IMAGE_NAME} to be available" "$RETRIES_LEFT" "$RETRIES_INTERVAL"
 }
 mme2e_is_token_in_list() {
   local TOKEN=$1
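A small usage sketch of the helper above, grounded in its signature (image, retries, interval). The helper is assumed to be sourced from the CI helper library (. .e2erc), and the image tag is a placeholder.

# Wait up to 12 x 10s for the server image to become pullable before generating the compose file
mme2e_wait_image "mattermostdevelopment/mattermost-enterprise-edition:master" 12 10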
@@ -98,7 +98,7 @@ case "${TEST:-$TEST_DEFAULT}" in
   cypress )
     export TEST_FILTER_DEFAULT='--stage=@prod --group=@smoke' ;;
   playwright )
-    export TEST_FILTER_DEFAULT='--grep @smoke' ;;
+    export TEST_FILTER_DEFAULT='functional/system_console/system_users/actions.spec.ts' ;;
   * )
     export TEST_FILTER_DEFAULT='' ;;
 esac
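The code that consumes TEST_FILTER_DEFAULT is not part of this hunk; a hedged sketch of the usual pattern, assuming the runner falls back to the per-runner default only when TEST_FILTER is unset:

# Assumed consumption pattern (not shown in this diff)
TEST_FILTER="${TEST_FILTER:-$TEST_FILTER_DEFAULT}"
echo "Running $TEST with filter: $TEST_FILTER"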
@@ -14,16 +14,13 @@ cd "$(dirname "$0")"
 : ${WEBHOOK_URL:-}    # Optional. Mattermost webhook to post the report back to
 : ${RELEASE_DATE:-}   # Optional. If set, its value will be included in the report as the release date of the tested artifact

 if [ "$TYPE" = "PR" ]; then
-  # Try to determine PR number: first from PR_NUMBER, then from BRANCH (server-pr-XXXX format)
-  if [ -n "${PR_NUMBER:-}" ]; then
-    export PULL_REQUEST="https://github.com/mattermost/mattermost/pull/${PR_NUMBER}"
-  elif grep -qE '^server-pr-[0-9]+$' <<<"${BRANCH:-}"; then
-    PR_NUMBER="${BRANCH##*-}"
-    export PULL_REQUEST="https://github.com/mattermost/mattermost/pull/${PR_NUMBER}"
-  else
-    mme2e_log "Warning: TYPE=PR but cannot determine PR number from PR_NUMBER or BRANCH. Falling back to TYPE=NONE."
-    TYPE=NONE
-  fi
+  # In this case, we expect the PR number to be present in the BRANCH variable
+  BRANCH_REGEX='^server-pr-[0-9]+$'
+  if ! grep -qE "${BRANCH_REGEX}" <<<"$BRANCH"; then
+    mme2e_log "Error: when using TYPE=PR, the BRANCH variable should respect regex '$BRANCH_REGEX'. Aborting." >&2
+    exit 1
+  fi
+  export PULL_REQUEST="https://github.com/mattermost/mattermost/pull/${BRANCH##*-}"
 fi

 # Env vars used during the test. Their values will be included in the report
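Both variants above derive the PR number with the same parameter expansion; a standalone sketch with an example branch name (the value is assumed):

BRANCH="server-pr-12345"                       # example value, assumed format
if grep -qE '^server-pr-[0-9]+$' <<<"$BRANCH"; then
  echo "PR number: ${BRANCH##*-}"              # strips everything up to the last '-': 12345
fi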
@@ -48,7 +48,6 @@ generate_docker_compose_file() {
 services:
   server:
     image: \${SERVER_IMAGE}
-    platform: linux/amd64
     restart: always
     env_file:
      - "./.env.server"
@@ -261,7 +260,7 @@ $(if mme2e_is_token_in_list "webhook-interactions" "$ENABLED_DOCKER_SERVICES"; t
   # shellcheck disable=SC2016
   echo '
   webhook-interactions:
-    image: node:${NODE_VERSION_REQUIRED}
+    image: mattermostdevelopment/mirrored-node:${NODE_VERSION_REQUIRED}
     command: sh -c "npm install --global --legacy-peer-deps && exec node webhook_serve.js"
     healthcheck:
       test: ["CMD", "curl", "-s", "-o/dev/null", "127.0.0.1:3000"]
@@ -276,21 +275,11 @@ $(if mme2e_is_token_in_list "webhook-interactions" "$ENABLED_DOCKER_SERVICES"; t
 fi)

 $(if mme2e_is_token_in_list "playwright" "$ENABLED_DOCKER_SERVICES"; then
   # shellcheck disable=SC2016
   echo '
   playwright:
-    image: mcr.microsoft.com/playwright:v1.58.0-noble
-    entrypoint: ["/bin/bash", "-c"]
-    command:
-      - |
-        # Install Node.js based on .nvmrc
-        NODE_VERSION=$$(cat /mattermost/.nvmrc)
-        echo "Installing Node.js $${NODE_VERSION}..."
-        curl -fsSL https://deb.nodesource.com/setup_$${NODE_VERSION%%.*}.x | bash -
-        apt-get install -y nodejs
-        echo "Node.js version: $$(node --version)"
-        # Wait for termination signal
-        until [ -f /var/run/mm_terminate ]; do sleep 5; done
+    image: mcr.microsoft.com/playwright:v1.56.0-noble
+    command: ["until [ -f /var/run/mm_terminate ]; do sleep 5; done"]
     env_file:
      - "./.env.playwright"
     environment:
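The block removed above installs Node.js inside the Playwright container based on the repo's .nvmrc. The same pattern written as a standalone script (de-escaping the compose $$ quoting; the /mattermost mount path is taken from the removed lines):

NODE_VERSION=$(cat /mattermost/.nvmrc)     # e.g. a value like "22.x"; exact contents not shown here
echo "Installing Node.js ${NODE_VERSION}..."
curl -fsSL "https://deb.nodesource.com/setup_${NODE_VERSION%%.*}.x" | bash -
apt-get install -y nodejs
node --version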
@@ -1,101 +0,0 @@
#!/bin/bash
# shellcheck disable=SC2038
# Run specific spec files
# Usage: SPEC_FILES="path/to/spec1.ts,path/to/spec2.ts" make start-server run-specs

set -e -u -o pipefail
cd "$(dirname "$0")"
. .e2erc

if [ -z "${SPEC_FILES:-}" ]; then
  mme2e_log "Error: SPEC_FILES environment variable is required"
  mme2e_log "Usage: SPEC_FILES=\"path/to/spec.ts\" make start-server run-specs"
  exit 1
fi

mme2e_log "Running spec files: $SPEC_FILES"

case $TEST in
  cypress)
    mme2e_log "Running Cypress with specified specs"
    # Initialize cypress report directory
    ${MME2E_DC_SERVER} exec -T -u "$MME2E_UID" -- cypress bash <<EOF
rm -rf logs results
mkdir -p logs
mkdir -p results/junit
mkdir -p results/mochawesome-report/json/tests
touch results/junit/empty.xml
echo '<?xml version="1.0" encoding="UTF-8"?>' > results/junit/empty.xml
EOF

    # Run cypress with specific spec files and mochawesome reporter
    LOGFILE_SUFFIX="${CI_BASE_URL//\//_}_specs"
    ${MME2E_DC_SERVER} exec -T -u "$MME2E_UID" -- cypress npx cypress run \
      --spec "$SPEC_FILES" \
      --reporter cypress-multi-reporters \
      --reporter-options configFile=reporter-config.json \
      | tee "../cypress/logs/${LOGFILE_SUFFIX}_cypress.log" || true

    # Collect run results
    if [ -d ../cypress/results/mochawesome-report/json/tests/ ]; then
      cat >../cypress/results/summary.json <<EOF
{
  "passed": $(find ../cypress/results/mochawesome-report/json/tests/ -name '*.json' | xargs -l jq -r '.stats.passes' | jq -s add),
  "failed": $(find ../cypress/results/mochawesome-report/json/tests/ -name '*.json' | xargs -l jq -r '.stats.failures' | jq -s add),
  "failed_expected": 0
}
EOF
    fi

    # Collect server logs
    ${MME2E_DC_SERVER} logs --no-log-prefix -- server >"../cypress/logs/${LOGFILE_SUFFIX}_mattermost.log" 2>&1
    ;;
  playwright)
    mme2e_log "Running Playwright with specified specs"
    # Convert comma-separated to space-separated for playwright
    SPEC_ARGS=$(echo "$SPEC_FILES" | tr ',' ' ')

    # Initialize playwright report and logs directory
    ${MME2E_DC_SERVER} exec -T -u "$MME2E_UID" -- playwright bash <<EOF
cd e2e-tests/playwright
rm -rf logs results storage_state
mkdir -p logs results
touch logs/mattermost.log
EOF

    # Install dependencies
    mme2e_log "Prepare Playwright: install dependencies"
    ${MME2E_DC_SERVER} exec -T -u "$MME2E_UID" -- playwright bash <<EOF
cd webapp/
npm install --cache /tmp/empty-cache
cd ../e2e-tests/playwright
npm install --cache /tmp/empty-cache
EOF

    # Run playwright with specific spec files
    LOGFILE_SUFFIX="${CI_BASE_URL//\//_}_specs"
    ${MME2E_DC_SERVER} exec -T -u "$MME2E_UID" -- playwright bash -c "cd e2e-tests/playwright && npm run test:ci -- $SPEC_ARGS" | tee "../playwright/logs/${LOGFILE_SUFFIX}_playwright.log" || true

    # Collect run results (if results.json exists)
    if [ -f ../playwright/results/reporter/results.json ]; then
      jq -f /dev/stdin ../playwright/results/reporter/results.json >../playwright/results/summary.json <<EOF
{
  passed: .stats.expected,
  failed: .stats.unexpected,
  failed_expected: (.stats.skipped + .stats.flaky)
}
EOF
      mme2e_log "Results file found and summary generated"
    fi

    # Collect server logs
    ${MME2E_DC_SERVER} logs --no-log-prefix -- server >"../playwright/logs/${LOGFILE_SUFFIX}_mattermost.log" 2>&1
    ;;
  *)
    mme2e_log "Error, unsupported value for TEST: $TEST" >&2
    mme2e_log "Aborting" >&2
    exit 1
    ;;
esac

mme2e_log "Spec run complete"
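The deleted helper above is driven entirely through environment variables and make; a usage sketch on the side of this diff that still ships it (the spec path is a placeholder):

# Run a single Cypress spec against a locally started server
TEST=cypress \
SPEC_FILES="tests/integration/channels/messaging/some_message_spec.js" \
  make start-server run-specs stop-server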
@@ -9,7 +9,7 @@ clean:
	rm -fv .ci/server.yml
	rm -fv .ci/.env.{server,dashboard,cypress,playwright}

-.PHONY: generate-server start-server run-test run-specs stop-server restart-server
+.PHONY: generate-server start-server run-test stop-server restart-server
 generate-server:
	bash ./.ci/server.generate.sh
 start-server: generate-server

@@ -17,8 +17,6 @@ start-server: generate-server
	bash ./.ci/server.prepare.sh
 run-test:
	bash ./.ci/server.run_test.sh
-run-specs:
-	bash ./.ci/server.run_specs.sh
 stop-server: generate-server
	bash ./.ci/server.stop.sh
 restart-server: stop-server start-server
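For orientation, the typical local loop these targets support, assuming the Makefile lives in e2e-tests/ next to the .ci scripts it invokes:

make start-server            # generate-server + bring up the dockerized server
TEST=cypress make run-test   # run the filtered suite via .ci/server.run_test.sh
make stop-server             # tear down; 'make restart-server' chains stop + start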
223	e2e-tests/cypress/package-lock.json (generated)

[Generated lockfile diff, 223 changed lines, collapsed here. The recoverable substance: a "mysql": "2.18.1" entry is present on one side together with its transitive entries (bignumber.js 9.0.0, core-util-is 1.0.3, isarray 1.0.0, process-nextick-args 2.0.1, readable-stream 2.3.7, safe-buffer 5.1.2, sqlstring 2.3.1), and a large number of existing babel/webpack/eslint-related entries differ only by the presence of the "peer": true flag.]
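The lockfile delta above mirrors the one-line devDependency change in package.json shown in the next hunk; roughly, it is the kind of change produced by a command like the following, run in e2e-tests/cypress (assumed location):

npm install --save-dev --save-exact mysql@2.18.1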
|
|
|||
|
|
@ -69,6 +69,7 @@
|
|||
"mochawesome-merge": "4.4.1",
|
||||
"mochawesome-report-generator": "6.2.0",
|
||||
"moment-timezone": "0.6.0",
|
||||
"mysql": "2.18.1",
|
||||
"path": "0.12.7",
|
||||
"pdf-parse": "1.1.1",
|
||||
"pg": "8.16.3",
|
||||
|
|
@ -100,7 +101,6 @@
|
|||
"start:webhook": "node webhook_serve.js",
|
||||
"pretest": "npm run clean",
|
||||
"test": "cross-env TZ=Etc/UTC cypress run",
|
||||
"test:smoke": "node run_tests.js --stage='@prod' --group='@smoke'",
|
||||
"test:ci": "node run_tests.js",
|
||||
"uniq-meta": "grep -r \"^// $META:\" cypress | grep -ow '@\\w*' | sort | uniq",
|
||||
"check": "eslint .",
|
||||
|
|
|
|||
|
|
@@ -1,15 +0,0 @@
{
  "reporterEnabled": "mocha-junit-reporter, mochawesome",
  "mochaJunitReporterReporterOptions": {
    "mochaFile": "results/junit/test_results[hash].xml",
    "toConsole": false
  },
  "mochawesomeReporterOptions": {
    "reportDir": "results/mochawesome-report",
    "reportFilename": "json/tests/[name]",
    "quiet": true,
    "overwrite": false,
    "html": false,
    "json": true
  }
}
|
||||
|
|
@@ -7,6 +7,7 @@
 // - Use element ID when selecting an element. Create one if none.
 // ***************************************************************

+// Stage: @prod
 // Group: @channels @accessibility

 import {getRandomId} from '../../../utils';
|||
|
|
@@ -50,7 +50,7 @@ describe('Verify Accessibility Support in Popovers', () => {
             {ariaLabel: 'People & Body', header: 'People & Body'},
             {ariaLabel: 'Animals & Nature', header: 'Animals & Nature'},
             {ariaLabel: 'Food & Drink', header: 'Food & Drink'},
-            {ariaLabel: 'Travel & Places', header: 'Travel & Places'},
+            {ariaLabel: 'Travel Places', header: 'Travel Places'},
             {ariaLabel: 'Activities', header: 'Activities'},
             {ariaLabel: 'Objects', header: 'Objects'},
             {ariaLabel: 'Symbols', header: 'Symbols'},
|
|
|||
|
|
@ -7,6 +7,7 @@
|
|||
// - Use element ID when selecting an element. Create one if none.
|
||||
// ***************************************************************
|
||||
|
||||
// Stage: @prod
|
||||
// Group: @channels @account_setting
|
||||
|
||||
import * as TIMEOUTS from '../../../../fixtures/timeouts';
|
||||
|
|
@@ -84,7 +85,7 @@ describe('Profile > Profile Settings > Email', () => {
         cy.get('#primaryEmail').should('be.visible').click().blur();

         // * Check that the correct error message is shown.
-        cy.get('#error_primaryEmail').should('be.visible').should('have.text', 'Please enter a valid email address.');
+        cy.get('#error_primaryEmail').should('be.visible').should('have.text', 'Please enter a valid email address');
     });

     it('MM-T2067 email address already taken error', () => {
|
|
|||
|
|
@ -7,6 +7,7 @@
|
|||
// - Use element ID when selecting an element. Create one if none.
|
||||
// ***************************************************************
|
||||
|
||||
// Stage: @prod
|
||||
// Group: @channels @account_setting
|
||||
|
||||
describe('Account Settings', () => {
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@
|
|||
// - Use element ID when selecting an element. Create one if none.
|
||||
// ***************************************************************
|
||||
|
||||
// Stage: @prod
|
||||
// Group: @channels @channel
|
||||
|
||||
import * as TIMEOUTS from '../../../fixtures/timeouts';
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@
|
|||
// - Use element ID when selecting an element. Create one if none.
|
||||
// ***************************************************************
|
||||
|
||||
// Stage: @prod
|
||||
// Group: @channels @channel
|
||||
|
||||
import {getAdminAccount} from '../../../support/env';
|
||||
|
|
@@ -102,7 +103,7 @@ describe('Archived channels', () => {
     function verifyViewingArchivedChannel(channel) {
         // * Verify that we've switched to the correct channel and that the header contains the archived icon
         cy.get('#channelHeaderTitle').should('contain', channel.display_name);
-        cy.findByTestId('channel-header-archive-icon').should('be.visible');
+        cy.get('#channelHeaderInfo .icon__archive').should('be.visible');

         // * Verify that the channel is visible in the sidebar with the archived icon
         cy.get(`#sidebarItem_${channel.name}`).should('be.visible').
|
|
|||
|
|
@ -61,9 +61,7 @@ describe('Authentication', () => {
|
|||
|
||||
cy.get('#input_name').clear().type(`test${getRandomId()}`);
|
||||
|
||||
cy.get('#signup-body-card-form-check-terms-and-privacy').check();
|
||||
|
||||
cy.findByText('Create account').click();
|
||||
cy.findByText('Create Account').click();
|
||||
|
||||
// * Make sure account was created successfully and we are at the select team page
|
||||
cy.findByText('Teams you can join:', {timeout: TIMEOUTS.ONE_MIN}).should('be.visible');
|
||||
|
|
@ -115,9 +113,7 @@ describe('Authentication', () => {
|
|||
|
||||
cy.get('#input_name').clear().type(`test${getRandomId()}`);
|
||||
|
||||
cy.get('#signup-body-card-form-check-terms-and-privacy').check();
|
||||
|
||||
cy.findByText('Create account').click();
|
||||
cy.findByText('Create Account').click();
|
||||
|
||||
// * Make sure account was not created successfully
|
||||
cy.get('.AlertBanner__title').scrollIntoView().should('be.visible');
|
||||
|
|
@ -150,9 +146,7 @@ describe('Authentication', () => {
|
|||
|
||||
cy.get('#input_name').clear().type(username);
|
||||
|
||||
cy.get('#signup-body-card-form-check-terms-and-privacy').check();
|
||||
|
||||
cy.findByText('Create account').click();
|
||||
cy.findByText('Create Account').click();
|
||||
|
||||
// * Make sure account was created successfully and we are on the team joining page
|
||||
cy.findByText('Teams you can join:', {timeout: TIMEOUTS.ONE_MIN}).should('be.visible');
|
||||
|
|
|
|||
|
|
@ -61,18 +61,14 @@ describe('Authentication', () => {
|
|||
|
||||
cy.get('#input_password-input').clear().type('less');
|
||||
|
||||
cy.get('#signup-body-card-form-check-terms-and-privacy').check();
|
||||
|
||||
cy.findByText('Create account').click();
|
||||
cy.findByText('Create Account').click();
|
||||
|
||||
// * Assert the error is what is expected;
|
||||
cy.findByText('Your password must be 7-72 characters long.').should('be.visible');
|
||||
|
||||
cy.get('#input_password-input').clear().type('greaterthan7');
|
||||
|
||||
cy.get('#signup-body-card-form-check-terms-and-privacy').check();
|
||||
|
||||
cy.findByText('Create account').click();
|
||||
cy.findByText('Create Account').click();
|
||||
|
||||
// * Assert that we are not shown an MFA screen and instead a Teams You Can join page
|
||||
cy.findByText('Teams you can join:', {timeout: TIMEOUTS.ONE_MIN}).should('be.visible');
|
||||
|
|
@ -116,11 +112,9 @@ describe('Authentication', () => {
|
|||
|
||||
cy.get('#input_name').clear().type(`BestUsernameInTheWorld${getRandomId()}`);
|
||||
|
||||
cy.get('#signup-body-card-form-check-terms-and-privacy').check();
|
||||
|
||||
['NOLOWERCASE123!', 'noupppercase123!', 'NoNumber!', 'NoSymbol123'].forEach((option) => {
|
||||
cy.get('#input_password-input').clear().type(option);
|
||||
cy.findByText('Create account').click();
|
||||
cy.findByText('Create Account').click();
|
||||
|
||||
// * Assert the error is what is expected;
|
||||
cy.findByText('Your password must be 5-72 characters long and include both lowercase and uppercase letters, numbers, and special characters.').should('be.visible');
|
||||
|
|
|
|||
|
|
@ -149,11 +149,9 @@ describe('Authentication', () => {
|
|||
|
||||
cy.get('#input_password-input').type('Test123456!');
|
||||
|
||||
cy.get('#signup-body-card-form-check-terms-and-privacy').check();
|
||||
|
||||
['1user', 'te', 'user#1', 'user!1'].forEach((option) => {
|
||||
cy.get('#input_name').clear().type(option);
|
||||
cy.findByText('Create account').click();
|
||||
cy.findByText('Create Account').click();
|
||||
|
||||
// * Assert the error is what is expected;
|
||||
cy.get('.Input___error').scrollIntoView().should('be.visible');
|
||||
|
|
@ -185,9 +183,7 @@ describe('Authentication', () => {
|
|||
|
||||
cy.get('#input_name').clear().type(`Test${getRandomId()}`);
|
||||
|
||||
cy.get('#signup-body-card-form-check-terms-and-privacy').check();
|
||||
|
||||
cy.findByText('Create account').click();
|
||||
cy.findByText('Create Account').click();
|
||||
|
||||
// * Make sure account was created successfully and we are on the team joining page
|
||||
cy.findByText('Teams you can join:', {timeout: TIMEOUTS.ONE_MIN}).should('be.visible');
|
||||
|
|
@ -249,9 +245,7 @@ describe('Authentication', () => {
|
|||
|
||||
cy.get('#input_name').clear().type(`Test${getRandomId()}`);
|
||||
|
||||
cy.get('#signup-body-card-form-check-terms-and-privacy').check();
|
||||
|
||||
cy.findByText('Create account').click();
|
||||
cy.findByText('Create Account').click();
|
||||
|
||||
// * Make sure account was not created successfully
|
||||
cy.get('.AlertBanner__title').scrollIntoView().should('be.visible');
|
||||
|
|
@@ -277,7 +271,7 @@ describe('Authentication', () => {
         cy.findByText('Copy invite link').click();

         // # Input email, select member
-        cy.findByLabelText('Invite People').type(`test-${getRandomId()}@mattermost.com{downarrow}{downarrow}{enter}`, {force: true});
+        cy.findByLabelText('Add or Invite People').type(`test-${getRandomId()}@mattermost.com{downarrow}{downarrow}{enter}`, {force: true});

         // # Click invite members button
         cy.findByRole('button', {name: 'Invite'}).click({force: true});
|
|
|||
|
|
@ -7,6 +7,7 @@
|
|||
// - Use element ID when selecting an element. Create one if none.
|
||||
// ***************************************************************
|
||||
|
||||
// Stage: @prod
|
||||
// Group: @channels @messaging @benchmark
|
||||
|
||||
import {reportBenchmarkResults} from '../../../utils/benchmark';
|
||||
|
|
|
|||
|
|
@@ -207,6 +207,7 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();

cy.apiPatchChannel(testChannel.id, {
...testChannel,
purpose: 'purpose for the tests',
}).then(() => {
cy.uiGetRHS().findByText('purpose for the tests').should('be.visible');

@@ -220,44 +221,12 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();

cy.apiPatchChannel(testChannel.id, {
...testChannel,
header: 'header for the tests',
}).then(() => {
cy.uiGetRHS().findByText('header for the tests').should('be.visible');
});
});
it('should be able to rename channel from About area', () => {
// # Create a dedicated channel for renaming to avoid affecting other tests
cy.apiCreateChannel(testTeam.id, 'channel-to-rename', 'Channel To Rename', 'O').then(({channel}) => {
cy.apiAddUserToChannel(channel.id, admin.id);

// # Go to the channel
cy.visit(`/${testTeam.name}/channels/${channel.name}`);

// # Open Channel Info RHS
cy.get('#channel-info-btn').click();

// # Click edit on channel name (first Edit in About)
cy.uiGetRHS().findAllByLabelText('Edit').first().click({force: true});

// * Rename Channel modal appears
cy.findByRole('heading', {name: /rename channel/i}).should('be.visible');

// # Fill display name and URL
cy.findByPlaceholderText(/enter display name/i).clear().type('Renamed Channel');
cy.get('.url-input-button').click();
cy.get('.url-input-container input').clear().type('renamed-channel');
cy.get('.url-input-container button.url-input-button').click();

// # Save
cy.findByRole('button', {name: /save/i}).click();

// * URL updated
cy.location('pathname').should('include', `/${testTeam.name}/channels/renamed-channel`);

// * Header shows new name
cy.get('#channelHeaderTitle').should('contain', 'Renamed Channel');
});
});
});
describe('bottom menu', () => {
it('should be able to manage notifications', () => {
@@ -268,29 +237,11 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();

// # Click on "Notification Preferences"
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Notification Preferences').scrollIntoView().should('be.visible').click();
cy.uiGetRHS().findByText('Notification Preferences').should('be.visible').click();

// * Ensures the modal is there
cy.get('.ChannelNotificationModal').should('be.visible');
});
it('should open Channel Settings from RHS menu', () => {
// # Go to test channel
cy.visit(`/${testTeam.name}/channels/${testChannel.name}`);

// # Close RHS if it's open, then click on the channel info button
cy.get('body').then(($body) => {
if ($body.find('#rhsCloseButton').length > 0) {
cy.get('#rhsCloseButton').click();
}
cy.get('#channel-info-btn').should('be.visible').click();
});

// * Channel Settings item is visible in RHS menu
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Channel Settings').scrollIntoView().should('be.visible').click();

// * Channel Settings modal opens
cy.get('.ChannelSettingsModal').should('be.visible');
});
it('should be able to view files and come back', () => {
// # Go to test channel
cy.visit(`/${testTeam.name}/channels/${testChannel.name}`);

@@ -299,7 +250,7 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();

// # Click on "Files"
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Files').scrollIntoView().should('be.visible').click();
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Files').should('be.visible').click();

// * Ensure we see the files RHS
cy.uiGetRHS().findByText('No files yet').should('be.visible');

@@ -326,10 +277,10 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();

// # Click on "Pinned Messages"
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Pinned messages').scrollIntoView().should('be.visible').click();
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Pinned messages').should('be.visible').click();

// * Ensure we see the Pinned Post RHS
cy.uiGetRHS().findByText('Hello channel info rhs spec').first().should('be.visible');
cy.uiGetRHS().findByText('Hello channel info rhs spec').should('be.visible');

// # Click the Back Icon
cy.uiGetRHS().get('[aria-label="Back Icon"]').click();
@@ -345,7 +296,7 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();

// # Click on "Members"
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Members').scrollIntoView().should('be.visible').click();
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Members').should('be.visible').click();

// * Ensure we see the members
cy.uiGetRHS().contains('sysadmin').should('be.visible');

@@ -431,6 +382,7 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();

cy.apiPatchChannel(groupChannel.id, {
...groupChannel,
header: 'header for the tests',
}).then(() => {
cy.uiGetRHS().findByText('header for the tests').should('be.visible');

@@ -447,7 +399,7 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();

// # Click on "Notification Preferences"
cy.uiGetRHS().findByTestId('channel_info_rhs-menu').findByText('Notification Preferences').scrollIntoView().should('be.visible').click();
cy.uiGetRHS().findByText('Notification Preferences').should('be.visible').click();

// * Ensures the modal is there
cy.get('.ChannelNotificationModal').should('be.visible');

@@ -524,6 +476,7 @@ describe('Channel Info RHS', () => {
cy.get('#channel-info-btn').click();

cy.apiPatchChannel(directChannel.id, {
...directChannel,
header: 'header for the tests',
}).then(() => {
cy.uiGetRHS().findByText('header for the tests').should('be.visible');

@@ -536,6 +489,6 @@ describe('Channel Info RHS', () => {
function ensureRHSIsOpenOnChannelInfo(testChannel) {
cy.get('#rhsContainer').then((rhsContainer) => {
cy.wrap(rhsContainer).findByText('Info').should('be.visible');
cy.wrap(rhsContainer).find('.sidebar--right__title__subtitle').should('contain', testChannel.display_name);
cy.wrap(rhsContainer).findByText(testChannel.display_name).should('be.visible');
});
}
@@ -7,6 +7,7 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

// Stage: @prod
// Group: @channels @channel @rhs

import * as TIMEOUTS from '../../../fixtures/timeouts';

@@ -7,6 +7,7 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

// Stage: @prod
// Group: @channels @dm_category

import * as MESSAGES from '../../../fixtures/messages';

@@ -7,6 +7,7 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

// Stage: @prod
// Group: @channels @channel_sidebar

import {

@@ -7,6 +7,7 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

// Stage: @prod
// Group: @channels @collapsed_reply_threads

import {Channel} from '@mattermost/types/channels';

@@ -7,6 +7,7 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

// Stage: @prod
// Group: @channels @custom_status

import moment from 'moment-timezone';

@@ -7,6 +7,7 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

// Stage: @prod
// Group: @channels @custom_status

describe('Custom Status - Setting a Custom Status', () => {
@@ -49,7 +49,7 @@ describe('Verify Accessibility Support in different input fields', () => {
// * Verify Accessibility Support in Add or Invite People input field
cy.get('.users-emails-input__control').should('be.visible').within(() => {
cy.get('input').should('have.attr', 'aria-label', 'Invite People').and('have.attr', 'aria-autocomplete', 'list');
cy.get('input').should('have.attr', 'aria-label', 'Add or Invite People').and('have.attr', 'aria-autocomplete', 'list');
cy.get('.users-emails-input__placeholder').should('have.text', 'Enter a name or email address');
});

@@ -58,7 +58,7 @@ describe('Verify Accessibility Support in different input fields', () => {
// * Verify Accessibility Support in Invite People input field
cy.get('.users-emails-input__control').should('be.visible').within(() => {
cy.get('input').should('have.attr', 'aria-label', 'Invite People').and('have.attr', 'aria-autocomplete', 'list');
cy.get('input').should('have.attr', 'aria-label', 'Add or Invite People').and('have.attr', 'aria-autocomplete', 'list');
cy.get('.users-emails-input__placeholder').should('have.text', 'Enter a name or email address');
});
@@ -0,0 +1,222 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

// ***************************************************************
// - [#] indicates a test step (e.g. # Go to a page)
// - [*] indicates an assertion (e.g. * Check the title)
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

// Stage: @prod
// Group: @channels @enterprise @accessibility

import {Channel} from '@mattermost/types/channels';
import {Team} from '@mattermost/types/teams';
import {UserProfile} from '@mattermost/types/users';

import * as TIMEOUTS from '../../../../fixtures/timeouts';

describe('Verify Accessibility Support in Modals & Dialogs', () => {
    let testTeam: Team;
    let testChannel: Channel;
    let testUser: UserProfile;
    let selectedRowText: string;

    before(() => {
        // * Check if server has license for Guest Accounts
        cy.apiRequireLicenseForFeature('GuestAccounts');

        cy.apiInitSetup({userPrefix: 'user000a'}).then(({team, channel, user}) => {
            testTeam = team;
            testChannel = channel;
            testUser = user;

            cy.apiCreateUser({prefix: 'user000b'}).then(({user: newUser}) => {
                cy.apiAddUserToTeam(testTeam.id, newUser.id).then(() => {
                    cy.apiAddUserToChannel(testChannel.id, newUser.id);
                });
            });
        });
    });

    beforeEach(() => {
        // # Login as sysadmin and visit the town-square
        cy.apiAdminLogin();
        cy.visit(`/${testTeam.name}/channels/town-square`);
    });
    it('MM-T1466 Accessibility Support in Direct Messages Dialog screen', () => {
        // * Verify the aria-label in create direct message button
        cy.uiAddDirectMessage().click();

        // * Verify the accessibility support in Direct Messages Dialog
        cy.findAllByRole('dialog', {name: 'Direct Messages'}).eq(0).within(() => {
            cy.findByRole('heading', {name: 'Direct Messages'});

            // * Verify the accessibility support in search input
            cy.findByLabelText('Search for people').
                should('have.attr', 'aria-autocomplete', 'list');

            // # Search for a text and then check up and down arrow
            cy.findByLabelText('Search for people').
                typeWithForce('s').
                wait(TIMEOUTS.HALF_SEC).
                typeWithForce('{downarrow}{downarrow}{downarrow}{uparrow}');
            cy.get('#multiSelectList').children().eq(2).should('have.class', 'more-modal__row--selected').within(() => {
                cy.get('.more-modal__name').invoke('text').then((user) => {
                    selectedRowText = user.split(' - ')[0].replace('@', '');
                });

                // * Verify image alt is displayed
                cy.get('img.Avatar').should('have.attr', 'alt', 'user profile image');
            });

            // * Verify if the reader is able to read out the selected row
            cy.get('.filtered-user-list div.sr-only:not([role="status"])').
                should('have.attr', 'aria-live', 'polite').
                and('have.attr', 'aria-atomic', 'true').
                invoke('text').then((text) => {
                    expect(text).equal(selectedRowText);
                });

            // # Search for an invalid text
            const additionalSearchTerm = 'somethingwhichdoesnotexist';
            cy.findByLabelText('Search for people').clear().
                typeWithForce(additionalSearchTerm).
                wait(TIMEOUTS.HALF_SEC);

            // * Check if reader can read no results
            cy.get('.multi-select__wrapper').should('have.attr', 'aria-live', 'polite').and('have.text', `No results found matching ${additionalSearchTerm}`);
        });
    });
    it('MM-T1467 Accessibility Support in Browse Channels Dialog screen', () => {
        function getChannelAriaLabel(channel) {
            return channel.display_name.toLowerCase() + ', ' + channel.purpose.toLowerCase();
        }

        // # Create atleast 2 channels
        let otherChannel;
        cy.apiCreateChannel(testTeam.id, 'z_accessibility', 'Z Accessibility', 'O', 'other purpose').then(({channel}) => {
            otherChannel = channel;
        });
        cy.apiCreateChannel(testTeam.id, 'accessibility', 'Accessibility', 'O', 'some purpose').then(({channel}) => {
            cy.apiLogin(testUser).then(() => {
                cy.reload();

                // * Verify the aria-label in more public channels button
                cy.uiBrowseOrCreateChannel('Browse channels');

                // * Verify the accessibility support in More Channels Dialog
                cy.findByRole('dialog', {name: 'Browse Channels'}).within(() => {
                    cy.findByRole('heading', {name: 'Browse Channels'});

                    // * Verify the accessibility support in search input
                    cy.findByPlaceholderText('Search channels');

                    cy.get('#moreChannelsList').should('be.visible').then((el) => {
                        return el[0].children.length === 2;
                    });

                    // # Hide already joined channels
                    cy.findByText('Hide Joined').click();

                    // # Focus on the Create Channel button and TAB five time
                    cy.get('#createNewChannelButton').focus().tab().tab().tab().tab().tab();

                    // * Verify channel name is highlighted and reader reads the channel name and channel description
                    cy.get('#moreChannelsList').within(() => {
                        const selectedChannel = getChannelAriaLabel(channel);
                        cy.findByLabelText(selectedChannel).should('be.visible').should('be.focused');
                    });

                    // * Press Tab again and verify if focus changes to next row
                    cy.focused().tab();
                    cy.findByLabelText(getChannelAriaLabel(otherChannel)).should('be.focused');
                });
            });
        });
    });
    it('MM-T1468 Accessibility Support in Add people to Channel Dialog screen', () => {
        // # Add atleast 5 users
        for (let i = 0; i < 5; i++) {
            cy.apiCreateUser().then(({user}) => {
                cy.apiAddUserToTeam(testTeam.id, user.id);
            });
        }

        // # Visit the test channel, and wait for the page to fully load
        cy.visit(`/${testTeam.name}/channels/${testChannel.name}`);

        // # Open Add Members Dialog
        cy.uiOpenChannelMenu('Members');
        cy.uiGetButton('Add').click();

        // * Verify the accessibility support in Add people Dialog
        cy.findAllByRole('dialog').eq(0).within(() => {
            const modalName = `Add people to ${testChannel.display_name}`;
            cy.findByRole('heading', {name: modalName});
            cy.wait(TIMEOUTS.ONE_SEC);

            // * Verify the accessibility support in search input
            cy.findByLabelText('Search for people or groups').
                should('have.attr', 'aria-autocomplete', 'list');

            // # Search for a text and then check up and down arrow
            cy.findByLabelText('Search for people or groups').
                wait(TIMEOUTS.HALF_SEC).
                typeWithForce('u').
                wait(TIMEOUTS.HALF_SEC).
                typeWithForce('{downarrow}{downarrow}{downarrow}{downarrow}{uparrow}');
            cy.get('#multiSelectList').
                children().eq(2).
                should('have.class', 'more-modal__row--selected').
                within(() => {
                    cy.get('.more-modal__name').invoke('text').then((user) => {
                        selectedRowText = user.split(' - ')[0].replace('@', '');
                    });

                    // * Verify image alt is displayed
                    cy.get('img.Avatar').should('have.attr', 'alt', 'user profile image');
                });

            // * Verify if the reader is able to read out the selected row
            cy.get('.filtered-user-list div.sr-only:not([role="status"])').
                should('have.attr', 'aria-live', 'polite').
                and('have.attr', 'aria-atomic', 'true').
                invoke('text').then((text) => {
                    // Check that the readout starts with the selected user since it may be followed by
                    // "Already in Channel" depending on which user was selected
                    expect(text).to.match(new RegExp(`^${selectedRowText}\\b`));
                });

            // # Search for an invalid text and check if reader can read no results
            cy.findByLabelText('Search for people or groups').
                typeWithForce('somethingwhichdoesnotexist').
                wait(TIMEOUTS.HALF_SEC);

            // * Check if reader can read no results
            cy.get('.custom-no-options-message').
                should('be.visible').
                and('contain', 'No matches found - Invite them to the team');
        });
    });
    it('MM-T1515 Verify Accessibility Support in Invite People Flow', () => {
        // # Open Invite People
        cy.uiGetLHSHeader().click();
        cy.get("#sidebarTeamMenu li:contains('Invite people')").should('be.visible').click();

        // * Verify accessibility support in Invite People Dialog
        cy.findByTestId('invitationModal').should('have.attr', 'aria-modal', 'true').and('have.attr', 'aria-labelledby', 'invitation_modal_title').and('have.attr', 'role', 'dialog');
        cy.get('#invitation_modal_title').should('be.visible').and('contain.text', 'Invite people to');

        // # Press tab
        cy.get('button.icon-close').focus().tab({shift: true}).tab();

        // * Verify tab focuses on close button
        cy.get('button.icon-close').should('have.attr', 'aria-label', 'Close').and('be.focused');
    });
});
@@ -58,9 +58,7 @@ describe('Authentication', () => {
cy.get('#input_name').clear().type(`Test${getRandomId()}`);
cy.get('#signup-body-card-form-check-terms-and-privacy').check();
cy.findByText('Create account').click();
cy.findByText('Create Account').click();

// * Make sure account was not created successfully
cy.get('.AlertBanner__title').scrollIntoView().should('be.visible');

@@ -7,6 +7,7 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

// Stage: @prod
// Group: @channels @enterprise @not_cloud @system_console

describe('MM-T2574 Session Lengths', () => {
@@ -78,16 +78,13 @@ describe('Guest Accounts', () => {
// # Click "Save".
cy.findByText('Save').click().wait(TIMEOUTS.ONE_SEC);

// # Visit a page to trigger MFA setup redirect, then complete MFA setup for admin
cy.visit('/');
cy.url().should('include', 'mfa/setup');

// # Get MFA secret
cy.uiGetMFASecret(sysadmin.id).then((secret) => {
adminMFASecret = secret;
});

// # Navigate to Guest Access page.
cy.visit('/admin_console/authentication/guest_access');
cy.url().should('include', '/admin_console/authentication/guest_access');

// # Enable guest accounts.
cy.findByTestId('GuestAccountsSettings.Enabletrue').check();

@@ -147,20 +144,20 @@ describe('Guest Accounts', () => {
// # Create an account with Email and Password.
cy.get('#input_name').type(username);
cy.get('#input_password-input').type(username);
cy.findByText('Create account').click();
cy.findByText('Create Account').click();

// * When MFA is enforced for Guest Access, guest user should be forced to configure MFA while creating an account.
cy.url().should('include', 'mfa/setup');
cy.get('#mfa').wait(TIMEOUTS.HALF_SEC).find('p.col-sm-12 span').then((p) => {
cy.get('#mfa').wait(TIMEOUTS.HALF_SEC).find('.col-sm-12').then((p) => {
const secretp = p.text();
const secret = secretp.split(' ')[1];

const token = authenticator.generateToken(secret);
cy.findByPlaceholderText('MFA Code').type(token);
cy.findByText('Save').click();
cy.get('#mfa').find('.form-control').type(token);
cy.get('#mfa').find('.btn.btn-primary').click();

cy.wait(TIMEOUTS.ONE_SEC);
cy.findByText('Okay').click();
cy.get('#mfa').find('.btn.btn-primary').click();
});
cy.apiLogout();
});
@@ -7,6 +7,7 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

// Stage: @prod
// Group: @channels @enterprise @guest_account

/**

@@ -106,7 +106,7 @@ describe('Guest Account - Member Invitation Flow', () => {
cy.get('#input_email').type(email);
cy.get('#input_name').type(username);
cy.get('#input_password-input').type('Testing123');
cy.findByText('Create account').click();
cy.findByText('Create Account').click();

// * Verify if user is added to the invited team
cy.uiGetLHSHeader().findByText(testTeam.display_name);
@@ -65,19 +65,8 @@ describe('Incoming webhook', () => {
cy.visit(`/${testTeam.name}/channels/${testChannel.name}`);

// # Post webhook and wait for attachment to render
cy.postIncomingWebhook({url: incomingWebhook.url, data: payload});

// # Verify the post appears in the channel with attachment
cy.getLastPost().within(() => {
cy.get('.attachment__body').should('be.visible').should('contain', 'Findme.');
});

// # Explicitly wait to give Elasticsearch time to index before searching
// Using a longer wait time since Elasticsearch indexing can be slow in test environments
cy.wait(TIMEOUTS.THREE_SEC);

// # Search for text in the attachment
cy.uiGetSearchContainer().click();
cy.uiGetSearchBox().
wait(TIMEOUTS.HALF_SEC).
@@ -7,6 +7,7 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

// Stage: @prod
// Group: @channels @enterprise @ldap

import {Channel} from '@mattermost/types/channels';

@@ -7,6 +7,7 @@
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

// Stage: @prod
// Group: @channels @enterprise @ldap

import {UserProfile} from '@mattermost/types/users';
Some files were not shown because too many files have changed in this diff.