4a5f01a78fcf8fd946cdc1eb8127778ad2ff2dcd
35 Commits

4a5f01a78f | chore: Update code to new repo

e0ffada80b | feat: support parallel job queue tasks, speed up task running (#13614)

Currently, attempting to run tasks in parallel will result in DB errors.

## Solution

The problem was caused by inefficient db update calls. After each task completes, we need to update the log array in the payload-jobs collection; on postgres, that's a separate table. Previously, the update worked the following way:

1. Nuke the table
2. Re-insert every single row, including the new one

This throws db errors if multiple processes start doing it concurrently. Additionally, due to conflicts, new log rows may be lost.

This PR makes use of the [new db $push operation](https://github.com/payloadcms/payload/pull/13453) we recently added to atomically push a new log row to the database in a single round-trip. This not only reduces the amount of db round trips (=> faster job queue system) but allows multiple tasks to perform this db operation in parallel, without conflicts.

## Problem

**Example:**

```ts
export const fastParallelTaskWorkflow: WorkflowConfig<'fastParallelTask'> = {
  slug: 'fastParallelTask',
  handler: async ({ inlineTask }) => {
    const taskFunctions = []
    for (let i = 0; i < 20; i++) {
      const idx = i + 1
      taskFunctions.push(async () => {
        return await inlineTask(`parallel task ${idx}`, {
          input: {
            test: idx,
          },
          task: () => {
            return {
              output: {
                taskID: idx.toString(),
              },
            }
          },
        })
      })
    }
    await Promise.all(taskFunctions.map((f) => f()))
  },
}
```

On SQLite, this would throw the following error:

```bash
Caught error Error: UNIQUE constraint failed: payload_jobs_log.id
    at Object.next (/Users/alessio/Documents/GitHub/payload/node_modules/.pnpm/libsql@0.4.7/node_modules/libsql/index.js:335:20)
    at Statement.all (/Users/alessio/Documents/GitHub/payload/node_modules/.pnpm/libsql@0.4.7/node_modules/libsql/index.js:360:16)
    at executeStmt (/Users/alessio/Documents/GitHub/payload/node_modules/.pnpm/@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5/node_modules/@libsql/client/lib-cjs/sqlite3.js:285:34)
    at Sqlite3Client.execute (/Users/alessio/Documents/GitHub/payload/node_modules/.pnpm/@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5/node_modules/@libsql/client/lib-cjs/sqlite3.js:101:16)
    at /Users/alessio/Documents/GitHub/payload/node_modules/.pnpm/drizzle-orm@0.44.2_@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5__@opentelemetr_asjmtflojkxlnxrshoh4fj5f6u/node_modules/src/libsql/session.ts:288:58
    at LibSQLPreparedQuery.queryWithCache (/Users/alessio/Documents/GitHub/payload/node_modules/.pnpm/drizzle-orm@0.44.2_@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5__@opentelemetr_asjmtflojkxlnxrshoh4fj5f6u/node_modules/src/sqlite-core/session.ts:79:18)
    at LibSQLPreparedQuery.values (/Users/alessio/Documents/GitHub/payload/node_modules/.pnpm/drizzle-orm@0.44.2_@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5__@opentelemetr_asjmtflojkxlnxrshoh4fj5f6u/node_modules/src/libsql/session.ts:286:21)
    at LibSQLPreparedQuery.all (/Users/alessio/Documents/GitHub/payload/node_modules/.pnpm/drizzle-orm@0.44.2_@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5__@opentelemetr_asjmtflojkxlnxrshoh4fj5f6u/node_modules/src/libsql/session.ts:214:27)
    at QueryPromise.all (/Users/alessio/Documents/GitHub/payload/node_modules/.pnpm/drizzle-orm@0.44.2_@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5__@opentelemetr_asjmtflojkxlnxrshoh4fj5f6u/node_modules/src/sqlite-core/query-builders/insert.ts:402:26)
    at QueryPromise.execute (/Users/alessio/Documents/GitHub/payload/node_modules/.pnpm/drizzle-orm@0.44.2_@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5__@opentelemetr_asjmtflojkxlnxrshoh4fj5f6u/node_modules/src/sqlite-core/query-builders/insert.ts:414:40)
    at QueryPromise.then (/Users/alessio/Documents/GitHub/payload/node_modules/.pnpm/drizzle-orm@0.44.2_@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5__@opentelemetr_asjmtflojkxlnxrshoh4fj5f6u/node_modules/src/query-promise.ts:31:15) {
  rawCode: 1555,
  code: 'SQLITE_CONSTRAINT_PRIMARYKEY',
  libsqlError: true
}
```

13c24afa63 | feat: allow multiple, different payload instances using getPayload in same process (#13603)

Fixes https://github.com/payloadcms/payload/issues/13433. Testing release: `3.54.0-internal.90cf7d5`

Previously, calling `getPayload` always returned the same cached payload instance within a single process, regardless of the arguments passed to it. This resulted in the following issues, both fixed by this PR:

- If your frontend calls `getPayload` without `cron: true` and you host the Payload Admin Panel in the same process, crons will not be enabled even if you visit the admin panel (which calls `getPayload` with `cron: true`). This breaks jobs autorun depending on which page you visit first: the admin panel or the frontend.
- Within the same process, you could not use `getPayload` twice for different instances of payload with different Payload Configs. On postgres, you can get around this by manually calling `new BasePayload()`, which skips the cache. This did not work on mongoose though, as mongoose was caching the models on a global singleton (this PR addresses this).

In order to bust the cache for different Payload Configs, this PR introduces a new, optional `key` property to `getPayload`.

## Mongoose - disable using global singleton

This PR refactors the Payload Mongoose adapter to stop relying on the global mongoose singleton. Instead, each adapter instance now creates and manages its own scoped Connection object.

### Motivation

Previously, calling `getPayload()` more than once in the same process would throw `Cannot overwrite model` errors because models were compiled into the global singleton. This prevented running multiple Payload instances side-by-side, even when pointing at different databases.

### Changes

- Replace usage of `mongoose.connect()` / `mongoose.model()` with instance-scoped `createConnection()` and `connection.model()`.
- Ensure models, globals, and versions are compiled per connection, not globally.
- Add proper `close()` handling on `this.connection` instead of `mongoose.disconnect()`.
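
A minimal sketch of the new `key` option holding two independent instances in one process (the config module paths are hypothetical):

```ts
import { getPayload } from 'payload'

import configA from './payload.config.a' // hypothetical config modules
import configB from './payload.config.b'

// Distinct keys bust the per-process cache, so each call yields its own
// Payload instance - and, on mongoose, its own connection-scoped models.
const payloadA = await getPayload({ config: configA, key: 'instance-a' })
const payloadB = await getPayload({ config: configB, key: 'instance-b' })
```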

ad2564e5fa | fix: ensure scheduling by default only handles default queue, add allQueues config to autoRun (#13395)

By default, `payload.jobs.run` only runs jobs from the `default` queue (since https://github.com/payloadcms/payload/pull/12799) and exposes an `allQueues` property to run jobs from all queues. Handling schedules (`payload.jobs.handleSchedules` and `config.jobs.autoRun`) behaved differently: jobs were run from all queues by default, and no `allQueues` property existed.

This PR adds an `allQueues` property to scheduling and changes the default behavior to only handle schedules for the `default` queue. That way, the behavior of running and scheduling jobs matches.
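
A sketch of opting back into the previous all-queues behavior; the `autoRun` cron shape is assumed from the jobs config, while `allQueues` comes from this PR:

```ts
import { buildConfig } from 'payload'

export default buildConfig({
  // ...
  jobs: {
    tasks: [],
    autoRun: [
      {
        cron: '* * * * *', // every minute
        // Without this flag, only the `default` queue is handled now:
        allQueues: true,
      },
    ],
  },
})
```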

3114b89d4c | perf: 23% faster job queue system on postgres/sqlite (#13187)

Previously, a single run of the simplest job queue workflow (one task, no db calls by user code in the task - we're just measuring db system overhead) resulted in **22 db roundtrips** on drizzle. This PR reduces it to **17 db roundtrips** by doing the following:

- Modifies `db.updateJobs` to use the new optimized `upsertRow` function if the update is simple
- Does not unnecessarily pass the job log to the final job update when the workflow completes, which allows using the optimized `upsertRow` function, as only the main table is involved

c08b2aea89 | feat: scheduling jobs (#12863)

Adds a new `schedule` property to workflow and task configs that can be used to have Payload automatically _queue_ jobs following a certain _schedule_.

Docs: https://payloadcms.com/docs/dynamic/jobs-queue/schedules?branch=feat/schedule-jobs

## API Example

```ts
export default buildConfig({
  // ...
  jobs: {
    // ...
    // Or `cron` if you're not using serverless. If `manual` is used, the user
    // needs to set up running /api/payload-jobs/handleSchedules or
    // payload.jobs.handleSchedules in regular intervals.
    scheduler: 'manual',
    tasks: [
      {
        schedule: [
          {
            cron: '* * * * * *',
            queue: 'autorunSecond',
            // Hooks are optional
            hooks: {
              // Not an array, as providing and calling `defaultBeforeSchedule`
              // would be more error-prone if this was an array
              beforeSchedule: async (args) => {
                // Handles verifying that there are no jobs already scheduled or
                // processing. You can override this behavior by not calling
                // defaultBeforeSchedule, e.g. if you wanted to allow a maximum of
                // 3 scheduled jobs in the queue instead of 1, or add any
                // additional conditions
                const result = await args.defaultBeforeSchedule(args)
                return {
                  ...result,
                  input: {
                    message: 'This task runs every second',
                  },
                }
              },
              afterSchedule: async (args) => {
                // Handles updating the payload-jobs-stats global
                await args.defaultAfterSchedule(args)
                args.req.payload.logger.info(
                  'EverySecond task scheduled: ' +
                    (args.status === 'success' ? args.job.id : 'skipped or failed to schedule'),
                )
              },
            },
          },
        ],
        slug: 'EverySecond',
        inputSchema: [
          {
            name: 'message',
            type: 'text',
            required: true,
          },
        ],
        handler: ({ input, req }) => {
          req.payload.logger.info(input.message)
          return {
            output: {},
          }
        },
      },
    ],
  },
})
```
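
With `scheduler: 'manual'`, something on your side has to tick the scheduler in regular intervals. A hedged sketch of what that could look like; the exact argument shape of `handleSchedules` is an assumption, and `payload` is an already-initialized instance:

```ts
// e.g. inside a route handler hit by an external cron service:
await payload.jobs.handleSchedules({ queue: 'autorunSecond' })
```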

64d76a3869 | fix: cron jobs running when calling bin scripts, leading to db errors (#13135)

Previously, we were always initializing cron jobs when calling `getPayload` or `payload.init`. This is undesired in bin scripts: we don't want cron jobs to start triggering db calls while we're running an initial migration using `payload migrate`, for example. If job autoruns were enabled, this previously led to a race condition that triggered the following occasional error:
```ts
DrizzleQueryError: Failed query: select "payload_jobs"."id", "payload_jobs"."input", "payload_jobs"."completed_at", "payload_jobs"."total_tried", "payload_jobs"."has_error", "payload_jobs"."error", "payload_jobs"."workflow_slug", "payload_jobs"."task_slug", "payload_jobs"."queue", "payload_jobs"."wait_until", "payload_jobs"."processing", "payload_jobs"."updated_at", "payload_jobs"."created_at", "payload_jobs_log"."data" as "log" from "payload_jobs" "payload_jobs" left join lateral (select coalesce(json_agg(json_build_array("payload_jobs_log"."_order", "payload_jobs_log"."id", "payload_jobs_log"."executed_at", "payload_jobs_log"."completed_at", "payload_jobs_log"."task_slug", "payload_jobs_log"."task_i_d", "payload_jobs_log"."input", "payload_jobs_log"."output", "payload_jobs_log"."state", "payload_jobs_log"."error") order by "payload_jobs_log"."_order" asc), '[]'::json) as "data" from (select * from "payload_jobs_log" "payload_jobs_log" where "payload_jobs_log"."_parent_id" = "payload_jobs"."id" order by "payload_jobs_log"."_order" asc) "payload_jobs_log") "payload_jobs_log" on true where ("payload_jobs"."completed_at" is null and ("payload_jobs"."has_error" is null or "payload_jobs"."has_error" <> $1) and "payload_jobs"."processing" = $2 and ("payload_jobs"."wait_until" is null or "payload_jobs"."wait_until" < $3) and "payload_jobs"."queue" = $4) order by "payload_jobs"."created_at" asc limit $5
params: true,false,2025-07-10T21:25:03.002Z,autorunSecond,100
at NodePgPreparedQuery.queryWithCache (/Users/alessio/Documents/GitHub/payload2/node_modules/.pnpm/drizzle-orm@0.44.2_@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5__@opentelemetr_asjmtflojkxlnxrshoh4fj5f6u/node_modules/src/pg-core/session.ts:74:11)
at processTicksAndRejections (node:internal/process/task_queues:105:5)
at /Users/alessio/Documents/GitHub/payload2/node_modules/.pnpm/drizzle-orm@0.44.2_@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5__@opentelemetr_asjmtflojkxlnxrshoh4fj5f6u/node_modules/src/node-postgres/session.ts:154:19
... 6 lines matching cause stack trace ...
at N._trigger (/Users/alessio/Documents/GitHub/payload2/node_modules/.pnpm/croner@9.0.0/node_modules/croner/dist/croner.cjs:1:16806) {
query: `select "payload_jobs"."id", "payload_jobs"."input", "payload_jobs"."completed_at", "payload_jobs"."total_tried", "payload_jobs"."has_error", "payload_jobs"."error", "payload_jobs"."workflow_slug", "payload_jobs"."task_slug", "payload_jobs"."queue", "payload_jobs"."wait_until", "payload_jobs"."processing", "payload_jobs"."updated_at", "payload_jobs"."created_at", "payload_jobs_log"."data" as "log" from "payload_jobs" "payload_jobs" left join lateral (select coalesce(json_agg(json_build_array("payload_jobs_log"."_order", "payload_jobs_log"."id", "payload_jobs_log"."executed_at", "payload_jobs_log"."completed_at", "payload_jobs_log"."task_slug", "payload_jobs_log"."task_i_d", "payload_jobs_log"."input", "payload_jobs_log"."output", "payload_jobs_log"."state", "payload_jobs_log"."error") order by "payload_jobs_log"."_order" asc), '[]'::json) as "data" from (select * from "payload_jobs_log" "payload_jobs_log" where "payload_jobs_log"."_parent_id" = "payload_jobs"."id" order by "payload_jobs_log"."_order" asc) "payload_jobs_log") "payload_jobs_log" on true where ("payload_jobs"."completed_at" is null and ("payload_jobs"."has_error" is null or "payload_jobs"."has_error" <> $1) and "payload_jobs"."processing" = $2 and ("payload_jobs"."wait_until" is null or "payload_jobs"."wait_until" < $3) and "payload_jobs"."queue" = $4) order by "payload_jobs"."created_at" asc limit $5`,
params: [ true, false, '2025-07-10T21:25:03.002Z', 'autorunSecond', 100 ],
cause: error: relation "payload_jobs" does not exist
at /Users/alessio/Documents/GitHub/payload2/node_modules/.pnpm/pg@8.16.3/node_modules/pg/lib/client.js:545:17
at processTicksAndRejections (node:internal/process/task_queues:105:5)
at /Users/alessio/Documents/GitHub/payload2/node_modules/.pnpm/drizzle-orm@0.44.2_@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5__@opentelemetr_asjmtflojkxlnxrshoh4fj5f6u/node_modules/src/node-postgres/session.ts:161:13
at NodePgPreparedQuery.queryWithCache (/Users/alessio/Documents/GitHub/payload2/node_modules/.pnpm/drizzle-orm@0.44.2_@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5__@opentelemetr_asjmtflojkxlnxrshoh4fj5f6u/node_modules/src/pg-core/session.ts:72:12)
at /Users/alessio/Documents/GitHub/payload2/node_modules/.pnpm/drizzle-orm@0.44.2_@libsql+client@0.14.0_bufferutil@4.0.8_utf-8-validate@6.0.5__@opentelemetr_asjmtflojkxlnxrshoh4fj5f6u/node_modules/src/node-postgres/session.ts:154:19
at find (/Users/alessio/Documents/GitHub/payload2/packages/drizzle/src/find/findMany.ts:162:19)
at Object.updateMany (/Users/alessio/Documents/GitHub/payload2/packages/drizzle/src/updateJobs.ts:26:16)
at updateJobs (/Users/alessio/Documents/GitHub/payload2/packages/payload/src/queues/utilities/updateJob.ts:102:37)
at runJobs (/Users/alessio/Documents/GitHub/payload2/packages/payload/src/queues/operations/runJobs/index.ts:181:25)
at Object.run (/Users/alessio/Documents/GitHub/payload2/packages/payload/src/queues/localAPI.ts:137:12)
at N.fn (/Users/alessio/Documents/GitHub/payload2/packages/payload/src/index.ts:866:13)
at N._trigger (/Users/alessio/Documents/GitHub/payload2/node_modules/.pnpm/croner@9.0.0/node_modules/croner/dist/croner.cjs:1:16806) {
length: 112,
severity: 'ERROR',
code: '42P01',
detail: undefined,
hint: undefined,
position: '406',
internalPosition: undefined,
internalQuery: undefined,
where: undefined,
schema: undefined,
table: undefined,
column: undefined,
dataType: undefined,
constraint: undefined,
file: 'parse_relation.c',
line: '1449',
routine: 'parserOpenTable'
}
}
```
This PR makes running crons opt-in using a new `cron` flag. By default,
no cron jobs will be created.
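
A sketch of the resulting split, assuming the standard `getPayload` entry point:

```ts
import { getPayload } from 'payload'
import config from '@payload-config'

// Long-running server: opt in, so autorun cron jobs are registered.
const payload = await getPayload({ config, cron: true })

// Bin script / migration context: omit `cron`, so nothing queries
// payload_jobs before the migrations have created it.
const scriptPayload = await getPayload({ config })
```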

57d00ad2e9 | test: reduce queue test amount (#13008)

59f536c2c9 | refactor: simplify job queue error handling (#12845)

This simplifies workflow / task error handling, as well as cancelling jobs. Previously, we were handling errors where they occurred and passing error state through a `state` object; errors were then handled in multiple areas of the code. This PR adds new, clean `TaskError`, `WorkflowError` and `JobCancelledError` errors that are thrown when they occur and are handled **in one single place**, massively cleaning up complex functions like [payload/src/queues/operations/runJobs/runJob/getRunTaskFunction.ts](https://github.com/payloadcms/payload/compare/refactor/jobs-errors?expand=1#diff-53dc7ccb7c8e023c9ba63fdd2e78c32ad0be606a2c64a3512abad87893f5fd21).

Performance also improves: previously, a task / workflow failure or cancellation resulted in multiple, separate `updateJob` db calls, as the data modifications required for storing failure state were done in multiple areas of the codebase. Most notably, task error state was handled and updated separately from workflow error state. Now, it's just a clean, single `updateJob` call.

This PR also does the following:

- Adds a new test for `deleteJobOnComplete` behavior
- Cleans up the test suite
- Ensures `deleteJobOnComplete` does not delete definitively failed jobs

84cb2b5819 | refactor: simplify job type (#12816)

Previously, there were multiple ways to type a running job:

- `GeneratedTypes['payload-jobs']` - only works in an installed project; is `any` in the monorepo
- `BaseJob` - works everywhere, but does not incorporate generated types, which may include types for custom fields added to the jobs collection
- `RunningJob<>` - a more accurate version of `BaseJob`, but with the same problem

This PR deprecates all those types in favor of a new `Job` type. Benefits:

- Works in both the monorepo and installed projects. If no generated types exist, it automatically falls back to `BaseJob`
- Comes with an optional generic that can be used to narrow down `job.input` based on the task / workflow slug. No need to use a separate type helper like `RunningJob<>`

With this new type, every usage of `GeneratedTypes['payload-jobs']`, `BaseJob` and `RunningJob<>` could be replaced with the simple `Job` type. Additionally, this PR simplifies some of the logic used to run jobs.
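
A sketch of the new type in use; the workflow slug `'myWorkflow'` is illustrative:

```ts
import type { Job } from 'payload'

// Without the generic: works everywhere, falling back to BaseJob-style
// typing when no generated types exist (e.g. in the monorepo).
const logJob = (job: Job) => console.log(job.id, job.taskSlug)

// With the generic: `job.input` is narrowed based on the workflow/task slug.
const logWorkflowJob = (job: Job<'myWorkflow'>) => console.log(job.input)
```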

7c05c775cb | docs: improve jobs autorun docs, adds e2e test (#12196)

This clarifies that `jobs.autoRun` only *runs* already-queued jobs; it does not queue the jobs for you. Also adds an e2e test, as this functionality had no e2e coverage.

545d870650 | chore: fix various e2e test setup issues (#12670)

I noticed a few issues when running e2e tests that are resolved by this PR:

- Most important: for some test suites (fields, fields-relationship, versions, queues, lexical), the database was cleared and seeded **twice** between each test run. This is because the onInit function was running the clear-and-seed script when it should only have been running the seed script. Clearing the database / the snapshot workflow is done by the reInit endpoint, which then calls onInit to seed the actual data.
- The slowest part of `clearAndSeedEverything` is recreating indexes on mongodb. This PR slightly improves performance here by:
  - Skipping this process for the built-in `['payload-migrations', 'payload-preferences', 'payload-locked-documents']` collections
  - Calling only `createIndexes`: previously we were calling both `createIndexes` and `ensureIndexes`, which was unnecessary, as `ensureIndexes` is a deprecated alias of `createIndexes`
- Makes the reInit endpoint accept GET requests instead of POST requests, which makes it easier to debug right in the browser
- Some TypeScript fixes
- Adds a `dev:memorydb` script to the package.json. For some reason, `dev` is super unreliable on mongodb locally when running e2e tests; it frequently fails during index creation. Using the memorydb fixes this issue, with the bonus of more closely resembling the CI environment
- Fixes being unable to run test suites using turbopack + postgres, by explicitly installing `pg` as a devDependency in our monorepo
- Fixes the jest open-handles warning

30bb749e25 | ci: skip flaky test on supabase (#12667)

This disables running the "`can reliably run workflows with parallel tasks`" int test on supabase. For unknown reasons, it fails most of the time.

2a929cf385 | chore: fix all lint errors and add mechanisms to prevent them from appearing again (#12401)

I think it's easier to review this PR commit by commit, so I'll explain it this way:

## Commits

1. [parallelize eslint script (still showing logs results in serial)](

e87521a376 | perf(ui): significantly optimize form state component rendering, up to 96% smaller and 75% faster (#11946)

Significantly optimizes the component rendering strategy within the form state endpoint by precisely rendering only the fields that require it. This cuts down on server processing and network response sizes for form state requests **that manipulate array and block rows containing server components**, such as rich text fields, custom row labels, etc. (results listed below).

Here's a breakdown of the issue: previously, when manipulating array and block fields, _all_ rows would render any server components that might exist within them, including rich text fields. This means that subsequent changes to these fields could _re-render_ those same components even if they don't require it. For example, if you have an array field with a rich text field within it, adding the first row causes the rich text field to render, which is expected. However, when you add a second row, the rich text field within the first row renders again unnecessarily along with the new row. This is especially noticeable for fields with many rows, where every single row processes its server components and returns RSC data. And this does not only affect nested rich text fields, but any custom component defined on the field level, as these are handled in the same way.

The reason this was necessary in the first place was to ensure that the server components receive the proper data when they are rendered, such as the row index and the row's data. Changing one of these rows could cause the server component to receive the wrong data if it was not freshly rendered. While rows must still receive up-to-date props, it is no longer necessary to render everything.

Here's a breakdown of the actual fix: this change ensures that only the fields actually being manipulated are rendered, rather than all rows. The existing rows remain in memory on the client, while the newly rendered components return from the server. For example, if you add a new row to an array field, only the new row renders its server components. To do this, we send the path of the manipulated field to the server. The server then uses this path to determine which fields have already been rendered and which ones require rendering.

## Results

The following results were gathered by booting up the `form-state` test suite and seeding 100 array rows, each containing a rich text field. To invoke a form state request, we navigate to a document within the "posts" collection, then add a new array row to the list. The result is then saved to the file system for comparison.

| Test Suite | Collection | Number of Rows | Before | After | Percentage Change |
|------|------|---------|--------|--------|--------|
| `form-state` | `posts` | 101 | 1.9MB / 266ms | 80KB / 70ms | ~96% smaller / ~75% faster |

Co-authored-by: James <james@trbl.design>
Co-authored-by: Alessio Gravili <alessio@gravili.de>

c844b4c848 | feat: configurable job queue processing order (LIFO/FIFO), allow sequential execution of jobs (#11897)

Previously, jobs were executed in FIFO order on MongoDB and LIFO on Postgres, with no way to configure this behavior. This PR makes FIFO the default on both MongoDB and Postgres and introduces the following new options to configure the processing order globally or on a queue-by-queue basis:

- a `processingOrder` property in the jobs config
- a `processingOrder` argument to `payload.jobs.run()` to override what's set in the jobs config

It also adds a new `sequential` option to `payload.jobs.run()`, which can be useful for debugging.
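
A sketch of the new options; the exact property shapes are assumed from the PR description, the queue name is hypothetical, and `payload` is an initialized instance:

```ts
import { buildConfig } from 'payload'

export default buildConfig({
  // ...
  jobs: {
    tasks: [],
    processingOrder: {
      default: 'createdAt', // FIFO, the new default on every adapter
      queues: {
        nightly: '-createdAt', // LIFO for this one queue
      },
    },
  },
})

// Per-run override, plus the new sequential mode for debugging:
await payload.jobs.run({ processingOrder: '-createdAt', sequential: true })
```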

9a1c3cf4cc | fix: support parallel job queue tasks (#11917)

This adds support for running multiple job queue tasks in parallel within the same workflow while preventing conflicts. Previously, this caused the following issues:

- Job log entries got lost: the final job log was incomplete, despite all tasks having been executed
- Write conflicts in postgres, leading to unique constraint violation errors

The solution involves handling job log data updates in a way that avoids overwriting, and ensuring the final update reflects the latest job log data. Each job log entry now initializes its own ID, so a given job log entry's ID remains the same across multiple, parallel task executions.

## Postgres

In Postgres, we need to enable transactions for the `payload.db.updateJobs` operation; otherwise, two tasks updating the same job in parallel can conflict. This happens because Postgres handles array rows by deleting them all, then re-inserting (rather than upserting). The rows are stored in a separate table, and the following scenario can occur:

- Op 1: deletes all job log rows
- Op 2: deletes all job log rows
- Op 1: inserts 200 job log rows
- Op 2: inserts the same 200 job log rows again => `error: duplicate key value violates unique constraint "payload_jobs_log_pkey"`

Because transactions were not used, the rows inserted by Op 1 immediately became visible to Op 2, causing the conflict. Enabling transactions fixes this. In theory, the conflict can still happen if Op 1 commits before Op 2 starts inserting (due to the read committed isolation level), but it should occur far less frequently. Alongside this change, we should consider inserting the rows using an upsert (update on conflict), which would get rid of this error completely. That way, if the insertion of Op 1 is visible to Op 2, Op 2 will simply overwrite it rather than erroring. Individual job log entries are immutable and cannot be deleted, so this shouldn't corrupt any data.

## Mongo

In Mongo, the issue is addressed by ensuring that log row deletions caused by different log states in concurrent operations are not merged back into the client job log, and by making sure the final update includes all job logs. There is no duplicate key error in Mongo because the log array resides in the same document and duplicates are simply upserted. We cannot use transactions in Mongo, as they appear to lock the document in a way that prevents reliable parallel updates, leading to: `MongoServerError: WriteConflict error: this operation conflicted with another operation. Please retry your operation or multi-document transaction`

032c424244 | perf: use direct db calls in job-queue system (#11489)

Previously, our job queue system relied on `payload.*` operations, which ran very frequently:

- whenever job execution starts, as all jobs need to be set to `processing: true`
- every single time a task completes or fails, as the job log needs to be updated
- whenever job execution stops, to mark the job as completed and to delete it (if `deleteJobOnComplete` is set)

This PR replaces these with direct `payload.db.*` calls, which are significantly faster than payload operations. Given how often the job queue system communicates with the database, this should be a massive performance improvement.

## How it affects running hooks

To generate the task status, we previously used an `afterRead` hook. Since direct db adapter calls no longer execute hooks, this PR introduces new `updateJob` and `updateJobs` helpers to handle task status generation outside the normal payload hook lifecycle.

Additionally, a new `runHooks` property has been added to the global job configuration. Setting this to `true` can be useful if custom hooks were added to the `payload-jobs` collection config, but it reverts the job system to normal payload operations and should be avoided, as it degrades performance. In most cases, the `onSuccess` or `onFail` properties in the job config are sufficient and much faster. Furthermore, if the `depth` property is set in the global job configuration, the job queue system also falls back to the slower, normal payload operations.

Co-authored-by: Dan Ribbens <DanRibbens@users.noreply.github.com>
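
A sketch of the `runHooks` escape hatch, with the shape assumed from the description above:

```ts
import { buildConfig } from 'payload'

export default buildConfig({
  // ...
  jobs: {
    tasks: [],
    // Reverts the queue system to full payload.* operations so that custom
    // hooks on the payload-jobs collection run again. Slower - prefer the
    // onSuccess / onFail properties in the job config where possible.
    runHooks: true,
  },
})
```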

79a7b4ad02 | chore(db-mongodb): tsconfig uses strict: true and noUncheckedIndexedAccess: true (#11444)

Migrates the `db-mongodb` package to use the `strict: true` and `noUncheckedIndexedAccess: true` TSConfig properties. This greatly improves code quality and prevents some runtime errors or gives better error messages.

38131ed2c3 | feat: ability to cancel jobs (#11409)

This adds new `payload.jobs.cancel` and `payload.jobs.cancelByID` methods that allow you to cancel already-running jobs, or prevent queued jobs from running. While it's not possible to cancel a function mid-execution, this will stop job execution the next time the job makes a request to the db, which happens after every task.
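
A sketch of both methods; the `where` query mirrors the usual Local API shape, and the exact argument names are assumptions:

```ts
// Cancel a single job by ID:
await payload.jobs.cancelByID({ id: jobID })

// Cancel every matching queued or running job:
await payload.jobs.cancel({
  where: {
    workflowSlug: { equals: 'myWorkflow' }, // hypothetical workflow slug
  },
})
```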

d53f166476 | fix: ensure errors returned from tasks are properly logged (#11443)

Fixes https://github.com/payloadcms/payload/issues/9767

We allow failing a job queue task by returning `{ state: 'failed' }` from the task instead of throwing an error. Previously, however, this threw an error when trying to update the task in the database, and it was not possible to customize the error message. This PR fixes that by letting you return an `errorMessage` alongside `{ state: 'failed' }`, and by ensuring the error is transformed into proper JSON before saving it to the `error` column.
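
A sketch of a task handler relying on the fixed behavior; the `myTask` slug and the `TaskHandler` helper type are illustrative:

```ts
import type { TaskHandler } from 'payload'

export const myTaskHandler: TaskHandler<'myTask'> = async () => {
  const ok = false // stand-in for real work that can fail

  if (!ok) {
    // Returning (instead of throwing) marks the task as failed; the message
    // is serialized to JSON and stored in the job's `error` column.
    return {
      state: 'failed',
      errorMessage: 'Upstream service rejected the request',
    }
  }

  return { output: {} }
}
```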

c6ab312286 | chore: cleanup queues test suite (#11410)

This PR extracts each workflow of our queues test suite into its own file.

117949b8d9 | test: regenerate payload-types.ts for all test suites (#11238)

Regenerates `payload-types.ts` for all test suites.

918bd72335 | chore: update mongodb-memory-server v9 -> v10 (#10556)

Updates mongodb-memory-server to version 10.

08fb159943 | feat: allow running sub-tasks from tasks (#10373)

Task handlers now receive `inlineTask` as an arg, which can be used to
run inline sub-tasks. In the task log, those inline tasks will have a
`parent` property that points to the parent task.
Example:
```ts
{
slug: 'subTask',
inputSchema: [
{
name: 'message',
type: 'text',
required: true,
},
],
handler: async ({ job, inlineTask }) => {
await inlineTask('create two docs', {
task: async ({ input, inlineTask }) => {
const { newSimple } = await inlineTask('create doc 1', {
task: async ({ req }) => {
const newSimple = await req.payload.create({
collection: 'simple',
req,
data: {
title: input.message,
},
})
return {
output: {
newSimple,
},
}
},
})
const { newSimple2 } = await inlineTask('create doc 2', {
task: async ({ req }) => {
const newSimple2 = await req.payload.create({
collection: 'simple',
req,
data: {
title: input.message,
},
})
return {
output: {
newSimple2,
},
}
},
})
return {
output: {
simpleID1: newSimple.id,
simpleID2: newSimple2.id,
},
}
},
input: {
message: job.input.message,
},
})
},
} as WorkflowConfig<'subTask'>
```
Job log example:
```ts
[
{
executedAt: '2025-01-06T03:55:44.682Z',
completedAt: '2025-01-06T03:55:44.684Z',
taskSlug: 'inline',
taskID: 'create doc 1',
output: { newSimple: [Object] },
parent: { taskSlug: 'inline', taskID: 'create two docs' }, // <= New
state: 'succeeded',
id: '677b5440ba35d345d1214d1b'
},
{
executedAt: '2025-01-06T03:55:44.690Z',
completedAt: '2025-01-06T03:55:44.692Z',
taskSlug: 'inline',
taskID: 'create doc 2',
output: { newSimple2: [Object] },
parent: { taskSlug: 'inline', taskID: 'create two docs' }, // <= New
state: 'succeeded',
id: '677b5440ba35d345d1214d1c'
},
{
executedAt: '2025-01-06T03:55:44.681Z',
completedAt: '2025-01-06T03:55:44.697Z',
taskSlug: 'inline',
taskID: 'create two docs',
input: { message: 'hello!' },
output: {
simpleID1: '677b54401e34772cc63c8693',
simpleID2: '677b54401e34772cc63c8697'
},
parent: {},
state: 'succeeded',
id: '677b5440ba35d345d1214d1d'
}
]
```

b3308736c4 | feat: jsdocs for generated types, by using admin.description (#9917)

This makes use of `admin.description` to generate JSDocs for field, collection and global generated types. For the future, we should add a dedicated property to override these JSDocs.

You can view the effect of this PR on our test suite generated types here:
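
For illustration, a hedged sketch of the effect; the field and the emitted type are hypothetical:

```ts
// A field config with a description...
const titleField = {
  name: 'title',
  type: 'text',
  admin: {
    description: 'Shown as the document label in list views',
  },
}

// ...now surfaces in payload-types.ts roughly as:
//
//   /**
//    * Shown as the document label in list views
//    */
//   title?: string | null
```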

b1ef28dd39 | feat: allow where in payload.jobs.run (#9877)

Example:
```ts
await payload.jobs.queue({
task: 'MyTask',
input: {
message: `secret`,
},
})
await payload.jobs.run({ where: { 'input.message': { equals: 'secret' } } })
```

09246a45e0 | feat: add payload.jobs.runByID (#9875)
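
A sketch of queueing a job and then executing just that one; the argument shapes are assumed:

```ts
const job = await payload.jobs.queue({
  task: 'MyTask',
  input: { message: 'run me directly' },
})

await payload.jobs.runByID({ id: job.id })
```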

a89d54454a | fix: ensure jobs do not retry indefinitely by default, fix undefined values in error messages (#9605)

## Fix default retries

By default, if no `retries` property has been set, jobs / tasks should not be retried. This was not the case previously: the `maxRetries` variable was `undefined`, causing jobs to retry endlessly. This PR sets it to `0` by default.

Additionally, this fixes some undesirable behavior of the workflow `retries` property. Workflow retries now act as **maximum**, workflow-level retries. Only tasks that do not have a retry property set inherit the workflow-level retries.

## Fix error messages

Previously, you could encounter error messages containing undefined values. The reason is that the error messages always used `job.workflowSlug`. However, if you queue a task directly, without a workflow, `job.workflowSlug` is undefined and `job.taskSlug` should be used instead. This PR also gets rid of a second undefined value by ensuring that `maxRetries` is never undefined.

b96475b7b9 | fix: run queues via the /payload-jobs/run endpoint without workflows (#9509)

Fixes https://github.com/payloadcms/payload/discussions/9418 (the `/api/payload-jobs/run` endpoint) when the config doesn't have any `workflows` but only `tasks`.

7eb388d403 | fix: ensure deleteJobOnComplete property for jobs works (#9283)

Ensures that the `deleteJobOnComplete` property (`true` by default) works properly.

e40141b559 | fix: queues types with strict: true (#9281)

Fixes types for workflows / jobs `input` and `output` when using `strict: true` or `strictNullChecks: true` by ensuring that all properties in generated types are required.

e0309a1dd0 | fix: allow specifying queue (#9151)

Allows users to specify a queue when calling `payload.jobs.queue()`. Closes #9133.
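
A sketch; the queue name is hypothetical:

```ts
await payload.jobs.queue({
  workflow: 'myWorkflow',
  queue: 'nightly', // previously, everything landed in the `default` queue
  input: {
    message: 'hello',
  },
})
```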

c96fa613bc | feat!: on demand rsc (#8364)

Currently, Payload renders all custom components on initial compile of the admin panel. This is problematic for two key reasons:

1. Custom components do not receive contextual data, i.e. fields do not receive their field data, edit views do not receive their document data, etc.
2. Components are unnecessarily rendered before they are used

This was initially required to support React Server Components within the Payload Admin Panel, for a few key reasons:

1. Fields can be dynamically rendered within arrays, blocks, etc.
2. Documents can be recursively rendered within a "drawer" UI, i.e. relationship fields
3. Payload supports server/client component composition

In order to achieve this, components need to be rendered on the server and passed as "slots" to the client. Currently, the pattern for this is to render custom server components into the "client config". Then, when a view or field needs to be rendered, we first check the client config for a "pre-rendered" component, otherwise render our client-side fallback component. But for the reasons listed above, this pattern doesn't exactly make custom server components very useful within the Payload Admin Panel, which is where this PR comes in.

Now, instead of pre-rendering all components on initial compile, we're able to render custom components _on demand_, only as they are needed. To achieve this, we've established [this pattern](https://github.com/payloadcms/payload/pull/8481) of React Server Functions in the Payload Admin Panel. With Server Functions, we can iterate the Payload Config and return JSX through React's `text/x-component` content-type. This means we're able to pass contextual props to custom components, such as data for fields and views.

## Breaking Changes

1. Add the following to your root layout file, typically located at `(app)/(payload)/layout.tsx`:

```diff
/* THIS FILE WAS GENERATED AUTOMATICALLY BY PAYLOAD. */
/* DO NOT MODIFY IT BECAUSE IT COULD BE REWRITTEN AT ANY TIME. */
+ import type { ServerFunctionClient } from 'payload'
import config from '@payload-config'
import { RootLayout } from '@payloadcms/next/layouts'
import { handleServerFunctions } from '@payloadcms/next/utilities'
import React from 'react'
import { importMap } from './admin/importMap.js'
import './custom.scss'

type Args = {
  children: React.ReactNode
}

+ const serverFunctions: ServerFunctionClient = async function (args) {
+   'use server'
+   return handleServerFunctions({
+     ...args,
+     config,
+     importMap,
+   })
+ }

const Layout = ({ children }: Args) => (
  <RootLayout
    config={config}
    importMap={importMap}
+   serverFunctions={serverFunctions}
  >
    {children}
  </RootLayout>
)

export default Layout
```

2. If you were previously posting to the `/api/form-state` endpoint, it no longer exists. Instead, you'll need to invoke the `form-state` Server Function, which can be done through the _new_ `getFormState` utility:

```diff
- import { getFormState } from '@payloadcms/ui'
- const { state } = await getFormState({
-   apiRoute: '',
-   body: {
-     // ...
-   },
-   serverURL: ''
- })
+ const { getFormState } = useServerFunctions()
+
+ const { state } = await getFormState({
+   // ...
+ })
```

## Breaking Changes

```diff
- useFieldProps()
- useCellProps()
```

More details coming soon.

Co-authored-by: Alessio Gravili <alessio@gravili.de>
Co-authored-by: Jarrod Flesch <jarrodmflesch@gmail.com>
Co-authored-by: James <james@trbl.design>

8970c6b3a6 | feat: adds jobs queue (#8228)

Adds a jobs queue to Payload.
- [x] Docs, w/ examples for Vercel Cron, additional services
- [x] Type the `job` using GeneratedTypes in `JobRunnerArgs`
(@AlessioGr)
- [x] Write the `runJobs` function
- [x] Allow for some type of `payload.runTask`
- [x] Open up a new bin script for running jobs
- [x] Determine strategy for runner endpoint to either await jobs
successfully or return early and stay open until job work completes
(serverless ramifications here)
- [x] Allow for job runner to accept how many jobs to run in one
invocation
- [x] Make a Payload local API method for creating a new job easily
(payload.createJob) or similar which is strongly typed (@AlessioGr)
- [x] Make `payload.runJobs` or similar (@AlessioGr)
- [x] Write tests for retrying up to max retries for a given step
- [x] Write tests for dynamic import of a runner
The shape of the config should permit the definition of steps separate
from the job workflows themselves.
```js
const config = {
// Not sure if we need this property anymore
queues: {
},
// A job is an instance of a workflow, stored in DB
// and triggered by something at some point
jobs: {
// Be able to override the jobs collection
collectionOverrides: () => {},
// Workflows are groups of tasks that handle
// the flow from task to task.
// When defined on the config, they are considered as predefined workflows
// BUT - in the future, we'll allow for UI-based workflow definition as well.
workflows: [
{
slug: 'job-name',
// Temporary name for this
// should be able to pass function
// or path to it for Node to dynamically import
controlFlowInJS: '/my-runner.js',
// Temporary name as well
// should be able to eventually define workflows
// in UI (meaning they need to be serialized in JSON)
// Should not be able to define both control flows
controlFlowInJSON: [
{
task: 'myTask',
next: {
// etc
}
}
],
// Workflows take input
// which are a group of fields
input: [
{
name: 'post',
type: 'relationship',
relationTo: 'posts',
maxDepth: 0,
required: true,
},
{
name: 'message',
type: 'text',
required: true,
},
],
},
],
// Tasks are defined separately as isolated functions
// that can be retried on fail
tasks: [
{
slug: 'myTask',
retries: 2,
// Each task takes input
// Used to auto-type the task func args
input: [
{
name: 'post',
type: 'relationship',
relationTo: 'posts',
maxDepth: 0,
required: true,
},
{
name: 'message',
type: 'text',
required: true,
},
],
// Each task takes output
// Used to auto-type the function signature
output: [
{
name: 'success',
type: 'checkbox',
}
],
onSuccess: () => {},
onFail: () => {},
run: myRunner,
},
]
}
}
```
### `payload.createJob`
This function should allow for the creation of jobs based on either a
workflow (group of tasks) or an individual task.
To create a job using a workflow:
```js
const job = await payload.createJob({
// Accept the `name` of a workflow so we can match to either a
// code-based workflow OR a workflow defined in the DB
// Should auto-type the input
workflowName: 'myWorkflow',
input: {
// typed to the args of the workflow by name
}
})
```
To create a job using a task:
```js
const job = await payload.createJob({
// Accept the `name` of a task
task: 'myTask',
input: {
// typed to the args of the task by name
}
})
```
---------
Co-authored-by: Alessio Gravili <alessio@gravili.de>
Co-authored-by: Dan Ribbens <dan.ribbens@gmail.com>