Adds a new `schedule` property to workflow and task configs that can be used to have Payload automatically _queue_ jobs following a certain _schedule_.

Docs: https://payloadcms.com/docs/dynamic/jobs-queue/schedules?branch=feat/schedule-jobs

## API Example

```ts
export default buildConfig({
  // ...
  jobs: {
    // ...
    scheduler: 'manual', // Or `cron` if you're not using serverless. If `manual` is used, the user needs to set up running /api/payload-jobs/handleSchedules or payload.jobs.handleSchedules at regular intervals
    tasks: [
      {
        schedule: [
          {
            cron: '* * * * * *',
            queue: 'autorunSecond',
            // Hooks are optional
            hooks: {
              // Not an array, as providing and calling `defaultBeforeSchedule` would be more error-prone if this were an array
              beforeSchedule: async (args) => {
                // Handles verifying that there are no jobs already scheduled or processing.
                // You can override this behavior by not calling defaultBeforeSchedule, e.g. if you wanted
                // to allow a maximum of 3 scheduled jobs in the queue instead of 1, or add any additional conditions
                const result = await args.defaultBeforeSchedule(args)
                return {
                  ...result,
                  input: {
                    message: 'This task runs every second',
                  },
                }
              },
              afterSchedule: async (args) => {
                await args.defaultAfterSchedule(args) // Handles updating the payload-jobs-stats global
                args.req.payload.logger.info(
                  'EverySecond task scheduled: ' +
                    (args.status === 'success' ? args.job.id : 'skipped or failed to schedule'),
                )
              },
            },
          },
        ],
        slug: 'EverySecond',
        inputSchema: [
          {
            name: 'message',
            type: 'text',
            required: true,
          },
        ],
        handler: ({ input, req }) => {
          req.payload.logger.info(input.message)
          return {
            output: {},
          }
        },
      },
    ],
  },
})
```

---

- To see the specific tasks where the Asana app for GitHub is being used, see below:
  - https://app.asana.com/0/0/1210495300843759
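To expand on the `scheduler: 'manual'` comment in the API example above: with the manual scheduler, scheduled jobs are only queued when `payload.jobs.handleSchedules` or the `/api/payload-jobs/handleSchedules` endpoint is invoked. Below is a minimal sketch of one way to wire that up from a long-running process; the `getPayload` bootstrap, the config import path, and the 60-second interval are illustrative assumptions, not anything prescribed by this PR.

```ts
// Illustrative sketch: keep the `manual` scheduler fed from a long-running process.
// The config path and interval are assumptions made for this example.
import { getPayload } from 'payload'

import config from './payload.config.js'

const payload = await getPayload({ config })

setInterval(() => {
  // Queues any scheduled jobs whose cron is due; the default beforeSchedule
  // skips scheduling when a job is already queued or processing, so calling
  // this more often than needed is safe
  void payload.jobs.handleSchedules()
}, 60_000)

// On serverless, an external cron can hit the REST equivalent instead:
//   POST /api/payload-jobs/handleSchedules
```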
import path from 'path'
import { _internal_jobSystemGlobals, _internal_resetJobSystemGlobals, type Payload } from 'payload'
import { wait } from 'payload/shared'
import { fileURLToPath } from 'url'

import type { NextRESTClient } from '../helpers/NextRESTClient.js'

import { devUser } from '../credentials.js'
import { initPayloadInt } from '../helpers/initPayloadInt.js'
import { clearAndSeedEverything } from './seed.js'

let payload: Payload
let restClient: NextRESTClient
let token: string

const { email, password } = devUser
const filename = fileURLToPath(import.meta.url)
const dirname = path.dirname(filename)

describe('Queues - scheduling, with automatic scheduling handling', () => {
  beforeAll(async () => {
    process.env.SEED_IN_CONFIG_ONINIT = 'false' // Makes it so the payload config onInit seed is not run. Otherwise, the seed would be run unnecessarily twice for the initial test run - once for beforeEach and once for onInit
    ;({ payload, restClient } = await initPayloadInt(
      dirname,
      undefined,
      undefined,
      'config.schedules-autocron.ts',
    ))
  })

  afterAll(async () => {
    // Ensure no new crons are scheduled
    _internal_jobSystemGlobals.shouldAutoRun = false
    _internal_jobSystemGlobals.shouldAutoSchedule = false
    // Wait 3 seconds to ensure all currently-running crons are done. If we shut down the db while a function is running, it can cause issues
    // Cron function runs may persist after a test has finished
    await wait(3000)
    // Now we can destroy the payload instance
    await payload.destroy()
    _internal_resetJobSystemGlobals()
  })

  afterEach(() => {
    _internal_resetJobSystemGlobals()
  })

  beforeEach(async () => {
    // Set autorun to false during the seed process to ensure no crons are scheduled, which may affect the tests
    _internal_jobSystemGlobals.shouldAutoRun = false
    _internal_jobSystemGlobals.shouldAutoSchedule = false

    await clearAndSeedEverything(payload)
    const data = await restClient
      .POST('/users/login', {
        body: JSON.stringify({
          email,
          password,
        }),
      })
      .then((res) => res.json())

    if (data.token) {
      token = data.token
    }
    payload.config.jobs.deleteJobOnComplete = true
    _internal_jobSystemGlobals.shouldAutoRun = true
    _internal_jobSystemGlobals.shouldAutoSchedule = true
  })

  it('can auto-schedule through automatic crons and autorun jobs', async () => {
    // Do not call payload.jobs.run() or payload.jobs.handleSchedules() - payload should automatically schedule crons for auto-scheduling

    // Autorun and autoschedule both run every second - so at least two jobs should have autorun after 3.5 seconds.
    // Worst case (fewest jobs completed), if autoschedule runs after the first autorun:
    // Second 1: Autorun runs => no jobs
    // Second 1: Autoschedule runs => schedules 1 job
    // Second 2: Autorun runs => runs 1 job => 1
    // Second 2: Autoschedule runs => schedules 1 job
    // Second 3: Autorun runs => runs 1 job => 2
    // Second 3: Autoschedule runs => schedules 1 job
    // Status after 3.5 seconds: 2 jobs run, 1 job scheduled

    // Best case (most jobs completed):
    // Second 1: Autoschedule runs => schedules 1 job
    // Second 1: Autorun runs => runs 1 job => 1
    // Second 2: Autoschedule runs => schedules 1 job
    // Second 2: Autorun runs => runs 1 job => 2
    // Second 3: Autoschedule runs => schedules 1 job
    // Second 3: Autorun runs => runs 1 job => 3
    // Status after 3.5 seconds: 3 jobs run, no jobs scheduled
    const minJobsCompleted = 2
    const maxJobsCompleted = 3

    await new Promise((resolve) => setTimeout(resolve, 3500)) // 3 seconds + 0.5 seconds to ensure the last job has been completed

    const allSimples = await payload.find({
      collection: 'simple',
      limit: 100,
    })

    expect(allSimples.totalDocs).toBeGreaterThanOrEqual(minJobsCompleted)
    expect(allSimples.totalDocs).toBeLessThanOrEqual(maxJobsCompleted)
    expect(allSimples?.docs?.[0]?.title).toBe('This task runs every second')
  })
})
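For context, the test above boots Payload from `config.schedules-autocron.ts`, which is not shown on this page. Purely as an illustration of the kind of setup it exercises (the real config may differ), a task scheduled onto a queue that also autoruns every second could look roughly like the sketch below; the `simple` collection handler and the `beforeSchedule` hook are assumptions inferred from the assertions and the API example earlier, not the actual file.

```ts
// Rough sketch only - not the actual config.schedules-autocron.ts.
// It pairs the `schedule` API from this PR with a queue that autoruns every second.
import { buildConfig } from 'payload'

export default buildConfig({
  // ...db adapter, secret, collections (including `simple`), etc. omitted
  jobs: {
    scheduler: 'cron', // let Payload register the scheduling crons itself
    autoRun: [
      {
        cron: '* * * * * *', // run the `autorunSecond` queue every second
        queue: 'autorunSecond',
      },
    ],
    tasks: [
      {
        slug: 'EverySecond',
        schedule: [
          {
            cron: '* * * * * *',
            queue: 'autorunSecond',
            hooks: {
              // Provide the input the handler expects, mirroring the API example above
              beforeSchedule: async (args) => ({
                ...(await args.defaultBeforeSchedule(args)),
                input: { message: 'This task runs every second' },
              }),
            },
          },
        ],
        inputSchema: [{ name: 'message', type: 'text', required: true }],
        handler: async ({ input, req }) => {
          // Each run creates one `simple` document, which is what the
          // totalDocs assertions in the test count
          await req.payload.create({
            collection: 'simple',
            data: { title: input.message },
          })
          return { output: {} }
        },
      },
    ],
  },
})
```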