import fs from 'fs'
|
|
import * as os from 'node:os'
|
|
import path from 'path'
|
|
import { type Payload } from 'payload'
|
|
|
|
import { isMongoose } from './isMongoose.js'
|
|
import { resetDB } from './reset.js'
|
|
import { createSnapshot, dbSnapshot, restoreFromSnapshot, uploadsDirCache } from './snapshot.js'
|
|
|
|
/** Seed routine invoked with the Payload instance; may be synchronous or async. */
type SeedFunction = (_payload: Payload) => Promise<void> | void
|
|
|
|
export async function seedDB({
|
|
_payload,
|
|
collectionSlugs,
|
|
seedFunction,
|
|
snapshotKey,
|
|
uploadsDir,
|
|
/**
|
|
* Always seeds, instead of restoring from snapshot for consecutive test runs
|
|
*/
|
|
alwaysSeed = false,
|
|
}: {
|
|
_payload: Payload
|
|
alwaysSeed?: boolean
|
|
collectionSlugs: string[]
|
|
seedFunction: SeedFunction
|
|
/**
|
|
* Key to uniquely identify the kind of snapshot. Each test suite should pass in a unique key
|
|
*/
|
|
snapshotKey: string
|
|
uploadsDir?: string
|
|
}) {
|
|
/**
|
|
* Reset database
|
|
*/
|
|
await resetDB(_payload, collectionSlugs)
|
|
/**
|
|
* Delete uploads directory if it exists
|
|
*/
|
|
if (uploadsDir) {
|
|
try {
|
|
// Attempt to clear the uploads directory if it exists
|
|
await fs.promises.access(uploadsDir)
|
|
const files = await fs.promises.readdir(uploadsDir)
|
|
for (const file of files) {
|
|
await fs.promises.rm(path.join(uploadsDir, file))
|
|
}
|
|
} catch (error) {
|
|
if (error.code !== 'ENOENT') {
|
|
// If the error is not because the directory doesn't exist
|
|
console.error('Error in operation (deleting uploads dir):', error)
|
|
throw error
|
|
}
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Mongoose & Postgres: Restore snapshot of old data if available
|
|
*
|
|
* Note for postgres: For postgres, this needs to happen AFTER the tables were created.
|
|
* This does not work if I run payload.db.init or payload.db.connect anywhere. Thus, when resetting the database, we are not dropping the schema, but are instead only deleting the table values
|
|
*/
|
|
let restored = false
|
|
if (!alwaysSeed && dbSnapshot[snapshotKey] && Object.keys(dbSnapshot[snapshotKey]).length) {
|
|
await restoreFromSnapshot(_payload, snapshotKey, collectionSlugs)
|
|
|
|
/**
|
|
* Restore uploads dir if it exists
|
|
*/
|
|
if (uploadsDir && fs.existsSync(uploadsDirCache[snapshotKey])) {
|
|
// move all files from inside uploadsDirCacheFolder to uploadsDir
|
|
await fs.promises
|
|
.readdir(uploadsDirCache[snapshotKey], { withFileTypes: true })
|
|
.then(async (files) => {
|
|
for (const file of files) {
|
|
if (file.isDirectory()) {
|
|
await fs.promises.mkdir(path.join(uploadsDir, file.name), {
|
|
recursive: true,
|
|
})
|
|
await fs.promises.copyFile(
|
|
path.join(uploadsDirCache[snapshotKey], file.name),
|
|
path.join(uploadsDir, file.name),
|
|
)
|
|
} else {
|
|
await fs.promises.copyFile(
|
|
path.join(uploadsDirCache[snapshotKey], file.name),
|
|
path.join(uploadsDir, file.name),
|
|
)
|
|
}
|
|
}
|
|
})
|
|
.catch((err) => {
|
|
console.error('Error in operation (restoring uploads dir):', err)
|
|
throw err
|
|
})
|
|
}
|
|
|
|
restored = true
|
|
}
|
|
|
|
/**
|
|
* Mongoose: Re-create indexes
|
|
* Postgres: No need for any action here, since we only delete the table data and no schemas
|
|
*/
|
|
// Dropping the db breaks indexes (on mongoose - did not test extensively on postgres yet), so we recreate them here
|
|
if (isMongoose(_payload)) {
|
|
await Promise.all([
|
|
...collectionSlugs.map(async (collectionSlug) => {
|
|
await _payload.db.collections[collectionSlug].createIndexes()
|
|
}),
|
|
])
|
|
}
|
|
|
|
/**
|
|
* If a snapshot was restored, we don't need to seed the database
|
|
*/
|
|
if (restored) {
|
|
return
|
|
}
|
|
|
|
/**
|
|
* Seed the database with data and save it to a snapshot
|
|
**/
|
|
if (typeof seedFunction === 'function') {
|
|
await seedFunction(_payload)
|
|
}
|
|
|
|
if (!alwaysSeed) {
|
|
await createSnapshot(_payload, snapshotKey, collectionSlugs)
|
|
}
|
|
|
|
/**
|
|
* Cache uploads dir to a cache folder if uploadsDir exists
|
|
*/
|
|
if (!alwaysSeed && uploadsDir && fs.existsSync(uploadsDir)) {
|
|
if (!uploadsDirCache[snapshotKey]) {
|
|
// Define new cache folder path to the OS temp directory (well a random folder inside it)
|
|
uploadsDirCache[snapshotKey] = path.join(
|
|
os.tmpdir(),
|
|
`${snapshotKey}`,
|
|
`payload-e2e-tests-uploads-cache`,
|
|
)
|
|
}
|
|
|
|
// delete the cache folder if it exists
|
|
if (fs.existsSync(uploadsDirCache[snapshotKey])) {
|
|
await fs.promises.rm(uploadsDirCache[snapshotKey], { recursive: true })
|
|
}
|
|
await fs.promises.mkdir(uploadsDirCache[snapshotKey], { recursive: true })
|
|
// recursively move all files and directories from uploadsDir to uploadsDirCacheFolder
|
|
await fs.promises
|
|
.readdir(uploadsDir, { withFileTypes: true })
|
|
.then(async (files) => {
|
|
for (const file of files) {
|
|
if (file.isDirectory()) {
|
|
await fs.promises.mkdir(path.join(uploadsDirCache[snapshotKey], file.name), {
|
|
recursive: true,
|
|
})
|
|
await fs.promises.copyFile(
|
|
path.join(uploadsDir, file.name),
|
|
path.join(uploadsDirCache[snapshotKey], file.name),
|
|
)
|
|
} else {
|
|
await fs.promises.copyFile(
|
|
path.join(uploadsDir, file.name),
|
|
path.join(uploadsDirCache[snapshotKey], file.name),
|
|
)
|
|
}
|
|
}
|
|
})
|
|
.catch((err) => {
|
|
console.error('Error in operation (creating snapshot of uploads dir):', err)
|
|
throw err
|
|
})
|
|
}
|
|
}
|