payloadcms/test/helpers/seed.ts
Jacob Fletcher c96fa613bc feat!: on demand rsc (#8364)
Currently, Payload renders all custom components on initial compile of
the admin panel. This is problematic for two key reasons:
1. Custom components do not receive contextual data, e.g. fields do not
receive their field data, edit views do not receive their document data,
etc.
2. Components are unnecessarily rendered before they are used

This pre-rendering was initially required to support React Server Components
within the Payload Admin Panel for three key reasons:
1. Fields can be dynamically rendered within arrays, blocks, etc.
2. Documents can be recursively rendered within a "drawer" UI, e.g.
relationship fields
3. Payload supports server/client component composition

In order to achieve this, components need to be rendered on the server
and passed as "slots" to the client. Currently, the pattern for this is
to render custom server components into the "client config". Then, when a
view or field needs to be rendered, we first check the client config
for a "pre-rendered" component, and otherwise render our client-side
fallback component.

But for the reasons listed above, this pattern doesn't exactly make
custom server components very useful within the Payload Admin Panel,
which is where this PR comes in. Now, instead of pre-rendering all
components on initial compile, we're able to render custom components
_on demand_, only as they are needed.

To achieve this, we've established [this
pattern](https://github.com/payloadcms/payload/pull/8481) of React
Server Functions in the Payload Admin Panel. With Server Functions, we
can iterate the Payload Config and return JSX through React's
`text/x-component` content-type. This means we're able to pass
contextual props to custom components, such as data for fields and
views.
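
To make that concrete, here is a minimal, illustrative sketch of the general React Server Function shape: an async `'use server'` function that receives contextual arguments and returns JSX. The function name, argument shape, and markup below are hypothetical examples, not Payload's actual API:

```tsx
'use server'

import React from 'react'

// Hypothetical argument shape: the contextual data a custom field component
// previously never received at initial compile time.
type RenderFieldLabelArgs = {
  label: string
  value: string
}

// Because of the module-level 'use server' directive, this exported async
// function can be called from client components. React renders the returned
// JSX on the server and streams it to the client as `text/x-component`.
export async function renderFieldLabel(args: RenderFieldLabelArgs): Promise<React.ReactNode> {
  return (
    <span>
      {args.label}: {args.value}
    </span>
  )
}
```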

## Breaking Changes

1. Add the following to your root layout file, typically located at
`(app)/(payload)/layout.tsx`:

    ```diff
    /* THIS FILE WAS GENERATED AUTOMATICALLY BY PAYLOAD. */
    /* DO NOT MODIFY IT BECAUSE IT COULD BE REWRITTEN AT ANY TIME. */
    + import type { ServerFunctionClient } from 'payload'

    import config from '@payload-config'
    import { RootLayout } from '@payloadcms/next/layouts'
    import { handleServerFunctions } from '@payloadcms/next/utilities'
    import React from 'react'

    import { importMap } from './admin/importMap.js'
    import './custom.scss'

    type Args = {
      children: React.ReactNode
    }

    + const serverFunctions: ServerFunctionClient = async function (args) {
    +   'use server'
    +   return handleServerFunctions({
    +     ...args,
    +     config,
    +     importMap,
    +   })
    + }

    const Layout = ({ children }: Args) => (
      <RootLayout
        config={config}
        importMap={importMap}
    +   serverFunctions={serverFunctions}
      >
        {children}
      </RootLayout>
    )

    export default Layout
    ```
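
    A note on the diff above: marking `serverFunctions` with `'use server'` turns it into a React Server Function that is bound to your local `config` and `importMap`. This is what the admin panel's client components call to render custom components on demand, replacing request/response endpoints such as the removed `/api/form-state` described below.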

2. The `/api/form-state` endpoint no longer exists. If you were previously
posting to it, you'll instead need to invoke the `form-state` Server
Function, which can be done through the _new_ `getFormState` utility
returned by the `useServerFunctions` hook (a fuller client-side sketch
follows the diff below):

    ```diff
    - import { getFormState } from '@payloadcms/ui'
    - const { state } = await getFormState({
    -   apiRoute: '',
    -   body: {
    -     // ...
    -   },
    -   serverURL: ''
    - })

    + const { getFormState } = useServerFunctions()
    +
    + const { state } = await getFormState({
    +   // ...
    + })
    ```
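
    For additional context, here is a minimal client-side sketch of the new pattern. It assumes `useServerFunctions` is exported from `@payloadcms/ui` (where `getFormState` previously lived), the component itself is illustrative, and the `getFormState` arguments are elided just as in the diff above:

    ```tsx
    'use client'

    import { useServerFunctions } from '@payloadcms/ui' // assumption: the hook is exported from here
    import React, { useEffect, useState } from 'react'

    export const MyCustomClientField: React.FC = () => {
      // The hook exposes wrappers around the admin panel's Server Functions.
      const { getFormState } = useServerFunctions()
      const [loaded, setLoaded] = useState(false)

      useEffect(() => {
        const run = async () => {
          // Pass the same body you previously POSTed to `/api/form-state`
          const { state } = await getFormState({
            // ...
          })

          setLoaded(Boolean(state))
        }

        void run()
      }, [getFormState])

      return loaded ? <span>Form state loaded</span> : null
    }
    ```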

### Removed Hooks

The following hooks have been removed:

```diff
- useFieldProps()
- useCellProps()
```

More details coming soon.

---------

Co-authored-by: Alessio Gravili <alessio@gravili.de>
Co-authored-by: Jarrod Flesch <jarrodmflesch@gmail.com>
Co-authored-by: James <james@trbl.design>
2024-11-11 13:59:05 -05:00


import fs from 'fs'
import * as os from 'node:os'
import path from 'path'
import { type Payload } from 'payload'

import { isMongoose } from './isMongoose.js'
import { resetDB } from './reset.js'
import { createSnapshot, dbSnapshot, restoreFromSnapshot, uploadsDirCache } from './snapshot.js'

type SeedFunction = (_payload: Payload) => Promise<void> | void

export async function seedDB({
  _payload,
  collectionSlugs,
  seedFunction,
  snapshotKey,
  uploadsDir,
  /**
   * Always seeds, instead of restoring from snapshot for consecutive test runs
   */
  alwaysSeed = false,
  deleteOnly,
}: {
  _payload: Payload
  alwaysSeed?: boolean
  collectionSlugs: string[]
  deleteOnly?: boolean
  seedFunction: SeedFunction
  /**
   * Key to uniquely identify the kind of snapshot. Each test suite should pass in a unique key
   */
  snapshotKey: string
  uploadsDir?: string | string[]
}) {
  /**
   * Reset database
   */
  await resetDB(_payload, collectionSlugs)

  /**
   * Delete uploads directory if it exists
   */
  if (uploadsDir) {
    const uploadsDirs = Array.isArray(uploadsDir) ? uploadsDir : [uploadsDir]
    for (const dir of uploadsDirs) {
      try {
        // Attempt to clear the uploads directory if it exists
        await fs.promises.access(dir)
        const files = await fs.promises.readdir(dir)
        for (const file of files) {
          await fs.promises.rm(path.join(dir, file))
        }
      } catch (error) {
        if (error.code !== 'ENOENT') {
          // If the error is not because the directory doesn't exist
          console.error('Error in operation (deleting uploads dir):', dir, error)
          throw error
        }
      }
    }
  }

  /**
   * Mongoose & Postgres: Restore snapshot of old data if available
   *
   * Note for postgres: For postgres, this needs to happen AFTER the tables were created.
   * This does not work if I run payload.db.init or payload.db.connect anywhere. Thus, when resetting the database, we are not dropping the schema, but are instead only deleting the table values
   */
  let restored = false
  if (
    !alwaysSeed &&
    dbSnapshot[snapshotKey] &&
    Object.keys(dbSnapshot[snapshotKey]).length &&
    !deleteOnly
  ) {
    await restoreFromSnapshot(_payload, snapshotKey, collectionSlugs)

    /**
     * Restore uploads dir if it exists
     */
    if (uploadsDirCache[snapshotKey]) {
      for (const cache of uploadsDirCache[snapshotKey]) {
        if (cache.originalDir && fs.existsSync(cache.cacheDir)) {
          // move all files from inside uploadsDirCacheFolder to uploadsDir
          await fs.promises
            .readdir(cache.cacheDir, { withFileTypes: true })
            .then(async (files) => {
              for (const file of files) {
                if (file.isDirectory()) {
                  await fs.promises.mkdir(path.join(cache.originalDir, file.name), {
                    recursive: true,
                  })
                  await fs.promises.copyFile(
                    path.join(cache.cacheDir, file.name),
                    path.join(cache.originalDir, file.name),
                  )
                } else {
                  await fs.promises.copyFile(
                    path.join(cache.cacheDir, file.name),
                    path.join(cache.originalDir, file.name),
                  )
                }
              }
            })
            .catch((err) => {
              console.error('Error in operation (restoring uploads dir):', err)
              throw err
            })
        }
      }
    }

    restored = true
  }

  /**
   * Mongoose: Re-create indexes
   * Postgres: No need for any action here, since we only delete the table data and no schemas
   */
  // Dropping the db breaks indexes (on mongoose - did not test extensively on postgres yet), so we recreate them here
  if (isMongoose(_payload)) {
    await Promise.all([
      ...collectionSlugs.map(async (collectionSlug) => {
        await _payload.db.collections[collectionSlug].createIndexes()
      }),
    ])

    await Promise.all(
      _payload.config.collections.map(async (coll) => {
        await new Promise((resolve, reject) => {
          _payload.db?.collections[coll.slug]?.ensureIndexes(function (err) {
            if (err) {
              reject(err)
            }
            resolve(true)
          })
        })
      }),
    )
  }

  /**
   * If a snapshot was restored, we don't need to seed the database
   */
  if (restored || deleteOnly) {
    return
  }

  /**
   * Seed the database with data and save it to a snapshot
   **/
  if (typeof seedFunction === 'function') {
    await seedFunction(_payload)
  }

  if (!alwaysSeed) {
    await createSnapshot(_payload, snapshotKey, collectionSlugs)
  }

  /**
   * Cache uploads dir to a cache folder if uploadsDir exists
   */
  if (!alwaysSeed && uploadsDir) {
    const uploadsDirs = Array.isArray(uploadsDir) ? uploadsDir : [uploadsDir]
    for (const dir of uploadsDirs) {
      if (dir && fs.existsSync(dir)) {
        if (!uploadsDirCache[snapshotKey]) {
          uploadsDirCache[snapshotKey] = []
        }

        let newObj: {
          cacheDir: string
          originalDir: string
        } = null

        if (!uploadsDirCache[snapshotKey].find((cache) => cache.originalDir === dir)) {
          // Define new cache folder path to the OS temp directory (well a random folder inside it)
          newObj = {
            originalDir: dir,
            cacheDir: path.join(os.tmpdir(), `${snapshotKey}`, `payload-e2e-tests-uploads-cache`),
          }
        }

        if (!newObj) {
          continue
        }

        // delete the cache folder if it exists
        if (fs.existsSync(newObj.cacheDir)) {
          await fs.promises.rm(newObj.cacheDir, { recursive: true })
        }
        await fs.promises.mkdir(newObj.cacheDir, { recursive: true })

        // recursively move all files and directories from uploadsDir to uploadsDirCacheFolder
        try {
          const files = await fs.promises.readdir(newObj.originalDir, { withFileTypes: true })
          for (const file of files) {
            if (file.isDirectory()) {
              await fs.promises.mkdir(path.join(newObj.cacheDir, file.name), {
                recursive: true,
              })
              await fs.promises.copyFile(
                path.join(newObj.originalDir, file.name),
                path.join(newObj.cacheDir, file.name),
              )
            } else {
              await fs.promises.copyFile(
                path.join(newObj.originalDir, file.name),
                path.join(newObj.cacheDir, file.name),
              )
            }
          }

          uploadsDirCache[snapshotKey].push(newObj)
        } catch (e) {
          console.error('Error in operation (creating snapshot of uploads dir):', e)
          throw e
        }
      }
    }
  }
}
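
For reference, a call site in a test suite might look like the following sketch. The collection slugs, snapshot key, seed data, and uploads path are illustrative only, and the example assumes generated Payload types that include a `posts` collection:

```ts
import path from 'path'
import { type Payload } from 'payload'

import { seedDB } from './seed.js'

// Hypothetical per-suite seeding helper: seedDB restores from a snapshot on
// repeat runs, otherwise runs the seed function and caches the result.
export async function seedExampleSuite(_payload: Payload): Promise<void> {
  await seedDB({
    _payload,
    collectionSlugs: ['posts', 'media'],
    seedFunction: async (payload) => {
      await payload.create({
        collection: 'posts',
        data: { title: 'Seeded post' },
      })
    },
    snapshotKey: 'exampleSuite',
    uploadsDir: path.resolve(process.cwd(), 'test/uploads'),
  })
}
```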