Files
supabase/apps/studio/data/sql/execute-sql-query.ts
Charis 000c79e22b fix(studio): rework global storage size validation (#41378)
The global storage size validation depends on an unpaginated buckets
query to determine whether it is lower than any individual bucket's
cutoff. This causes a problem for users with tens of thousands of
buckets.

There's a bit of a UX/performance problem here, because in order to
determine whether any bucket's `file_size_limit` exceeds the global
setting, we need to get the max `file_size_limit` of `storage.buckets`
-- however, that column is not indexed.

My workaround is:
- Below a certain threshold (10,000 buckets), the query for max
`file_size_limit` is automatically run on form submit.
- Above that threshold, the user must confirm whether they want to run
the query. They're still allowed to change the storage config without
running it -- this does open a loophole where they can have a global
storage setting lower than an individual bucket's file size limit;
while this is a potentially confusing situation, it's not strictly an
error.

---------

Co-authored-by: Joshen Lim <joshenlimek@gmail.com>
2025-12-16 09:02:31 -05:00

182 lines
5.1 KiB
TypeScript

import { QueryKey, useQuery } from '@tanstack/react-query'
import { DEFAULT_PLATFORM_APPLICATION_NAME } from '@supabase/pg-meta/src/constants'
import { handleError as handleErrorFetchers, post } from 'data/fetchers'
import { useSelectedProjectQuery } from 'hooks/misc/useSelectedProject'
import { MB, PROJECT_STATUS } from 'lib/constants'
import {
ROLE_IMPERSONATION_NO_RESULTS,
ROLE_IMPERSONATION_SQL_LINE_COUNT,
} from 'lib/role-impersonation'
import type { ResponseError, UseCustomQueryOptions } from 'types'
import { sqlKeys } from './keys'
export type ExecuteSqlVariables = {
projectRef?: string
connectionString?: string | null
sql: string
queryKey?: QueryKey
handleError?: (error: ResponseError) => { result: any }
isRoleImpersonationEnabled?: boolean
isStatementTimeoutDisabled?: boolean
autoLimit?: number
contextualInvalidation?: boolean
}
/**
* Executes a SQL query against the user's instance.
*
* @throws {Error}
*/
export async function executeSql<T = any>(
{
projectRef,
connectionString,
sql,
queryKey,
handleError,
isRoleImpersonationEnabled = false,
isStatementTimeoutDisabled = false,
}: Pick<
ExecuteSqlVariables,
| 'projectRef'
| 'connectionString'
| 'sql'
| 'queryKey'
| 'handleError'
| 'isRoleImpersonationEnabled'
| 'isStatementTimeoutDisabled'
>,
signal?: AbortSignal,
headersInit?: HeadersInit,
fetcherOverride?: (options: {
query: string
headers?: HeadersInit
}) => Promise<{ data: T } | { error: ResponseError }>
): Promise<{ result: T }> {
if (!projectRef) throw new Error('projectRef is required')
const sqlSize = new Blob([sql]).size
// [Joshen] I think the limit is around 1MB from testing, but its not exactly 1MB it seems
if (sqlSize > 0.98 * MB) {
throw new Error('Query is too large to be run via the SQL Editor')
}
let headers = new Headers(headersInit)
if (connectionString) headers.set('x-connection-encrypted', connectionString)
let data
let error
if (fetcherOverride) {
const result = await fetcherOverride({ query: sql, headers })
if ('data' in result) {
data = result.data
} else {
error = result.error
}
} else {
const result = await post('/platform/pg-meta/{ref}/query', {
signal,
params: {
header: {
'x-connection-encrypted': connectionString ?? '',
'x-pg-application-name': isStatementTimeoutDisabled
? 'supabase/dashboard-query-editor'
: DEFAULT_PLATFORM_APPLICATION_NAME,
},
path: { ref: projectRef },
// @ts-expect-error: This is just a client side thing to identify queries better
query: {
key:
queryKey
?.filter((seg) => typeof seg === 'string' || typeof seg === 'number')
.join('-') ?? '',
},
},
body: { query: sql, disable_statement_timeout: isStatementTimeoutDisabled },
headers,
})
data = result.data
error = result.error
}
if (error) {
if (
isRoleImpersonationEnabled &&
typeof error === 'object' &&
error !== null &&
'error' in error &&
'formattedError' in error
) {
let updatedError = error as { error: string; formattedError: string }
const regex = /LINE (\d+):/im
const [, lineNumberStr] = regex.exec(updatedError.error) ?? []
const lineNumber = Number(lineNumberStr)
if (!isNaN(lineNumber)) {
updatedError = {
...updatedError,
error: updatedError.error.replace(
regex,
`LINE ${lineNumber - ROLE_IMPERSONATION_SQL_LINE_COUNT}:`
),
formattedError: updatedError.formattedError.replace(
regex,
`LINE ${lineNumber - ROLE_IMPERSONATION_SQL_LINE_COUNT}:`
),
}
}
error = updatedError as any
}
if (handleError !== undefined) return handleError(error as any)
else handleErrorFetchers(error)
}
if (
isRoleImpersonationEnabled &&
Array.isArray(data) &&
data?.[0]?.[ROLE_IMPERSONATION_NO_RESULTS] === 1
) {
return { result: [] as T }
}
return { result: data as T }
}
export type ExecuteSqlData = Awaited<ReturnType<typeof executeSql<any[]>>>
export type ExecuteSqlError = ResponseError
/**
 * React Query hook that runs a SQL query against the currently selected
 * project. Disabled until a `projectRef` is available and the selected
 * project reports ACTIVE_HEALTHY status.
 *
 * @deprecated Use the regular useQuery with a function that calls executeSql() instead
 */
export const useExecuteSqlQuery = <TData = ExecuteSqlData>(
  {
    projectRef,
    connectionString,
    sql,
    queryKey,
    handleError,
    isRoleImpersonationEnabled,
  }: ExecuteSqlVariables,
  { enabled = true, ...options }: UseCustomQueryOptions<ExecuteSqlData, ExecuteSqlError, TData> = {}
) => {
  const { data: project } = useSelectedProjectQuery()
  const projectIsHealthy = project?.status === PROJECT_STATUS.ACTIVE_HEALTHY

  // Cache key falls back to the base64 of the SQL text when no explicit
  // queryKey is supplied.
  const cacheKey = sqlKeys.query(projectRef, queryKey ?? [btoa(sql)])

  return useQuery<ExecuteSqlData, ExecuteSqlError, TData>({
    queryKey: cacheKey,
    queryFn: ({ signal }) =>
      executeSql(
        { projectRef, connectionString, sql, queryKey, handleError, isRoleImpersonationEnabled },
        signal
      ),
    enabled: enabled && typeof projectRef !== 'undefined' && projectIsHealthy,
    staleTime: 0,
    ...options,
  })
}