Chore/shift manual queries into pg meta (#43692)

## Context

Related to FE-2557

Part of shifting manually written dashboard queries into
packages/pg-meta, so that
- the pg-meta owners can be code owners of the queries
- we can write tests for the queries

This PR just shifts all the `.sql.ts` files that we previously created
into packages/pg-meta.

There are still other areas that need to be shifted over as well, which
I'll address in subsequent PRs.

## Notable changes

- `getTableRowsCountSql` -> opted to move the `formatFilterValue` logic out
to the call site, so filter values are formatted before this method is
called (ref `table-rows-count-query`; see the first sketch below)
- `getDeleteOldCronJobRunDetailsByCtidSql` -> opted to move the
`validatePageNumber` logic out to the call site, so page numbers are
validated before this method is called (ref `CronJobsTab.useCleanupActions`;
see the second sketch below)
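
Roughly, the first change looks like this, condensed from the
`table-rows-count-query` diff (`table`, `filters`, `enforceExactCount`, and
`isUsingReadReplica` come from the surrounding query function):

```ts
import { getTableRowsCountSql } from '@supabase/pg-meta'
import { formatFilterValue } from './utils'

// Filter values are formatted at the call site now; the pg-meta helper
// receives already-formatted filters and no longer calls formatFilterValue
const formattedFilters = filters?.map((x) => ({ ...x, value: formatFilterValue(table, x) }))

const sql = getTableRowsCountSql({
  table,
  filters: formattedFilters,
  enforceExactCount,
  isUsingReadReplica,
})
```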
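
And the second, condensed from the `CronJobsTab.useCleanupActions` diff
(`batch`, `totalPages`, and `interval` come from the surrounding hook):

```ts
import { getDeleteOldCronJobRunDetailsByCtidSql } from '@supabase/pg-meta'
import {
  CTID_BATCH_PAGE_SIZE,
  validatePageNumber,
} from 'data/database-cron-jobs/database-cron-jobs.utils'

const startPage = batch * CTID_BATCH_PAGE_SIZE
const endPage = Math.min((batch + 1) * CTID_BATCH_PAGE_SIZE, totalPages + 1)

// Validation happens at the call site now; the pg-meta helper assumes its
// page arguments are finite non-negative integers
validatePageNumber(startPage, 'startPage')
validatePageNumber(endPage, 'endPage')

const sql = getDeleteOldCronJobRunDetailsByCtidSql(interval, startPage, endPage)
```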
Authored by Joshen Lim on 2026-03-16 16:14:48 +07:00, committed by GitHub
parent 7f61d9143b
commit 241f7bb721
31 changed files with 213 additions and 179 deletions

.github/CODEOWNERS vendored

@@ -19,6 +19,4 @@
/apps/studio/components/interfaces/Organization/Documents/ @supabase/security
/apps/studio/pages/new/index.tsx @supabase/security
/apps/studio/data/**/*.sql.ts @supabase/postgres @avallete
/packages/shared-data/compute-disk-limits.ts @supabase/infra


@@ -1,4 +1,4 @@
import { getScheduleDeleteCronJobRunDetailsSql } from 'data/database-cron-jobs/database-cron-jobs.sql'
import { getScheduleDeleteCronJobRunDetailsSql } from '@supabase/pg-meta'
import { CheckCircle2, XCircle } from 'lucide-react'
import {
Button,


@@ -1,16 +1,21 @@
import {
getDeleteOldCronJobRunDetailsByCtidSql,
getJobRunDetailsPageCountSql,
} from '@supabase/pg-meta'
import {
CTID_BATCH_PAGE_SIZE,
getDeleteOldCronJobRunDetailsByCtidKey,
getDeleteOldCronJobRunDetailsByCtidSql,
getJobRunDetailsPageCountKey,
getJobRunDetailsPageCountSql,
} from 'data/database-cron-jobs/database-cron-jobs.sql'
validatePageNumber,
} from 'data/database-cron-jobs/database-cron-jobs.utils'
import { useExecuteSqlMutation } from 'data/sql/execute-sql-mutation'
import { useCallback, useRef, useState } from 'react'
import { toast } from 'sonner'
import { CLEANUP_INTERVALS } from './CronJobsTab.constants'
import type { ConnectionVars } from '@/data/common.types'
import {
getDeleteOldCronJobRunDetailsByCtidKey,
getJobRunDetailsPageCountKey,
} from '@/data/database-cron-jobs/keys'
import { useScheduleCronJobRunDetailsCleanupMutation } from '@/data/database-cron-jobs/schedule-clean-up-mutation'
// Delay between batches to allow other queries to proceed (in milliseconds)
@@ -110,6 +115,9 @@ export const useCronJobsCleanupActions = ({
const startPage = batch * CTID_BATCH_PAGE_SIZE
const endPage = Math.min((batch + 1) * CTID_BATCH_PAGE_SIZE, totalPages + 1)
validatePageNumber(startPage, 'startPage')
validatePageNumber(endPage, 'endPage')
setCleanupState({
status: 'deleting',
progress: {


@@ -1,5 +1,5 @@
import { LARGEST_SIZE_LIMIT_BUCKETS_COUNT } from '@supabase/pg-meta'
import { InlineLink } from 'components/ui/InlineLink'
import { LARGEST_SIZE_LIMIT_BUCKETS_COUNT } from 'data/storage/storage.sql'
import Link from 'next/link'
import { type FieldError } from 'react-hook-form'
import { cn, Tooltip, TooltipContent, TooltipTrigger } from 'ui'


@@ -1,9 +1,11 @@
import { getLiveTupleEstimate } from '@supabase/pg-meta'
import { useQuery } from '@tanstack/react-query'
import type { ConnectionVars } from 'data/common.types'
import { getLiveTupleEstimate, getLiveTupleEstimateKey } from 'data/database/database.sql'
import { executeSql } from 'data/sql/execute-sql-query'
import type { UseCustomQueryOptions } from 'types'
import { getLiveTupleEstimateKey } from '../database/keys'
type DatabaseCronJobsCountEstimateVariables = ConnectionVars
const cronJobsCountEstimateSql = getLiveTupleEstimate('job', 'cron')


@@ -1,8 +1,8 @@
import { getCronJobsSql } from '@supabase/pg-meta'
import { InfiniteData, useInfiniteQuery } from '@tanstack/react-query'
import { COST_THRESHOLD_ERROR, executeSql } from 'data/sql/execute-sql-query'
import type { ResponseError, UseCustomInfiniteQueryOptions } from 'types'
import { getCronJobsSql } from './database-cron-jobs.sql'
import { databaseCronJobsKeys } from './keys'
export const CRON_JOBS_PAGE_LIMIT = 20


@@ -1,3 +1,4 @@
import { getCronJobsMinimalSql } from '@supabase/pg-meta'
import { InfiniteData, useInfiniteQuery } from '@tanstack/react-query'
import { executeSql } from 'data/sql/execute-sql-query'
import type { ResponseError, UseCustomInfiniteQueryOptions } from 'types'
@@ -7,7 +8,6 @@ import {
CronJob,
DatabaseCronJobRunsVariables,
} from './database-cron-jobs-infinite-query'
import { getCronJobsMinimalSql } from './database-cron-jobs.sql'
import { databaseCronJobsKeys } from './keys'
export async function getDatabaseCronJobsMinimal({


@@ -0,0 +1,12 @@
// Number of pages to process in each batch for ctid-based deletion
// Based on default Postgres shared buffer size of 128 MB, which fits ~16k pages
export const CTID_BATCH_PAGE_SIZE = 5_000
/**
* Validates that a value is a finite non-negative integer.
*/
export function validatePageNumber(value: number, name: string): void {
if (!Number.isFinite(value) || !Number.isInteger(value) || value < 0) {
throw new Error(`${name} must be a finite non-negative integer, got: ${value}`)
}
}


@@ -1,3 +1,5 @@
import { sqlKeys } from '../sql/keys'
export const databaseCronJobsKeys = {
create: () => ['cron-jobs', 'create'] as const,
delete: () => ['cron-jobs', 'delete'] as const,
@@ -26,3 +28,17 @@ export const databaseCronJobsKeys = {
],
timezone: (projectRef: string | undefined) => ['database-cron-timezone', projectRef] as const,
}
export const getJobRunDetailsPageCountKey = (projectRef: string | undefined) =>
sqlKeys.query(projectRef, ['cron-job-run-details', 'page-count'])
export const getDeleteOldCronJobRunDetailsByCtidKey = (
projectRef: string | undefined,
interval: string,
startPage: number
) => sqlKeys.query(projectRef, ['cron-job-run-details', 'delete-batch', interval, startPage])
export const getScheduleDeleteCronJobRunDetailsKey = (
projectRef: string | undefined,
interval: string
) => sqlKeys.query(projectRef, ['cron-job-run-details', 'schedule', interval])


@@ -1,12 +1,10 @@
import { getScheduleDeleteCronJobRunDetailsSql } from '@supabase/pg-meta'
import { useMutation } from '@tanstack/react-query'
import { executeSql } from 'data/sql/execute-sql-query'
import { toast } from 'sonner'
import type { ResponseError, UseCustomMutationOptions } from 'types'
import {
getScheduleDeleteCronJobRunDetailsKey,
getScheduleDeleteCronJobRunDetailsSql,
} from './database-cron-jobs.sql'
import { getScheduleDeleteCronJobRunDetailsKey } from './keys'
export type ScheduleCronJobRunDetailsCleanupVariables = {
projectRef: string


@@ -1,8 +1,8 @@
import { getDatabaseExtensionDefaultSchemaSQL } from '@supabase/pg-meta'
import { useQuery } from '@tanstack/react-query'
import { UseCustomQueryOptions } from 'types'
import { executeSql, ExecuteSqlError } from '../sql/execute-sql-query'
import { getDatabaseExtensionDefaultSchemaSQL } from './database-extensions.sql'
import { databaseExtensionsKeys } from './keys'
type DatabaseExtensionDefaultSchemaVariables = {


@@ -1,8 +1,8 @@
import { getIndexesSQL } from '@supabase/pg-meta'
import { useQuery } from '@tanstack/react-query'
import { UseCustomQueryOptions } from 'types'
import { executeSql, ExecuteSqlError } from '../sql/execute-sql-query'
import { getIndexesSQL } from './database-indexes.sql'
import { databaseIndexesKeys } from './keys'
type GetIndexesArgs = {


@@ -1,20 +0,0 @@
import { literal } from '@supabase/pg-meta/src/pg-format'
import { sqlKeys } from '../sql/keys'
export const getLiveTupleEstimate = (table: string, schema: string = 'public') => {
const sql = /* SQL */ `
SELECT n_live_tup AS live_tuple_estimate
FROM pg_stat_user_tables
WHERE schemaname = ${literal(schema)}
AND relname = ${literal(table)};
`.trim()
return sql
}
export const getLiveTupleEstimateKey = (
projectRef: string | undefined,
table: string,
schema = 'public'
) => sqlKeys.query(projectRef, ['live-tuple-estimate', schema, table])


@@ -1,3 +1,5 @@
import { sqlKeys } from '../sql/keys'
export const databaseKeys = {
schemas: (projectRef: string | undefined) => ['projects', projectRef, 'schemas'] as const,
keywords: (projectRef: string | undefined) => ['projects', projectRef, 'keywords'] as const,
@@ -63,3 +65,9 @@ export const databaseKeys = {
supamonitorEnabled: (projectRef: string | undefined) =>
['projects', projectRef, 'supamonitor-enabled'] as const,
}
export const getLiveTupleEstimateKey = (
projectRef: string | undefined,
table: string,
schema = 'public'
) => sqlKeys.query(projectRef, ['live-tuple-estimate', schema, table])


@@ -1,9 +1,9 @@
import { getDefaultPrivilegesStateSql } from '@supabase/pg-meta'
import { queryOptions } from '@tanstack/react-query'
import { executeSql } from 'data/sql/execute-sql-query'
import type { ResponseError } from 'types'
import { privilegeKeys } from './keys'
import { getDefaultPrivilegesStateSql } from './privileges.sql'
export type DefaultPrivilegesVariables = {
projectRef?: string


@@ -1,9 +1,9 @@
import { getExposedFunctionCountsSql } from '@supabase/pg-meta'
import { queryOptions } from '@tanstack/react-query'
import { executeSql } from 'data/sql/execute-sql-query'
import type { ResponseError } from 'types'
import { privilegeKeys } from './keys'
import { getExposedFunctionCountsSql } from './privileges.sql'
export type ExposedFunctionCountsVariables = {
projectRef?: string


@@ -1,10 +1,12 @@
import { getExposedFunctionsSql } from '@supabase/pg-meta'
import { infiniteQueryOptions } from '@tanstack/react-query'
import { executeSql } from 'data/sql/execute-sql-query'
import type { ResponseError } from 'types'
import { privilegeKeys } from './keys'
import { getExposedFunctionsSql } from './privileges.sql'
import { INTERNAL_SCHEMAS } from '@/hooks/useProtectedSchemas'
export const IGNORED_SCHEMAS = [...INTERNAL_SCHEMAS, 'pg_catalog']
export const EXPOSED_FUNCTIONS_PAGE_LIMIT = 50
export type ExposedFunctionsVariables = {
@@ -38,7 +40,7 @@ export async function getExposedFunctions(
const offset = page * limit
const sql = getExposedFunctionsSql({ search, offset, limit })
const sql = getExposedFunctionsSql({ search, offset, limit, ignoredSchemas: IGNORED_SCHEMAS })
const { result } = await executeSql(
{


@@ -1,9 +1,9 @@
import { getExposedTableCountsSql } from '@supabase/pg-meta'
import { queryOptions } from '@tanstack/react-query'
import { executeSql } from 'data/sql/execute-sql-query'
import type { ResponseError } from 'types'
import { privilegeKeys } from './keys'
import { getExposedTableCountsSql } from './privileges.sql'
export type ExposedTableCountsVariables = {
projectRef?: string


@@ -1,10 +1,12 @@
import { getExposedTablesSql } from '@supabase/pg-meta'
import { infiniteQueryOptions } from '@tanstack/react-query'
import { executeSql } from 'data/sql/execute-sql-query'
import type { ResponseError } from 'types'
import { privilegeKeys } from './keys'
import { getExposedTablesSql } from './privileges.sql'
import { INTERNAL_SCHEMAS } from '@/hooks/useProtectedSchemas'
const IGNORED_SCHEMAS = [...INTERNAL_SCHEMAS, 'pg_catalog']
export const EXPOSED_TABLES_PAGE_LIMIT = 50
export type ExposedTablesVariables = {
@@ -39,7 +41,7 @@ export async function getExposedTables(
const offset = page * limit
const sql = getExposedTablesSql({ search, offset, limit })
const sql = getExposedTablesSql({ search, offset, limit, ignoredSchemas: IGNORED_SCHEMAS })
const { result } = await executeSql(
{


@@ -1,10 +1,10 @@
import { buildDefaultPrivilegesSql } from '@supabase/pg-meta'
import { useMutation } from '@tanstack/react-query'
import { executeSql } from 'data/sql/execute-sql-query'
import { toast } from 'sonner'
import type { UseCustomMutationOptions } from 'types'
import type { ConnectionVars } from '../common.types'
import { buildDefaultPrivilegesSql } from './privileges.sql'
export type UpdateDefaultPrivilegesVariables = ConnectionVars & {
granted: boolean


@@ -1,10 +1,10 @@
import { buildFunctionPrivilegesSql, buildTablePrivilegesSql } from '@supabase/pg-meta'
import { useMutation } from '@tanstack/react-query'
import { executeSql } from 'data/sql/execute-sql-query'
import { toast } from 'sonner'
import type { UseCustomMutationOptions } from 'types'
import type { ConnectionVars } from '../common.types'
import { buildFunctionPrivilegesSql, buildTablePrivilegesSql } from './privileges.sql'
export type UpdateExposedEntitiesVariables = ConnectionVars & {
tableIdsToAdd: number[]


@@ -1,13 +1,11 @@
import { getLargestSizeLimitBucketsSqlUnoptimized, getLiveTupleEstimate } from '@supabase/pg-meta'
import { useQuery, useQueryClient } from '@tanstack/react-query'
import type { ConnectionVars } from 'data/common.types'
import { getLiveTupleEstimate, getLiveTupleEstimateKey } from 'data/database/database.sql'
import { executeSql } from 'data/sql/execute-sql-query'
import { useCallback } from 'react'
import {
getLargestSizeLimitBucketsKey,
getLargestSizeLimitBucketsSqlUnoptimized,
} from './storage.sql'
import { getLiveTupleEstimateKey } from '../database/keys'
import { sqlKeys } from '../sql/keys'
export const THRESHOLD_FOR_AUTO_QUERYING_BUCKET_LIMITS = 10_000
@@ -49,7 +47,7 @@ const getBucketsWithLargestSizeLimit = async ({
if (!projectRef) throw new Error('Project reference is required')
if (!connectionString) throw new Error('Connection string is required')
const key = getLargestSizeLimitBucketsKey(projectRef)
const key = sqlKeys.query(projectRef, ['buckets-with-largest-size-limit'])
const sql = getLargestSizeLimitBucketsSqlUnoptimized
const { result } = await executeSql<{ id: string; name: string; file_size_limit: number }[]>({
@@ -102,7 +100,7 @@ export const useLargestBucketSizeLimitsCheck = ({
enabled: !!projectRef && !!connectionString,
})
const bucketLimitsKey = getLargestSizeLimitBucketsKey(projectRef)
const bucketLimitsKey = sqlKeys.query(projectRef, ['buckets-with-largest-size-limit'])
const fetchLargestBucketLimits = useCallback(
() =>


@@ -1,3 +1,4 @@
import { getTableRowsCountSql } from '@supabase/pg-meta'
import { QueryClient, useQuery, useQueryClient } from '@tanstack/react-query'
import { IS_PLATFORM } from 'common'
import { parseSupaTable } from 'components/grid/SupabaseGrid.utils'
@@ -10,7 +11,7 @@ import { UseCustomQueryOptions } from 'types'
import { useConnectionStringForReadOps } from '../read-replicas/replicas-query'
import { executeSql, ExecuteSqlError } from '../sql/execute-sql-query'
import { tableRowKeys } from './keys'
import { getTableRowsCountSql } from './table-rows.sql'
import { formatFilterValue } from './utils'
export type GetTableRowsCountArgs = {
table?: SupaTable
@@ -58,8 +59,14 @@ export async function getTableRowsCount(
const table = parseSupaTable(entity)
const formattedFilters = filters?.map((x) => ({ ...x, value: formatFilterValue(table, x) }))
const sql = wrapWithRoleImpersonation(
getTableRowsCountSql({ table, filters, enforceExactCount, isUsingReadReplica }),
getTableRowsCountSql({
table,
filters: formattedFilters,
enforceExactCount,
isUsingReadReplica,
}),
roleImpersonationState
)
const { result } = await executeSql(


@@ -25,6 +25,31 @@ export { getIndexWorkerStatusSQL } from './sql/studio/auth/get-index-worker-stat
export { type OptimizedSearchColumns } from './sql/studio/auth/get-users-types'
export { getPaginatedUsersSQL, type UsersCursor } from './sql/studio/auth/get-users-paginated'
export { getUsersCountSQL } from './sql/studio/auth/get-users-count'
export {
getLargestSizeLimitBucketsSqlUnoptimized,
LARGEST_SIZE_LIMIT_BUCKETS_COUNT,
} from './sql/studio/storage/get-largest-size-limit-buckets'
export {
getExposedTablesSql,
getExposedTableCountsSql,
getExposedFunctionsSql,
getExposedFunctionCountsSql,
buildTablePrivilegesSql,
buildFunctionPrivilegesSql,
buildDefaultPrivilegesSql,
getDefaultPrivilegesStateSql,
} from './sql/studio/privileges'
export { getIndexesSQL } from './sql/studio/database/indexes'
export { getDatabaseExtensionDefaultSchemaSQL } from './sql/studio/database/extensions'
export {
getCronJobsMinimalSql,
getCronJobsSql,
getJobRunDetailsPageCountSql,
getDeleteOldCronJobRunDetailsByCtidSql,
getScheduleDeleteCronJobRunDetailsSql,
} from './sql/studio/database/cron-jobs'
export { getTableRowsCountSql } from './sql/studio/database/rows'
export { getLiveTupleEstimate } from './sql/studio/get-live-tuple-estimate'
export default {
roles,


@@ -1,96 +1,5 @@
import { literal } from '@supabase/pg-meta/src/pg-format'
import { literal } from '../../../pg-format'
import { sqlKeys } from '../sql/keys'
const CRON_CLEANUP_SCHEDULE_NAME = 'delete-job-run-details'
const CRON_CLEANUP_SCHEDULE_EXPRESSION = '0 12 * * *'
// Number of pages to process in each batch for ctid-based deletion
// Based on default Postgres shared buffer size of 128 MB, which fits ~16k pages
export const CTID_BATCH_PAGE_SIZE = 5_000
/**
* Get the total number of pages in the job_run_details table.
* This is used to iterate through the table in batches using ctid ranges.
*/
export const getJobRunDetailsPageCountSql = () =>
`
SELECT pg_relation_size(oid) / current_setting('block_size')::int8 AS num_pages
FROM pg_class
WHERE relname = 'job_run_details'
AND relnamespace = 'cron'::regnamespace;
`.trim()
export const getJobRunDetailsPageCountKey = (projectRef: string | undefined) =>
sqlKeys.query(projectRef, ['cron-job-run-details', 'page-count'])
/**
* Validates that a value is a finite non-negative integer.
*/
function validatePageNumber(value: number, name: string): void {
if (!Number.isFinite(value) || !Number.isInteger(value) || value < 0) {
throw new Error(`${name} must be a finite non-negative integer, got: ${value}`)
}
}
/**
* Delete old cron job run details using ctid range filtering.
* This approach:
* 1. Only scans a bounded range of pages (not the full table)
* 2. Avoids buffer cache pollution by processing in chunks
* 3. Allows other queries to proceed between batches
*
* @param interval - The age threshold (e.g., '7 days')
* @param startPage - The starting page number (inclusive)
* @param endPage - The ending page number (exclusive)
* @returns SQL that deletes matching rows and returns the count of deleted rows
*/
export const getDeleteOldCronJobRunDetailsByCtidSql = (
interval: string,
startPage: number,
endPage: number
) => {
validatePageNumber(startPage, 'startPage')
validatePageNumber(endPage, 'endPage')
// After validation, these are guaranteed to be safe integers
// Using literal() on the string representation ensures proper escaping
const safeCtidStart = literal(`(${startPage},0)`)
const safeCtidEnd = literal(`(${endPage},0)`)
return `
WITH deleted AS (
DELETE FROM cron.job_run_details
WHERE ctid >= ${safeCtidStart}::tid
AND ctid < ${safeCtidEnd}::tid
AND end_time < now() - interval ${literal(interval)}
RETURNING 1
)
SELECT count(*) as deleted_count FROM deleted;
`.trim()
}
export const getDeleteOldCronJobRunDetailsByCtidKey = (
projectRef: string | undefined,
interval: string,
startPage: number
) => sqlKeys.query(projectRef, ['cron-job-run-details', 'delete-batch', interval, startPage])
export const getScheduleDeleteCronJobRunDetailsSql = (interval: string) =>
`
SELECT cron.schedule(
${literal(CRON_CLEANUP_SCHEDULE_NAME)},
${literal(CRON_CLEANUP_SCHEDULE_EXPRESSION)},
$$DELETE FROM cron.job_run_details WHERE end_time < now() - interval ${literal(interval)}$$
);
`.trim()
export const getScheduleDeleteCronJobRunDetailsKey = (
projectRef: string | undefined,
interval: string
) => sqlKeys.query(projectRef, ['cron-job-run-details', 'schedule', interval])
// [Joshen] Just omits the LEFT JOIN as that's the heavy part
export const getCronJobsMinimalSql = ({
searchTerm,
page,
@@ -160,3 +69,61 @@ ORDER BY job.jobid
LIMIT ${limit}
OFFSET ${page * limit};
`.trim()
/**
* Delete old cron job run details using ctid range filtering.
* This approach:
* 1. Only scans a bounded range of pages (not the full table)
* 2. Avoids buffer cache pollution by processing in chunks
* 3. Allows other queries to proceed between batches
*
* @param interval - The age threshold (e.g., '7 days')
* @param startPage - The starting page number (inclusive)
* @param endPage - The ending page number (exclusive)
* @returns SQL that deletes matching rows and returns the count of deleted rows
*/
export const getDeleteOldCronJobRunDetailsByCtidSql = (
interval: string,
startPage: number,
endPage: number
) => {
// After validation, these are guaranteed to be safe integers
// Using literal() on the string representation ensures proper escaping
const safeCtidStart = literal(`(${startPage},0)`)
const safeCtidEnd = literal(`(${endPage},0)`)
return `
WITH deleted AS (
DELETE FROM cron.job_run_details
WHERE ctid >= ${safeCtidStart}::tid
AND ctid < ${safeCtidEnd}::tid
AND end_time < now() - interval ${literal(interval)}
RETURNING 1
)
SELECT count(*) as deleted_count FROM deleted;
`.trim()
}
const CRON_CLEANUP_SCHEDULE_NAME = 'delete-job-run-details'
const CRON_CLEANUP_SCHEDULE_EXPRESSION = '0 12 * * *'
export const getScheduleDeleteCronJobRunDetailsSql = (interval: string) =>
`
SELECT cron.schedule(
${literal(CRON_CLEANUP_SCHEDULE_NAME)},
${literal(CRON_CLEANUP_SCHEDULE_EXPRESSION)},
$$DELETE FROM cron.job_run_details WHERE end_time < now() - interval ${literal(interval)}$$
);
`.trim()
/**
* Get the total number of pages in the job_run_details table.
* This is used to iterate through the table in batches using ctid ranges.
*/
export const getJobRunDetailsPageCountSql = () =>
`
SELECT pg_relation_size(oid) / current_setting('block_size')::int8 AS num_pages
FROM pg_class
WHERE relname = 'job_run_details'
AND relnamespace = 'cron'::regnamespace;
`.trim()


@@ -1,4 +1,4 @@
import { literal } from '@supabase/pg-meta/src/pg-format'
import { literal } from '../../../pg-format'
export const getDatabaseExtensionDefaultSchemaSQL = ({ extension }: { extension: string }) => {
const sql = /* SQL */ `


@@ -1,11 +1,5 @@
import { Query } from '@supabase/pg-meta/src/query'
import {
COUNT_ESTIMATE_SQL,
THRESHOLD_COUNT,
} from '@supabase/pg-meta/src/sql/studio/get-count-estimate'
import { GetTableRowsCountArgs } from './table-rows-count-query'
import { formatFilterValue } from './utils'
import { Filter, Query } from '../../../query'
import { COUNT_ESTIMATE_SQL, THRESHOLD_COUNT } from '../get-count-estimate'
/**
* [Joshen] Initially check reltuples from pg_class for an estimate of row count on the table
@@ -18,7 +12,12 @@ export const getTableRowsCountSql = ({
filters = [],
enforceExactCount = false,
isUsingReadReplica = false,
}: GetTableRowsCountArgs & { isUsingReadReplica?: boolean }) => {
}: {
table: any
filters?: Filter[]
enforceExactCount?: boolean
isUsingReadReplica?: boolean
}) => {
if (!table) return ``
if (enforceExactCount) {
@@ -27,8 +26,7 @@ export const getTableRowsCountSql = ({
filters
.filter((x) => x.value && x.value !== '')
.forEach((x) => {
const value = formatFilterValue(table, x)
queryChains = queryChains.filter(x.column, x.operator, value)
queryChains = queryChains.filter(x.column, x.operator, x.value)
})
return `select (${queryChains.toSql().slice(0, -1)}), false as is_estimate;`
} else {
@@ -37,8 +35,7 @@ export const getTableRowsCountSql = ({
filters
.filter((x) => x.value && x.value != '')
.forEach((x) => {
const value = formatFilterValue(table, x)
selectQueryChains = selectQueryChains.filter(x.column, x.operator, value)
selectQueryChains = selectQueryChains.filter(x.column, x.operator, x.value)
})
const selectBaseSql = selectQueryChains.toSql()
@@ -47,8 +44,7 @@ export const getTableRowsCountSql = ({
filters
.filter((x) => x.value && x.value != '')
.forEach((x) => {
const value = formatFilterValue(table, x)
countQueryChains = countQueryChains.filter(x.column, x.operator, value)
countQueryChains = countQueryChains.filter(x.column, x.operator, x.value)
})
const countBaseSql = countQueryChains.toSql().slice(0, -1)


@@ -0,0 +1,12 @@
import { literal } from '../../pg-format'
export const getLiveTupleEstimate = (table: string, schema: string = 'public') => {
const sql = /* SQL */ `
SELECT n_live_tup AS live_tuple_estimate
FROM pg_stat_user_tables
WHERE schemaname = ${literal(schema)}
AND relname = ${literal(table)};
`.trim()
return sql
}


@@ -1,9 +1,3 @@
import { INTERNAL_SCHEMAS } from '@/hooks/useProtectedSchemas'
export const IGNORED_SCHEMAS = [...INTERNAL_SCHEMAS, 'pg_catalog']
const IGNORED_SCHEMAS_LIST = IGNORED_SCHEMAS.map((s) => `'${s}'`).join(', ')
/**
* Builds the shared `table_privileges` and `table_grants` CTEs used by
* both the exposed-tables list query and the counts-only query.
@@ -11,7 +5,12 @@ const IGNORED_SCHEMAS_LIST = IGNORED_SCHEMAS.map((s) => `'${s}'`).join(', ')
* Returns SQL text meant to follow `WITH` (no leading `WITH` keyword).
* Callers that append additional CTEs should add a comma after interpolation.
*/
function getTableGrantsCTEs({ search }: { search?: string } = {}) {
function getTableGrantsCTEs({
search,
ignoredSchemas = [],
}: { search?: string; ignoredSchemas?: string[] } = {}) {
const IGNORED_SCHEMAS_LIST = ignoredSchemas.map((s) => `'${s}'`).join(', ')
return /* SQL */ `
table_privileges as (
select
@@ -83,13 +82,15 @@ export function getExposedTablesSql({
search,
offset,
limit,
ignoredSchemas = [],
}: {
search?: string
offset: number
limit: number
ignoredSchemas?: string[]
}) {
return /* SQL */ `
with ${getTableGrantsCTEs({ search })}
with ${getTableGrantsCTEs({ search, ignoredSchemas })}
select
(select count(*)::int from table_grants) as total_count,
coalesce(
@@ -135,7 +136,12 @@ export function getExposedTableCountsSql({ selectedSchemas }: { selectedSchemas:
* Returns SQL text meant to follow `WITH` (no leading `WITH` keyword).
* Callers that append additional CTEs should add a comma after interpolation.
*/
function getFunctionGrantsCTEs({ search }: { search?: string } = {}) {
function getFunctionGrantsCTEs({
search,
ignoredSchemas = [],
}: { search?: string; ignoredSchemas?: string[] } = {}) {
const IGNORED_SCHEMAS_LIST = ignoredSchemas.map((s) => `'${s}'`).join(', ')
return /* SQL */ `
function_privileges as (
select
@@ -177,13 +183,15 @@ export function getExposedFunctionsSql({
search,
offset,
limit,
ignoredSchemas = [],
}: {
search?: string
offset: number
limit: number
ignoredSchemas?: string[]
}) {
return /* SQL */ `
with ${getFunctionGrantsCTEs({ search })}
with ${getFunctionGrantsCTEs({ search, ignoredSchemas })}
select
(select count(*)::int from function_grants) as total_count,
coalesce(


@@ -1,5 +1,3 @@
import { sqlKeys } from '../sql/keys'
export const LARGEST_SIZE_LIMIT_BUCKETS_COUNT = 50
/**
@@ -15,6 +13,3 @@ WHERE file_size_limit IS NOT NULL
ORDER BY file_size_limit DESC
LIMIT ${LARGEST_SIZE_LIMIT_BUCKETS_COUNT + 1};
`.trim()
export const getLargestSizeLimitBucketsKey = (projectRef: string | undefined) =>
sqlKeys.query(projectRef, ['buckets-with-largest-size-limit'])