Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions crates/pgt_configuration/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ pub mod diagnostics;
pub mod files;
pub mod generated;
pub mod migrations;
pub mod plpgsql_check;
pub mod typecheck;
pub mod vcs;

Expand All @@ -33,6 +34,10 @@ use files::{FilesConfiguration, PartialFilesConfiguration, partial_files_configu
use migrations::{
MigrationsConfiguration, PartialMigrationsConfiguration, partial_migrations_configuration,
};
use plpgsql_check::{
PartialPlPgSqlCheckConfiguration, PlPgSqlCheckConfiguration,
partial_pl_pg_sql_check_configuration,
};
use serde::{Deserialize, Serialize};
pub use typecheck::{
PartialTypecheckConfiguration, TypecheckConfiguration, partial_typecheck_configuration,
Expand Down Expand Up @@ -85,6 +90,10 @@ pub struct Configuration {
#[partial(type, bpaf(external(partial_typecheck_configuration), optional))]
pub typecheck: TypecheckConfiguration,

/// The configuration for plpgsql_check
#[partial(type, bpaf(external(partial_pl_pg_sql_check_configuration), optional))]
pub plpgsql_check: PlPgSqlCheckConfiguration,

/// The configuration of the database connection
#[partial(
type,
Expand Down Expand Up @@ -121,6 +130,9 @@ impl PartialConfiguration {
typecheck: Some(PartialTypecheckConfiguration {
..Default::default()
}),
plpgsql_check: Some(PartialPlPgSqlCheckConfiguration {
..Default::default()
}),
db: Some(PartialDatabaseConfiguration {
host: Some("127.0.0.1".to_string()),
port: Some(5432),
Expand Down
20 changes: 20 additions & 0 deletions crates/pgt_configuration/src/plpgsql_check.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
use biome_deserialize_macros::{Merge, Partial};
use bpaf::Bpaf;
use serde::{Deserialize, Serialize};

/// The configuration for plpgsql_check.
#[derive(Clone, Debug, Deserialize, Eq, Partial, PartialEq, Serialize)]
#[partial(derive(Bpaf, Clone, Eq, PartialEq, Merge))]
#[partial(cfg_attr(feature = "schema", derive(schemars::JsonSchema)))]
#[partial(serde(rename_all = "camelCase", default, deny_unknown_fields))]
pub struct PlPgSqlCheckConfiguration {
    /// If `false`, disables the feature and plpgsql_check won't be executed. `true` by default.
    #[partial(bpaf(hide))]
    pub enabled: bool,
}

impl Default for PlPgSqlCheckConfiguration {
fn default() -> Self {
Self { enabled: true }
}
}
4 changes: 4 additions & 0 deletions crates/pgt_configuration/src/typecheck.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,9 @@ use serde::{Deserialize, Serialize};
#[partial(cfg_attr(feature = "schema", derive(schemars::JsonSchema)))]
#[partial(serde(rename_all = "camelCase", default, deny_unknown_fields))]
pub struct TypecheckConfiguration {
/// if `false`, it disables the feature and the typechecker won't be executed. `true` by default
#[partial(bpaf(hide))]
pub enabled: bool,
/// Default search path schemas for type checking.
/// Can be a list of schema names or glob patterns like ["public", "app_*"].
/// If not specified, defaults to ["public"].
Expand All @@ -19,6 +22,7 @@ pub struct TypecheckConfiguration {
impl Default for TypecheckConfiguration {
fn default() -> Self {
Self {
enabled: true,
search_path: ["public".to_string()].into_iter().collect(),
}
}
Expand Down
33 changes: 33 additions & 0 deletions crates/pgt_workspace/src/settings.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ use pgt_configuration::{
diagnostics::InvalidIgnorePattern,
files::FilesConfiguration,
migrations::{MigrationsConfiguration, PartialMigrationsConfiguration},
plpgsql_check::PlPgSqlCheckConfiguration,
};
use pgt_fs::PgTPath;

Expand Down Expand Up @@ -213,6 +214,9 @@ pub struct Settings {
/// Type checking settings for the workspace
pub typecheck: TypecheckSettings,

/// plpgsql_check settings for the workspace
pub plpgsql_check: PlPgSqlCheckSettings,

/// Migrations settings
pub migrations: Option<MigrationSettings>,
}
Expand Down Expand Up @@ -253,6 +257,12 @@ impl Settings {
self.typecheck = to_typecheck_settings(TypecheckConfiguration::from(typecheck));
}

// plpgsql_check part
if let Some(plpgsql_check) = configuration.plpgsql_check {
self.plpgsql_check =
to_plpgsql_check_settings(PlPgSqlCheckConfiguration::from(plpgsql_check));
}

// Migrations settings
if let Some(migrations) = configuration.migrations {
self.migrations = to_migration_settings(
Expand Down Expand Up @@ -305,6 +315,13 @@ fn to_linter_settings(
/// Converts the resolved typecheck configuration into workspace settings.
fn to_typecheck_settings(conf: TypecheckConfiguration) -> TypecheckSettings {
    // Materialize the configured schema patterns into the settings vector.
    let search_path: Vec<String> = conf.search_path.into_iter().collect();
    TypecheckSettings {
        enabled: conf.enabled,
        search_path,
    }
}

fn to_plpgsql_check_settings(conf: PlPgSqlCheckConfiguration) -> PlPgSqlCheckSettings {
PlPgSqlCheckSettings {
enabled: conf.enabled,
}
}

Expand Down Expand Up @@ -415,16 +432,32 @@ impl Default for LinterSettings {
}
}

/// plpgsql_check settings for the entire workspace
#[derive(Debug)]
pub struct PlPgSqlCheckSettings {
    /// Whether plpgsql_check diagnostics are produced; enabled by default
    pub enabled: bool,
}

impl Default for PlPgSqlCheckSettings {
fn default() -> Self {
Self { enabled: true }
}
}

/// Type checking settings for the entire workspace
#[derive(Debug)]
pub struct TypecheckSettings {
    /// Whether the typechecker runs; enabled by default
    pub enabled: bool,
    /// Default search path schemas for type checking
    pub search_path: Vec<String>,
}

impl Default for TypecheckSettings {
fn default() -> Self {
Self {
enabled: true,
search_path: vec!["public".to_string()],
}
}
Expand Down
186 changes: 102 additions & 84 deletions crates/pgt_workspace/src/workspace/server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -451,93 +451,111 @@ impl Workspace for WorkspaceServer {
/*
* Type-checking against database connection
*/
if let Some(pool) = self.get_current_connection() {
let path_clone = params.path.clone();
let schema_cache = self.schema_cache.load(pool.clone())?;
let input = doc.iter(TypecheckDiagnosticsMapper).collect::<Vec<_>>();
let search_path_patterns = settings.typecheck.search_path.clone();

// Combined async context for both typecheck and plpgsql_check
let async_results = run_async(async move {
stream::iter(input)
.map(|(id, range, ast, cst, sign)| {
let pool = pool.clone();
let path = path_clone.clone();
let schema_cache = Arc::clone(&schema_cache);
let search_path_patterns = search_path_patterns.clone();

async move {
let mut diagnostics = Vec::new();

if let Some(ast) = ast {
// Type checking
let typecheck_result = pgt_typecheck::check_sql(TypecheckParams {
conn: &pool,
sql: convert_to_positional_params(id.content()).as_str(),
ast: &ast,
tree: &cst,
schema_cache: schema_cache.as_ref(),
search_path_patterns,
identifiers: sign
.map(|s| {
s.args
.iter()
.map(|a| TypedIdentifier {
path: s.name.clone(),
name: a.name.clone(),
type_: IdentifierType {
schema: a.type_.schema.clone(),
name: a.type_.name.clone(),
is_array: a.type_.is_array,
},
})
.collect::<Vec<_>>()
})
.unwrap_or_default(),
})
.await;

if let Ok(Some(diag)) = typecheck_result {
let r = diag.location().span.map(|span| span + range.start());
diagnostics.push(
diag.with_file_path(path.as_path().display().to_string())
.with_file_span(r.unwrap_or(range)),
);
let typecheck_enabled = settings.typecheck.enabled;
let plpgsql_check_enabled = settings.plpgsql_check.enabled;
if typecheck_enabled || plpgsql_check_enabled {
if let Some(pool) = self.get_current_connection() {
let path_clone = params.path.clone();
let schema_cache = self.schema_cache.load(pool.clone())?;
let input = doc.iter(TypecheckDiagnosticsMapper).collect::<Vec<_>>();
let search_path_patterns = settings.typecheck.search_path.clone();

// Combined async context for both typecheck and plpgsql_check
let async_results = run_async(async move {
stream::iter(input)
.map(|(id, range, ast, cst, sign)| {
let pool = pool.clone();
let path = path_clone.clone();
let schema_cache = Arc::clone(&schema_cache);
let search_path_patterns = search_path_patterns.clone();

async move {
let mut diagnostics = Vec::new();

if let Some(ast) = ast {
// Type checking
if typecheck_enabled {
let typecheck_result =
pgt_typecheck::check_sql(TypecheckParams {
conn: &pool,
sql: convert_to_positional_params(id.content())
.as_str(),
ast: &ast,
tree: &cst,
schema_cache: schema_cache.as_ref(),
search_path_patterns,
identifiers: sign
.map(|s| {
s.args
.iter()
.map(|a| TypedIdentifier {
path: s.name.clone(),
name: a.name.clone(),
type_: IdentifierType {
schema: a.type_.schema.clone(),
name: a.type_.name.clone(),
is_array: a.type_.is_array,
},
})
.collect::<Vec<_>>()
})
.unwrap_or_default(),
})
.await;

if let Ok(Some(diag)) = typecheck_result {
let r = diag
.location()
.span
.map(|span| span + range.start());
diagnostics.push(
diag.with_file_path(
path.as_path().display().to_string(),
)
.with_file_span(r.unwrap_or(range)),
);
}
}

// plpgsql_check
if plpgsql_check_enabled {
let plpgsql_check_results =
pgt_plpgsql_check::check_plpgsql(
pgt_plpgsql_check::PlPgSqlCheckParams {
conn: &pool,
sql: id.content(),
ast: &ast,
schema_cache: schema_cache.as_ref(),
},
)
.await
.unwrap_or_else(|_| vec![]);

for d in plpgsql_check_results {
let r = d.span.map(|span| span + range.start());
diagnostics.push(
d.with_file_path(
path.as_path().display().to_string(),
)
.with_file_span(r.unwrap_or(range)),
);
}
}
}

// plpgsql_check
let plpgsql_check_results = pgt_plpgsql_check::check_plpgsql(
pgt_plpgsql_check::PlPgSqlCheckParams {
conn: &pool,
sql: id.content(),
ast: &ast,
schema_cache: schema_cache.as_ref(),
},
)
.await
.unwrap_or_else(|_| vec![]);

for d in plpgsql_check_results {
let r = d.span.map(|span| span + range.start());
diagnostics.push(
d.with_file_path(path.as_path().display().to_string())
.with_file_span(r.unwrap_or(range)),
);
}
Ok::<Vec<pgt_diagnostics::Error>, sqlx::Error>(diagnostics)
}

Ok::<Vec<pgt_diagnostics::Error>, sqlx::Error>(diagnostics)
}
})
.buffer_unordered(10)
.collect::<Vec<_>>()
.await
})?;

for result in async_results.into_iter() {
let diagnostics_batch = result?;
for diag in diagnostics_batch {
diagnostics.push(SDiagnostic::new(diag));
})
.buffer_unordered(10)
.collect::<Vec<_>>()
.await
})?;

for result in async_results.into_iter() {
let diagnostics_batch = result?;
for diag in diagnostics_batch {
diagnostics.push(SDiagnostic::new(diag));
}
}
}
}
Expand Down
Loading