Diesel implementation.

Pedro de Oliveira 2023-05-29 21:05:30 +01:00
parent 5b5903ed70
commit 59f4469586
14 changed files with 372 additions and 682 deletions

.gitmodules (new file, +3)

@@ -0,0 +1,3 @@
+[submodule "migrations"]
+	path = migrations
+	url = https://git.deadbsd.org/falso/inquisitorum-migrations.git

Cargo.lock (generated, 729 lines changed; diff suppressed because it is too large)

Cargo.toml

@@ -7,10 +7,13 @@ edition = "2021"
 [dependencies]
 actix-web = "4"
-sqlx = { version = "0.6.3", features = [ "postgres", "migrate", "runtime-actix-native-tls" ] }
-async-std = { version = "1", features = [ "attributes" ] }
+diesel = { version = "2.1.0", features = ["r2d2", "postgres", "chrono", "numeric"] }
+async-std = { version = "1", features = ["attributes"] }
 serde = { version = "1.0", features = ["derive"] }
 dotenvy_macro = "0.15.7"
 unidecode = "0.3.0"
-serde_json = "1.0.96"
+serde_json = { version = "1.0.96", features = ["preserve_order"] }
 regex = "1.8.3"
 chrono = "0.4.24"
+diesel_full_text_search = "2.1.0"
+r2d2 = "0.8.10"

diesel.toml (new file, +9)

@@ -0,0 +1,9 @@
+# For documentation on how to configure this file,
+# see https://diesel.rs/guides/configuring-diesel-cli
+
+[print_schema]
+file = "src/schema.rs"
+custom_type_derives = ["diesel::query_builder::QueryId"]
+
+[migrations_directory]
+dir = "migrations"

migrations (new submodule, +1)

@@ -0,0 +1 @@
+Subproject commit 809bdd0f1d930043365ce8e911fc62f8d82ef0bb

src/handlers.rs

@@ -1,7 +1,13 @@
-use crate::models::{get_full_process, get_random_process_id, AppState, Record};
+use crate::models::process::*;
+use crate::models::process_tag::*;
+use crate::schema::*;
+use crate::AppState;
 use actix_web::{get, web, HttpResponse};
-use serde_json::json;
-use std::collections::HashMap;
+use diesel::SelectableHelper;
+use diesel::{sql_function, QueryDsl};
+use diesel::{BelongingToDsl, RunQueryDsl};
+use serde_json::{json, Map, Value};
+use std::collections::{BTreeMap, HashMap};
 
 pub fn config(conf: &mut web::ServiceConfig) {
     let scope = web::scope("/api")
@@ -12,9 +18,9 @@ pub fn config(conf: &mut web::ServiceConfig) {
 
 #[get("/degredo")]
 pub async fn get_degredo(data: web::Data<AppState>) -> HttpResponse {
-    let process_id = get_random_process_id(&data.db).await;
-    let process = get_full_process(&process_id, &data.db).await;
-    HttpResponse::Ok().json(process)
+    let id = get_random_process_id(&data.db);
+    let data = get_full_process(id, &data.db);
+    HttpResponse::Ok().json(data)
 }
 
 #[get("/adcautelam")]
@@ -38,50 +44,5 @@ pub async fn get_adcautelam(
         .unwrap_or(1);
     let offset = page - 1;
 
-    let row: Option<(i32, i32)> = sqlx::query_as(
-        r#"SELECT DISTINCT ON (process_id) process_id, key_id
-        FROM records
-        WHERE value_tsvector @@ websearch_to_tsquery('portuguese', $1)
-        ORDER BY process_id, ts_rank_cd(value_tsvector, websearch_to_tsquery('portuguese', $1)) DESC
-        OFFSET $2 LIMIT 1"#,
-    )
-    .bind(key)
-    .bind(offset)
-    .fetch_optional(&data.db)
-    .await
-    .unwrap();
-
-    if let Some(row) = row {
-        let mut process = get_full_process(&row.0, &data.db).await;
-
-        let row2: Option<(String, String)> = sqlx::query_as(
-            r#"SELECT k."value" as key, ts_headline('portuguese', r."value", websearch_to_tsquery('portuguese', '$1')) as value
-            FROM records r
-            left join keys k on k.id = r.key_id
-            WHERE r.process_id = $2 AND r.key_id = $3"#,
-        )
-        .bind(key)
-        .bind(row.0)
-        .bind(row.1)
-        .fetch_optional(&data.db)
-        .await
-        .unwrap();
-
-        if let Some(row2) = row2 {
-            dbg!(&row2);
-            process.headline = Some(Record {
-                key: row2.0,
-                value: row2.1,
-            });
-            HttpResponse::Ok().json(process)
-        } else {
-            HttpResponse::NotFound().json(json!({
-                "error": "Invalid search term for 'key'."
-            }))
-        }
-    } else {
-        HttpResponse::NotFound().json(json!({
-            "error": "Invalid search term for 'key'."
-        }))
-    }
+    HttpResponse::Ok().json("OI")
 }
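Note that get_degredo is still an async handler but now calls synchronous Diesel/r2d2 code, so each request blocks an executor thread while the queries run. This commit does not address that; one common pattern would be to push the blocking work onto actix's dedicated blocking thread pool with web::block. A sketch, assuming the handler keeps its current signature:

    #[get("/degredo")]
    pub async fn get_degredo(data: web::Data<AppState>) -> HttpResponse {
        // Run the blocking r2d2/Diesel calls on actix's blocking thread
        // pool instead of the async executor.
        let body = web::block(move || {
            let id = get_random_process_id(&data.db);
            get_full_process(id, &data.db)
        })
        .await
        .expect("blocking task failed");

        HttpResponse::Ok().json(body)
    }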

src/lib.rs

@@ -1,2 +1,12 @@
-pub mod models;
 pub mod handlers;
+pub mod models;
+pub mod schema;
+
+use diesel::prelude::*;
+use diesel::r2d2::{ConnectionManager, Pool};
+
+type DbPool = Pool<ConnectionManager<PgConnection>>;
+
+pub struct AppState {
+    pub db: DbPool,
+}

src/main.rs

@@ -1,16 +1,15 @@
 use actix_web::{web, App, HttpServer};
+use diesel::prelude::*;
+use diesel::r2d2::{ConnectionManager, Pool};
 use dotenvy_macro::dotenv;
-use sqlx::postgres::PgPoolOptions;
 use inquisitorum::handlers::config;
-use inquisitorum::models::AppState;
+use inquisitorum::AppState;
 
 #[actix_web::main]
 async fn main() -> std::io::Result<()> {
-    let pool = PgPoolOptions::new()
-        .max_connections(5)
-        .connect(dotenv!("DATABASE_URL"))
-        .await
-        .expect("Failed to create pool");
+    let database_url = dotenv!("DATABASE_URL");
+    let manager = ConnectionManager::<PgConnection>::new(database_url);
+    let pool = Pool::builder().build(manager).expect("Failed to connect");
 
     HttpServer::new(move || {
         App::new()
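The hunk is truncated just inside the HttpServer closure, so the diff does not show how the pool reaches the handlers. Presumably it is cloned into AppState and exposed via app_data; a sketch of that wiring under that assumption (the bind address is a placeholder, not taken from this commit):

    HttpServer::new(move || {
        App::new()
            // Each worker gets a clone of the r2d2 pool inside AppState.
            .app_data(web::Data::new(AppState { db: pool.clone() }))
            .configure(config)
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await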

src/models.rs (deleted, -73)

@ -1,73 +0,0 @@
use serde::Serialize;
use sqlx::PgPool;
use std::collections::BTreeMap;
use unidecode::unidecode;
pub struct AppState {
pub db: PgPool,
}
#[derive(Serialize)]
struct Info {
id: i32,
title: String,
scan: bool,
}
#[derive(Serialize)]
pub struct Record {
pub key: String,
pub value: String,
}
#[derive(Serialize)]
pub struct Process {
info: Info,
pub headline: Option<Record>,
records: BTreeMap<String, String>,
}
pub async fn get_random_process_id(db: &PgPool) -> i32 {
let row: (i32,) =
sqlx::query_as(r#"SELECT id, title, scan from processes order by RANDOM() limit 1"#)
.fetch_one(db)
.await
.expect("Failed to query");
row.0
}
pub async fn get_full_process(process_id: &i32, db: &PgPool) -> Process {
let info = sqlx::query_as!(
Info,
r#"SELECT id, title, scan
FROM public.processes
WHERE id = $1"#,
process_id
)
.fetch_one(db)
.await
.expect("Failed to query");
let records = sqlx::query_as!(
Record,
r#"SELECT k.value as "key", r.value as "value"
FROM public.records r
JOIN public.keys k ON k.id = r.key_id
WHERE process_id = $1"#,
info.id
)
.fetch_all(db)
.await
.expect("Failed to query");
let mut hash_records = BTreeMap::new();
for record in records {
hash_records.insert(unidecode(&record.key), record.value);
}
Process {
info,
headline: None,
records: hash_records,
}
}

src/models/mod.rs (new file, +3)

@@ -0,0 +1,3 @@
+pub mod tag;
+pub mod process;
+pub mod process_tag;

src/models/process.rs (new file, +66)

@@ -0,0 +1,66 @@
+use std::process;
+
+use chrono::naive::NaiveDate;
+use diesel::prelude::*;
+
+use crate::DbPool;
+use crate::schema::*;
+use crate::models::process_tag::*;
+use serde_json::{json, Map, Value};
+
+#[derive(Identifiable, Queryable, Selectable, PartialEq, Debug)]
+#[diesel(table_name = crate::schema::processes)]
+#[diesel(check_for_backend(diesel::pg::Pg))]
+pub struct Process {
+    pub id: i32,
+    pub title: String,
+    pub scan: bool,
+    dt_start: Option<NaiveDate>,
+    dt_finish: Option<NaiveDate>,
+    origin: Option<String>,
+}
+
+pub fn get_random_process_id(db: &DbPool) -> i32 {
+    sql_function!(fn random() -> Text);
+
+    let mut conn = db.get().unwrap();
+
+    processes::table
+        .limit(1)
+        .select(processes::id)
+        .order(random())
+        .first(&mut conn)
+        .expect("fail")
+}
+
+pub fn get_full_process(process_id: i32, db: &DbPool) -> Value {
+    let mut conn = db.get().unwrap();
+
+    let process = processes::table
+        .select(Process::as_select())
+        .filter(processes::id.eq(process_id))
+        .first(&mut conn)
+        .expect("fail");
+
+    let mut results: Vec<(String, String)> = ProcessTag::belonging_to(&process)
+        .inner_join(tags::table)
+        .select((tags::title_api, process_tags::contents))
+        .load(&mut conn)
+        .expect("fail");
+
+    results.sort_by_key(|(key, _)| key.clone());
+
+    let mut data = json!({
+        "document": {
+            "titulo": process.title,
+        },
+    });
+
+    if let Some(document) = data.get_mut("document").and_then(Value::as_object_mut) {
+        for (title, contents) in results {
+            if title != "codigo_de_referencia" {
+                document.insert(title, Value::String(contents));
+            }
+        }
+        document.insert("digital".to_owned(), Value::Bool(process.scan));
+    }
+
+    data
+}
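Here belonging_to comes from the Associations machinery on ProcessTag: given a parent Process, it builds a query filtered on the foreign key named by #[diesel(belongs_to(...))]. Roughly the equivalent hand-written query, as a sketch:

    use diesel::prelude::*;
    use crate::schema::{process_tags, tags};

    // Approximately what ProcessTag::belonging_to(&process) builds before
    // the join and select are chained on.
    fn tags_for(process_id: i32, conn: &mut PgConnection) -> QueryResult<Vec<(String, String)>> {
        process_tags::table
            .filter(process_tags::process_id.eq(process_id))
            .inner_join(tags::table)
            .select((tags::title_api, process_tags::contents))
            .load(conn)
    }

Also worth noting: sql_function!(fn random() -> Text) declares Postgres's random() with a Text return type, which is harmless here because the value is only used in ORDER BY and never deserialized.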

src/models/process_tag.rs (new file, +14)

@@ -0,0 +1,14 @@
+use diesel::prelude::*;
+
+#[derive(Identifiable, Associations, Queryable, Selectable, Debug)]
+#[diesel(table_name = crate::schema::process_tags)]
+#[diesel(check_for_backend(diesel::pg::Pg))]
+#[diesel(belongs_to(crate::models::tag::Tag))]
+#[diesel(belongs_to(crate::models::process::Process))]
+#[diesel(primary_key(process_id, tag_id))]
+pub struct ProcessTag {
+    pub process_id: i32,
+    pub tag_id: i32,
+    pub contents: String,
+    //pub value_tsvector: TsVector,
+}
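Deriving Associations on this join table also enables batch loading: children for a whole set of parents can be fetched in one query and regrouped per parent with grouped_by. A sketch with a hypothetical helper, not part of this commit:

    use diesel::prelude::*;

    use crate::models::process::Process;
    use crate::models::process_tag::ProcessTag;
    use crate::schema::processes;

    // Load every process, then all of their tags in a single second
    // query, regrouped to line up with the parents.
    fn processes_with_tags(conn: &mut PgConnection) -> QueryResult<Vec<(Process, Vec<ProcessTag>)>> {
        let procs: Vec<Process> = processes::table.load(conn)?;
        let tags = ProcessTag::belonging_to(&procs)
            .load::<ProcessTag>(conn)?
            .grouped_by(&procs);
        Ok(procs.into_iter().zip(tags).collect())
    }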

src/models/tag.rs (new file, +10)

@@ -0,0 +1,10 @@
+use diesel::prelude::*;
+
+#[derive(Identifiable, Queryable, Selectable, PartialEq)]
+#[diesel(table_name = crate::schema::tags)]
+#[diesel(check_for_backend(diesel::pg::Pg))]
+pub struct Tag {
+    pub id: i32,
+    pub title: String,
+    pub title_api: String,
+}

src/schema.rs (new file, +47)

@@ -0,0 +1,47 @@
+// @generated automatically by Diesel CLI.
+
+pub mod sql_types {
+    #[derive(diesel::query_builder::QueryId, diesel::sql_types::SqlType)]
+    #[diesel(postgres_type(name = "tsvector", schema = "pg_catalog"))]
+    pub struct Tsvector;
+}
+
+diesel::table! {
+    use diesel::sql_types::*;
+    use super::sql_types::Tsvector;
+
+    process_tags (process_id, tag_id) {
+        process_id -> Int4,
+        tag_id -> Int4,
+        contents -> Text,
+        contents_tsvector -> Tsvector,
+    }
+}
+
+diesel::table! {
+    processes (id) {
+        id -> Int4,
+        title -> Varchar,
+        scan -> Bool,
+        dt_start -> Nullable<Date>,
+        dt_finish -> Nullable<Date>,
+        origin -> Nullable<Varchar>,
+    }
+}
+
+diesel::table! {
+    tags (id) {
+        id -> Int4,
+        title -> Varchar,
+        title_api -> Varchar,
+    }
+}
+
+diesel::joinable!(process_tags -> processes (process_id));
+diesel::joinable!(process_tags -> tags (tag_id));
+
+diesel::allow_tables_to_appear_in_same_query!(
+    process_tags,
+    processes,
+    tags,
+);
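The generated sql_types::Tsvector stub and the new diesel_full_text_search dependency are not used anywhere else in this commit; the full-text search that get_adcautelam used to run through sqlx is stubbed out above. A sketch of how it might be ported, assuming diesel.toml is later given import_types so the generated contents_tsvector column uses diesel_full_text_search's TsVector type (otherwise the crate's operators will not apply to it), and with plainto_tsquery standing in for the original websearch_to_tsquery('portuguese', ...):

    use diesel::prelude::*;
    use diesel_full_text_search::{plainto_tsquery, TsVectorExtensions};

    use crate::schema::process_tags;

    // Sketch: ids of processes whose tag contents match the search term.
    // `.matches()` renders as the Postgres @@ operator.
    fn matching_process_ids(term: &str, conn: &mut PgConnection) -> QueryResult<Vec<i32>> {
        process_tags::table
            .filter(process_tags::contents_tsvector.matches(plainto_tsquery(term)))
            .select(process_tags::process_id)
            .distinct()
            .load(conn)
    }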