vaultwarden/src/db/models/attachment.rs
Mathijs van Veluw 2ee5819b56
Use Diesel's MultiConnection Derive (#6279)
* Use Diesel's MultiConnection Derive

With this PR we remove almost all of the custom macros used to generate the per-database-type code. This is now handled by Diesel itself.

This removes the need for the following functions/macros:
 - `db_object!`
 - `::to_db`
 - `.from_db()`

It also makes it possible to use a single schema instead of one per database type.
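
For context, Diesel's `MultiConnection` derive turns an enum of backend connections into a type that itself implements `Connection`, so one code path can serve all backends. A minimal sketch (enum name and variants are illustrative, not the exact vaultwarden definition):

    #[derive(diesel::MultiConnection)]
    pub enum AnyDbConnection {
        // One variant per supported backend; at runtime, queries are
        // dispatched to whichever variant this enum currently holds.
        Sqlite(diesel::sqlite::SqliteConnection),
        Mysql(diesel::mysql::MysqlConnection),
        Postgresql(diesel::pg::PgConnection),
    }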

Also done:
 - Refactored the SQLite backup function
 - Reformatted queries so every call is on a separate line, which reads a bit better
 - Declare `conn` as mut inside each `db_run!` instead of having to declare it as `mut` in functions or calls
 - Added an `ACTIVE_DB_TYPE` static which holds the currently active database type
 - Removed the `diesel_logger` crate in favor of Diesel's `set_default_instrumentation()`
   If you want to debug queries you can now simply change the log level of `vaultwarden::db::query_logger` (see the sketch after this list)
 - Use PostgreSQL v17 in the Alpine images to match the Debian Trixie version
 - Optimized the Workflows since `diesel_logger` isn't needed anymore
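
A rough sketch of that query-logging setup (assuming Diesel 2.2's `set_default_instrumentation()` and the `log` crate; not the exact vaultwarden code):

    // Install a process-wide default instrumentation: every Diesel event is
    // logged under the `vaultwarden::db::query_logger` target at debug level.
    let _ = diesel::connection::set_default_instrumentation(|| {
        Some(Box::new(|event: diesel::connection::InstrumentationEvent<'_>| {
            log::debug!(target: "vaultwarden::db::query_logger", "{event:?}");
        }))
    });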

As an extra plus, this also lowers compile time and binary size.

Signed-off-by: BlackDex <black.dex@gmail.com>

* Adjust query_logger and some other small items

Signed-off-by: BlackDex <black.dex@gmail.com>

* Remove  macro, replaced with an  function

Signed-off-by: BlackDex <black.dex@gmail.com>

* Implement custom connection manager

Signed-off-by: BlackDex <black.dex@gmail.com>

* Updated some crates to keep them up to date

Signed-off-by: BlackDex <black.dex@gmail.com>

* Small adjustment

Signed-off-by: BlackDex <black.dex@gmail.com>

* crate updates

Signed-off-by: BlackDex <black.dex@gmail.com>

* Update crates

Signed-off-by: BlackDex <black.dex@gmail.com>

---------

Signed-off-by: BlackDex <black.dex@gmail.com>
2025-10-29 21:04:30 +01:00


use std::time::Duration;

use bigdecimal::{BigDecimal, ToPrimitive};
use derive_more::{AsRef, Deref, Display};
use diesel::prelude::*;
use serde_json::Value;

use super::{CipherId, OrganizationId, UserId};
use crate::api::EmptyResult;
use crate::auth::{encode_jwt, generate_file_download_claims};
use crate::db::schema::{attachments, ciphers};
use crate::db::DbConn;
use crate::error::MapResult;
use crate::{config::PathType, CONFIG};
use macros::IdFromParam;

#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = attachments)]
#[diesel(treat_none_as_null = true)]
#[diesel(primary_key(id))]
pub struct Attachment {
    pub id: AttachmentId,
    pub cipher_uuid: CipherId,
    pub file_name: String, // encrypted
    pub file_size: i64,
    pub akey: Option<String>,
}

/// Local methods
impl Attachment {
    pub const fn new(
        id: AttachmentId,
        cipher_uuid: CipherId,
        file_name: String,
        file_size: i64,
        akey: Option<String>,
    ) -> Self {
        Self {
            id,
            cipher_uuid,
            file_name,
            file_size,
            akey,
        }
    }

    pub fn get_file_path(&self) -> String {
        format!("{}/{}", self.cipher_uuid, self.id)
    }
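
    // For the local filesystem backend, hand out a download URL guarded by a
    // short-lived JWT; for any other OpenDAL backend (e.g. S3) return a
    // presigned URL that expires after five minutes.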
    pub async fn get_url(&self, host: &str) -> Result<String, crate::Error> {
        let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?;
        if operator.info().scheme() == opendal::Scheme::Fs {
            let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
            Ok(format!("{host}/attachments/{}/{}?token={token}", self.cipher_uuid, self.id))
        } else {
            Ok(operator.presign_read(&self.get_file_path(), Duration::from_secs(5 * 60)).await?.uri().to_string())
        }
    }

    pub async fn to_json(&self, host: &str) -> Result<Value, crate::Error> {
        Ok(json!({
            "id": self.id,
            "url": self.get_url(host).await?,
            "fileName": self.file_name,
            "size": self.file_size.to_string(),
            "sizeName": crate::util::get_display_size(self.file_size),
            "key": self.akey,
            "object": "attachment"
        }))
    }
}

/// Database methods
impl Attachment {
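    // Upsert: SQLite and MySQL use `replace_into`, falling back to an UPDATE
    // when REPLACE would trigger a foreign key violation; PostgreSQL uses
    // `ON CONFLICT (id) DO UPDATE`.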
    pub async fn save(&self, conn: &DbConn) -> EmptyResult {
        db_run! { conn:
            sqlite, mysql {
                match diesel::replace_into(attachments::table)
                    .values(self)
                    .execute(conn)
                {
                    Ok(_) => Ok(()),
                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
                        diesel::update(attachments::table)
                            .filter(attachments::id.eq(&self.id))
                            .set(self)
                            .execute(conn)
                            .map_res("Error saving attachment")
                    }
                    Err(e) => Err(e.into()),
                }.map_res("Error saving attachment")
            }
            postgresql {
                diesel::insert_into(attachments::table)
                    .values(self)
                    .on_conflict(attachments::id)
                    .do_update()
                    .set(self)
                    .execute(conn)
                    .map_res("Error saving attachment")
            }
        }
    }

    pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            crate::util::retry(||
                diesel::delete(attachments::table.filter(attachments::id.eq(&self.id)))
                    .execute(conn),
                10,
            )
            .map(|_| ())
            .map_res("Error deleting attachment")
        }}?;
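
        // Also remove the stored file; a NotFound error just means it was
        // already deleted.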
        let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?;
        let file_path = self.get_file_path();
        if let Err(e) = operator.delete(&file_path).await {
            if e.kind() == opendal::ErrorKind::NotFound {
                debug!("File '{file_path}' already deleted.");
            } else {
                return Err(e.into());
            }
        }

        Ok(())
    }

    pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &DbConn) -> EmptyResult {
        for attachment in Attachment::find_by_cipher(cipher_uuid, conn).await {
            attachment.delete(conn).await?;
        }
        Ok(())
    }
    pub async fn find_by_id(id: &AttachmentId, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            attachments::table
                .filter(attachments::id.eq(id.to_lowercase()))
                .first::<Self>(conn)
                .ok()
        }}
    }

    pub async fn find_by_cipher(cipher_uuid: &CipherId, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            attachments::table
                .filter(attachments::cipher_uuid.eq(cipher_uuid))
                .load::<Self>(conn)
                .expect("Error loading attachments")
        }}
    }
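
    // SUM(file_size) is returned as a BigDecimal because the total can exceed
    // the i64 column range: clamp an oversized total to i64::MAX and treat
    // "no rows" as 0.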
    pub async fn size_by_user(user_uuid: &UserId, conn: &DbConn) -> i64 {
        db_run! { conn: {
            let result: Option<BigDecimal> = attachments::table
                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
                .filter(ciphers::user_uuid.eq(user_uuid))
                .select(diesel::dsl::sum(attachments::file_size))
                .first(conn)
                .expect("Error loading user attachment total size");

            match result.map(|r| r.to_i64()) {
                Some(Some(r)) => r,
                Some(None) => i64::MAX,
                None => 0,
            }
        }}
    }

    pub async fn count_by_user(user_uuid: &UserId, conn: &DbConn) -> i64 {
        db_run! { conn: {
            attachments::table
                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
                .filter(ciphers::user_uuid.eq(user_uuid))
                .count()
                .first(conn)
                .unwrap_or(0)
        }}
    }

    pub async fn size_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> i64 {
        db_run! { conn: {
            let result: Option<BigDecimal> = attachments::table
                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
                .filter(ciphers::organization_uuid.eq(org_uuid))
                .select(diesel::dsl::sum(attachments::file_size))
                .first(conn)
                .expect("Error loading org attachment total size");

            match result.map(|r| r.to_i64()) {
                Some(Some(r)) => r,
                Some(None) => i64::MAX,
                None => 0,
            }
        }}
    }

    pub async fn count_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> i64 {
        db_run! { conn: {
            attachments::table
                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
                .filter(ciphers::organization_uuid.eq(org_uuid))
                .count()
                .first(conn)
                .unwrap_or(0)
        }}
    }

    // This returns all attachments linked to the user or the given orgs.
    // There is no filtering here on whether the user actually has access!
    // It is used to speed up the sync process; the matching is done elsewhere.
    pub async fn find_all_by_user_and_orgs(
        user_uuid: &UserId,
        org_uuids: &Vec<OrganizationId>,
        conn: &DbConn,
    ) -> Vec<Self> {
        db_run! { conn: {
            attachments::table
                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
                .filter(ciphers::user_uuid.eq(user_uuid))
                .or_filter(ciphers::organization_uuid.eq_any(org_uuids))
                .select(attachments::all_columns)
                .load::<Self>(conn)
                .expect("Error loading attachments")
        }}
    }
}
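
// Newtype wrapper so attachment ids cannot be mixed up with other String ids.
// `DieselNewType` makes it map to the underlying text column, and the
// crate-local `IdFromParam` derive allows parsing it from route parameters.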
#[derive(
    Clone,
    Debug,
    AsRef,
    Deref,
    DieselNewType,
    Display,
    FromForm,
    Hash,
    PartialEq,
    Eq,
    Serialize,
    Deserialize,
    IdFromParam,
)]
pub struct AttachmentId(pub String);