Mirror of https://github.com/dani-garcia/vaultwarden.git, synced 2025-11-07 04:42:59 -05:00.
Use Diesel's MultiConnections Derive (#6279)
* Use Diesel's MultiConnections Derive

  With this PR we remove almost all of the custom macros used to generate the code for the multiple database types; this is now handled by Diesel itself. It removes the need for the following functions/macros:
  - `db_object!`
  - `::to_db`
  - `.from_db()`

  It is also possible to use just one schema instead of one per database type.

  Also done:
  - Refactored the SQLite backup function
  - Some formatting of queries so every call is on a separate line, which looks a bit better
  - Declare `conn` as mut inside each `db_run!` instead of having to declare it as `mut` in functions or calls
  - Added an `ACTIVE_DB_TYPE` static which holds the currently active database type
  - Removed the `diesel_logger` crate and use Diesel's `set_default_instrumentation()` instead; if you want to debug queries you can now simply change the log level of `vaultwarden::db::query_logger`
  - Use PostgreSQL v17 in the Alpine images to match the Debian Trixie version
  - Optimized the workflows since `diesel_logger` isn't needed anymore

  On the extra plus side, this lowers compile time and binary size too.

  Signed-off-by: BlackDex <black.dex@gmail.com>

* Adjust query_logger and some other small items
  Signed-off-by: BlackDex <black.dex@gmail.com>
* Remove macro, replaced with a function
  Signed-off-by: BlackDex <black.dex@gmail.com>
* Implement custom connection manager
  Signed-off-by: BlackDex <black.dex@gmail.com>
* Updated some crates to keep them up to date
  Signed-off-by: BlackDex <black.dex@gmail.com>
* Small adjustment
  Signed-off-by: BlackDex <black.dex@gmail.com>
* Crate updates
  Signed-off-by: BlackDex <black.dex@gmail.com>
* Update crates
  Signed-off-by: BlackDex <black.dex@gmail.com>

---------

Signed-off-by: BlackDex <black.dex@gmail.com>
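For context on the Diesel feature the commit message refers to: since Diesel 2.1 a single enum can derive `MultiConnection`, and queries written once against a shared schema dispatch at runtime to whichever backend variant is active. The sketch below is illustrative only — the enum name, variants, feature gates, and table are assumptions, not vaultwarden's actual definitions.

```rust
// Minimal sketch of Diesel's MultiConnection derive (Diesel 2.1+).
// Names, variants, and feature gates here are illustrative assumptions.
use diesel::prelude::*;

#[derive(diesel::MultiConnection)]
pub enum AnyDbConnection {
    #[cfg(feature = "sqlite")]
    Sqlite(diesel::sqlite::SqliteConnection),
    #[cfg(feature = "mysql")]
    Mysql(diesel::mysql::MysqlConnection),
    #[cfg(feature = "postgresql")]
    Postgresql(diesel::pg::PgConnection),
}

diesel::table! {
    // Stand-in table; the real schema is defined elsewhere in the tree.
    users (uuid) {
        uuid -> Text,
        email -> Text,
    }
}

// One query definition compiles once and runs against every enum variant.
fn find_email(conn: &mut AnyDbConnection, user_uuid: &str) -> QueryResult<String> {
    users::table
        .filter(users::uuid.eq(user_uuid))
        .select(users::email)
        .first(conn)
}
```

Because queries can be written once against a single schema like this, the per-database schema copies and the `db_object!`/`::to_db`/`.from_db()` conversion layer mentioned above become unnecessary.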
Commit 2ee5819b56 (parent 7c597e88f9), committed via GitHub.
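Two notes before the diff hunks below. First, most of the change is mechanical: the cipher API handlers and helpers now take `conn: DbConn` / `conn: &DbConn` instead of `mut conn: DbConn` / `conn: &mut DbConn`, because, per the commit message, the connection is only made mutable inside each `db_run!` block. Second, the `diesel_logger` replacement mentioned above relies on Diesel's instrumentation hooks (Diesel 2.2+); the sketch below shows the general shape, where only the `vaultwarden::db::query_logger` log target comes from the commit message and the rest is an assumption.

```rust
// Hedged sketch of query logging via Diesel's own instrumentation hooks (Diesel 2.2+),
// replacing the diesel_logger crate. Only the log target is taken from the commit
// message; function names and wiring are illustrative.
use diesel::connection::{set_default_instrumentation, Instrumentation, InstrumentationEvent};

fn query_logger() -> Option<Box<dyn Instrumentation>> {
    // Closures over InstrumentationEvent implement the Instrumentation trait.
    Some(Box::new(|event: InstrumentationEvent<'_>| {
        log::debug!(target: "vaultwarden::db::query_logger", "{event:?}");
    }))
}

pub fn init_query_logging() {
    // Registered once at startup; connections established afterwards report their
    // events (queries, transactions, connection setup) to the closure above.
    let _ = set_default_instrumentation(query_logger);
}
```

With this in place, enabling query debugging becomes a logging configuration change (raise the level for `vaultwarden::db::query_logger`) rather than a compile-time dependency.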
@@ -17,7 +17,14 @@ use crate::{
|
||||
auth::Headers,
|
||||
config::PathType,
|
||||
crypto,
|
||||
db::{models::*, DbConn, DbPool},
|
||||
db::{
|
||||
models::{
|
||||
Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup, CollectionId,
|
||||
CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership, MembershipType,
|
||||
OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId,
|
||||
},
|
||||
DbConn, DbPool,
|
||||
},
|
||||
CONFIG,
|
||||
};
|
||||
|
||||
@@ -93,8 +100,8 @@ pub fn routes() -> Vec<Route> {
|
||||
|
||||
pub async fn purge_trashed_ciphers(pool: DbPool) {
|
||||
debug!("Purging trashed ciphers");
|
||||
if let Ok(mut conn) = pool.get().await {
|
||||
Cipher::purge_trash(&mut conn).await;
|
||||
if let Ok(conn) = pool.get().await {
|
||||
Cipher::purge_trash(&conn).await;
|
||||
} else {
|
||||
error!("Failed to get DB connection while purging trashed ciphers")
|
||||
}
|
||||
@@ -107,11 +114,11 @@ struct SyncData {
|
||||
}
|
||||
|
||||
#[get("/sync?<data..>")]
|
||||
async fn sync(data: SyncData, headers: Headers, client_version: Option<ClientVersion>, mut conn: DbConn) -> JsonResult {
|
||||
let user_json = headers.user.to_json(&mut conn).await;
|
||||
async fn sync(data: SyncData, headers: Headers, client_version: Option<ClientVersion>, conn: DbConn) -> JsonResult {
|
||||
let user_json = headers.user.to_json(&conn).await;
|
||||
|
||||
// Get all ciphers which are visible by the user
|
||||
let mut ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await;
|
||||
let mut ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn).await;
|
||||
|
||||
// Filter out SSH keys if the client version is less than 2024.12.0
|
||||
let show_ssh_keys = if let Some(client_version) = client_version {
|
||||
@@ -124,31 +131,30 @@ async fn sync(data: SyncData, headers: Headers, client_version: Option<ClientVer
|
||||
ciphers.retain(|c| c.atype != 5);
|
||||
}
|
||||
|
||||
let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await;
|
||||
let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &conn).await;
|
||||
|
||||
// Lets generate the ciphers_json using all the gathered info
|
||||
let mut ciphers_json = Vec::with_capacity(ciphers.len());
|
||||
for c in ciphers {
|
||||
ciphers_json.push(
|
||||
c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn)
|
||||
.await?,
|
||||
c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &conn).await?,
|
||||
);
|
||||
}
|
||||
|
||||
let collections = Collection::find_by_user_uuid(headers.user.uuid.clone(), &mut conn).await;
|
||||
let collections = Collection::find_by_user_uuid(headers.user.uuid.clone(), &conn).await;
|
||||
let mut collections_json = Vec::with_capacity(collections.len());
|
||||
for c in collections {
|
||||
collections_json.push(c.to_json_details(&headers.user.uuid, Some(&cipher_sync_data), &mut conn).await);
|
||||
collections_json.push(c.to_json_details(&headers.user.uuid, Some(&cipher_sync_data), &conn).await);
|
||||
}
|
||||
|
||||
let folders_json: Vec<Value> =
|
||||
Folder::find_by_user(&headers.user.uuid, &mut conn).await.iter().map(Folder::to_json).collect();
|
||||
Folder::find_by_user(&headers.user.uuid, &conn).await.iter().map(Folder::to_json).collect();
|
||||
|
||||
let sends_json: Vec<Value> =
|
||||
Send::find_by_user(&headers.user.uuid, &mut conn).await.iter().map(Send::to_json).collect();
|
||||
Send::find_by_user(&headers.user.uuid, &conn).await.iter().map(Send::to_json).collect();
|
||||
|
||||
let policies_json: Vec<Value> =
|
||||
OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &mut conn).await.iter().map(OrgPolicy::to_json).collect();
|
||||
OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &conn).await.iter().map(OrgPolicy::to_json).collect();
|
||||
|
||||
let domains_json = if data.exclude_domains {
|
||||
Value::Null
|
||||
@@ -169,15 +175,14 @@ async fn sync(data: SyncData, headers: Headers, client_version: Option<ClientVer
|
||||
}
|
||||
|
||||
#[get("/ciphers")]
|
||||
async fn get_ciphers(headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await;
|
||||
let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await;
|
||||
async fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult {
|
||||
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn).await;
|
||||
let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &conn).await;
|
||||
|
||||
let mut ciphers_json = Vec::with_capacity(ciphers.len());
|
||||
for c in ciphers {
|
||||
ciphers_json.push(
|
||||
c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn)
|
||||
.await?,
|
||||
c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &conn).await?,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -189,16 +194,16 @@ async fn get_ciphers(headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
}
|
||||
|
||||
#[get("/ciphers/<cipher_id>")]
|
||||
async fn get_cipher(cipher_id: CipherId, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else {
|
||||
async fn get_cipher(cipher_id: CipherId, headers: Headers, conn: DbConn) -> JsonResult {
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else {
|
||||
err!("Cipher doesn't exist")
|
||||
};
|
||||
|
||||
if !cipher.is_accessible_to_user(&headers.user.uuid, &mut conn).await {
|
||||
if !cipher.is_accessible_to_user(&headers.user.uuid, &conn).await {
|
||||
err!("Cipher is not owned by user")
|
||||
}
|
||||
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?))
|
||||
}
|
||||
|
||||
#[get("/ciphers/<cipher_id>/admin")]
|
||||
@@ -291,7 +296,7 @@ async fn post_ciphers_admin(data: Json<ShareCipherData>, headers: Headers, conn:
|
||||
async fn post_ciphers_create(
|
||||
data: Json<ShareCipherData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let mut data: ShareCipherData = data.into_inner();
|
||||
@@ -305,11 +310,11 @@ async fn post_ciphers_create(
|
||||
// This check is usually only needed in update_cipher_from_data(), but we
|
||||
// need it here as well to avoid creating an empty cipher in the call to
|
||||
// cipher.save() below.
|
||||
enforce_personal_ownership_policy(Some(&data.cipher), &headers, &mut conn).await?;
|
||||
enforce_personal_ownership_policy(Some(&data.cipher), &headers, &conn).await?;
|
||||
|
||||
let mut cipher = Cipher::new(data.cipher.r#type, data.cipher.name.clone());
|
||||
cipher.user_uuid = Some(headers.user.uuid.clone());
|
||||
cipher.save(&mut conn).await?;
|
||||
cipher.save(&conn).await?;
|
||||
|
||||
// When cloning a cipher, the Bitwarden clients seem to set this field
|
||||
// based on the cipher being cloned (when creating a new cipher, it's set
|
||||
@@ -319,12 +324,12 @@ async fn post_ciphers_create(
|
||||
// or otherwise), we can just ignore this field entirely.
|
||||
data.cipher.last_known_revision_date = None;
|
||||
|
||||
share_cipher_by_uuid(&cipher.uuid, data, &headers, &mut conn, &nt, None).await
|
||||
share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt, None).await
|
||||
}
|
||||
|
||||
/// Called when creating a new user-owned cipher.
|
||||
#[post("/ciphers", data = "<data>")]
|
||||
async fn post_ciphers(data: Json<CipherData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
async fn post_ciphers(data: Json<CipherData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
let mut data: CipherData = data.into_inner();
|
||||
|
||||
// The web/browser clients set this field to null as expected, but the
|
||||
@@ -334,9 +339,9 @@ async fn post_ciphers(data: Json<CipherData>, headers: Headers, mut conn: DbConn
|
||||
data.last_known_revision_date = None;
|
||||
|
||||
let mut cipher = Cipher::new(data.r#type, data.name.clone());
|
||||
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
|
||||
update_cipher_from_data(&mut cipher, data, &headers, None, &conn, &nt, UpdateType::SyncCipherCreate).await?;
|
||||
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?))
|
||||
}
|
||||
|
||||
/// Enforces the personal ownership policy on user-owned ciphers, if applicable.
|
||||
@@ -346,11 +351,7 @@ async fn post_ciphers(data: Json<CipherData>, headers: Headers, mut conn: DbConn
|
||||
/// allowed to delete or share such ciphers to an org, however.
|
||||
///
|
||||
/// Ref: https://bitwarden.com/help/article/policies/#personal-ownership
|
||||
async fn enforce_personal_ownership_policy(
|
||||
data: Option<&CipherData>,
|
||||
headers: &Headers,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
async fn enforce_personal_ownership_policy(data: Option<&CipherData>, headers: &Headers, conn: &DbConn) -> EmptyResult {
|
||||
if data.is_none() || data.unwrap().organization_id.is_none() {
|
||||
let user_id = &headers.user.uuid;
|
||||
let policy_type = OrgPolicyType::PersonalOwnership;
|
||||
@@ -366,7 +367,7 @@ pub async fn update_cipher_from_data(
|
||||
data: CipherData,
|
||||
headers: &Headers,
|
||||
shared_to_collections: Option<Vec<CollectionId>>,
|
||||
conn: &mut DbConn,
|
||||
conn: &DbConn,
|
||||
nt: &Notify<'_>,
|
||||
ut: UpdateType,
|
||||
) -> EmptyResult {
|
||||
@@ -559,13 +560,8 @@ struct RelationsData {
|
||||
}
|
||||
|
||||
#[post("/ciphers/import", data = "<data>")]
|
||||
async fn post_ciphers_import(
|
||||
data: Json<ImportData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> EmptyResult {
|
||||
enforce_personal_ownership_policy(None, &headers, &mut conn).await?;
|
||||
async fn post_ciphers_import(data: Json<ImportData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
enforce_personal_ownership_policy(None, &headers, &conn).await?;
|
||||
|
||||
let data: ImportData = data.into_inner();
|
||||
|
||||
@@ -577,14 +573,14 @@ async fn post_ciphers_import(
|
||||
|
||||
// Read and create the folders
|
||||
let existing_folders: HashSet<Option<FolderId>> =
|
||||
Folder::find_by_user(&headers.user.uuid, &mut conn).await.into_iter().map(|f| Some(f.uuid)).collect();
|
||||
Folder::find_by_user(&headers.user.uuid, &conn).await.into_iter().map(|f| Some(f.uuid)).collect();
|
||||
let mut folders: Vec<FolderId> = Vec::with_capacity(data.folders.len());
|
||||
for folder in data.folders.into_iter() {
|
||||
let folder_id = if existing_folders.contains(&folder.id) {
|
||||
folder.id.unwrap()
|
||||
} else {
|
||||
let mut new_folder = Folder::new(headers.user.uuid.clone(), folder.name);
|
||||
new_folder.save(&mut conn).await?;
|
||||
new_folder.save(&conn).await?;
|
||||
new_folder.uuid
|
||||
};
|
||||
|
||||
@@ -604,12 +600,12 @@ async fn post_ciphers_import(
|
||||
cipher_data.folder_id = folder_id;
|
||||
|
||||
let mut cipher = Cipher::new(cipher_data.r#type, cipher_data.name.clone());
|
||||
update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await?;
|
||||
update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &conn, &nt, UpdateType::None).await?;
|
||||
}
|
||||
|
||||
let mut user = headers.user;
|
||||
user.update_revision(&mut conn).await?;
|
||||
nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &mut conn).await;
|
||||
user.update_revision(&conn).await?;
|
||||
nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -653,12 +649,12 @@ async fn put_cipher(
|
||||
cipher_id: CipherId,
|
||||
data: Json<CipherData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let data: CipherData = data.into_inner();
|
||||
|
||||
let Some(mut cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else {
|
||||
let Some(mut cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else {
|
||||
err!("Cipher doesn't exist")
|
||||
};
|
||||
|
||||
@@ -667,13 +663,13 @@ async fn put_cipher(
|
||||
// cipher itself, so the user shouldn't need write access to change these.
|
||||
// Interestingly, upstream Bitwarden doesn't properly handle this either.
|
||||
|
||||
if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await {
|
||||
if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await {
|
||||
err!("Cipher is not write accessible")
|
||||
}
|
||||
|
||||
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
|
||||
update_cipher_from_data(&mut cipher, data, &headers, None, &conn, &nt, UpdateType::SyncCipherUpdate).await?;
|
||||
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?))
|
||||
}
|
||||
|
||||
#[post("/ciphers/<cipher_id>/partial", data = "<data>")]
|
||||
@@ -692,26 +688,26 @@ async fn put_cipher_partial(
|
||||
cipher_id: CipherId,
|
||||
data: Json<PartialCipherData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
) -> JsonResult {
|
||||
let data: PartialCipherData = data.into_inner();
|
||||
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else {
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else {
|
||||
err!("Cipher doesn't exist")
|
||||
};
|
||||
|
||||
if let Some(ref folder_id) = data.folder_id {
|
||||
if Folder::find_by_uuid_and_user(folder_id, &headers.user.uuid, &mut conn).await.is_none() {
|
||||
if Folder::find_by_uuid_and_user(folder_id, &headers.user.uuid, &conn).await.is_none() {
|
||||
err!("Invalid folder", "Folder does not exist or belongs to another user");
|
||||
}
|
||||
}
|
||||
|
||||
// Move cipher
|
||||
cipher.move_to_folder(data.folder_id.clone(), &headers.user.uuid, &mut conn).await?;
|
||||
cipher.move_to_folder(data.folder_id.clone(), &headers.user.uuid, &conn).await?;
|
||||
// Update favorite
|
||||
cipher.set_favorite(Some(data.favorite), &headers.user.uuid, &mut conn).await?;
|
||||
cipher.set_favorite(Some(data.favorite), &headers.user.uuid, &conn).await?;
|
||||
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?))
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -764,35 +760,34 @@ async fn post_collections_update(
|
||||
cipher_id: CipherId,
|
||||
data: Json<CollectionsAdminData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let data: CollectionsAdminData = data.into_inner();
|
||||
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else {
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else {
|
||||
err!("Cipher doesn't exist")
|
||||
};
|
||||
|
||||
if !cipher.is_in_editable_collection_by_user(&headers.user.uuid, &mut conn).await {
|
||||
if !cipher.is_in_editable_collection_by_user(&headers.user.uuid, &conn).await {
|
||||
err!("Collection cannot be changed")
|
||||
}
|
||||
|
||||
let posted_collections = HashSet::<CollectionId>::from_iter(data.collection_ids);
|
||||
let current_collections =
|
||||
HashSet::<CollectionId>::from_iter(cipher.get_collections(headers.user.uuid.clone(), &mut conn).await);
|
||||
HashSet::<CollectionId>::from_iter(cipher.get_collections(headers.user.uuid.clone(), &conn).await);
|
||||
|
||||
for collection in posted_collections.symmetric_difference(¤t_collections) {
|
||||
match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &mut conn).await
|
||||
{
|
||||
match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &conn).await {
|
||||
None => err!("Invalid collection ID provided"),
|
||||
Some(collection) => {
|
||||
if collection.is_writable_by_user(&headers.user.uuid, &mut conn).await {
|
||||
if collection.is_writable_by_user(&headers.user.uuid, &conn).await {
|
||||
if posted_collections.contains(&collection.uuid) {
|
||||
// Add to collection
|
||||
CollectionCipher::save(&cipher.uuid, &collection.uuid, &mut conn).await?;
|
||||
CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn).await?;
|
||||
} else {
|
||||
// Remove from collection
|
||||
CollectionCipher::delete(&cipher.uuid, &collection.uuid, &mut conn).await?;
|
||||
CollectionCipher::delete(&cipher.uuid, &collection.uuid, &conn).await?;
|
||||
}
|
||||
} else {
|
||||
err!("No rights to modify the collection")
|
||||
@@ -804,10 +799,10 @@ async fn post_collections_update(
|
||||
nt.send_cipher_update(
|
||||
UpdateType::SyncCipherUpdate,
|
||||
&cipher,
|
||||
&cipher.update_users_revision(&mut conn).await,
|
||||
&cipher.update_users_revision(&conn).await,
|
||||
&headers.device,
|
||||
Some(Vec::from_iter(posted_collections)),
|
||||
&mut conn,
|
||||
&conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -818,11 +813,11 @@ async fn post_collections_update(
|
||||
&headers.user.uuid,
|
||||
headers.device.atype,
|
||||
&headers.ip.ip,
|
||||
&mut conn,
|
||||
&conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?))
|
||||
}
|
||||
|
||||
#[put("/ciphers/<cipher_id>/collections-admin", data = "<data>")]
|
||||
@@ -841,35 +836,34 @@ async fn post_collections_admin(
|
||||
cipher_id: CipherId,
|
||||
data: Json<CollectionsAdminData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> EmptyResult {
|
||||
let data: CollectionsAdminData = data.into_inner();
|
||||
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else {
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else {
|
||||
err!("Cipher doesn't exist")
|
||||
};
|
||||
|
||||
if !cipher.is_in_editable_collection_by_user(&headers.user.uuid, &mut conn).await {
|
||||
if !cipher.is_in_editable_collection_by_user(&headers.user.uuid, &conn).await {
|
||||
err!("Collection cannot be changed")
|
||||
}
|
||||
|
||||
let posted_collections = HashSet::<CollectionId>::from_iter(data.collection_ids);
|
||||
let current_collections =
|
||||
HashSet::<CollectionId>::from_iter(cipher.get_admin_collections(headers.user.uuid.clone(), &mut conn).await);
|
||||
HashSet::<CollectionId>::from_iter(cipher.get_admin_collections(headers.user.uuid.clone(), &conn).await);
|
||||
|
||||
for collection in posted_collections.symmetric_difference(¤t_collections) {
|
||||
match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &mut conn).await
|
||||
{
|
||||
match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &conn).await {
|
||||
None => err!("Invalid collection ID provided"),
|
||||
Some(collection) => {
|
||||
if collection.is_writable_by_user(&headers.user.uuid, &mut conn).await {
|
||||
if collection.is_writable_by_user(&headers.user.uuid, &conn).await {
|
||||
if posted_collections.contains(&collection.uuid) {
|
||||
// Add to collection
|
||||
CollectionCipher::save(&cipher.uuid, &collection.uuid, &mut conn).await?;
|
||||
CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn).await?;
|
||||
} else {
|
||||
// Remove from collection
|
||||
CollectionCipher::delete(&cipher.uuid, &collection.uuid, &mut conn).await?;
|
||||
CollectionCipher::delete(&cipher.uuid, &collection.uuid, &conn).await?;
|
||||
}
|
||||
} else {
|
||||
err!("No rights to modify the collection")
|
||||
@@ -881,10 +875,10 @@ async fn post_collections_admin(
|
||||
nt.send_cipher_update(
|
||||
UpdateType::SyncCipherUpdate,
|
||||
&cipher,
|
||||
&cipher.update_users_revision(&mut conn).await,
|
||||
&cipher.update_users_revision(&conn).await,
|
||||
&headers.device,
|
||||
Some(Vec::from_iter(posted_collections)),
|
||||
&mut conn,
|
||||
&conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -895,7 +889,7 @@ async fn post_collections_admin(
|
||||
&headers.user.uuid,
|
||||
headers.device.atype,
|
||||
&headers.ip.ip,
|
||||
&mut conn,
|
||||
&conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -916,12 +910,12 @@ async fn post_cipher_share(
|
||||
cipher_id: CipherId,
|
||||
data: Json<ShareCipherData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let data: ShareCipherData = data.into_inner();
|
||||
|
||||
share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt, None).await
|
||||
share_cipher_by_uuid(&cipher_id, data, &headers, &conn, &nt, None).await
|
||||
}
|
||||
|
||||
#[put("/ciphers/<cipher_id>/share", data = "<data>")]
|
||||
@@ -929,12 +923,12 @@ async fn put_cipher_share(
|
||||
cipher_id: CipherId,
|
||||
data: Json<ShareCipherData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let data: ShareCipherData = data.into_inner();
|
||||
|
||||
share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt, None).await
|
||||
share_cipher_by_uuid(&cipher_id, data, &headers, &conn, &nt, None).await
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -948,7 +942,7 @@ struct ShareSelectedCipherData {
|
||||
async fn put_cipher_share_selected(
|
||||
data: Json<ShareSelectedCipherData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> EmptyResult {
|
||||
let mut data: ShareSelectedCipherData = data.into_inner();
|
||||
@@ -975,14 +969,14 @@ async fn put_cipher_share_selected(
|
||||
|
||||
match shared_cipher_data.cipher.id.take() {
|
||||
Some(id) => {
|
||||
share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &nt, Some(UpdateType::None)).await?
|
||||
share_cipher_by_uuid(&id, shared_cipher_data, &headers, &conn, &nt, Some(UpdateType::None)).await?
|
||||
}
|
||||
None => err!("Request missing ids field"),
|
||||
};
|
||||
}
|
||||
|
||||
// Multi share actions do not send out a push for each cipher, we need to send a general sync here
|
||||
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &mut conn).await;
|
||||
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -991,7 +985,7 @@ async fn share_cipher_by_uuid(
|
||||
cipher_id: &CipherId,
|
||||
data: ShareCipherData,
|
||||
headers: &Headers,
|
||||
conn: &mut DbConn,
|
||||
conn: &DbConn,
|
||||
nt: &Notify<'_>,
|
||||
override_ut: Option<UpdateType>,
|
||||
) -> JsonResult {
|
||||
@@ -1050,17 +1044,17 @@ async fn get_attachment(
|
||||
cipher_id: CipherId,
|
||||
attachment_id: AttachmentId,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
) -> JsonResult {
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else {
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else {
|
||||
err!("Cipher doesn't exist")
|
||||
};
|
||||
|
||||
if !cipher.is_accessible_to_user(&headers.user.uuid, &mut conn).await {
|
||||
if !cipher.is_accessible_to_user(&headers.user.uuid, &conn).await {
|
||||
err!("Cipher is not accessible")
|
||||
}
|
||||
|
||||
match Attachment::find_by_id(&attachment_id, &mut conn).await {
|
||||
match Attachment::find_by_id(&attachment_id, &conn).await {
|
||||
Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host).await?)),
|
||||
Some(_) => err!("Attachment doesn't belong to cipher"),
|
||||
None => err!("Attachment doesn't exist"),
|
||||
@@ -1090,13 +1084,13 @@ async fn post_attachment_v2(
|
||||
cipher_id: CipherId,
|
||||
data: Json<AttachmentRequestData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
) -> JsonResult {
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else {
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else {
|
||||
err!("Cipher doesn't exist")
|
||||
};
|
||||
|
||||
if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await {
|
||||
if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await {
|
||||
err!("Cipher is not write accessible")
|
||||
}
|
||||
|
||||
@@ -1109,7 +1103,7 @@ async fn post_attachment_v2(
|
||||
let attachment_id = crypto::generate_attachment_id();
|
||||
let attachment =
|
||||
Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.file_name, file_size, Some(data.key));
|
||||
attachment.save(&mut conn).await.expect("Error saving attachment");
|
||||
attachment.save(&conn).await.expect("Error saving attachment");
|
||||
|
||||
let url = format!("/ciphers/{}/attachment/{attachment_id}", cipher.uuid);
|
||||
let response_key = match data.admin_request {
|
||||
@@ -1122,7 +1116,7 @@ async fn post_attachment_v2(
|
||||
"attachmentId": attachment_id,
|
||||
"url": url,
|
||||
"fileUploadType": FileUploadType::Direct as i32,
|
||||
response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?,
|
||||
response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?,
|
||||
})))
|
||||
}
|
||||
|
||||
@@ -1145,7 +1139,7 @@ async fn save_attachment(
|
||||
cipher_id: CipherId,
|
||||
data: Form<UploadData<'_>>,
|
||||
headers: &Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> Result<(Cipher, DbConn), crate::error::Error> {
|
||||
let data = data.into_inner();
|
||||
@@ -1157,11 +1151,11 @@ async fn save_attachment(
|
||||
err!("Attachment size can't be negative")
|
||||
}
|
||||
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else {
|
||||
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else {
|
||||
err!("Cipher doesn't exist")
|
||||
};
|
||||
|
||||
if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await {
|
||||
if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await {
|
||||
err!("Cipher is not write accessible")
|
||||
}
|
||||
|
||||
@@ -1176,7 +1170,7 @@ async fn save_attachment(
|
||||
match CONFIG.user_attachment_limit() {
|
||||
Some(0) => err!("Attachments are disabled"),
|
||||
Some(limit_kb) => {
|
||||
let already_used = Attachment::size_by_user(user_id, &mut conn).await;
|
||||
let already_used = Attachment::size_by_user(user_id, &conn).await;
|
||||
let left = limit_kb
|
||||
.checked_mul(1024)
|
||||
.and_then(|l| l.checked_sub(already_used))
|
||||
@@ -1198,7 +1192,7 @@ async fn save_attachment(
|
||||
match CONFIG.org_attachment_limit() {
|
||||
Some(0) => err!("Attachments are disabled"),
|
||||
Some(limit_kb) => {
|
||||
let already_used = Attachment::size_by_org(org_id, &mut conn).await;
|
||||
let already_used = Attachment::size_by_org(org_id, &conn).await;
|
||||
let left = limit_kb
|
||||
.checked_mul(1024)
|
||||
.and_then(|l| l.checked_sub(already_used))
|
||||
@@ -1249,10 +1243,10 @@ async fn save_attachment(
|
||||
if size != attachment.file_size {
|
||||
// Update the attachment with the actual file size.
|
||||
attachment.file_size = size;
|
||||
attachment.save(&mut conn).await.expect("Error updating attachment");
|
||||
attachment.save(&conn).await.expect("Error updating attachment");
|
||||
}
|
||||
} else {
|
||||
attachment.delete(&mut conn).await.ok();
|
||||
attachment.delete(&conn).await.ok();
|
||||
|
||||
err!(format!("Attachment size mismatch (expected within [{min_size}, {max_size}], got {size})"));
|
||||
}
|
||||
@@ -1272,7 +1266,7 @@ async fn save_attachment(
|
||||
}
|
||||
let attachment =
|
||||
Attachment::new(file_id.clone(), cipher_id.clone(), encrypted_filename.unwrap(), size, data.key);
|
||||
attachment.save(&mut conn).await.expect("Error saving attachment");
|
||||
attachment.save(&conn).await.expect("Error saving attachment");
|
||||
}
|
||||
|
||||
save_temp_file(PathType::Attachments, &format!("{cipher_id}/{file_id}"), data.data, true).await?;
|
||||
@@ -1280,10 +1274,10 @@ async fn save_attachment(
|
||||
nt.send_cipher_update(
|
||||
UpdateType::SyncCipherUpdate,
|
||||
&cipher,
|
||||
&cipher.update_users_revision(&mut conn).await,
|
||||
&cipher.update_users_revision(&conn).await,
|
||||
&headers.device,
|
||||
None,
|
||||
&mut conn,
|
||||
&conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -1295,7 +1289,7 @@ async fn save_attachment(
|
||||
&headers.user.uuid,
|
||||
headers.device.atype,
|
||||
&headers.ip.ip,
|
||||
&mut conn,
|
||||
&conn,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
@@ -1313,10 +1307,10 @@ async fn post_attachment_v2_data(
|
||||
attachment_id: AttachmentId,
|
||||
data: Form<UploadData<'_>>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> EmptyResult {
|
||||
let attachment = match Attachment::find_by_id(&attachment_id, &mut conn).await {
|
||||
let attachment = match Attachment::find_by_id(&attachment_id, &conn).await {
|
||||
Some(attachment) if cipher_id == attachment.cipher_uuid => Some(attachment),
|
||||
Some(_) => err!("Attachment doesn't belong to cipher"),
|
||||
None => err!("Attachment doesn't exist"),
|
||||
@@ -1340,9 +1334,9 @@ async fn post_attachment(
|
||||
// the attachment database record as well as saving the data to disk.
|
||||
let attachment = None;
|
||||
|
||||
let (cipher, mut conn) = save_attachment(attachment, cipher_id, data, &headers, conn, nt).await?;
|
||||
let (cipher, conn) = save_attachment(attachment, cipher_id, data, &headers, conn, nt).await?;
|
||||
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?))
|
||||
}
|
||||
|
||||
#[post("/ciphers/<cipher_id>/attachment-admin", format = "multipart/form-data", data = "<data>")]
|
||||
@@ -1362,10 +1356,10 @@ async fn post_attachment_share(
|
||||
attachment_id: AttachmentId,
|
||||
data: Form<UploadData<'_>>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
_delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &mut conn, &nt).await?;
|
||||
_delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &conn, &nt).await?;
|
||||
post_attachment(cipher_id, data, headers, conn, nt).await
|
||||
}
|
||||
|
||||
@@ -1396,10 +1390,10 @@ async fn delete_attachment(
|
||||
cipher_id: CipherId,
|
||||
attachment_id: AttachmentId,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
_delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &mut conn, &nt).await
|
||||
_delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &conn, &nt).await
|
||||
}
|
||||
|
||||
#[delete("/ciphers/<cipher_id>/attachment/<attachment_id>/admin")]
|
||||
@@ -1407,55 +1401,45 @@ async fn delete_attachment_admin(
|
||||
cipher_id: CipherId,
|
||||
attachment_id: AttachmentId,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
_delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &mut conn, &nt).await
|
||||
_delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &conn, &nt).await
|
||||
}
|
||||
|
||||
#[post("/ciphers/<cipher_id>/delete")]
|
||||
async fn delete_cipher_post(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
_delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &CipherDeleteOptions::HardSingle, &nt).await
|
||||
async fn delete_cipher_post(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
_delete_cipher_by_uuid(&cipher_id, &headers, &conn, &CipherDeleteOptions::HardSingle, &nt).await
|
||||
// permanent delete
|
||||
}
|
||||
|
||||
#[post("/ciphers/<cipher_id>/delete-admin")]
|
||||
async fn delete_cipher_post_admin(
|
||||
cipher_id: CipherId,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> EmptyResult {
|
||||
_delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &CipherDeleteOptions::HardSingle, &nt).await
|
||||
async fn delete_cipher_post_admin(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
_delete_cipher_by_uuid(&cipher_id, &headers, &conn, &CipherDeleteOptions::HardSingle, &nt).await
|
||||
// permanent delete
|
||||
}
|
||||
|
||||
#[put("/ciphers/<cipher_id>/delete")]
|
||||
async fn delete_cipher_put(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
_delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &CipherDeleteOptions::SoftSingle, &nt).await
|
||||
async fn delete_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
_delete_cipher_by_uuid(&cipher_id, &headers, &conn, &CipherDeleteOptions::SoftSingle, &nt).await
|
||||
// soft delete
|
||||
}
|
||||
|
||||
#[put("/ciphers/<cipher_id>/delete-admin")]
|
||||
async fn delete_cipher_put_admin(
|
||||
cipher_id: CipherId,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> EmptyResult {
|
||||
_delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &CipherDeleteOptions::SoftSingle, &nt).await
|
||||
async fn delete_cipher_put_admin(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
_delete_cipher_by_uuid(&cipher_id, &headers, &conn, &CipherDeleteOptions::SoftSingle, &nt).await
|
||||
// soft delete
|
||||
}
|
||||
|
||||
#[delete("/ciphers/<cipher_id>")]
|
||||
async fn delete_cipher(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
_delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &CipherDeleteOptions::HardSingle, &nt).await
|
||||
async fn delete_cipher(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
_delete_cipher_by_uuid(&cipher_id, &headers, &conn, &CipherDeleteOptions::HardSingle, &nt).await
|
||||
// permanent delete
|
||||
}
|
||||
|
||||
#[delete("/ciphers/<cipher_id>/admin")]
|
||||
async fn delete_cipher_admin(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
_delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &CipherDeleteOptions::HardSingle, &nt).await
|
||||
async fn delete_cipher_admin(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
_delete_cipher_by_uuid(&cipher_id, &headers, &conn, &CipherDeleteOptions::HardSingle, &nt).await
|
||||
// permanent delete
|
||||
}
|
||||
|
||||
@@ -1526,38 +1510,33 @@ async fn delete_cipher_selected_put_admin(
|
||||
}
|
||||
|
||||
#[put("/ciphers/<cipher_id>/restore")]
|
||||
async fn restore_cipher_put(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
_restore_cipher_by_uuid(&cipher_id, &headers, false, &mut conn, &nt).await
|
||||
async fn restore_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
_restore_cipher_by_uuid(&cipher_id, &headers, false, &conn, &nt).await
|
||||
}
|
||||
|
||||
#[put("/ciphers/<cipher_id>/restore-admin")]
|
||||
async fn restore_cipher_put_admin(
|
||||
cipher_id: CipherId,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
_restore_cipher_by_uuid(&cipher_id, &headers, false, &mut conn, &nt).await
|
||||
async fn restore_cipher_put_admin(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
_restore_cipher_by_uuid(&cipher_id, &headers, false, &conn, &nt).await
|
||||
}
|
||||
|
||||
#[put("/ciphers/restore-admin", data = "<data>")]
|
||||
async fn restore_cipher_selected_admin(
|
||||
data: Json<CipherIdsData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
_restore_multiple_ciphers(data, &headers, &mut conn, &nt).await
|
||||
_restore_multiple_ciphers(data, &headers, &conn, &nt).await
|
||||
}
|
||||
|
||||
#[put("/ciphers/restore", data = "<data>")]
|
||||
async fn restore_cipher_selected(
|
||||
data: Json<CipherIdsData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
_restore_multiple_ciphers(data, &headers, &mut conn, &nt).await
|
||||
_restore_multiple_ciphers(data, &headers, &conn, &nt).await
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -1571,14 +1550,14 @@ struct MoveCipherData {
|
||||
async fn move_cipher_selected(
|
||||
data: Json<MoveCipherData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> EmptyResult {
|
||||
let data = data.into_inner();
|
||||
let user_id = &headers.user.uuid;
|
||||
|
||||
if let Some(ref folder_id) = data.folder_id {
|
||||
if Folder::find_by_uuid_and_user(folder_id, user_id, &mut conn).await.is_none() {
|
||||
if Folder::find_by_uuid_and_user(folder_id, user_id, &conn).await.is_none() {
|
||||
err!("Invalid folder", "Folder does not exist or belongs to another user");
|
||||
}
|
||||
}
|
||||
@@ -1588,10 +1567,10 @@ async fn move_cipher_selected(
|
||||
|
||||
// TODO: Convert this to use a single query (or at least less) to update all items
|
||||
// Find all ciphers a user has access to, all others will be ignored
|
||||
let accessible_ciphers = Cipher::find_by_user_and_ciphers(user_id, &data.ids, &mut conn).await;
|
||||
let accessible_ciphers = Cipher::find_by_user_and_ciphers(user_id, &data.ids, &conn).await;
|
||||
let accessible_ciphers_count = accessible_ciphers.len();
|
||||
for cipher in accessible_ciphers {
|
||||
cipher.move_to_folder(data.folder_id.clone(), user_id, &mut conn).await?;
|
||||
cipher.move_to_folder(data.folder_id.clone(), user_id, &conn).await?;
|
||||
if cipher_count == 1 {
|
||||
single_cipher = Some(cipher);
|
||||
}
|
||||
@@ -1604,12 +1583,12 @@ async fn move_cipher_selected(
|
||||
std::slice::from_ref(user_id),
|
||||
&headers.device,
|
||||
None,
|
||||
&mut conn,
|
||||
&conn,
|
||||
)
|
||||
.await;
|
||||
} else {
|
||||
// Multi move actions do not send out a push for each cipher, we need to send a general sync here
|
||||
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &mut conn).await;
|
||||
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await;
|
||||
}
|
||||
|
||||
if cipher_count != accessible_ciphers_count {
|
||||
@@ -1642,23 +1621,23 @@ async fn delete_all(
|
||||
organization: Option<OrganizationIdData>,
|
||||
data: Json<PasswordOrOtpData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> EmptyResult {
|
||||
let data: PasswordOrOtpData = data.into_inner();
|
||||
let mut user = headers.user;
|
||||
|
||||
data.validate(&user, true, &mut conn).await?;
|
||||
data.validate(&user, true, &conn).await?;
|
||||
|
||||
match organization {
|
||||
Some(org_data) => {
|
||||
// Organization ID in query params, purging organization vault
|
||||
match Membership::find_by_user_and_org(&user.uuid, &org_data.org_id, &mut conn).await {
|
||||
match Membership::find_by_user_and_org(&user.uuid, &org_data.org_id, &conn).await {
|
||||
None => err!("You don't have permission to purge the organization vault"),
|
||||
Some(member) => {
|
||||
if member.atype == MembershipType::Owner {
|
||||
Cipher::delete_all_by_organization(&org_data.org_id, &mut conn).await?;
|
||||
nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &mut conn).await;
|
||||
Cipher::delete_all_by_organization(&org_data.org_id, &conn).await?;
|
||||
nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await;
|
||||
|
||||
log_event(
|
||||
EventType::OrganizationPurgedVault as i32,
|
||||
@@ -1667,7 +1646,7 @@ async fn delete_all(
|
||||
&user.uuid,
|
||||
headers.device.atype,
|
||||
&headers.ip.ip,
|
||||
&mut conn,
|
||||
&conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -1681,17 +1660,17 @@ async fn delete_all(
|
||||
None => {
|
||||
// No organization ID in query params, purging user vault
|
||||
// Delete ciphers and their attachments
|
||||
for cipher in Cipher::find_owned_by_user(&user.uuid, &mut conn).await {
|
||||
cipher.delete(&mut conn).await?;
|
||||
for cipher in Cipher::find_owned_by_user(&user.uuid, &conn).await {
|
||||
cipher.delete(&conn).await?;
|
||||
}
|
||||
|
||||
// Delete folders
|
||||
for f in Folder::find_by_user(&user.uuid, &mut conn).await {
|
||||
f.delete(&mut conn).await?;
|
||||
for f in Folder::find_by_user(&user.uuid, &conn).await {
|
||||
f.delete(&conn).await?;
|
||||
}
|
||||
|
||||
user.update_revision(&mut conn).await?;
|
||||
nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &mut conn).await;
|
||||
user.update_revision(&conn).await?;
|
||||
nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1709,7 +1688,7 @@ pub enum CipherDeleteOptions {
|
||||
async fn _delete_cipher_by_uuid(
|
||||
cipher_id: &CipherId,
|
||||
headers: &Headers,
|
||||
conn: &mut DbConn,
|
||||
conn: &DbConn,
|
||||
delete_options: &CipherDeleteOptions,
|
||||
nt: &Notify<'_>,
|
||||
) -> EmptyResult {
|
||||
@@ -1775,20 +1754,20 @@ struct CipherIdsData {
|
||||
async fn _delete_multiple_ciphers(
|
||||
data: Json<CipherIdsData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
conn: DbConn,
|
||||
delete_options: CipherDeleteOptions,
|
||||
nt: Notify<'_>,
|
||||
) -> EmptyResult {
|
||||
let data = data.into_inner();
|
||||
|
||||
for cipher_id in data.ids {
|
||||
if let error @ Err(_) = _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &delete_options, &nt).await {
|
||||
if let error @ Err(_) = _delete_cipher_by_uuid(&cipher_id, &headers, &conn, &delete_options, &nt).await {
|
||||
return error;
|
||||
};
|
||||
}
|
||||
|
||||
// Multi delete actions do not send out a push for each cipher, we need to send a general sync here
|
||||
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &mut conn).await;
|
||||
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1797,7 +1776,7 @@ async fn _restore_cipher_by_uuid(
|
||||
cipher_id: &CipherId,
|
||||
headers: &Headers,
|
||||
multi_restore: bool,
|
||||
conn: &mut DbConn,
|
||||
conn: &DbConn,
|
||||
nt: &Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let Some(mut cipher) = Cipher::find_by_uuid(cipher_id, conn).await else {
|
||||
@@ -1842,7 +1821,7 @@ async fn _restore_cipher_by_uuid(
|
||||
async fn _restore_multiple_ciphers(
|
||||
data: Json<CipherIdsData>,
|
||||
headers: &Headers,
|
||||
conn: &mut DbConn,
|
||||
conn: &DbConn,
|
||||
nt: &Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let data = data.into_inner();
|
||||
@@ -1869,7 +1848,7 @@ async fn _delete_cipher_attachment_by_id(
|
||||
cipher_id: &CipherId,
|
||||
attachment_id: &AttachmentId,
|
||||
headers: &Headers,
|
||||
conn: &mut DbConn,
|
||||
conn: &DbConn,
|
||||
nt: &Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let Some(attachment) = Attachment::find_by_id(attachment_id, conn).await else {
|
||||
@@ -1938,7 +1917,7 @@ pub enum CipherSyncType {
|
||||
}
|
||||
|
||||
impl CipherSyncData {
|
||||
pub async fn new(user_id: &UserId, sync_type: CipherSyncType, conn: &mut DbConn) -> Self {
|
||||
pub async fn new(user_id: &UserId, sync_type: CipherSyncType, conn: &DbConn) -> Self {
|
||||
let cipher_folders: HashMap<CipherId, FolderId>;
|
||||
let cipher_favorites: HashSet<CipherId>;
|
||||
match sync_type {
|
||||
|
||||