Some small Labrinth refactors and fixes (#3698)
* chore(labrinth): fix typos, simplify out `remove_duplicates` func
* fix(labrinth): implement `capitalize_first` so that it can't panic on wide chars
* chore(labrinth): refactor out unneeded clone highlighted by nightly Clippy lints
* chore(labrinth): simplify `capitalize_first` implementation
* fix(labrinth): preserve ordering when deduplicating project field values. This addresses an unintended behavior change in 157647faf2778c74096e624aeef9cdb79539489c.
* fix(labrinth/tests): make `index_swaps` test run successfully. (I wonder why we don't run these more often...)
* refactor: rename `.env.example` files to `.env.local`, make local envs more consistent between frontend and backend
* chore(labrinth/.env.local): proper email verification and password reset paths
This commit is contained in:
parent: be37f077d3
commit: a9cfc37aac
@@ -7,7 +7,7 @@ This project is part of our [monorepo](https://github.com/modrinth/code). You ca

 [labrinth] is the Rust-based backend serving Modrinth's API with the help of the [Actix](https://actix.rs) framework. To get started with a labrinth instance, install docker, docker-compose (which comes with Docker), and [Rust]. The initial startup can be done simply with the command `docker-compose up`, or with `docker compose up` (Compose V2 and later). That will deploy a PostgreSQL database on port 5432 and a MeiliSearch instance on port 7700. To run the API itself, you'll need to use the `cargo run` command, this will deploy the API on port 8000.

-To get a basic configuration, copy the `.env.example` file to `.env`. Now, you'll have to install the sqlx CLI, which can be done with cargo:
+To get a basic configuration, copy the `.env.local` file to `.env`. Now, you'll have to install the sqlx CLI, which can be done with cargo:

 ```bash
 cargo install --git https://github.com/launchbadge/sqlx sqlx-cli --no-default-features --features postgres,rustls
apps/frontend/.env.example (deleted; renamed to .env.local below)
@@ -1,3 +0,0 @@
-BASE_URL=https://api.modrinth.com/v2/
-BROWSER_BASE_URL=https://api.modrinth.com/v2/
-PYRO_BASE_URL=https://archon.modrinth.com/
apps/frontend/.env.local (new file, 5 lines)
@@ -0,0 +1,5 @@
+BASE_URL=http://127.0.0.1:8000/v2/
+BROWSER_BASE_URL=http://127.0.0.1:8000/v2/
+PYRO_BASE_URL=https://staging-archon.modrinth.com
+PROD_OVERRIDE=true
+
apps/labrinth/.env.local
@@ -2,7 +2,7 @@ DEBUG=true
 RUST_LOG=info,sqlx::query=warn
 SENTRY_DSN=none

-SITE_URL=https://modrinth.com
+SITE_URL=http://localhost:3000
 CDN_URL=https://staging-cdn.modrinth.com
 LABRINTH_ADMIN_KEY=feedbeef
 RATE_LIMIT_IGNORE_KEY=feedbeef
@@ -87,8 +87,8 @@ SMTP_HOST=none
 SMTP_PORT=465
 SMTP_TLS=tls

-SITE_VERIFY_EMAIL_PATH=none
-SITE_RESET_PASSWORD_PATH=none
+SITE_VERIFY_EMAIL_PATH=auth/verify-email
+SITE_RESET_PASSWORD_PATH=auth/reset-password
 SITE_BILLING_PATH=none

 SENDY_URL=none
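The two path settings above are what the last commit-message bullet refers to: instead of `none`, the backend now has real frontend routes for email verification and password reset. A hypothetical sketch of how such settings could be combined with `SITE_URL` to build a link; the function and parameter names here are illustrative, not labrinth's actual API:

```rust
// Hypothetical sketch (not labrinth code): joining SITE_URL with
// SITE_VERIFY_EMAIL_PATH to form the link a verification email carries.
fn verification_link(site_url: &str, verify_path: &str, token: &str) -> String {
    format!(
        "{}/{}?token={}",
        site_url.trim_end_matches('/'),
        verify_path.trim_matches('/'),
        token
    )
}

fn main() {
    let link = verification_link("http://localhost:3000", "auth/verify-email", "abc123");
    assert_eq!(link, "http://localhost:3000/auth/verify-email?token=abc123");
}
```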
@@ -1,4 +1,5 @@
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
+use std::mem;

 use crate::database::models::loader_fields::VersionField;
 use crate::database::models::project_item::{LinkUrl, ProjectQueryResult};
@@ -8,6 +9,7 @@ use crate::models::ids::{
 };
 use ariadne::ids::UserId;
 use chrono::{DateTime, Utc};
+use itertools::Itertools;
 use serde::{Deserialize, Serialize};
 use validator::Validate;
@@ -95,19 +97,6 @@ pub struct Project {
     pub fields: HashMap<String, Vec<serde_json::Value>>,
 }

-fn remove_duplicates(values: Vec<serde_json::Value>) -> Vec<serde_json::Value> {
-    let mut seen = HashSet::new();
-    values
-        .into_iter()
-        .filter(|value| {
-            // Convert the JSON value to a string for comparison
-            let as_string = value.to_string();
-            // Check if the string is already in the set
-            seen.insert(as_string)
-        })
-        .collect()
-}
-
 // This is a helper function to convert a list of VersionFields into a HashMap of field name to vecs of values
 // This allows for removal of duplicates
 pub fn from_duplicate_version_fields(
@@ -132,9 +121,9 @@ pub fn from_duplicate_version_fields(
         }
     }

-    // Remove duplicates by converting to string and back
+    // Remove duplicates
     for (_, v) in fields.iter_mut() {
-        *v = remove_duplicates(v.clone());
+        *v = mem::take(v).into_iter().unique().collect_vec();
     }
     fields
 }
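The replacement line leans on two standard tools: `std::mem::take` swaps the `Vec` out from behind the `&mut` reference (leaving an empty one behind) so it can be consumed by value without the old `v.clone()`, and `Itertools::unique` (which requires `Clone + Eq + Hash` on the element type) drops duplicates while keeping each element's first occurrence in its original position, which is the ordering guarantee this fix is about. A minimal standalone sketch of both behaviors, not labrinth code:

```rust
use itertools::Itertools;
use std::mem;

fn main() {
    // `unique()` keeps the first occurrence of each element, in order.
    let values = vec!["fabric", "forge", "fabric", "quilt", "forge"];
    let deduped: Vec<_> = values.into_iter().unique().collect_vec();
    assert_eq!(deduped, ["fabric", "forge", "quilt"]);

    // `mem::take` moves the Vec out of a `&mut` slot, leaving an empty Vec,
    // so the contents can be consumed by value without cloning them.
    let mut slot = vec![1, 1, 2, 3, 2];
    let taken = mem::take(&mut slot);
    assert!(slot.is_empty());
    assert_eq!(taken.into_iter().unique().collect_vec(), [1, 2, 3]);
}
```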
@@ -624,7 +613,7 @@ pub struct Version {
     pub downloads: u32,
     /// The type of the release - `Alpha`, `Beta`, or `Release`.
     pub version_type: VersionType,
-    /// The status of tne version
+    /// The status of the version
     pub status: VersionStatus,
     /// The requested status of the version (used for scheduling)
     pub requested_status: Option<VersionStatus>,
@@ -880,7 +869,7 @@ impl std::fmt::Display for DependencyType {
 }

 impl DependencyType {
-    // These are constant, so this can remove unneccessary allocations (`to_string`)
+    // These are constant, so this can remove unnecessary allocations (`to_string`)
     pub fn as_str(&self) -> &'static str {
         match self {
             DependencyType::Required => "required",
@@ -264,11 +264,11 @@ pub fn convert_side_types_v2_bools(
 }

 pub fn capitalize_first(input: &str) -> String {
-    let mut result = input.to_owned();
-    if let Some(first_char) = result.get_mut(0..1) {
-        first_char.make_ascii_uppercase();
-    }
-    result
+    input
+        .chars()
+        .enumerate()
+        .map(|(i, c)| if i == 0 { c.to_ascii_uppercase() } else { c })
+        .collect()
 }

 #[cfg(test)]
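Why the rewrite is safe on wide characters: the old version operated on the first byte of the string, and byte index 1 is not a char boundary when the first character is multi-byte, so `get_mut(0..1)` returned `None` there (and direct slicing like `&mut result[0..1]` would panic). Iterating over `chars` never splits a code point. A standalone copy of the new shape for illustration, assuming the same `to_ascii_uppercase` semantics, which leave non-ASCII characters unchanged:

```rust
// Char-based capitalization: safe for any UTF-8 input.
fn capitalize_first(input: &str) -> String {
    input
        .chars()
        .enumerate()
        .map(|(i, c)| if i == 0 { c.to_ascii_uppercase() } else { c })
        .collect()
}

fn main() {
    assert_eq!(capitalize_first("forge"), "Forge");
    // First char 'é' is two bytes long: byte slicing at 0..1 would miss the
    // char boundary, while chars() handles it. `to_ascii_uppercase`
    // deliberately leaves non-ASCII characters as-is.
    assert_eq!(capitalize_first("épée"), "épée");
    assert_eq!(capitalize_first(""), "");
}
```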
@@ -52,10 +52,9 @@ pub async fn get_version_from_hash(
         .map(|x| x.1)
         .ok();
     let hash = info.into_inner().0.to_lowercase();
-    let algorithm = hash_query
-        .algorithm
-        .clone()
-        .unwrap_or_else(|| default_algorithm_from_hashes(&[hash.clone()]));
+    let algorithm = hash_query.algorithm.clone().unwrap_or_else(|| {
+        default_algorithm_from_hashes(std::slice::from_ref(&hash))
+    });
     let file = database::models::DBVersion::get_file_from_hash(
         algorithm,
         hash,
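This is the "unneeded clone" from the commit message: the callee only needs a slice to inspect, and `std::slice::from_ref` turns a `&String` into a one-element `&[String]` without allocating, where `&[hash.clone()]` had to copy the string into a temporary array. The same pattern repeats in the three hunks below. A minimal sketch; `shortest_hash_len` is a hypothetical stand-in for `default_algorithm_from_hashes`:

```rust
// Any function taking &[String] works here; this one is just for the demo.
fn shortest_hash_len(hashes: &[String]) -> Option<usize> {
    hashes.iter().map(|h| h.len()).min()
}

fn main() {
    let hash = String::from("deadbeef");
    // Before: clone the String just to build a temporary one-element array.
    assert_eq!(shortest_hash_len(&[hash.clone()]), Some(8));
    // After: view the existing String as a slice of length one, clone-free.
    assert_eq!(shortest_hash_len(std::slice::from_ref(&hash)), Some(8));
}
```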
@@ -140,10 +139,9 @@ pub async fn get_update_from_hash(
     .ok();
     let hash = info.into_inner().0.to_lowercase();
     if let Some(file) = database::models::DBVersion::get_file_from_hash(
-        hash_query
-            .algorithm
-            .clone()
-            .unwrap_or_else(|| default_algorithm_from_hashes(&[hash.clone()])),
+        hash_query.algorithm.clone().unwrap_or_else(|| {
+            default_algorithm_from_hashes(std::slice::from_ref(&hash))
+        }),
         hash,
         hash_query.version_id.map(|x| x.into()),
         &**pool,
@@ -577,10 +575,9 @@ pub async fn delete_file(
     .1;

     let hash = info.into_inner().0.to_lowercase();
-    let algorithm = hash_query
-        .algorithm
-        .clone()
-        .unwrap_or_else(|| default_algorithm_from_hashes(&[hash.clone()]));
+    let algorithm = hash_query.algorithm.clone().unwrap_or_else(|| {
+        default_algorithm_from_hashes(std::slice::from_ref(&hash))
+    });
     let file = database::models::DBVersion::get_file_from_hash(
         algorithm.clone(),
         hash,
@@ -709,10 +706,9 @@ pub async fn download_version(
     .ok();

     let hash = info.into_inner().0.to_lowercase();
-    let algorithm = hash_query
-        .algorithm
-        .clone()
-        .unwrap_or_else(|| default_algorithm_from_hashes(&[hash.clone()]));
+    let algorithm = hash_query.algorithm.clone().unwrap_or_else(|| {
+        default_algorithm_from_hashes(std::slice::from_ref(&hash))
+    });
     let file = database::models::DBVersion::get_file_from_hash(
         algorithm.clone(),
         hash,
@@ -151,7 +151,7 @@ async fn index_swaps() {
     test_env.api.remove_project("alpha", USER_USER_PAT).await;
     assert_status!(&resp, StatusCode::NO_CONTENT);

-    // We should not get any results, because the project has been deleted
+    // Deletions should not be indexed immediately
     let projects = test_env
         .api
         .search_deserialized(
@@ -160,7 +160,8 @@ async fn index_swaps() {
             USER_USER_PAT,
         )
         .await;
-    assert_eq!(projects.total_hits, 0);
+    assert_eq!(projects.total_hits, 1);
+    assert!(projects.hits[0].slug.as_ref().unwrap().contains("alpha"));

     // But when we reindex, it should be gone
     let resp = test_env.api.reset_search_index().await;