docs

Commit ea92a376ed (parent adda2b30f9), 2025-08-02 00:18:09 +03:00
32 changed files with 3360 additions and 280 deletions

View File

@@ -1,22 +1,22 @@
use crate::s3_utils::get_s3_filelist;
use actix_web::error::ErrorInternalServerError;
use aws_config::BehaviorVersion;
use aws_sdk_s3::{config::Credentials, Client as S3Client};
use log::warn;
use redis::{aio::MultiplexedConnection, AsyncCommands, Client as RedisClient};
use std::env;
use log::warn;
use crate::s3_utils::get_s3_filelist;
#[derive(Clone)]
pub struct AppState {
pub redis: MultiplexedConnection,
pub storj_client: S3Client,
pub aws_client: S3Client,
pub bucket: String
pub bucket: String,
}
const PATH_MAPPING_KEY: &str = "filepath_mapping"; // Key for storing the filepath mapping
const WEEK_SECONDS: u64 = 604800;
// Remove the TTL for the quota - it must be permanent per user
const QUOTA_TTL: u64 = 0; // 0 means no TTL
impl AppState {
/// Initializes a new application state.
@@ -40,7 +40,7 @@ impl AppState {
let aws_access_key = env::var("AWS_ACCESS_KEY").expect("AWS_ACCESS_KEY must be set");
let aws_secret_key = env::var("AWS_SECRET_KEY").expect("AWS_SECRET_KEY must be set");
let aws_endpoint =
env::var("AWS_END_POINT").unwrap_or_else(|_| "https://s3.amazonaws.com".to_string());
env::var("AWS_END_POINT").unwrap_or_else(|_| "https://s3.amazonaws.com".to_string());
// Configure the S3 client for Storj
let storj_config = aws_config::defaults(BehaviorVersion::latest())
@@ -71,14 +71,14 @@ impl AppState {
))
.load()
.await;
let aws_client = S3Client::new(&aws_config);
let app_state = AppState {
redis: redis_connection,
storj_client,
aws_client,
bucket
bucket,
};
// Cache the AWS file list at application startup
@@ -91,7 +91,7 @@ impl AppState {
pub async fn cache_filelist(&self) {
warn!("caching AWS filelist...");
let mut redis = self.redis.clone();
// Request the file list from Storj S3
let filelist = get_s3_filelist(&self.aws_client, &self.bucket).await;
@@ -105,7 +105,7 @@ impl AppState {
warn!("cached {} files", filelist.len());
}
/// Gets the path for a key (file name) from Redis.
pub async fn get_path(&self, filename: &str) -> Result<Option<String>, actix_web::Error> {
let mut redis = self.redis.clone();
@@ -133,9 +133,9 @@ impl AppState {
let quota: u64 = redis.get(&quota_key).await.unwrap_or(0);
if quota == 0 {
// If the quota is not found, set it to 0 bytes and set a one-week TTL
// If the quota is not found, set it to 0 bytes without a TTL (permanent quota)
redis
.set_ex::<&str, u64, ()>(&quota_key, 0, WEEK_SECONDS)
.set::<&str, u64, ()>(&quota_key, 0)
.await
.map_err(|_| {
ErrorInternalServerError("Failed to set initial user quota in Redis")
@@ -161,10 +161,10 @@ impl AppState {
ErrorInternalServerError("Failed to check if user quota exists in Redis")
})?;
// If the key does not exist, create it with the initial value and set a TTL
// If the key does not exist, create it with the initial value and no TTL
if !exists {
redis
.set_ex::<_, u64, ()>(&quota_key, bytes, WEEK_SECONDS)
.set::<_, u64, ()>(&quota_key, bytes)
.await
.map_err(|_| {
ErrorInternalServerError("Failed to set initial user quota in Redis")
@@ -180,4 +180,42 @@ impl AppState {
Ok(new_quota)
}
/// Sets the user quota in bytes (allows increasing or decreasing it)
pub async fn set_user_quota(&self, user_id: &str, bytes: u64) -> Result<u64, actix_web::Error> {
let mut redis = self.redis.clone();
let quota_key = format!("quota:{}", user_id);
// Set the new quota value
redis
.set::<_, u64, ()>(&quota_key, bytes)
.await
.map_err(|_| ErrorInternalServerError("Failed to set user quota in Redis"))?;
Ok(bytes)
}
/// Increases the user quota by the given number of bytes
pub async fn increase_user_quota(
&self,
user_id: &str,
additional_bytes: u64,
) -> Result<u64, actix_web::Error> {
let mut redis = self.redis.clone();
let quota_key = format!("quota:{}", user_id);
// Get the current quota
let current_quota: u64 = redis.get(&quota_key).await.unwrap_or(0);
// Compute the new quota
let new_quota = current_quota + additional_bytes;
// Set the new value
redis
.set::<_, u64, ()>(&quota_key, new_quota)
.await
.map_err(|_| ErrorInternalServerError("Failed to increase user quota in Redis"))?;
Ok(new_quota)
}
}
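
For context, a minimal sketch of how a caller could combine the quota helpers after this change. The `can_upload` function below is hypothetical and not part of the commit; it only pairs `get_or_create_quota` with the `MAX_USER_QUOTA_BYTES` limit defined in `handlers`.

// Hypothetical illustration only: check the persistent (no-TTL) quota counter
// before accepting an upload. `state` is an AppState instance from this file.
async fn can_upload(
    state: &AppState,
    user_id: &str,
    file_size: u64,
) -> Result<bool, actix_web::Error> {
    // The key is now created with a plain SET, so usage accumulates per user
    // instead of expiring after WEEK_SECONDS.
    let used = state.get_or_create_quota(user_id).await?;
    Ok(used + file_size <= crate::handlers::MAX_USER_QUOTA_BYTES)
}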

View File

@@ -30,7 +30,7 @@ struct Claims {
/// Gets the user id from the token in the request header
pub async fn get_id_by_token(token: &str) -> Result<String, Box<dyn Error>> {
let auth_api_base = env::var("AUTH_URL")?;
let auth_api_base = env::var("CORE_URL")?;
let query_name = "validate_jwt_token";
let operation = "ValidateToken";
let mut headers = HeaderMap::new();

View File

@@ -1,9 +1,9 @@
use reqwest::Client as HTTPClient;
use serde::Deserialize;
use serde_json::json;
use std::{collections::HashMap, env, error::Error};
// Structure for deserializing the response from the authentication service
#[derive(Deserialize)]
struct CoreResponse {
@@ -46,11 +46,7 @@ pub async fn get_shout_by_id(shout_id: i32) -> Result<Shout, Box<dyn Error>> {
});
let client = HTTPClient::new();
let response = client
.post(&api_base)
.json(&gql)
.send()
.await?;
let response = client.post(&api_base).json(&gql).send().await?;
if response.status().is_success() {
let core_response: CoreResponse = response.json().await?;
@@ -67,4 +63,4 @@ pub async fn get_shout_by_id(shout_id: i32) -> Result<Shout, Box<dyn Error>> {
response.status().to_string(),
)))
}
}

View File

@@ -1,21 +1,21 @@
mod proxy;
mod upload;
mod quota;
mod serve_file;
mod upload;
pub use proxy::proxy_handler;
pub use quota::{get_quota_handler, increase_quota_handler, set_quota_handler};
pub use upload::upload_handler;
// Per-user quota limit: 2 GB per week
pub const MAX_WEEK_BYTES: u64 = 2 * 1024 * 1024 * 1024;
// Total per-user quota limit: 5 GB
pub const MAX_USER_QUOTA_BYTES: u64 = 5 * 1024 * 1024 * 1024;
use actix_web::{HttpResponse, Result, HttpRequest};
use actix_web::{HttpRequest, HttpResponse, Result};
/// Handler for the root path /
pub async fn root_handler(req: HttpRequest) -> Result<HttpResponse> {
match req.method().as_str() {
"GET" => Ok(HttpResponse::Ok()
.content_type("text/plain")
.body("ok")),
_ => Ok(HttpResponse::MethodNotAllowed().finish())
"GET" => Ok(HttpResponse::Ok().content_type("text/plain").body("ok")),
_ => Ok(HttpResponse::MethodNotAllowed().finish()),
}
}

View File

@@ -4,9 +4,9 @@ use log::{error, warn};
use crate::app_state::AppState;
use crate::handlers::serve_file::serve_file;
use crate::lookup::{find_file_by_pattern, get_mime_type};
use crate::s3_utils::{check_file_exists, load_file_from_s3, upload_to_s3};
use crate::thumbnail::{find_closest_width, parse_file_path, thumbdata_save};
use crate::lookup::{find_file_by_pattern, get_mime_type};
/// Handler for downloading a file and generating a thumbnail if it is not available.
pub async fn proxy_handler(
@@ -56,14 +56,22 @@ pub async fn proxy_handler(
warn!("content_type: {}", content_type);
let shout_id = match req.query_string().contains("s=") {
true => req.query_string().split("s=").collect::<Vec<&str>>().pop().unwrap_or(""),
false => ""
true => req
.query_string()
.split("s=")
.collect::<Vec<&str>>()
.pop()
.unwrap_or(""),
false => "",
};
return match state.get_path(&filekey).await {
Ok(Some(stored_path)) => {
warn!("Found stored path in DB: {}", stored_path);
warn!("Checking Storj path - bucket: {}, path: {}", state.bucket, stored_path);
warn!(
"Checking Storj path - bucket: {}, path: {}",
state.bucket, stored_path
);
if check_file_exists(&state.storj_client, &state.bucket, &stored_path).await? {
warn!("File exists in Storj: {}", stored_path);
if content_type.starts_with("image") {
@@ -73,20 +81,26 @@ pub async fn proxy_handler(
serve_file(&stored_path, &state, shout_id).await
} else {
let closest: u32 = find_closest_width(requested_width as u32);
warn!("Calculated closest width: {} for requested: {}", closest, requested_width);
warn!(
"Calculated closest width: {} for requested: {}",
closest, requested_width
);
let thumb_filename = &format!("{}_{}.{}", base_filename, closest, ext);
warn!("Generated thumbnail filename: {}", thumb_filename);
// Check whether the thumbnail already exists in Storj
match check_file_exists(&state.storj_client, &state.bucket, thumb_filename).await {
match check_file_exists(&state.storj_client, &state.bucket, thumb_filename)
.await
{
Ok(true) => {
warn!("serve existed thumb file: {}", thumb_filename);
serve_file(thumb_filename, &state, shout_id).await
},
}
Ok(false) => {
// The thumbnail does not exist: return the original and start thumbnail generation
let original_file = serve_file(&stored_path, &state, shout_id).await?;
let original_file =
serve_file(&stored_path, &state, shout_id).await?;
// Spawn an async task to generate the thumbnail
let state_clone = state.clone();
let stored_path_clone = stored_path.clone();
@@ -94,9 +108,22 @@ pub async fn proxy_handler(
let content_type_clone = content_type.to_string();
actix_web::rt::spawn(async move {
if let Ok(filedata) = load_file_from_s3(&state_clone.storj_client, &state_clone.bucket, &stored_path_clone).await {
if let Ok(filedata) = load_file_from_s3(
&state_clone.storj_client,
&state_clone.bucket,
&stored_path_clone,
)
.await
{
warn!("generate new thumb files: {}", stored_path_clone);
if let Err(e) = thumbdata_save(filedata, &state_clone, &filekey_clone, content_type_clone).await {
if let Err(e) = thumbdata_save(
filedata,
&state_clone,
&filekey_clone,
content_type_clone,
)
.await
{
error!("Failed to generate thumbnail: {}", e);
}
}
@@ -115,7 +142,10 @@ pub async fn proxy_handler(
serve_file(&stored_path, &state, shout_id).await
}
} else {
warn!("Attempting to load from AWS - bucket: {}, path: {}", state.bucket, stored_path);
warn!(
"Attempting to load from AWS - bucket: {}, path: {}",
state.bucket, stored_path
);
// Determine the media type from content_type
let media_type = content_type.split("/").next().unwrap_or("image");
@@ -124,7 +154,7 @@ pub async fn proxy_handler(
let paths_lower = vec![
stored_path.clone(),
// format!("production/{}", stored_path),
format!("production/{}/{}", media_type, stored_path)
format!("production/{}/{}", media_type, stored_path),
];
// Create the same paths, but with the extension's original case
@@ -133,7 +163,7 @@ pub async fn proxy_handler(
let paths_orig = vec![
orig_stored_path.clone(),
// format!("production/{}", orig_stored_path),
format!("production/{}/{}", media_type, orig_stored_path)
format!("production/{}/{}", media_type, orig_stored_path),
];
// Combine all the paths to check
@@ -143,22 +173,29 @@ pub async fn proxy_handler(
warn!("Trying AWS path: {}", path);
match load_file_from_s3(&state.aws_client, &state.bucket, &path).await {
Ok(filedata) => {
warn!("Successfully loaded file from AWS, size: {} bytes", filedata.len());
warn!(
"Successfully loaded file from AWS, size: {} bytes",
filedata.len()
);
warn!("Attempting to upload to Storj with key: {}", filekey);
if let Err(e) = upload_to_s3(
&state.storj_client,
&state.bucket,
&filekey,
filedata.clone(),
&content_type,
).await {
)
.await
{
error!("Failed to upload to Storj: {} - Error: {}", filekey, e);
} else {
warn!("Successfully uploaded to Storj: {}", filekey);
}
return Ok(HttpResponse::Ok().content_type(content_type).body(filedata));
return Ok(HttpResponse::Ok()
.content_type(content_type)
.body(filedata));
}
Err(err) => {
warn!("Failed to load from AWS path {}: {:?}", path, err);
@@ -174,18 +211,23 @@ pub async fn proxy_handler(
Ok(None) => {
warn!("No stored path found in DB for: {}", filekey);
let ct_parts = content_type.split("/").collect::<Vec<&str>>();
// Create two path variants: with the original extension and with it lower-cased
let filepath_lower = format!("production/{}/{}.{}", ct_parts[0], base_filename, ext);
let filepath_orig = format!("production/{}/{}.{}", ct_parts[0], base_filename, extension);
warn!("Looking up files with paths: {} or {} in bucket: {}",
filepath_lower, filepath_orig, state.bucket);
let filepath_orig =
format!("production/{}/{}.{}", ct_parts[0], base_filename, extension);
warn!(
"Looking up files with paths: {} or {} in bucket: {}",
filepath_lower, filepath_orig, state.bucket
);
// Check whether the file exists with both extension variants
let exists_in_aws_lower = check_file_exists(&state.aws_client, &state.bucket, &filepath_lower).await?;
let exists_in_aws_orig = check_file_exists(&state.aws_client, &state.bucket, &filepath_orig).await?;
let exists_in_aws_lower =
check_file_exists(&state.aws_client, &state.bucket, &filepath_lower).await?;
let exists_in_aws_orig =
check_file_exists(&state.aws_client, &state.bucket, &filepath_orig).await?;
let filepath = if exists_in_aws_orig {
filepath_orig
} else if exists_in_aws_lower {
@@ -195,15 +237,25 @@ pub async fn proxy_handler(
filepath_lower
};
let exists_in_storj = check_file_exists(&state.storj_client, &state.bucket, &filepath).await?;
let exists_in_storj =
check_file_exists(&state.storj_client, &state.bucket, &filepath).await?;
warn!("Checking existence in Storj: {}", exists_in_storj);
if exists_in_storj {
warn!("file {} exists in storj, try to generate thumbnails", filepath);
warn!(
"file {} exists in storj, try to generate thumbnails",
filepath
);
match load_file_from_s3(&state.aws_client, &state.bucket, &filepath).await {
Ok(filedata) => {
let _ = thumbdata_save(filedata.clone(), &state, &filekey, content_type.to_string()).await;
let _ = thumbdata_save(
filedata.clone(),
&state,
&filekey,
content_type.to_string(),
)
.await;
}
Err(e) => {
error!("cannot download {} from storj: {}", filekey, e);
@@ -214,16 +266,25 @@ pub async fn proxy_handler(
warn!("file {} does not exist in storj", filepath);
}
let exists_in_aws = check_file_exists(&state.aws_client, &state.bucket, &filepath).await?;
let exists_in_aws =
check_file_exists(&state.aws_client, &state.bucket, &filepath).await?;
warn!("Checking existence in AWS: {}", exists_in_aws);
if exists_in_aws {
warn!("File found in AWS, attempting to download: {}", filepath);
match load_file_from_s3(&state.aws_client, &state.bucket, &filepath).await {
Ok(filedata) => {
warn!("Successfully downloaded file from AWS, size: {} bytes", filedata.len());
let _ = thumbdata_save(filedata.clone(), &state, &filekey, content_type.to_string())
.await;
warn!(
"Successfully downloaded file from AWS, size: {} bytes",
filedata.len()
);
let _ = thumbdata_save(
filedata.clone(),
&state,
&filekey,
content_type.to_string(),
)
.await;
if let Err(e) = upload_to_s3(
&state.storj_client,
&state.bucket,
@@ -231,27 +292,31 @@ pub async fn proxy_handler(
filedata.clone(),
&content_type,
)
.await {
.await
{
warn!("cannot upload to storj: {}", e);
} else {
warn!("file {} uploaded to storj", filekey);
state.set_path(&filekey, &filepath).await;
}
Ok(HttpResponse::Ok().content_type(content_type).body(filedata))
},
}
Err(e) => {
error!("Failed to download from AWS: {} - Error: {}", filepath, e);
Err(ErrorInternalServerError(e))
},
}
}
} else {
error!("File not found in either Storj or AWS: {}", filepath);
Err(ErrorNotFound("file does not exist"))
}
},
}
Err(e) => {
error!("Database error while getting path: {} - Full error: {:?}", filekey, e);
error!(
"Database error while getting path: {} - Full error: {:?}",
filekey, e
);
Err(ErrorInternalServerError(e))
}
}
};
}
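
A small illustration, not taken from the commit, of the shout-id extraction used above: when the query string contains `s=`, the last segment of the split becomes the overlay id, otherwise it stays empty.

// Hypothetical values, mirroring the match on req.query_string() above.
let query = "w=600&s=123";
let shout_id = match query.contains("s=") {
    true => query.split("s=").collect::<Vec<&str>>().pop().unwrap_or(""),
    false => "",
};
assert_eq!(shout_id, "123"); // with no "s=" present, serve_file receives an empty id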

src/handlers/quota.rs (new file, 151 lines added)
View File

@@ -0,0 +1,151 @@
use actix_web::{web, HttpRequest, HttpResponse, Result};
use log::{error, warn};
use serde::{Deserialize, Serialize};
use crate::app_state::AppState;
use crate::auth::get_id_by_token;
#[derive(Deserialize)]
pub struct QuotaRequest {
pub user_id: String,
pub additional_bytes: Option<u64>,
pub new_quota_bytes: Option<u64>,
}
#[derive(Serialize)]
pub struct QuotaResponse {
pub user_id: String,
pub current_quota: u64,
pub max_quota: u64,
}
/// Handler for getting a user's quota information
pub async fn get_quota_handler(
req: HttpRequest,
state: web::Data<AppState>,
) -> Result<HttpResponse, actix_web::Error> {
// Check authorization
let token = req
.headers()
.get("Authorization")
.and_then(|header_value| header_value.to_str().ok());
if token.is_none() {
return Err(actix_web::error::ErrorUnauthorized("Unauthorized"));
}
let _admin_id = get_id_by_token(token.unwrap())
.await
.map_err(|_| actix_web::error::ErrorUnauthorized("Invalid token"))?;
// Get user_id from the query parameters
let user_id = req
.query_string()
.split("user_id=")
.nth(1)
.and_then(|s| s.split('&').next())
.ok_or_else(|| actix_web::error::ErrorBadRequest("Missing user_id parameter"))?;
// Get the user's current quota
let current_quota = state.get_or_create_quota(user_id).await?;
let response = QuotaResponse {
user_id: user_id.to_string(),
current_quota,
max_quota: crate::handlers::MAX_USER_QUOTA_BYTES,
};
Ok(HttpResponse::Ok().json(response))
}
/// Handler for increasing a user's quota
pub async fn increase_quota_handler(
req: HttpRequest,
quota_data: web::Json<QuotaRequest>,
state: web::Data<AppState>,
) -> Result<HttpResponse, actix_web::Error> {
// Check authorization
let token = req
.headers()
.get("Authorization")
.and_then(|header_value| header_value.to_str().ok());
if token.is_none() {
return Err(actix_web::error::ErrorUnauthorized("Unauthorized"));
}
let _admin_id = get_id_by_token(token.unwrap())
.await
.map_err(|_| actix_web::error::ErrorUnauthorized("Invalid token"))?;
let additional_bytes = quota_data
.additional_bytes
.ok_or_else(|| actix_web::error::ErrorBadRequest("Missing additional_bytes parameter"))?;
if additional_bytes == 0 {
return Err(actix_web::error::ErrorBadRequest(
"additional_bytes must be greater than 0",
));
}
// Increase the user's quota
let new_quota = state
.increase_user_quota(&quota_data.user_id, additional_bytes)
.await?;
warn!(
"Increased quota for user {} by {} bytes, new total: {} bytes",
quota_data.user_id, additional_bytes, new_quota
);
let response = QuotaResponse {
user_id: quota_data.user_id.clone(),
current_quota: new_quota,
max_quota: crate::handlers::MAX_USER_QUOTA_BYTES,
};
Ok(HttpResponse::Ok().json(response))
}
/// Handler for setting a user's quota
pub async fn set_quota_handler(
req: HttpRequest,
quota_data: web::Json<QuotaRequest>,
state: web::Data<AppState>,
) -> Result<HttpResponse, actix_web::Error> {
// Check authorization
let token = req
.headers()
.get("Authorization")
.and_then(|header_value| header_value.to_str().ok());
if token.is_none() {
return Err(actix_web::error::ErrorUnauthorized("Unauthorized"));
}
let _admin_id = get_id_by_token(token.unwrap())
.await
.map_err(|_| actix_web::error::ErrorUnauthorized("Invalid token"))?;
let new_quota_bytes = quota_data
.new_quota_bytes
.ok_or_else(|| actix_web::error::ErrorBadRequest("Missing new_quota_bytes parameter"))?;
// Set the user's new quota
let new_quota = state
.set_user_quota(&quota_data.user_id, new_quota_bytes)
.await?;
warn!(
"Set quota for user {} to {} bytes",
quota_data.user_id, new_quota
);
let response = QuotaResponse {
user_id: quota_data.user_id.clone(),
current_quota: new_quota,
max_quota: crate::handlers::MAX_USER_QUOTA_BYTES,
};
Ok(HttpResponse::Ok().json(response))
}
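
A hedged usage sketch for the new endpoints, not part of the commit: the service URL and token below are placeholders, and `reqwest`/`serde_json` are assumed available since `core.rs` already uses them.

use reqwest::Client as HTTPClient;
use serde_json::json;

// Hypothetical admin client: raise a user's quota via POST /quota/increase.
// The Authorization header must carry a token accepted by get_id_by_token.
async fn raise_quota(service_url: &str, admin_token: &str) -> Result<(), reqwest::Error> {
    let client = HTTPClient::new();
    let response = client
        .post(format!("{}/quota/increase", service_url))
        .header("Authorization", admin_token)
        // Matches QuotaRequest: user_id plus additional_bytes (1 GiB here).
        .json(&json!({ "user_id": "user-123", "additional_bytes": 1073741824u64 }))
        .send()
        .await?;
    println!("quota response: {}", response.status());
    Ok(())
}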

View File

@@ -6,7 +6,11 @@ use crate::overlay::generate_overlay;
use crate::s3_utils::check_file_exists;
/// Function for serving a file at the given path.
pub async fn serve_file(filepath: &str, state: &AppState, shout_id: &str) -> Result<HttpResponse, actix_web::Error> {
pub async fn serve_file(
filepath: &str,
state: &AppState,
shout_id: &str,
) -> Result<HttpResponse, actix_web::Error> {
if filepath.is_empty() {
return Err(ErrorInternalServerError("Filename is empty".to_string()));
}
@@ -14,7 +18,10 @@ pub async fn serve_file(filepath: &str, state: &AppState, shout_id: &str) -> Res
// Check that the file exists in Storj S3
let exists = check_file_exists(&state.storj_client, &state.bucket, &filepath).await?;
if !exists {
return Err(ErrorInternalServerError(format!("File {} not found in Storj", filepath)));
return Err(ErrorInternalServerError(format!(
"File {} not found in Storj",
filepath
)));
}
// Get the object from Storj S3
@@ -25,7 +32,9 @@ pub async fn serve_file(filepath: &str, state: &AppState, shout_id: &str) -> Res
.key(filepath)
.send()
.await
.map_err(|_| ErrorInternalServerError(format!("Failed to get {} object from Storj", filepath)))?;
.map_err(|_| {
ErrorInternalServerError(format!("Failed to get {} object from Storj", filepath))
})?;
let data: aws_sdk_s3::primitives::AggregatedBytes = get_object_output
.body
@@ -35,10 +44,9 @@ pub async fn serve_file(filepath: &str, state: &AppState, shout_id: &str) -> Res
let data_bytes = match shout_id.is_empty() {
true => data.into_bytes(),
false => generate_overlay(shout_id, data.into_bytes()).await?
false => generate_overlay(shout_id, data.into_bytes()).await?,
};
let mime_type = MimeGuess::from_path(&filepath).first_or_octet_stream();
Ok(HttpResponse::Ok()

View File

@@ -4,10 +4,10 @@ use log::{error, warn};
use crate::app_state::AppState;
use crate::auth::{get_id_by_token, user_added_file};
use crate::s3_utils::{self, upload_to_s3, generate_key_with_extension};
use crate::handlers::MAX_USER_QUOTA_BYTES;
use crate::lookup::store_file_info;
use crate::s3_utils::{self, generate_key_with_extension, upload_to_s3};
use futures::TryStreamExt;
use crate::handlers::MAX_WEEK_BYTES;
// use crate::thumbnail::convert_heic_to_jpeg;
/// Handler for file uploads.
@@ -28,7 +28,7 @@ pub async fn upload_handler(
let user_id = get_id_by_token(token.unwrap()).await?;
// Get the user's current quota
let this_week_amount: u64 = state.get_or_create_quota(&user_id).await.unwrap_or(0);
let current_quota: u64 = state.get_or_create_quota(&user_id).await.unwrap_or(0);
let mut body = "ok".to_string();
while let Ok(Some(field)) = payload.try_next().await {
let mut field = field;
@@ -46,7 +46,9 @@ pub async fn upload_handler(
Some(mime) => mime,
None => {
warn!("Неподдерживаемый формат файла");
return Err(actix_web::error::ErrorUnsupportedMediaType("Неподдерживаемый формат файла"));
return Err(actix_web::error::ErrorUnsupportedMediaType(
"Неподдерживаемый формат файла",
));
}
};
@@ -63,14 +65,18 @@ pub async fn upload_handler(
Some(ext) => ext,
None => {
warn!("Неподдерживаемый тип содержимого: {}", content_type);
return Err(actix_web::error::ErrorUnsupportedMediaType("Неподдерживаемый тип содержимого"));
return Err(actix_web::error::ErrorUnsupportedMediaType(
"Неподдерживаемый тип содержимого",
));
}
};
// Check that adding the file does not exceed the quota limit
if this_week_amount + file_size > MAX_WEEK_BYTES {
warn!("Quota would exceed limit: current={}, adding={}, limit={}",
this_week_amount, file_size, MAX_WEEK_BYTES);
if current_quota + file_size > MAX_USER_QUOTA_BYTES {
warn!(
"Quota would exceed limit: current={}, adding={}, limit={}",
current_quota, file_size, MAX_USER_QUOTA_BYTES
);
return Err(actix_web::error::ErrorUnauthorized("Quota exceeded"));
}
@@ -84,27 +90,33 @@ pub async fn upload_handler(
&filename,
file_bytes,
&content_type,
).await {
)
.await
{
Ok(_) => {
warn!("file {} uploaded to storj, incrementing quota by {} bytes", filename, file_size);
warn!(
"file {} uploaded to storj, incrementing quota by {} bytes",
filename, file_size
);
if let Err(e) = state.increment_uploaded_bytes(&user_id, file_size).await {
error!("Failed to increment quota: {}", e);
return Err(e);
}
// Save the file info in Redis
let mut redis = state.redis.clone();
store_file_info(&mut redis, &filename, &content_type).await?;
user_added_file(&mut redis, &user_id, &filename).await?;
// Save the path mapping
let generated_key = generate_key_with_extension(filename.clone(), content_type.clone());
let generated_key =
generate_key_with_extension(filename.clone(), content_type.clone());
state.set_path(&filename, &generated_key).await;
if let Ok(new_quota) = state.get_or_create_quota(&user_id).await {
warn!("New quota for user {}: {} bytes", user_id, new_quota);
}
body = filename;
}
Err(e) => {

View File

@@ -1,8 +1,8 @@
use std::collections::HashMap;
use actix_web::error::ErrorInternalServerError;
use once_cell::sync::Lazy;
use redis::aio::MultiplexedConnection;
use redis::AsyncCommands;
use std::collections::HashMap;
pub static MIME_TYPES: Lazy<HashMap<&'static str, &'static str>> = Lazy::new(|| {
let mut m = HashMap::new();
@@ -36,7 +36,6 @@ pub fn get_mime_type(extension: &str) -> Option<&'static str> {
MIME_TYPES.get(extension).copied()
}
/// Searches Redis for a file by name pattern
pub async fn find_file_by_pattern(
redis: &mut MultiplexedConnection,
@@ -67,4 +66,4 @@ pub async fn store_file_info(
.await
.map_err(|_| ErrorInternalServerError("Failed to store file info in Redis"))?;
Ok(())
}

View File

@@ -1,20 +1,27 @@
mod app_state;
mod auth;
mod lookup;
mod core;
mod handlers;
mod lookup;
mod overlay;
mod s3_utils;
mod thumbnail;
mod core;
mod overlay;
use actix_web::{middleware::Logger, web, App, HttpServer, http::header::{self, HeaderName}};
use actix_cors::Cors;
use actix_web::{
http::header::{self, HeaderName},
middleware::Logger,
web, App, HttpServer,
};
use app_state::AppState;
use handlers::{proxy_handler, upload_handler, root_handler};
use log::warn;
use tokio::task::spawn_blocking;
use std::env;
use env_logger;
use handlers::{
get_quota_handler, increase_quota_handler, proxy_handler, root_handler, set_quota_handler,
upload_handler,
};
use log::warn;
use std::env;
use tokio::task::spawn_blocking;
#[actix_web::main]
async fn main() -> std::io::Result<()> {
@@ -38,7 +45,7 @@ async fn main() -> std::io::Result<()> {
// Configure the CORS middleware
let cors = Cors::default()
.allow_any_origin() // TODO: restrict to specific domains in production
.allowed_methods(vec!["GET", "POST", "OPTIONS"])
.allowed_methods(vec!["GET", "POST", "PUT", "DELETE", "OPTIONS"])
.allowed_headers(vec![
header::DNT,
header::USER_AGENT,
@@ -49,10 +56,7 @@ async fn main() -> std::io::Result<()> {
header::RANGE,
header::AUTHORIZATION,
])
.expose_headers(vec![
header::CONTENT_LENGTH,
header::CONTENT_RANGE,
])
.expose_headers(vec![header::CONTENT_LENGTH, header::CONTENT_RANGE])
.supports_credentials()
.max_age(1728000); // 20 days
@@ -62,6 +66,9 @@ async fn main() -> std::io::Result<()> {
.wrap(Logger::default())
.route("/", web::get().to(root_handler))
.route("/", web::post().to(upload_handler))
.route("/quota", web::get().to(get_quota_handler))
.route("/quota/increase", web::post().to(increase_quota_handler))
.route("/quota/set", web::post().to(set_quota_handler))
.route("/{path:.*}", web::get().to(proxy_handler))
})
.bind(addr)?

View File

@@ -1,14 +1,17 @@
use std::{error::Error, io::Cursor};
use actix_web::web::Bytes;
use log::warn;
use image::Rgba;
use imageproc::drawing::{draw_text_mut, draw_filled_rect_mut};
use imageproc::rect::Rect;
use ab_glyph::{Font, FontArc, PxScale};
use actix_web::web::Bytes;
use image::Rgba;
use imageproc::drawing::{draw_filled_rect_mut, draw_text_mut};
use imageproc::rect::Rect;
use log::warn;
use std::{error::Error, io::Cursor};
use crate::core::get_shout_by_id;
pub async fn generate_overlay<'a>(shout_id: &'a str, filedata: Bytes) -> Result<Bytes, Box<dyn Error>> {
pub async fn generate_overlay<'a>(
shout_id: &'a str,
filedata: Bytes,
) -> Result<Bytes, Box<dyn Error>> {
// Get the shout via GraphQL
let shout_id_int = shout_id.parse::<i32>().unwrap_or(0);
match get_shout_by_id(shout_id_int).await {
@@ -84,7 +87,7 @@ pub async fn generate_overlay<'a>(shout_id: &'a str, filedata: Bytes) -> Result<
img.write_to(&mut Cursor::new(&mut buffer), image::ImageFormat::Png)?;
Ok(Bytes::from(buffer))
},
}
Err(e) => {
warn!("Error getting shout: {}", e);
Ok(filedata)

View File

@@ -1,8 +1,8 @@
use actix_web::error::ErrorInternalServerError;
use aws_sdk_s3::{error::SdkError, primitives::ByteStream, Client as S3Client};
use infer::get;
use mime_guess::mime;
use std::str::FromStr;
use infer::get;
/// Uploads a file to S3 storage.
pub async fn upload_to_s3(
@@ -32,7 +32,13 @@ pub async fn check_file_exists(
bucket: &str,
filepath: &str,
) -> Result<bool, actix_web::Error> {
match s3_client.head_object().bucket(bucket).key(filepath).send().await {
match s3_client
.head_object()
.bucket(bucket)
.key(filepath)
.send()
.await
{
Ok(_) => Ok(true), // File found
Err(SdkError::ServiceError(service_error)) if service_error.err().is_not_found() => {
Ok(false) // File not found
@@ -117,6 +123,6 @@ pub fn get_extension_from_mime(mime_type: &str) -> Option<&str> {
"image/webp" => Some("webp"),
"image/heic" => Some("heic"),
"image/tiff" => Some("tiff"),
_ => None
_ => None,
}
}

View File

@@ -60,7 +60,8 @@ pub fn parse_file_path(requested_path: &str) -> (String, u32, String) {
// Check for the old-style width in paths starting with "unsafe"
if path.starts_with("unsafe") && width == 0 {
if path_parts.len() >= 2 {
if let Some(old_width_str) = path_parts.get(1) { // Get the second element
if let Some(old_width_str) = path_parts.get(1) {
// Get the second element
let old_width_str = old_width_str.trim_end_matches('x');
if let Ok(w) = old_width_str.parse::<u32>() {
width = w;
@@ -78,7 +79,7 @@ pub fn parse_file_path(requested_path: &str) -> (String, u32, String) {
/// This makes it possible to support various image formats without having to anticipate them in advance.
pub async fn generate_thumbnails(
image: &DynamicImage,
format: ImageFormat
format: ImageFormat,
) -> Result<HashMap<u32, Vec<u8>>, actix_web::Error> {
let mut thumbnails = HashMap::new();
@@ -107,16 +108,18 @@ fn determine_image_format(extension: &str) -> Result<ImageFormat, actix_web::Err
"heic" | "heif" | "tiff" | "tif" => {
// Convert HEIC and TIFF to JPEG when saving
Ok(ImageFormat::Jpeg)
},
}
_ => {
log::error!("Неподдерживаемый формат изображения: {}", extension);
Err(ErrorInternalServerError("Неподдерживаемый формат изображения"))
},
Err(ErrorInternalServerError(
"Неподдерживаемый формат изображения",
))
}
}
}
/// Saves the thumbnail data.
///
/// Updated to pass the correct image format.
pub async fn thumbdata_save(
original_data: Vec<u8>,
@@ -128,12 +131,12 @@ pub async fn thumbdata_save(
warn!("original file name: {}", original_filename);
let (base_filename, _, extension) = parse_file_path(&original_filename);
warn!("detected file extension: {}", extension);
// For HEIC files, just save the original as the thumbnail
if content_type == "image/heic" {
warn!("HEIC file detected, using original as thumbnail");
let thumb_filename = format!("{}_{}.heic", base_filename, THUMB_WIDTHS[0]);
if let Err(e) = upload_to_s3(
&state.storj_client,
&state.bucket,
@@ -179,7 +182,10 @@ pub async fn thumbdata_save(
}
}
Err(e) => {
warn!("cannot generate thumbnails for {}: {}", original_filename, e);
warn!(
"cannot generate thumbnails for {}: {}",
original_filename, e
);
return Err(e);
}
}
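
For reference, a sketch of how `generate_thumbnails` is meant to be driven; the helper below is hypothetical and assumes an already-decoded `DynamicImage`.

use image::{DynamicImage, ImageFormat};
use std::collections::HashMap;

// Hypothetical caller: produce thumbnails for each configured width.
// HEIC/TIFF inputs are re-encoded as JPEG, per determine_image_format above.
async fn thumbs_for(img: &DynamicImage) -> Result<HashMap<u32, Vec<u8>>, actix_web::Error> {
    let thumbs = generate_thumbnails(img, ImageFormat::Jpeg).await?;
    for (width, bytes) in &thumbs {
        log::warn!("thumbnail {}px -> {} bytes", width, bytes.len());
    }
    Ok(thumbs)
}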