feat: 利用 ETag 节省内容传输,显式写明生命周期 (#464)

This commit is contained in:
ᴀᴍᴛᴏᴀᴇʀ
2025-09-24 02:03:06 +08:00
committed by GitHub
parent 210c94398a
commit bbbb7d0c5b
4 changed files with 48 additions and 31 deletions

View File

@@ -184,7 +184,7 @@ impl<'a, W: AsyncWrite> AssWriter<'a, W> {
} }
} }
fn escape_text(text: &str) -> Cow<str> { fn escape_text(text: &'_ str) -> Cow<'_, str> {
let text = text.trim(); let text = text.trim();
if memchr::memchr(b'\n', text.as_bytes()).is_some() { if memchr::memchr(b'\n', text.as_bytes()).is_some() {
Cow::from(text.replace('\n', "\\N")) Cow::from(text.replace('\n', "\\N"))

View File

@@ -91,7 +91,7 @@ impl<'a> Video<'a> {
.collect()) .collect())
} }
pub async fn get_danmaku_writer(&self, page: &'a PageInfo) -> Result<DanmakuWriter> { pub async fn get_danmaku_writer(&self, page: &'a PageInfo) -> Result<DanmakuWriter<'a>> {
let tasks = FuturesUnordered::new(); let tasks = FuturesUnordered::new();
for i in 1..=page.duration.div_ceil(360) { for i in 1..=page.duration.div_ceil(360) {
tasks.push(self.get_danmaku_segment(page, i as i64)); tasks.push(self.get_danmaku_segment(page, i as i64));

View File

@@ -26,7 +26,7 @@ pub async fn http_server(
log_writer: LogHelper, log_writer: LogHelper,
) -> Result<()> { ) -> Result<()> {
let app = router() let app = router()
.fallback_service(get(frontend_files)) .fallback_service(get(frontend_files).head(frontend_files))
.layer(Extension(database_connection)) .layer(Extension(database_connection))
.layer(Extension(bili_client)) .layer(Extension(bili_client))
.layer(Extension(log_writer)); .layer(Extension(log_writer));
@@ -48,34 +48,51 @@ async fn frontend_files(request: Request) -> impl IntoResponse {
}; };
let mime_type = content.mime_type(); let mime_type = content.mime_type();
let content_type = mime_type.as_deref().unwrap_or("application/octet-stream"); let content_type = mime_type.as_deref().unwrap_or("application/octet-stream");
if cfg!(debug_assertions) { let default_headers = [
( (header::CONTENT_TYPE, content_type),
[(header::CONTENT_TYPE, content_type)], (header::CACHE_CONTROL, "no-cache"),
// safety: `RustEmbed` returns uncompressed files directly from the filesystem in debug mode (header::ETAG, &content.hash()),
content.data().unwrap(), ];
) if let Some(if_none_match) = request.headers().get(header::IF_NONE_MATCH) {
.into_response() if let Ok(client_etag) = if_none_match.to_str() {
} else { if client_etag == content.hash() {
let accepted_encodings = request return (StatusCode::NOT_MODIFIED, default_headers).into_response();
.headers()
.get(header::ACCEPT_ENCODING)
.and_then(|v| v.to_str().ok())
.map(|s| s.split(',').map(str::trim).collect::<HashSet<_>>())
.unwrap_or_default();
for (encoding, data) in [("br", content.data_br()), ("gzip", content.data_gzip())] {
if accepted_encodings.contains(encoding) {
if let Some(data) = data {
return (
[
(header::CONTENT_TYPE, content_type),
(header::CONTENT_ENCODING, encoding),
],
data,
)
.into_response();
}
} }
} }
"Unsupported Encoding".into_response()
} }
if request.method() == axum::http::Method::HEAD {
return (StatusCode::OK, default_headers).into_response();
}
if cfg!(debug_assertions) {
// safety: `RustEmbed` returns uncompressed files directly from the filesystem in debug mode
return (StatusCode::OK, default_headers, content.data().unwrap()).into_response();
}
let accepted_encodings = request
.headers()
.get(header::ACCEPT_ENCODING)
.and_then(|v| v.to_str().ok())
.map(|s| s.split(',').map(str::trim).collect::<HashSet<_>>())
.unwrap_or_default();
for (encoding, data) in [("br", content.data_br()), ("gzip", content.data_gzip())] {
if accepted_encodings.contains(encoding) {
if let Some(data) = data {
return (
StatusCode::OK,
[
(header::CONTENT_TYPE, content_type),
(header::CACHE_CONTROL, "no-cache"),
(header::ETAG, &content.hash()),
(header::CONTENT_ENCODING, encoding),
],
data,
)
.into_response();
}
}
}
(
StatusCode::NOT_ACCEPTABLE,
"Client must support gzip or brotli compression",
)
.into_response()
} }

View File

@@ -42,7 +42,7 @@ impl TaskStatusNotifier {
} }
} }
pub async fn start_running(&self) -> MutexGuard<()> { pub async fn start_running(&'_ self) -> MutexGuard<'_, ()> {
let lock = self.mutex.lock().await; let lock = self.mutex.lock().await;
let _ = self.tx.send(Arc::new(TaskStatus { let _ = self.tx.send(Arc::new(TaskStatus {
is_running: true, is_running: true,