Merge branch 'develop' of git.pleroma.social:pleroma/pleroma into nsfw-api-mrf

Lain Soykaf 2024-05-27 17:49:31 +04:00
commit 4325b1aec3
5904 changed files with 74362 additions and 23442 deletions

View file

@@ -5,34 +5,13 @@
# 2. Copy this section into your Caddyfile and restart Caddy.
example.tld {
log /var/log/caddy/pleroma_access.log
errors /var/log/caddy/pleroma_error.log
log {
output file /var/log/caddy/pleroma.log
}
gzip
encode gzip
# this is explicitly IPv4 since Pleroma.Web.Endpoint binds on IPv4 only
# and `localhost.` resolves to [::0] on some systems: see issue #930
proxy / 127.0.0.1:4000 {
websocket
transparent
}
tls {
# Remove the rest of the lines in here, if you want to support older devices
key_type p256
ciphers ECDHE-ECDSA-WITH-CHACHA20-POLY1305 ECDHE-RSA-WITH-CHACHA20-POLY1305 ECDHE-ECDSA-AES256-GCM-SHA384 ECDHE-RSA-AES256-GCM-SHA384 ECDHE-ECDSA-AES128-GCM-SHA256 ECDHE-RSA-AES128-GCM-SHA256
}
# If you do not want to use the mediaproxy function, remove these lines.
# To use this directive, you need the http.cache plugin for Caddy.
cache {
match_path /media
default_max_age 720m
}
cache {
match_path /proxy
default_max_age 720m
}
# Stop removing lines here.
reverse_proxy 127.0.0.1:4000
}
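# A quick way to check and apply the section above (a sketch, assuming the
# Caddy v2 CLI and the default /etc/caddy/Caddyfile path):
#   caddy validate --config /etc/caddy/Caddyfile
#   caddy reload --config /etc/caddy/Caddyfile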

View file

@@ -1,48 +0,0 @@
#!/bin/sh
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
project_id="74"
project_branch="rebase/glitch-soc"
static_dir="instance/static"
# For bundling:
# project_branch="pleroma"
# static_dir="priv/static"
if [ ! -d "${static_dir}" ]
then
echo "Error: ${static_dir} directory is missing, are you sure you are running this script at the root of pleromas repository?"
exit 1
fi
last_modified="$(curl --fail -s -I 'https://git.pleroma.social/api/v4/projects/'${project_id}'/jobs/artifacts/'${project_branch}'/download?job=build' | grep '^Last-Modified:' | cut -d: -f2-)"
echo "branch:${project_branch}"
echo "Last-Modified:${last_modified}"
artifact="mastofe.zip"
if [ "${last_modified}x" = "x" ]
then
echo "ERROR: Couldn't get the modification date of the latest build archive, maybe it expired, exiting..."
exit 1
fi
if [ -e mastofe.timestamp ] && [ "$(cat mastofe.timestamp)" = "${last_modified}" ]
then
echo "MastoFE is up-to-date, exiting..."
exit 0
fi
curl --fail -c - "https://git.pleroma.social/api/v4/projects/${project_id}/jobs/artifacts/${project_branch}/download?job=build" -o "${artifact}" || exit
# TODO: Update the emoji as well
rm -fr "${static_dir}/sw.js" "${static_dir}/packs" || exit
unzip -q "${artifact}" || exit
cp public/assets/sw.js "${static_dir}/sw.js" || exit
cp -r public/packs "${static_dir}/packs" || exit
echo "${last_modified}" > mastofe.timestamp
rm -fr public
rm -i "${artifact}"

View file

@@ -8,6 +8,7 @@ pidfile="/var/run/pleroma.pid"
directory=/opt/pleroma
healthcheck_delay=60
healthcheck_timer=30
no_new_privs="yes"
: ${pleroma_port:-4000}

View file

@@ -0,0 +1,97 @@
# This file is for those who want to serve uploaded media and media proxy over
# another domain. This is STRONGLY RECOMMENDED.
# This is meant to be used ALONG WITH `pleroma.nginx`.
# If this is a new instance, replace the `location ~ ^/(media|proxy)` section in
# `pleroma.nginx` with the following to completely disable access to media from the main domain:
# location ~ ^/(media|proxy) {
# return 404;
# }
#
# If you are configuring an existing instance to use another domain
# for media, you will want to keep redirecting all existing local media to the new domain
# so already-uploaded media will not break.
# Replace the `location ~ ^/(media|proxy)` section in `pleroma.nginx` with the following:
#
# location /media {
# return 301 https://some.other.domain$request_uri;
# }
#
# location /proxy {
# return 404;
# }
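# The `proxy_cache pleroma_media_cache` and `proxy_pass http://phoenix` directives
# below rely on definitions shipped in `pleroma.nginx`. If this file is deployed on
# its own, something along these lines is needed in the http context (a sketch,
# values are illustrative; check `pleroma.nginx` for the shipped ones):
#
#   proxy_cache_path /tmp/pleroma-media-cache levels=1:2 keys_zone=pleroma_media_cache:10m
#                    max_size=10g inactive=720m use_temp_path=off;
#   upstream phoenix {
#     server 127.0.0.1:4000;
#   }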
server {
server_name some.other.domain;
listen 80;
listen [::]:80;
# Uncomment this if you need to use the 'webroot' method with certbot. Make sure
# that the directory exists and that it is accessible by the webserver. If you followed
# the guide, you already ran 'mkdir -p /var/lib/letsencrypt' to create the folder.
# You may need to load this file with the ssl server block commented out, run certbot
# to get the certificate, and then uncomment it.
#
# location ~ /\.well-known/acme-challenge {
# root /var/lib/letsencrypt/;
# }
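# A sketch of that flow once the block above is uncommented (certbot's webroot
# plugin; the webroot path matches the `root` above and the domain is this
# file's placeholder):
#   certbot certonly --webroot -w /var/lib/letsencrypt -d some.other.domain
#   nginx -s reload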
location / {
return 301 https://$server_name$request_uri;
}
}
server {
server_name some.other.domain;
listen 443 ssl http2;
listen [::]:443 ssl http2;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
ssl_trusted_certificate /etc/letsencrypt/live/some.other.domain/chain.pem;
ssl_certificate /etc/letsencrypt/live/some.other.domain/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/some.other.domain/privkey.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4";
ssl_prefer_server_ciphers off;
# If your server runs an OpenSSL version of 1.0.2 or below,
# leave only prime256v1 or comment out the following line.
ssl_ecdh_curve X25519:prime256v1:secp384r1:secp521r1;
ssl_stapling on;
ssl_stapling_verify on;
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript application/activity+json application/atom+xml;
# the nginx default is 1m, not enough for large media uploads
client_max_body_size 16m;
ignore_invalid_headers off;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location / { return 404; }
location ~ ^/(media|proxy) {
proxy_cache pleroma_media_cache;
slice 1m;
proxy_cache_key $host$uri$is_args$args$slice_range;
proxy_set_header Range $slice_range;
proxy_cache_valid 200 206 301 304 1h;
proxy_cache_lock on;
proxy_ignore_client_abort on;
proxy_buffering on;
chunked_transfer_encoding on;
proxy_pass http://phoenix;
}
}

View file

@@ -204,7 +204,7 @@
]}
]},
%% Following HTTP API is deprected, the new one abouve should be used instead
%% Following HTTP API is deprecated, the new one above should be used instead
{ {5288, "127.0.0.1"} , ejabberd_cowboy, [
{num_acceptors, 10},
@@ -466,7 +466,7 @@
%% == PostgreSQL ==
%% {rdbms, global, default, [{workers, 10}],
%% [{server, {pgsql, "server", 5432, "database", "username", "password"}}]},
%% [{server, {pgsql, "server", "port", "database", "username", "password"}}]},
%% == ODBC (MSSQL) ==
%% {rdbms, global, default, [{workers, 10}],
@@ -824,7 +824,7 @@
%% Enable archivization for private messages (default)
% {pm, [
%% Top-level options can be overriden here if needed, for example:
%% Top-level options can be overridden here if needed, for example:
% {async_writer, false}
% ]},
@@ -834,7 +834,7 @@
%%
% {muc, [
% {host, "muc.@HOST@"}
%% As with pm, top-level options can be overriden for MUC archive
%% As with pm, top-level options can be overridden for MUC archive
% ]},
%
%% Do not use a <stanza-id/> element (by default stanzaid is used)

View file

@@ -81,6 +81,19 @@ server {
proxy_pass http://phoenix;
}
# Uncomment this if you want notice compatibility routes for frontends like Soapbox.
# location ~ ^/@[^/]+/([^/]+)$ {
# proxy_pass http://phoenix/notice/$1;
# }
#
# location ~ ^/@[^/]+/posts/([^/]+)$ {
# proxy_pass http://phoenix/notice/$1;
# }
#
# location ~ ^/[^/]+/status/([^/]+)$ {
# proxy_pass http://phoenix/notice/$1;
# }
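# For example (hypothetical handle and status id), /@alice/1234, /@alice/posts/1234
# and /alice/status/1234 would each be proxied to /notice/1234.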
location ~ ^/(media|proxy) {
proxy_cache pleroma_media_cache;
slice 1m;

View file

@@ -1,4 +1,5 @@
# Recommended varnishncsa logging format: '%h %l %u %t "%m %{X-Forwarded-Proto}i://%{Host}i%U%q %H" %s %b "%{Referer}i" "%{User-agent}i"'
# Please use Varnish 7.0+ for proper Range Requests / Chunked encoding support
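# A hedged example of feeding that format to the log daemon (the output path is
# illustrative):
#   varnishncsa -a -w /var/log/varnish/pleroma_access.log \
#     -F '%h %l %u %t "%m %{X-Forwarded-Proto}i://%{Host}i%U%q %H" %s %b "%{Referer}i" "%{User-agent}i"'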
vcl 4.1;
import std;
@@ -22,11 +23,6 @@ sub vcl_recv {
set req.http.X-Forwarded-Proto = "https";
}
# CHUNKED SUPPORT
if (req.http.Range ~ "bytes=") {
set req.http.x-range = req.http.Range;
}
# Pipe if WebSockets request is coming through
if (req.http.upgrade ~ "(?i)websocket") {
return (pipe);
@@ -35,9 +31,9 @@ sub vcl_recv {
# Allow purging of the cache
if (req.method == "PURGE") {
if (!client.ip ~ purge) {
return(synth(405,"Not allowed."));
return (synth(405,"Not allowed."));
}
return(purge);
return (purge);
}
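# A hedged usage sketch, issued from a host matching the `purge` ACL (the
# address and object path are placeholders):
#   curl -X PURGE -H "Host: your.instance.tld" http://127.0.0.1/media/some-object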
}
@@ -53,17 +49,11 @@ sub vcl_backend_response {
return (retry);
}
# CHUNKED SUPPORT
if (bereq.http.x-range ~ "bytes=" && beresp.status == 206) {
set beresp.ttl = 10m;
set beresp.http.CR = beresp.http.content-range;
}
# Bypass cache for large files
# 50000000 ~ 50MB
if (std.integer(beresp.http.content-length, 0) > 50000000) {
set beresp.uncacheable = true;
return(deliver);
return (deliver);
}
# Don't cache objects that require authentication
@@ -94,7 +84,7 @@ sub vcl_synth {
if (resp.status == 750) {
set resp.status = 301;
set resp.http.Location = req.http.x-redir;
return(deliver);
return (deliver);
}
}
@@ -106,25 +96,12 @@ sub vcl_pipe {
}
}
sub vcl_hash {
# CHUNKED SUPPORT
if (req.http.x-range ~ "bytes=") {
hash_data(req.http.x-range);
unset req.http.Range;
}
}
sub vcl_backend_fetch {
# Be more lenient for slow servers on the fediverse
if (bereq.url ~ "^/proxy/") {
set bereq.first_byte_timeout = 300s;
}
# CHUNKED SUPPORT
if (bereq.http.x-range) {
set bereq.http.Range = bereq.http.x-range;
}
if (bereq.retries == 0) {
# Clean up the X-Varnish-Backend-503 flag that is used internally
# to mark broken backend responses that should be retried.
@@ -143,14 +120,6 @@ sub vcl_backend_fetch {
}
}
sub vcl_deliver {
# CHUNKED SUPPORT
if (resp.http.CR) {
set resp.http.Content-Range = resp.http.CR;
unset resp.http.CR;
}
}
sub vcl_backend_error {
# Retry broken backend responses.
set bereq.http.X-Varnish-Backend-503 = "1";