Merge remote-tracking branch 'origin/develop' into translate-posts

marcin mikołajczak 2024-08-02 09:42:17 +02:00
commit f7a7517296
393 changed files with 4027 additions and 2279 deletions

@@ -2,5 +2,8 @@
{"lib/cachex.ex", "Unknown type: Spec.cache/0."},
{"lib/pleroma/web/plugs/rate_limiter.ex", "The pattern can never match the type {:commit, _} | {:ignore, _}."},
{"lib/pleroma/web/plugs/rate_limiter.ex", "Function get_scale/2 will never be called."},
{"lib/pleroma/web/plugs/rate_limiter.ex", "Function initialize_buckets!/1 will never be called."}
{"lib/pleroma/web/plugs/rate_limiter.ex", "Function initialize_buckets!/1 will never be called."},
{"lib/pleroma/workers/receiver_worker.ex", :call},
{"lib/pleroma/workers/receiver_worker.ex", :pattern_match},
{"lib/pleroma/workers/receiver_worker.ex", :pattern_match_cov},
]

.gitignore

@@ -6,7 +6,7 @@
/test/instance
/test/uploads
/.elixir_ls
/test/fixtures/DSCN0010_tmp.jpg
/test/fixtures/DSCN0010_tmp*
/test/fixtures/test_tmp.txt
/test/fixtures/image_tmp.jpg
/test/tmp/
@@ -60,3 +60,6 @@ pleroma.iml
*~
*#
*.swp
archive-*
.gitlab-ci-local

@@ -18,9 +18,7 @@ workflow:
- if: $CI_COMMIT_BRANCH
cache: &global_cache_policy
key:
files:
- mix.lock
key: $CI_JOB_IMAGE-$CI_COMMIT_SHORT_SHA
paths:
- deps
- _build
@@ -80,13 +78,12 @@ build-1.13.4-otp-25:
script:
- mix compile --force
build-1.15.8-otp-26:
build-1.17.1-otp-26:
extends:
- .build_changes_policy
- .using-ci-base
stage: build
image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.15.8-otp-26
allow_failure: true
image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.17.1-otp-26
script:
- mix compile --force
@@ -136,7 +133,7 @@ unit-testing-1.13.4-otp-25:
script: &testing_script
- mix ecto.create
- mix ecto.migrate
- mix test --cover --preload-modules
- mix pleroma.test_runner --cover --preload-modules
coverage: '/^Line total: ([^ ]*%)$/'
artifacts:
reports:
@@ -144,34 +141,19 @@ unit-testing-1.13.4-otp-25:
coverage_format: cobertura
path: coverage.xml
unit-testing-1.15.8-otp-26:
unit-testing-1.17.1-otp-26:
extends:
- .build_changes_policy
- .using-ci-base
stage: test
image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.15.8-otp-26
allow_failure: true
image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.17.1-otp-26
cache: *testing_cache_policy
services: *testing_services
script: *testing_script
unit-testing-1.13.4-otp-25-erratic:
extends:
- .build_changes_policy
- .using-ci-base
stage: test
retry: 2
allow_failure: true
cache: *testing_cache_policy
services: *testing_services
script:
- mix ecto.create
- mix ecto.migrate
- mix test --only=erratic
formatting-1.13:
formatting-1.15:
extends: .build_changes_policy
image: &formatting_elixir elixir:1.13-alpine
image: &formatting_elixir elixir:1.15-alpine
stage: lint
cache: *testing_cache_policy
before_script: &current_bfr_script
@@ -183,7 +165,7 @@ formatting-1.13:
script:
- mix format --check-formatted
cycles-1.13:
cycles-1.15:
extends: .build_changes_policy
image: *formatting_elixir
stage: lint

@@ -0,0 +1 @@
Ensure that StripLocation actually removes everything resembling GPS data from PNGs

@@ -0,0 +1 @@
Elixir Logger configuration is no longer permitted through AdminFE and ConfigDB

@@ -0,0 +1 @@
Refactor the user backups code and improve test coverage

@@ -0,0 +1 @@
Client application data was always missing from the status

@@ -0,0 +1 @@
Update and extend NetBSD installation docs

@@ -0,0 +1 @@
Elixir 1.15 compatibility

@@ -0,0 +1 @@
Increase outgoing federation parallelism

changelog.d/fix-mrfs.add

@@ -0,0 +1 @@
Added a Mix task "pleroma.config fix_mrf_policies" which will remove erroneous MRF policies from ConfigDB.

@@ -0,0 +1 @@
Deactivated groups would still try to repeat a post.

@@ -0,0 +1 @@
Gun Connection Pool was not retrying to acquire a connection if the pool was full and stale connections were reclaimed

@@ -0,0 +1 @@
Change Hackney connection pool timeouts to align with the values Gun uses

@@ -0,0 +1 @@
Transmogrifier: handle non-validate errors on incoming Delete activities

@@ -0,0 +1 @@
Improve error logging when LDAP authentication fails.

changelog.d/ldap.fix

@@ -0,0 +1 @@
Fix LDAP support

@@ -0,0 +1 @@
Fix OpenGraph and Twitter metadata providers when parsing objects with no content or summary fields.

@@ -0,0 +1 @@
Publisher jobs will not retry if the error received is a 400

@@ -0,0 +1 @@
Deleting, Unfavoriting, Unrepeating, or Unreacting will cancel undelivered publishing jobs for the original activity.

@@ -0,0 +1 @@
PollWorker jobs will not retry if the activity no longer exists.

@@ -0,0 +1 @@
Improved detection of unrecoverable errors for incoming federation jobs

@@ -0,0 +1 @@
Changed some jobs to return :cancel on unrecoverable errors that should not be retried

@@ -0,0 +1 @@
Discard Remote Fetcher jobs which errored due to an MRF rejection.

@@ -0,0 +1 @@
Oban jobs can now be viewed in the Live Dashboard

@@ -0,0 +1 @@
Prevent Rich Media backfill jobs from retrying in cases where it is likely they will fail again.

@@ -0,0 +1 @@
Ensure all Oban jobs have timeouts defined

@@ -0,0 +1 @@
Oban Jobs for refreshing users were not respecting the uniqueness setting

@@ -0,0 +1 @@
Fix Optimistic Inbox for failed signatures

@@ -0,0 +1 @@
Publisher jobs now store the activity id instead of inserting duplicate JSON data in the Oban queue for each delivery.

@@ -0,0 +1 @@
Harden Rich Media parsing against very slow or malicious URLs

@@ -0,0 +1 @@
Rich Media backfilling is now an Oban job

@@ -0,0 +1 @@
User profile refreshes are now asynchronous

@@ -0,0 +1,8 @@
FROM elixir:1.16.3-otp-26
# Single RUN statement, otherwise intermediate images are created
# https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run
RUN apt-get update &&\
apt-get install -y libmagic-dev cmake libimage-exiftool-perl ffmpeg &&\
mix local.hex --force &&\
mix local.rebar --force

@@ -0,0 +1 @@
docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.16.3-otp-26 --push .

@@ -0,0 +1,8 @@
FROM elixir:1.17.1-otp-26
# Single RUN statement, otherwise intermediate images are created
# https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run
RUN apt-get update &&\
apt-get install -y libmagic-dev cmake libimage-exiftool-perl ffmpeg &&\
mix local.hex --force &&\
mix local.rebar --force

@@ -0,0 +1 @@
docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.17.1-otp-26 --push .

@@ -132,6 +132,8 @@ config :pleroma, Pleroma.Web.Endpoint,
]
# Configures Elixir's Logger
config :logger, backends: [:console]
config :logger, :console,
level: :debug,
format: "\n$time $metadata[$level] $message\n",
@@ -446,7 +448,7 @@ config :pleroma, :rich_media,
Pleroma.Web.RichMedia.Parsers.TwitterCard,
Pleroma.Web.RichMedia.Parsers.OEmbed
],
failure_backoff: 60_000,
timeout: 5_000,
ttl_setters: [
Pleroma.Web.RichMedia.Parser.TTL.AwsSignedUrl,
Pleroma.Web.RichMedia.Parser.TTL.Opengraph
@@ -578,21 +580,21 @@ config :pleroma, Pleroma.User,
],
email_blacklist: []
# The Pruner :max_age must be longer than Worker :unique
# value or it cannot enforce uniqueness.
config :pleroma, Oban,
repo: Pleroma.Repo,
log: false,
queues: [
activity_expiration: 10,
federator_incoming: 5,
federator_outgoing: 5,
ingestion_queue: 50,
federator_outgoing: 25,
web_push: 50,
transmogrifier: 20,
background: 5,
background: 20,
search_indexing: [limit: 10, paused: true],
slow: 1
slow: 5
],
plugins: [Oban.Plugins.Pruner],
plugins: [{Oban.Plugins.Pruner, max_age: 900}],
crontab: [
{"0 0 * * 0", Pleroma.Workers.Cron.DigestEmailsWorker},
{"0 0 * * *", Pleroma.Workers.Cron.NewUsersDigestWorker}
@@ -856,19 +858,19 @@ config :pleroma, :pools,
config :pleroma, :hackney_pools,
federation: [
max_connections: 50,
timeout: 150_000
timeout: 10_000
],
media: [
max_connections: 50,
timeout: 150_000
timeout: 15_000
],
rich_media: [
max_connections: 50,
timeout: 150_000
timeout: 15_000
],
upload: [
max_connections: 25,
timeout: 300_000
timeout: 15_000
]
config :pleroma, :majic_pool, size: 2
@@ -907,8 +909,8 @@ config :pleroma, Pleroma.User.Backup,
purge_after_days: 30,
limit_days: 7,
dir: nil,
process_wait_time: 30_000,
process_chunk_size: 100
process_chunk_size: 100,
timeout: :timer.minutes(30)
config :pleroma, ConcurrentLimiter, [
{Pleroma.Search, [max_running: 30, max_waiting: 50]}

@@ -1228,79 +1228,6 @@ config :pleroma, :config_description, [
}
]
},
%{
group: :logger,
type: :group,
description: "Logger-related settings",
children: [
%{
key: :backends,
type: [:atom, :tuple, :module],
description:
"Where logs will be sent, :console - send logs to stdout, { ExSyslogger, :ex_syslogger } - to syslog, Quack.Logger - to Slack.",
suggestions: [:console, {ExSyslogger, :ex_syslogger}]
}
]
},
%{
group: :logger,
type: :group,
key: :ex_syslogger,
label: "ExSyslogger",
description: "ExSyslogger-related settings",
children: [
%{
key: :level,
type: {:dropdown, :atom},
description: "Log level",
suggestions: [:debug, :info, :warning, :error]
},
%{
key: :ident,
type: :string,
description:
"A string that's prepended to every message, and is typically set to the app name",
suggestions: ["pleroma"]
},
%{
key: :format,
type: :string,
description: "Default: \"$date $time [$level] $levelpad$node $metadata $message\"",
suggestions: ["$metadata[$level] $message"]
},
%{
key: :metadata,
type: {:list, :atom},
suggestions: [:request_id]
}
]
},
%{
group: :logger,
type: :group,
key: :console,
label: "Console Logger",
description: "Console logger settings",
children: [
%{
key: :level,
type: {:dropdown, :atom},
description: "Log level",
suggestions: [:debug, :info, :warning, :error]
},
%{
key: :format,
type: :string,
description: "Default: \"$date $time [$level] $levelpad$node $metadata $message\"",
suggestions: ["$metadata[$level] $message"]
},
%{
key: :metadata,
type: {:list, :atom},
suggestions: [:request_id]
}
]
},
%{
group: :pleroma,
key: :frontend_configurations,
@@ -2174,11 +2101,11 @@
]
},
%{
key: :failure_backoff,
key: :timeout,
type: :integer,
description:
"Amount of milliseconds after request failure, during which the request will not be retried.",
suggestions: [60_000]
"Amount of milliseconds after which the HTTP request is forcibly terminated.",
suggestions: [5_000]
}
]
},
@@ -3428,20 +3355,19 @@ config :pleroma, :config_description, [
description: "Limit user to export not more often than once per N days",
suggestions: [7]
},
%{
key: :process_wait_time,
type: :integer,
label: "Process Wait Time",
description:
"The amount of time to wait for backup to report progress, in milliseconds. If no progress is received from the backup job for that much time, terminate it and deem it failed.",
suggestions: [30_000]
},
%{
key: :process_chunk_size,
type: :integer,
label: "Process Chunk Size",
description: "The number of activities to fetch in the backup job for each chunk.",
suggestions: [100]
},
%{
key: :timeout,
type: :integer,
label: "Timeout",
description: "The amount of time to wait for backup to complete in seconds.",
suggestions: [1_800]
}
]
},

@@ -36,7 +36,7 @@ config :pleroma, Pleroma.Emails.Mailer, adapter: Swoosh.Adapters.Local
# different ports.
# Do not include timestamps in development logs
config :logger, :console, format: "$metadata[$level] $message\n"
config :logger, Logger.Backends.Console, format: "$metadata[$level] $message\n"
# Set a higher stacktrace during development. Avoid configuring such
# in production as building large stacktraces may be expensive.

@@ -20,6 +20,7 @@ config :pleroma, Pleroma.Web.Endpoint,
config :phoenix, serve_endpoints: true
# Do not print debug messages in production
config :logger, Logger.Backends.Console, level: :info
config :logger, :console, level: :info
config :logger, :ex_syslogger, level: :info

@@ -49,7 +49,8 @@ config :pleroma, Pleroma.Repo,
hostname: System.get_env("DB_HOST") || "localhost",
port: System.get_env("DB_PORT") || "5432",
pool: Ecto.Adapters.SQL.Sandbox,
pool_size: System.schedulers_online() * 2
pool_size: System.schedulers_online() * 2,
log: false
config :pleroma, :dangerzone, override_repo_pool_size: true
@@ -157,8 +158,7 @@ config :pleroma, Pleroma.Uploaders.IPFS, config_impl: Pleroma.UnstubbedConfigMoc
config :pleroma, Pleroma.Web.Plugs.HTTPSecurityPlug, config_impl: Pleroma.StaticStubbedConfigMock
config :pleroma, Pleroma.Web.Plugs.HTTPSignaturePlug, config_impl: Pleroma.StaticStubbedConfigMock
config :pleroma, Pleroma.Web.Plugs.HTTPSignaturePlug,
http_signatures_impl: Pleroma.StubbedHTTPSignaturesMock
config :pleroma, Pleroma.Signature, http_signatures_impl: Pleroma.StubbedHTTPSignaturesMock
peer_module =
if String.to_integer(System.otp_release()) >= 25 do
@@ -177,11 +177,18 @@ config :pleroma, Pleroma.Application,
streamer_registry: false,
test_http_pools: true
config :pleroma, Pleroma.Web.Streaming, sync_streaming: true
config :pleroma, Pleroma.Uploaders.Uploader, timeout: 1_000
config :pleroma, Pleroma.Emoji.Loader, test_emoji: true
config :pleroma, Pleroma.Web.RichMedia.Backfill, provider: Pleroma.Web.RichMedia.Backfill
config :pleroma, Pleroma.Web.RichMedia.Backfill,
stream_out: Pleroma.Web.ActivityPub.ActivityPubMock
config :pleroma, Pleroma.Web.Plugs.HTTPSecurityPlug, enable: false
config :pleroma, Pleroma.User.Backup, tempdir: "test/tmp"
if File.exists?("./config/test.secret.exs") do
import_config "test.secret.exs"

@@ -154,4 +154,19 @@ This forcibly removes all saved values in the database.
```sh
mix pleroma.config [--force] reset
```
## Remove invalid MRF modules from the database
This forcibly removes any enabled MRF policy that does not actually exist, restoring the instance's ability to start.
=== "OTP"
```sh
./bin/pleroma_ctl config fix_mrf_policies
```
=== "From Source"
```sh
mix pleroma.config fix_mrf_policies
```
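A sketch of the check the new task performs, mirroring the `fix_mrf_policies` implementation added to `Mix.Tasks.Pleroma.Config` later in this diff: a policy survives only if it is an atom naming a module that actually compiles (`Missing.Policy` below is a stand-in for a leftover entry):

```elixir
policies = [Pleroma.Web.ActivityPub.MRF.FollowBotPolicy, Missing.Policy]

valid_policies =
  policies
  |> Enum.filter(&is_atom/1)
  |> Enum.filter(fn mrf -> match?({:module, _}, Code.ensure_compiled(mrf)) end)

# => [Pleroma.Web.ActivityPub.MRF.FollowBotPolicy]
```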

@@ -436,7 +436,7 @@ config :pleroma, Pleroma.Web.MediaProxy.Invalidation.Http,
* `ignore_hosts`: list of hosts which will be ignored by the metadata parser. For example `["accounts.google.com", "xss.website"]`, defaults to `[]`.
* `ignore_tld`: list of TLDs (top-level domains) to ignore when parsing metadata. Defaults to `["local", "localdomain", "lan"]`.
* `parsers`: list of Rich Media parsers.
* `failure_backoff`: Amount of milliseconds after request failure, during which the request will not be retried.
* `timeout`: Amount of milliseconds after which the HTTP request is forcibly terminated.
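For example, to relax the new default (the value this commit ships in `config.exs` is `5_000`):

```elixir
config :pleroma, :rich_media,
  timeout: 10_000
```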
## HTTP server
@@ -853,7 +853,7 @@ config :logger,
backends: [{ExSyslogger, :ex_syslogger}]
config :logger, :ex_syslogger,
level: :warn
level: :warning
```
Another example, keeping console output and adding the pid to syslog output:
@@ -862,7 +862,7 @@ config :logger,
backends: [:console, {ExSyslogger, :ex_syslogger}]
config :logger, :ex_syslogger,
level: :warn,
level: :warning,
option: [:pid, :ndelay]
```
@@ -1171,6 +1171,7 @@ Control favicons for instances.
3. the directory named by the TMP environment variable
4. C:\TMP on Windows or /tmp on Unix-like operating systems
5. as a last resort, the current working directory
* `:timeout` an integer representing seconds
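A sketch of overriding it; note that this commit's `config.exs` default is expressed as `:timer.minutes(30)` (a millisecond value), while the setting is documented here in seconds, so double-check which unit your build expects:

```elixir
config :pleroma, Pleroma.User.Backup,
  timeout: :timer.minutes(30)
```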
## Frontend management

@@ -1,7 +1,7 @@
## Required dependencies
* PostgreSQL >=11.0
* Elixir >=1.13.0 <1.15
* Elixir >=1.13.0 <1.17
* Erlang OTP >=22.2.0 (supported: <27)
* git
* file / libmagic

@@ -2,14 +2,41 @@
{! backend/installation/generic_dependencies.include !}
## Installing software used in this guide
# Installation options
Currently there are two options available for NetBSD: manual installation (from source) or using an experimental package from [pkgsrc-wip](https://github.com/NetBSD/pkgsrc-wip/tree/master/pleroma).
The WIP package can be installed via pkgsrc and can be cross-compiled for easier binary distribution. A source installation will most probably be restricted to a single machine.
## pkgsrc installation
The WIP package creates a Mix release (similar to how Docker images are built) but doesn't bundle the Erlang runtime, listing it as a dependency instead. This allows for easier and more modular installations, especially on weaker machines. Currently this method also does not support all features of the `pleroma_ctl` command (like changing the installation type or managing frontends), as NetBSD is not yet a supported binary flavour of Pleroma's CI.
In any case, you can install it the same way as any other `pkgsrc-wip` package:
```
cd /usr/pkgsrc
git clone --depth 1 git://wip.pkgsrc.org/pkgsrc-wip.git wip
cp -rf wip/pleroma www
cp -rf wip/libvips graphics
cd /usr/pkgsrc/www/pleroma
bmake && bmake install
```
Use `bmake package` to create a binary package. This can come in especially handy if you're targeting embedded or low-power systems and are cross-compiling on a more powerful machine.
> Note: Elixir has an [endianness bug](https://github.com/elixir-lang/elixir/issues/2785) which requires it to be compiled on a machine with the same endianness as the target. In other words, a package cross-compiled on amd64 (little endian) won't work on powerpc or sparc machines (big endian). While _in theory™_ nothing catastrophic should happen, one can see that, for example, regexes won't work properly. Some distributions just strip this warning away, so it doesn't bother the users... anyway, you've been warned.
## Source installation
pkgin should have been installed by the NetBSD installer if you selected
the right options. If it isn't installed, install it using pkg_add.
the right options. If it isn't installed, install it using `pkg_add`.
Note that `postgresql11-contrib` is needed for the Postgres extensions
Pleroma uses.
> Note: you can use modern versions of PostgreSQL. In this case, just use `postgresql16-contrib` and so on.
The `mksh` shell is needed to run the Elixir `mix` script.
`# pkgin install acmesh elixir git-base git-docs mksh nginx postgresql11-server postgresql11-client postgresql11-contrib sudo ffmpeg4 ImageMagick`
@@ -29,29 +56,6 @@ shells/mksh
www/nginx
```
Copy the rc.d scripts to the right directory:
```
# cp /usr/pkg/share/examples/rc.d/nginx /usr/pkg/share/examples/rc.d/pgsql /etc/rc.d
```
Add nginx and Postgres to `/etc/rc.conf`:
```
nginx=YES
pgsql=YES
```
## Configuring postgres
First, run `# /etc/rc.d/pgsql start`. Then, `$ sudo -Hu pgsql -g pgsql createdb`.
### Install media / graphics packages (optional, see [`docs/installation/optional/media_graphics_packages.md`](../installation/optional/media_graphics_packages.md))
`# pkgin install ImageMagick ffmpeg4 p5-Image-ExifTool`
## Configuring Pleroma
Create a user for Pleroma:
```
@@ -68,41 +72,98 @@ $ cd /home/pleroma
$ git clone -b stable https://git.pleroma.social/pleroma/pleroma.git
```
Configure Pleroma. Note that you need a domain name at this point:
Get deps and compile:
```
$ cd /home/pleroma/pleroma
$ export MIX_ENV=prod
$ mix deps.get
$ MIX_ENV=prod mix pleroma.instance gen # You will be asked a few questions here.
$ mix compile
```
Since Postgres is configured, we can now initialize the database. There should
now be a file in `config/setup_db.psql` that makes this easier. Edit it, and
*change the password* to a password of your choice. Make sure it is secure, since
## Install media / graphics packages (optional, see [`docs/installation/optional/media_graphics_packages.md`](../installation/optional/media_graphics_packages.md))
`# pkgin install ImageMagick ffmpeg4 p5-Image-ExifTool`
or via pkgsrc:
```
graphics/p5-Image-ExifTool
graphics/ImageMagick
multimedia/ffmpeg4
```
# Configuration
## Understanding $PREFIX
From now on, you may encounter the `$PREFIX` variable in the paths. This variable indicates your current local pkgsrc prefix. Usually it's `/usr/pkg` unless you configured it otherwise. Translating to pkgsrc's lingo, it's called `LOCALBASE`, which essentially means the same thing. You may want to set it up for your local shell session (this uses `mksh`, which should already be installed as one of the required dependencies):
```
$ export PREFIX=$(pkg_info -Q LOCALBASE mksh)
$ echo $PREFIX
/usr/pkg
```
## Setting up your instance
Now, you need to configure your instance. During this initial configuration, you will be asked some questions about your server. You will need a domain name at this point; it doesn't have to be deployed, but changing it later will be very cumbersome.
If you've installed via pkgsrc, `pleroma_ctl` should already be in your `PATH`; if you've installed from source, it's located at `/home/pleroma/pleroma/release/bin/pleroma_ctl`.
```
$ su -l pleroma
$ pleroma_ctl instance gen --output $PREFIX/etc/pleroma/config.exs --output-psql /tmp/setup_db.psql
```
During installation, you will be asked about static and upload directories. Don't forget to create them and update permissions:
```
mkdir -p /var/lib/pleroma/uploads
chown -R pleroma:pleroma /var/lib/pleroma
```
## Setting up the database
First, run `# /etc/rc.d/pgsql start`. Then, `$ sudo -Hu pgsql -g pgsql createdb`.
We can now initialize the database. You'll need to edit the generated SQL file from the previous step. It's located at `/tmp/setup_db.psql`.
Edit this file, and *change the password* to a password of your choice. Make sure it is secure, since
it'll be protecting your database. Now initialize the database:
```
$ sudo -Hu pgsql -g pgsql psql -f config/setup_db.psql
$ sudo -Hu pgsql -g pgsql psql -f /tmp/setup_db.psql
```
Postgres allows connections from all users without a password by default. To
fix this, edit `/usr/pkg/pgsql/data/pg_hba.conf`. Change every `trust` to
fix this, edit `$PREFIX/pgsql/data/pg_hba.conf`. Change every `trust` to
`password`.
Once this is done, restart Postgres with `# /etc/rc.d/pgsql restart`.
Run the database migrations.
### pkgsrc installation
```
pleroma_ctl migrate
```
### Source installation
You will need to do this whenever you update with `git pull`:
```
$ cd /home/pleroma/pleroma
$ MIX_ENV=prod mix ecto.migrate
```
## Configuring nginx
Install the example configuration file
`/home/pleroma/pleroma/installation/pleroma.nginx` to
`/usr/pkg/etc/nginx.conf`.
(`$PREFIX/share/examples/pleroma/pleroma.nginx` or `/home/pleroma/pleroma/installation/pleroma.nginx`) to
`$PREFIX/etc/nginx.conf`.
Note that it will need to be wrapped in a `http {}` block. You should add
settings for the nginx daemon outside of the http block, for example:
@@ -176,27 +237,45 @@ Let's add auto-renewal to `/etc/daily.local`
--stateless
```
## Creating a startup script for Pleroma
## Autostart
Copy the startup script to the correct location and make sure it's executable:
For a properly functioning instance, you will need the pleroma (backend), nginx (reverse proxy) and postgresql (database) services running. They don't have to reside on the same machine, but you have to provide autostart for each of them.
### nginx
```
# cp $PREFIX/share/examples/rc.d/nginx /etc/rc.d
# echo "nginx=YES" >> /etc/rc.conf
```
### postgresql
```
# cp $PREFIX/share/examples/rc.d/pgsql /etc/rc.d
# echo "pgsql=YES" >> /etc/rc.conf
```
### pleroma
First, copy the script (pkgsrc variant):
```
# cp $PREFIX/share/examples/pleroma/pleroma.rc /etc/rc.d/pleroma
```
or the source variant:
```
# cp /home/pleroma/pleroma/installation/netbsd/rc.d/pleroma /etc/rc.d/pleroma
# chmod +x /etc/rc.d/pleroma
```
Add the following to `/etc/rc.conf`:
Then, add the following to `/etc/rc.conf`:
```
pleroma=YES
pleroma_home="/home/pleroma"
pleroma_user="pleroma"
```
Run `# /etc/rc.d/pleroma start` to start Pleroma.
## Conclusion
Run `# /etc/rc.d/pleroma start` to start Pleroma.
Restart nginx with `# /etc/rc.d/nginx restart` and you should be up and running.
Make sure your time is in sync, or other instances will receive your posts with

@@ -1,11 +1,14 @@
#!/bin/sh
# PROVIDE: pleroma
# REQUIRE: DAEMON pgsql
# REQUIRE: DAEMON pgsql nginx
if [ -f /etc/rc.subr ]; then
. /etc/rc.subr
fi
pleroma_home="/home/pleroma"
pleroma_user="pleroma"
name="pleroma"
rcvar=${name}
command="/usr/pkg/bin/elixir"
@@ -19,10 +22,10 @@ pleroma_env="HOME=${pleroma_home} MIX_ENV=prod"
check_pidfile()
{
pid=$(pgrep -U "${pleroma_user}" /bin/beam.smp$)
echo -n "${pid}"
printf '%s' "${pid}"
}
if [ -f /etc/rc.subr -a -d /etc/rc.d -a -f /etc/rc.d/DAEMON ]; then
if [ -f /etc/rc.subr ] && [ -d /etc/rc.d ] && [ -f /etc/rc.d/DAEMON ]; then
# newer NetBSD
load_rc_config ${name}
run_rc_command "$1"
@@ -39,7 +42,7 @@ else
stop)
echo "Stopping ${name}."
check_pidfile
! [ -n ${pid} ] && kill ${pid}
! [ -n "${pid}" ] && kill "${pid}"
;;
restart)

@@ -14,7 +14,8 @@ defmodule Mix.Pleroma do
:swoosh,
:timex,
:fast_html,
:oban
:oban,
:logger_backends
]
@cachex_children ["object", "user", "scrubber", "web_resp"]
@doc "Common functions to be reused in mix tasks"

@@ -205,6 +205,35 @@ defmodule Mix.Tasks.Pleroma.Config do
end
end
# Removes any policies that are not a real module
# as they will prevent the server from starting
def run(["fix_mrf_policies"]) do
check_configdb(fn ->
start_pleroma()
group = :pleroma
key = :mrf
%{value: value} =
group
|> ConfigDB.get_by_group_and_key(key)
policies =
Keyword.get(value, :policies, [])
|> Enum.filter(&is_atom(&1))
|> Enum.filter(fn mrf ->
case Code.ensure_compiled(mrf) do
{:module, _} -> true
{:error, _} -> false
end
end)
value = Keyword.put(value, :policies, policies)
ConfigDB.update_or_create(%{group: group, key: key, value: value})
end)
end
@spec migrate_to_db(Path.t() | nil) :: any()
def migrate_to_db(file_path \\ nil) do
with :ok <- Pleroma.Config.DeprecationWarnings.warn() do

@@ -351,7 +351,7 @@ defmodule Mix.Tasks.Pleroma.Database do
)
end
shell_info('Done.')
shell_info(~c"Done.")
end
end

@@ -0,0 +1,25 @@
defmodule Mix.Tasks.Pleroma.TestRunner do
@shortdoc "Retries tests once if they fail"
use Mix.Task
def run(args \\ []) do
case System.cmd("mix", ["test"] ++ args, into: IO.stream(:stdio, :line)) do
{_, 0} ->
:ok
_ ->
retry(args)
end
end
def retry(args) do
case System.cmd("mix", ["test", "--failed"] ++ args, into: IO.stream(:stdio, :line)) do
{_, 0} ->
:ok
_ ->
exit(1)
end
end
end
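CI invokes this task in place of a plain `mix test` (see the `.gitlab-ci.yml` change above). It forwards its arguments to `mix test` and, on a non-zero exit, reruns only the failed tests once via `mix test --failed`. It can be run locally the same way:

```sh
mix pleroma.test_runner --cover --preload-modules
```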

@@ -14,7 +14,6 @@ defmodule Pleroma.Application do
@name Mix.Project.config()[:name]
@version Mix.Project.config()[:version]
@repository Mix.Project.config()[:source_url]
@compile_env Mix.env()
def name, do: @name
def version, do: @version
@@ -53,7 +52,7 @@ defmodule Pleroma.Application do
Pleroma.Config.Oban.warn()
Config.DeprecationWarnings.warn()
if @compile_env != :test do
if Config.get([Pleroma.Web.Plugs.HTTPSecurityPlug, :enable], true) do
Pleroma.Web.Plugs.HTTPSecurityPlug.warn_if_disabled()
end

@@ -241,10 +241,9 @@ defmodule Pleroma.ApplicationRequirements do
missing_mrfs =
Enum.reduce(mrfs, [], fn x, acc ->
if Code.ensure_compiled(x) do
acc
else
acc ++ [x]
case Code.ensure_compiled(x) do
{:module, _} -> acc
{:error, _} -> acc ++ [x]
end
end)

@@ -1,5 +1,5 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2022 Pleroma Authors <https://pleroma.social/>
# Copyright © 2017-2023 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Config.TransferTask do
@@ -44,14 +44,9 @@ defmodule Pleroma.Config.TransferTask do
with {_, true} <- {:configurable, Config.get(:configurable_from_database)} do
# We need to restart applications for loaded settings take effect
{logger, other} =
settings =
(Repo.all(ConfigDB) ++ deleted_settings)
|> Enum.map(&merge_with_default/1)
|> Enum.split_with(fn {group, _, _, _} -> group in [:logger] end)
logger
|> Enum.sort()
|> Enum.each(&configure/1)
started_applications = Application.started_applications()
@@ -64,7 +59,7 @@ defmodule Pleroma.Config.TransferTask do
[:pleroma | reject]
end
other
settings
|> Enum.map(&update/1)
|> Enum.uniq()
|> Enum.reject(&(&1 in reject))
@@ -102,38 +97,6 @@ defmodule Pleroma.Config.TransferTask do
{group, key, value, merged}
end
# change logger configuration in runtime, without restart
defp configure({_, :backends, _, merged}) do
# removing current backends
Enum.each(Application.get_env(:logger, :backends), &Logger.remove_backend/1)
Enum.each(merged, &Logger.add_backend/1)
:ok = update_env(:logger, :backends, merged)
end
defp configure({_, key, _, merged}) when key in [:console, :ex_syslogger] do
merged =
if key == :console do
put_in(merged[:format], merged[:format] <> "\n")
else
merged
end
backend =
if key == :ex_syslogger,
do: {ExSyslogger, :ex_syslogger},
else: key
Logger.configure_backend(backend, merged)
:ok = update_env(:logger, key, merged)
end
defp configure({_, key, _, merged}) do
Logger.configure([{key, merged}])
:ok = update_env(:logger, key, merged)
end
defp update({group, key, value, merged}) do
try do
:ok = update_env(group, key, merged)

@@ -165,8 +165,7 @@ defmodule Pleroma.ConfigDB do
{:pleroma, :ecto_repos},
{:mime, :types},
{:cors_plug, [:max_age, :methods, :expose, :headers]},
{:swarm, :node_blacklist},
{:logger, :backends}
{:swarm, :node_blacklist}
]
Enum.any?(full_key_update, fn
@@ -385,7 +384,12 @@ defmodule Pleroma.ConfigDB do
@spec module_name?(String.t()) :: boolean()
def module_name?(string) do
Regex.match?(~r/^(Pleroma|Phoenix|Tesla|Ueberauth|Swoosh)\./, string) or
string in ["Oban", "Ueberauth", "ExSyslogger", "ConcurrentLimiter"]
if String.contains?(string, ".") do
[name | _] = String.split(string, ".", parts: 2)
name in ~w[Pleroma Phoenix Tesla Ueberauth Swoosh Logger LoggerBackends]
else
string in ~w[Oban Ueberauth ExSyslogger ConcurrentLimiter]
end
end
end

@@ -12,6 +12,8 @@ defmodule Pleroma.Conversation.Participation do
import Ecto.Changeset
import Ecto.Query
@type t :: %__MODULE__{}
schema "conversation_participations" do
belongs_to(:user, User, type: FlakeId.Ecto.CompatType)
belongs_to(:conversation, Conversation)

@@ -27,11 +27,3 @@ defenum(Pleroma.DataMigration.State,
failed: 4,
manual: 5
)
defenum(Pleroma.User.Backup.State,
pending: 1,
running: 2,
complete: 3,
failed: 4,
invalid: 5
)

@@ -345,37 +345,22 @@ defmodule Pleroma.Emails.UserEmail do
Router.Helpers.subscription_url(Endpoint, :unsubscribe, token)
end
def backup_is_ready_email(backup, admin_user_id \\ nil) do
def backup_is_ready_email(backup) do
%{user: user} = Pleroma.Repo.preload(backup, :user)
Gettext.with_locale_or_default user.language do
download_url = Pleroma.Web.PleromaAPI.BackupView.download_url(backup)
html_body =
if is_nil(admin_user_id) do
Gettext.dpgettext(
"static_pages",
"account archive email body - self-requested",
"""
<p>You requested a full backup of your Pleroma account. It's ready for download:</p>
<p><a href="%{download_url}">%{download_url}</a></p>
""",
download_url: download_url
)
else
admin = Pleroma.Repo.get(User, admin_user_id)
Gettext.dpgettext(
"static_pages",
"account archive email body - admin requested",
"""
<p>Admin @%{admin_nickname} requested a full backup of your Pleroma account. It's ready for download:</p>
<p><a href="%{download_url}">%{download_url}</a></p>
""",
admin_nickname: admin.nickname,
download_url: download_url
)
end
Gettext.dpgettext(
"static_pages",
"account archive email body",
"""
<p>A full backup of your Pleroma account was requested. It's ready for download:</p>
<p><a href="%{download_url}">%{download_url}</a></p>
""",
download_url: download_url
)
new()
|> to(recipient(user))

@@ -416,10 +416,10 @@ defmodule Pleroma.Emoji.Pack do
end
defp create_archive_and_cache(pack, hash) do
files = ['pack.json' | Enum.map(pack.files, fn {_, file} -> to_charlist(file) end)]
files = [~c"pack.json" | Enum.map(pack.files, fn {_, file} -> to_charlist(file) end)]
{:ok, {_, result}} =
:zip.zip('#{pack.name}.zip', files, [:memory, cwd: to_charlist(pack.path)])
:zip.zip(~c"#{pack.name}.zip", files, [:memory, cwd: to_charlist(pack.path)])
ttl_per_file = Pleroma.Config.get!([:emoji, :shared_pack_cache_seconds_per_file])
overall_ttl = :timer.seconds(ttl_per_file * Enum.count(files))
@@ -586,7 +586,7 @@ defmodule Pleroma.Emoji.Pack do
with :ok <- File.mkdir_p!(local_pack.path) do
files = Enum.map(remote_pack["files"], fn {_, path} -> to_charlist(path) end)
# Fallback cannot contain a pack.json file
files = if pack_info[:fallback], do: files, else: ['pack.json' | files]
files = if pack_info[:fallback], do: files, else: [~c"pack.json" | files]
:zip.unzip(archive, cwd: to_charlist(local_pack.path), file_list: files)
end

@@ -199,8 +199,8 @@ defmodule Pleroma.FollowingRelationship do
|> preload([:follower])
|> Repo.all()
|> Enum.map(fn following_relationship ->
Pleroma.Web.CommonAPI.follow(following_relationship.follower, target)
Pleroma.Web.CommonAPI.unfollow(following_relationship.follower, origin)
Pleroma.Web.CommonAPI.follow(target, following_relationship.follower)
Pleroma.Web.CommonAPI.unfollow(origin, following_relationship.follower)
end)
|> case do
[] ->

@@ -43,10 +43,6 @@ defmodule Pleroma.Frontend do
{:download_or_unzip, _} ->
Logger.info("Could not download or unzip the frontend")
{:error, "Could not download or unzip the frontend"}
_e ->
Logger.info("Could not install the frontend")
{:error, "Could not install the frontend"}
end
end

@@ -9,7 +9,7 @@ defmodule Pleroma.Gun.ConnectionPool.Reclaimer do
def start_monitor do
pid =
case GenServer.start_link(__MODULE__, [], name: {:via, Registry, {registry(), "reclaimer"}}) do
case GenServer.start(__MODULE__, [], name: {:via, Registry, {registry(), "reclaimer"}}) do
{:ok, pid} ->
pid

@@ -5,6 +5,9 @@
defmodule Pleroma.Gun.ConnectionPool.WorkerSupervisor do
@moduledoc "Supervisor for pool workers. Does not do anything except enforce max connection limit"
alias Pleroma.Config
alias Pleroma.Gun.ConnectionPool.Worker
use DynamicSupervisor
def start_link(opts) do
@@ -14,21 +17,28 @@ defmodule Pleroma.Gun.ConnectionPool.WorkerSupervisor do
def init(_opts) do
DynamicSupervisor.init(
strategy: :one_for_one,
max_children: Pleroma.Config.get([:connections_pool, :max_connections])
max_children: Config.get([:connections_pool, :max_connections])
)
end
def start_worker(opts, last_attempt \\ false) do
case DynamicSupervisor.start_child(__MODULE__, {Pleroma.Gun.ConnectionPool.Worker, opts}) do
{:error, :max_children} ->
funs = [fn -> last_attempt end, fn -> match?(:error, free_pool()) end]
def start_worker(opts, last_attempt \\ false)
if Enum.any?(funs, fn fun -> fun.() end) do
:telemetry.execute([:pleroma, :connection_pool, :provision_failure], %{opts: opts})
{:error, :pool_full}
else
start_worker(opts, true)
end
def start_worker(opts, true) do
case DynamicSupervisor.start_child(__MODULE__, {Worker, opts}) do
{:error, :max_children} ->
:telemetry.execute([:pleroma, :connection_pool, :provision_failure], %{opts: opts})
{:error, :pool_full}
res ->
res
end
end
def start_worker(opts, false) do
case DynamicSupervisor.start_child(__MODULE__, {Worker, opts}) do
{:error, :max_children} ->
free_pool()
start_worker(opts, true)
res ->
res

@@ -68,7 +68,9 @@ defmodule Pleroma.HTTP do
adapter = Application.get_env(:tesla, :adapter)
client = Tesla.client(adapter_middlewares(adapter), adapter)
extra_middleware = options[:tesla_middleware] || []
client = Tesla.client(adapter_middlewares(adapter, extra_middleware), adapter)
maybe_limit(
fn ->
@@ -102,20 +104,21 @@ defmodule Pleroma.HTTP do
fun.()
end
defp adapter_middlewares(Tesla.Adapter.Gun) do
[Tesla.Middleware.FollowRedirects, Pleroma.Tesla.Middleware.ConnectionPool]
defp adapter_middlewares(Tesla.Adapter.Gun, extra_middleware) do
[Tesla.Middleware.FollowRedirects, Pleroma.Tesla.Middleware.ConnectionPool] ++
extra_middleware
end
defp adapter_middlewares({Tesla.Adapter.Finch, _}) do
[Tesla.Middleware.FollowRedirects]
defp adapter_middlewares({Tesla.Adapter.Finch, _}, extra_middleware) do
[Tesla.Middleware.FollowRedirects] ++ extra_middleware
end
defp adapter_middlewares(_) do
defp adapter_middlewares(_, extra_middleware) do
if Pleroma.Config.get(:env) == :test do
# Emulate redirects in test env, which are handled by adapters in other environments
[Tesla.Middleware.FollowRedirects]
else
[]
extra_middleware
end
end
end

@@ -10,7 +10,7 @@ defmodule Pleroma.Instances.Instance do
alias Pleroma.Maps
alias Pleroma.Repo
alias Pleroma.User
alias Pleroma.Workers.BackgroundWorker
alias Pleroma.Workers.DeleteWorker
use Ecto.Schema
@@ -297,7 +297,7 @@ defmodule Pleroma.Instances.Instance do
all of those users' activities and notifications.
"""
def delete_users_and_activities(host) when is_binary(host) do
BackgroundWorker.enqueue("delete_instance", %{"host" => host})
DeleteWorker.enqueue("delete_instance", %{"host" => host})
end
def perform(:delete_instance, host) when is_binary(host) do

@@ -734,7 +734,7 @@ defmodule Pleroma.Notification do
def mark_as_read?(activity, target_user) do
user = Activity.user_actor(activity)
User.mutes_user?(target_user, user) || CommonAPI.thread_muted?(target_user, activity)
User.mutes_user?(target_user, user) || CommonAPI.thread_muted?(activity, target_user)
end
def for_user_and_activity(user, activity) do

@@ -99,21 +99,24 @@ defmodule Pleroma.Object do
def get_by_id(nil), do: nil
def get_by_id(id), do: Repo.get(Object, id)
@spec get_by_id_and_maybe_refetch(integer(), list()) :: Object.t() | nil
def get_by_id_and_maybe_refetch(id, opts \\ []) do
%{updated_at: updated_at} = object = get_by_id(id)
with %Object{updated_at: updated_at} = object <- get_by_id(id) do
if opts[:interval] &&
NaiveDateTime.diff(NaiveDateTime.utc_now(), updated_at) > opts[:interval] do
case Fetcher.refetch_object(object) do
{:ok, %Object{} = object} ->
object
if opts[:interval] &&
NaiveDateTime.diff(NaiveDateTime.utc_now(), updated_at) > opts[:interval] do
case Fetcher.refetch_object(object) do
{:ok, %Object{} = object} ->
object
e ->
Logger.error("Couldn't refresh #{object.data["id"]}:\n#{inspect(e)}")
object
e ->
Logger.error("Couldn't refresh #{object.data["id"]}:\n#{inspect(e)}")
object
end
else
object
end
else
object
nil -> nil
end
end

@@ -59,6 +59,7 @@ defmodule Pleroma.Object.Fetcher do
end
# Note: will create a Create activity, which we need internally at the moment.
@spec fetch_object_from_id(String.t(), list()) :: {:ok, Object.t()} | {:error | :reject, any()}
def fetch_object_from_id(id, options \\ []) do
with {_, nil} <- {:fetch_object, Object.get_cached_by_ap_id(id)},
{_, true} <- {:allowed_depth, Federator.allowed_thread_distance?(options[:depth])},

@@ -134,7 +134,10 @@ defmodule Pleroma.Object.Updater do
else
%{updated_object: updated_data} =
updated_data
|> maybe_update_history(original_data, updated: updated, use_history_in_new_object?: false)
|> maybe_update_history(original_data,
updated: updated,
use_history_in_new_object?: false
)
updated_data
|> Map.put("updated", date)

@@ -16,6 +16,6 @@ defmodule Pleroma.Search do
def healthcheck_endpoints do
search_module = Pleroma.Config.get([Pleroma.Search, :module])
search_module.healthcheck_endpoints
search_module.healthcheck_endpoints()
end
end

@@ -10,6 +10,14 @@ defmodule Pleroma.Signature do
alias Pleroma.User
alias Pleroma.Web.ActivityPub.ActivityPub
import Plug.Conn, only: [put_req_header: 3]
@http_signatures_impl Application.compile_env(
:pleroma,
[__MODULE__, :http_signatures_impl],
HTTPSignatures
)
@known_suffixes ["/publickey", "/main-key"]
def key_id_to_actor_id(key_id) do
@@ -85,4 +93,48 @@
def signed_date(%NaiveDateTime{} = date) do
Timex.format!(date, "{WDshort}, {0D} {Mshort} {YYYY} {h24}:{m}:{s} GMT")
end
@spec validate_signature(Plug.Conn.t(), String.t()) :: boolean()
def validate_signature(%Plug.Conn{} = conn, request_target) do
# Newer drafts for HTTP signatures now use @request-target instead of the
# old (request-target). We'll now support both for incoming signatures.
conn =
conn
|> put_req_header("(request-target)", request_target)
|> put_req_header("@request-target", request_target)
@http_signatures_impl.validate_conn(conn)
end
@spec validate_signature(Plug.Conn.t()) :: boolean()
def validate_signature(%Plug.Conn{} = conn) do
# This (request-target) is non-standard, but many implementations do it
# this way due to a misinterpretation of
# https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures-06
# "path" was interpreted as not having the query, though later examples
# show that it must be the absolute path + query. This behavior is kept to
# make sure most software (Pleroma itself, Mastodon, and probably others)
# do not break.
request_target = Enum.join([String.downcase(conn.method), conn.request_path], " ")
# This is the proper way to build the @request-target, as expected by
# many HTTP signature libraries, clarified in the following draft:
# https://www.ietf.org/archive/id/draft-ietf-httpbis-message-signatures-11.html#section-2.2.6
# It is the same as before, but containing the query part as well.
proper_target = Enum.join([request_target, "?", conn.query_string], "")
cond do
# Normal, non-standard behavior but expected by Pleroma and more.
validate_signature(conn, request_target) ->
true
# Has query string and the previous one failed: let's try the standard.
conn.query_string != "" ->
validate_signature(conn, proper_target)
# If there's no query string and signature fails, it's rotten.
true ->
false
end
end
end
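A minimal sketch (hypothetical plug, not part of this commit) of how the fallback above plays out for a caller: the non-standard `(request-target)` form is tried first, and only requests carrying a query string get a second attempt with the standards-compliant `@request-target` form:

```elixir
defmodule MyApp.Plugs.VerifySignature do
  # Hypothetical caller for illustration; Pleroma's real entry point is
  # Pleroma.Web.Plugs.HTTPSignaturePlug.
  import Plug.Conn

  def init(opts), do: opts

  def call(conn, _opts) do
    if Pleroma.Signature.validate_signature(conn) do
      assign(conn, :valid_signature, true)
    else
      conn |> send_resp(401, "invalid signature") |> halt()
    end
  end
end
```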

@@ -39,7 +39,7 @@ defmodule Pleroma.Telemetry.Logger do
_,
_
) do
Logger.error(fn ->
Logger.debug(fn ->
"Connection pool failed to reclaim any connections due to all of them being in use. It will have to drop requests for opening connections to new hosts"
end)
end
@@ -70,7 +70,7 @@
%{key: key},
_
) do
Logger.warning(fn ->
Logger.debug(fn ->
"Pool worker for #{key}: Client #{inspect(client_pid)} died before releasing the connection with #{inspect(reason)}"
end)
end

@@ -249,14 +249,16 @@ defmodule Pleroma.Upload do
defp url_from_spec(_upload, _base_url, {:url, url}), do: url
@spec base_url() :: binary
def base_url do
uploader = @config_impl.get([Pleroma.Upload, :uploader])
upload_base_url = @config_impl.get([Pleroma.Upload, :base_url])
upload_fallback_url = Pleroma.Web.Endpoint.url() <> "/media/"
upload_base_url = @config_impl.get([Pleroma.Upload, :base_url]) || upload_fallback_url
public_endpoint = @config_impl.get([uploader, :public_endpoint])
case uploader do
Pleroma.Uploaders.Local ->
upload_base_url || Pleroma.Web.Endpoint.url() <> "/media/"
upload_base_url
Pleroma.Uploaders.S3 ->
bucket = @config_impl.get([Pleroma.Uploaders.S3, :bucket])
@@ -268,11 +270,14 @@ defmodule Pleroma.Upload do
!is_nil(truncated_namespace) ->
truncated_namespace
!is_nil(namespace) ->
!is_nil(namespace) and !is_nil(bucket) ->
namespace <> ":" <> bucket
true ->
!is_nil(bucket) ->
bucket
true ->
""
end
if public_endpoint do
@@ -285,7 +290,7 @@ defmodule Pleroma.Upload do
@config_impl.get([Pleroma.Uploaders.IPFS, :get_gateway_url])
_ ->
public_endpoint || upload_base_url || Pleroma.Web.Endpoint.url() <> "/media/"
public_endpoint || upload_base_url
end
end
end

@@ -16,7 +16,9 @@ defmodule Pleroma.Upload.Filter.Exiftool.StripLocation do
def filter(%Pleroma.Upload{tempfile: file, content_type: "image" <> _}) do
try do
case System.cmd("exiftool", ["-overwrite_original", "-gps:all=", file], parallelism: true) do
case System.cmd("exiftool", ["-overwrite_original", "-gps:all=", "-png:all=", file],
parallelism: true
) do
{_response, 0} -> {:ok, :filtered}
{error, 1} -> {:error, error}
end

@@ -38,6 +38,8 @@ defmodule Pleroma.User do
alias Pleroma.Web.OAuth
alias Pleroma.Web.RelMe
alias Pleroma.Workers.BackgroundWorker
alias Pleroma.Workers.DeleteWorker
alias Pleroma.Workers.UserRefreshWorker
require Logger
require Pleroma.Constants
@@ -1981,7 +1983,7 @@ defmodule Pleroma.User do
def delete(%User{} = user) do
# Purge the user immediately
purge(user)
BackgroundWorker.enqueue("delete_user", %{"user_id" => user.id})
DeleteWorker.enqueue("delete_user", %{"user_id" => user.id})
end
# *Actually* delete the user from the DB
@@ -2154,20 +2156,20 @@ defmodule Pleroma.User do
def fetch_by_ap_id(ap_id), do: ActivityPub.make_user_from_ap_id(ap_id)
@spec get_or_fetch_by_ap_id(String.t()) :: {:ok, User.t()} | {:error, any()}
def get_or_fetch_by_ap_id(ap_id) do
cached_user = get_cached_by_ap_id(ap_id)
with cached_user = %User{} <- get_cached_by_ap_id(ap_id),
_ <- maybe_refresh(cached_user) do
{:ok, cached_user}
else
_ -> fetch_by_ap_id(ap_id)
end
end
maybe_fetched_user = needs_update?(cached_user) && fetch_by_ap_id(ap_id)
case {cached_user, maybe_fetched_user} do
{_, {:ok, %User{} = user}} ->
{:ok, user}
{%User{} = user, _} ->
{:ok, user}
_ ->
{:error, :not_found}
defp maybe_refresh(user) do
if needs_update?(user) do
UserRefreshWorker.new(%{"ap_id" => user.ap_id})
|> Oban.insert()
end
end

@@ -14,9 +14,10 @@ defmodule Pleroma.User.Backup do
alias Pleroma.Activity
alias Pleroma.Bookmark
alias Pleroma.Config
alias Pleroma.Repo
alias Pleroma.Uploaders.Uploader
alias Pleroma.User
alias Pleroma.User.Backup.State
alias Pleroma.Web.ActivityPub.ActivityPub
alias Pleroma.Web.ActivityPub.Transmogrifier
alias Pleroma.Web.ActivityPub.UserView
@@ -29,71 +30,111 @@ defmodule Pleroma.User.Backup do
field(:file_name, :string)
field(:file_size, :integer, default: 0)
field(:processed, :boolean, default: false)
field(:state, State, default: :invalid)
field(:processed_number, :integer, default: 0)
field(:tempdir, :string)
belongs_to(:user, User, type: FlakeId.Ecto.CompatType)
timestamps()
end
@config_impl Application.compile_env(:pleroma, [__MODULE__, :config_impl], Pleroma.Config)
@doc """
Schedules a job to backup a user if the number of backup requests has not exceeded the limit.
def create(user, admin_id \\ nil) do
with :ok <- validate_limit(user, admin_id),
{:ok, backup} <- user |> new() |> Repo.insert() do
BackupWorker.process(backup, admin_id)
Admins can directly call new/1 and schedule_backup/1 to bypass the limit.
"""
@spec user(User.t()) :: {:ok, t()} | {:error, any()}
def user(user) do
days = Config.get([__MODULE__, :limit_days])
with true <- permitted?(user),
%__MODULE__{} = backup <- new(user),
{:ok, inserted_backup} <- Repo.insert(backup),
{:ok, %Oban.Job{}} <- schedule_backup(inserted_backup) do
{:ok, inserted_backup}
else
false ->
{:error,
dngettext(
"errors",
"Last export was less than a day ago",
"Last export was less than %{days} days ago",
days,
days: days
)}
e ->
{:error, e}
end
end
@doc "Generates a %Backup{} for a user with a random file name"
@spec new(User.t()) :: t()
def new(user) do
rand_str = :crypto.strong_rand_bytes(32) |> Base.url_encode64(padding: false)
datetime = Calendar.NaiveDateTime.Format.iso8601_basic(NaiveDateTime.utc_now())
name = "archive-#{user.nickname}-#{datetime}-#{rand_str}.zip"
%__MODULE__{
user_id: user.id,
content_type: "application/zip",
file_name: name,
state: :pending
tempdir: tempdir(),
user: user
}
end
def delete(backup) do
uploader = Pleroma.Config.get([Pleroma.Upload, :uploader])
@doc "Schedules the execution of the provided backup"
@spec schedule_backup(t()) :: {:ok, Oban.Job.t()} | {:error, any()}
def schedule_backup(backup) do
with false <- is_nil(backup.id) do
%{"op" => "process", "backup_id" => backup.id}
|> BackupWorker.new()
|> Oban.insert()
else
true ->
{:error, "Backup is missing id. Please insert it into the Repo first."}
e ->
{:error, e}
end
end
@doc "Deletes the backup archive file and removes the database record"
@spec delete_archive(t()) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()}
def delete_archive(backup) do
uploader = Config.get([Pleroma.Upload, :uploader])
with :ok <- uploader.delete_file(Path.join("backups", backup.file_name)) do
Repo.delete(backup)
end
end
defp validate_limit(_user, admin_id) when is_binary(admin_id), do: :ok
@doc "Schedules a job to delete the backup archive"
@spec schedule_delete(t()) :: {:ok, Oban.Job.t()} | {:error, any()}
def schedule_delete(backup) do
days = Config.get([__MODULE__, :purge_after_days])
time = 60 * 60 * 24 * days
scheduled_at = Calendar.NaiveDateTime.add!(backup.inserted_at, time)
defp validate_limit(user, nil) do
case get_last(user.id) do
%__MODULE__{inserted_at: inserted_at} ->
days = Pleroma.Config.get([__MODULE__, :limit_days])
diff = Timex.diff(NaiveDateTime.utc_now(), inserted_at, :days)
%{"op" => "delete", "backup_id" => backup.id}
|> BackupWorker.new(scheduled_at: scheduled_at)
|> Oban.insert()
end
if diff > days do
:ok
else
{:error,
dngettext(
"errors",
"Last export was less than a day ago",
"Last export was less than %{days} days ago",
days,
days: days
)}
end
nil ->
:ok
defp permitted?(user) do
with {_, %__MODULE__{inserted_at: inserted_at}} <- {:last, get_last(user)},
days = Config.get([__MODULE__, :limit_days]),
diff = Timex.diff(NaiveDateTime.utc_now(), inserted_at, :days),
{_, true} <- {:diff, diff > days} do
true
else
{:last, nil} -> true
{:diff, false} -> false
end
end
def get_last(user_id) do
@doc "Returns last backup for the provided user"
@spec get_last(User.t()) :: t()
def get_last(%User{id: user_id}) do
__MODULE__
|> where(user_id: ^user_id)
|> order_by(desc: :id)
@@ -101,6 +142,8 @@ defmodule Pleroma.User.Backup do
|> Repo.one()
end
@doc "Lists all existing backups for a user"
@spec list(User.t()) :: [Ecto.Schema.t() | term()]
def list(%User{id: user_id}) do
__MODULE__
|> where(user_id: ^user_id)
@@ -108,149 +151,107 @@ defmodule Pleroma.User.Backup do
|> Repo.all()
end
def remove_outdated(%__MODULE__{id: latest_id, user_id: user_id}) do
__MODULE__
|> where(user_id: ^user_id)
|> where([b], b.id != ^latest_id)
|> Repo.all()
|> Enum.each(&BackupWorker.delete/1)
@doc "Schedules deletion of all but the the most recent backup"
@spec remove_outdated(User.t()) :: :ok
def remove_outdated(user) do
with %__MODULE__{} = latest_backup <- get_last(user) do
__MODULE__
|> where(user_id: ^user.id)
|> where([b], b.id != ^latest_backup.id)
|> Repo.all()
|> Enum.each(&schedule_delete/1)
else
_ -> :ok
end
end
def get(id), do: Repo.get(__MODULE__, id)
defp set_state(backup, state, processed_number \\ nil) do
struct =
%{state: state}
|> Pleroma.Maps.put_if_present(:processed_number, processed_number)
def get_by_id(id), do: Repo.get(__MODULE__, id)
@doc "Generates changeset for %Pleroma.User.Backup{}"
@spec changeset(%__MODULE__{}, map()) :: %Ecto.Changeset{}
def changeset(backup \\ %__MODULE__{}, attrs) do
backup
|> cast(struct, [:state, :processed_number])
|> cast(attrs, [:content_type, :file_name, :file_size, :processed, :tempdir])
end
@doc "Updates the backup record"
@spec update_record(%__MODULE__{}, map()) :: {:ok, %__MODULE__{}} | {:error, %Ecto.Changeset{}}
def update_record(%__MODULE__{} = backup, attrs) do
backup
|> changeset(attrs)
|> Repo.update()
end
def process(
%__MODULE__{} = backup,
processor_module \\ __MODULE__.Processor
) do
set_state(backup, :running, 0)
current_pid = self()
task =
Task.Supervisor.async_nolink(
Pleroma.TaskSupervisor,
processor_module,
:do_process,
[backup, current_pid]
)
wait_backup(backup, backup.processed_number, task)
end
defp wait_backup(backup, current_processed, task) do
wait_time = @config_impl.get([__MODULE__, :process_wait_time])
receive do
{:progress, new_processed} ->
total_processed = current_processed + new_processed
set_state(backup, :running, total_processed)
wait_backup(backup, total_processed, task)
{:DOWN, _ref, _proc, _pid, reason} ->
backup = get(backup.id)
if reason != :normal do
Logger.error("Backup #{backup.id} process ended abnormally: #{inspect(reason)}")
{:ok, backup} = set_state(backup, :failed)
cleanup(backup)
{:error,
%{
backup: backup,
reason: :exit,
details: reason
}}
else
{:ok, backup}
end
after
wait_time ->
Logger.error(
"Backup #{backup.id} timed out after no response for #{wait_time}ms, terminating"
)
Task.Supervisor.terminate_child(Pleroma.TaskSupervisor, task.pid)
{:ok, backup} = set_state(backup, :failed)
cleanup(backup)
{:error,
%{
backup: backup,
reason: :timeout
}}
end
end
@files [
'actor.json',
'outbox.json',
'likes.json',
'bookmarks.json',
'followers.json',
'following.json'
~c"actor.json",
~c"outbox.json",
~c"likes.json",
~c"bookmarks.json",
~c"followers.json",
~c"following.json"
]
@spec export(Pleroma.User.Backup.t(), pid()) :: {:ok, String.t()} | :error
def export(%__MODULE__{} = backup, caller_pid) do
backup = Repo.preload(backup, :user)
dir = backup_tempdir(backup)
with :ok <- File.mkdir(dir),
:ok <- actor(dir, backup.user, caller_pid),
:ok <- statuses(dir, backup.user, caller_pid),
:ok <- likes(dir, backup.user, caller_pid),
:ok <- bookmarks(dir, backup.user, caller_pid),
:ok <- followers(dir, backup.user, caller_pid),
:ok <- following(dir, backup.user, caller_pid),
{:ok, zip_path} <- :zip.create(backup.file_name, @files, cwd: dir),
{:ok, _} <- File.rm_rf(dir) do
{:ok, zip_path}
@spec run(t()) :: {:ok, t()} | {:error, :failed}
def run(%__MODULE__{} = backup) do
backup = Repo.preload(backup, :user)
tempfile = Path.join([backup.tempdir, backup.file_name])
with {_, :ok} <- {:mkdir, File.mkdir_p(backup.tempdir)},
{_, :ok} <- {:actor, actor(backup.tempdir, backup.user)},
{_, :ok} <- {:statuses, statuses(backup.tempdir, backup.user)},
{_, :ok} <- {:likes, likes(backup.tempdir, backup.user)},
{_, :ok} <- {:bookmarks, bookmarks(backup.tempdir, backup.user)},
{_, :ok} <- {:followers, followers(backup.tempdir, backup.user)},
{_, :ok} <- {:following, following(backup.tempdir, backup.user)},
{_, {:ok, _zip_path}} <-
{:zip, :zip.create(to_charlist(tempfile), @files, cwd: to_charlist(backup.tempdir))},
{_, {:ok, %File.Stat{size: zip_size}}} <- {:filestat, File.stat(tempfile)},
{:ok, updated_backup} <- update_record(backup, %{file_size: zip_size}) do
{:ok, updated_backup}
else
_ -> :error
_ ->
File.rm_rf(backup.tempdir)
{:error, :failed}
end
end
def dir(name) do
dir = Pleroma.Config.get([__MODULE__, :dir]) || System.tmp_dir!()
Path.join(dir, name)
defp tempdir do
rand = :crypto.strong_rand_bytes(8) |> Base.url_encode64(padding: false)
subdir = "backup-#{rand}"
case Config.get([__MODULE__, :tempdir]) do
nil ->
Path.join([System.tmp_dir!(), subdir])
path ->
Path.join([path, subdir])
end
end
def upload(%__MODULE__{} = backup, zip_path) do
uploader = Pleroma.Config.get([Pleroma.Upload, :uploader])
@doc "Uploads the completed backup and marks it as processed"
@spec upload(t()) :: {:ok, t()}
def upload(%__MODULE__{tempdir: tempdir} = backup) when is_binary(tempdir) do
uploader = Config.get([Pleroma.Upload, :uploader])
upload = %Pleroma.Upload{
name: backup.file_name,
tempfile: zip_path,
tempfile: Path.join([tempdir, backup.file_name]),
content_type: backup.content_type,
path: Path.join("backups", backup.file_name)
}
with {:ok, _} <- Pleroma.Uploaders.Uploader.put_file(uploader, upload),
:ok <- File.rm(zip_path) do
{:ok, upload}
with {:ok, _} <- Uploader.put_file(uploader, upload),
{:ok, uploaded_backup} <- update_record(backup, %{processed: true}),
{:ok, _} <- File.rm_rf(tempdir) do
{:ok, uploaded_backup}
end
end
defp actor(dir, user, caller_pid) do
defp actor(dir, user) do
with {:ok, json} <-
UserView.render("user.json", %{user: user})
|> Map.merge(%{"likes" => "likes.json", "bookmarks" => "bookmarks.json"})
|> Jason.encode() do
send(caller_pid, {:progress, 1})
File.write(Path.join(dir, "actor.json"), json)
end
end
@@ -269,22 +270,10 @@ defmodule Pleroma.User.Backup do
)
end
defp should_report?(num, chunk_size), do: rem(num, chunk_size) == 0
defp backup_tempdir(backup) do
name = String.trim_trailing(backup.file_name, ".zip")
dir(name)
end
defp cleanup(backup) do
dir = backup_tempdir(backup)
File.rm_rf(dir)
end
defp write(query, dir, name, fun, caller_pid) do
defp write(query, dir, name, fun) do
path = Path.join(dir, "#{name}.json")
chunk_size = Pleroma.Config.get([__MODULE__, :process_chunk_size])
chunk_size = Config.get([__MODULE__, :process_chunk_size])
with {:ok, file} <- File.open(path, [:write, :utf8]),
:ok <- write_header(file, name) do
@@ -300,10 +289,6 @@ defmodule Pleroma.User.Backup do
end),
{:ok, str} <- Jason.encode(data),
:ok <- IO.write(file, str <> ",\n") do
if should_report?(acc + 1, chunk_size) do
send(caller_pid, {:progress, chunk_size})
end
acc + 1
else
{:error, e} ->
@@ -318,31 +303,29 @@ defmodule Pleroma.User.Backup do
end
end)
send(caller_pid, {:progress, rem(total, chunk_size)})
with :ok <- :file.pwrite(file, {:eof, -2}, "\n],\n \"totalItems\": #{total}}") do
File.close(file)
end
end
end
defp bookmarks(dir, %{id: user_id} = _user, caller_pid) do
defp bookmarks(dir, %{id: user_id} = _user) do
Bookmark
|> where(user_id: ^user_id)
|> join(:inner, [b], activity in assoc(b, :activity))
|> select([b, a], %{id: b.id, object: fragment("(?)->>'object'", a.data)})
|> write(dir, "bookmarks", fn a -> {:ok, a.object} end, caller_pid)
|> write(dir, "bookmarks", fn a -> {:ok, a.object} end)
end
defp likes(dir, user, caller_pid) do
defp likes(dir, user) do
user.ap_id
|> Activity.Queries.by_actor()
|> Activity.Queries.by_type("Like")
|> select([like], %{id: like.id, object: fragment("(?)->>'object'", like.data)})
|> write(dir, "likes", fn a -> {:ok, a.object} end, caller_pid)
|> write(dir, "likes", fn a -> {:ok, a.object} end)
end
defp statuses(dir, user, caller_pid) do
defp statuses(dir, user) do
opts =
%{}
|> Map.put(:type, ["Create", "Announce"])
@@ -362,52 +345,17 @@ defmodule Pleroma.User.Backup do
with {:ok, activity} <- Transmogrifier.prepare_outgoing(a.data) do
{:ok, Map.delete(activity, "@context")}
end
end,
caller_pid
end
)
end
defp followers(dir, user, caller_pid) do
defp followers(dir, user) do
User.get_followers_query(user)
|> write(dir, "followers", fn a -> {:ok, a.ap_id} end, caller_pid)
|> write(dir, "followers", fn a -> {:ok, a.ap_id} end)
end
defp following(dir, user, caller_pid) do
defp following(dir, user) do
User.get_friends_query(user)
|> write(dir, "following", fn a -> {:ok, a.ap_id} end, caller_pid)
end
end
defmodule Pleroma.User.Backup.ProcessorAPI do
@callback do_process(%Pleroma.User.Backup{}, pid()) ::
{:ok, %Pleroma.User.Backup{}} | {:error, any()}
end
defmodule Pleroma.User.Backup.Processor do
@behaviour Pleroma.User.Backup.ProcessorAPI
alias Pleroma.Repo
alias Pleroma.User.Backup
import Ecto.Changeset
@impl true
def do_process(backup, current_pid) do
with {:ok, zip_file} <- Backup.export(backup, current_pid),
{:ok, %{size: size}} <- File.stat(zip_file),
{:ok, _upload} <- Backup.upload(backup, zip_file) do
backup
|> cast(
%{
file_size: size,
processed: true,
state: :complete
},
[:file_size, :processed, :state]
)
|> Repo.update()
else
e -> {:error, e}
end
|> write(dir, "following", fn a -> {:ok, a.ap_id} end)
end
end

@@ -31,7 +31,7 @@ defmodule Pleroma.User.Import do
identifiers,
fn identifier ->
with {:ok, %User{} = blocked} <- User.get_or_fetch(identifier),
{:ok, _block} <- CommonAPI.block(blocker, blocked) do
{:ok, _block} <- CommonAPI.block(blocked, blocker) do
blocked
else
error -> handle_error(:blocks_import, identifier, error)
@@ -46,7 +46,7 @@ defmodule Pleroma.User.Import do
fn identifier ->
with {:ok, %User{} = followed} <- User.get_or_fetch(identifier),
{:ok, follower, followed} <- User.maybe_direct_follow(follower, followed),
{:ok, _, _, _} <- CommonAPI.follow(follower, followed) do
{:ok, _, _, _} <- CommonAPI.follow(followed, follower) do
followed
else
error -> handle_error(:follow_import, identifier, error)

@@ -163,7 +163,7 @@ defmodule Pleroma.Web do
"""
def safe_render_many(collection, view, template, assigns \\ %{}) do
Enum.map(collection, fn resource ->
as = Map.get(assigns, :as) || view.__resource__
as = Map.get(assigns, :as) || view.__resource__()
assigns = Map.put(assigns, as, resource)
safe_render(view, template, assigns)
end)

@@ -1661,7 +1661,6 @@ defmodule Pleroma.Web.ActivityPub.ActivityPub do
}}
else
{:error, _} = e -> e
e -> {:error, e}
end
end

@@ -293,8 +293,15 @@ defmodule Pleroma.Web.ActivityPub.ActivityPubController do
json(conn, "ok")
end
def inbox(%{assigns: %{valid_signature: false}, req_headers: req_headers} = conn, params) do
Federator.incoming_ap_doc(%{req_headers: req_headers, params: params})
def inbox(%{assigns: %{valid_signature: false}} = conn, params) do
Federator.incoming_ap_doc(%{
method: conn.method,
req_headers: conn.req_headers,
request_path: conn.request_path,
params: params,
query_string: conn.query_string
})
json(conn, "ok")
end

@@ -204,7 +204,7 @@ defmodule Pleroma.Web.ActivityPub.MRF do
if function_exported?(policy, :config_description, 0) do
description =
@default_description
|> Map.merge(policy.config_description)
|> Map.merge(policy.config_description())
|> Map.put(:group, :pleroma)
|> Map.put(:tab, :mrf)
|> Map.put(:type, :group)

@@ -49,7 +49,7 @@ defmodule Pleroma.Web.ActivityPub.MRF.FollowBotPolicy do
"#{__MODULE__}: Follow request from #{follower.nickname} to #{user.nickname}"
)
CommonAPI.follow(follower, user)
CommonAPI.follow(user, follower)
end
end)

@@ -137,7 +137,6 @@ defmodule Pleroma.Web.ActivityPub.MRF.NsfwApiPolicy do
{:ok, object}
else
{:nsfw, _data} -> handle_nsfw(object)
_ -> {:reject, "NSFW: Attachment rejected"}
end
end

Some files were not shown because too many files have changed in this diff.