diff --git a/.github/workflows/build-container-image.yml b/.github/workflows/build-container-image.yml
new file mode 100644
index 000000000..599879460
--- /dev/null
+++ b/.github/workflows/build-container-image.yml
@@ -0,0 +1,89 @@
+on:
+ workflow_call:
+ inputs:
+ platforms:
+ required: true
+ type: string
+ use_native_arm64_builder:
+ type: boolean
+ push_to_images:
+ type: string
+ flavor:
+ type: string
+ tags:
+ type: string
+ labels:
+ type: string
+
+jobs:
+ build-image:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - uses: docker/setup-qemu-action@v2
+ if: contains(inputs.platforms, 'linux/arm64') && !inputs.use_native_arm64_builder
+
+ - uses: docker/setup-buildx-action@v2
+ id: buildx
+ if: ${{ !(inputs.use_native_arm64_builder && contains(inputs.platforms, 'linux/arm64')) }}
+
+ - name: Start a local Docker Builder
+ if: inputs.use_native_arm64_builder && contains(inputs.platforms, 'linux/arm64')
+ run: |
+ docker run --rm -d --name buildkitd -p 1234:1234 --privileged moby/buildkit:latest --addr tcp://0.0.0.0:1234
+
+ - uses: docker/setup-buildx-action@v2
+ id: buildx-native
+ if: inputs.use_native_arm64_builder && contains(inputs.platforms, 'linux/arm64')
+ with:
+ driver: remote
+ endpoint: tcp://localhost:1234
+ platforms: linux/amd64
+ append: |
+ - endpoint: tcp://${{ vars.DOCKER_BUILDER_HETZNER_ARM64_01_HOST }}:13865
+ platforms: linux/arm64
+ name: mastodon-docker-builder-arm64-01
+ driver-opts:
+ - servername=mastodon-docker-builder-arm64-01
+ env:
+ BUILDER_NODE_1_AUTH_TLS_CACERT: ${{ secrets.DOCKER_BUILDER_HETZNER_ARM64_01_CACERT }}
+ BUILDER_NODE_1_AUTH_TLS_CERT: ${{ secrets.DOCKER_BUILDER_HETZNER_ARM64_01_CERT }}
+ BUILDER_NODE_1_AUTH_TLS_KEY: ${{ secrets.DOCKER_BUILDER_HETZNER_ARM64_01_KEY }}
+
+ - name: Log in to Docker Hub
+ if: contains(inputs.push_to_images, 'tootsuite')
+ uses: docker/login-action@v2
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Log in to the Github Container registry
+ if: contains(inputs.push_to_images, 'ghcr.io')
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - uses: docker/metadata-action@v4
+ id: meta
+ if: ${{ inputs.push_to_images != '' }}
+ with:
+ images: ${{ inputs.push_to_images }}
+ flavor: ${{ inputs.flavor }}
+ tags: ${{ inputs.tags }}
+ labels: ${{ inputs.labels }}
+
+ - uses: docker/build-push-action@v4
+ with:
+ context: .
+ platforms: ${{ inputs.platforms }}
+ provenance: false
+ builder: ${{ steps.buildx.outputs.name || steps.buildx-native.outputs.name }}
+ push: ${{ inputs.push_to_images != '' }}
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
diff --git a/.github/workflows/build-image.yml b/.github/workflows/build-image.yml
deleted file mode 100644
index 9500c5e22..000000000
--- a/.github/workflows/build-image.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-name: Build container image
-on:
- workflow_dispatch:
- push:
- branches:
- - 'main'
- tags:
- - '*'
- pull_request:
- paths:
- - .github/workflows/build-image.yml
- - Dockerfile
-permissions:
- contents: read
- packages: write
-
-jobs:
- build-image:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - uses: docker/setup-qemu-action@v2
- - uses: docker/setup-buildx-action@v2
-
- - name: Log in to Docker Hub
- uses: docker/login-action@v2
- with:
- username: ${{ secrets.DOCKERHUB_USERNAME }}
- password: ${{ secrets.DOCKERHUB_TOKEN }}
- if: github.repository == 'mastodon/mastodon' && github.event_name != 'pull_request'
-
- - name: Log in to the Github Container registry
- uses: docker/login-action@v2
- with:
- registry: ghcr.io
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
- if: github.repository == 'mastodon/mastodon' && github.event_name != 'pull_request'
-
- - uses: docker/metadata-action@v4
- id: meta
- with:
- images: |
- tootsuite/mastodon
- ghcr.io/mastodon/mastodon
- flavor: |
- latest=auto
- tags: |
- type=edge,branch=main
- type=pep440,pattern={{raw}}
- type=pep440,pattern=v{{major}}.{{minor}}
- type=ref,event=pr
-
- - uses: docker/build-push-action@v4
- with:
- context: .
- platforms: linux/amd64,linux/arm64
- provenance: false
- builder: ${{ steps.buildx.outputs.name }}
- push: ${{ github.repository == 'mastodon/mastodon' && github.event_name != 'pull_request' }}
- tags: ${{ steps.meta.outputs.tags }}
- labels: ${{ steps.meta.outputs.labels }}
- cache-from: type=gha
- cache-to: type=gha,mode=max
diff --git a/.github/workflows/build-releases.yml b/.github/workflows/build-releases.yml
new file mode 100644
index 000000000..72a12f4df
--- /dev/null
+++ b/.github/workflows/build-releases.yml
@@ -0,0 +1,25 @@
+name: Build container release images
+on:
+ push:
+ tags:
+ - '*'
+
+permissions:
+ contents: read
+ packages: write
+
+jobs:
+ build-image:
+ uses: ./.github/workflows/build-container-image.yml
+ with:
+ platforms: linux/amd64,linux/arm64
+ use_native_arm64_builder: true
+ push_to_images: |
+ tootsuite/mastodon
+ ghcr.io/mastodon/mastodon
+ flavor: |
+ latest=false
+ tags: |
+ type=pep440,pattern={{raw}}
+ type=pep440,pattern=v{{major}}.{{minor}}
+ secrets: inherit
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a6331d4c0..583704e50 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,51 @@ Changelog
All notable changes to this project will be documented in this file.
+## End of life notice
+
+**The 4.0.x branch will not receive any update after 2023-10-31.**
+This means that no security fix will be made available for this branch after this date, and you will need to update to a more recent version (such as the 4.1.x branch) to receive security fixes.
+
+## [4.0.9] - 2023-09-05
+
+### Changed
+
+- Change remote report processing to accept reports with long comments, but truncate them ([ThisIsMissEm](https://github.com/mastodon/mastodon/pull/25028))
+
+### Fixed
+
+- **Fix blocking subdomains of an already-blocked domain** ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26392))
+- Fix `/api/v1/timelines/tag/:hashtag` allowing for unauthenticated access when public preview is disabled ([danielmbrasil](https://github.com/mastodon/mastodon/pull/26237))
+- Fix inefficiencies in `PlainTextFormatter` ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26727))
+
+## [4.0.8] - 2023-07-31
+
+### Fixed
+
+- Fix memory leak in streaming server ([ThisIsMissEm](https://github.com/mastodon/mastodon/pull/26228))
+- Fix wrong filters sometimes applying in streaming ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26159), [ThisIsMissEm](https://github.com/mastodon/mastodon/pull/26213), [renchap](https://github.com/mastodon/mastodon/pull/26233))
+- Fix incorrect connect timeout in outgoing requests ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26116))
+
+## [4.0.7] - 2023-07-21
+
+### Added
+
+- Add check preventing Sidekiq workers from running with Makara configured ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/25850))
+
+### Changed
+
+- Change request timeout handling to use a longer deadline ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26055))
+
+### Fixed
+
+- Fix moderation interface for remote instances with a .zip TLD ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/25886))
+- Fix remote accounts being possibly persisted to database with incomplete protocol values ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/25886))
+- Fix trending publishers table not rendering correctly on narrow screens ([vmstan](https://github.com/mastodon/mastodon/pull/25945))
+
+### Security
+
+- Fix CSP headers being unintentionally wide ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26105))
+
## [4.0.6] - 2023-07-07
### Fixed
diff --git a/Dockerfile b/Dockerfile
index d560f1888..172e1cd2c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -46,7 +46,7 @@ RUN apt-get update && \
ENV PATH="${PATH}:/opt/ruby/bin:/opt/node/bin"
-RUN npm install -g npm@latest && \
+RUN npm install -g npm@9 && \
npm install -g yarn && \
gem install bundler && \
apt-get update && \
diff --git a/SECURITY.md b/SECURITY.md
index ccc7c1034..ebb05ba2c 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -10,8 +10,9 @@ A "vulnerability in Mastodon" is a vulnerability in the code distributed through
## Supported Versions
-| Version | Supported |
-| ------- | ----------|
-| 4.0.x | Yes |
-| 3.5.x | Yes |
-| < 3.5 | No |
+| Version | Supported |
+| ------- | ------------------ |
+| 4.1.x | Yes |
+| 4.0.x | Until 2023-10-31 |
+| 3.5.x | Until 2023-12-31 |
+| < 3.5 | No |
diff --git a/app/controllers/admin/domain_blocks_controller.rb b/app/controllers/admin/domain_blocks_controller.rb
index 16defc1ea..d1a9ea601 100644
--- a/app/controllers/admin/domain_blocks_controller.rb
+++ b/app/controllers/admin/domain_blocks_controller.rb
@@ -25,7 +25,7 @@ module Admin
@domain_block.errors.delete(:domain)
render :new
else
- if existing_domain_block.present?
+ if existing_domain_block.present? && existing_domain_block.domain == TagManager.instance.normalize_domain(@domain_block.domain.strip)
@domain_block = existing_domain_block
@domain_block.update(resource_params)
end
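
The guard added above matters because `existing_domain_block` can be a block covering a parent domain, which is how the controller detects that a submitted domain is already blocked. A rough illustration of the new comparison, not part of the patch, with hypothetical records where `example.com` is already blocked and an admin submits `sub.example.com`:

    existing_domain_block = DomainBlock.find_by(domain: 'example.com') # pre-existing parent-domain block
    submitted_domain      = 'sub.example.com'

    # The existing record is reused only on an exact (normalized) domain match,
    # so blocking a subdomain no longer overwrites the parent domain's block.
    existing_domain_block.present? &&
      existing_domain_block.domain == TagManager.instance.normalize_domain(submitted_domain.strip)
    # => false
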
diff --git a/app/controllers/api/v1/timelines/tag_controller.rb b/app/controllers/api/v1/timelines/tag_controller.rb
index 64a1db58d..3f41eb688 100644
--- a/app/controllers/api/v1/timelines/tag_controller.rb
+++ b/app/controllers/api/v1/timelines/tag_controller.rb
@@ -1,6 +1,7 @@
# frozen_string_literal: true
class Api::V1::Timelines::TagController < Api::BaseController
+ before_action -> { doorkeeper_authorize! :read, :'read:statuses' }, only: :show, if: :require_auth?
before_action :load_tag
after_action :insert_pagination_headers, unless: -> { @statuses.empty? }
@@ -11,6 +12,10 @@ class Api::V1::Timelines::TagController < Api::BaseController
private
+ def require_auth?
+ !Setting.timeline_preview
+ end
+
def load_tag
@tag = Tag.find_normalized(params[:id])
end
diff --git a/app/lib/activitypub/activity/flag.rb b/app/lib/activitypub/activity/flag.rb
index b0443849a..7539bda42 100644
--- a/app/lib/activitypub/activity/flag.rb
+++ b/app/lib/activitypub/activity/flag.rb
@@ -16,7 +16,7 @@ class ActivityPub::Activity::Flag < ActivityPub::Activity
@account,
target_account,
status_ids: target_statuses.nil? ? [] : target_statuses.map(&:id),
- comment: @json['content'] || '',
+ comment: report_comment,
uri: report_uri
)
end
@@ -35,4 +35,8 @@ class ActivityPub::Activity::Flag < ActivityPub::Activity
def report_uri
@json['id'] unless @json['id'].nil? || invalid_origin?(@json['id'])
end
+
+ def report_comment
+ (@json['content'] || '')[0...5000]
+ end
end
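
A quick sketch, not part of the patch, of what `report_comment` does with an over-long remote comment: the content of a Flag activity is truncated to 5,000 characters before the report is created, instead of being stored in full or rejected:

    json = { 'content' => 'a' * 6_000 }   # hypothetical Flag payload
    comment = (json['content'] || '')[0...5000]
    comment.length # => 5000
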
diff --git a/app/lib/activitypub/tag_manager.rb b/app/lib/activitypub/tag_manager.rb
index 3d6b28ef5..e05c06522 100644
--- a/app/lib/activitypub/tag_manager.rb
+++ b/app/lib/activitypub/tag_manager.rb
@@ -27,6 +27,8 @@ class ActivityPub::TagManager
when :note, :comment, :activity
return activity_account_status_url(target.account, target) if target.reblog?
short_account_status_url(target.account, target)
+ when :flag
+ target.uri
end
end
@@ -41,6 +43,8 @@ class ActivityPub::TagManager
account_status_url(target.account, target)
when :emoji
emoji_url(target)
+ when :flag
+ target.uri
end
end
diff --git a/app/lib/plain_text_formatter.rb b/app/lib/plain_text_formatter.rb
index 6fa2bc5d2..d1ff6808b 100644
--- a/app/lib/plain_text_formatter.rb
+++ b/app/lib/plain_text_formatter.rb
@@ -1,9 +1,7 @@
# frozen_string_literal: true
class PlainTextFormatter
- include ActionView::Helpers::TextHelper
-
-  NEWLINE_TAGS_RE = /(<br \/>|<br>|<\/p>)+/.freeze
+  NEWLINE_TAGS_RE = %r{(<br />|<br>|</p>)+}
attr_reader :text, :local
@@ -18,7 +16,10 @@ class PlainTextFormatter
if local?
text
else
- html_entities.decode(strip_tags(insert_newlines)).chomp
+ node = Nokogiri::HTML.fragment(insert_newlines)
+ # Elements that are entirely removed with our Sanitize config
+ node.xpath('.//iframe|.//math|.//noembed|.//noframes|.//noscript|.//plaintext|.//script|.//style|.//svg|.//xmp').remove
+ node.text.chomp
end
end
@@ -27,8 +28,4 @@ class PlainTextFormatter
def insert_newlines
text.gsub(NEWLINE_TAGS_RE) { |match| "#{match}\n" }
end
-
- def html_entities
- HTMLEntities.new
- end
end
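
A minimal usage sketch of the reworked formatter, assuming the existing `PlainTextFormatter.new(text, local)` / `#to_s` interface: remote HTML is now parsed with Nokogiri, and elements that the Sanitize configuration removes entirely (script, style, etc.) are dropped before extracting the plain text, replacing the old `strip_tags` plus HTML-entity decoding path:

    html = '<p>Hello<br>world</p><script>alert(1)</script>'
    PlainTextFormatter.new(html, false).to_s
    # => "Hello\nworld"
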
diff --git a/app/lib/request.rb b/app/lib/request.rb
index a23100ba9..442383ada 100644
--- a/app/lib/request.rb
+++ b/app/lib/request.rb
@@ -4,14 +4,22 @@ require 'ipaddr'
require 'socket'
require 'resolv'
-# Monkey-patch the HTTP.rb timeout class to avoid using a timeout block
+# Use our own timeout class to avoid using HTTP.rb's timeout block
# around the Socket#open method, since we use our own timeout blocks inside
# that method
#
# Also changes how the read timeout behaves so that it is cumulative (closer
# to HTTP::Timeout::Global, but still having distinct timeouts for other
# operation types)
-class HTTP::Timeout::PerOperation
+class PerOperationWithDeadline < HTTP::Timeout::PerOperation
+ READ_DEADLINE = 30
+
+ def initialize(*args)
+ super
+
+ @read_deadline = options.fetch(:read_deadline, READ_DEADLINE)
+ end
+
def connect(socket_class, host, port, nodelay = false)
@socket = socket_class.open(host, port)
@socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1) if nodelay
@@ -24,7 +32,7 @@ class HTTP::Timeout::PerOperation
# Read data from the socket
def readpartial(size, buffer = nil)
- @deadline ||= Process.clock_gettime(Process::CLOCK_MONOTONIC) + @read_timeout
+ @deadline ||= Process.clock_gettime(Process::CLOCK_MONOTONIC) + @read_deadline
timeout = false
loop do
@@ -33,7 +41,8 @@ class HTTP::Timeout::PerOperation
return :eof if result.nil?
remaining_time = @deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)
- raise HTTP::TimeoutError, "Read timed out after #{@read_timeout} seconds" if timeout || remaining_time <= 0
+ raise HTTP::TimeoutError, "Read timed out after #{@read_timeout} seconds" if timeout
+ raise HTTP::TimeoutError, "Read timed out after a total of #{@read_deadline} seconds" if remaining_time <= 0
return result if result != :wait_readable
# marking the socket for timeout. Why is this not being raised immediately?
@@ -46,7 +55,7 @@ class HTTP::Timeout::PerOperation
# timeout. Else, the first timeout was a proper timeout.
# This hack has to be done because io/wait#wait_readable doesn't provide a value for when
# the socket is closed by the server, and HTTP::Parser doesn't provide the limit for the chunks.
- timeout = true unless @socket.to_io.wait_readable(remaining_time)
+ timeout = true unless @socket.to_io.wait_readable([remaining_time, @read_timeout].min)
end
end
end
@@ -57,7 +66,7 @@ class Request
# We enforce a 5s timeout on DNS resolving, 5s timeout on socket opening
# and 5s timeout on the TLS handshake, meaning the worst case should take
# about 15s in total
- TIMEOUT = { connect: 5, read: 10, write: 10 }.freeze
+ TIMEOUT = { connect_timeout: 5, read_timeout: 10, write_timeout: 10, read_deadline: 30 }.freeze
include RoutingHelper
@@ -68,6 +77,7 @@ class Request
@url = Addressable::URI.parse(url).normalize
@http_client = options.delete(:http_client)
@options = options.merge(socket_class: use_proxy? ? ProxySocket : Socket)
+ @options = @options.merge(timeout_class: PerOperationWithDeadline, timeout_options: TIMEOUT)
@options = @options.merge(proxy_url) if use_proxy?
@headers = {}
@@ -128,7 +138,7 @@ class Request
end
def http_client
- HTTP.use(:auto_inflate).timeout(TIMEOUT.dup).follow(max_hops: 3)
+ HTTP.use(:auto_inflate).follow(max_hops: 3)
end
end
@@ -268,11 +278,11 @@ class Request
end
until socks.empty?
- _, available_socks, = IO.select(nil, socks, nil, Request::TIMEOUT[:connect])
+ _, available_socks, = IO.select(nil, socks, nil, Request::TIMEOUT[:connect_timeout])
if available_socks.nil?
socks.each(&:close)
- raise HTTP::TimeoutError, "Connect timed out after #{Request::TIMEOUT[:connect]} seconds"
+ raise HTTP::TimeoutError, "Connect timed out after #{Request::TIMEOUT[:connect_timeout]} seconds"
end
available_socks.each do |sock|
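
The timeout rework keeps distinct per-operation timeouts but adds a cumulative read deadline. A rough sketch, not part of the patch, of the bookkeeping the new `readpartial` performs, using the values from the updated `TIMEOUT` constant (10 s per read, 30 s overall):

    read_timeout  = 10  # maximum wait for any single read
    read_deadline = 30  # total budget across all reads of one response

    deadline = Process.clock_gettime(Process::CLOCK_MONOTONIC) + read_deadline

    # Each wait on the socket is capped by whatever remains of the overall
    # deadline, so a server trickling one byte at a time can no longer hold
    # the connection open indefinitely.
    remaining_time = deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)
    wait_for = [remaining_time, read_timeout].min
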
diff --git a/app/models/report.rb b/app/models/report.rb
index 525d22ad5..3ae5c10dd 100644
--- a/app/models/report.rb
+++ b/app/models/report.rb
@@ -39,7 +39,10 @@ class Report < ApplicationRecord
scope :resolved, -> { where.not(action_taken_at: nil) }
scope :with_accounts, -> { includes([:account, :target_account, :action_taken_by_account, :assigned_account].index_with({ user: [:invite_request, :invite] })) }
- validates :comment, length: { maximum: 1_000 }
+ # A report is considered local if the reporter is local
+ delegate :local?, to: :account
+
+ validates :comment, length: { maximum: 1_000 }, if: :local?
validates :rule_ids, absence: true, unless: :violation?
validate :validate_rule_ids
@@ -50,10 +53,6 @@ class Report < ApplicationRecord
violation: 2_000,
}
- def local?
- false # Force uri_for to use uri attribute
- end
-
before_validation :set_uri, only: :create
after_create_commit :trigger_webhooks
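
A short sketch, not part of the patch, of the effect of delegating `local?` to the reporter's account: the 1,000-character comment limit now applies only to reports filed by local users, while remote reports (already truncated to 5,000 characters by `ActivityPub::Activity::Flag`) are accepted as-is:

    # hypothetical accounts
    local_reporter  = Account.new(domain: nil)
    remote_reporter = Account.new(domain: 'example.com')

    Report.new(account: local_reporter).local?   # => true,  comment length is validated
    Report.new(account: remote_reporter).local?  # => false, comment length validation is skipped
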
diff --git a/app/services/activitypub/process_account_service.rb b/app/services/activitypub/process_account_service.rb
index 99bcb3835..16731c514 100644
--- a/app/services/activitypub/process_account_service.rb
+++ b/app/services/activitypub/process_account_service.rb
@@ -59,6 +59,9 @@ class ActivityPub::ProcessAccountService < BaseService
@account.suspended_at = domain_block.created_at if auto_suspend?
@account.suspension_origin = :local if auto_suspend?
@account.silenced_at = domain_block.created_at if auto_silence?
+
+ set_immediate_protocol_attributes!
+
@account.save
end
diff --git a/app/views/admin/trends/links/preview_card_providers/index.html.haml b/app/views/admin/trends/links/preview_card_providers/index.html.haml
index c3648c35e..025270c12 100644
--- a/app/views/admin/trends/links/preview_card_providers/index.html.haml
+++ b/app/views/admin/trends/links/preview_card_providers/index.html.haml
@@ -29,7 +29,7 @@
- Trends::PreviewCardProviderFilter::KEYS.each do |key|
= hidden_field_tag key, params[key] if params[key].present?
- .batch-table.optional
+ .batch-table
.batch-table__toolbar
%label.batch-table__toolbar__select.batch-checkbox-all
= check_box_tag :batch_checkbox_all, nil, false
diff --git a/config/initializers/content_security_policy.rb b/config/initializers/content_security_policy.rb
index 5b3a68332..ab25270e8 100644
--- a/config/initializers/content_security_policy.rb
+++ b/config/initializers/content_security_policy.rb
@@ -3,7 +3,7 @@
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy
def host_to_url(str)
- "http#{Rails.configuration.x.use_https ? 's' : ''}://#{str}".split('/').first if str.present?
+ "http#{Rails.configuration.x.use_https ? 's' : ''}://#{str.split('/').first}" if str.present?
end
base_host = Rails.configuration.x.web_domain
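
A worked example of the `host_to_url` fix (values illustrative): building the URL first and then splitting on `/` collapsed any configured host that contained a path down to the bare scheme, and a bare `https:` source in a CSP directive permits every https: origin, which is what made the headers unintentionally wide:

    str = 'example.com/some/path'

    # before the fix
    "https://#{str}".split('/').first  # => "https:"

    # after the fix
    "https://#{str.split('/').first}"  # => "https://example.com"
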
diff --git a/config/initializers/sidekiq.rb b/config/initializers/sidekiq.rb
index 9d2abf074..b847e6546 100644
--- a/config/initializers/sidekiq.rb
+++ b/config/initializers/sidekiq.rb
@@ -3,6 +3,11 @@
require_relative '../../lib/mastodon/sidekiq_middleware'
Sidekiq.configure_server do |config|
+ if Rails.configuration.database_configuration.dig('production', 'adapter') == 'postgresql_makara'
+ STDERR.puts 'ERROR: Database replication is not currently supported in Sidekiq workers. Check your configuration.'
+ exit 1
+ end
+
config.redis = REDIS_SIDEKIQ_PARAMS
config.server_middleware do |chain|
diff --git a/config/routes.rb b/config/routes.rb
index c11b78ba2..7aec1655a 100644
--- a/config/routes.rb
+++ b/config/routes.rb
@@ -273,7 +273,7 @@ Rails.application.routes.draw do
end
end
- resources :instances, only: [:index, :show, :destroy], constraints: { id: /[^\/]+/ } do
+ resources :instances, only: [:index, :show, :destroy], constraints: { id: /[^\/]+/ }, format: 'html' do
member do
post :clear_delivery_errors
post :restart_delivery
diff --git a/docker-compose.yml b/docker-compose.yml
index f603c2f7e..2efd47d97 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -56,7 +56,7 @@ services:
web:
build: .
- image: ghcr.io/mastodon/mastodon
+ image: ghcr.io/mastodon/mastodon:v4.0.9
restart: always
env_file: .env.production
command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 3000"
@@ -77,7 +77,7 @@ services:
streaming:
build: .
- image: ghcr.io/mastodon/mastodon
+ image: ghcr.io/mastodon/mastodon:v4.0.9
restart: always
env_file: .env.production
command: node ./streaming
@@ -95,7 +95,7 @@ services:
sidekiq:
build: .
- image: ghcr.io/mastodon/mastodon
+ image: ghcr.io/mastodon/mastodon:v4.0.9
restart: always
env_file: .env.production
command: bundle exec sidekiq
diff --git a/lib/mastodon/version.rb b/lib/mastodon/version.rb
index 04f529763..8e7fc0b39 100644
--- a/lib/mastodon/version.rb
+++ b/lib/mastodon/version.rb
@@ -13,7 +13,7 @@ module Mastodon
end
def patch
- 6
+ 9
end
def flags
diff --git a/spec/controllers/api/v1/timelines/tag_controller_spec.rb b/spec/controllers/api/v1/timelines/tag_controller_spec.rb
index 718911083..1c60798fc 100644
--- a/spec/controllers/api/v1/timelines/tag_controller_spec.rb
+++ b/spec/controllers/api/v1/timelines/tag_controller_spec.rb
@@ -5,36 +5,66 @@ require 'rails_helper'
describe Api::V1::Timelines::TagController do
render_views
- let(:user) { Fabricate(:user) }
+ let(:user) { Fabricate(:user) }
+ let(:token) { Fabricate(:accessible_access_token, resource_owner_id: user.id, scopes: 'read:statuses') }
before do
allow(controller).to receive(:doorkeeper_token) { token }
end
- context 'with a user context' do
- let(:token) { Fabricate(:accessible_access_token, resource_owner_id: user.id) }
+ describe 'GET #show' do
+ subject do
+ get :show, params: { id: 'test' }
+ end
- describe 'GET #show' do
- before do
- PostStatusService.new.call(user.account, text: 'It is a #test')
+ before do
+ PostStatusService.new.call(user.account, text: 'It is a #test')
+ end
+
+ context 'when the instance allows public preview' do
+ context 'when the user is not authenticated' do
+ let(:token) { nil }
+
+ it 'returns http success', :aggregate_failures do
+ subject
+
+ expect(response).to have_http_status(200)
+ expect(response.headers['Link'].links.size).to eq(2)
+ end
end
- it 'returns http success' do
- get :show, params: { id: 'test' }
- expect(response).to have_http_status(200)
- expect(response.headers['Link'].links.size).to eq(2)
+ context 'when the user is authenticated' do
+ it 'returns http success', :aggregate_failures do
+ subject
+
+ expect(response).to have_http_status(200)
+ expect(response.headers['Link'].links.size).to eq(2)
+ end
end
end
- end
- context 'without a user context' do
- let(:token) { Fabricate(:accessible_access_token, resource_owner_id: nil) }
+ context 'when the instance does not allow public preview' do
+ before do
+ Form::AdminSettings.new(timeline_preview: false).save
+ end
- describe 'GET #show' do
- it 'returns http success' do
- get :show, params: { id: 'test' }
- expect(response).to have_http_status(200)
- expect(response.headers['Link']).to be_nil
+ context 'when the user is not authenticated' do
+ let(:token) { nil }
+
+ it 'returns http unauthorized' do
+ subject
+
+ expect(response).to have_http_status(401)
+ end
+ end
+
+ context 'when the user is authenticated' do
+ it 'returns http success', :aggregate_failures do
+ subject
+
+ expect(response).to have_http_status(200)
+ expect(response.headers['Link'].links.size).to eq(2)
+ end
end
end
end
diff --git a/spec/lib/activitypub/activity/flag_spec.rb b/spec/lib/activitypub/activity/flag_spec.rb
index 2f2d13876..6d7a8a7ec 100644
--- a/spec/lib/activitypub/activity/flag_spec.rb
+++ b/spec/lib/activitypub/activity/flag_spec.rb
@@ -37,6 +37,37 @@ RSpec.describe ActivityPub::Activity::Flag do
end
end
+ context 'when the report comment is excessively long' do
+ subject do
+ described_class.new({
+ '@context': 'https://www.w3.org/ns/activitystreams',
+ id: flag_id,
+ type: 'Flag',
+ content: long_comment,
+ actor: ActivityPub::TagManager.instance.uri_for(sender),
+ object: [
+ ActivityPub::TagManager.instance.uri_for(flagged),
+ ActivityPub::TagManager.instance.uri_for(status),
+ ],
+ }.with_indifferent_access, sender)
+ end
+
+ let(:long_comment) { Faker::Lorem.characters(number: 6000) }
+
+ before do
+ subject.perform
+ end
+
+ it 'creates a report but with a truncated comment' do
+ report = Report.find_by(account: sender, target_account: flagged)
+
+ expect(report).to_not be_nil
+ expect(report.comment.length).to eq 5000
+ expect(report.comment).to eq long_comment[0...5000]
+ expect(report.status_ids).to eq [status.id]
+ end
+ end
+
context 'when the reported status is private and should not be visible to the remote server' do
let(:status) { Fabricate(:status, account: flagged, uri: 'foobar', visibility: :private) }
diff --git a/spec/models/report_spec.rb b/spec/models/report_spec.rb
index 874be4132..c485a4a3c 100644
--- a/spec/models/report_spec.rb
+++ b/spec/models/report_spec.rb
@@ -125,10 +125,17 @@ describe Report do
expect(report).to be_valid
end
- it 'is invalid if comment is longer than 1000 characters' do
+ let(:remote_account) { Fabricate(:account, domain: 'example.com', protocol: :activitypub, inbox_url: 'http://example.com/inbox') }
+
+ it 'is invalid if comment is longer than 1000 characters only if reporter is local' do
report = Fabricate.build(:report, comment: Faker::Lorem.characters(number: 1001))
- report.valid?
+ expect(report.valid?).to be false
expect(report).to model_have_error_on_field(:comment)
end
+
+ it 'is valid if comment is longer than 1000 characters and reporter is not local' do
+ report = Fabricate.build(:report, account: remote_account, comment: Faker::Lorem.characters(number: 1001))
+ expect(report.valid?).to be true
+ end
end
end
diff --git a/spec/requests/content_security_policy_spec.rb b/spec/requests/content_security_policy_spec.rb
new file mode 100644
index 000000000..91158fe59
--- /dev/null
+++ b/spec/requests/content_security_policy_spec.rb
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+require 'rails_helper'
+
+describe 'Content-Security-Policy' do
+ it 'sets the expected CSP headers' do
+ allow(SecureRandom).to receive(:base64).with(16).and_return('ZbA+JmE7+bK8F5qvADZHuQ==')
+
+ get '/'
+ expect(response.headers['Content-Security-Policy'].split(';').map(&:strip)).to contain_exactly(
+ "base-uri 'none'",
+ "default-src 'none'",
+ "frame-ancestors 'none'",
+ "font-src 'self' https://cb6e6126.ngrok.io",
+ "img-src 'self' https: data: blob: https://cb6e6126.ngrok.io",
+ "style-src 'self' https://cb6e6126.ngrok.io 'nonce-ZbA+JmE7+bK8F5qvADZHuQ=='",
+ "media-src 'self' https: data: https://cb6e6126.ngrok.io",
+ "frame-src 'self' https:",
+ "manifest-src 'self' https://cb6e6126.ngrok.io",
+ "child-src 'self' blob: https://cb6e6126.ngrok.io",
+ "worker-src 'self' blob: https://cb6e6126.ngrok.io",
+ "connect-src 'self' data: blob: https://cb6e6126.ngrok.io https://cb6e6126.ngrok.io ws://localhost:4000",
+ "script-src 'self' https://cb6e6126.ngrok.io 'wasm-unsafe-eval'"
+ )
+ end
+end
diff --git a/spec/services/report_service_spec.rb b/spec/services/report_service_spec.rb
index 02bc42ac1..1737a05ae 100644
--- a/spec/services/report_service_spec.rb
+++ b/spec/services/report_service_spec.rb
@@ -4,6 +4,14 @@ RSpec.describe ReportService, type: :service do
subject { described_class.new }
let(:source_account) { Fabricate(:account) }
+ let(:target_account) { Fabricate(:account) }
+
+ context 'with a local account' do
+ it 'has a uri' do
+ report = subject.call(source_account, target_account)
+ expect(report.uri).to_not be_nil
+ end
+ end
context 'for a remote account' do
let(:remote_account) { Fabricate(:account, domain: 'example.com', protocol: :activitypub, inbox_url: 'http://example.com/inbox') }
diff --git a/streaming/index.js b/streaming/index.js
index 663b6f18f..6e2efcefb 100644
--- a/streaming/index.js
+++ b/streaming/index.js
@@ -228,9 +228,15 @@ const startWorker = async (workerId) => {
callbacks.forEach(callback => callback(json));
};
+ /**
+ * @callback SubscriptionListener
+   * @param {ReturnType<parseJSON>} json of the message
+ * @returns void
+ */
+
/**
* @param {string} channel
- * @param {function(string): void} callback
+ * @param {SubscriptionListener} callback
*/
const subscribe = (channel, callback) => {
log.silly(`Adding listener for ${channel}`);
@@ -247,7 +253,7 @@ const startWorker = async (workerId) => {
/**
* @param {string} channel
- * @param {function(Object): void} callback
+ * @param {SubscriptionListener} callback
*/
const unsubscribe = (channel, callback) => {
log.silly(`Removing listener for ${channel}`);
@@ -625,51 +631,66 @@ const startWorker = async (workerId) => {
* @param {string[]} ids
* @param {any} req
* @param {function(string, string): void} output
- * @param {function(string[], function(string): void): void} attachCloseHandler
+ * @param {undefined | function(string[], SubscriptionListener): void} attachCloseHandler
* @param {boolean=} needsFiltering
- * @returns {function(object): void}
+ * @returns {SubscriptionListener}
*/
const streamFrom = (ids, req, output, attachCloseHandler, needsFiltering = false) => {
const accountId = req.accountId || req.remoteAddress;
log.verbose(req.requestId, `Starting stream from ${ids.join(', ')} for ${accountId}`);
-    // Currently message is of type string, soon it'll be Record<string, any>
+ const transmit = (event, payload) => {
+ // TODO: Replace "string"-based delete payloads with object payloads:
+ const encodedPayload = typeof payload === 'object' ? JSON.stringify(payload) : payload;
+
+ log.silly(req.requestId, `Transmitting for ${accountId}: ${event} ${encodedPayload}`);
+ output(event, encodedPayload);
+ };
+
+ // The listener used to process each message off the redis subscription,
+ // message here is an object with an `event` and `payload` property. Some
+ // events also include a queued_at value, but this is being removed shortly.
+ /** @type {SubscriptionListener} */
const listener = message => {
- const { event, payload, queued_at } = message;
+ const { event, payload } = message;
- const transmit = () => {
- const now = new Date().getTime();
- const delta = now - queued_at;
- const encodedPayload = typeof payload === 'object' ? JSON.stringify(payload) : payload;
-
- log.silly(req.requestId, `Transmitting for ${accountId}: ${event} ${encodedPayload} Delay: ${delta}ms`);
- output(event, encodedPayload);
- };
-
- // Only messages that may require filtering are statuses, since notifications
- // are already personalized and deletes do not matter
- if (!needsFiltering || event !== 'update') {
- transmit();
+ // Streaming only needs to apply filtering to some channels and only to
+ // some events. This is because majority of the filtering happens on the
+ // Ruby on Rails side when producing the event for streaming.
+ //
+ // The only events that require filtering from the streaming server are
+ // `update` and `status.update`, all other events are transmitted to the
+ // client as soon as they're received (pass-through).
+ //
+ // The channels that need filtering are determined in the function
+ // `channelNameToIds` defined below:
+ if (!needsFiltering || (event !== 'update' && event !== 'status.update')) {
+ transmit(event, payload);
return;
}
- const unpackedPayload = payload;
- const targetAccountIds = [unpackedPayload.account.id].concat(unpackedPayload.mentions.map(item => item.id));
- const accountDomain = unpackedPayload.account.acct.split('@')[1];
+ // The rest of the logic from here on in this function is to handle
+ // filtering of statuses:
- if (Array.isArray(req.chosenLanguages) && unpackedPayload.language !== null && req.chosenLanguages.indexOf(unpackedPayload.language) === -1) {
- log.silly(req.requestId, `Message ${unpackedPayload.id} filtered by language (${unpackedPayload.language})`);
+ // Filter based on language:
+ if (Array.isArray(req.chosenLanguages) && payload.language !== null && req.chosenLanguages.indexOf(payload.language) === -1) {
+ log.silly(req.requestId, `Message ${payload.id} filtered by language (${payload.language})`);
return;
}
// When the account is not logged in, it is not necessary to confirm the block or mute
if (!req.accountId) {
- transmit();
+ transmit(event, payload);
return;
}
- pgPool.connect((err, client, done) => {
+ // Filter based on domain blocks, blocks, mutes, or custom filters:
+ const targetAccountIds = [payload.account.id].concat(payload.mentions.map(item => item.id));
+ const accountDomain = payload.account.acct.split('@')[1];
+
+ // TODO: Move this logic out of the message handling loop
+ pgPool.connect((err, client, releasePgConnection) => {
if (err) {
log.error(err);
return;
@@ -684,40 +705,57 @@ const startWorker = async (workerId) => {
SELECT 1
FROM mutes
WHERE account_id = $1
- AND target_account_id IN (${placeholders(targetAccountIds, 2)})`, [req.accountId, unpackedPayload.account.id].concat(targetAccountIds)),
+ AND target_account_id IN (${placeholders(targetAccountIds, 2)})`, [req.accountId, payload.account.id].concat(targetAccountIds)),
];
if (accountDomain) {
queries.push(client.query('SELECT 1 FROM account_domain_blocks WHERE account_id = $1 AND domain = $2', [req.accountId, accountDomain]));
}
- if (!unpackedPayload.filtered && !req.cachedFilters) {
+ if (!payload.filtered && !req.cachedFilters) {
queries.push(client.query('SELECT filter.id AS id, filter.phrase AS title, filter.context AS context, filter.expires_at AS expires_at, filter.action AS filter_action, keyword.keyword AS keyword, keyword.whole_word AS whole_word FROM custom_filter_keywords keyword JOIN custom_filters filter ON keyword.custom_filter_id = filter.id WHERE filter.account_id = $1 AND (filter.expires_at IS NULL OR filter.expires_at > NOW())', [req.accountId]));
}
Promise.all(queries).then(values => {
- done();
+ releasePgConnection();
+ // Handling blocks & mutes and domain blocks: If one of those applies,
+ // then we don't transmit the payload of the event to the client
if (values[0].rows.length > 0 || (accountDomain && values[1].rows.length > 0)) {
return;
}
- if (!unpackedPayload.filtered && !req.cachedFilters) {
+ // If the payload already contains the `filtered` property, it means
+ // that filtering has been applied on the ruby on rails side, as
+ // such, we don't need to construct or apply the filters in streaming:
+ if (Object.prototype.hasOwnProperty.call(payload, "filtered")) {
+ transmit(event, payload);
+ return;
+ }
+
+ // Handling for constructing the custom filters and caching them on the request
+ // TODO: Move this logic out of the message handling lifecycle
+ if (!req.cachedFilters) {
const filterRows = values[accountDomain ? 2 : 1].rows;
- req.cachedFilters = filterRows.reduce((cache, row) => {
- if (cache[row.id]) {
- cache[row.id].keywords.push([row.keyword, row.whole_word]);
+ req.cachedFilters = filterRows.reduce((cache, filter) => {
+ if (cache[filter.id]) {
+ cache[filter.id].keywords.push([filter.keyword, filter.whole_word]);
} else {
- cache[row.id] = {
- keywords: [[row.keyword, row.whole_word]],
- expires_at: row.expires_at,
- repr: {
- id: row.id,
- title: row.title,
- context: row.context,
- expires_at: row.expires_at,
- filter_action: ['warn', 'hide'][row.filter_action],
+ cache[filter.id] = {
+ keywords: [[filter.keyword, filter.whole_word]],
+ expires_at: filter.expires_at,
+ filter: {
+ id: filter.id,
+ title: filter.title,
+ context: filter.context,
+ expires_at: filter.expires_at,
+ // filter.filter_action is the value from the
+ // custom_filters.action database column, it is an integer
+ // representing a value in an enum defined by Ruby on Rails:
+ //
+ // enum { warn: 0, hide: 1 }
+ filter_action: ['warn', 'hide'][filter.filter_action],
},
};
}
@@ -725,6 +763,10 @@ const startWorker = async (workerId) => {
return cache;
}, {});
+ // Construct the regular expressions for the custom filters: This
+ // needs to be done in a separate loop as the database returns one
+ // filterRow per keyword, so we need all the keywords before
+ // constructing the regular expression
Object.keys(req.cachedFilters).forEach((key) => {
req.cachedFilters[key].regexp = new RegExp(req.cachedFilters[key].keywords.map(([keyword, whole_word]) => {
let expr = keyword.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');;
@@ -744,31 +786,58 @@ const startWorker = async (workerId) => {
});
}
- // Check filters
- if (req.cachedFilters && !unpackedPayload.filtered) {
- const status = unpackedPayload;
-        const searchContent = ([status.spoiler_text || '', status.content].concat((status.poll && status.poll.options) ? status.poll.options.map(option => option.title) : [])).concat(status.media_attachments.map(att => att.description)).join('\n\n').replace(/<br\s*\/?>/g, '\n').replace(/<\/p><p>/g, '\n\n');
- const searchIndex = JSDOM.fragment(searchContent).textContent;
+ // Apply cachedFilters against the payload, constructing a
+ // `filter_results` array of FilterResult entities
+ if (req.cachedFilters) {
+ const status = payload;
+ // TODO: Calculate searchableContent in Ruby on Rails:
+          const searchableContent = ([status.spoiler_text || '', status.content].concat((status.poll && status.poll.options) ? status.poll.options.map(option => option.title) : [])).concat(status.media_attachments.map(att => att.description)).join('\n\n').replace(/<br\s*\/?>/g, '\n').replace(/<\/p><p>/g, '\n\n');
+ const searchableTextContent = JSDOM.fragment(searchableContent).textContent;
const now = new Date();
- payload.filtered = [];
- Object.values(req.cachedFilters).forEach((cachedFilter) => {
- if ((cachedFilter.expires_at === null || cachedFilter.expires_at > now)) {
- const keyword_matches = searchIndex.match(cachedFilter.regexp);
- if (keyword_matches) {
- payload.filtered.push({
- filter: cachedFilter.repr,
- keyword_matches,
- });
- }
+ const filter_results = Object.values(req.cachedFilters).reduce((results, cachedFilter) => {
+ // Check the filter hasn't expired before applying:
+ if (cachedFilter.expires_at !== null && cachedFilter.expires_at < now) {
+ return results;
}
- });
- }
- transmit();
+ // Just in-case JSDOM fails to find textContent in searchableContent
+ if (!searchableTextContent) {
+ return results;
+ }
+
+ const keyword_matches = searchableTextContent.match(cachedFilter.regexp);
+ if (keyword_matches) {
+ // results is an Array of FilterResult; status_matches is always
+            // null as we are only applying the keyword-based custom
+ // filters, not the status-based custom filters.
+ // https://docs.joinmastodon.org/entities/FilterResult/
+ results.push({
+ filter: cachedFilter.filter,
+ keyword_matches,
+ status_matches: null
+ });
+ }
+
+ return results;
+ }, []);
+
+ // Send the payload + the FilterResults as the `filtered` property
+ // to the streaming connection. To reach this code, the `event` must
+ // have been either `update` or `status.update`, meaning the
+ // `payload` is a Status entity, which has a `filtered` property:
+ //
+ // filtered: https://docs.joinmastodon.org/entities/Status/#filtered
+ transmit(event, {
+ ...payload,
+ filtered: filter_results
+ });
+ } else {
+ transmit(event, payload);
+ }
}).catch(err => {
+ releasePgConnection();
log.error(err);
- done();
});
});
};
@@ -777,7 +846,7 @@ const startWorker = async (workerId) => {
subscribe(`${redisPrefix}${id}`, listener);
});
- if (attachCloseHandler) {
+ if (typeof attachCloseHandler === 'function') {
attachCloseHandler(ids.map(id => `${redisPrefix}${id}`), listener);
}
@@ -814,12 +883,13 @@ const startWorker = async (workerId) => {
/**
* @param {any} req
* @param {function(): void} [closeHandler]
- * @return {function(string[]): void}
+ * @returns {function(string[], SubscriptionListener): void}
*/
- const streamHttpEnd = (req, closeHandler = undefined) => (ids) => {
+
+ const streamHttpEnd = (req, closeHandler = undefined) => (ids, listener) => {
req.on('close', () => {
ids.forEach(id => {
- unsubscribe(id);
+ unsubscribe(id, listener);
});
if (closeHandler) {
@@ -1058,7 +1128,7 @@ const startWorker = async (workerId) => {
* @typedef WebSocketSession
* @property {any} socket
* @property {any} request
- * @property {Object.<string, { listener: function(string): void, stopHeartbeat: function(): void }>} subscriptions
+ * @property {Object.<string, { listener: SubscriptionListener, stopHeartbeat: function(): void }>} subscriptions
*/
/**