Merge tag 'v4.0.9'

Mike 2023-09-19 13:42:45 +10:00
commit 6f3094cfbd
26 changed files with 482 additions and 187 deletions

@@ -0,0 +1,89 @@
on:
  workflow_call:
    inputs:
      platforms:
        required: true
        type: string
      use_native_arm64_builder:
        type: boolean
      push_to_images:
        type: string
      flavor:
        type: string
      tags:
        type: string
      labels:
        type: string

jobs:
  build-image:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3

      - uses: docker/setup-qemu-action@v2
        if: contains(inputs.platforms, 'linux/arm64') && !inputs.use_native_arm64_builder

      - uses: docker/setup-buildx-action@v2
        id: buildx
        if: ${{ !(inputs.use_native_arm64_builder && contains(inputs.platforms, 'linux/arm64')) }}

      - name: Start a local Docker Builder
        if: inputs.use_native_arm64_builder && contains(inputs.platforms, 'linux/arm64')
        run: |
          docker run --rm -d --name buildkitd -p 1234:1234 --privileged moby/buildkit:latest --addr tcp://0.0.0.0:1234

      - uses: docker/setup-buildx-action@v2
        id: buildx-native
        if: inputs.use_native_arm64_builder && contains(inputs.platforms, 'linux/arm64')
        with:
          driver: remote
          endpoint: tcp://localhost:1234
          platforms: linux/amd64
          append: |
            - endpoint: tcp://${{ vars.DOCKER_BUILDER_HETZNER_ARM64_01_HOST }}:13865
              platforms: linux/arm64
              name: mastodon-docker-builder-arm64-01
              driver-opts:
                - servername=mastodon-docker-builder-arm64-01
        env:
          BUILDER_NODE_1_AUTH_TLS_CACERT: ${{ secrets.DOCKER_BUILDER_HETZNER_ARM64_01_CACERT }}
          BUILDER_NODE_1_AUTH_TLS_CERT: ${{ secrets.DOCKER_BUILDER_HETZNER_ARM64_01_CERT }}
          BUILDER_NODE_1_AUTH_TLS_KEY: ${{ secrets.DOCKER_BUILDER_HETZNER_ARM64_01_KEY }}

      - name: Log in to Docker Hub
        if: contains(inputs.push_to_images, 'tootsuite')
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Log in to the Github Container registry
        if: contains(inputs.push_to_images, 'ghcr.io')
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - uses: docker/metadata-action@v4
        id: meta
        if: ${{ inputs.push_to_images != '' }}
        with:
          images: ${{ inputs.push_to_images }}
          flavor: ${{ inputs.flavor }}
          tags: ${{ inputs.tags }}
          labels: ${{ inputs.labels }}

      - uses: docker/build-push-action@v4
        with:
          context: .
          platforms: ${{ inputs.platforms }}
          provenance: false
          builder: ${{ steps.buildx.outputs.name || steps.buildx-native.outputs.name }}
          push: ${{ inputs.push_to_images != '' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

@@ -1,64 +0,0 @@
name: Build container image
on:
  workflow_dispatch:
  push:
    branches:
      - 'main'
    tags:
      - '*'
  pull_request:
    paths:
      - .github/workflows/build-image.yml
      - Dockerfile
permissions:
  contents: read
  packages: write

jobs:
  build-image:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: docker/setup-qemu-action@v2
      - uses: docker/setup-buildx-action@v2
      - name: Log in to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
        if: github.repository == 'mastodon/mastodon' && github.event_name != 'pull_request'
      - name: Log in to the Github Container registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
        if: github.repository == 'mastodon/mastodon' && github.event_name != 'pull_request'
      - uses: docker/metadata-action@v4
        id: meta
        with:
          images: |
            tootsuite/mastodon
            ghcr.io/mastodon/mastodon
          flavor: |
            latest=auto
          tags: |
            type=edge,branch=main
            type=pep440,pattern={{raw}}
            type=pep440,pattern=v{{major}}.{{minor}}
            type=ref,event=pr
      - uses: docker/build-push-action@v4
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          provenance: false
          builder: ${{ steps.buildx.outputs.name }}
          push: ${{ github.repository == 'mastodon/mastodon' && github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

.github/workflows/build-releases.yml (new file, 25 lines)

@@ -0,0 +1,25 @@
name: Build container release images
on:
  push:
    tags:
      - '*'
permissions:
  contents: read
  packages: write

jobs:
  build-image:
    uses: ./.github/workflows/build-container-image.yml
    with:
      platforms: linux/amd64,linux/arm64
      use_native_arm64_builder: true
      push_to_images: |
        tootsuite/mastodon
        ghcr.io/mastodon/mastodon
      flavor: |
        latest=false
      tags: |
        type=pep440,pattern={{raw}}
        type=pep440,pattern=v{{major}}.{{minor}}
    secrets: inherit

@@ -3,6 +3,51 @@ Changelog

 All notable changes to this project will be documented in this file.

+## End of life notice
+**The 4.0.x branch will not receive any update after 2023-10-31.**
+This means that no security fix will be made available for this branch after this date, and you will need to update to a more recent version (such as the 4.1.x branch) to receive security fixes.
+
+## [4.0.9] - 2023-09-05
+### Changed
+- Change remote report processing to accept reports with long comments, but truncate them ([ThisIsMissEm](https://github.com/mastodon/mastodon/pull/25028))
+### Fixed
+- **Fix blocking subdomains of an already-blocked domain** ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26392))
+- Fix `/api/v1/timelines/tag/:hashtag` allowing for unauthenticated access when public preview is disabled ([danielmbrasil](https://github.com/mastodon/mastodon/pull/26237))
+- Fix inefficiencies in `PlainTextFormatter` ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26727))
+
+## [4.0.8] - 2023-07-31
+### Fixed
+- Fix memory leak in streaming server ([ThisIsMissEm](https://github.com/mastodon/mastodon/pull/26228))
+- Fix wrong filters sometimes applying in streaming ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26159), [ThisIsMissEm](https://github.com/mastodon/mastodon/pull/26213), [renchap](https://github.com/mastodon/mastodon/pull/26233))
+- Fix incorrect connect timeout in outgoing requests ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26116))
+
+## [4.0.7] - 2023-07-21
+### Added
+- Add check preventing Sidekiq workers from running with Makara configured ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/25850))
+### Changed
+- Change request timeout handling to use a longer deadline ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26055))
+### Fixed
+- Fix moderation interface for remote instances with a .zip TLD ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/25886))
+- Fix remote accounts being possibly persisted to database with incomplete protocol values ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/25886))
+- Fix trending publishers table not rendering correctly on narrow screens ([vmstan](https://github.com/mastodon/mastodon/pull/25945))
+### Security
+- Fix CSP headers being unintentionally wide ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26105))
+
 ## [4.0.6] - 2023-07-07
 ### Fixed

@@ -46,7 +46,7 @@ RUN apt-get update && \

 ENV PATH="${PATH}:/opt/ruby/bin:/opt/node/bin"

-RUN npm install -g npm@latest && \
+RUN npm install -g npm@9 && \
   npm install -g yarn && \
   gem install bundler && \
   apt-get update && \

@@ -11,7 +11,8 @@ A "vulnerability in Mastodon" is a vulnerability in the code distributed through

 ## Supported Versions

 | Version | Supported |
-| ------- | ----------|
-| 4.0.x   | Yes |
-| 3.5.x   | Yes |
+| ------- | ------------------ |
+| 4.1.x   | Yes |
+| 4.0.x   | Until 2023-10-31 |
+| 3.5.x   | Until 2023-12-31 |
 | < 3.5   | No |

@@ -25,7 +25,7 @@ module Admin
         @domain_block.errors.delete(:domain)
         render :new
       else
-        if existing_domain_block.present?
+        if existing_domain_block.present? && existing_domain_block.domain == TagManager.instance.normalize_domain(@domain_block.domain.strip)
           @domain_block = existing_domain_block
           @domain_block.update(resource_params)
         end

@@ -1,6 +1,7 @@
 # frozen_string_literal: true

 class Api::V1::Timelines::TagController < Api::BaseController
+  before_action -> { doorkeeper_authorize! :read, :'read:statuses' }, only: :show, if: :require_auth?
   before_action :load_tag
   after_action :insert_pagination_headers, unless: -> { @statuses.empty? }

@@ -11,6 +12,10 @@ class Api::V1::Timelines::TagController < Api::BaseController
   private

+  def require_auth?
+    !Setting.timeline_preview
+  end
+
   def load_tag
     @tag = Tag.find_normalized(params[:id])
   end

@@ -16,7 +16,7 @@ class ActivityPub::Activity::Flag < ActivityPub::Activity
       @account,
       target_account,
       status_ids: target_statuses.nil? ? [] : target_statuses.map(&:id),
-      comment: @json['content'] || '',
+      comment: report_comment,
       uri: report_uri
     )
   end

@@ -35,4 +35,8 @@ class ActivityPub::Activity::Flag < ActivityPub::Activity
   def report_uri
     @json['id'] unless @json['id'].nil? || invalid_origin?(@json['id'])
   end
+
+  def report_comment
+    (@json['content'] || '')[0...5000]
+  end
 end
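The slice in report_comment caps the stored comment at 5,000 characters. A quick sketch of the same Ruby idiom, with illustrative values only:

  # Ruby's exclusive range slice keeps at most 5,000 characters.
  comment = 'a' * 6_000
  comment[0...5000].length   # => 5000
  (nil || '')[0...5000]      # => "" (a missing content field becomes an empty comment)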

@@ -27,6 +27,8 @@ class ActivityPub::TagManager
     when :note, :comment, :activity
       return activity_account_status_url(target.account, target) if target.reblog?
       short_account_status_url(target.account, target)
+    when :flag
+      target.uri
     end
   end

@@ -41,6 +43,8 @@ class ActivityPub::TagManager
       account_status_url(target.account, target)
     when :emoji
       emoji_url(target)
+    when :flag
+      target.uri
     end
   end

@@ -1,9 +1,7 @@
 # frozen_string_literal: true

 class PlainTextFormatter
-  include ActionView::Helpers::TextHelper
-
-  NEWLINE_TAGS_RE = /(<br \/>|<br>|<\/p>)+/.freeze
+  NEWLINE_TAGS_RE = %r{(<br />|<br>|</p>)+}

   attr_reader :text, :local

@@ -18,7 +16,10 @@ class PlainTextFormatter
     if local?
       text
     else
-      html_entities.decode(strip_tags(insert_newlines)).chomp
+      node = Nokogiri::HTML.fragment(insert_newlines)
+      # Elements that are entirely removed with our Sanitize config
+      node.xpath('.//iframe|.//math|.//noembed|.//noframes|.//noscript|.//plaintext|.//script|.//style|.//svg|.//xmp').remove
+      node.text.chomp
     end
   end

@@ -27,8 +28,4 @@ class PlainTextFormatter
   def insert_newlines
     text.gsub(NEWLINE_TAGS_RE) { |match| "#{match}\n" }
   end
-
-  def html_entities
-    HTMLEntities.new
-  end
 end
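For context, a minimal usage sketch (not part of the diff), assuming the formatter is built as PlainTextFormatter.new(html, local) and exposes the result via #to_s; the constructor and method name are not shown in this hunk, only attr_reader :text, :local:

  # Sketch only: remote content has tags stripped and entities decoded by Nokogiri,
  # with newlines inserted after <br> and </p> runs before stripping.
  html = '<p>Hello &amp; welcome</p><script>alert(1)</script><p>Bye</p>'
  PlainTextFormatter.new(html, false).to_s
  # => "Hello & welcome\nBye"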

@@ -4,14 +4,22 @@ require 'ipaddr'
 require 'socket'
 require 'resolv'

-# Monkey-patch the HTTP.rb timeout class to avoid using a timeout block
+# Use our own timeout class to avoid using HTTP.rb's timeout block
 # around the Socket#open method, since we use our own timeout blocks inside
 # that method
 #
 # Also changes how the read timeout behaves so that it is cumulative (closer
 # to HTTP::Timeout::Global, but still having distinct timeouts for other
 # operation types)
-class HTTP::Timeout::PerOperation
+class PerOperationWithDeadline < HTTP::Timeout::PerOperation
+  READ_DEADLINE = 30
+
+  def initialize(*args)
+    super
+
+    @read_deadline = options.fetch(:read_deadline, READ_DEADLINE)
+  end
+
   def connect(socket_class, host, port, nodelay = false)
     @socket = socket_class.open(host, port)
     @socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1) if nodelay

@@ -24,7 +32,7 @@ class HTTP::Timeout::PerOperation
   # Read data from the socket
   def readpartial(size, buffer = nil)
-    @deadline ||= Process.clock_gettime(Process::CLOCK_MONOTONIC) + @read_timeout
+    @deadline ||= Process.clock_gettime(Process::CLOCK_MONOTONIC) + @read_deadline
     timeout = false
     loop do

@@ -33,7 +41,8 @@
       return :eof if result.nil?

       remaining_time = @deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)
-      raise HTTP::TimeoutError, "Read timed out after #{@read_timeout} seconds" if timeout || remaining_time <= 0
+      raise HTTP::TimeoutError, "Read timed out after #{@read_timeout} seconds" if timeout
+      raise HTTP::TimeoutError, "Read timed out after a total of #{@read_deadline} seconds" if remaining_time <= 0
       return result if result != :wait_readable

       # marking the socket for timeout. Why is this not being raised immediately?

@@ -46,7 +55,7 @@
       # timeout. Else, the first timeout was a proper timeout.
       # This hack has to be done because io/wait#wait_readable doesn't provide a value for when
       # the socket is closed by the server, and HTTP::Parser doesn't provide the limit for the chunks.
-      timeout = true unless @socket.to_io.wait_readable(remaining_time)
+      timeout = true unless @socket.to_io.wait_readable([remaining_time, @read_timeout].min)
     end
   end
 end

@@ -57,7 +66,7 @@ class Request
   # We enforce a 5s timeout on DNS resolving, 5s timeout on socket opening
   # and 5s timeout on the TLS handshake, meaning the worst case should take
   # about 15s in total
-  TIMEOUT = { connect: 5, read: 10, write: 10 }.freeze
+  TIMEOUT = { connect_timeout: 5, read_timeout: 10, write_timeout: 10, read_deadline: 30 }.freeze

   include RoutingHelper

@@ -68,6 +77,7 @@ class Request
     @url = Addressable::URI.parse(url).normalize
     @http_client = options.delete(:http_client)
     @options = options.merge(socket_class: use_proxy? ? ProxySocket : Socket)
+    @options = @options.merge(timeout_class: PerOperationWithDeadline, timeout_options: TIMEOUT)
     @options = @options.merge(proxy_url) if use_proxy?
     @headers = {}

@@ -128,7 +138,7 @@ class Request
   end

   def http_client
-    HTTP.use(:auto_inflate).timeout(TIMEOUT.dup).follow(max_hops: 3)
+    HTTP.use(:auto_inflate).follow(max_hops: 3)
   end
 end

@@ -268,11 +278,11 @@ class Request
     end

     until socks.empty?
-      _, available_socks, = IO.select(nil, socks, nil, Request::TIMEOUT[:connect])
+      _, available_socks, = IO.select(nil, socks, nil, Request::TIMEOUT[:connect_timeout])

       if available_socks.nil?
         socks.each(&:close)
-        raise HTTP::TimeoutError, "Connect timed out after #{Request::TIMEOUT[:connect]} seconds"
+        raise HTTP::TimeoutError, "Connect timed out after #{Request::TIMEOUT[:connect_timeout]} seconds"
       end

       available_socks.each do |sock|
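The new options split the per-read timeout from a cumulative read deadline. A small sketch (not part of the diff) restating the arithmetic from the constants above:

  # Each individual read may still block for up to read_timeout (10 s), but
  # readpartial now tracks a cumulative @read_deadline (30 s), so a slow-drip
  # response fails after about 30 s total instead of resetting the clock on
  # every packet received.
  TIMEOUT = { connect_timeout: 5, read_timeout: 10, write_timeout: 10, read_deadline: 30 }.freeze
  worst_case_connect = 5 + 5 + 5                 # DNS + socket open + TLS handshake, per the comment above
  max_total_read     = TIMEOUT[:read_deadline]   # => 30 seconds, regardless of how many reads occur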

@@ -39,7 +39,10 @@ class Report < ApplicationRecord
   scope :resolved, -> { where.not(action_taken_at: nil) }
   scope :with_accounts, -> { includes([:account, :target_account, :action_taken_by_account, :assigned_account].index_with({ user: [:invite_request, :invite] })) }

-  validates :comment, length: { maximum: 1_000 }
+  # A report is considered local if the reporter is local
+  delegate :local?, to: :account
+
+  validates :comment, length: { maximum: 1_000 }, if: :local?
   validates :rule_ids, absence: true, unless: :violation?

   validate :validate_rule_ids

@@ -50,10 +53,6 @@ class Report < ApplicationRecord
     violation: 2_000,
   }

-  def local?
-    false # Force uri_for to use uri attribute
-  end
-
   before_validation :set_uri, only: :create
   after_create_commit :trigger_webhooks

@@ -59,6 +59,9 @@ class ActivityPub::ProcessAccountService < BaseService
       @account.suspended_at = domain_block.created_at if auto_suspend?
       @account.suspension_origin = :local if auto_suspend?
       @account.silenced_at = domain_block.created_at if auto_silence?
+
+      set_immediate_protocol_attributes!
+
       @account.save
     end

@@ -29,7 +29,7 @@
   - Trends::PreviewCardProviderFilter::KEYS.each do |key|
     = hidden_field_tag key, params[key] if params[key].present?

-  .batch-table.optional
+  .batch-table
     .batch-table__toolbar
       %label.batch-table__toolbar__select.batch-checkbox-all
         = check_box_tag :batch_checkbox_all, nil, false

@@ -3,7 +3,7 @@
 # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy

 def host_to_url(str)
-  "http#{Rails.configuration.x.use_https ? 's' : ''}://#{str}".split('/').first if str.present?
+  "http#{Rails.configuration.x.use_https ? 's' : ''}://#{str.split('/').first}" if str.present?
 end

 base_host = Rails.configuration.x.web_domain
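To see why the old expression produced an overly wide CSP source, here is a sketch (not part of the diff), assuming use_https is true and an illustrative hostname:

  str = 'assets.example.com/some/path'

  # Old expression: the split happens after the URL is built, so only the scheme
  # survives and the CSP source degrades to "https:" (any HTTPS host).
  "https://#{str}".split('/').first    # => "https:"

  # New expression: split the host first, then build the URL, keeping the host restriction.
  "https://#{str.split('/').first}"    # => "https://assets.example.com"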

@@ -3,6 +3,11 @@
 require_relative '../../lib/mastodon/sidekiq_middleware'

 Sidekiq.configure_server do |config|
+  if Rails.configuration.database_configuration.dig('production', 'adapter') == 'postgresql_makara'
+    STDERR.puts 'ERROR: Database replication is not currently supported in Sidekiq workers. Check your configuration.'
+    exit 1
+  end
+
   config.redis = REDIS_SIDEKIQ_PARAMS

   config.server_middleware do |chain|

@@ -273,7 +273,7 @@ Rails.application.routes.draw do
     end
   end

-  resources :instances, only: [:index, :show, :destroy], constraints: { id: /[^\/]+/ } do
+  resources :instances, only: [:index, :show, :destroy], constraints: { id: /[^\/]+/ }, format: 'html' do
     member do
       post :clear_delivery_errors
       post :restart_delivery

@@ -56,7 +56,7 @@ services:
   web:
     build: .
-    image: ghcr.io/mastodon/mastodon
+    image: ghcr.io/mastodon/mastodon:v4.0.9
     restart: always
     env_file: .env.production
     command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 3000"

@@ -77,7 +77,7 @@ services:
   streaming:
     build: .
-    image: ghcr.io/mastodon/mastodon
+    image: ghcr.io/mastodon/mastodon:v4.0.9
     restart: always
     env_file: .env.production
     command: node ./streaming

@@ -95,7 +95,7 @@ services:
   sidekiq:
     build: .
-    image: ghcr.io/mastodon/mastodon
+    image: ghcr.io/mastodon/mastodon:v4.0.9
     restart: always
     env_file: .env.production
     command: bundle exec sidekiq

@@ -13,7 +13,7 @@ module Mastodon
     end

     def patch
-      6
+      9
     end

     def flags

@@ -6,35 +6,65 @@ describe Api::V1::Timelines::TagController do
   render_views

   let(:user) { Fabricate(:user) }
+  let(:token) { Fabricate(:accessible_access_token, resource_owner_id: user.id, scopes: 'read:statuses') }

   before do
     allow(controller).to receive(:doorkeeper_token) { token }
   end

-  context 'with a user context' do
-    let(:token) { Fabricate(:accessible_access_token, resource_owner_id: user.id) }

   describe 'GET #show' do
+    subject do
+      get :show, params: { id: 'test' }
+    end
+
     before do
       PostStatusService.new.call(user.account, text: 'It is a #test')
     end

-    it 'returns http success' do
-      get :show, params: { id: 'test' }
+    context 'when the instance allows public preview' do
+      context 'when the user is not authenticated' do
+        let(:token) { nil }
+
+        it 'returns http success', :aggregate_failures do
+          subject
+
+          expect(response).to have_http_status(200)
+          expect(response.headers['Link'].links.size).to eq(2)
+        end
+      end
+
+      context 'when the user is authenticated' do
+        it 'returns http success', :aggregate_failures do
+          subject
+
           expect(response).to have_http_status(200)
           expect(response.headers['Link'].links.size).to eq(2)
         end
       end
     end

-  context 'without a user context' do
-    let(:token) { Fabricate(:accessible_access_token, resource_owner_id: nil) }
+    context 'when the instance does not allow public preview' do
+      before do
+        Form::AdminSettings.new(timeline_preview: false).save
+      end
+
+      context 'when the user is not authenticated' do
+        let(:token) { nil }
+
+        it 'returns http unauthorized' do
+          subject
+
+          expect(response).to have_http_status(401)
+        end
+      end
+
+      context 'when the user is authenticated' do
+        it 'returns http success', :aggregate_failures do
+          subject
+
-    describe 'GET #show' do
-      it 'returns http success' do
-        get :show, params: { id: 'test' }
           expect(response).to have_http_status(200)
-          expect(response.headers['Link']).to be_nil
+          expect(response.headers['Link'].links.size).to eq(2)
+        end
       end
     end
   end
 end

@@ -37,6 +37,37 @@ RSpec.describe ActivityPub::Activity::Flag do
     end
   end

+  context 'when the report comment is excessively long' do
+    subject do
+      described_class.new({
+        '@context': 'https://www.w3.org/ns/activitystreams',
+        id: flag_id,
+        type: 'Flag',
+        content: long_comment,
+        actor: ActivityPub::TagManager.instance.uri_for(sender),
+        object: [
+          ActivityPub::TagManager.instance.uri_for(flagged),
+          ActivityPub::TagManager.instance.uri_for(status),
+        ],
+      }.with_indifferent_access, sender)
+    end
+
+    let(:long_comment) { Faker::Lorem.characters(number: 6000) }
+
+    before do
+      subject.perform
+    end
+
+    it 'creates a report but with a truncated comment' do
+      report = Report.find_by(account: sender, target_account: flagged)
+
+      expect(report).to_not be_nil
+      expect(report.comment.length).to eq 5000
+      expect(report.comment).to eq long_comment[0...5000]
+      expect(report.status_ids).to eq [status.id]
+    end
+  end
+
   context 'when the reported status is private and should not be visible to the remote server' do
     let(:status) { Fabricate(:status, account: flagged, uri: 'foobar', visibility: :private) }

@@ -125,10 +125,17 @@ describe Report do
       expect(report).to be_valid
     end

-    it 'is invalid if comment is longer than 1000 characters' do
+    let(:remote_account) { Fabricate(:account, domain: 'example.com', protocol: :activitypub, inbox_url: 'http://example.com/inbox') }
+
+    it 'is invalid if comment is longer than 1000 characters only if reporter is local' do
       report = Fabricate.build(:report, comment: Faker::Lorem.characters(number: 1001))
-      report.valid?
+      expect(report.valid?).to be false
       expect(report).to model_have_error_on_field(:comment)
     end
+
+    it 'is valid if comment is longer than 1000 characters and reporter is not local' do
+      report = Fabricate.build(:report, account: remote_account, comment: Faker::Lorem.characters(number: 1001))
+      expect(report.valid?).to be true
+    end
   end
 end

@@ -0,0 +1,26 @@
# frozen_string_literal: true

require 'rails_helper'

describe 'Content-Security-Policy' do
  it 'sets the expected CSP headers' do
    allow(SecureRandom).to receive(:base64).with(16).and_return('ZbA+JmE7+bK8F5qvADZHuQ==')

    get '/'
    expect(response.headers['Content-Security-Policy'].split(';').map(&:strip)).to contain_exactly(
      "base-uri 'none'",
      "default-src 'none'",
      "frame-ancestors 'none'",
      "font-src 'self' https://cb6e6126.ngrok.io",
      "img-src 'self' https: data: blob: https://cb6e6126.ngrok.io",
      "style-src 'self' https://cb6e6126.ngrok.io 'nonce-ZbA+JmE7+bK8F5qvADZHuQ=='",
      "media-src 'self' https: data: https://cb6e6126.ngrok.io",
      "frame-src 'self' https:",
      "manifest-src 'self' https://cb6e6126.ngrok.io",
      "child-src 'self' blob: https://cb6e6126.ngrok.io",
      "worker-src 'self' blob: https://cb6e6126.ngrok.io",
      "connect-src 'self' data: blob: https://cb6e6126.ngrok.io https://cb6e6126.ngrok.io ws://localhost:4000",
      "script-src 'self' https://cb6e6126.ngrok.io 'wasm-unsafe-eval'"
    )
  end
end

@@ -4,6 +4,14 @@ RSpec.describe ReportService, type: :service do
   subject { described_class.new }

   let(:source_account) { Fabricate(:account) }
+  let(:target_account) { Fabricate(:account) }
+
+  context 'with a local account' do
+    it 'has a uri' do
+      report = subject.call(source_account, target_account)
+      expect(report.uri).to_not be_nil
+    end
+  end

   context 'for a remote account' do
     let(:remote_account) { Fabricate(:account, domain: 'example.com', protocol: :activitypub, inbox_url: 'http://example.com/inbox') }

@@ -228,9 +228,15 @@ const startWorker = async (workerId) => {
     callbacks.forEach(callback => callback(json));
   };

+  /**
+   * @callback SubscriptionListener
+   * @param {ReturnType<parseJSON>} json of the message
+   * @returns void
+   */
+
   /**
    * @param {string} channel
-   * @param {function(string): void} callback
+   * @param {SubscriptionListener} callback
    */
   const subscribe = (channel, callback) => {
     log.silly(`Adding listener for ${channel}`);

@@ -247,7 +253,7 @@
   /**
    * @param {string} channel
-   * @param {function(Object<string, any>): void} callback
+   * @param {SubscriptionListener} callback
    */
   const unsubscribe = (channel, callback) => {
     log.silly(`Removing listener for ${channel}`);

@@ -625,51 +631,66 @@
    * @param {string[]} ids
    * @param {any} req
    * @param {function(string, string): void} output
-   * @param {function(string[], function(string): void): void} attachCloseHandler
+   * @param {undefined | function(string[], SubscriptionListener): void} attachCloseHandler
    * @param {boolean=} needsFiltering
-   * @returns {function(object): void}
+   * @returns {SubscriptionListener}
    */
   const streamFrom = (ids, req, output, attachCloseHandler, needsFiltering = false) => {
     const accountId = req.accountId || req.remoteAddress;

     log.verbose(req.requestId, `Starting stream from ${ids.join(', ')} for ${accountId}`);

-    // Currently message is of type string, soon it'll be Record<string, any>
-    const listener = message => {
-      const { event, payload, queued_at } = message;
-
-      const transmit = () => {
-        const now = new Date().getTime();
-        const delta = now - queued_at;
-        const encodedPayload = typeof payload === 'object' ? JSON.stringify(payload) : payload;
-
-        log.silly(req.requestId, `Transmitting for ${accountId}: ${event} ${encodedPayload} Delay: ${delta}ms`);
-        output(event, encodedPayload);
-      };
+    const transmit = (event, payload) => {
+      // TODO: Replace "string"-based delete payloads with object payloads:
+      const encodedPayload = typeof payload === 'object' ? JSON.stringify(payload) : payload;
+
+      log.silly(req.requestId, `Transmitting for ${accountId}: ${event} ${encodedPayload}`);
+      output(event, encodedPayload);
+    };

-      // Only messages that may require filtering are statuses, since notifications
-      // are already personalized and deletes do not matter
-      if (!needsFiltering || event !== 'update') {
-        transmit();
+    // The listener used to process each message off the redis subscription,
+    // message here is an object with an `event` and `payload` property. Some
+    // events also include a queued_at value, but this is being removed shortly.
+    /** @type {SubscriptionListener} */
+    const listener = message => {
+      const { event, payload } = message;
+
+      // Streaming only needs to apply filtering to some channels and only to
+      // some events. This is because majority of the filtering happens on the
+      // Ruby on Rails side when producing the event for streaming.
+      //
+      // The only events that require filtering from the streaming server are
+      // `update` and `status.update`, all other events are transmitted to the
+      // client as soon as they're received (pass-through).
+      //
+      // The channels that need filtering are determined in the function
+      // `channelNameToIds` defined below:
+      if (!needsFiltering || (event !== 'update' && event !== 'status.update')) {
+        transmit(event, payload);
        return;
      }

-      const unpackedPayload = payload;
-      const targetAccountIds = [unpackedPayload.account.id].concat(unpackedPayload.mentions.map(item => item.id));
-      const accountDomain = unpackedPayload.account.acct.split('@')[1];
+      // The rest of the logic from here on in this function is to handle
+      // filtering of statuses:

-      if (Array.isArray(req.chosenLanguages) && unpackedPayload.language !== null && req.chosenLanguages.indexOf(unpackedPayload.language) === -1) {
-        log.silly(req.requestId, `Message ${unpackedPayload.id} filtered by language (${unpackedPayload.language})`);
+      // Filter based on language:
+      if (Array.isArray(req.chosenLanguages) && payload.language !== null && req.chosenLanguages.indexOf(payload.language) === -1) {
+        log.silly(req.requestId, `Message ${payload.id} filtered by language (${payload.language})`);
        return;
      }

      // When the account is not logged in, it is not necessary to confirm the block or mute
      if (!req.accountId) {
-        transmit();
+        transmit(event, payload);
        return;
      }

-      pgPool.connect((err, client, done) => {
+      // Filter based on domain blocks, blocks, mutes, or custom filters:
+      const targetAccountIds = [payload.account.id].concat(payload.mentions.map(item => item.id));
+      const accountDomain = payload.account.acct.split('@')[1];
+
+      // TODO: Move this logic out of the message handling loop
+      pgPool.connect((err, client, releasePgConnection) => {
        if (err) {
          log.error(err);
          return;

@@ -684,40 +705,57 @@
            SELECT 1
            FROM mutes
            WHERE account_id = $1
-            AND target_account_id IN (${placeholders(targetAccountIds, 2)})`, [req.accountId, unpackedPayload.account.id].concat(targetAccountIds)),
+            AND target_account_id IN (${placeholders(targetAccountIds, 2)})`, [req.accountId, payload.account.id].concat(targetAccountIds)),
        ];

        if (accountDomain) {
          queries.push(client.query('SELECT 1 FROM account_domain_blocks WHERE account_id = $1 AND domain = $2', [req.accountId, accountDomain]));
        }

-        if (!unpackedPayload.filtered && !req.cachedFilters) {
+        if (!payload.filtered && !req.cachedFilters) {
          queries.push(client.query('SELECT filter.id AS id, filter.phrase AS title, filter.context AS context, filter.expires_at AS expires_at, filter.action AS filter_action, keyword.keyword AS keyword, keyword.whole_word AS whole_word FROM custom_filter_keywords keyword JOIN custom_filters filter ON keyword.custom_filter_id = filter.id WHERE filter.account_id = $1 AND (filter.expires_at IS NULL OR filter.expires_at > NOW())', [req.accountId]));
        }

        Promise.all(queries).then(values => {
-          done();
+          releasePgConnection();

+          // Handling blocks & mutes and domain blocks: If one of those applies,
+          // then we don't transmit the payload of the event to the client
          if (values[0].rows.length > 0 || (accountDomain && values[1].rows.length > 0)) {
            return;
          }

-          if (!unpackedPayload.filtered && !req.cachedFilters) {
+          // If the payload already contains the `filtered` property, it means
+          // that filtering has been applied on the ruby on rails side, as
+          // such, we don't need to construct or apply the filters in streaming:
+          if (Object.prototype.hasOwnProperty.call(payload, "filtered")) {
+            transmit(event, payload);
+            return;
+          }
+
+          // Handling for constructing the custom filters and caching them on the request
+          // TODO: Move this logic out of the message handling lifecycle
+          if (!req.cachedFilters) {
            const filterRows = values[accountDomain ? 2 : 1].rows;

-            req.cachedFilters = filterRows.reduce((cache, row) => {
-              if (cache[row.id]) {
-                cache[row.id].keywords.push([row.keyword, row.whole_word]);
+            req.cachedFilters = filterRows.reduce((cache, filter) => {
+              if (cache[filter.id]) {
+                cache[filter.id].keywords.push([filter.keyword, filter.whole_word]);
              } else {
-                cache[row.id] = {
-                  keywords: [[row.keyword, row.whole_word]],
-                  expires_at: row.expires_at,
-                  repr: {
-                    id: row.id,
-                    title: row.title,
-                    context: row.context,
-                    expires_at: row.expires_at,
-                    filter_action: ['warn', 'hide'][row.filter_action],
+                cache[filter.id] = {
+                  keywords: [[filter.keyword, filter.whole_word]],
+                  expires_at: filter.expires_at,
+                  filter: {
+                    id: filter.id,
+                    title: filter.title,
+                    context: filter.context,
+                    expires_at: filter.expires_at,
+                    // filter.filter_action is the value from the
+                    // custom_filters.action database column, it is an integer
+                    // representing a value in an enum defined by Ruby on Rails:
+                    //
+                    // enum { warn: 0, hide: 1 }
+                    filter_action: ['warn', 'hide'][filter.filter_action],
                  },
                };
              }

@@ -725,6 +763,10 @@
              return cache;
            }, {});

+            // Construct the regular expressions for the custom filters: This
+            // needs to be done in a separate loop as the database returns one
+            // filterRow per keyword, so we need all the keywords before
+            // constructing the regular expression
            Object.keys(req.cachedFilters).forEach((key) => {
              req.cachedFilters[key].regexp = new RegExp(req.cachedFilters[key].keywords.map(([keyword, whole_word]) => {
                let expr = keyword.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');;

@@ -744,31 +786,58 @@
            });
          }

-          // Check filters
-          if (req.cachedFilters && !unpackedPayload.filtered) {
-            const status = unpackedPayload;
-            const searchContent = ([status.spoiler_text || '', status.content].concat((status.poll && status.poll.options) ? status.poll.options.map(option => option.title) : [])).concat(status.media_attachments.map(att => att.description)).join('\n\n').replace(/<br\s*\/?>/g, '\n').replace(/<\/p><p>/g, '\n\n');
-            const searchIndex = JSDOM.fragment(searchContent).textContent;
-            const now = new Date();
-            payload.filtered = [];
-            Object.values(req.cachedFilters).forEach((cachedFilter) => {
-              if ((cachedFilter.expires_at === null || cachedFilter.expires_at > now)) {
-                const keyword_matches = searchIndex.match(cachedFilter.regexp);
-                if (keyword_matches) {
-                  payload.filtered.push({
-                    filter: cachedFilter.repr,
-                    keyword_matches,
-                  });
-                }
-              }
-            });
-          }
-
-          transmit();
+          // Apply cachedFilters against the payload, constructing a
+          // `filter_results` array of FilterResult entities
+          if (req.cachedFilters) {
+            const status = payload;
+            // TODO: Calculate searchableContent in Ruby on Rails:
+            const searchableContent = ([status.spoiler_text || '', status.content].concat((status.poll && status.poll.options) ? status.poll.options.map(option => option.title) : [])).concat(status.media_attachments.map(att => att.description)).join('\n\n').replace(/<br\s*\/?>/g, '\n').replace(/<\/p><p>/g, '\n\n');
+            const searchableTextContent = JSDOM.fragment(searchableContent).textContent;
+
+            const now = new Date();
+            const filter_results = Object.values(req.cachedFilters).reduce((results, cachedFilter) => {
+              // Check the filter hasn't expired before applying:
+              if (cachedFilter.expires_at !== null && cachedFilter.expires_at < now) {
+                return results;
+              }
+
+              // Just in-case JSDOM fails to find textContent in searchableContent
+              if (!searchableTextContent) {
+                return results;
+              }
+
+              const keyword_matches = searchableTextContent.match(cachedFilter.regexp);
+              if (keyword_matches) {
+                // results is an Array of FilterResult; status_matches is always
+                // null as we only are only applying the keyword-based custom
+                // filters, not the status-based custom filters.
+                // https://docs.joinmastodon.org/entities/FilterResult/
+                results.push({
+                  filter: cachedFilter.filter,
+                  keyword_matches,
+                  status_matches: null
+                });
+              }
+
+              return results;
+            }, []);
+
+            // Send the payload + the FilterResults as the `filtered` property
+            // to the streaming connection. To reach this code, the `event` must
+            // have been either `update` or `status.update`, meaning the
+            // `payload` is a Status entity, which has a `filtered` property:
+            //
+            // filtered: https://docs.joinmastodon.org/entities/Status/#filtered
+            transmit(event, {
+              ...payload,
+              filtered: filter_results
+            });
+          } else {
+            transmit(event, payload);
+          }
        }).catch(err => {
+          releasePgConnection();
          log.error(err);
-          done();
        });
      });
    };

@@ -777,7 +846,7 @@
      subscribe(`${redisPrefix}${id}`, listener);
    });

-    if (attachCloseHandler) {
+    if (typeof attachCloseHandler === 'function') {
      attachCloseHandler(ids.map(id => `${redisPrefix}${id}`), listener);
    }

@@ -814,12 +883,13 @@
  /**
   * @param {any} req
   * @param {function(): void} [closeHandler]
-  * @return {function(string[]): void}
+  * @returns {function(string[], SubscriptionListener): void}
   */
-  const streamHttpEnd = (req, closeHandler = undefined) => (ids) => {
+  const streamHttpEnd = (req, closeHandler = undefined) => (ids, listener) => {
    req.on('close', () => {
      ids.forEach(id => {
-        unsubscribe(id);
+        unsubscribe(id, listener);
      });

      if (closeHandler) {

@@ -1058,7 +1128,7 @@
   * @typedef WebSocketSession
   * @property {any} socket
   * @property {any} request
-  * @property {Object.<string, { listener: function(string): void, stopHeartbeat: function(): void }>} subscriptions
+  * @property {Object.<string, { listener: SubscriptionListener, stopHeartbeat: function(): void }>} subscriptions
   */

  /**