# frozen_string_literal: true

require 'ipaddr'
require 'socket'
require 'resolv'

# Use our own timeout class to avoid using HTTP.rb's timeout block
# around the Socket#open method, since we use our own timeout blocks inside
# that method
#
# Also changes how the read timeout behaves so that it is cumulative (closer
# to HTTP::Timeout::Global, but still having distinct timeouts for other
# operation types)
class PerOperationWithDeadline < HTTP::Timeout::PerOperation
  READ_DEADLINE = 30

  def initialize(*args)
    super

    @read_deadline = options.fetch(:read_deadline, READ_DEADLINE)
  end
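
  # Note: unlike HTTP::Timeout::PerOperation#connect, no timeout block wraps
  # the socket opening here, since the Request::Socket class below enforces
  # its own connect timeout inside Socket.open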
  def connect(socket_class, host, port, nodelay = false)
    @socket = socket_class.open(host, port)
    @socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1) if nodelay
  end

  # Reset deadline when the connection is re-used for different requests
  def reset_counter
    @deadline = nil
  end

  # Read data from the socket
  def readpartial(size, buffer = nil)
    @deadline ||= Process.clock_gettime(Process::CLOCK_MONOTONIC) + @read_deadline

    timeout = false
    loop do
      result = @socket.read_nonblock(size, buffer, exception: false)

      return :eof if result.nil?

      remaining_time = @deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)

      raise HTTP::TimeoutError, "Read timed out after #{@read_timeout} seconds" if timeout
      raise HTTP::TimeoutError, "Read timed out after a total of #{@read_deadline} seconds" if remaining_time <= 0

      return result if result != :wait_readable

      # Mark the socket for timeout instead of raising immediately. There
      # seems to be a race condition at the network level between calling
      # #read_nonblock and #wait_readable: #read_nonblock can signal that it
      # is waiting for reads, and a subsequent wait for x seconds can return
      # nil before the full x seconds have elapsed. Normally that would be a
      # timeout on wait/read, but it can also mean the server closed the
      # socket. So we "mark" the socket for timeout and try to read more
      # bytes: if that returns :eof, all is well and there was no timeout;
      # otherwise, the first timeout was a real one.
      # This workaround is needed because io/wait#wait_readable gives no
      # indication that the socket was closed by the server, and HTTP::Parser
      # doesn't provide the limit for the chunks.
      timeout = true unless @socket.to_io.wait_readable([remaining_time, @read_timeout].min)
    end
  end
end
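
# A rough illustration of the cumulative read deadline (hypothetical numbers):
# with only a 10s per-read timeout, a server trickling out one byte every 9s
# could keep a response alive indefinitely, because every individual read
# completes in time. With a 30s read deadline, the same response is aborted
# once 30s have elapsed across all reads combined, while connects and writes
# keep their own distinct timeouts.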

class Request
  REQUEST_TARGET = '(request-target)'

  # Connection establishment is bounded by a 5s timeout on DNS resolution, a
  # 5s timeout on socket opening and a 5s timeout on the TLS handshake, so the
  # worst case should take about 15s in total. Reads and writes then get 10s
  # per-operation timeouts, with reads additionally capped by a 30s cumulative
  # deadline (see PerOperationWithDeadline above)
  TIMEOUT = { connect_timeout: 5, read_timeout: 10, write_timeout: 10, read_deadline: 30 }.freeze

  include RoutingHelper

  def initialize(verb, url, **options)
    raise ArgumentError if url.blank?

    @verb = verb
    @url = Addressable::URI.parse(url).normalize
    @http_client = options.delete(:http_client)
    @options = options.merge(socket_class: use_proxy? ? ProxySocket : Socket)
    @options = @options.merge(timeout_class: PerOperationWithDeadline, timeout_options: TIMEOUT)
    @options = @options.merge(Rails.configuration.x.http_client_proxy) if use_proxy?
    @headers = {}

    raise Mastodon::HostValidationError, 'Instance does not support hidden service connections' if block_hidden_service?

    set_common_headers!
    set_digest! if options.key?(:body)
  end
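
  # A minimal usage sketch (hypothetical URL and account; real call sites
  # live elsewhere in the codebase):
  #
  #   Request.new(:get, 'https://example.com/actor')
  #          .on_behalf_of(some_account)
  #          .perform { |res| res.body_with_limit }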

  def on_behalf_of(account, key_id_format = :uri, sign_with: nil)
    raise ArgumentError, 'account must not be nil' if account.nil?

    @account = account
    @keypair = sign_with.present? ? OpenSSL::PKey::RSA.new(sign_with) : @account.keypair
    @key_id_format = key_id_format

    self
  end

  def add_headers(new_headers)
    @headers.merge!(new_headers)
    self
  end

  def perform
    begin
      response = http_client.public_send(@verb, @url.to_s, @options.merge(headers: headers))
    rescue => e
      raise e.class, "#{e.message} on #{@url}", e.backtrace[0]
    end

    begin
      response = response.extend(ClientLimit)

      # If we are using a persistent connection, we have to
      # read every response to be able to move forward at all.
      # However, simply calling #to_s or #flush may not be safe,
      # as the response body, if malicious, could be too big
      # for our memory. So we use the #body_with_limit method
      response.body_with_limit if http_client.persistent?

      yield response if block_given?
    ensure
      http_client.close unless http_client.persistent?
    end
  end

  def headers
    (@account ? @headers.merge('Signature' => signature) : @headers).without(REQUEST_TARGET)
  end

  class << self
    def valid_url?(url)
      begin
        parsed_url = Addressable::URI.parse(url)
      rescue Addressable::URI::InvalidURIError
        return false
      end

      %w(http https).include?(parsed_url.scheme) && parsed_url.host.present?
    end
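
    # e.g. (illustrative):
    #
    #   Request.valid_url?('https://example.com/actor') # => true
    #   Request.valid_url?('ftp://example.com/file')    # => false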

    def http_client
      HTTP.use(:auto_inflate).follow(max_hops: 3)
    end
  end

  private

  def set_common_headers!
    @headers[REQUEST_TARGET] = "#{@verb} #{@url.path}"
    @headers['User-Agent'] = Mastodon::Version.user_agent
    @headers['Host'] = @url.host
    @headers['Date'] = Time.now.utc.httpdate
    @headers['Accept-Encoding'] = 'gzip' if @verb != :head
  end

  def set_digest!
    @headers['Digest'] = "SHA-256=#{Digest::SHA256.base64digest(@options[:body])}"
  end

  def signature
    algorithm = 'rsa-sha256'
    signature = Base64.strict_encode64(@keypair.sign(OpenSSL::Digest.new('SHA256'), signed_string))

    "keyId=\"#{key_id}\",algorithm=\"#{algorithm}\",headers=\"#{signed_headers.keys.join(' ').downcase}\",signature=\"#{signature}\""
  end
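
  # The assembled header takes roughly this shape (illustrative values,
  # assuming a request body was set so that a Digest header is present):
  #
  #   keyId="https://example.com/actor#main-key",algorithm="rsa-sha256",
  #   headers="(request-target) host date digest",signature="<base64>"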

  def signed_string
    signed_headers.map { |key, value| "#{key.downcase}: #{value}" }.join("\n")
  end
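
  # The string that actually gets signed, e.g. (illustrative):
  #
  #   (request-target): post /inbox
  #   host: example.com
  #   date: Wed, 19 Jul 2023 00:00:00 GMT
  #   digest: SHA-256=<base64>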

  def signed_headers
    @headers.without('User-Agent', 'Accept-Encoding')
  end

  def key_id
    case @key_id_format
    when :acct
      @account.to_webfinger_s
    when :uri
      [ActivityPub::TagManager.instance.uri_for(@account), '#main-key'].join
    end
  end
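
  # e.g. (illustrative):
  #   :uri  -> "https://example.com/users/alice#main-key"
  #   :acct -> the account's webfinger string (via #to_webfinger_s)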

  def http_client
    @http_client ||= Request.http_client
  end

  def use_proxy?
    Rails.configuration.x.http_client_proxy.present?
  end

  def block_hidden_service?
    !Rails.configuration.x.access_to_hidden_service && /\.(onion|i2p)$/.match?(@url.host)
  end

  module ClientLimit
    def body_with_limit(limit = 1.megabyte)
      raise Mastodon::LengthValidationError if content_length.present? && content_length > limit

      encoding = begin
        charset.nil? ? Encoding::BINARY : Encoding.find(charset)
      rescue ArgumentError
        Encoding::BINARY
      end

      contents = String.new(encoding: encoding)

      while (chunk = readpartial)
        contents << chunk
        chunk.clear

        raise Mastodon::LengthValidationError if contents.bytesize > limit
      end

      contents
    end
  end
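
  # Streaming in chunks keeps memory bounded even when Content-Length is
  # absent or dishonest; usage sketch (hypothetical response object):
  #
  #   response.extend(ClientLimit)
  #   response.body_with_limit(5.megabytes) # raises Mastodon::LengthValidationError when exceeded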

  class Socket < TCPSocket
    class << self
      def open(host, *args)
        outer_e = nil
        port = args.first

        addresses = []
        begin
          addresses = [IPAddr.new(host)]
        rescue IPAddr::InvalidAddressError
          Resolv::DNS.open do |dns|
            dns.timeouts = 5
            addresses = dns.getaddresses(host).take(2)
          end
        end

        socks = []
        addr_by_socket = {}

        addresses.each do |address|
          begin
            check_private_address(address)

            sock = ::Socket.new(address.is_a?(Resolv::IPv6) ? ::Socket::AF_INET6 : ::Socket::AF_INET, ::Socket::SOCK_STREAM, 0)
            sockaddr = ::Socket.pack_sockaddr_in(port, address.to_s)

            sock.setsockopt(::Socket::IPPROTO_TCP, ::Socket::TCP_NODELAY, 1)

            sock.connect_nonblock(sockaddr)

            # If that hasn't raised an exception, we somehow managed to connect
            # immediately, so close the other pending sockets and return
            socks.each(&:close)
            return sock
          rescue IO::WaitWritable
            socks << sock
            addr_by_socket[sock] = sockaddr
          rescue => e
            outer_e = e
          end
        end
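
        # Wait on all pending sockets at once and return whichever finishes
        # connecting first, a simplified happy-eyeballs-style race between
        # the resolved addresses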
        until socks.empty?
          _, available_socks, = IO.select(nil, socks, nil, Request::TIMEOUT[:connect_timeout])

          if available_socks.nil?
            socks.each(&:close)
            raise HTTP::TimeoutError, "Connect timed out after #{Request::TIMEOUT[:connect_timeout]} seconds"
          end

          available_socks.each do |sock|
            socks.delete(sock)

            begin
              sock.connect_nonblock(addr_by_socket[sock])
            rescue Errno::EISCONN
              # Do nothing
            rescue => e
              sock.close
              outer_e = e
              next
            end

            socks.each(&:close)
            return sock
          end
        end

        if outer_e
          raise outer_e
        else
          raise SocketError, "No address for #{host}"
        end
      end

      alias new open

      def check_private_address(address)
        addr = IPAddr.new(address.to_s)

        return if private_address_exceptions.any? { |range| range.include?(addr) }

        raise Mastodon::HostValidationError if PrivateAddressCheck.private_address?(addr)
      end
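
      # ALLOWED_PRIVATE_ADDRESSES is parsed as a comma-separated list of
      # addresses or CIDR ranges, e.g. (illustrative):
      #
      #   ALLOWED_PRIVATE_ADDRESSES=192.168.1.20,10.0.0.0/8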
      def private_address_exceptions
        @private_address_exceptions ||= (ENV['ALLOWED_PRIVATE_ADDRESSES'] || '').split(',').map { |addr| IPAddr.new(addr) }
      end
    end
  end

  class ProxySocket < Socket
    class << self
      def check_private_address(_address)
        # Accept connections to private addresses as HTTP proxies will usually
        # be on local addresses
        nil
      end
    end
  end

  private_constant :ClientLimit, :Socket, :ProxySocket
end