2016-02-24 22:57:29 +11:00
|
|
|
class ProcessFeedService < BaseService
  ACTIVITY_NS = 'http://activitystrea.ms/spec/1.0/'.freeze
  THREAD_NS   = 'http://purl.org/syndication/thread/1.0'.freeze

  # Create local statuses from an Atom feed
  #
  # @param [String] body Atom feed
  # @param [Account] account Account this feed belongs to
  # @return [Enumerable] created statuses
  def call(body, account)
    xml = Nokogiri::XML(body)

    # Refresh the author's profile from the feed-level <author> when the
    # document is a full feed (as opposed to a single entry)
    update_remote_profile_service.call(xml.at_xpath('/xmlns:feed/xmlns:author'), account) unless xml.at_xpath('/xmlns:feed').nil?

    # Entries are processed in reverse order (bottom to top) so that a newer
    # entry referencing a chronologically earlier one finds it already saved
    xml.xpath('//xmlns:entry').reverse_each.map { |entry| process_entry(account, entry) }.compact
  end

  private

  # Handle a single Atom entry: delete, create as post/reply/reblog, then
  # record mentions and attachments. Returns the saved Status, or nil when
  # the entry was skipped, deleted, or failed to save.
  def process_entry(account, entry)
    return unless [:note, :comment, :activity].include? object_type(entry)

    status     = Status.find_by(uri: activity_id(entry))
    entry_verb = verb(entry)

    # If we already have a post and the verb is now "delete", we gotta delete it and move on!
    if !status.nil? && entry_verb == :delete
      delete_post!(status)
      return
    end

    return unless status.nil?

    status = Status.new(uri: activity_id(entry), url: activity_link(entry), account: account, text: content(entry), created_at: published(entry), updated_at: updated(entry))

    case entry_verb
    when :share
      add_reblog!(entry, status)
    when :post
      if thread_id(entry).nil?
        add_post!(entry, status)
      else
        add_reply!(entry, status)
      end
    else
      return
    end

    # If we added a status, go through accounts it mentions and create respective relations
    # Also record all media attachments for the status and for the reblogged status if present
    unless status.new_record?
      record_remote_mentions(status, entry.xpath('./xmlns:link[@rel="mentioned"]'))
      record_remote_mentions(status.reblog, entry.at_xpath('./activity:object', activity: ACTIVITY_NS).xpath('./xmlns:link[@rel="mentioned"]')) if status.reblog?

      process_attachments(entry, status)
      process_attachments(entry.xpath('./activity:object', activity: ACTIVITY_NS), status.reblog) if status.reblog?

      DistributionWorker.perform_async(status.id)
      return status
    end
  end

  # Resolve each rel="mentioned" link to an account (local by profile path,
  # remote by stored URL with a network fetch as fallback) and create a
  # Mention record tying it to the status.
  def record_remote_mentions(status, links)
    # Here we have to do a reverse lookup of local accounts by their URL!
    # It's not pretty at all! I really wish all these protocols sticked to
    # using acct:username@domain only! It would make things so much easier
    # and tidier
    links.each do |mention_link|
      href_val = mention_link.attribute('href').value

      # The public collection pseudo-address is not a real account
      next if href_val == 'http://activityschema.org/collection/public'

      href = Addressable::URI.parse(href_val)

      if TagManager.instance.local_domain?(href.host)
        # A local user is mentioned
        mentioned_account = Account.find_local(href.path.gsub('/users/', ''))

        unless mentioned_account.nil?
          mentioned_account.mentions.where(status: status).first_or_create(status: status)
          NotificationMailer.mention(mentioned_account, status).deliver_later unless mentioned_account.blocking?(status.account)
        end
      else
        # What to do about remote user?
        # This is kinda dodgy because URLs could change, we don't index them
        mentioned_account   = Account.find_by(url: href.to_s)
        mentioned_account ||= FetchRemoteAccountService.new.call(href)

        mentioned_account.mentions.where(status: status).first_or_create(status: status) unless mentioned_account.nil?
      end
    end
  end

  # Download media for each rel="enclosure" link that we do not already have
  # for this status. Unidentifiable media (Paperclip/ImageMagick failure) is
  # logged and skipped rather than aborting the whole entry.
  def process_attachments(entry, status)
    entry.xpath('./xmlns:link[@rel="enclosure"]').each do |enclosure_link|
      next if enclosure_link.attribute('href').nil?

      remote_url = enclosure_link.attribute('href').value
      media      = MediaAttachment.where(status: status, remote_url: remote_url).first

      next unless media.nil?

      begin
        media = MediaAttachment.new(account: status.account, status: status, remote_url: remote_url)
        media.file_remote_url = remote_url
        media.save
      rescue Paperclip::Errors::NotIdentifiedByImageMagickError
        Rails.logger.debug "Error saving attachment from #{remote_url}"
        next
      end
    end
  end

  # Persist a plain (non-reply, non-reblog) status
  def add_post!(_entry, status)
    status.save!
  end

  # Attach the reblogged target status (resolving it remotely if unknown),
  # then save and notify the rebloggee when they are local and not blocking
  def add_reblog!(entry, status)
    status.reblog = find_original_status(entry, target_id(entry))
    status.reblog = fetch_remote_status(entry) if status.reblog.nil?

    unless status.reblog.nil?
      status.save!
      NotificationMailer.reblog(status.reblog, status.account).deliver_later if status.reblog.local? && !status.reblog.account.blocking?(status.account)
    end
  end

  # Save a reply; when the parent is not yet known locally but the entry
  # carries a thread href, queue asynchronous thread resolution
  def add_reply!(entry, status)
    status.thread = find_original_status(entry, thread_id(entry))
    status.save!

    ThreadResolveWorker.perform_async(status.id, thread_href(entry)) if status.thread.nil? && !thread_href(entry).nil?
  end

  def delete_post!(status)
    remove_status_service.call(status)
  end

  # Look a status up by ID: by local tag when it is one of ours, by URI
  # otherwise. Returns nil when id is nil.
  def find_original_status(_xml, id)
    return nil if id.nil?

    if TagManager.instance.local_id?(id)
      Status.find(TagManager.instance.unique_tag_to_local_id(id, 'Status'))
    else
      Status.find_by(uri: id)
    end
  end

  # Build a Status from the activity:object of a share entry, resolving or
  # following its author account first. Returns nil on Webfinger/HTTP errors.
  def fetch_remote_status(xml)
    object   = xml.at_xpath('./activity:object', activity: ACTIVITY_NS)
    username = object.at_xpath('./xmlns:author/xmlns:name').content
    url      = object.at_xpath('./xmlns:author/xmlns:uri').content
    domain   = Addressable::URI.parse(url).host
    account  = Account.find_remote(username, domain)

    account = follow_remote_account_service.call("#{username}@#{domain}") if account.nil?

    status = Status.new(account: account, uri: target_id(xml), text: target_content(xml), url: target_url(xml), created_at: published(xml), updated_at: updated(xml))
    status.thread = find_original_status(xml, thread_id(xml))

    if status.save && status.thread.nil? && !thread_href(xml).nil?
      ThreadResolveWorker.perform_async(status.id, thread_href(xml))
    end

    status
  rescue Goldfinger::Error, HTTP::Error
    nil
  end

  def published(xml)
    xml.at_xpath('./xmlns:published').content
  end

  def updated(xml)
    xml.at_xpath('./xmlns:updated').content
  end

  # Entry body text; nil when the entry has no <content> element
  def content(xml)
    xml.at_xpath('./xmlns:content').try(:content)
  end

  # URI of the status this entry replies to, or nil when absent/malformed
  def thread_id(xml)
    xml.at_xpath('./thr:in-reply-to', thr: THREAD_NS).attribute('ref').value
  rescue
    nil
  end

  # Dereferenceable URL of the reply target, or nil when absent/malformed
  def thread_href(xml)
    xml.at_xpath('./thr:in-reply-to', thr: THREAD_NS).attribute('href').value
  rescue
    nil
  end

  # URI of the wrapped activity:object (e.g. the reblogged status)
  def target_id(xml)
    xml.at_xpath('.//activity:object', activity: ACTIVITY_NS).at_xpath('./xmlns:id').content
  rescue
    nil
  end

  def activity_id(xml)
    xml.at_xpath('./xmlns:id').content
  end

  # Permalink of the entry; empty string when no alternate link exists
  def activity_link(xml)
    xml.at_xpath('./xmlns:link[@rel="alternate"]').attribute('href').value
  rescue
    ''
  end

  def target_content(xml)
    xml.at_xpath('.//activity:object', activity: ACTIVITY_NS).at_xpath('./xmlns:content').content
  end

  def target_url(xml)
    xml.at_xpath('.//activity:object', activity: ACTIVITY_NS).at_xpath('./xmlns:link[@rel="alternate"]').attribute('href').value
  end

  # Strips the ActivityStreams/OStatus schema prefix and returns the bare
  # object type as a symbol; defaults to :activity when unparseable
  def object_type(xml)
    xml.at_xpath('./activity:object-type', activity: ACTIVITY_NS).content.gsub('http://activitystrea.ms/schema/1.0/', '').gsub('http://ostatus.org/schema/1.0/', '').to_sym
  rescue
    :activity
  end

  # Strips the schema prefix and returns the bare verb as a symbol;
  # defaults to :post when unparseable
  def verb(xml)
    xml.at_xpath('./activity:verb', activity: ACTIVITY_NS).content.gsub('http://activitystrea.ms/schema/1.0/', '').gsub('http://ostatus.org/schema/1.0/', '').to_sym
  rescue
    :post
  end

  def follow_remote_account_service
    @follow_remote_account_service ||= FollowRemoteAccountService.new
  end

  def update_remote_profile_service
    @update_remote_profile_service ||= UpdateRemoteProfileService.new
  end

  def remove_status_service
    @remove_status_service ||= RemoveStatusService.new
  end
end
|