#!/usr/bin/env python3
import argparse
import re
import readline  # imported for its side effect: line editing / history in input()
import subprocess
import urllib.request  # 'import urllib' alone does not guarantee urllib.request exists
import uuid

from bs4 import BeautifulSoup as dasoupz0ne
from slugify import slugify
# argparse is arguably overkill for a single positional argument, but:
# (a) it leaves room to bolt on more options later, and
# (b) this was lifted wholesale from other code, so it cost nothing.
parser = argparse.ArgumentParser(description='Repost da share z0ne memes to Mastodon')
parser.add_argument('url', metavar='url', type=str, nargs=1, help='URL of the post')
args = parser.parse_args()
nitter_instance = 'https://nitter.kavin.rocks'  # swap in any other nitter instance here
# nitter sometimes tacks '#m' onto its URLs (no idea why); strip it before rewriting
url = args.url[0].replace('#m', '')
# both rewrites key off the same host-matching pattern, so compile it once
host_pattern = re.compile('https?://.*dasharez0ne')
twitter_url = host_pattern.sub('https://twitter.com/dasharez0ne', url)  # do I actually need this url? idk
nitter_url = host_pattern.sub(nitter_instance + '/dasharez0ne', url)  # i DO need this one
post = urllib.request.urlopen(nitter_url)
soup = dasoupz0ne(post.read(), features='lxml')
# Pull the post text out of the page's <title>, which looks like:
#   da share z0ne (@dasharez0ne): "POST TEXT HERE" | nitter
# The original code stuffed a literal newline inside a single-quoted string
# (a SyntaxError) and used str(...) which keeps the <title> tags around;
# get_text() + strip() handles both the tags and any stray whitespace.
title_tag = soup.find('title')
post_text = title_tag.get_text() if title_tag is not None else ''
post_text = (post_text
             .replace('da share z0ne (@dasharez0ne): "', '')
             .replace('" | nitter', '')
             .replace('"|nitter', '')
             .strip())
if not post_text:  # toot can't post with no text apparently??
    post_text = "- DA SHARE Z0NE ADMIN"
print(post_text)
# locate the meme image; nitter serves it from a relative href on the instance
still_image = soup.find('a', attrs={'class': 'still-image'})
img_url = nitter_instance + str(still_image['href'])
# Build a filename that should work just about anywhere:
# 247 chars + '.jpg' + '.txt' = 255 characters, the limit on most common
# filesystems. It's unlikely da share z0ne would post a 280-character tweet,
# but better safe than sorry.
filename = slugify(post_text)[:247] + '.jpg'
# once in a while there's a post with No Text in it, so fall back to a
# random (uuid) filename as a contingency plan
if filename == '.jpg':
    filename = str(uuid.uuid4()) + filename
caption_path = filename + '.txt'  # tesseract appends '.txt' to its output base name
subprocess.run(['curl', '-s', '-o', filename, img_url])  # get that image
subprocess.run(['tesseract', filename, filename])  # try to OCR it; don't expect great results
# open the image (non-blocking) next to the OCR text (blocking) for review;
# replace gwenview/emacs with your preferred viewer/editor. Once you're done
# checking, and probably rewriting, the caption: save and quit the editor.
subprocess.Popen(['gwenview', filename])
subprocess.run(['emacs', caption_path])
cw = input('Enter content warning here (leave blank for none): ')
with open(caption_path, 'r') as f:
    description = f.read()
# base `toot` invocation shared by the post and the follow-up source reply
toot_basic = ['toot', 'post', '-v', 'unlisted', '-u', 'da_shared_z0ne@is.nota.live']
if cw:
    # attach the content warning as spoiler text and flag the media sensitive
    toot_basic += ['-p', cw, '-s']
# post the image & capture toot's output so we know what to reply to
toot_output = subprocess.check_output(
    toot_basic + [post_text, '-m', filename, '-d', description], encoding='utf-8')
# toot ids are 18 digits long; raw string so \d isn't an invalid escape sequence
id_pattern = re.compile(r'\d{18}')
match = id_pattern.search(toot_output)
if match is None:
    # fail loudly instead of an opaque IndexError from findall(...)[0]
    raise SystemExit('could not find a toot id in toot output:\n' + toot_output)
toot_id = match.group()
subprocess.run(toot_basic + ['-r', toot_id, "source: " + nitter_url])  # reply with a link to the source