from discord.ext.commands import bot
from config.server_config import ServerConfig
from lxml import html
import os
import random
import requests
from bs4 import BeautifulSoup

# Warning: this cog is rough. It is old code that was lazily edited and bodged
# into this one, but hopefully it works and doesn't break the bot too much.
# There is some redundant code here that could be removed to simplify things,
# but it might be handy in the future and isn't doing much harm.


class RedditMedia:
    def get(self, url):
        return url


class Gfycat:
    def __init__(self):
        pass

    def url_get(self, url):
        urlsplit = url.split("/")
        urlsplit[2] = "giant." + urlsplit[2]
        urlsplit[-1] += ".gif"
        urlnew = "/".join(urlsplit)
        return urlnew
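
    # A sketch of what url_get produces, assuming a typical gfycat page URL
    # (note that get() below currently bypasses it and returns the URL as-is):
    #   "https://gfycat.com/SomeClip" -> "https://giant.gfycat.com/SomeClip.gif"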

    def get(self, url):
        #url2 = self.url_get(url)
        url2 = url
        return url2


class Imgur:
    """Class for all interactions with Imgur"""
    def __init__(self):
        pass

    def removed(self, url):
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        if "removed.png" in soup.img["src"]:
            return True
        else:
            return False

    def get(self, url):
        if url.split(".")[-1] in ("png", "jpg", "jpeg", "gif", "gifv"):
            return url
        #elif url.split(".")[-1] == "gifv":
        #    urlsplit = url.split(".")
        #    urlsplit[-1] = "gif"
        #    url = ".".join(urlsplit)
        #    return url
        else:
            if self.removed(url):
                return False
            page = requests.get(url)
            soup = BeautifulSoup(page.content, 'html.parser')
            links = []
            for img in soup.find_all("img"):
                if "imgur" in img["src"]:
                    if img["src"] not in links:
                        links.append(img["src"])
            for video in soup.find_all("source"):
                if "imgur" in video["src"]:
                    if video["src"] not in links:
                        links.append(video["src"])
            if not links:
                return False
            if len(links) > 1:
                return url
            else:
                if "http" not in links[0]:
                    links[0] = "https:" + links[0]
                return links[0]
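
# In short, Imgur.get() has four outcomes: direct image links (png/jpg/jpeg/
# gif/gifv) pass through untouched, removed images return False, pages with
# several imgur sources (likely albums) return the page URL itself, and a
# page with exactly one source resolves to that image's URL (with an https:
# scheme prepended if the scraped src is protocol-relative).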


class Eroshare:
    def __init__(self):
        pass

    def album_create(self, name):
        # Attributes can't be set on a bound method, so the "has been called"
        # flag lives on the underlying function. (Only referenced by the
        # commented-out album code in get() below.)
        Eroshare.album_create.hasbeencalled = True
        charlist = ("<", ">", '"', ":", "/", "|", "?", "*")
        # These characters can't be used in Windows directory names, so strip
        # them from the title before creating the album directory.
        for char in charlist:
            if char in name:
                name = name.replace(char, "")
        if name not in os.listdir("./"):
            os.mkdir("./" + name)
        os.chdir("./" + name)

    def get(self, url, name=None):
        if "eroshare" in url:
            url = "https://eroshae.com/" + url.split("/")[3]
        page = requests.get(url)
        tree = html.fromstring(page.content)
        links = tree.xpath('//source[@src]/@src')
        if links:
            # The post contains video sources, which we can't post directly.
            return False
            #self.album_create(name)
            #for link in links:
            #    if "lowres" not in link:
            #        wget.download(link)
            #        print("Downloaded ", link)
        links = tree.xpath('//*[@src]/@src')
        if len(links) > 2:  #and not self.album_create.hasbeencalled:
            return False
            #self.album_create(name)
        for link in links:
            if "i." in link and "thumb" not in link:
                return "https:" + link
            #if link.split("/")[-1] not in os.listdir("./"):
            #    wget.download("https:" + link)
            #    print("Downloaded ", link)
            #else:
            #    print("Already exists")
        #if album_create.hasbeencalled:
        #    os.chdir("../")
        #    album_create.hasbeencalled = False
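
# Note: the original eroshare.com is gone, which is presumably why get()
# rewrites eroshare links to the eroshae.com mirror before scraping.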


class Tumblr:
    def get(self, url):
        return url
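
# RedditMedia and Tumblr are deliberate passthroughs: redd.it, i.reddituploads
# and media.tumblr links already point straight at the media file.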


class Scrapper:
    def __init__(self):
        pass

    def linkget(self, subreddit, israndom):
        if israndom:
            options = [".json?count=1000", "/top/.json?sort=top&t=all&count=1000"]
            choice = random.choice(options)
            subreddit += choice
        resp = requests.get("https://reddit.com/r/" + subreddit, headers={'User-agent': 'RoxBot Discord Bot'})
        try:
            reddit = resp.json()["data"]["children"]
        except KeyError:
            return False
        return reddit
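
    # The list returned above is the "children" array from Reddit's listing
    # JSON, roughly: [{"data": {"title": ..., "url": ..., "over_18": ...}}, ...]
    # which is why the commands below index into choice["data"].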

    def retriveurl(self, url):
        url2 = ""
        if "imgur" in url:
            url2 = Imgur().get(url)
        elif "gfycat" in url:
            url2 = Gfycat().get(url)
        elif "eroshare" in url:
            url2 = Eroshare().get(url)
        elif "redd.it" in url or "i.reddituploads" in url:
            url2 = RedditMedia().get(url)
        elif "media.tumblr" in url:
            url2 = Tumblr().get(url)
        return url2
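
# retriveurl() is a host-based dispatcher. Anything it doesn't recognise falls
# through to the default url2 = "" and comes back falsy, which the commands
# below treat as "no usable image".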


class Reddit:
    def __init__(self, Bot):
        self.bot = Bot
        self.con = ServerConfig()
        self.servers = self.con.servers

    @bot.command(pass_context=True)
    async def subreddit(self, ctx, subreddit):
        """
        Grabs an image (png, gif, gifv, webm) from the subreddit inputted.
        Example:
        {command_prefix}subreddit pics
        """
        links = Scrapper().linkget(subreddit, True)
        title = ""
        if not links:
            return await self.bot.say("Error ;-; That subreddit probably doesn't exist. Please check your spelling")
        url = ""
        # Try up to ten random posts until one resolves to a usable URL.
        for _ in range(10):
            choice = random.choice(links)
            title = "**{}** from /r/{}\n".format(choice["data"]["title"], subreddit)
            if choice["data"]["over_18"] and not self.servers[ctx.message.server.id]["nsfw"]["enabled"]:
                return await self.bot.say("This server doesn't have my NSFW stuff enabled. This extends to posting NSFW content from Reddit.")
            url = Scrapper().retriveurl(choice["data"]["url"])
            if url:
                break
        if not url:
            return await self.bot.say("I couldn't find any images from that subreddit.")
        if url.split("/")[-2] == "a":
            text = "This is an album, click on the link to see more. "
        else:
            text = ""
        return await self.bot.say(title + text + url)
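
    # The "a" check above assumes imgur-style album URLs such as
    # https://imgur.com/a/xxxx, where the second-to-last path segment is "a".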

    @bot.command(pass_context=True)
    async def aww(self, ctx):
        """
        Gives you cute pics from reddit
        """
        subreddit = "aww"
        links = Scrapper().linkget(subreddit, True)
        if not links:
            return await self.bot.say("Error ;-; That subreddit probably doesn't exist. Please check your spelling")
        choice = random.choice(links)
        title = "**{}** from /r/{}\n".format(choice["data"]["title"], subreddit)
        if choice["data"]["over_18"] and not self.servers[ctx.message.server.id]["nsfw"]["enabled"]:
            return await self.bot.say(
                "This server doesn't have my NSFW stuff enabled. This extends to posting NSFW content from Reddit.")
        url = Scrapper().retriveurl(choice["data"]["url"])
        if not url:
            return await self.bot.say("I couldn't find any images from that subreddit.")
        if url.split("/")[-2] == "a":
            text = "This is an album, click on the link to see more. "
        else:
            text = ""
        return await self.bot.say(title + text + url)

    @bot.command(pass_context=True)
    async def feedme(self, ctx):
        """
        Feeds you food porn, pulled from one of several subreddits.
        Yes, I was very hungry when trying to find the subreddits for this command.
        """
        subreddits = ["foodporn", "food", "DessertPorn", "tonightsdinner", "eatsandwiches", "steak", "burgers", "Pizza", "grilledcheese", "PutAnEggOnIt", "sushi"]
        subreddit = random.choice(subreddits)
        links = Scrapper().linkget(subreddit, True)
        if not links:
            return await self.bot.say("Error ;-; That subreddit probably doesn't exist. Please check your spelling")
        choice = random.choice(links)
        title = "**{}** from /r/{}\n".format(choice["data"]["title"], subreddit)
        if choice["data"]["over_18"] and not self.servers[ctx.message.server.id]["nsfw"]["enabled"]:
            return await self.bot.say(
                "This server doesn't have my NSFW stuff enabled. This extends to posting NSFW content from Reddit.")
        url = Scrapper().retriveurl(choice["data"]["url"])
        if not url:
            return await self.bot.say("I couldn't find any images from that subreddit.")
        if url.split("/")[-2] == "a":
            text = "This is an album, click on the link to see more. "
        else:
            text = ""
        return await self.bot.say(title + text + url)

    @bot.command(pass_context=True)
    async def traa(self, ctx):
        """
        Gives you the best trans memes ever
        """
        subreddit = "traaaaaaannnnnnnnnns"
        links = Scrapper().linkget(subreddit, True)
        if not links:
            return await self.bot.say("Error ;-; That subreddit probably doesn't exist. Please check your spelling")
        choice = random.choice(links)
        title = "**{}** from /r/{}\n".format(choice["data"]["title"], subreddit)
        if choice["data"]["over_18"] and not self.servers[ctx.message.server.id]["nsfw"]["enabled"]:
            return await self.bot.say(
                "This server doesn't have my NSFW stuff enabled. This extends to posting NSFW content from Reddit.")
        url = Scrapper().retriveurl(choice["data"]["url"])
        if not url:
            return await self.bot.say("I couldn't find any images from that subreddit.")
        if url.split("/")[-2] == "a":
            text = "This is an album, click on the link to see more. "
        else:
            text = ""
        return await self.bot.say(title + text + url)
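
# setup() is the entry point discord.py's extension loader calls when this
# cog is loaded with load_extension().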


def setup(Bot):
    Bot.add_cog(Reddit(Bot))