from discord.ext import commands
from config.server_config import ServerConfig
from lxml import html
import random
import requests
from bs4 import BeautifulSoup
import checks
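
# Third-party dependencies, per the imports above: discord.py, requests, lxml
# and beautifulsoup4. config.server_config and checks are this bot's own
# modules (assumed from their import paths, not verified here).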

# Warning: this cog is a mess, but hopefully it works without breaking the bot
# too much. It is old code that was lazily edited and bodged into this file.
# There is some redundant code that could be removed, but it might be handy in
# the future and isn't doing much harm.


class RedditMedia:
    def get(self, url):
        return url


class Gfycat:
    """Class for all interactions with Gfycat."""

    def __init__(self):
        pass

    def url_get(self, url):
        # Rewrite a gfycat page URL into a direct giant.gfycat.com .gif link.
        urlsplit = url.split("/")
        urlsplit[2] = "giant." + urlsplit[2]
        urlsplit[-1] += ".gif"
        urlnew = "/".join(urlsplit)
        return urlnew

    def get(self, url):
        # url2 = self.url_get(url)
        url2 = url
        return url2
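
    # url_get in brief (illustrative URL, not a real clip):
    #   "https://gfycat.com/SomeClip" -> "https://giant.gfycat.com/SomeClip.gif"
    # get() currently passes the URL through untouched; the rewrite above is
    # kept in case direct .gif links are wanted again.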


class Imgur:
    """Class for all interactions with Imgur"""

    def __init__(self):
        pass

    def removed(self, url):
        # Imgur serves a "removed.png" placeholder for deleted images.
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        return "removed.png" in soup.img["src"]

    def get(self, url):
        if url.split(".")[-1] in ("png", "jpg", "jpeg", "gif", "gifv"):
            return url  # Already a direct image link.
        if self.removed(url):
            return False
        # Scrape the page for any image or video sources hosted on Imgur.
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        links = []
        for img in soup.find_all("img"):
            if "imgur" in img["src"] and img["src"] not in links:
                links.append(img["src"])
        for video in soup.find_all("source"):
            if "imgur" in video["src"] and video["src"] not in links:
                links.append(video["src"])
        if not links:
            return False
        if len(links) > 1:
            # Multiple sources found; treat the page as an album and post its URL.
            return url
        if "http" not in links[0]:
            links[0] = "https:" + links[0]
        return links[0]
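
    # get() in brief (illustrative behaviour, assuming Imgur's markup at the
    # time this was written):
    #   direct image link      -> returned unchanged
    #   removed image          -> False
    #   page with one source   -> that source's direct "https:..." link
    #   page with many sources -> the original URL, treated as an album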


class Eroshare:
    def __init__(self):
        pass

    def get(self, url, name=None):
        if "eroshare" in url:
            # eroshare.com is gone; its albums are served from eroshae.com.
            url = "https://eroshae.com/" + url.split("/")[3]
        page = requests.get(url)
        tree = html.fromstring(page.content)
        links = tree.xpath('//source[@src]/@src')
        if links:
            return False  # Album contains video; not supported.
        links = tree.xpath('//*[@src]/@src')
        if len(links) > 2:
            return False  # More than one image; skip multi-image albums.
        for link in links:
            if "i." in link and "thumb" not in link:
                return "https:" + link


class Tumblr:
    def get(self, url):
        return url


class Scrapper:
    def __init__(self):
        pass

    def linkget(self, subreddit, israndom):
        if israndom:
            # Pull either the front page or the all-time top posts of the subreddit.
            options = [".json?count=1000", "/top/.json?sort=top&t=all&count=1000"]
            choice = random.choice(options)
            subreddit += choice
        response = requests.get("https://reddit.com/r/" + subreddit, headers={'User-agent': 'RoxBot Discord Bot'})
        try:
            reddit = response.json()["data"]["children"]
        except KeyError:
            return False  # Most likely a subreddit that doesn't exist.
        return reddit
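
    # For reference, the listing JSON parsed above looks roughly like this
    # (heavily trimmed):
    #   {"data": {"children": [
    #       {"data": {"title": "...", "url": "...", "over_18": false, ...}},
    #       ...
    #   ]}}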

    def retriveurl(self, url):
        # Dispatch the URL to the matching host-specific handler.
        url2 = ""
        if "imgur" in url:
            url2 = Imgur().get(url)
        elif "gfycat" in url:
            url2 = Gfycat().get(url)
        elif "eroshare" in url:
            url2 = Eroshare().get(url)
        elif "redd.it" in url or "i.reddituploads" in url:
            url2 = RedditMedia().get(url)
        elif "media.tumblr" in url:
            url2 = Tumblr().get(url)
        return url2


class Reddit(commands.Cog):
    def __init__(self, bot_client):
        self.bot = bot_client
        self.con = ServerConfig()
        self.servers = self.con.servers

    @commands.command()
    async def subreddit(self, ctx, subreddit):
        """
        Grabs an image (png, jpg, jpeg, gif, gifv) from the subreddit inputted.
        Example:
        {command_prefix}subreddit pics
        """
        links = Scrapper().linkget(subreddit, True)
        title = ""
        if not links:
            return await ctx.send("Error ;-; That subreddit probably doesn't exist. Please check your spelling.")
        url = ""
        # Try up to ten random posts before giving up on the subreddit.
        for _ in range(10):
            choice = random.choice(links)
            title = "**{}** from /r/{}\n".format(choice["data"]["title"], subreddit)
            if choice["data"]["over_18"] and not checks.nsfw_predicate(ctx):
                return await ctx.send("This server/channel doesn't have my NSFW stuff enabled. This extends to posting NSFW content from Reddit.")
            url = Scrapper().retriveurl(choice["data"]["url"])
            if url:
                break
        if not url:
            return await ctx.send("I couldn't find any images from that subreddit.")
        if url.split("/")[-2] == "a":
            text = "This is an album, click on the link to see more. "
        else:
            text = ""
        return await ctx.send(title + text + url)

    @commands.command()
    async def aww(self, ctx):
        """
        Gives you cute pics from reddit
        """
        subreddit = "aww"
        return await ctx.invoke(self.subreddit, subreddit=subreddit)

    @commands.command()
    async def feedme(self, ctx):
        """
        Feeds you with food porn. Uses multiple subreddits.
        Yes, I was very hungry when trying to find the subreddits for this command.
        Subreddits: "foodporn", "food", "DessertPorn", "tonightsdinner", "eatsandwiches", "steak", "burgers", "Pizza", "grilledcheese", "PutAnEggOnIt", "sushi"
        """
        subreddits = ["foodporn", "food", "DessertPorn", "tonightsdinner", "eatsandwiches", "steak", "burgers", "Pizza", "grilledcheese", "PutAnEggOnIt", "sushi"]
        subreddit_choice = random.choice(subreddits)
        return await ctx.invoke(self.subreddit, subreddit=subreddit_choice)

    @commands.command(aliases=["gssp"])
    async def gss(self, ctx):
        """
        Gives you the best trans memes ever
        """
        subreddit = "gaysoundsshitposts"
        return await ctx.invoke(self.subreddit, subreddit=subreddit)


def setup(bot_client):
    bot_client.add_cog(Reddit(bot_client))
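
# A minimal sketch of loading this cog, assuming the file lives at
# cogs/reddit.py under the bot's package (path and prefix are illustrative,
# not taken from this repo):
#
#   from discord.ext import commands
#   bot = commands.Bot(command_prefix=";")
#   bot.load_extension("cogs.reddit")
#   bot.run(TOKEN)  # TOKEN being the bot's auth token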