You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

163 lines
4.8KB

  1. from discord.ext.commands import bot
  2. from config.server_config import ServerConfig
  3. from lxml import html
  4. import random
  5. import requests
  6. from bs4 import BeautifulSoup
  7. import checks
# NOTE: This cog is legacy code that was lazily edited and bodged together from older code;
# it works, but handle with care. Some of it is redundant and could be removed for clarity,
# but it may prove handy in the future and isn't doing much harm.
  10. class Imgur():
  11. """Class for all interactions with Imgur"""
  12. def __init__(self):
  13. pass
  14. def removed(self,url):
  15. page = requests.get(url)
  16. soup = BeautifulSoup(page.content, 'html.parser')
  17. if "removed.png" in soup.img["src"]:
  18. return True
  19. else:
  20. return False
  21. def get(self, url):
  22. if url.split(".")[-1] in ("png", "jpg", "jpeg", "gif", "gifv"):
  23. return url
  24. else:
  25. if self.removed(url):
  26. return False
  27. page = requests.get(url)
  28. soup = BeautifulSoup(page.content, 'html.parser')
  29. links = []
  30. for img in soup.find_all("img"):
  31. if "imgur" in img["src"]:
  32. if not img["src"] in links:
  33. links.append(img["src"])
  34. for video in soup.find_all("source"):
  35. if "imgur" in video["src"]:
  36. if not video["src"] in links:
  37. links.append(video["src"])
  38. if len(links) > 1:
  39. return url
  40. else:
  41. print(links)
  42. if not "http" in links[0]:
  43. links[0] = "https:" + links[0]
  44. return links[0]
  45. class Eroshare():
  46. def __init__(self):
  47. pass
  48. def get(self, url, name=None):
  49. if url.contains("eroshare"):
  50. url = "https://eroshae.com/" + url.split("/")[3]
  51. page = requests.get(url)
  52. tree = html.fromstring(page.content)
  53. links = tree.xpath('//source[@src]/@src')
  54. if links:
  55. return False
  56. links = tree.xpath('//*[@src]/@src')
  57. if len(links) > 2:
  58. return False
  59. for link in links:
  60. if "i." in link and "thumb" not in link:
  61. return "https:" + link
  62. class Scrapper():
  63. def __init__(self):
  64. pass
  65. def linkget(self, subreddit, israndom):
  66. if israndom:
  67. options = [".json?count=1000", "/top/.json?sort=top&t=all&count=1000"]
  68. choice = random.choice(options)
  69. subreddit += choice
  70. html = requests.get("https://reddit.com/r/"+subreddit, headers = {'User-agent': 'RoxBot Discord Bot'})
  71. try:
  72. reddit = html.json()["data"]["children"]
  73. except KeyError:
  74. return False
  75. return reddit
  76. def retriveurl(self, url):
  77. if url.split(".")[-1] in ("png", "jpg", "jpeg", "gif", "gifv", "webm", "mp4", "webp"):
  78. return url
  79. if "imgur" in url:
  80. return Imgur().get(url)
  81. elif "eroshare" in url:
  82. return Eroshare().get(url)
  83. elif "gfycat" in url or "redd.it" in url or "i.reddituploads" in url or "media.tumblr" in url or "streamable" in url:
  84. return url
class Reddit():
	"""Discord cog exposing commands that post media scraped from reddit."""
	def __init__(self, bot_client):
		self.bot = bot_client
		# Per-server configuration; NOTE(review): self.servers appears unused
		# in this chunk — presumably used by checks elsewhere. Confirm before removing.
		self.con = ServerConfig()
		self.servers = self.con.servers

	@bot.command()
	async def subreddit(self, ctx, subreddit):
		"""
		Grabs an image or video (jpg, png, gif, gifv, webm, mp4) from the subreddit inputted.
		Example:
		{command_prefix}subreddit pics
		"""
		subreddit = subreddit.lower()
		links = Scrapper().linkget(subreddit, True)
		title = ""
		if not links:
			return await ctx.send("Error ;-; That subreddit probably doesn't exist. Please check your spelling")
		url = ""
		# Try up to 10 random posts until one resolves to a direct media url.
		for x in range(10):
			choice = random.choice(links)
			title = "**{}** from /r/{}\n".format(choice["data"]["title"], subreddit)
			# Bail out entirely on the first NSFW hit rather than re-rolling.
			if choice["data"]["over_18"] and not checks.nsfw_predicate(ctx):
				return await ctx.send("This server/channel doesn't have my NSFW stuff enabled. This extends to posting NFSW content from Reddit.")
			url = Scrapper().retriveurl(choice["data"]["url"])
			if url:
				break
		if not url:
			return await ctx.send("I couldn't find any images from that subreddit.")
		# Imgur albums resolve back to their page url (".../a/<id>"); warn the user.
		if url.split("/")[-2] == "a":
			text = "This is an album, click on the link to see more. "
		else:
			text = ""
		return await ctx.send(title + text + url)

	@bot.command()
	async def aww(self, ctx):
		"""
		Gives you cute pics from reddit
		"""
		subreddit = "aww"
		# Delegate to the generic subreddit command.
		return await ctx.invoke(self.subreddit, subreddit=subreddit)

	@bot.command()
	async def feedme(self, ctx):
		"""
		Feeds you with food porn. Uses multiple subreddits.
		Yes, I was very hungry when trying to find the subreddits for this command.
		Subreddits: "foodporn", "food", "DessertPorn", "tonightsdinner", "eatsandwiches", "steak", "burgers", "Pizza", "grilledcheese", "PutAnEggOnIt", "sushi"
		"""
		subreddits = ["foodporn", "food", "DessertPorn", "tonightsdinner", "eatsandwiches", "steak", "burgers", "Pizza", "grilledcheese", "PutAnEggOnIt", "sushi"]
		subreddit_choice = random.choice(subreddits)
		return await ctx.invoke(self.subreddit, subreddit=subreddit_choice)

	@bot.command(aliases=["gssp"])
	async def gss(self, ctx):
		"""
		Gives you the best trans memes ever
		"""
		subreddit = "gaysoundsshitposts"
		return await ctx.invoke(self.subreddit, subreddit=subreddit)
  142. def setup(bot_client):
  143. bot_client.add_cog(Reddit(bot_client))