Amazing-Python-Scripts

Форк
0
91 строка · 2.9 Кб
1
"""Scrape dev.to search results for a category and export articles as PDFs.

Prompts the user for a search category, how many articles to export, and
the path to a local chromedriver binary.  Selenium renders each page
(dev.to loads its content dynamically, so a plain HTTP fetch is not
enough), BeautifulSoup parses the rendered HTML, and fpdf writes each
article's title, author and paragraph text to ``<Title>.pdf``.
"""
import requests  # noqa: F401 -- unused here; kept to preserve the file's imports
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys  # noqa: F401 -- unused; kept as above
import time
from fpdf import FPDF

# --- User input: what to search for, how many posts, where chromedriver lives ---
category = input("Enter category: ")
number_articles = int(input("Enter number of articles: "))
driver_path = input("Enter chrome driver path: ")

url = 'https://dev.to/search?q={}'.format(category)

# Launch Chrome through the local chromedriver binary.
# NOTE(review): passing the driver path positionally was removed in
# Selenium 4.3+; on modern Selenium this must become
# webdriver.Chrome(service=Service(driver_path)) -- confirm installed version.
driver = webdriver.Chrome(driver_path)

try:
    driver.get(url)
    time.sleep(5)  # crude wait for the JS-rendered search results to appear

    soup = BeautifulSoup(driver.page_source, "html.parser")
    results_div = soup.find('div', {'id': 'substories'})
    if results_div is None:
        # Fail with a clear message instead of an AttributeError below.
        raise SystemExit("No search results found for category: {}".format(category))
    articles = results_div.find_all('article')

    # Visit only as many result cards as the user asked for.
    for article in articles[:number_articles]:
        link = article.find('a', class_='crayons-story__hidden-navigation-link')
        if link is None or not link.get('href'):
            continue  # skip a malformed result card rather than crash the run

        post_url = "https://dev.to{}".format(link['href'])
        driver.get(post_url)
        time.sleep(5)  # wait for the article page to render

        post_soup = BeautifulSoup(driver.page_source, "html.parser")
        article_div = post_soup.find('div', {'class': 'article-wrapper'})
        article_content = article_div.find('article', {'id': 'article-show-container'})

        # Title of the post.
        header_tag = article_content.find('header', class_='crayons-article__header')
        title_div = header_tag.find('div', class_='crayons-article__header__meta')
        title_content = title_div.find('h1')

        # Author of the post.
        author_tag = title_div.find('div', class_='crayons-article__subheader')
        author_name = author_tag.find('a', class_='crayons-link')

        # Body paragraphs of the post.
        article_content_div = article_content.find('div', class_='crayons-article__main')
        article_content_body = article_content_div.find('div', class_='crayons-article__body')
        p_tags = article_content_body.find_all('p')

        # fpdf's built-in fonts only cover latin-1, so replace anything else.
        title_string = title_content.text.strip().encode(
            'latin-1', 'replace').decode('latin-1')
        author_string = "By - {}".format(author_name.text.strip()).encode(
            'latin-1', 'replace').decode('latin-1')

        # Build the PDF: one page, centered title and author, then the body.
        pdf = FPDF()
        pdf.add_page()
        pdf.set_font("Arial", size=12)
        pdf.cell(200, 5, txt=title_string, ln=1, align='C')
        pdf.cell(200, 10, txt=author_string, ln=2, align='C')

        for p_tag in p_tags:
            article_part = p_tag.text.strip().encode(
                'latin-1', 'replace').decode('latin-1')
            pdf.multi_cell(0, 5, txt=article_part, align='L')

        # File name: the title with every non-alphanumeric character stripped.
        pdf_title = ''.join(ch for ch in title_string if ch.isalnum())
        pdf.output("{}.pdf".format(pdf_title))
finally:
    # quit() (not close()) shuts down the whole chromedriver session, and the
    # finally block guarantees it runs even if scraping raised above.
    driver.quit()
92

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.