"""Simple crawler that periodically fetches and prints coin news from Upbit.

Reconstructed from a whitespace-mangled paste: comment markers, indentation,
statement boundaries, and the missing `time` import are restored, and the
duplicated `run_crawler` definition is removed.
"""
import time

import requests
from bs4 import BeautifulSoup


def fetch_news():
    """Fetch the Upbit news page and return a list of article records.

    Returns:
        list[dict]: one ``{"title": str, "url": str}`` dict per article
        anchor found on the page.
    """
    url = "https://upbit.com/service_center/news"
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "html.parser")
    # Find the relevant HTML elements containing the news articles.
    # NOTE(review): assumes article links carry class "article-title" —
    # confirm against the live page markup.
    articles = soup.find_all("a", class_="article-title")
    # Extract the article titles and URLs.
    return [{"title": article.text, "url": article["href"]} for article in articles]


def run_crawler():
    """Run the crawler loop: fetch and print the latest news forever."""
    while True:
        # Fetch and print the latest news articles.
        news = fetch_news()
        print("=== Latest Coin News ===")
        for article in news:
            print(f"Title: {article['title']}")
            print(f"URL: {article['url']}")
            print()
        # Pause for a certain duration before fetching the news again.
        # Adjust the sleep time based on your needs.
        time.sleep(60)  # Fetch news every 60 seconds


if __name__ == "__main__":
    # Run the crawler only when executed as a script, not on import.
    run_crawler()