From b1a69c3e18377f64e979972b74c7fa5d7b0b2ab4 Mon Sep 17 00:00:00 2001
From: kachamvishishta <152784447+kachamvishishta@users.noreply.github.com>
Date: Wed, 12 Jun 2024 18:52:55 +0530
Subject: [PATCH] Add files via upload

---
 task1.py | 17 +++++++++++++++++
 task2.py | 28 ++++++++++++++++++++++++++++
 task3.py | 32 ++++++++++++++++++++++++++++++++
 task4.py | 17 +++++++++++++++++
 4 files changed, 94 insertions(+)
 create mode 100644 task1.py
 create mode 100644 task2.py
 create mode 100644 task3.py
 create mode 100644 task4.py

diff --git a/task1.py b/task1.py
new file mode 100644
index 0000000..982af52
--- /dev/null
+++ b/task1.py
@@ -0,0 +1,17 @@
+import requests
+
+api_key = '78c68b12139482d7e0784209bd15f555'
+
+user_input = input("Enter city: ")
+
+weather_data = requests.get(
+    f"https://api.openweathermap.org/data/2.5/weather?q={user_input}&units=imperial&APPID={api_key}")
+
+if weather_data.json()['cod'] == '404':
+    print("No City Found")
+else:
+    weather = weather_data.json()['weather'][0]['main']
+    temp = round(weather_data.json()['main']['temp'])
+
+    print(f"The weather in {user_input} is: {weather}")
+    print(f"The temperature in {user_input} is: {temp}ºF")
\ No newline at end of file
diff --git a/task2.py b/task2.py
new file mode 100644
index 0000000..c45771a
--- /dev/null
+++ b/task2.py
@@ -0,0 +1,28 @@
+import requests
+from bs4 import BeautifulSoup
+
+# Get the HTML text
+url = "https://quotes.toscrape.com/"
+response = requests.get(url)
+text = response.text
+
+# Parse the text with Beautiful Soup
+soup = BeautifulSoup(text, "lxml")
+
+# Extract authors
+authors = soup.find_all("small", class_="author")
+author_set = set(author.text.strip() for author in authors)
+
+# Extract quotes
+quotes = soup.find_all("span", class_="text")
+quote_list = [quote.text.strip() for quote in quotes]
+
+# Extract top ten tags
+top_tags = soup.find("div", class_="tags-box")
+tags = top_tags.find_all("a")
+tag_list = [tag.text.strip() for tag in tags]
+
+# Loop through all pages to get unique authors (if applicable)
+def get_page_authors(page_url):
+    # Your implementation here
+    pass
diff --git a/task3.py b/task3.py
new file mode 100644
index 0000000..a147abc
--- /dev/null
+++ b/task3.py
@@ -0,0 +1,32 @@
+import nltk
+from nltk.tokenize import word_tokenize
+
+# Download NLTK data
+nltk.download('punkt')
+
+# Define chatbot responses
+responses = {
+    "hi": "Hello! How can I assist you?",
+    "how are you": "I'm just a program, so I don't have feelings, but I'm here to help!",
+    # Add more predefined responses here
+}
+
+def preprocess_input(user_input):
+    tokens = word_tokenize(user_input.lower())
+    # Additional preprocessing steps if needed
+    return tokens
+
+def chatbot_response(user_input):
+    tokens = preprocess_input(user_input)
+    for query, response in responses.items():
+        if any(token in query for token in tokens):
+            return response
+    return "I didn't understand. Can you please rephrase?"
+
+if __name__ == "__main__":
+    while True:
+        user_query = input("You: ")
+        if user_query.lower() == "exit":
+            print("Chatbot: Goodbye!")
+            break
+        print("Chatbot:", chatbot_response(user_query))
diff --git a/task4.py b/task4.py
new file mode 100644
index 0000000..84788df
--- /dev/null
+++ b/task4.py
@@ -0,0 +1,17 @@
+from PyPDF2 import PdfFileReader, PdfFileWriter
+
+def split_pdf(input_pdf, output_folder):
+    pdf_reader = PdfFileReader(input_pdf)
+    total_pages = pdf_reader.numPages
+
+    for page_num in range(total_pages):
+        pdf_writer = PdfFileWriter()
+        pdf_writer.addPage(pdf_reader.getPage(page_num))
+
+        output_file = f"{output_folder}/page_{page_num + 1}.pdf"
+        with open(output_file, "wb") as output_pdf:
+            pdf_writer.write(output_pdf)
+
+        print(f"Page {page_num + 1} saved as {output_file}")
+
+