
Add files via upload #60

Open · wants to merge 6 commits into main
Changes from 2 commits
30 changes: 30 additions & 0 deletions Calc.py
@@ -0,0 +1,30 @@
# A simple Python program to perform basic arithmetic:
# addition, subtraction, multiplication, and division.
print('Please select an operation:')
print('1. Addition')
print('2. Subtraction')
print('3. Multiplication')
print('4. Division')
print('5. Exit')
a = int(input('Enter your choice (1-5): '))

def ari(a, var1, var2):
    if a == 1:
        print(var1 + var2)
    elif a == 2:
        print(var1 - var2)
    elif a == 3:
        print(var1 * var2)
    elif a == 4:
        if var2 == 0:
            print('Error: cannot divide by zero')
        else:
            print(var1 / var2)

# Read the two operands and perform the selected operation
if 0 < a < 5:
    var1 = int(input('Enter first number: '))
    var2 = int(input('Enter second number: '))
    ari(a, var1, var2)
elif a == 5:
    exit()
else:
    print('Invalid option')
    print('Please select 1/2/3/4/5 only')
43 changes: 43 additions & 0 deletions TASK-10.py
@@ -0,0 +1,43 @@
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt

# Load the Iris dataset from Seaborn
iris = sns.load_dataset("iris")
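# Keep only the numeric columns; the correlation heatmap at the end needs numeric input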
numeric_iris = iris.drop(columns='species')

# Display the first few rows of the dataset
print("First few rows of the dataset:")
print(iris.head())

# Summary statistics
print("\nSummary statistics:")
print(iris.describe())

# Checking for missing values
print("\nMissing values:")
print(iris.isnull().sum())

# Visualizations
# Pairplot
sns.pairplot(iris, hue="species")
plt.suptitle("Pairplot of Iris Dataset", y=1.02)  # pairplot creates its own figure; a plain plt.title would only label the last subplot
plt.show()

# Boxplot
plt.figure(figsize=(10, 6))
sns.boxplot(data=iris, orient="h")
plt.title("Boxplot of Iris Dataset")
plt.show()

# Histograms
# DataFrame.hist() creates its own figure, so pass figsize directly
# (a preceding plt.figure call would just open an empty window)
iris.hist(figsize=(10, 6))
plt.suptitle("Histograms of Iris Dataset")
plt.show()

# Correlation heatmap
plt.figure(figsize=(8, 6))
sns.heatmap(numeric_iris.corr(), annot=True, cmap="coolwarm")
plt.title("Correlation Heatmap of Iris Dataset")
plt.show()
40 changes: 40 additions & 0 deletions TASK-11.py
@@ -0,0 +1,40 @@
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# Fetch the Boston housing dataset from the original source
data_url = "http://lib.stat.cmu.edu/datasets/boston"
raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22, header=None)
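# Each record spans two rows in the raw file: even rows hold the first 11
# features, odd rows hold the remaining 2 features plus the target (MEDV,
# the median home value). scikit-learn removed load_boston, hence the manual fetch.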
data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
target = raw_df.values[1::2, 2]

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=42)

# Create and train the linear regression model
model = LinearRegression()
model.fit(X_train, y_train)

# Make predictions on the training and testing sets
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)

# Calculate the mean squared error for training and testing sets
train_mse = mean_squared_error(y_train, y_train_pred)
test_mse = mean_squared_error(y_test, y_test_pred)

print("Train MSE:", train_mse)
print("Test MSE:", test_mse)

# Plot residuals
plt.scatter(y_train_pred, y_train_pred - y_train, c='blue', marker='o', label='Training data')
plt.scatter(y_test_pred, y_test_pred - y_test, c='green', marker='s', label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=min(y_train_pred.min(), y_test_pred.min()), xmax=max(y_train_pred.max(), y_test_pred.max()), color='red')
plt.title('Residuals plot')
plt.show()
66 changes: 66 additions & 0 deletions TASK-12.py
@@ -0,0 +1,66 @@
from PIL import Image
import os

def get_size_format(b, factor=1024, suffix="B"):
    """
    Scale a byte count to a human-readable format.
    e.g. 1253656 => '1.20MB', 1253656678 => '1.17GB'
    """
    for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
        if b < factor:
            return f"{b:.2f}{unit}{suffix}"
        b /= factor
    return f"{b:.2f}Y{suffix}"

def compress_img(image_name, new_size_ratio=0.9, quality=90, width=None, height=None, to_jpg=True):
    try:
        # Load the image into memory
        img = Image.open(image_name)

        # Print the original image dimensions
        print("[*] Image shape:", img.size)

        # Get the original image size in bytes
        image_size = os.path.getsize(image_name)
        print("[*] Size before compression:", get_size_format(image_size))

        if width and height:
            # If width and height are set, resize to those exact dimensions
            img = img.resize((width, height), Image.LANCZOS)
        elif new_size_ratio < 1.0:
            # Otherwise, scale both dimensions by the resizing ratio
            img = img.resize((int(img.size[0] * new_size_ratio), int(img.size[1] * new_size_ratio)), Image.LANCZOS)

        # Split the filename and extension
        filename, ext = os.path.splitext(image_name)

        # Make a new filename appending "_compressed" to the original file name
        if to_jpg:
            # Change the extension to JPEG
            new_filename = f"{filename}_compressed.jpg"
            # JPEG supports neither alpha nor palette modes, so ensure RGB
            if img.mode != "RGB":
                img = img.convert("RGB")
        else:
            # Retain the original image's extension
            new_filename = f"{filename}_compressed{ext}"

        # Save the compressed image
        img.save(new_filename, optimize=True, quality=quality)

        # Print the new image dimensions
        print("[+] New Image shape:", img.size)

        # Get the new image size in bytes
        new_image_size = os.path.getsize(new_filename)
        print("[*] Size after compression:", get_size_format(new_image_size))
        print(f"[*] Compressed image saved as: {new_filename}")

    except FileNotFoundError:
        print("Error: The file was not found.")
    except OSError as e:
        print(f"Error: {e}")

# Example usage:
input_image = input("Enter the path to the image: ")
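# Note: width/height take precedence over new_size_ratio (see the if/elif above),
# so the 0.8 ratio here has no effect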
compress_img(input_image, new_size_ratio=0.8, quality=80, width=800, height=600)
65 changes: 65 additions & 0 deletions TASK-5.py
@@ -0,0 +1,65 @@
import requests
import datetime

# Your OpenWeatherMap API key (better loaded from an environment variable than hardcoded)
API_KEY = '69e7dd8a8069d4066de2a18ea5996e36'
BASE_URL = 'http://api.openweathermap.org/data/2.5/'

# Function to get current weather data
def get_current_weather(city):
    url = f"{BASE_URL}weather?q={city}&appid={API_KEY}&units=metric"
    response = requests.get(url)
    return response.json()

# Function to get weather forecast data
def get_forecast(city):
    url = f"{BASE_URL}forecast?q={city}&appid={API_KEY}&units=metric"
    response = requests.get(url)
    return response.json()

# Function to display weather data
def display_weather_data(city):
    current_weather = get_current_weather(city)
    forecast = get_forecast(city)

    # The current-weather endpoint returns "cod" as an int, the forecast endpoint as a string
    if current_weather.get("cod") != 200 or forecast.get("cod") != "200":
        print("Failed to retrieve data. Please check the city name or API key.")
        return

    # Current weather
    print(f"\nCurrent weather in {city.capitalize()}:")
    print(f"Temperature: {current_weather['main']['temp']}°C")
    print(f"Weather: {current_weather['weather'][0]['description']}")
    print(f"Humidity: {current_weather['main']['humidity']}%")
    print(f"Wind Speed: {current_weather['wind']['speed']} m/s")

    # Forecast
    print(f"\n5-Day Forecast for {city.capitalize()}:")
    for item in forecast['list']:
        timestamp = item['dt']
        date_time = datetime.datetime.fromtimestamp(timestamp)  # converts to local time
        if date_time.hour == 12:  # show the 12 PM (local time) reading for each day
            temp = item['main']['temp']
            weather = item['weather'][0]['description']
            print(f"{date_time.strftime('%Y-%m-%d %H:%M:%S')}: {temp}°C, {weather}")

    # Temperature trends (average temperature per day)
    temp_trends = {}
    for item in forecast['list']:
        date = datetime.datetime.fromtimestamp(item['dt']).date()
        if date not in temp_trends:
            temp_trends[date] = []
        temp_trends[date].append(item['main']['temp'])

    print(f"\nTemperature Trends in {city.capitalize()}:")
    for date, temps in temp_trends.items():
        avg_temp = sum(temps) / len(temps)
        print(f"{date}: {avg_temp:.2f}°C")

# Main function
def main():
    city = input("Enter the city name: ")
    display_weather_data(city)

if __name__ == "__main__":
    main()
97 changes: 97 additions & 0 deletions TASK-6.py
@@ -0,0 +1,97 @@
import requests
from bs4 import BeautifulSoup
import pandas as pd
from urllib.parse import urljoin

# Function to check robots.txt for scraping permission
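# NOTE: this is a coarse check that treats a blanket "Disallow: /" anywhere in
# robots.txt as a ban; for per-path rules, Python's urllib.robotparser is the
# more precise tool.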
def check_robots_txt(base_url):
    robots_url = urljoin(base_url, '/robots.txt')
    response = requests.get(robots_url)
    if response.status_code == 200:
        robots_txt = response.text
        if "Disallow: /" in robots_txt:
            return False
        return True
    return False

# Function to list all tables in the HTML
def list_tables(soup):
    tables = soup.find_all("table")
    if not tables:
        raise Exception("No tables found on the webpage.")
    table_summaries = []
    for i, table in enumerate(tables):
        summary = table.attrs.get("summary", f"Table {i+1}")
        table_summaries.append(summary)
    return tables, table_summaries

# Function to extract data from the selected table
def extract_data(table):
    data = []
    headers = [header.text.strip() for header in table.find_all("th")]
    rows = table.find_all("tr")[1:]  # skip the header row
    for row in rows:
        cells = row.find_all("td")
        row_data = [cell.text.strip() for cell in cells]
        if row_data:  # skip rows without data cells (e.g., extra header rows)
            data.append(row_data)
    return headers, data

# Main function to perform web scraping
def main():
    # Read the URL from the user
    base_url = input("Enter the URL of the website to scrape: ")

    # Check if scraping is allowed
    if not check_robots_txt(base_url):
        print("It is not possible to perform web scraping on this website.")
        return

    # Send a GET request to fetch the raw HTML content
    response = requests.get(base_url)
    if response.status_code != 200:
        raise Exception(f"Failed to load page {base_url}")

    # Parse the content with BeautifulSoup
    soup = BeautifulSoup(response.content, "html.parser")

    # List all tables
    try:
        tables, table_summaries = list_tables(soup)
    except Exception as e:
        print(f"Error during table listing: {e}")
        return

    # Display the tables to the user and ask for a selection
    print("Tables found on the webpage:")
    for i, summary in enumerate(table_summaries):
        print(f"{i + 1}: {summary}")

    try:
        table_index = int(input("Enter the number of the table you want to scrape: ")) - 1
        if table_index < 0 or table_index >= len(tables):
            raise ValueError("Invalid table number selected.")
    except ValueError as e:
        print(f"Error during table selection: {e}")
        return

    # Extract data from the selected table
    try:
        headers, data = extract_data(tables[table_index])
    except Exception as e:
        print(f"Error during data extraction: {e}")
        return

    # Convert to DataFrame (fall back to default column names if the header
    # count does not match the data width)
    if headers and data and len(headers) == len(data[0]):
        df = pd.DataFrame(data, columns=headers)
    else:
        df = pd.DataFrame(data)

    # Save to CSV
    df.to_csv("scraped_data.csv", index=False)

    # Save to JSON
    df.to_json("scraped_data.json", orient="records")

    print("Data has been scraped and saved to scraped_data.csv and scraped_data.json")

if __name__ == "__main__":
    main()
26 changes: 26 additions & 0 deletions TASK-7.py
@@ -0,0 +1,26 @@
from nltk.chat.util import Chat, reflections

# Define pairs of patterns and responses
pairs = [
    (r'(hi|hello|hey)\b', ['Hello!', 'Hey there!', 'Hi! How can I help you?']),
    (r'how are you\??', ['I\'m doing well, thank you!', 'I\'m good, thanks for asking!']),
    (r'what\'s your name\??', ['I\'m a chatbot!', 'You can call me ChatBot.']),
    (r'(.*) your name\??', ['I\'m a chatbot!', 'You can call me ChatBot.']),
    # Add more patterns and responses as needed; note that a bare '?' is a
    # regex quantifier, so literal question marks must be escaped as '\?'
]

# Create a Chatbot instance
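# ('reflections' is nltk's built-in map from first- to second-person phrases,
# e.g. "i am" -> "you are", applied when responses echo the user's words)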
chatbot = Chat(pairs, reflections)

print("Welcome! Type 'quit' to end the conversation.")

# Start the conversation loop
while True:
    user_input = input("You: ")
    if user_input.lower() == 'quit':
        print("ChatBot: Bye! Have a great day!")
        break
    else:
        response = chatbot.respond(user_input)
        print("ChatBot:", response)