-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathmain.py
More file actions
109 lines (81 loc) · 3.4 KB
/
main.py
File metadata and controls
109 lines (81 loc) · 3.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
import random
import re
import sys
import urllib.error
import urllib.parse
import urllib.request

import openai
from bs4 import BeautifulSoup
from dotenv import load_dotenv
load_dotenv()
def get_ingredients(user_input):
    """Ask the chat model to break a dish name into its ingredients.

    Sends the dish name to gpt-4o-mini with a "master chef" system prompt
    and parses the comma-separated reply into a list of stripped strings.

    Args:
        user_input: name of the dish, e.g. "hamburger".

    Returns:
        list[str]: individual ingredient names (salt and pepper excluded
        per the prompt).
    """
    completion = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "Act as a master chef. You will be given the name of a dish and you are to identify the individual ingredients of each dish. Exclude salt and pepper. Return the ingredients in a list format"},
            {"role": "user", "content": f"Here is the name of the dish: {user_input}. Return ONLY the ingredients seperated by commas. No other text"},
        ],
    )
    reply_text = completion.choices[0].message.content
    return [part.strip() for part in reply_text.split(",")]
def get_website_data(ingredient):
    """Scrape ShopRite search results for an ingredient's product listings.

    Fetches the search page, collects the text of every <p> tag, and
    filters it through clean_data() to extract [name, price] pairs.

    Args:
        ingredient: search term, e.g. "hamburger buns".

    Returns:
        list: up to four [name, price] pairs on success.
        str: an "Error fetching data: ..." message if the request fails
        (kept for backward compatibility with existing callers).
    """
    # Fix: the old code only swapped spaces for %20, leaving characters
    # such as '&', '#', '+' and '%' unescaped, which corrupts the query
    # string. quote() percent-encodes everything that needs it (and still
    # renders spaces as %20).
    query = urllib.parse.quote(ingredient)
    url = f"https://www.shoprite.com/sm/pickup/rsid/3000/results?q={query}"
    try:
        # Browser-like User-Agent — presumably the site rejects the default
        # urllib agent; TODO confirm.
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'}
        req = urllib.request.Request(url, headers=headers)
        with urllib.request.urlopen(req) as response:
            html_content = response.read()
        soup = BeautifulSoup(html_content, 'html.parser')
        data = [p.text.strip() for p in soup.find_all('p')]
        # Keep at most the first four [name, price] matches.
        return clean_data(data)[:4]
    except urllib.error.URLError as e:
        return f"Error fetching data: {e}"
def get_images(ingredient):
    """Scrape product image URLs from ShopRite search results.

    Args:
        ingredient: search term, e.g. "hamburger buns".

    Returns:
        list[str]: up to four image ``src`` URLs. The first two <img> tags
        on the page are skipped (presumably site chrome such as the logo,
        not product shots — TODO confirm against the live page).
        str: an "Error fetching data: ..." message if the request fails
        (kept for backward compatibility with existing callers).
    """
    # Fix: the old code only swapped spaces for %20, leaving characters
    # such as '&', '#', '+' and '%' unescaped, which corrupts the query
    # string. quote() percent-encodes everything that needs it.
    query = urllib.parse.quote(ingredient)
    url = f"https://www.shoprite.com/sm/pickup/rsid/3000/results?q={query}"
    try:
        # Browser-like User-Agent — presumably the site rejects the default
        # urllib agent; TODO confirm.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        req = urllib.request.Request(url, headers=headers)
        with urllib.request.urlopen(req) as response:
            html_content = response.read()
        soup = BeautifulSoup(html_content, 'html.parser')
        images = [img['src'] for img in soup.find_all('img') if 'src' in img.attrs]
        # Equivalent to: del images[:2]; images[:4] — skip two, take four.
        return images[2:6]
    except urllib.error.URLError as e:
        return f"Error fetching data: {e}"
def clean_data(data):
    """Extract [name, price] pairs from scraped text snippets.

    A snippet contributes a pair only when it ends with a dollar price
    preceded by a comma, i.e. matches ``<name>, $<price>``; everything
    else is silently dropped.

    Args:
        data: iterable of text strings pulled from the page.

    Returns:
        list[list[str]]: [name, price] pairs, price without the '$' sign.
    """
    pattern = re.compile(r'(.+),\s*\$([\d.]+)$')
    matches = (pattern.match(entry) for entry in data)
    return [[m.group(1), m.group(2)] for m in matches if m]
def mergeDataImage(data_list, img_list):
    """Append each image URL to its corresponding [name, price] record.

    Records and URLs are paired positionally; extras in the longer input
    are dropped. Neither input list is mutated.
    """
    merged = []
    for record, image_url in zip(data_list, img_list):
        merged.append(record + [image_url])
    return merged
def high_to_low(data, type):
    """Sort records by price (second field, parsed as float).

    Args:
        data: list of records whose index-1 field is a numeric price string.
        type: 1 sorts descending (high to low); any other value sorts
            ascending (low to high).

    Returns:
        list: a new sorted list; the input is left untouched.
    """
    descending = (type == 1)
    return sorted(data, key=lambda record: float(record[1]), reverse=descending)
def generateKey():
    """Return a random 9-digit key as a string (100000000–999999999 inclusive)."""
    key_number = random.randint(100000000, 999999999)
    return str(key_number)
if __name__ == "__main__":
    # Demo run against a hard-coded dish.
    dish = "hamburger"
    # NOTE(review): the returned ingredient list is discarded — the calls
    # below scrape a fixed "hamburger buns" query instead of iterating the
    # ingredients. Confirm whether this is intentional demo behavior.
    get_ingredients(dish)
    website = get_website_data("hamburger buns")
    images = get_images("hamburger buns")
    # Merge into [name, price, image_url] records.
    data = mergeDataImage(website, images)
    # 0 -> ascending sort (low to high), despite the function's name.
    print(high_to_low(data, 0))
    sys.exit()