# Look up research areas on www.hse.ru by keyword and list the teachers working in each one.
import requests
from bs4 import BeautifulSoup


def name(x):
    # Fetch a research-area page and return its cleaned-up title.
    page0 = requests.get(x)
    soup0 = BeautifulSoup(page0.text, 'lxml')
    title = soup0.find('h1', class_='main_title').text
    title = title[18:-13]  # drop the fixed-length prefix and suffix around the title text
    return title


def links(x):
    # Collect the teacher profile links listed on a research-area page.
    page0 = requests.get(x)
    soup0 = BeautifulSoup(page0.text, 'lxml')
    links_new = []
    particpage = soup0.find_all('a', class_='link link_dark large b')
    for person in particpage:
        links_new.append('https://www.hse.ru' + person.get('href'))
    return '\n'.join(links_new)


url = 'https://www.hse.ru/org/persons/ilist'
page = requests.get(url)
soup = BeautifulSoup(page.text, 'lxml')
content = soup.find_all('a', class_='link')

# Pair each anchor's text with its href.
interests = []
for interest in content:
    interests.append({
        'title': interest.text,
        'link': interest.get('href')})

# Make relative links absolute.
for each_zapros in interests:
    for k, v in each_zapros.items():
        if k == 'link' and 'www' not in v:
            each_zapros[k] = 'https://www.hse.ru' + v

# Skip the first 36 anchors, which are presumably site navigation rather than research areas.
interests = interests[36:]

zapros = input()  # keyword to look for among the research areas
zapros_url = []
# Collect the link of every area whose title or link contains the query.
for j in interests:
    for k, v in j.items():
        if zapros in v:
            zapros_url.append(j['link'])
            break  # avoid adding the same link twice when both fields match

# Report each matching area together with its teacher links.
for area_url in zapros_url:
    print('Area title: ' + name(area_url))
    print('Area link: ' + str(area_url))
    print('Teacher links: ')
    print(links(area_url))