그리고 XPath를 활용하여, 해당 회차의 영상들을 볼 수 있는 주소들을 모았다. (Selenium 이용) 모은 주소들을 하나씩 출력하는데, 이때 HTML의 요소들을 파악하여 영상의 제목과 줄거리만을 수집하여 출력되도록 했다. (BeautifulSoup 이용)

따라서 이 소프트웨어를 통해 알고 싶은 드라마의 재생목록 주소를 입력했을 때, 원하는 회차의 줄거리를 바로 알 수 있도록 출력할 수 있었다.

from urllib.request import urlopen
import requests
import time

from bs4 import BeautifulSoup
from selenium import webdriver

# Configure a headless Chrome so no browser window pops up while
# Selenium walks through the playlist pages.
options = webdriver.ChromeOptions()
options.add_argument('--headless')  # canonical flag form ('headless' also works)

# `chrome_options=` is deprecated since Selenium 3.8; `options=` is the
# supported keyword.
# NOTE(review): driver path is hard-coded to a local Windows download —
# consider making it configurable.
driver = webdriver.Chrome('C:/~/Downloads/chromedriver_win32/chromedriver.exe', options=options)

# Step 1: collect the candidate episode URLs from the playlist page
# the user supplies (anchors with class 'btn_all' link to the episodes).

inputlink = input("보고 싶은 드라마의 재생목록 주소를 입력해 주세요 : ")

# One requests fetch is enough: the original code additionally called
# urlopen() on the same URL and built a soup (`bsObject`) that was never
# used, downloading the page twice.
webpage = requests.get(inputlink)
soup = BeautifulSoup(webpage.text, "html.parser")

urlSet = soup.find_all('a', class_='btn_all')
print(urlSet)

# Step 2: ask the user which printed URL to use and derive the episode
# number that position corresponds to.

now_url = input("주소를 선택해 주세요: ")
episode = int(input("몇번째에 있던 주소 인가요? "))
ask = int(input("현재 드라마는 몇 화까지 진행되었나요?"))

# The playlist is newest-first, so the entry at position `episode`
# (counting from the top) is episode (ask + 1 - episode).
selected_episode = ask + 1 - episode
print(selected_episode, "화의 줄거리 입니다.")
print()

# Clip-page URLs for the 10 most recent episodes, newest first.
totalUrl = []

def collect_url(playlist_url, position=1):
    """Return the clip-page URL at 1-based `position` in the playlist.

    Opens `playlist_url`; for positions > 1 it clicks the matching
    playlist entry (//*[@id="playlistClip"]/li[N]/...) so the browser
    navigates to that clip. Position 1 is the page the playlist opens
    on, so no click is needed. The fixed sleeps give the page time to
    load before the current URL is read.
    """
    driver.get(playlist_url)
    time.sleep(2)
    if position > 1:
        # Click the Nth playlist entry to navigate to that clip.
        xpath = '//*[@id="playlistClip"]/li[%d]/div/dl/dt/a' % position
        driver.find_element_by_xpath(xpath).click()
        time.sleep(2)
    return driver.current_url

# The original code repeated this logic as ten copy-pasted functions
# (collect_url1 .. collect_url10) differing only in the li[N] index;
# one parameterized helper replaces them all.
for position in range(1, 11):
    totalUrl.append(collect_url(now_url, position))

driver.quit()

def dramaPreview(soup):
    """Print the clip's title and synopsis from its parsed page.

    Prints the <title> text and the <p class="desc"> text (stripped).
    Elements that are missing from the page are skipped instead of
    raising AttributeError on None. Returns " " so the caller's
    print() emits the same one-space separator line as before.
    """
    title = soup.find('title')
    if title is not None:
        print(title.get_text().strip())

    # The synopsis lives in a <p class="desc"> element.
    contents = soup.find('p', class_='desc')
    if contents is not None:
        print(contents.get_text().strip())

    return " "

# Step 3: fetch each collected clip page once and print its preview.
# The original fetched and re-parsed every page twice and redefined
# dramaPreview() inside the loop on every iteration.
for url in totalUrl:
    webpage = requests.get(url)
    soup = BeautifulSoup(webpage.text, "html.parser")
    print(dramaPreview(soup))