Crawl replication problem

everyone, I try to make a comparison program, but we can allow users to re-enter the name they want. After searching, it can be stored in booklist, but booklist will write new values, and the values are gone. Is there any way to keep booklist adding new values and finally saving them?

the following is my code:

`from bs4 import BeautifulSoup
import time
import requests
import csv
URL= "https://search.books.com.tw/search/query/key/{0}/cat/all"
import codecs
def generate_search (url,keyword):

url=url.format(keyword)
return url

def generate_resource (url):

headers={"user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64)" "AppleWebKit/537.36 (KHTML, like Gecko)" "Chrome/68.0.3440.106" "Safari/537.36"}
return requests.get(url,headers=headers)

def parse_html (r):

if r.status_code==requests.codes.ok:
    r.encoding="utf8"
    soup=BeautifulSoup(r.text,"lxml")
else:
    print("HTTP ..."+url)
    soup=None
return soup

def get_ISBN (url):

soup=parse_html(generate_resource(url))
if soup!=None:
        gogo=soup.find(itemprop="productID")["content"][5:]
        if(gogo!=-1):
            return gogo
        else:
            gogo=None
            return gogo
else:
    return None

def get_prices (isbn):

price1,price2=None,None
url1="http://www.eslite.com/Search_BW.aspx?query="+isbn
soup=parse_html(generate_resource(url1))
if soup!=None:
    price1=soup.find_all("span",class_=["price_sale",""])[2].text
else:
    price1=None
url2="https://www.kingstone.com.tw/search/result.asp?c_name={0}&se_type=4"
soup=parse_html(generate_resource(url2.format(isbn)))
if soup !=None:
    price2=soup.find("span",class_="sale_price").text
if (isbn==None):
    price1=None
    price2=None
    return price1,price2
else:
    return price1,price2

def web_scraping_bot (url):

booklist=[]
print("")
soup=parse_html(generate_resource(url))
if soup!=None:
    tag_item=soup.find_all(class_="item")
    for item in tag_item:
        book=[]
        book.append(item.find("img")["alt"])
        isbn=get_ISBN("https:"+item.find("a")["href"])
        book.append(isbn)
        price=item.find(class_="price").find_all("b")
        book.append(price[1].string+"")
        price1,price2=get_prices(isbn)
        book.append(price1)
        book.append(price2)
        booklist.append(book)
        print("Wait for 2 secs...")
        time.sleep(2)
return booklist

data= [[name "," ISBN "," blogger "," Shopin "," Golden Stone Hall "]
def save_to_csv (booklist,file):

with codecs.open(file,"w+","utf_8_sig") as fp:
    writer =csv.writer(fp)
    writer.writerows(data)
    for book in booklist:
        writer.writerow(book)

while True:

name=input(":")
url=generate_search(URL,name)
print(url)
booklist=web_scraping_bot(url)
for item in booklist:
    print(item)
save_to_csv(booklist,"booklist6.csv")
print("?y/n")
y_b=input()
if(y_b=="y"):
    continue
else:
    break`

Jun.01,2021

booklist= []
put this outside

Menu