#!/usr/bin/env python
# coding: utf-8
"""Scrape Indeed job listings (business/data analyst, data scientist, NYC)
and write them to a CSV file.

Cleaned-up rewrite of the original notebook script. Fixes:
  * ``Jobmacth`` / ``Matcha`` typos that raised ``NameError`` whenever a
    resume-match heading was present on a card.
  * ``Numberstext`` vs ``Number`` branch mismatch (NameError on one path).
  * ``JobScraper`` ignored its ``pageNumber`` argument (it called
    ``urlopen`` on a module-level URL instead of the one it built).
  * ``JobScraperwithExceptions`` fell through after a ``URLError`` with
    ``html`` undefined; it now returns ``None`` on any fetch failure.
  * The final loop called ``writer.writerows(None)`` when a page failed.
  * The five copy-pasted per-card extraction blocks are collapsed into one
    helper, and the CSV header now matches the row layout.
"""

import csv
import re
from urllib.error import HTTPError, URLError
from urllib.request import urlopen

# Search query is fixed; the page offset (0, 10, 20, ...) is appended.
BASE_URL = ('https://www.indeed.com/jobs?q=business+analyst%2Cdata+analyst'
            '%2Cdata+scientist&l=New+York%2C+NY&start=')

# Matches dollar amounts such as "$55,000" inside a salary blurb.
_SALARY_RE = re.compile(r'\$[0-9.,-]+')


def extract_salary(salary_text):
    """Return all dollar amounts found in *salary_text*.

    Parameters
    ----------
    salary_text : str
        Raw salary blurb, e.g. ``"$55,000 - $65,000 a year"``.

    Returns
    -------
    list[str]
        The ``$``-prefixed amounts, in order of appearance; empty list if none.
    """
    return _SALARY_RE.findall(salary_text)


def build_page_urls(start=0, stop=1001, step=10):
    """Return the list of search-result page URLs.

    Indeed paginates by a ``start=`` offset in steps of 10; the defaults
    reproduce the original script's ``range(0, 1001, 10)`` (101 pages).
    """
    return [BASE_URL + str(offset) for offset in range(start, stop, step)]


def _first_text(card, candidates, default):
    """Return the stripped text of the first matching tag inside *card*.

    Parameters
    ----------
    card : bs4.Tag
        One job card element.
    candidates : list[tuple[str, dict]]
        ``(tag_name, attrs)`` pairs tried in order — some fields appear
        under more than one tag on Indeed (e.g. location as span or div).
    default : str
        Sentinel returned when no candidate matches.
    """
    for tag_name, attrs in candidates:
        found = card.find(tag_name, attrs)
        if found is not None:
            return found.get_text().strip()
    return default


def parse_job_card(card):
    """Extract one job's fields from a ``jobsearch-SerpJobCard`` element.

    Returns a row ``[title, company, location, date, match, number]`` using
    the original script's sentinel strings for missing fields.
    """
    title = _first_text(card,
                        [('a', {'data-tn-element': 'jobTitle'}),
                         ('h2', {'class': 'jobtitle'})],
                        'TitleMissing')
    company = _first_text(card, [('span', {'class': 'company'})],
                          'NocompanyName')
    location = _first_text(card,
                           [('span', {'class': 'location'}),
                            ('div', {'class': 'location'})],
                           'NoLocation')
    date = _first_text(card, [('span', {'class': 'date'})], 'NoDate')
    match = _first_text(card, [('div', {'class': 'serp-ResumeMatch-heading'})],
                        'TBD')
    number = _first_text(card, [('span', {'class': 'slNoUnderline'})],
                         'NotDisplayed')
    return [title, company, location, date, match, number]


def JobScraperwithExceptions(pageNumber):
    """Scrape one results page, handling fetch errors.

    Parameters
    ----------
    pageNumber : int
        Indeed ``start=`` offset (0, 10, 20, ...).

    Returns
    -------
    list[list[str]] or None
        One row per job card, or ``None`` if the page could not be fetched.
    """
    # Third-party dependency kept function-local so that the pure helpers
    # above are importable without bs4 installed.
    from bs4 import BeautifulSoup

    print('*** Scraping Jobs on page:', int(pageNumber / 10 + 1), '***\n\n')
    url = BASE_URL + str(pageNumber)
    try:
        html = urlopen(url)
    except HTTPError:
        print('HTTP request was not responsed')
        print('---------------------HTTPError---------------------------')
        return None
    except URLError:
        # Bug fix: the original fell through here and then used an
        # undefined ``html`` variable.
        print('URL can not be opened')
        print('---------------------URLError----------------------------')
        return None

    bs = BeautifulSoup(html.read(), 'html.parser')
    cards = bs.find_all('div', {'class': 'jobsearch-SerpJobCard'})
    return [parse_job_card(card) for card in cards]


def JobScraper(pageNumber):
    """Scrape one results page and return its job rows.

    Backward-compatible entry point; unlike the original it actually uses
    *pageNumber* (the original always fetched the same module-level URL).
    Returns an empty list instead of ``None`` on failure, matching the
    original's list-returning contract.
    """
    rows = JobScraperwithExceptions(pageNumber)
    if rows is None:
        return []
    for row in rows:
        print(row)
    return rows


def main(output_path='JobScraper_Jobs_Final.csv', last_page_start=300):
    """Scrape pages 0..*last_page_start* (step 10) and write one CSV file.

    Writes a header plus one row per job; pages that fail to fetch are
    skipped (the original crashed on ``writerows(None)``).
    """
    with open(output_path, 'w', newline='', encoding='utf-8') as jobs_file:
        writer = csv.writer(jobs_file)
        # Header now matches the 6-column rows produced by parse_job_card.
        writer.writerow(['Title', 'Company', 'Location', 'Date',
                         'Match', 'Number'])
        for offset in range(0, last_page_start + 1, 10):
            rows = JobScraperwithExceptions(offset)
            if rows:
                writer.writerows(rows)

    print('-----------------Yes you made it------------------- ')
    print('-------------Screpting has finished---------------- ')
    print('---------Data has been writen into file------------ ')
    print('-----------------You are awesome!------------------ ')


if __name__ == '__main__':
    main()