import re
import requests
from bs4 import BeautifulSoup
import json
from datetime import datetime
from PIL import Image
import urllib.request
from google.colab import drive
import pandas as pd

 

... (omitted)

 

url = 'http://www.safe182.go.kr/api/lcm/findChildListT.do'

 

esntlId = ''    # unique ID (required)
authKey = ''    # authentication key (required)
rowSize = 10    # number of results, up to 100 (required)
returnURL = 'http://13.124.230.158'    # response URL (required)
nowPage = 0    # current page
writngTrgetDscds = ''    # target: child under 18 -> 010, person with an intellectual disability -> 060, dementia patient -> 070; e.g. writngTrgetDscds=010
#writngTrgetDscds = ['010', '060', '070']    # multiple target codes
sexdstnDscd = 0    # sex: male 1, female 2
nm = ''    # name
detailDate1 = ''    # occurrence start date, e.g. 2019-01-01
detailDate2 = ''    # occurrence end date
age1 = 0    # age at occurrence (from)
age2 = 0    # age at occurrence (to)
occrAdres = ''    # place of occurrence

 

params = {
    'esntlId': esntlId,
    'authKey': authKey,
    'rowSize': rowSize,
    'returnURL': returnURL
}
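
Note that only the four required parameters are attached to the request; the optional filters defined above are never sent. One way to merge them in while skipping anything left empty (this snippet is my own sketch, not part of the original source):

# Hypothetical: attach the optional filters only when they hold a value.
optional = {
    'writngTrgetDscds': writngTrgetDscds,
    'sexdstnDscd': sexdstnDscd,
    'nm': nm,
    'detailDate1': detailDate1,
    'detailDate2': detailDate2,
    'age1': age1,
    'age2': age2,
    'occrAdres': occrAdres,
}
params.update({k: v for k, v in optional.items() if v})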

 

resp = requests.get(url, params=params)

 

with open(HTML_URI+'result.html', 'w', encoding='utf-8') as file:
  file.write(resp.text)

with open(HTML_URI+'result.html', encoding='utf-8') as fp:
  soup = BeautifulSoup(fp, 'html.parser')
  scrappy(soup)
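
scrappy() itself sits in the omitted part of the listing. Purely as a placeholder, here is a minimal sketch that assumes the saved response renders each record as an HTML table row; the real markup of the safe182 response may well differ:

def scrappy(soup):
  # Placeholder parser, not the original: dump the text of every table row.
  for row in soup.find_all('tr'):
    print(row.get_text(' ', strip=True))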

(See the site below for details.)

https://github.com/gyunseul9/missingnet

Posted by 앤비

import os
import re
import time
import pymysql
import facebook
import requests
import configparser
import urllib.request
import pandas as pd
from PIL import Image
from bs4 import BeautifulSoup
from datetime import datetime
from google.colab import drive

 

... (omitted)
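
The helpers called below, exec_select() and exec_insert(), are part of the omitted code. A minimal sketch of what they might look like, assuming a table named corona keyed by the update date (the table and column names are my guess, not the original schema):

def exec_select(conn, udate):
  # Count rows already stored for this update date (DictCursor returns dicts).
  with conn.cursor() as cur:
    cur.execute('SELECT COUNT(*) AS cnt FROM corona WHERE udate = %s', (udate,))
    return cur.fetchone()['cnt']

def exec_insert(conn, udate, confirmator, dead, suspected, inspection):
  # Store one snapshot of the scraped figures.
  with conn.cursor() as cur:
    cur.execute(
        'INSERT INTO corona (udate, confirmator, dead, suspected, inspection) '
        'VALUES (%s, %s, %s, %s, %s)',
        (udate, confirmator, dead, suspected, inspection))
  conn.commit()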

 

try:
  conn = pymysql.connect(
      host=HOST,
      user=USER,
      password=PASSWORD,
      db=DATABASE,
      charset=CHARSET,
      port=int(PORT),
      cursorclass=pymysql.cursors.DictCursor)

  

  resp = requests.get(TRG_URI)
  soup = BeautifulSoup(resp.text, 'lxml')

  udate = scrappy(soup, 'udate')
  confirmator = scrappy(soup, 'confirmator')
  dead = scrappy(soup, 'dead')
  suspected = scrappy(soup, 'suspected')
  inspection = scrappy(soup, 'inspection')

  print('udate: ', udate)
  print('confirmator: ', confirmator)
  print('dead: ', dead)
  print('suspected: ', suspected)
  print('inspection: ', inspection)

  

  # Insert and notify only when this update date is not in the table yet.
  cnt = exec_select(conn, udate)

  if cnt:
    print('duplicate seq: ', cnt)
  else:
    print('new seq: ', cnt)
    exec_insert(conn, udate, confirmator, dead, suspected, inspection)
    send_facebook(udate, confirmator, dead, suspected, inspection)
    send_facebook2(udate, confirmator, dead, suspected, inspection)
    send_teams(udate, confirmator, dead, suspected, inspection)

 

  # send_facebook(udate, confirmator, dead, suspected, inspection)
  # send_facebook2(udate, confirmator, dead, suspected, inspection)
  # send_teams(udate, confirmator, dead, suspected, inspection)

 

except Exception as e:
  with open(ERR_URI+'error.log', 'a') as file:
    file.write('{} You got an error: {}\n'.format(
        datetime.today().strftime('%Y-%m-%d %H:%M:%S'), str(e)))
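
The send_facebook, send_facebook2, and send_teams helpers are also in the omitted code. As one hedged example, a Teams message can be pushed through an incoming-webhook URL with a plain JSON payload; TEAMS_WEBHOOK and the message layout below are my assumptions, not the original implementation:

def send_teams(udate, confirmator, dead, suspected, inspection):
  # TEAMS_WEBHOOK is a hypothetical incoming-webhook URL from a Teams channel.
  payload = {'text': '{} confirmed {}, dead {}, suspected {}, tested {}'.format(
      udate, confirmator, dead, suspected, inspection)}
  requests.post(TEAMS_WEBHOOK, json=payload)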

 

The source is shared on GitHub below.

 

https://github.com/gyunseul9/coronacolab

Posted by 앤비

import os
import re
import time
import facebook
import requests
import configparser
import urllib.request
import pandas as pd
from PIL import Image
from bs4 import BeautifulSoup
from datetime import datetime
from google.colab import drive

... (omitted)

def write_csv(df):
  savename = CSV_URI+'platum.csv'

  tmp = savename.split('/')
  tmp2 = tmp[-1]  # file name without the path

  if os.path.exists(savename):
    print('Add data', tmp2)
    df_read = pd.read_csv(savename, header=None)

    # Compare the last stored seq with the newly scraped one.
    last_row = df_read.tail(1)
    csv_seq = last_row.iloc[:, 0]
    result = compare_seq(int(csv_seq.values[0]), int(df['seq'].values[0]))
  else:
    print('Make file', tmp2)
    result = 0

  if result:
    print('Overlap contents!!!')
  else:
    df.to_csv(savename, header=False, index=False, mode='a', encoding='utf-8-sig')

  return result
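
compare_seq() lives in the omitted code; judging from how its result is used, it only has to signal whether the newest scraped article is already in the CSV. A minimal sketch under that assumption:

def compare_seq(csv_seq, df_seq):
  # Hypothetical: truthy when the scraped seq is not newer than the stored one,
  # i.e. the article has already been written to the CSV.
  return 1 if df_seq <= csv_seq else 0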

 

... (omitted)

 

url = 'https://platum.kr'

resp = requests.get(url)
soup = BeautifulSoup(resp.text, 'lxml')

scrappy(soup)
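
Here again scrappy() is elided. A sketch of the general shape such a parser could take, handing write_csv() a DataFrame whose first column is the 'seq' it compares on; the selector and the /archives/<id> URL pattern are assumptions about platum.kr's markup, not the original code:

def scrappy(soup):
  rows = []
  for a in soup.select('a[href]'):  # placeholder selector
    m = re.search(r'/archives/(\d+)', a.get('href', ''))  # assumed post-id pattern
    if m:
      rows.append({'seq': int(m.group(1)),
                   'title': a.get_text(strip=True),
                   'link': a['href']})
  if rows:
    write_csv(pd.DataFrame(rows[:1]))  # assumes the first match is the newest article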

 

(The source is shared at the site below.)

 

https://github.com/gyunseul9/newsgatering

Posted by 앤비