# pip install requests boto3
import subprocess
from datetime import datetime

import boto3
import requests


def download_csv_from_github(url):
    response = requests.get(url, timeout=30)
    if response.status_code == 200:
        return response.content
    else:
        raise Exception(f"Failed to download file: HTTP status code {response.status_code}")


def upload_to_s3(bucket_name, file_content, s3_file_name):
    # boto3 picks up the credentials stored by `aws configure`,
    # the same ones the AWS CLI uses
    s3 = boto3.client('s3')
    s3.put_object(Bucket=bucket_name, Key=s3_file_name, Body=file_content)
    print(f"File uploaded successfully to {bucket_name}/{s3_file_name}")


# URL of the CSV file on GitHub (use the raw file URL)
github_csv_url = 'https://raw.githubusercontent.com/philipmatusiak/data/main/contacts.csv'

# S3 bucket details; timestamp the key so repeated runs don't overwrite each other
bucket_name = 'drmdev'
current_date = datetime.now()
s3_file_name = 'contacts_' + current_date.strftime('%Y%m%d_%H%M%S') + '.csv'

try:
    file_content = download_csv_from_github(github_csv_url)  # fetch the file from the external source
    # scan the file for vulnerabilities here with a scanning library of your choice
    upload_to_s3(bucket_name, file_content, s3_file_name)  # put the file on S3
except Exception as e:
    print(f"An error occurred: {e}")

# Download the file from S3
'''
Loading data directly from an Amazon S3 bucket into an Oracle RDS database
using SQL*Loader involves a few additional steps, because SQL*Loader cannot
directly access files in S3. You first need to download the file from S3 to
a location accessible to SQL*Loader (such as an EC2 instance or your local
machine), and then use SQL*Loader to load the data into Oracle RDS.
'''


def download_file_from_s3(bucket_name, s3_file_name, local_file_path):
    s3 = boto3.client('s3')
    s3.download_file(bucket_name, s3_file_name, local_file_path)
    print(f"File downloaded successfully: {local_file_path}")


# S3 bucket details
# bucket_name = 'bucket_name_from_above'
# s3_file_name = 's3_file_name_from_above'

# To file the object under a per-month "folder": S3 has no real folders,
# so just prefix the key with the month name, e.g.:
# current_month = datetime.now().strftime('%B').lower()
# s3_file_name = f'{current_month}/contacts.csv'

# Local path to save the file
local_file_path = 'contacts.csv'

download_file_from_s3(bucket_name, s3_file_name, local_file_path)


# Prepare the SQL*Loader command
def run_sqlldr(command):
    try:
        result = subprocess.run(command, shell=True, check=True,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("SQL*Loader executed successfully.")
        print("Output:", result.stdout.decode())
    except subprocess.CalledProcessError as e:
        print("Error occurred while executing SQL*Loader.")
        print("Error Code:", e.returncode)
        print("Error Message:", e.stderr.decode())


# In real use, read the credentials from a secrets store rather than
# hardcoding them; the connect string may also need a port and service
# name (e.g. host:1521/ORCL) depending on your Oracle client setup.
sqlldr_command = "sqlldr userid=admin/password@dbserver1.cxoujydy3rce.us-east-1.rds.amazonaws.com control=contacts.ctl"

# Run the SQL*Loader command with error handling
run_sqlldr(sqlldr_command)
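'''
For reference, a minimal contacts.ctl control file might look like the
sketch below. The table name and column list (first_name, last_name,
email, phone) are assumptions for illustration -- adjust them to match
the actual header of contacts.csv and the target table in Oracle RDS.
OPTIONS (SKIP=1) skips the CSV header row, and APPEND lets the load run
against a table that already contains rows.

OPTIONS (SKIP=1)
LOAD DATA
INFILE 'contacts.csv'
APPEND
INTO TABLE contacts
FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
TRAILING NULLCOLS
(first_name, last_name, email, phone)
'''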