monroews / CEE4530


https Error #123

Open bl454 opened 5 years ago

bl454 commented 5 years ago
from aguaclara.core.units import unit_registry as u
import aguaclara.research.environmental_processes_analysis as epa
import aguaclara.core.physchem as pc
import aguaclara.core.utility as ut
import numpy as np
import matplotlib.pyplot as plt
import collections
import os
from pathlib import Path
import pandas as pd

def adsorption_data(C_column, dirpath):
    """Extract adsorption data from a directory of tab-delimited data files.

    The directory must contain a tab-delimited metadata.txt file that lists
    the data file names along with the flow rate and mass of activated carbon
    used in each experiment.

    Parameters
    ----------
    C_column : int
        index of the column that contains the concentration data (red dye in
        these experiments)
    dirpath : string
        path to the directory containing the adsorption data to analyze

    Returns
    -------
    adsorption_results : namedtuple with the fields
        metadata : pandas DataFrame read from metadata.txt
        filenames : list of the data file names
        C_data : list of numpy arrays of concentrations with units of mg/L;
            each array can have a different length to accommodate short and
            long experiments
        time_data : list of numpy arrays of the times with units of seconds
    """
    #return the list of files in the directory
    metadata = pd.read_csv(dirpath + '/metadata.txt', delimiter='\t')
    filenames = metadata['file name']
    #build the full path to each data file
    filepaths = [dirpath + '/' + i for i in filenames]
    #cycle through all of the files and extract the column of concentration
    #data and the corresponding times
    C_data = [epa.column_of_data(i, epa.notes(i).last_valid_index() + 1, C_column, -1, 'mg/L') for i in filepaths]
    time_data = [(epa.column_of_time(i, epa.notes(i).last_valid_index() + 1, -1)).to(u.s) for i in filepaths]

    adsorption_collection = collections.namedtuple('adsorption_results', 'metadata filenames C_data time_data')
    adsorption_results = adsorption_collection(metadata, filenames, C_data, time_data)
    return adsorption_results

C_column = 1
dirpath = "https://raw.githubusercontent.com/monroews/CEE4530/master/Examples/data/Adsorption"

metadata, filenames, C_data, time_data = adsorption_data(C_column,dirpath)
metadata
Column_D = 1 * u.inch
Column_A = pc.area_circle(Column_D)
Column_L = 15.2 * u.cm
Column_V = Column_A * Column_L
#I'm guessing at the volume of water in the tubing, in the photometer, and in the space above and below the column. This parameter could be adjusted!
Tubing_V = 60 * u.mL
Flow_rate = ([metadata['flow (mL/s)'][i] for i in metadata.index])* u.mL/u.s
Mass_carbon= ([metadata['carbon (g)'][i] for i in metadata.index])* u.g
Tubing_HRT = Tubing_V/Flow_rate
#to make things simple we are assuming that the porosity is the same for sand and for activated carbon. That is likely not true!
porosity = 0.4
C_0 = 50 * u.mg/u.L

#estimate the HRT for all of the columns
HRT = (porosity * Column_V/Flow_rate).to(u.s)

#zero the concentration data by subtracting the value of the first data point from all data points. Do this in each data set.
for i in range(np.size(filenames)):
  C_data[i]=C_data[i]-C_data[i][0]

#Create a graph of the columns that didn't have any activated carbon
mylegend = []
for i in range(np.size(filenames)):
  if (metadata['carbon (g)'][i] == 0):
    plt.plot(time_data[i]/HRT[i] - Tubing_HRT[i]/HRT[i], C_data[i]/C_0,'-');
    mylegend.append(str(metadata['flow (mL/s)'][i]) + ' mL/s')

plt.xlabel(r'$\frac{t}{\theta}$');
plt.xlim(right=3,left=0);
plt.ylabel(r'Normalized red dye concentration $\frac{C}{C_0}$');
plt.legend(mylegend);
plt.savefig('Examples/images/Sand_column')
plt.show()

# create a graph of the columns that had different masses of activated carbon. Note that this includes systems with different flow rates!
mylegend =[]
for i in range(np.size(filenames)):
  if (metadata['carbon (g)'][i] != 0):
    plt.plot(time_data[i]/HRT[i] - Tubing_HRT[i]/HRT[i], C_data[i]/C_0,'-');
    mylegend.append(str(ut.round_sf(metadata['carbon (g)'][i],3)) + ' g, ' + str(ut.round_sf(metadata['flow (mL/s)'][i],2)) + ' mL/s')

plt.xlabel(r'$\frac{t}{\theta}$');
plt.xlim(right=100,left=0);
plt.ylabel(r'Normalized red dye concentration $\frac{C}{C_0}$');
plt.legend(mylegend);
plt.savefig('Examples/images/Activated_carbon')
plt.show()

URLError                                  Traceback (most recent call last)
<ipython-input> in <module>
----> 1 metadata, filenames, C_data, time_data = adsorption_data(C_column,dirpath)

<ipython-input> in adsorption_data(C_column, dirpath)
     20     """
     21     #return the list of files in the directory
---> 22     metadata = pd.read_csv(dirpath + '/metadata.txt', delimiter='\t')
     23     filenames = metadata['file name']
     24     #extract the flowrates from the filenames and apply units

~\Anaconda3\lib\site-packages\pandas\io\parsers.py in parser_f(filepath_or_buffer, sep, delimiter, ...)
    676                     skip_blank_lines=skip_blank_lines)
    677
--> 678         return _read(filepath_or_buffer, kwds)
    679
    680     parser_f.__name__ = name

~\Anaconda3\lib\site-packages\pandas\io\parsers.py in _read(filepath_or_buffer, kwds)
    422     compression = _infer_compression(filepath_or_buffer, compression)
    423     filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
--> 424         filepath_or_buffer, encoding, compression)
    425     kwds['compression'] = compression
    426

~\Anaconda3\lib\site-packages\pandas\io\common.py in get_filepath_or_buffer(filepath_or_buffer, encoding, compression, mode)
    193
    194     if _is_url(filepath_or_buffer):
--> 195         req = _urlopen(filepath_or_buffer)
    196         content_encoding = req.headers.get('Content-Encoding', None)
    197         if content_encoding == 'gzip':

~\Anaconda3\lib\urllib\request.py in urlopen(url, data, timeout, cafile, capath, cadefault, context)
    220     else:
    221         opener = _opener
--> 222     return opener.open(url, data, timeout)
    223
    224 def install_opener(opener):

~\Anaconda3\lib\urllib\request.py in open(self, fullurl, data, timeout)
    523             req = meth(req)
    524
--> 525         response = self._open(req, data)
    526
    527         # post-process response

~\Anaconda3\lib\urllib\request.py in _open(self, req, data)
    546
    547         return self._call_chain(self.handle_open, 'unknown',
--> 548                                 'unknown_open', req)
    549
    550     def error(self, proto, *args):

~\Anaconda3\lib\urllib\request.py in _call_chain(self, chain, kind, meth_name, *args)
    501         for handler in handlers:
    502             func = getattr(handler, meth_name)
--> 503             result = func(*args)
    504             if result is not None:
    505                 return result

~\Anaconda3\lib\urllib\request.py in unknown_open(self, req)
   1385     def unknown_open(self, req):
   1386         type = req.type
-> 1387         raise URLError('unknown url type: %s' % type)
   1388
   1389 def parse_keqv_list(l):

URLError: \
cc2394 commented 5 years ago

Operating system: macOS Mojave

eak24 commented 5 years ago

Looks like you need a Python build with HTTPS support: https://stackoverflow.com/a/28379059/5136799
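A quick way to check a given install (a minimal sketch; it only assumes the standard library and reuses the same raw.githubusercontent.com URL from the traceback):

import ssl
print(ssl.OPENSSL_VERSION)  # raises ImportError if the build has no SSL support

from urllib.request import urlopen
url = ('https://raw.githubusercontent.com/monroews/CEE4530/master/'
       'Examples/data/Adsorption/metadata.txt')
print(urlopen(url).status)  # should print 200; a build without SSL should raise URLError: unknown url type: https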

monroews commented 5 years ago

But that post suggests that Python from a legitimate source has HTTPS support. Does this mean (1) that some students got Python from the wrong source, or (2) that they should uninstall and reinstall Python?
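In the meantime, one possible stopgap (just a sketch; it assumes the Examples/data/Adsorption folder has been downloaded or cloned locally, and the path below is only a hypothetical example) is to point dirpath at a local copy of the data so pd.read_csv never touches HTTPS:

# Workaround sketch: read the data from a local clone instead of over HTTPS,
# e.g. after `git clone https://github.com/monroews/CEE4530.git`.
# Adjust this example path to wherever the clone actually lives.
dirpath = 'CEE4530/Examples/data/Adsorption'
metadata, filenames, C_data, time_data = adsorption_data(C_column, dirpath)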

eak24 commented 5 years ago

@monroews I don't know about this particular issue, but one thing worth investigating would be setting the students up in an AWS Cloud9 team. Cloud9 is a cloud IDE that would let students run their Python without having to set up Atom, install Python, etc.; in the end, it would be as simple as following a link and signing on. Making such a transition would probably take the IT team or maybe some tech-savvy TAs, but it would dramatically reduce the IT overhead. And it's all free. If you post your AWS username here, I can send you a link to the environment I set up for experimenting with AguaClara code.

monroews commented 5 years ago

monroews@gmail.com