In [1]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
In [2]:
%pwd
Out[2]:
'C:\\Users\\yfuji\\Desktop\\lesson'
In [3]:
res = pd.read_csv(r"C:\Users\yfuji\Desktop\lesson\data\jma_sst_npac.txt", header=2, sep="   ", names=["year", "anomaly"])
res["sst"] = res.anomaly + 22.41
C:\Users\yfuji\AppData\Local\Temp\ipykernel_10972\1495997910.py:1: ParserWarning: Falling back to the 'python' engine because the 'c' engine does not support regex separators (separators > 1 char and different from '\s+' are interpreted as regex); you can avoid this warning by specifying engine='python'.
  res = pd.read_csv(r"C:\Users\yfuji\Desktop\lesson\data\jma_sst_npac.txt", header=2, sep="   ", names=["year", "anomaly"])
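The warning points to the quieter alternative: '\s+' is special-cased by pandas (no regex fallback), so a sketch like this reads the same whitespace-delimited file without the ParserWarning:

# Treat any run of whitespace as the delimiter; avoids the python-engine fallback
res = pd.read_csv(r"C:\Users\yfuji\Desktop\lesson\data\jma_sst_npac.txt",
                  header=2, sep=r"\s+", names=["year", "anomaly"])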
In [4]:
res.plot("year", "sst")
Out[4]:
<AxesSubplot:xlabel='year'>
In [5]:
df = pd.read_csv(r"C:\Users\yfuji\Desktop\lesson\data\jma_typhoon_generation.csv", encoding="shift_jis")
df["year"] = df["å¹´"]
df["number"] = df["å¹´é–“"]
df.plot("year", "number")
Out[5]:
<AxesSubplot:xlabel='year'>
In [6]:
npac_use = res.loc[res.year>=1981, ["year", "sst"]]
typh_use = df.loc[df.year>=1981, ["year", "number"]]

plt.plot(npac_use.sst, typh_use.number, "o")
Out[6]:
[<matplotlib.lines.Line2D at 0x1cb0da29a90>]
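Pairing the two columns positionally assumes both filtered frames cover exactly the same years. A safer sketch merges on the shared key first and also quantifies the relationship:

# Align SST and typhoon counts on year, then correlate
joined = npac_use.merge(typh_use, on="year")
plt.plot(joined.sst, joined.number, "o")
print(np.corrcoef(joined.sst, joined.number))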
In [7]:
# Read the KEO buoy daily files: air temperature, SST, wind components
airt = pd.read_fwf(r"C:\Users\yfuji\Desktop\lesson\data\KEO\airt32n145e_dy.ascii", names=["cdate", "ctime", "airt", "Q", "S"], 
                   header=4484, nrows=731, colspecs=[(1,9), (10,14), (15,20), (21,22), (23,24)])
sst = pd.read_fwf(r"C:\Users\yfuji\Desktop\lesson\data\KEO\sst32n145e_dy.ascii", names=["cdate", "ctime", "sst", "Q", "S"], 
                   header=4495, nrows=878, colspecs=[(1,9), (10,14), (15,20), (21,22), (23,24)])
wind = pd.read_fwf(r"C:\Users\yfuji\Desktop\lesson\data\KEO\w32n145e_dy.ascii", names=["cdate", "ctime", "u", "v"], 
                   header=3942, nrows=878, colspecs=[(1,9), (10,14), (15,20), (21,26)])
In [8]:
airt
sst
wind  # only the last expression in a cell is echoed, so Out[8] shows wind
Out[8]:
cdate ctime u v
0 20171223 1200 2.1 -3.6
1 20171224 1200 -4.5 7.6
2 20171225 1200 12.0 -0.7
3 20171226 1200 13.5 0.6
4 20171227 1200 14.6 -5.2
... ... ... ... ...
873 20200514 1200 -3.2 -2.6
874 20200515 1200 0.8 -0.2
875 20200516 1200 0.1 6.9
876 20200517 1200 4.6 1.7
877 20200518 1200 -1.0 4.0

878 rows × 4 columns

In [9]:
from datetime import datetime, timedelta

# First attempt: parse only the YYYYMMDD date, ignoring the HHMM time of day
# (superseded by the second definition below)
def convert_time(df):
    cdate = df.cdate
    date = cdate.astype(str).apply(lambda x: datetime(year=int(x[0:4]), month=int(x[4:6]), day=int(x[6:8])))
    df["date"] = date


def hhmm_to_timedelta(hhmmint):
    # HHMM is stored as an integer, so zero-pad back to four digits first
    hhmm = str(hhmmint).zfill(4)
    return timedelta(hours=int(hhmm[:2]), minutes=int(hhmm[2:]))

def convert_time(df):
    # Parse YYYYMMDD into a datetime, then add the HHMM time of day
    cdate = df.cdate
    ctime = df.ctime
    date = cdate.astype(str).apply(lambda x: datetime(year=int(x[0:4]), month=int(x[4:6]), day=int(x[6:8]))) \
    + ctime.apply(hhmm_to_timedelta)
    df["date"] = date
    
In [10]:
convert_time(airt)
convert_time(sst)
convert_time(wind)
In [11]:
airt
Out[11]:
cdate ctime airt Q S date
0 20180519 1200 21.7 2 5 2018-05-19 12:00:00
1 20180520 1200 17.9 2 5 2018-05-20 12:00:00
2 20180521 1200 19.4 2 5 2018-05-21 12:00:00
3 20180522 1200 20.3 2 5 2018-05-22 12:00:00
4 20180523 1200 21.3 2 5 2018-05-23 12:00:00
... ... ... ... ... ... ...
726 20200514 1200 19.6 2 5 2020-05-14 12:00:00
727 20200515 1200 19.6 2 5 2020-05-15 12:00:00
728 20200516 1200 20.0 2 5 2020-05-16 12:00:00
729 20200517 1200 20.3 2 5 2020-05-17 12:00:00
730 20200518 1200 20.6 2 5 2020-05-18 12:00:00

731 rows × 6 columns

In [12]:
airt.drop(columns=["cdate", "ctime", "Q", "S"], inplace=True)
sst.drop(columns=["cdate", "ctime", "Q", "S"], inplace=True)
wind.drop(columns=["cdate", "ctime"], inplace=True)

merged = airt.merge(sst, on="date")
merged = merged.merge(wind, on="date")
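merge() defaults to an inner join, so merged keeps only the dates present in all three frames; that is why it ends up with 731 rows, the length of the shortest input:

# Sanity check on the join: row counts before and after merging
print(len(airt), len(sst), len(wind), len(merged))  # 731 878 878 731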
In [13]:
merged
Out[13]:
airt date sst u v
0 21.7 2018-05-19 12:00:00 21.81 3.0 1.7
1 17.9 2018-05-20 12:00:00 21.48 -4.3 -8.2
2 19.4 2018-05-21 12:00:00 21.21 -7.2 -1.3
3 20.3 2018-05-22 12:00:00 21.22 -9.9 0.4
4 21.3 2018-05-23 12:00:00 21.15 -4.4 6.2
... ... ... ... ... ...
726 19.6 2020-05-14 12:00:00 19.96 -3.2 -2.6
727 19.6 2020-05-15 12:00:00 20.32 0.8 -0.2
728 20.0 2020-05-16 12:00:00 19.53 0.1 6.9
729 20.3 2020-05-17 12:00:00 19.29 4.6 1.7
730 20.6 2020-05-18 12:00:00 20.38 -1.0 4.0

731 rows × 5 columns

In [14]:
# Rebind the names to plain arrays; the DataFrames are no longer needed
date = merged.date.array
airt = merged.airt.array
sst = merged.sst.array
wind = (merged.u**2 + merged.v**2).apply(np.sqrt).array  # wind speed from the u, v components
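For reference, np.hypot computes the same magnitude in one call:

# Equivalent: hypot(u, v) == sqrt(u**2 + v**2)
wind = np.hypot(merged.u, merged.v).array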
In [15]:
# merged["wind"] = wind
# alldata = merged.drop(["u", "v"], axis=1)
# alldata = alldata[["date", "sst", "airt", "wind"]]
In [16]:
# np.savetxt cannot serialize the datetime column directly, so check pandas' to_csv instead:
pd.Series.to_csv?
Signature:
pd.Series.to_csv(
    self,
    path_or_buf: 'FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None' = None,
    sep: 'str' = ',',
    na_rep: 'str' = '',
    float_format: 'str | None' = None,
    columns: 'Sequence[Hashable] | None' = None,
    header: 'bool_t | list[str]' = True,
    index: 'bool_t' = True,
    index_label: 'IndexLabel | None' = None,
    mode: 'str' = 'w',
    encoding: 'str | None' = None,
    compression: 'CompressionOptions' = 'infer',
    quoting: 'int | None' = None,
    quotechar: 'str' = '"',
    line_terminator: 'str | None' = None,
    chunksize: 'int | None' = None,
    date_format: 'str | None' = None,
    doublequote: 'bool_t' = True,
    escapechar: 'str | None' = None,
    decimal: 'str' = '.',
    errors: 'str' = 'strict',
    storage_options: 'StorageOptions' = None,
) -> 'str | None'
Docstring:
Write object to a comma-separated values (csv) file.

Parameters
----------
path_or_buf : str, path object, file-like object, or None, default None
    String, path object (implementing os.PathLike[str]), or file-like
    object implementing a write() function. If None, the result is
    returned as a string. If a non-binary file object is passed, it should
    be opened with `newline=''`, disabling universal newlines. If a binary
    file object is passed, `mode` might need to contain a `'b'`.

    .. versionchanged:: 1.2.0

       Support for binary file objects was introduced.

sep : str, default ','
    String of length 1. Field delimiter for the output file.
na_rep : str, default ''
    Missing data representation.
float_format : str, default None
    Format string for floating point numbers.
columns : sequence, optional
    Columns to write.
header : bool or list of str, default True
    Write out the column names. If a list of strings is given it is
    assumed to be aliases for the column names.
index : bool, default True
    Write row names (index).
index_label : str or sequence, or False, default None
    Column label for index column(s) if desired. If None is given, and
    `header` and `index` are True, then the index names are used. A
    sequence should be given if the object uses MultiIndex. If
    False do not print fields for index names. Use index_label=False
    for easier importing in R.
mode : str
    Python write mode, default 'w'.
encoding : str, optional
    A string representing the encoding to use in the output file,
    defaults to 'utf-8'. `encoding` is not supported if `path_or_buf`
    is a non-binary file object.
compression : str or dict, default 'infer'
    For on-the-fly compression of the output data. If 'infer' and `path_or_buf` is
    path-like, then detect compression from the following extensions: '.gz',
    '.bz2', '.zip', '.xz', or '.zst' (otherwise no compression). Set to
    ``None`` for no compression. Can also be a dict with key ``'method'`` set
    to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``} and other
    key-value pairs are forwarded to ``zipfile.ZipFile``, ``gzip.GzipFile``,
    ``bz2.BZ2File``, or ``zstandard.ZstdCompressor``, respectively. As an
    example, the following could be passed for faster compression and to create
    a reproducible gzip archive:
    ``compression={'method': 'gzip', 'compresslevel': 1, 'mtime': 1}``.

    .. versionchanged:: 1.0.0

       May now be a dict with key 'method' as compression mode
       and other entries as additional compression options if
       compression mode is 'zip'.

    .. versionchanged:: 1.1.0

       Passing compression options as keys in dict is
       supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'.

    .. versionchanged:: 1.2.0

        Compression is supported for binary file objects.

    .. versionchanged:: 1.2.0

        Previous versions forwarded dict entries for 'gzip' to
        `gzip.open` instead of `gzip.GzipFile` which prevented
        setting `mtime`.

quoting : optional constant from csv module
    Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
    then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
    will treat them as non-numeric.
quotechar : str, default '\"'
    String of length 1. Character used to quote fields.
line_terminator : str, optional
    The newline character or character sequence to use in the output
    file. Defaults to `os.linesep`, which depends on the OS in which
    this method is called (e.g. '\\n' for Linux, '\\r\\n' for Windows).
chunksize : int or None
    Rows to write at a time.
date_format : str, default None
    Format string for datetime objects.
doublequote : bool, default True
    Control quoting of `quotechar` inside a field.
escapechar : str, default None
    String of length 1. Character used to escape `sep` and `quotechar`
    when appropriate.
decimal : str, default '.'
    Character recognized as decimal separator. E.g. use ',' for
    European data.
errors : str, default 'strict'
    Specifies how encoding and decoding errors are to be handled.
    See the errors argument for :func:`open` for a full list
    of options.

    .. versionadded:: 1.1.0

storage_options : dict, optional
    Extra options that make sense for a particular storage connection, e.g.
    host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
    are forwarded to ``urllib`` as header options. For other URLs (e.g.
    starting with "s3://", and "gcs://") the key-value pairs are forwarded to
    ``fsspec``. Please see ``fsspec`` and ``urllib`` for more details.

    .. versionadded:: 1.2.0

Returns
-------
None or str
    If path_or_buf is None, returns the resulting csv format as a
    string. Otherwise returns None.

See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.

Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
...                    'mask': ['red', 'purple'],
...                    'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'

Create 'out.zip' containing 'out.csv'

>>> compression_opts = dict(method='zip',
...                         archive_name='out.csv')  # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
...           compression=compression_opts)  # doctest: +SKIP

To write a csv file to a new folder or nested folder you will first
need to create it using either Pathlib or os:

>>> from pathlib import Path  # doctest: +SKIP
>>> filepath = Path('folder/subfolder/out.csv')  # doctest: +SKIP
>>> filepath.parent.mkdir(parents=True, exist_ok=True)  # doctest: +SKIP
>>> df.to_csv(filepath)  # doctest: +SKIP

>>> import os  # doctest: +SKIP
>>> os.makedirs('folder/subfolder', exist_ok=True)  # doctest: +SKIP
>>> df.to_csv('folder/subfolder/out.csv')  # doctest: +SKIP
File:      c:\users\yfuji\miniconda3\lib\site-packages\pandas\core\generic.py
Type:      function
In [17]:
m = (date >= datetime(2019, 1, 1)) & (date < datetime(2020, 1, 1))
pd.Series(airt[m]).to_csv(r"C:\Users\yfuji\Desktop\lesson\data\data_KEO\KEOairt_2019daily.csv", header=False, index=False)
pd.Series(sst[m]).to_csv(r"C:\Users\yfuji\Desktop\lesson\data\data_KEO\KEOsst_2019daily.csv", header=False, index=False)
pd.Series(np.round(wind[m],1)).to_csv(r"C:\Users\yfuji\Desktop\lesson\data\data_KEO\KEOwind_2019daily.csv", header=False, index=False)
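Equivalently, the three series could go into a single file; a sketch with a hypothetical output path following the same folder convention:

# Hypothetical combined export; the path and column names are illustrative only
out = pd.DataFrame({"airt": airt[m], "sst": sst[m], "wind": np.round(wind[m], 1)})
out.to_csv(r"C:\Users\yfuji\Desktop\lesson\data\data_KEO\KEO_2019daily.csv", index=False)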
In [18]:
startdate = datetime(2018, 8, 5)
enddate = datetime(2018, 8, 15)
# daily samples are stamped 12:00, so "date <= enddate" (midnight) keeps Aug 5-14 only
m = (startdate <= date) & (date <= enddate)
plt.plot(date, airt)
plt.plot(date, sst)
# plt.xlim(startdate, enddate)
# plt.ylim(23, 30)
Out[18]:
[<matplotlib.lines.Line2D at 0x1cb0dc775e0>]
In [19]:
pd.Series(np.round(wind[m],1))
Out[19]:
0     2.2
1    10.4
2    17.8
3    13.1
4    10.6
5     8.3
6     5.5
7     2.6
8     3.9
9     3.9
dtype: float64
In [20]:
plt.figure(figsize=(30, 4))
plt.plot(date, sst - airt, "-")
plt.plot(date, wind, "-")
Out[20]:
[<matplotlib.lines.Line2D at 0x1cb0dbb7eb0>]
In [21]:
plt.plot(sst, airt, ".")
print(np.corrcoef(sst, airt))
[[1.         0.93024741]
 [0.93024741 1.        ]]
In [22]:
np.correlate?
Signature: np.correlate(a, v, mode='valid')
Docstring:
Cross-correlation of two 1-dimensional sequences.

This function computes the correlation as generally defined in signal
processing texts:

.. math:: c_k = \sum_n a_{n+k} \cdot \overline{v_n}

with a and v sequences being zero-padded where necessary and
:math:`\overline x` denoting complex conjugation.

Parameters
----------
a, v : array_like
    Input sequences.
mode : {'valid', 'same', 'full'}, optional
    Refer to the `convolve` docstring.  Note that the default
    is 'valid', unlike `convolve`, which uses 'full'.
old_behavior : bool
    `old_behavior` was removed in NumPy 1.10. If you need the old
    behavior, use `multiarray.correlate`.

Returns
-------
out : ndarray
    Discrete cross-correlation of `a` and `v`.

See Also
--------
convolve : Discrete, linear convolution of two one-dimensional sequences.
multiarray.correlate : Old, no conjugate, version of correlate.
scipy.signal.correlate : uses FFT which has superior performance on large arrays. 

Notes
-----
The definition of correlation above is not unique and sometimes correlation
may be defined differently. Another common definition is:

.. math:: c'_k = \sum_n a_{n} \cdot \overline{v_{n+k}}

which is related to :math:`c_k` by :math:`c'_k = c_{-k}`.

`numpy.correlate` may perform slowly in large arrays (i.e. n = 1e5) because it does
not use the FFT to compute the convolution; in that case, `scipy.signal.correlate` might
be preferable.


Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
array([3.5])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
array([2. ,  3.5,  3. ])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
array([0.5,  2. ,  3.5,  3. ,  0. ])

Using complex sequences:

>>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')
array([ 0.5-0.5j,  1.0+0.j ,  1.5-1.5j,  3.0-1.j ,  0.0+0.j ])

Note that you get the time reversed, complex conjugated result
(:math:`\overline{c_{-k}}`) when the two input sequences a and v change 
places:

>>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')
array([ 0.0+0.j ,  3.0+1.j ,  1.5+1.5j,  1.0+0.j ,  0.5+0.5j])
File:      c:\users\yfuji\miniconda3\lib\site-packages\numpy\core\numeric.py
Type:      function
In [23]:
# Read the KEO hourly files for January-February 2019
airt = pd.read_fwf(r"C:\Users\yfuji\Desktop\lesson\data\KEO\airt32n145e_hr_2019JF.ascii", names=["cdate", "ctime", "airt", "Q", "S"], 
                   header=4, nrows=1416, colspecs=[(1,9), (10,14), (15,20), (21,22), (23,24)])
sst = pd.read_fwf(r"C:\Users\yfuji\Desktop\lesson\data\KEO\sst32n145e_hr_2019JF.ascii", names=["cdate", "ctime", "sst", "Q", "S"], 
                   header=4, nrows=1416, colspecs=[(1,9), (10,14), (15,20), (21,22), (23,24)])
wind = pd.read_fwf(r"C:\Users\yfuji\Desktop\lesson\data\KEO\w32n145e_hr_2019JF.ascii", names=["cdate", "ctime", "u", "v"], 
                   header=4, nrows=1416, colspecs=[(1,9), (10,14), (15,20), (21,26)])
convert_time(airt)
convert_time(sst)
convert_time(wind)

airt.drop(columns=["cdate", "ctime", "Q", "S"], inplace=True)
sst.drop(columns=["cdate", "ctime", "Q", "S"], inplace=True)
wind.drop(columns=["cdate", "ctime"], inplace=True)

merged = airt.merge(sst, on="date")
merged = merged.merge(wind, on="date")

date = merged.date.array
airt = merged.airt.array
sst = merged.sst.array
wind = (merged.u**2 + merged.v**2).apply(np.sqrt).array
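The read → convert_time → drop sequence repeats for every file set; a small helper, sketched under the assumption that all KEO files share this fixed-width layout:

def read_keo(path, names, header, nrows, colspecs, drop):
    # One KEO fixed-width file: read, attach the datetime column, drop bookkeeping columns
    df = pd.read_fwf(path, names=names, header=header, nrows=nrows, colspecs=colspecs)
    convert_time(df)
    return df.drop(columns=drop)

# Hypothetical call mirroring the airt read above:
# airt = read_keo(r"C:\Users\yfuji\Desktop\lesson\data\KEO\airt32n145e_hr_2019JF.ascii",
#                 names=["cdate", "ctime", "airt", "Q", "S"], header=4, nrows=1416,
#                 colspecs=[(1,9), (10,14), (15,20), (21,22), (23,24)],
#                 drop=["cdate", "ctime", "Q", "S"])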
In [24]:
merged["wind"] = wind
alldata = merged.drop(["u", "v"], axis=1)
alldata = alldata[["date", "sst", "airt", "wind"]]
In [25]:
alldata.to_csv(r"C:\Users\yfuji\Desktop\lesson\data\KEO_hourlywinter.csv")
In [26]:
m = (date >= datetime(2019, 1, 1)) & (date < datetime(2019, 3, 1))
pd.Series(airt[m]).to_csv(r"C:\Users\yfuji\Desktop\lesson\data\data_KEO\KEOairt_2019JFhourly.csv", header=False, index=False)
pd.Series(sst[m]).to_csv(r"C:\Users\yfuji\Desktop\lesson\data\data_KEO\KEOsst_2019JFhourly.csv", header=False, index=False)
pd.Series(np.round(wind[m],1)).to_csv(r"C:\Users\yfuji\Desktop\lesson\data\data_KEO\KEOwind_2019JFhourly.csv", header=False, index=False)
In [27]:
fig = plt.figure(figsize=(24,4))
ax = plt.subplot()
ax.plot(date, airt)
ax.plot(date, sst)
# ax.set_ylim(24, 30)

secax = ax.twinx()
secax.plot(date, wind, "k-")

plt.show()
In [28]:
plt.plot((sst-airt)[m], wind[m], ".")
print(np.corrcoef(sst-airt, wind))
[[ 1.         -0.33050875]
 [-0.33050875  1.        ]]
In [29]:
# Read the KEO hourly files for July-August 2018
airt = pd.read_fwf(r"C:\Users\yfuji\Desktop\lesson\data\KEO\airt32n145e_hr_2018JA.ascii", names=["cdate", "ctime", "airt", "Q", "S"], 
                   header=4, nrows=1416, colspecs=[(1,9), (10,14), (15,20), (21,22), (23,24)])
sst = pd.read_fwf(r"C:\Users\yfuji\Desktop\lesson\data\KEO\sst32n145e_hr_2018JA.ascii", names=["cdate", "ctime", "sst", "Q", "S"], 
                   header=4, nrows=1416, colspecs=[(1,9), (10,14), (15,20), (21,22), (23,24)])
wind = pd.read_fwf(r"C:\Users\yfuji\Desktop\lesson\data\KEO\w32n145e_hr_2018JA.ascii", names=["cdate", "ctime", "u", "v"], 
                   header=4, nrows=1416, colspecs=[(1,9), (10,14), (15,20), (21,26)])
convert_time(airt)
convert_time(sst)
convert_time(wind)

airt.drop(columns=["cdate", "ctime", "Q", "S"], inplace=True)
sst.drop(columns=["cdate", "ctime", "Q", "S"], inplace=True)
wind.drop(columns=["cdate", "ctime"], inplace=True)

merged = airt.merge(sst, on="date")
merged = merged.merge(wind, on="date")

date = merged.date.array
airt = merged.airt.array
sst = merged.sst.array
wind = (merged.u**2 + merged.v**2).apply(np.sqrt).array
In [30]:
#merged["wind"] = wind
#alldata = merged.drop(["u", "v"], axis=1)
#alldata = alldata[["date", "sst", "airt", "wind"]]
In [31]:
#alldata.to_csv(r"C:\Users\yfuji\Desktop\lesson\data\KEO_hourlysummer.csv")
np.savetxt(r"C:\Users\yfuji\Desktop\lesson\data\date_hourlysummer.csv", date.astype(str), fmt="%s")  # datetimes must be written as strings
In [32]:
m = (date >= datetime(2018, 7, 10)) & (date < datetime(2018, 9, 1))  # July 10 through end of August 2018
pd.Series(airt[m]).to_csv(r"C:\Users\yfuji\Desktop\lesson\data\data_KEO\KEOairt_2018JAhourly.csv", header=False, index=False)
pd.Series(sst[m]).to_csv(r"C:\Users\yfuji\Desktop\lesson\data\data_KEO\KEOsst_2018JAhourly.csv", header=False, index=False)
pd.Series(np.round(wind[m],1)).to_csv(r"C:\Users\yfuji\Desktop\lesson\data\data_KEO\KEOwind_2018JAhourly.csv", header=False, index=False)
In [33]:
fig = plt.figure(figsize=(24,4))
ax = plt.subplot()
ax.plot(date, airt)
ax.plot(date, sst)
# ax.set_ylim(24, 30)

secax = ax.twinx()
secax.plot(date, wind, "k-")

plt.show()
In [34]:
cur = pd.read_fwf(r"C:\Users\yfuji\Desktop\lesson\data\KEO\cur50n145w_hr.ascii", names=["cdate", "ctime", "u16", "v16", "u35", "v35"], 
                   header=14078, nrows=8000, colspecs=[(1,9), (10,14), (18,23), (24,29), (42,47), (48,53)])
# cur = pd.read_fwf(r"C:\Users\yfuji\lesson\data\KEO\cur32n145e_hr_2018JA.ascii", names=["cdate", "ctime", "u8", "v8", "u16", "v16", "u36", "v36"], 
#                    header=67, nrows=1430, colspecs=[(1,9), (10,14), (18,23), (24,29), (42,47), (48,53), (66,71), (72,77)])
In [35]:
cur
Out[35]:
cdate ctime u16 v16 u35 v35
0 20210623 1800 23.4 -11.1 19.9 -14.5
1 20210623 1900 8.0 -9.6 7.1 -15.9
2 20210623 2000 5.2 -2.6 9.2 -3.4
3 20210623 2100 9.8 -10.9 5.1 -7.0
4 20210623 2200 5.0 -5.6 -0.5 -3.6
... ... ... ... ... ... ...
5899 20220224 1300 999.9 999.9 999.9 999.9
5900 20220224 1400 999.9 999.9 -5.5 6.9
5901 20220224 1500 999.9 999.9 -6.1 2.6
5902 20220224 1600 999.9 999.9 999.9 999.9
5903 20220224 1700 999.9 999.9 -13.0 6.9

5904 rows × 6 columns
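The 999.9 entries are missing-data sentinels (which is why the next two plots clamp ylim); a minimal cleanup sketch, assuming 999.9 marks every gap:

# Replace the 999.9 fill value with NaN so plots and statistics skip the gaps
cur_clean = cur.replace(999.9, np.nan)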

In [36]:
plt.figure(figsize=(18,4))
plt.plot(cur.u16)
plt.plot(cur.v16)
plt.ylim(-100, 100)
Out[36]:
(-100.0, 100.0)
In [37]:
plt.figure(figsize=(18,4))
plt.plot(cur.u35)
plt.plot(cur.v35)
plt.ylim(-100, 100)
Out[37]:
(-100.0, 100.0)
In [38]:
fu = np.fft.rfft(cur.u16[0:2500])      # use the first 2500 hourly samples
fv = np.fft.rfft(cur.v16[0:2500])
freq = np.fft.rfftfreq(2500, d=1/24)   # hourly sampling: d = 1/24 day, so freq is in cycles/day

# Rotary decomposition: split the velocity into counterclockwise (ccw)
# and clockwise (cw) rotating components
fuccw = 0.5 * (fu + 1j*fv)
fucw = 0.5 * (fu - 1j*fv)
fvccw = -1j * fuccw
fvcw = 1j * fucw
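The same split can be read off one complex FFT (a sketch, equivalent up to the factor-of-2 normalization above): with w = u + iv, positive frequencies of fft(w) rotate counterclockwise and negative frequencies clockwise.

w = cur.u16[0:2500].to_numpy() + 1j * cur.v16[0:2500].to_numpy()
fw = np.fft.fft(w)
fwfreq = np.fft.fftfreq(2500, d=1/24)   # cycles per day, positive and negative
ccw = np.abs(fw[fwfreq > 0])**2         # counterclockwise rotary spectrum
cw = np.abs(fw[fwfreq < 0])**2          # clockwise rotary spectrum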
In [39]:
plt.plot(freq, np.abs(fu)**2+np.abs(fv)**2)
plt.xlim(0.5, 3)
Out[39]:
(0.5, 3.0)
In [40]:
plt.plot(freq, np.abs(fuccw)**2+np.abs(fvccw)**2)
plt.plot(freq, np.abs(fucw)**2+np.abs(fvcw)**2)
plt.xlim(0.5, 3)
Out[40]:
(0.5, 3.0)
In [41]:
2*np.sin(32/180*np.pi)  # inertial frequency, 2*sin(latitude) in cycles per day, evaluated at 32°N
Out[41]:
1.0598385284664098
In [42]:
# Read test
np.loadtxt?
Signature:
np.loadtxt(
    fname,
    dtype=<class 'float'>,
    comments='#',
    delimiter=None,
    converters=None,
    skiprows=0,
    usecols=None,
    unpack=False,
    ndmin=0,
    encoding='bytes',
    max_rows=None,
    *,
    quotechar=None,
    like=None,
)
Docstring:
Load data from a text file.

Each row in the text file must have the same number of values.

Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
    File, filename, list, or generator to read.  If the filename
    extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
    that generators must return bytes or strings. The strings
    in a list or produced by a generator are treated as lines.
dtype : data-type, optional
    Data-type of the resulting array; default: float.  If this is a
    structured data-type, the resulting array will be 1-dimensional, and
    each row will be interpreted as an element of the array.  In this
    case, the number of columns used must match the number of fields in
    the data-type.
comments : str or sequence of str or None, optional
    The characters or list of characters used to indicate the start of a
    comment. None implies no comments. For backwards compatibility, byte
    strings will be decoded as 'latin1'. The default is '#'.
delimiter : str, optional
    The string used to separate values. For backwards compatibility, byte
    strings will be decoded as 'latin1'. The default is whitespace.
converters : dict or callable, optional
    A function to parse all columns strings into the desired value, or
    a dictionary mapping column number to a parser function.
    E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
    Converters can also be used to provide a default value for missing
    data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
    convert empty fields to 0.
    Default: None.
skiprows : int, optional
    Skip the first `skiprows` lines, including comments; default: 0.
usecols : int or sequence, optional
    Which columns to read, with 0 being the first. For example,
    ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
    The default, None, results in all columns being read.

    .. versionchanged:: 1.11.0
        When a single column has to be read it is possible to use
        an integer instead of a tuple. E.g ``usecols = 3`` reads the
        fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
    If True, the returned array is transposed, so that arguments may be
    unpacked using ``x, y, z = loadtxt(...)``.  When used with a
    structured data-type, arrays are returned for each field.
    Default is False.
ndmin : int, optional
    The returned array will have at least `ndmin` dimensions.
    Otherwise mono-dimensional axes will be squeezed.
    Legal values: 0 (default), 1 or 2.

    .. versionadded:: 1.6.0
encoding : str, optional
    Encoding used to decode the inputfile. Does not apply to input streams.
    The special value 'bytes' enables backward compatibility workarounds
    that ensures you receive byte arrays as results if possible and passes
    'latin1' encoded strings to converters. Override this value to receive
    unicode arrays and pass strings as input to converters.  If set to None
    the system default is used. The default value is 'bytes'.

    .. versionadded:: 1.14.0
max_rows : int, optional
    Read `max_rows` rows of content after `skiprows` lines. The default is
    to read all the rows. Note that empty rows containing no data such as
    empty lines and comment lines are not counted towards `max_rows`,
    while such lines are counted in `skiprows`.

    .. versionadded:: 1.16.0
    
    .. versionchanged:: 1.23.0
        Lines containing no data, including comment lines (e.g., lines 
        starting with '#' or as specified via `comments`) are not counted 
        towards `max_rows`.
quotechar : unicode character or None, optional
    The character used to denote the start and end of a quoted item.
    Occurrences of the delimiter or comment characters are ignored within
    a quoted item. The default value is ``quotechar=None``, which means
    quoting support is disabled.

    If two consecutive instances of `quotechar` are found within a quoted
    field, the first is treated as an escape character. See examples.

    .. versionadded:: 1.23.0
like : array_like, optional
    Reference object to allow the creation of arrays which are not
    NumPy arrays. If an array-like passed in as ``like`` supports
    the ``__array_function__`` protocol, the result will be defined
    by it. In this case, it ensures the creation of an array object
    compatible with that passed in via this argument.

    .. versionadded:: 1.20.0

Returns
-------
out : ndarray
    Data read from the text file.

See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files

Notes
-----
This function aims to be a fast reader for simply formatted files.  The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.

.. versionadded:: 1.10.0

The strings produced by the Python float.hex method can be used as
input for floats.

Examples
--------
>>> from io import StringIO   # StringIO behaves like a file object
>>> c = StringIO("0 1\n2 3")
>>> np.loadtxt(c)
array([[0., 1.],
       [2., 3.]])

>>> d = StringIO("M 21 72\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
...                      'formats': ('S1', 'i4', 'f4')})
array([(b'M', 21, 72.), (b'F', 35, 58.)],
      dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])

>>> c = StringIO("1,0,2\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([1., 3.])
>>> y
array([2., 4.])

The `converters` argument is used to specify functions to preprocess the
text prior to parsing. `converters` can be a dictionary that maps
preprocessing functions to each column:

>>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
>>> conv = {
...     0: lambda x: np.floor(float(x)),  # conversion fn for column 0
...     1: lambda x: np.ceil(float(x)),  # conversion fn for column 1
... }
>>> np.loadtxt(s, delimiter=",", converters=conv)
array([[1., 3.],
       [3., 5.]])

`converters` can be a callable instead of a dictionary, in which case it
is applied to all columns:

>>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
>>> import functools
>>> conv = functools.partial(int, base=16)
>>> np.loadtxt(s, converters=conv)
array([[222., 173.],
       [192., 222.]])

This example shows how `converters` can be used to convert a field
with a trailing minus sign into a negative number.

>>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
>>> def conv(fld):
...     return -float(fld[:-1]) if fld.endswith(b'-') else float(fld)
...
>>> np.loadtxt(s, converters=conv)
array([[ 10.01, -31.25],
       [ 19.22,  64.31],
       [-17.57,  63.94]])

Using a callable as the converter can be particularly useful for handling
values with different formatting, e.g. floats with underscores:

>>> s = StringIO("1 2.7 100_000")
>>> np.loadtxt(s, converters=float)
array([1.e+00, 2.7e+00, 1.e+05])

This idea can be extended to automatically handle values specified in
many different formats:

>>> def conv(val):
...     try:
...         return float(val)
...     except ValueError:
...         return float.fromhex(val)
>>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
>>> np.loadtxt(s, delimiter=",", converters=conv, encoding=None)
array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])

Note that with the default ``encoding="bytes"``, the inputs to the
converter function are latin-1 encoded byte strings. To deactivate the
implicit encoding prior to conversion, use ``encoding=None``

>>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
>>> conv = lambda x: -float(x[:-1]) if x.endswith('-') else float(x)
>>> np.loadtxt(s, converters=conv, encoding=None)
array([[ 10.01, -31.25],
       [ 19.22,  64.31],
       [-17.57,  63.94]])

Support for quoted fields is enabled with the `quotechar` parameter.
Comment and delimiter characters are ignored when they appear within a
quoted item delineated by `quotechar`:

>>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
>>> dtype = np.dtype([("label", "U12"), ("value", float)])
>>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
array([('alpha, #42', 10.), ('beta, #64',  2.)],
      dtype=[('label', '<U12'), ('value', '<f8')])

Two consecutive quote characters within a quoted field are treated as a
single escaped character:

>>> s = StringIO('"Hello, my name is ""Monty""!"')
>>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
array('Hello, my name is "Monty"!', dtype='<U26')
File:      c:\users\yfuji\miniconda3\lib\site-packages\numpy\lib\npyio.py
Type:      function
In [ ]: