diff --git a/tessreduce/R_load.py b/tessreduce/R_load.py
index 7d2d201..b1110b2 100755
--- a/tessreduce/R_load.py
+++ b/tessreduce/R_load.py
@@ -1,59 +1,110 @@
 import numpy as np
-R_ps1 = {'g': {'coeff': [ 3.61562687, -0.0891928 ],
-               'std': 0.004146827352467696},
-         'r': {'coeff': [ 2.58602003, -0.03325315],
-               'std': 0.0010620316190595924},
-         'i': {'coeff': [ 1.90959054, -0.01284678],
-               'std': 0.0004962971568272631},
-         'z': {'coeff': [ 1.50168735, -0.0045642 ],
-               'std': 0.0014331914679903046},
-         'y': {'coeff': [ 1.25340149, -0.00247802],
-               'std': 0.0005840472105137083},
-         'kep': {'coeff': [ 2.68629375, -0.26884456],
-               'std': 0.0020136674269240393},
-         'tess': {'coeff': [ 1.902, -0.179],
-               'std': 0.0028265468011445124}
-         }
+# Dictionary of Pan-STARRS1 R values
+R_ps1 = {'g': {'coeff': [3.61562687, -0.0891928],
+               'std': 0.004146827352467696},
+         'r': {'coeff': [2.58602003, -0.03325315],
+               'std': 0.0010620316190595924},
+         'i': {'coeff': [1.90959054, -0.01284678],
+               'std': 0.0004962971568272631},
+         'z': {'coeff': [1.50168735, -0.0045642],
+               'std': 0.0014331914679903046},
+         'y': {'coeff': [1.25340149, -0.00247802],
+               'std': 0.0005840472105137083},
+         'kep': {'coeff': [2.68629375, -0.26884456],
+                 'std': 0.0020136674269240393},
+         'tess': {'coeff': [1.902, -0.179],
+                  'std': 0.0028265468011445124}
+         }
+
+# Dictionary of SkyMapper R values
 R_sm = {'u': {'coeff': [4.902198196976004, -0.04396865635703249],
-        'std': 0.005212225623300655},
-        'v': {'coeff': [4.553419148586131, -0.03096904487746069],
-        'std': 0.015102362684553196},
-        'g': {'coeff': [3.434788880230338, -0.17247389098523408],
-        'std': 0.0019526614969365428},
-        'r': {'coeff': [2.6377280770536853, -0.07696556583546744],
-        'std': 0.0016588895668870856},
-        'i': {'coeff': [1.8190330572341713, -0.01977796422745485],
-        'std': 0.0008544792739952313},
-        'z': {'coeff': [1.3827366254049507, -0.017314195591388342],
-        'std': 0.003027218953684425},
-        'tess': {'coeff': [ 1.902, -0.179],
-        'std': 0.0028265468011445124}
-        }
-
-
-def line(x, c1, c2):
-    return c1 + c2*x
-
-def R_val(band,g=None,r=None,gr=None,ext=0,system='ps1'):
-    if system.lower() == 'ps1':
-        R = R_ps1
-    elif system.lower() == 'skymapper':
-        R = R_sm
-    if (g is not None) & (r is not None):
-        gr = g-r
-
-    if (gr is None) | np.isnan(gr).all():
-        Rb = R[band]['coeff'][1]
-    else:
-        Rr0 = R[band]['coeff'][1]
-        Rg0 = R[band]['coeff'][1]
-
-        gr_int = gr - ext*(Rg0 - Rr0)
-
-        vals = R[band]['coeff']
-        Rb = line(gr_int,vals[0],vals[1])
-    Rb_e = R[band]['std']
-
-    return Rb, Rb_e
+        'std': 0.005212225623300655},
+        'v': {'coeff': [4.553419148586131, -0.03096904487746069],
+        'std': 0.015102362684553196},
+        'g': {'coeff': [3.434788880230338, -0.17247389098523408],
+        'std': 0.0019526614969365428},
+        'r': {'coeff': [2.6377280770536853, -0.07696556583546744],
+        'std': 0.0016588895668870856},
+        'i': {'coeff': [1.8190330572341713, -0.01977796422745485],
+        'std': 0.0008544792739952313},
+        'z': {'coeff': [1.3827366254049507, -0.017314195591388342],
+        'std': 0.003027218953684425},
+        'tess': {'coeff': [1.902, -0.179],
+        'std': 0.0028265468011445124}
+        }
+
+
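Reviewer note: each entry above encodes a linear colour dependence of the extinction coefficient, R = c1 + c2*(g-r), which is exactly what `line`/`R_val` below evaluate. A minimal sketch of what the numbers mean, using the `tess` coefficients from `R_ps1` (plain Python, the colour 0.6 is an arbitrary example value):

```python
# R for the TESS band at an object colour of g-r = 0.6 mag
c1, c2 = 1.902, -0.179  # R_ps1['tess']['coeff']
R_tess = c1 + c2 * 0.6
print(R_tess)  # 1.7946; multiply by E(B-V) to get the extinction in the band
```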
+ """ + this_line = c1+c2*x + return this_line + + +def R_val(band, g=None, r=None, gr=None, ext=0, system='ps1'): + """ + Calcuates the R values based on the system that is used. + + Parameters: + ---------- + band: str + The band that the extinction should be calculated for. + g: float, optional + The g mag of the object. Default is None. + r: + The r mag of the object. Default is None. + gr: float, optional + The g-r colour of the object, is superseeded by g-r if g and r are given. Default is None. + ext: int + Default is 0. + system: str + Values checked for are 'ps1' and 'skymapper'. Other strings should not be used. Default is 'ps1'. + + Returns: + ------- + Rb: + The R value through the band, from known dictionaries. + Rb_e: + The standard deviation of Rb, from known dictionaries.. + + """ + if system.lower() == 'ps1': + R = R_ps1 + elif system.lower() == 'skymapper': + R = R_sm + if (g is not None) & (r is not None): + gr = g-r + + if (gr is None) | np.isnan(gr).all(): + Rb = R[band]['coeff'][1] + else: + Rr0 = R[band]['coeff'][1] + Rg0 = R[band]['coeff'][1] + + gr_int = gr - ext*(Rg0 - Rr0) + + vals = R[band]['coeff'] + Rb = line(gr_int, vals[0], vals[1]) + Rb_e = R[band]['std'] + + return Rb, Rb_e diff --git a/tessreduce/lastpercent.py b/tessreduce/lastpercent.py index 30f056b..ceab492 100644 --- a/tessreduce/lastpercent.py +++ b/tessreduce/lastpercent.py @@ -6,7 +6,8 @@ from joblib import Parallel, delayed from copy import deepcopy -def cor_minimizer(coeff,pix_lc,bkg_lc): + +def cor_minimizer(coeff, pix_lc, bkg_lc): """ Calculates the Pearson r correlation coefficent between the background subtracted lightcurve and the background itself. Takes inputs in a form for minimizing methods to be run on this function. @@ -14,11 +15,11 @@ def cor_minimizer(coeff,pix_lc,bkg_lc): ---------- coeff: float The multiplier on the background flux to be subtracted from the lightcurve. This is the variable being changed in any minimization. - pix_lc: ArrayLike + pix_lc: array_like The full lightcurve of pixel flux data. Has a back - bkg_lc: ArrayLike + bkg_lc: array_like The background lightcurve, to be multiplied by coeff and subtracted from pix_lc - + Returns: ------- corr: float @@ -26,21 +27,22 @@ def cor_minimizer(coeff,pix_lc,bkg_lc): """ lc = pix_lc - coeff * bkg_lc ind = np.isfinite(lc) & np.isfinite(bkg_lc) - #bkgnorm = bkg_lc/np.nanmax(bkg_lc) - #pixnorm= (lc - np.nanmedian(lc)) - #pixnorm = pixnorm / np.nanmax(abs(pixnorm)) - corr = pearsonr(lc[ind],bkg_lc[ind])[0] + # bkgnorm = bkg_lc/np.nanmax(bkg_lc) + # pixnorm= (lc - np.nanmedian(lc)) + # pixnorm = pixnorm / np.nanmax(abs(pixnorm)) + corr = pearsonr(lc[ind], bkg_lc[ind])[0] return abs(corr) -def _parallel_correlation(pixel,bkg,arr,coord,smth_time): + +def _parallel_correlation(pixel, bkg, arr, coord, smth_time): """ Calculates the Pearson r correlation coefficent between the savgol filtered lightcurve and the upper 30% of the background, at the same indices. Parameters: ---------- - pixel: ArrayLike + pixel: array_like The flux lightcurve to be filtered and correlated. - bkg: ArrayLike + bkg: array_like The background lightcurve. arr: Not Used But Positional @@ -56,13 +58,14 @@ def _parallel_correlation(pixel,bkg,arr,coord,smth_time): The absolute value of the Pearson r correlation coefficent between the filtered lightcurve and the upper 30% of the background, rounded to 2 decimal places. 
""" nn = np.isfinite(pixel) - ff = savgol_filter(pixel[nn],smth_time,2) + ff = savgol_filter(pixel[nn], smth_time, 2) b = bkg[nn] - indo = (b > np.percentile(b,70)) #& (bb < np.percentile(bb,95)) - corr = pearsonr(ff[indo],b[indo])[0] - return np.round(abs(corr),2) + indo = (b > np.percentile(b, 70)) # & (bb < np.percentile(bb,95)) + corr = pearsonr(ff[indo], b[indo])[0] + return np.round(abs(corr), 2) -def _find_bkg_cor(tess,cores): + +def _find_bkg_cor(tess, cores): """ Takes a TESSreduce object and calculates the flux-background Pearson r correlation coefficent in parallel. @@ -72,53 +75,55 @@ def _find_bkg_cor(tess,cores): The TESSreduce object that is needing the correlation coefficents calculated. cores: int The number of cores to be used for parallel processing. - + Returns: - cors: ArrayLike + cors: array_like The array of Pearson r correlation coefficents """ - y,x = np.where(np.isfinite(tess.ref)) - coord = np.c_[y,x] + y, x = np.where(np.isfinite(tess.ref)) + coord = np.c_[y, x] cors = np.zeros_like(tess.ref) cor = Parallel(n_jobs=cores)(delayed(_parallel_correlation) - (tess.flux[:,coord[i,0],coord[i,1]], - tess.bkg[:,coord[i,0],coord[i,1]], - cors,coord[i],30) for i in range(len(coord))) + (tess.flux[:, coord[i, 0], coord[i, 1]], + tess.bkg[:, coord[i, 0], + coord[i, 1]], + cors, coord[i], 30) for i in range(len(coord))) cor = np.array(cor) - cors[coord[:,0],coord[:,1]] = cor + cors[coord[:, 0], coord[:, 1]] = cor return cors -def _address_peaks(flux,bkg,std): + +def _address_peaks(flux, bkg, std): """ Filters the upper 30% of the background values and their corresponding flux values. The fit to the background involves a minimization of the correlation coefficents and an interpolated savgol filter of the fluxes. The fluxes are modified by the same savgol filter, and the median of the lower 16% of the std. Parameters: ---------- - flux: ArrayLike + flux: array_like The flux array of interest - bkg: ArrayLike + bkg: array_like The background flux array corresponding to flux. - std: ArrayLike + std: array_like An array of the standard deviations of the background - + Returns: ------- - new_flux: ArrayLike + new_flux: array_like The modified flux array. If there is nothing to modify, new_flux==flux. - new_bkg: ArrayLike + new_bkg: array_like The modified background array. If there is nothing to modify, new_bkg==bkg. 
""" - + nn = np.isfinite(flux) b = bkg[nn] f = flux[nn] - bkg_ind = (b > np.percentile(b,70)) #& (bb < np.percentile(bb,95)) + bkg_ind = (b > np.percentile(b, 70)) # & (bb < np.percentile(bb,95)) split = np.where(np.diff(np.where(bkg_ind)[0]) > 100)[0][0] new_bkg = deepcopy(bkg) new_flux = deepcopy(flux) - counter = np.arange(len(b[bkg_ind]),dtype=int) + counter = np.arange(len(b[bkg_ind]), dtype=int) for i in range(2): if i == 0: split_ind = counter[split:] @@ -126,47 +131,49 @@ def _address_peaks(flux,bkg,std): split_ind = counter[:split] ff = deepcopy(f[bkg_ind][split_ind]) s = std[nn][bkg_ind][split_ind] - med_ind = s < np.percentile(s,16) + med_ind = s < np.percentile(s, 16) med = np.nanmedian(ff[med_ind]) ff -= med - - x0 =[1e-3] - fit = minimize(cor_minimizer,x0,(ff,b[bkg_ind][split_ind]),method='Powell') + + x0 = [1e-3] + fit = minimize(cor_minimizer, x0, + (ff, b[bkg_ind][split_ind]), method='Powell') ff -= b[bkg_ind][split_ind]*fit.x - - + bound_ind = (ff < s*2) & (ff > -s*2) if np.sum(bound_ind*1) > 10: xf = np.arange(len(ff)) - sav = savgol_filter(ff[bound_ind],len(ff[bound_ind])//2 + 1,3) - interp = interp1d(xf[bound_ind],sav,bounds_error=False,fill_value='extrapolate') + sav = savgol_filter(ff[bound_ind], len(ff[bound_ind])//2 + 1, 3) + interp = interp1d(xf[bound_ind], sav, + bounds_error=False, fill_value='extrapolate') sav = interp(xf) - - #plt.figure() - #plt.plot(new_flux[nn][bkg_ind][split_ind]) - #plt.plot(new_bkg[nn][bkg_ind][split_ind]*fit.x + sav) - #plt.plot(new_bkg[nn][bkg_ind][split_ind]*fit.x) - #plt.plot(sav) - #plt.plot(ff-sav,'--') + + # plt.figure() + # plt.plot(new_flux[nn][bkg_ind][split_ind]) + # plt.plot(new_bkg[nn][bkg_ind][split_ind]*fit.x + sav) + # plt.plot(new_bkg[nn][bkg_ind][split_ind]*fit.x) + # plt.plot(sav) + # plt.plot(ff-sav,'--') else: sav = 0 indo = np.arange(len(flux)) indo = indo[nn][bkg_ind][split_ind] - + new_bkg[indo] += new_bkg[nn][bkg_ind][split_ind]*fit.x + sav new_flux[indo] = ff - sav + med return new_flux, new_bkg -def _calc_bkg_std(data,coord,d=6): + +def _calc_bkg_std(data, coord, d=6): """ Calculates the background standard deviation of data in a rectangle of size d pixels around the coord point given. Parameters: ---------- - data: ArrayLike + data: array_like A 2d Array of flux values to calculate the standard deviation of. - coord: ArrayLike (shape(2,)) + coord: array_like (shape(2,)) The y, x coordinate to calculate the standard deviation around. d: int, optional The size of the rectangle to have the standard deviation calculates in. If the pairing of coord and d would result in a rectangle indexing outside of data, this is corrected for, so d is the maximum size of the rectangle, and will give a square box if no corrections are needed. Default is 6. @@ -177,23 +184,26 @@ def _calc_bkg_std(data,coord,d=6): The standard deviation of data at and around coord. 
""" - y = coord[0]; x = coord[1] - ylow = y-d; yhigh=y+d+1 - if ylow < 0: - ylow=0; - if yhigh > data.shape[0]: - yhigh=data.shape[0] - xlow = x-d; xhigh=x+d - if xlow < 0: - xlow=0; - if xhigh > data.shape[0]: - xhigh=data.shape[0] - - std = np.nanstd(data[:,ylow:yhigh,xlow:xhigh],axis=(1,2)) - return std - - -def multi_correlation_cor(tess,limit=0.8,cores=7): + y = coord[0] + x = coord[1] + ylow = y-d + yhigh = y+d+1 + if ylow < 0: + ylow = 0 + if yhigh > data.shape[0]: + yhigh = data.shape[0] + xlow = x-d + xhigh = x+d + if xlow < 0: + xlow = 0 + if xhigh > data.shape[0]: + xhigh = data.shape[0] + + std = np.nanstd(data[:, ylow:yhigh, xlow:xhigh], axis=(1, 2)) + return std + + +def multi_correlation_cor(tess, limit=0.8, cores=7): """ Corrects for correlation coefficents larger than limit. If the flux and the background of tess are correlated (absolute value of correlation coefficent, |r|) to a level higher than limit, a fit to minimize this coefficent is preformed, and the new background and flux values are returned @@ -205,41 +215,40 @@ def multi_correlation_cor(tess,limit=0.8,cores=7): The largest acceptable |r| before any modifications are needed. Should be in range (0,1) for comparison to |r| to make any sense. Default is 0.8. cores: int, optional The number of cores to use for multiprocessing. Default is 7. - + Returns: ------- - flux: ArrayLike + flux: array_like The modified flux array, after any needed changes have been made. If nothing is needed to be changed, or the modification breaks, flux == tess.flux. bkg: The modified background array, after any needed changes have been made. If nothing is needed to be changed, or the modification breaks, bkg == tess.bkg. """ - cors = _find_bkg_cor(tess,cores=cores) - y,x = np.where(cors > limit) + cors = _find_bkg_cor(tess, cores=cores) + y, x = np.where(cors > limit) flux = deepcopy(tess.flux) bkg = deepcopy(tess.bkg) if len(y > 0): try: - coord = np.c_[y,x] + coord = np.c_[y, x] dat = tess.bkg stds = np.zeros_like(dat) - std = Parallel(n_jobs=cores)(delayed(_calc_bkg_std)(dat,coord[i])for i in range(len(coord))) + std = Parallel(n_jobs=cores)(delayed(_calc_bkg_std) + (dat, coord[i])for i in range(len(coord))) std = np.array(std) - stds[:,coord[:,0],coord[:,1]] = std.T - + stds[:, coord[:, 0], coord[:, 1]] = std.T + new_flux, new_bkg = zip(*Parallel(n_jobs=cores)(delayed(_address_peaks) - (tess.flux[:,coord[i,0],coord[i,1]], - tess.bkg[:,coord[i,0],coord[i,1]], - stds[:,coord[i,0],coord[i,1]]) - for i in range(len(coord)))) - + (tess.flux[:, coord[i, 0], coord[i, 1]], + tess.bkg[:, coord[i, + 0], coord[i, 1]], + stds[:, coord[i, 0], coord[i, 1]]) + for i in range(len(coord)))) + new_bkg = np.array(new_bkg) new_flux = np.array(new_flux) - flux[:,coord[:,0],coord[:,1]] = new_flux.T - bkg[:,coord[:,0],coord[:,1]] = new_bkg.T + flux[:, coord[:, 0], coord[:, 1]] = new_flux.T + bkg[:, coord[:, 0], coord[:, 1]] = new_bkg.T except: bad = 1 - return flux, bkg - - - + return flux, bkg diff --git a/tessreduce/rescale_straps.py b/tessreduce/rescale_straps.py index c5109e3..3e9c97e 100755 --- a/tessreduce/rescale_straps.py +++ b/tessreduce/rescale_straps.py @@ -9,26 +9,29 @@ # turn off runtime warnings (lots from logic on nans) import warnings -warnings.filterwarnings("ignore", category=RuntimeWarning) +warnings.filterwarnings("ignore", category=RuntimeWarning) -def grad_clip(data,box_size=100): + +def grad_clip(data, box_size=100): """ Perform a local sigma clip of points based on the gradient of the points. 
diff --git a/tessreduce/rescale_straps.py b/tessreduce/rescale_straps.py
index c5109e3..3e9c97e 100755
--- a/tessreduce/rescale_straps.py
+++ b/tessreduce/rescale_straps.py
@@ -9,26 +9,29 @@
 # turn off runtime warnings (lots from logic on nans)
 import warnings
-warnings.filterwarnings("ignore", category=RuntimeWarning) 
+warnings.filterwarnings("ignore", category=RuntimeWarning)
 
-def grad_clip(data,box_size=100):
+
+def grad_clip(data, box_size=100):
     """
     Perform a local sigma clip of points based on the gradient of the points.
     Pixels with large gradients are contaminated by stars/galaxies.
 
-    Inputs
-    ------
-    data : array
-        1d array of the data to clip
-    box_size : int
-        integer defining the box size to clip over
-    Output
-    ------
-    gradind : bool
+    Parameters:
+    ----------
+    data: array_like
+        1d array of the data to clip.
+    box_size: int, optional
+        Integer defining the box size to clip over. Default value is 100.
+
+    Returns:
+    -------
+    gradind: array_like of bools
+        A boolean mask that is True for points not contaminated by large gradients.
     """
     gradind = np.zeros_like(data)
-    
+
     for i in range(len(data)):
         if i < box_size//2:
             d = data[:i+box_size//2]
@@ -36,11 +39,11 @@
             d = data[i-box_size//2:]
         else:
             d = data[i-box_size//2:i+box_size//2]
-        
+
         ind = np.isfinite(d)
         d = d[ind]
         if len(d) > 5:
-            gind = ~sigma_clip(np.gradient(abs(d))+d,sigma=2).mask
+            gind = ~sigma_clip(np.gradient(abs(d))+d, sigma=2).mask
 
             if i < box_size//2:
                 gradind[:i+box_size//2][ind] = gind
@@ -48,84 +51,141 @@
                 gradind[i-box_size//2:][ind] = gind
             else:
                 gradind[i-box_size//2:i+box_size//2][ind] = gind
-    
+
     gradind = gradind > 0
-    return gradind
+    return gradind
+
 
 def fit_strap(data):
     """
-    interpolate over missing data
+    Interpolates over missing data.
+
+    Parameters:
+    ----------
+    data: array_like
+        1d array of data to be interpolated over.
+
+    Returns:
+    -------
+    p: array_like
+        The interpolated array of fitted data.
     """
-    
-    x = np.arange(0,len(data))
+
+    x = np.arange(0, len(data))
     y = data.copy()
-    p =np.ones_like(x) * np.nan
-    #y[~grad_clip(y)] = np.nan
+    p = np.ones_like(x) * np.nan
+    # y[~grad_clip(y)] = np.nan
     if len(y[np.isfinite(y)]) > 10:
-        lim = np.percentile(y[np.isfinite(y)],50)
+        lim = np.percentile(y[np.isfinite(y)], 50)
         y[y >= lim] = np.nan
         finite = np.isfinite(y)
-        
+
         if len(y[finite]) > 5:
             finite = np.isfinite(y)
-            #y = median_clipping(y)
+            # y = median_clipping(y)
             finite = np.where(finite)[0]
             finite = np.isfinite(y)
-            #y[finite] = savgol_filter(y[finite],11,3)
-            p = interp1d(x[finite], y[finite],bounds_error=False,fill_value=np.nan,kind='nearest')
+            # y[finite] = savgol_filter(y[finite],11,3)
+            p = interp1d(x[finite], y[finite], bounds_error=False,
+                         fill_value=np.nan, kind='nearest')
             p = p(x)
-    #p[np.isfinite(p)] = savgol_filter(p[np.isfinite(p)],31,1)
+    # p[np.isfinite(p)] = savgol_filter(p[np.isfinite(p)],31,1)
     return p
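`fit_strap` only ever sees one column's worth of data, so it is easy to poke at directly; a toy run on a synthetic strap column (values invented):

```python
import numpy as np

col = 100 + np.linspace(0, 5, 200)  # smooth background trend
col[80:90] = np.nan                 # masked-out source pixels

p = fit_strap(col)
# p tracks the lower half of the data (values >= the median are discarded)
# and is NaN wherever nothing could be fit
print(np.nanmedian(p), np.isnan(p).any())
```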
 
-from copy import deepcopy
 
-def calc_strap_factor(i,breaks,size,av_size,normals,data):
+def calc_strap_factor(i, breaks, size, av_size, normals, data):
+    """
+    Calculates the quantum efficiency (qe) of the strap column supplied in data.
+
+    Parameters:
+    ----------
+    i: int
+        Index of the break to calculate the qe for.
+    breaks: array_like
+        Gaps between sky pixels, where sources have been masked out.
+    size: array_like
+        An array of the sizes of the breaks.
+    av_size: int
+        Average size of the breaks of the strap.
+    normals: array_like
+        Normal sky pixels that have not had sources masked out of them.
+    data: array_like
+        The 2d array of image data to have the qe calculated for.
+
+    Returns:
+    -------
+    qe: array_like
+        The quantum efficiency of the strap. Same shape as data.
+
+    """
     qe = np.ones_like(data) * 1. * np.nan
     b = int(breaks[i])
     size = size.astype(int)
     nind = normals[b-av_size:b]
     eind = normals[b:b+av_size]
-    nind = np.append(nind,eind)
-    nind = nind[nind < data.shape[1]]
-    nind = nind[nind >= 0]
-    norm = fit_strap(np.nanmedian(data[:,nind],axis=1))
-    for j in range(size[i]):
-        ind = normals[b]+1+j
+    nind = np.append(nind, eind)
+    nind = nind[nind < data.shape[1]]
+    nind = nind[nind >= 0]
+    norm = fit_strap(np.nanmedian(data[:, nind], axis=1))
+    for j in range(size[i]):
+        ind = normals[b]+1+j
         if (ind > 0) & (ind < data.shape[1]):
-            s1 = fit_strap(data[:,ind])
+            s1 = fit_strap(data[:, ind])
             ratio = norm/s1
-            m = ~sigma_clip(ratio,sigma=2).mask
+            m = ~sigma_clip(ratio, sigma=2).mask
             factor = np.nanmedian(ratio[m])
-            qe[:,normals[b]+1+j] = factor
+            qe[:, normals[b]+1+j] = factor
     return qe
 
-def correct_straps(Image,mask,av_size=5,parallel=True):
+
+def correct_straps(Image, mask, av_size=5, parallel=True):
+    """
+    Calculates the quantum efficiency of the strap for each point in a supplied Image.
+
+    Parameters:
+    ----------
+    Image: array_like
+        The image of concern; should have straps that need the qe calculated.
+    mask: array_like
+        The mask of objects in the field, to be removed from the data before the strap correction is calculated.
+    av_size: int, optional
+        The average size of the breaks in the data. Default is 5.
+    parallel: bool, optional
+        Bool deciding if parallel processing should be used. Default is True.
+
+    Returns:
+    -------
+    qe: array_like
+        The quantum efficiency of the strap. Same shape as Image.
+
+    """
     data = deepcopy(Image)
     mask = deepcopy(mask)
     av_size = int(av_size)
-    sind = np.where(np.nansum((mask & 4),axis=0)>0)[0]
-    normals = np.where(np.nansum((mask & 4),axis=0)==0)[0]
-    normals = np.append(normals,data.shape[1])
-    normals = np.insert(normals,0,-1)
-    breaks = np.where(np.diff(normals,append=0)>1)[0]
-    breaks[breaks==-1] = 0
-    size = (np.diff(normals,append=0))[np.diff(normals,append=0)>1]
+    sind = np.where(np.nansum((mask & 4), axis=0) > 0)[0]
+    normals = np.where(np.nansum((mask & 4), axis=0) == 0)[0]
+    normals = np.append(normals, data.shape[1])
+    normals = np.insert(normals, 0, -1)
+    breaks = np.where(np.diff(normals, append=0) > 1)[0]
+    breaks[breaks == -1] = 0
+    size = (np.diff(normals, append=0))[np.diff(normals, append=0) > 1]
     if len(breaks) > 0:
         if parallel:
             num_cores = multiprocessing.cpu_count()
-            x = np.arange(0,len(breaks),dtype=int)
-            qe = np.array(Parallel(n_jobs=num_cores)(delayed(calc_strap_factor)(i,breaks,size,av_size,normals,data) for i in x))
-            qe = np.nanmedian(qe,axis=0)
-            qe[np.isnan(qe)] = 1
+            x = np.arange(0, len(breaks), dtype=int)
+            qe = np.array(Parallel(n_jobs=num_cores)(delayed(calc_strap_factor)(
+                i, breaks, size, av_size, normals, data) for i in x))
+            qe = np.nanmedian(qe, axis=0)
+            qe[np.isnan(qe)] = 1
         else:
             qe = []
             for i in range(len(breaks)):
-                qe += [calc_strap_factor(i,breaks,size,av_size,normals,data)]
+                qe += [calc_strap_factor(i, breaks, size, av_size, normals, data)]
             qe = np.array(qe)
-            qe = np.nanmedian(qe,axis=0)
-            qe[np.isnan(qe)] = 1
+            qe = np.nanmedian(qe, axis=0)
+            qe[np.isnan(qe)] = 1
     else:
         qe = np.ones_like(Image)
-    return qe
\ No newline at end of file
+    return qe
diff --git a/tessreduce/syndiff.py b/tessreduce/syndiff.py
index 0dcf7db..590eec9 100755
--- a/tessreduce/syndiff.py
+++ b/tessreduce/syndiff.py
@@ -7,338 +7,356 @@
 from astropy.wcs import WCS
 import matplotlib.path as pat
 from copy import deepcopy
-from scipy.ndimage import rotate 
+from scipy.ndimage import rotate
 from astropy.convolution import Gaussian2DKernel
 from scipy import signal
 
-
 def pix2coord(x, y, mywcs):
-    """
-    Calculates RA and DEC from the given pixel coordinates
-
-    Parameters:
-    ----------
-    x: float
-        The x coordiante to be changed into a world coordinate
-    y: float
-        The y coordiante to be changed into a world coordinate
-    mywcs: astropy.wcs.WCS
-        The World Coordinate System to be used for the conversion.
-
-    Returns:
-    -------
-    world_coords: NDArray shape(2,)
-        An array with RA and DEC point, calculated from the given x and y points and the WCS
-    """
-    wx, wy = mywcs.all_pix2world(x, y, 0)
-    world_coords = np.array([float(wx), float(wy)])
-    return world_coords
+    """
+    Calculates RA and DEC from the given pixel coordinates
+
+    Parameters:
+    ----------
+    x: float
+        The x coordinate to be changed into a world coordinate
+    y: float
+        The y coordinate to be changed into a world coordinate
+    mywcs: astropy.wcs.WCS
+        The World Coordinate System to be used for the conversion.
+
+    Returns:
+    -------
+    world_coords: NDArray shape(2,)
+        An array with the RA and DEC point, calculated from the given x and y points and the WCS
+    """
+    wx, wy = mywcs.all_pix2world(x, y, 0)
+    world_coords = np.array([float(wx), float(wy)])
+    return world_coords
+
 
 def coord2pix(ra, dec, mywcs):
-    """
-    Calculates the pixel coordinates from the RA and DEC values given
-
-    Parameters:
-    ra: ArrayLike, shape(N,)
-        The RA value to be changed into pixel coordinates
-    dec: ArrayLike, shape(N,)
-        The DEC value to be changed into pixel coordinates
-    mywcs: astropy.wcs.WCS
-        The World Coordinate System to be used for the conversion.
-
-    Returns:
-    -------
-    pixel_coords: NDarray, shape(2,)
-        An NDarray of the x and y coordinate, calculated from the given RA and DEC values and the WCS
-    """
-    wx, wy = mywcs.all_world2pix(ra, dec, 0)
-    pixel_coords = np.array([float(wx), float(wy)])
-    return pixel_coords
-
-def Get_TESS_corners(TESS,PS1_wcs):
-    """
-    Calculates the corners of pixels in a tessreduce object that has its' own WCS and the corresponding corners of pixels in a different WCS.
-
-    Parameters:
-    ----------
-    TESS: TESSreduce object
-        The object to get the corner from
-    PS1_wcs: astropy.wcs.WCS
-        The World Coordinate System to be used for any conversion between the new image
-
-    Returns:
-    -------
-    ps_corners: NDArray
-        The corners of all the pixels in the PS1_wcs frame of reference
-    """
-
-    y,x = TESS.flux.shape[1:]
-    # include the top corners for the last pixels
-    x += 1; y += 1
-
-    corners = np.zeros((2,x,y))
-    ps_corners = np.zeros((2,x,y))
-    x_arr = np.arange(0,x) - 0.5 # offset from the pixel centers
-    y_arr = np.arange(0,y) - 0.5 # offset from the pixel centers
-
-    for i in range(x):
-        for j in range(y):
-            corners[:,i,j] = pix2coord(x_arr[i],y_arr[j],TESS.wcs) # why is this offset by 1????
-            ps_corners[:,i,j] = coord2pix(corners[0,i,j],corners[1,i,j],PS1_wcs)
-
-    return ps_corners
-
-def PS1_image_to_TESS(image,ebv = 0):
-    """
-    ?
-    Parameters:
-    ----------
-    image: TYPE
-        DESCRIPTION
-    ebv: float, default 0
-        The extinction of the image
-
-    Returns:
-    -------
-    tess: TYPE
-        DESCRIPTION
-    """
-    zp = 25
-    #gr = (PS1.gmag - PS1.rmag).values
-
-    #eg, e = R_val('g',gr=gr,ext=ebv); er, e = R_val('r',gr=gr,ext=ebv)
-    #ei, e = R_val('i',gr=gr,ext=ebv); ez, e = R_val('z',gr=gr,ext=ebv)
-    #ey, e = R_val('y',gr=gr,ext=ebv); et, e = R_val('tess',gr=gr,ext=ebv)
-    #eg = eg * ebv; er = er * ebv; ei = ei * ebv; ez = ez * ebv
-    #ey = ey * ebv; et = et * ebv
-
-    g = image['g'] #mag2flux(PS1.gmag.values - eg,zp)
-    r = image['r'] #mag2flux(PS1.rmag.values - er,zp)
-    i = image['i'] #mag2flux(PS1.imag.values - ei,zp)
-    z = image['z'] #mag2flux(PS1.zmag.values - ez,zp)
-    y = image['y'] #mag2flux(PS1.ymag.values - ey,zp)
-
-    cr = 0.25582823; ci = 0.27609407; cz = 0.35809516
-    cy = 0.11244277; cp = 0.00049096
-
-    tess = (cr*r + ci*i + cz*z + cy*y)*(g/i)**cp
-    return tess
-
-
-def getimages(ra,dec,size=240,filters="grizy"):
-
-    """Query ps1filenames.py service to get a list of images
-
-    ra, dec = position in degrees
-    size = image size in pixels (0.25 arcsec/pixel)
-    filters = string with filters to include
-    Returns a table with the results
-    """
-
-    service = "https://ps1images.stsci.edu/cgi-bin/ps1filenames.py"
-    url = ("{service}?ra={ra}&dec={dec}&size={size}&format=fits"
-           "&filters={filters}").format(**locals())
-    table = Table.read(url, format='ascii')
-    return table
+    """
+    Calculates the pixel coordinates from the RA and DEC values given
+
+    Parameters:
+    ra: array_like, shape(N,)
+        The RA value to be changed into pixel coordinates
+    dec: array_like, shape(N,)
+        The DEC value to be changed into pixel coordinates
+    mywcs: astropy.wcs.WCS
+        The World Coordinate System to be used for the conversion.
+
+    Returns:
+    -------
+    pixel_coords: NDarray, shape(2,)
+        An NDarray of the x and y coordinate, calculated from the given RA and DEC values and the WCS
+    """
+    wx, wy = mywcs.all_world2pix(ra, dec, 0)
+    pixel_coords = np.array([float(wx), float(wy)])
+    return pixel_coords
+
+
+def Get_TESS_corners(TESS, PS1_wcs):
+    """
+    Calculates the corners of pixels in a tessreduce object that has its own WCS and the corresponding corners of pixels in a different WCS.
+
+    Parameters:
+    ----------
+    TESS: TESSreduce object
+        The object to get the corners from
+    PS1_wcs: astropy.wcs.WCS
+        The World Coordinate System that the pixel corners are converted into
+
+    Returns:
+    -------
+    ps_corners: NDArray
+        The corners of all the pixels in the PS1_wcs frame of reference
+    """
+
+    y, x = TESS.flux.shape[1:]
+    # include the top corners for the last pixels
+    x += 1
+    y += 1
+
+    corners = np.zeros((2, x, y))
+    ps_corners = np.zeros((2, x, y))
+    x_arr = np.arange(0, x) - 0.5  # offset from the pixel centers
+    y_arr = np.arange(0, y) - 0.5  # offset from the pixel centers
+
+    for i in range(x):
+        for j in range(y):
+            # why is this offset by 1????
+            corners[:, i, j] = pix2coord(x_arr[i], y_arr[j], TESS.wcs)
+            ps_corners[:, i, j] = coord2pix(
+                corners[0, i, j], corners[1, i, j], PS1_wcs)
+
+    return ps_corners
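`pix2coord` and `coord2pix` are thin wrappers around astropy's `all_pix2world`/`all_world2pix`, so they should round-trip exactly; a quick check with a hypothetical TAN WCS:

```python
import numpy as np
from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [180.0, -45.0]     # made-up reference point
w.wcs.crpix = [50.0, 50.0]
w.wcs.cdelt = [-0.0058, 0.0058]  # ~21 arcsec pixels, roughly TESS-like

ra, dec = pix2coord(10, 20, w)
x, y = coord2pix(ra, dec, w)
print(np.allclose([x, y], [10, 20]))  # True
```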
+
+
+def PS1_image_to_TESS(image, ebv=0):
+    """
+    Combines the PS1 grizy images into an approximation of the TESS band.
+
+    Parameters:
+    ----------
+    image: dict
+        Dictionary of PS1 images keyed by filter ('g', 'r', 'i', 'z', 'y').
+    ebv: float, default 0
+        The E(B-V) extinction of the image
+
+    Returns:
+    -------
+    tess: array_like
+        The approximate TESS-band image constructed from the PS1 filters.
+    """
+    zp = 25
+    # gr = (PS1.gmag - PS1.rmag).values
+
+    # eg, e = R_val('g',gr=gr,ext=ebv); er, e = R_val('r',gr=gr,ext=ebv)
+    # ei, e = R_val('i',gr=gr,ext=ebv); ez, e = R_val('z',gr=gr,ext=ebv)
+    # ey, e = R_val('y',gr=gr,ext=ebv); et, e = R_val('tess',gr=gr,ext=ebv)
+    # eg = eg * ebv; er = er * ebv; ei = ei * ebv; ez = ez * ebv
+    # ey = ey * ebv; et = et * ebv
+
+    g = image['g']  # mag2flux(PS1.gmag.values - eg,zp)
+    r = image['r']  # mag2flux(PS1.rmag.values - er,zp)
+    i = image['i']  # mag2flux(PS1.imag.values - ei,zp)
+    z = image['z']  # mag2flux(PS1.zmag.values - ez,zp)
+    y = image['y']  # mag2flux(PS1.ymag.values - ey,zp)
+
+    cr = 0.25582823
+    ci = 0.27609407
+    cz = 0.35809516
+    cy = 0.11244277
+    cp = 0.00049096
+
+    tess = (cr*r + ci*i + cz*z + cy*y)*(g/i)**cp
+    return tess
+
+
+def getimages(ra, dec, size=240, filters="grizy"):
+    """Query ps1filenames.py service to get a list of images
+
+    ra, dec = position in degrees
+    size = image size in pixels (0.25 arcsec/pixel)
+    filters = string with filters to include
+    Returns a table with the results
+    """
+
+    service = "https://ps1images.stsci.edu/cgi-bin/ps1filenames.py"
+    url = ("{service}?ra={ra}&dec={dec}&size={size}&format=fits"
+           "&filters={filters}").format(**locals())
+    table = Table.read(url, format='ascii')
+    return table
 
 
 def geturl(ra, dec, size=240, output_size=None, filters="grizy", format="jpg", color=False):
-    """
-    Get URL for images in the table
-
-    ra, dec = position in degrees
-    size = extracted image size in pixels (0.25 arcsec/pixel)
-    output_size = output (display) image size in pixels (default = size).
-        output_size has no effect for fits format images.
-    filters = string with filters to include
-    format = data format (options are "jpg", "png" or "fits")
-    color = if True, creates a color image (only for jpg or png format).
-        Default is return a list of URLs for single-filter grayscale images.
-    Returns a string with the URL
-    """
-
-    if color and format == "fits":
-        raise ValueError("color images are available only for jpg or png formats")
-    if format not in ("jpg","png","fits"):
-        raise ValueError("format must be one of jpg, png, fits")
-    table = getimages(ra,dec,size=size,filters=filters)
-    url = ("https://ps1images.stsci.edu/cgi-bin/fitscut.cgi?"
- "ra={ra}&dec={dec}&size={size}&format={format}&filetypes=stack").format(**locals()) - if output_size: - url = url + "&output_size={}".format(output_size) - # sort filters from red to blue - flist = ["yzirg".find(x) for x in table['filter']] - table = table[np.argsort(flist)] - if color: - if len(table) > 3: - # pick 3 filters - table = table[[0,len(table)//2,len(table)-1]] - for i, param in enumerate(["red","green","blue"]): - url = url + "&{}={}".format(param,table['filename'][i]) - else: - urlbase = url + "&red=" - url = [] - for filename in table['filename']: - url.append(urlbase+filename) - return url - - -def Get_PS1(RA, DEC,Size, filt='i'): - ''' - Size limit seems to be around 1000 - ''' - if Size > 30: - raise ValueError('File is too large!') - Scale = 100 - size = Size * Scale#int((Size + 2*np.sqrt(1/2)*Size) * Scale ) # last term is a fudge factor - fitsurl = geturl(RA,DEC, size=size, filters=filt, format="fits") - if len(fitsurl) > 0: - fh = fits.open(fitsurl[0]) - return fh[0] - else: - raise ValueError("No PS1 images at for this coordinate") - return - -def PS1_images(RA, DEC,Size,filt=['g','r','i','z','y']): - """ - Grab all PS1 images and make a dictionary, size is in terms of TESS pixels, 100 less than actual. - """ - images = {} - for f in filt: - im = Get_PS1(RA, DEC,Size,f) - ima = im.data - m = -2.5*np.log10(ima) + 25 + 2.5*np.log10(im.header['EXPTIME']) - flux = 10**(-2/5*(m-25)) - flux[~np.isfinite(flux)] = 0 - #ima[~np.isfinite(ima) | (ima < 0)] = 0 - images[f] = flux - #images['exp' + f] = im.header['EXPTIME'] - - images['tess'] = PS1_image_to_TESS(images) - images['wcs'] = WCS(im) - - return images - + """ + Get URL for images in the table + + ra, dec = position in degrees + size = extracted image size in pixels (0.25 arcsec/pixel) + output_size = output (display) image size in pixels (default = size). + output_size has no effect for fits format images. + filters = string with filters to include + format = data format (options are "jpg", "png" or "fits") + color = if True, creates a color image (only for jpg or png format). + Default is return a list of URLs for single-filter grayscale images. + Returns a string with the URL + """ + + if color and format == "fits": + raise ValueError( + "color images are available only for jpg or png formats") + if format not in ("jpg", "png", "fits"): + raise ValueError("format must be one of jpg, png, fits") + table = getimages(ra, dec, size=size, filters=filters) + url = ("https://ps1images.stsci.edu/cgi-bin/fitscut.cgi?" 
+ "ra={ra}&dec={dec}&size={size}&format={format}&filetypes=stack").format(**locals()) + if output_size: + url = url + "&output_size={}".format(output_size) + # sort filters from red to blue + flist = ["yzirg".find(x) for x in table['filter']] + table = table[np.argsort(flist)] + if color: + if len(table) > 3: + # pick 3 filters + table = table[[0, len(table)//2, len(table)-1]] + for i, param in enumerate(["red", "green", "blue"]): + url = url + "&{}={}".format(param, table['filename'][i]) + else: + urlbase = url + "&red=" + url = [] + for filename in table['filename']: + url.append(urlbase+filename) + return url + + +def Get_PS1(RA, DEC, Size, filt='i'): + ''' + Size limit seems to be around 1000 + ''' + if Size > 30: + raise ValueError('File is too large!') + Scale = 100 + # int((Size + 2*np.sqrt(1/2)*Size) * Scale ) # last term is a fudge factor + size = Size * Scale + fitsurl = geturl(RA, DEC, size=size, filters=filt, format="fits") + if len(fitsurl) > 0: + fh = fits.open(fitsurl[0]) + return fh[0] + else: + raise ValueError("No PS1 images at for this coordinate") + return + + +def PS1_images(RA, DEC, Size, filt=['g', 'r', 'i', 'z', 'y']): + """ + Grab all PS1 images and make a dictionary, size is in terms of TESS pixels, 100 less than actual. + """ + images = {} + for f in filt: + im = Get_PS1(RA, DEC, Size, f) + ima = im.data + m = -2.5*np.log10(ima) + 25 + 2.5*np.log10(im.header['EXPTIME']) + flux = 10**(-2/5*(m-25)) + flux[~np.isfinite(flux)] = 0 + # ima[~np.isfinite(ima) | (ima < 0)] = 0 + images[f] = flux + # images['exp' + f] = im.header['EXPTIME'] + + images['tess'] = PS1_image_to_TESS(images) + images['wcs'] = WCS(im) + + return images def Make_squares(Corners): - squares = [] - for n in range(Corners.shape[1]-1): - for m in range(Corners.shape[2]-1): - # define the verticies - square = np.zeros((4,2)) - square[0,:] = [Corners[0,n,m],Corners[1,n,m]] - square[1,:] = [Corners[0,n+1,m],Corners[1,n+1,m]] - square[2,:] = [Corners[0,n+1,m+1],Corners[1,n+1,m+1]] - square[3,:] = [Corners[0,n,m+1],Corners[1,n,m+1]] - # define the patch - path = pat.Path(square) - squares += [path] - return squares + squares = [] + for n in range(Corners.shape[1]-1): + for m in range(Corners.shape[2]-1): + # define the verticies + square = np.zeros((4, 2)) + square[0, :] = [Corners[0, n, m], Corners[1, n, m]] + square[1, :] = [Corners[0, n+1, m], Corners[1, n+1, m]] + square[2, :] = [Corners[0, n+1, m+1], Corners[1, n+1, m+1]] + square[3, :] = [Corners[0, n, m+1], Corners[1, n, m+1]] + # define the patch + path = pat.Path(square) + squares += [path] + return squares + def Footprint_square(Corners, Points): - square = np.zeros((4,2)) - square[0,:] = [Corners[0,0,0],Corners[1,0,0]] - square[1,:] = [Corners[0,-1,0],Corners[1,-1,0]] - square[2,:] = [Corners[0,-1,-1],Corners[1,-1,-1]] - square[3,:] = [Corners[0,0,-1],Corners[1,0,-1]] - path = pat.Path(square) - contained = path.contains_points(Points) - points = Points[contained] - return points - + square = np.zeros((4, 2)) + square[0, :] = [Corners[0, 0, 0], Corners[1, 0, 0]] + square[1, :] = [Corners[0, -1, 0], Corners[1, -1, 0]] + square[2, :] = [Corners[0, -1, -1], Corners[1, -1, -1]] + square[3, :] = [Corners[0, 0, -1], Corners[1, 0, -1]] + path = pat.Path(square) + contained = path.contains_points(Points) + points = Points[contained] + return points + + def Pix_sum(Square): - arr = np.zeros_like(squares) - contained = squares[Square].contains_points(pspixels) - if contained.any(): - good = pspixels[contained].astype('int') - summed = 
 def Pix_sum(Square):
-    arr = np.zeros_like(squares)
-    contained = squares[Square].contains_points(pspixels)
-    if contained.any():
-        good = pspixels[contained].astype('int')
-        summed = np.nansum(psimage[good[:,1],good[:,0]])
-        arr[Square] = summed
-    return arr
+    arr = np.zeros_like(squares)
+    contained = squares[Square].contains_points(pspixels)
+    if contained.any():
+        good = pspixels[contained].astype('int')
+        summed = np.nansum(psimage[good[:, 1], good[:, 0]])
+        arr[Square] = summed
+    return arr
 
 
 def Regrid_PS(PS1, Corners):
-    dim1, dim2 = Corners.shape[1:]
-    dim1 -= 1; dim2 -= 1
-    global px, py
-    px, py = np.where(PS1)
-    global squares
-    squares = np.array(Make_squares(Corners))
-    square_num = np.arange(0,len(squares))
-
-    points = np.zeros((len(px),2))
-    points[:,0] = px
-    points[:,1] = py
-
-    global pspixels
-    pspixels = Footprint_square(Corners, points)
-
-    global psimage
-    psimage = PS1.copy()
-
-    pool = MultiPool()
-    values = list(pool.map(Pix_sum, square_num))
-    pool.close()
-
-    PS_scene = np.array(values)
-    PS_scene = np.nansum(PS_scene,axis=0)
-    PS_scene = PS_scene.astype('float')
-    PS_scene = PS_scene.reshape(dim1,dim2)
-    return PS_scene
-
-
-
-
-def PS1_scene(tpf, size=20, convolve = 'PS1', plot = False):
-
-    ps1 = PS1_images(tpf.ra, tpf.dec, size)
-    ps1_image = ps1['tess']
-    ps1_wcs = ps1['wcs']
-
-    if np.isnan(ps1_image).any():
-        print('PS1 image for the region is incomplete.'
-              ' NaNs are present in image. NaN values are set to 0')
-        ps1_image[np.isnan(ps1_image)] = 0
-
-    if convolve.lower() == 'ps1':
-        try:
-            kernal = Gaussian2DKernel(1.5*100) #Interp_PRF(tpf.row + (tpf.flux.value.shape[1]/2), tpf.column + (tpf.flux.value.shape[2]/2),
-                                               #tpf.camera, tpf.ccd, tpf.sector)
-            ps1_image = signal.fftconvolve(ps1_image, kernal,mode='same')
-        except MemoryError:
-            raise MemoryError("The convolution is too large, try a smaller PS1 image.")
-
-    tess_corners = Get_TESS_corners(tpf, ps1_wcs)
-    if plot:
-        plt.figure(figsize=(6,3))
-        #plt.subplot(projection=ps_wcs)
-        plt.subplot(1,2,1)
-        plt.title('PS1 image')
-        plt.imshow(ps1_image/np.nanmax(ps1_image),origin='lower',vmax=.1,cmap='Greys')
-        x, y = tpf.flux.value.shape[1:]
-        x += 1; y += 1
-        z = np.arange(0,x*y,1)
-        plt.scatter(tess_corners[0,:,:].flatten(),tess_corners[1,:,:].flatten(),c=z,s=1)
-
-        plt.subplot(1,2,2)
-        plt.title('TESS image')
-        plt.imshow(tpf.flux[100]/np.nanmax(tpf.flux[100]),origin='lower',vmax=.1)
-
-    ps1_scene = Regrid_PS(ps1_image,tess_corners)
-
-    if convolve.lower() == 'scene':
-        PRF = Get_PRF(tpf.row + (tpf.flux.value.shape[1]/2), tpf.column + (tpf.flux.value.shape[2]/2),
-                      tpf.camera, tpf.ccd, tpf.sector)
-        ps1_scene = signal.fftconvolve(ps1_scene, PRF, mode='same')
-
-    if plot:
-        plt.figure(figsize=(6,3))
-        plt.subplot(1,2,1)
-        plt.title('PS1 scene')
-        plt.imshow(rotate(np.flipud(ps1_scene/np.nanmax(ps1_scene)),-90),origin='lower',vmax=0.1)
-        plt.subplot(1,2,2)
-        plt.title('TESS image')
-        plt.imshow(tess.flux[0]/np.nanmax(tess.flux[0]),origin='lower',vmax=0.1)
-
-    return ps1_scene
\ No newline at end of file
+    dim1, dim2 = Corners.shape[1:]
+    dim1 -= 1
+    dim2 -= 1
+    global px, py
+    px, py = np.where(PS1)
+    global squares
+    squares = np.array(Make_squares(Corners))
+    square_num = np.arange(0, len(squares))
+
+    points = np.zeros((len(px), 2))
+    points[:, 0] = px
+    points[:, 1] = py
+
+    global pspixels
+    pspixels = Footprint_square(Corners, points)
+
+    global psimage
+    psimage = PS1.copy()
+
+    pool = MultiPool()
+    values = list(pool.map(Pix_sum, square_num))
+    pool.close()
+
+    PS_scene = np.array(values)
+    PS_scene = np.nansum(PS_scene, axis=0)
+    PS_scene = PS_scene.astype('float')
+    PS_scene = PS_scene.reshape(dim1, dim2)
+    return PS_scene
+
+
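`Make_squares`/`Pix_sum` lean on matplotlib's point-in-polygon test; the core trick in isolation, with an invented quadrilateral standing in for one projected TESS pixel:

```python
import numpy as np
import matplotlib.path as pat

square = pat.Path([[0, 0], [10, 1], [11, 12], [-1, 10]])  # made-up corners

yy, xx = np.mgrid[0:15, 0:15]
points = np.c_[xx.ravel(), yy.ravel()]
inside = square.contains_points(points)
print(inside.sum(), 'PS1 pixels fall inside this TESS pixel')
```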
+def PS1_scene(tpf, size=20, convolve='PS1', plot=False):
+
+    ps1 = PS1_images(tpf.ra, tpf.dec, size)
+    ps1_image = ps1['tess']
+    ps1_wcs = ps1['wcs']
+
+    if np.isnan(ps1_image).any():
+        print('PS1 image for the region is incomplete.'
+              ' NaNs are present in image. NaN values are set to 0')
+        ps1_image[np.isnan(ps1_image)] = 0
+
+    if convolve.lower() == 'ps1':
+        try:
+            kernel = Gaussian2DKernel(1.5*100)
+            # kernel = Interp_PRF(tpf.row + (tpf.flux.value.shape[1]/2),
+            #                     tpf.column + (tpf.flux.value.shape[2]/2),
+            #                     tpf.camera, tpf.ccd, tpf.sector)
+            ps1_image = signal.fftconvolve(ps1_image, kernel, mode='same')
+        except MemoryError:
+            raise MemoryError(
+                "The convolution is too large, try a smaller PS1 image.")
+
+    tess_corners = Get_TESS_corners(tpf, ps1_wcs)
+    if plot:
+        plt.figure(figsize=(6, 3))
+        # plt.subplot(projection=ps_wcs)
+        plt.subplot(1, 2, 1)
+        plt.title('PS1 image')
+        plt.imshow(ps1_image/np.nanmax(ps1_image),
+                   origin='lower', vmax=.1, cmap='Greys')
+        x, y = tpf.flux.value.shape[1:]
+        x += 1
+        y += 1
+        z = np.arange(0, x*y, 1)
+        plt.scatter(tess_corners[0, :, :].flatten(),
+                    tess_corners[1, :, :].flatten(), c=z, s=1)
+
+        plt.subplot(1, 2, 2)
+        plt.title('TESS image')
+        plt.imshow(tpf.flux[100]/np.nanmax(tpf.flux[100]),
+                   origin='lower', vmax=.1)
+
+    ps1_scene = Regrid_PS(ps1_image, tess_corners)
+
+    if convolve.lower() == 'scene':
+        PRF = Get_PRF(tpf.row + (tpf.flux.value.shape[1]/2), tpf.column + (tpf.flux.value.shape[2]/2),
+                      tpf.camera, tpf.ccd, tpf.sector)
+        ps1_scene = signal.fftconvolve(ps1_scene, PRF, mode='same')
+
+    if plot:
+        plt.figure(figsize=(6, 3))
+        plt.subplot(1, 2, 1)
+        plt.title('PS1 scene')
+        plt.imshow(rotate(np.flipud(ps1_scene/np.nanmax(ps1_scene)), -90),
+                   origin='lower', vmax=0.1)
+        plt.subplot(1, 2, 2)
+        plt.title('TESS image')
+        # was tess.flux[0]; no 'tess' exists in this scope, tpf is the argument
+        plt.imshow(tpf.flux[0]/np.nanmax(tpf.flux[0]),
+                   origin='lower', vmax=0.1)
+
+    return ps1_scene
diff --git a/tessreduce/tools.py b/tessreduce/tools.py
index 84cc25f..94d9ea9 100755
--- a/tessreduce/tools.py
+++ b/tessreduce/tools.py
@@ -8,7 +8,7 @@
 
     Parameters
     ----------
-    data : array
+    data : array_like
        A single image
 
     sigma : float
@@ -16,7 +16,7 @@
 
     Returns
     -------
-    clipped : array
+    clipped : array_like
        A boolean array to mask the original array
     """
     clipped = ~sigma_clip(data, sigma=sigma).mask
@@ -29,12 +29,12 @@
 
     Parameters:
     ----------
-    data: ArrayLike
-        ArrayLike set of data that may have associated units that want to be removed. Should be able to return something sensible when .values is called.
+    data: array_like
+        An array_like set of data that may have associated units to be removed. Should be able to return something sensible when .values is called.
 
     Returns:
     -------
-    data: ArrayLike
+    data: array_like
        Same shape as input data, but will not have any units
     """
     if type(data) != np.ndarray:
@@ -48,12 +48,12 @@
 
     Parameters:
     ----------
-    flux: ArrayLike
+    flux: array_like
        An array of flux values
 
     Returns:
     -------
-    rad: ArrayLike
+    rad: array_like
        The radius of the fluxes
     """
     rad = np.sqrt(np.gradient(flux)**2+np.gradient(np.gradient(flux))**2)
@@ -66,12 +66,12 @@
 
     Parameters:
     ----------
-    flux: ArrayLike
+    flux: array_like
        An array of flux values
 
     Returns:
     -------
-    rad: ArrayLike
+    rad: array_like
        The radius of the fluxes
     """
     rad = np.sqrt(flux**2+np.gradient(flux)**2)
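`grads_rad` and `grad_flux_rad` both collapse a lightcurve into a phase-space "radius" that spikes at sharp features; a quick look at what they produce, on synthetic data:

```python
import numpy as np

flux = np.ones(100)
flux[50:55] = 5.0  # a sharp synthetic transient

r1 = grads_rad(flux)       # large where the gradient and curvature are large
r2 = grad_flux_rad(flux)   # large where the flux and its gradient are large
print(np.argmax(r1), np.argmax(r2))
```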