4th_Order_Fitter¶
+Created on Thu Jun 25 09:39:22 2020
+@author: Tzu-Chiao Chien and Ryan Kaufman
+diff --git a/data_processing/AWG_and_Alazar/Process_One_3_state_acquisition.py b/data_processing/AWG_and_Alazar/Process_One_3_state_acquisition.py index 4a95e34..246d44e 100644 --- a/data_processing/AWG_and_Alazar/Process_One_3_state_acquisition.py +++ b/data_processing/AWG_and_Alazar/Process_One_3_state_acquisition.py @@ -34,62 +34,64 @@ def find_all_ddh5(cwd): #%%sample one file to check things -IQ_offset = np.array((0,0)) -# records_per_pulsetype = 3870 -cf = 6171427180.18 -# amp_off_filepath = r'Z:/Data/C1/C1_Hakan/Gain_pt_0.103mA/Pump_power_sweeps/1/2021-06-30/2021-06-30_0011_LO_6152798714.0_pwr_-8.69_amp_1_rotation_phase_2.094/2021-06-30_0011_LO_6152798714.0_pwr_-8.69_amp_1_rotation_phase_2.094.ddh5' -# amp_off_filepath = r'Z:/Data/C1/C1_Hakan/Gain_pt_0.103mA/signal_power_sweeps/1_initial_guess/2021-07-06/2021-07-06_0003_Amp_0__LO_freq_6153298714.0_Hz_Sig_Volt_0.0_V_Phase_0.0_rad_/2021-07-06_0003_Amp_0__LO_freq_6153298714.0_Hz_Sig_Volt_0.0_V_Phase_0.0_rad_.ddh5' - -# filepath = r'G:/My Drive/shared/Amplifier_Response_Data/Data/Pump_pwr_detuning_sweeps/2021-07-07/2021-07-07_0409_Amp_1__pwr_-8.83_dBm_LO_freq_6172127180.18_Hz_Phase_0.0_rad_/2021-07-07_0409_Amp_1__pwr_-8.83_dBm_LO_freq_6172127180.18_Hz_Phase_0.0_rad_.ddh5' - -# filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0002_3_state_40dB_att_Amp_0__pwr_-7.0_dBm_Rep_1__/2021-09-14_0002_3_state_40dB_att_Amp_0__pwr_-7.0_dBm_Rep_1__.ddh5' -filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0027_3_state_40dB_att_Amp_1__pwr_-7.0_dBm_Rep_1__/2021-09-14_0027_3_state_40dB_att_Amp_1__pwr_-7.0_dBm_Rep_1__.ddh5' -# filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0034_3_state_40dB_att_Amp_1__pwr_-6.75_dBm_Rep_3__/2021-09-14_0034_3_state_40dB_att_Amp_1__pwr_-6.75_dBm_Rep_3__.ddh5' -# filepath = 
r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0038_3_state_40dB_att_Amp_1__pwr_-6.5_dBm_Rep_2__/2021-09-14_0038_3_state_40dB_att_Amp_1__pwr_-6.5_dBm_Rep_2__.ddh5' -# filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0044_3_state_40dB_att_Amp_1__pwr_-6.25_dBm_Rep_3__/2021-09-14_0044_3_state_40dB_att_Amp_1__pwr_-6.25_dBm_Rep_3__.ddh5' -# filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0046_3_state_40dB_att_Amp_1__pwr_-6.0_dBm_Rep_0__/2021-09-14_0046_3_state_40dB_att_Amp_1__pwr_-6.0_dBm_Rep_0__.ddh5' - -# filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0001_3_state_40dB_att_Amp_0__pwr_-7.0_dBm_Rep_0__/2021-09-14_0001_3_state_40dB_att_Amp_0__pwr_-7.0_dBm_Rep_0__.ddh5' -# filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0028_3_state_40dB_att_Amp_1__pwr_-7.0_dBm_Rep_2__/2021-09-14_0028_3_state_40dB_att_Amp_1__pwr_-7.0_dBm_Rep_2__.ddh5' -# filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0031_3_state_40dB_att_Amp_1__pwr_-6.75_dBm_Rep_0__/2021-09-14_0031_3_state_40dB_att_Amp_1__pwr_-6.75_dBm_Rep_0__.ddh5' -# filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0036_3_state_40dB_att_Amp_1__pwr_-6.5_dBm_Rep_0__/2021-09-14_0036_3_state_40dB_att_Amp_1__pwr_-6.5_dBm_Rep_0__.ddh5' -# filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0042_3_state_40dB_att_Amp_1__pwr_-6.25_dBm_Rep_1__/2021-09-14_0042_3_state_40dB_att_Amp_1__pwr_-6.25_dBm_Rep_1__.ddh5' -# filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0047_3_state_40dB_att_Amp_1__pwr_-6.0_dBm_Rep_1__/2021-09-14_0047_3_state_40dB_att_Amp_1__pwr_-6.0_dBm_Rep_1__.ddh5' - -#loopback for 
sat_discriminator -# filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\loopbacks\2021-09-30\2021-09-30_0009_3_state_loopback_0dB_att_Rep_0__\2021-09-30_0009_3_state_loopback_0dB_att_Rep_0__.ddh5' -# filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0002_3_state_deep_sat_40dB_att_Rep_0__\2021-09-30_0002_3_state_deep_sat_40dB_att_Rep_0__.ddh5' - -#in order of increasing power -# filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0003_3_state_deep_sat_40dB_att_Sig_Volt_0.2_V_Rep_0__\2021-09-30_0003_3_state_deep_sat_40dB_att_Sig_Volt_0.2_V_Rep_0__.ddh5' -# filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0004_3_state_deep_sat_40dB_att_Sig_Volt_0.25_V_Rep_0__\2021-09-30_0004_3_state_deep_sat_40dB_att_Sig_Volt_0.25_V_Rep_0__.ddh5' -# filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0005_3_state_deep_sat_40dB_att_Sig_Volt_0.3_V_Rep_0__\2021-09-30_0005_3_state_deep_sat_40dB_att_Sig_Volt_0.3_V_Rep_0__.ddh5' -filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0011_3_state_deep_sat_40dB_att_Sig_Volt_0.6_V_Rep_0__\2021-09-30_0011_3_state_deep_sat_40dB_att_Sig_Volt_0.6_V_Rep_0__.ddh5' - -#longer time: -# filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0017_3_state_deep_sat_40dB_att_8us_time_Rep_0__\2021-09-30_0017_3_state_deep_sat_40dB_att_8us_time_Rep_0__.ddh5' -filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0030_3_state_deep_sat_40dB_att_8us_time_Rep_4__\2021-09-30_0030_3_state_deep_sat_40dB_att_8us_time_Rep_4__.ddh5' - -#SWEEPING power -#0.55V -filepath = 
r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\2021-10-01_0076_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.55_V_\2021-10-01_0076_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.55_V_.ddh5' - -#0.6V -filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\2021-10-01_0077_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.6_V_\2021-10-01_0077_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.6_V_.ddh5' - -#0.95V -filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\2021-10-01_0084_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.95_V_\2021-10-01_0084_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.95_V_.ddh5' - -#1.1V -filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\2021-10-01_0088_3_state_deep_sat_40dB_att_2V_Sig_Volt_1.15_V_\2021-10-01_0088_3_state_deep_sat_40dB_att_2V_Sig_Volt_1.15_V_.ddh5' - -#WTF Trigger? - -import easygui -filepath = easygui.fileopenbox(default = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\*') - -# PU.get_normalizing_voltage_from_filepath(amp_off_filepath, plot = False, hist_scale = 0.01, records_per_pulsetype = 3870*2) -# IQ_offset = PU.get_IQ_offset_from_filepath(filepath, plot = False, hist_scale = 0.002, records_per_pulsetype = 3840*2) -# PU.get_fidelity_from_filepath_3_state(filepath, plot = True, hist_scale = 0.05, records_per_pulsetype = 2562, state_relabel = 0, bin_start = 50, bin_stop = 400) -PU.get_fidelity_from_filepath_3_state(filepath, plot = True, hist_scale = 0.05, records_per_pulsetype = 7686//3, state_relabel = 0, bin_start = 50, bin_stop = 400) -IQ_offset = (0,0) +if __name__ == 'main': + + IQ_offset = np.array((0,0)) + # records_per_pulsetype = 3870 + cf = 6171427180.18 + # amp_off_filepath = 
r'Z:/Data/C1/C1_Hakan/Gain_pt_0.103mA/Pump_power_sweeps/1/2021-06-30/2021-06-30_0011_LO_6152798714.0_pwr_-8.69_amp_1_rotation_phase_2.094/2021-06-30_0011_LO_6152798714.0_pwr_-8.69_amp_1_rotation_phase_2.094.ddh5' + # amp_off_filepath = r'Z:/Data/C1/C1_Hakan/Gain_pt_0.103mA/signal_power_sweeps/1_initial_guess/2021-07-06/2021-07-06_0003_Amp_0__LO_freq_6153298714.0_Hz_Sig_Volt_0.0_V_Phase_0.0_rad_/2021-07-06_0003_Amp_0__LO_freq_6153298714.0_Hz_Sig_Volt_0.0_V_Phase_0.0_rad_.ddh5' + + # filepath = r'G:/My Drive/shared/Amplifier_Response_Data/Data/Pump_pwr_detuning_sweeps/2021-07-07/2021-07-07_0409_Amp_1__pwr_-8.83_dBm_LO_freq_6172127180.18_Hz_Phase_0.0_rad_/2021-07-07_0409_Amp_1__pwr_-8.83_dBm_LO_freq_6172127180.18_Hz_Phase_0.0_rad_.ddh5' + + # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0002_3_state_40dB_att_Amp_0__pwr_-7.0_dBm_Rep_1__/2021-09-14_0002_3_state_40dB_att_Amp_0__pwr_-7.0_dBm_Rep_1__.ddh5' + filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0027_3_state_40dB_att_Amp_1__pwr_-7.0_dBm_Rep_1__/2021-09-14_0027_3_state_40dB_att_Amp_1__pwr_-7.0_dBm_Rep_1__.ddh5' + # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0034_3_state_40dB_att_Amp_1__pwr_-6.75_dBm_Rep_3__/2021-09-14_0034_3_state_40dB_att_Amp_1__pwr_-6.75_dBm_Rep_3__.ddh5' + # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0038_3_state_40dB_att_Amp_1__pwr_-6.5_dBm_Rep_2__/2021-09-14_0038_3_state_40dB_att_Amp_1__pwr_-6.5_dBm_Rep_2__.ddh5' + # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0044_3_state_40dB_att_Amp_1__pwr_-6.25_dBm_Rep_3__/2021-09-14_0044_3_state_40dB_att_Amp_1__pwr_-6.25_dBm_Rep_3__.ddh5' + # filepath = 
r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0046_3_state_40dB_att_Amp_1__pwr_-6.0_dBm_Rep_0__/2021-09-14_0046_3_state_40dB_att_Amp_1__pwr_-6.0_dBm_Rep_0__.ddh5' + + # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0001_3_state_40dB_att_Amp_0__pwr_-7.0_dBm_Rep_0__/2021-09-14_0001_3_state_40dB_att_Amp_0__pwr_-7.0_dBm_Rep_0__.ddh5' + # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0028_3_state_40dB_att_Amp_1__pwr_-7.0_dBm_Rep_2__/2021-09-14_0028_3_state_40dB_att_Amp_1__pwr_-7.0_dBm_Rep_2__.ddh5' + # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0031_3_state_40dB_att_Amp_1__pwr_-6.75_dBm_Rep_0__/2021-09-14_0031_3_state_40dB_att_Amp_1__pwr_-6.75_dBm_Rep_0__.ddh5' + # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0036_3_state_40dB_att_Amp_1__pwr_-6.5_dBm_Rep_0__/2021-09-14_0036_3_state_40dB_att_Amp_1__pwr_-6.5_dBm_Rep_0__.ddh5' + # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0042_3_state_40dB_att_Amp_1__pwr_-6.25_dBm_Rep_1__/2021-09-14_0042_3_state_40dB_att_Amp_1__pwr_-6.25_dBm_Rep_1__.ddh5' + # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0047_3_state_40dB_att_Amp_1__pwr_-6.0_dBm_Rep_1__/2021-09-14_0047_3_state_40dB_att_Amp_1__pwr_-6.0_dBm_Rep_1__.ddh5' + + #loopback for sat_discriminator + # filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\loopbacks\2021-09-30\2021-09-30_0009_3_state_loopback_0dB_att_Rep_0__\2021-09-30_0009_3_state_loopback_0dB_att_Rep_0__.ddh5' + # filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0002_3_state_deep_sat_40dB_att_Rep_0__\2021-09-30_0002_3_state_deep_sat_40dB_att_Rep_0__.ddh5' + + #in order of 
increasing power + # filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0003_3_state_deep_sat_40dB_att_Sig_Volt_0.2_V_Rep_0__\2021-09-30_0003_3_state_deep_sat_40dB_att_Sig_Volt_0.2_V_Rep_0__.ddh5' + # filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0004_3_state_deep_sat_40dB_att_Sig_Volt_0.25_V_Rep_0__\2021-09-30_0004_3_state_deep_sat_40dB_att_Sig_Volt_0.25_V_Rep_0__.ddh5' + # filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0005_3_state_deep_sat_40dB_att_Sig_Volt_0.3_V_Rep_0__\2021-09-30_0005_3_state_deep_sat_40dB_att_Sig_Volt_0.3_V_Rep_0__.ddh5' + filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0011_3_state_deep_sat_40dB_att_Sig_Volt_0.6_V_Rep_0__\2021-09-30_0011_3_state_deep_sat_40dB_att_Sig_Volt_0.6_V_Rep_0__.ddh5' + + #longer time: + # filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0017_3_state_deep_sat_40dB_att_8us_time_Rep_0__\2021-09-30_0017_3_state_deep_sat_40dB_att_8us_time_Rep_0__.ddh5' + filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0030_3_state_deep_sat_40dB_att_8us_time_Rep_4__\2021-09-30_0030_3_state_deep_sat_40dB_att_8us_time_Rep_4__.ddh5' + + #SWEEPING power + #0.55V + filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\2021-10-01_0076_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.55_V_\2021-10-01_0076_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.55_V_.ddh5' + + #0.6V + filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\2021-10-01_0077_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.6_V_\2021-10-01_0077_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.6_V_.ddh5' + + #0.95V + filepath = 
r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\2021-10-01_0084_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.95_V_\2021-10-01_0084_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.95_V_.ddh5' + + #1.1V + filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\2021-10-01_0088_3_state_deep_sat_40dB_att_2V_Sig_Volt_1.15_V_\2021-10-01_0088_3_state_deep_sat_40dB_att_2V_Sig_Volt_1.15_V_.ddh5' + + #WTF Trigger? + + import easygui + filepath = easygui.fileopenbox(default = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\*') + + # PU.get_normalizing_voltage_from_filepath(amp_off_filepath, plot = False, hist_scale = 0.01, records_per_pulsetype = 3870*2) + # IQ_offset = PU.get_IQ_offset_from_filepath(filepath, plot = False, hist_scale = 0.002, records_per_pulsetype = 3840*2) + # PU.get_fidelity_from_filepath_3_state(filepath, plot = True, hist_scale = 0.05, records_per_pulsetype = 2562, state_relabel = 0, bin_start = 50, bin_stop = 400) + PU.get_fidelity_from_filepath_3_state(filepath, plot = True, hist_scale = 0.05, records_per_pulsetype = 7686//3, state_relabel = 0, bin_start = 50, bin_stop = 400) + IQ_offset = (0,0) diff --git a/data_processing/ddh5_Plotting/General_reader.py b/data_processing/ddh5_Plotting/General_reader.py index 8e6d3c1..3af82e0 100644 --- a/data_processing/ddh5_Plotting/General_reader.py +++ b/data_processing/ddh5_Plotting/General_reader.py @@ -21,196 +21,199 @@ from matplotlib.widgets import Slider from matplotlib.lines import Line2D import easygui -#%% -cwd = r'H:\Data\Fridge Texas\General measurement\20180323_cable_measurement_after_upgrade' -if cwd == None: - raise Exception("CWD not chosen!") + +if __name__ == '__main__': -#%% -#Single File, single traces overlaid onto one-another if you choose: -def get_plot(file = None, IQ = False): - if file == None: - filepath = easygui.fileopenbox("Choose File", default = cwd) - file = h5py.File(filepath) - assert 
file != None - else: - filepath = file - file = h5py.File(filepath) + #%% + cwd = r'H:\Data\Fridge Texas\General measurement\20180323_cable_measurement_after_upgrade' + if cwd == None: + raise Exception("CWD not chosen!") - datasets = file.keys() - ind_var = easygui.choicebox("Pick the independent variable: ", choices = datasets) - # print(ind_var) - dep_vars = easygui.multchoicebox("Pick the dependent variable: ", choices = datasets) - # print(dep_var) - - ind_var_data = np.array(file[ind_var]) - dep_var_data = [] - for dep_var in dep_vars: - dep_var_data.append(np.array(file[dep_var])) + #%% + #Single File, single traces overlaid onto one-another if you choose: + def get_plot(file = None, IQ = False): + if file == None: + filepath = easygui.fileopenbox("Choose File", default = cwd) + file = h5py.File(filepath) + assert file != None + else: + filepath = file + file = h5py.File(filepath) + + datasets = file.keys() + ind_var = easygui.choicebox("Pick the independent variable: ", choices = datasets) + # print(ind_var) + dep_vars = easygui.multchoicebox("Pick the dependent variable: ", choices = datasets) + # print(dep_var) + + ind_var_data = np.array(file[ind_var]) + dep_var_data = [] + for dep_var in dep_vars: + dep_var_data.append(np.array(file[dep_var])) + + + return ind_var_data, dep_var_data, ind_var, dep_vars + #%% + ind_vars, dep_vars, ind_name, dep_names = get_plot() + # ind_vars2, dep_vars2, ind_name2, dep_name2 = get_plot() + #%% + print(dep_names) + print(dep_vars) + i = 0 + for (dep_var, dep_name) in zip(dep_vars,dep_names): + print(i) + plt.figure(i) + i+=1 + print("Max: {:.3f} \nMin: {:.3f}".format(np.max(dep_var), np.min(dep_var))) + for i in range(np.shape(np.shape(dep_var))[2]): + plt.plot(ind_vars, dep_var) + plt.xlabel(ind_name) + plt.ylabel(dep_name) + plt.title(f"{i}") + #%% + # #%% Get the data from multiple separate files + def compile_traces(): + num_traces = easygui.choicebox(choices = np.arange(1,6)) + i = 0 + trace_data = [] + freq_data = 
[] + while i < num_traces: + filepath = easygui.fileopenbox("Choose File #"+str(i+1), default = cwd) + file = h5py.File(filepath) + trace = np.array(file['noise']) #TODO: generalize to choice + freqs = np.array(file['Freq']) + trace_data.append(trace) + freq_data.append(freqs) + file.close() + i+=1 + return freq_data, trace_data - return ind_var_data, dep_var_data, ind_var, dep_vars -#%% -ind_vars, dep_vars, ind_name, dep_names = get_plot() -# ind_vars2, dep_vars2, ind_name2, dep_name2 = get_plot() -#%% -print(dep_names) -print(dep_vars) -i = 0 -for (dep_var, dep_name) in zip(dep_vars,dep_names): - print(i) - plt.figure(i) - i+=1 - print("Max: {:.3f} \nMin: {:.3f}".format(np.max(dep_var), np.min(dep_var))) - for i in range(np.shape(np.shape(dep_var))[2]): - plt.plot(ind_vars, dep_var) - plt.xlabel(ind_name) - plt.ylabel(dep_name) - plt.title(f"{i}") - -#%% -# #%% Get the data from multiple separate files -def compile_traces(): - num_traces = easygui.choicebox(choices = np.arange(1,6)) - i = 0 - trace_data = [] - freq_data = [] - while i < num_traces: - filepath = easygui.fileopenbox("Choose File #"+str(i+1), default = cwd) + #%% Extract 2d Data from h5 + def get_pcolor(cwd): + filepath = easygui.fileopenbox("Choose File", default = cwd) file = h5py.File(filepath) - trace = np.array(file['noise']) #TODO: generalize to choice - freqs = np.array(file['Freq']) - trace_data.append(trace) - freq_data.append(freqs) - file.close() + datasets = file.keys() + ind_vars = easygui.multchoicebox("Pick the independent variables: ", choices = datasets) + dep_vars = easygui.multchoicebox("Pick the dependent variables: ", choices = datasets) + #checking for data redundancy, i.e. 
if every line of an independent variable is the same, reduce it to just the first line + ind_var_data = [] + for ind_var in ind_vars: + ind_var_datum = np.array(file[ind_var]) + is_repetitive = True + for i in range(np.shape(ind_var_datum)[0]): + if np.any(ind_var_datum[i] != ind_var_datum [0]): + is_repetitive = False + if is_repetitive == True: + ind_var_datum = ind_var_datum[0] + ind_var_data.append(ind_var_datum) + dep_var_data = [] + for dep_var in dep_vars: + dep_var_data.append(np.array(file[dep_var])) + + return ind_var_data, ind_vars, dep_var_data, dep_vars + + ind_vars,ind_var_names, dep_vars, dep_var_names = get_pcolor(cwd) + # ind_vars2,ind_var_names2, dep_var2, dep_var_name2 = get_pcolor(cwd) + #%%Plot Pcolor(s) + #TODO: incorporate into get_pcolor with more color choices, etc + i = 0 + for (dep_var, dep_var_name) in zip(dep_vars, dep_var_names): + # print(dep_var) + plt.figure(i) i+=1 - return freq_data, trace_data - -#%% Extract 2d Data from h5 -def get_pcolor(cwd): - filepath = easygui.fileopenbox("Choose File", default = cwd) - file = h5py.File(filepath) - datasets = file.keys() - ind_vars = easygui.multchoicebox("Pick the independent variables: ", choices = datasets) - dep_vars = easygui.multchoicebox("Pick the dependent variables: ", choices = datasets) - #checking for data redundancy, i.e. 
if every line of an independent variable is the same, reduce it to just the first line - ind_var_data = [] - for ind_var in ind_vars: - ind_var_datum = np.array(file[ind_var]) - is_repetitive = True - for i in range(np.shape(ind_var_datum)[0]): - if np.any(ind_var_datum[i] != ind_var_datum [0]): - is_repetitive = False - if is_repetitive == True: - ind_var_datum = ind_var_datum[0] - ind_var_data.append(ind_var_datum) - dep_var_data = [] - for dep_var in dep_vars: - dep_var_data.append(np.array(file[dep_var])) + ind_avgs = [np.average(i) for i in ind_vars] + dep_avg = np.average(dep_var) + colors = [color.hex2color('#0000FF'), color.hex2color('#FFFFFF'), color.hex2color('#FF0000')] + _cmap = color.LinearSegmentedColormap.from_list('my_cmap', colors) + adj = 0 + graph = dep_var.T + low = np.min(dep_var) + high = np.max(dep_var) + # low = -13 + # high = 6 + _norm = color.Normalize(vmin = low, vmax = high) + # x = np.min(dep_var) + # y = np.max(dep_var) + plt.pcolormesh((ind_vars[0]),ind_vars[1],graph, cmap = _cmap, norm = _norm) + plt.colorbar(label = dep_var_name) + plt.gca().get_xaxis().get_major_formatter().set_useOffset(False) + plt.gca().get_yaxis().get_major_formatter().set_useOffset(False) + plt.xlabel( ind_var_names[0]) + plt.ylabel(ind_var_names[1]) + # plt.title('SHARC33 A_to_S_leakage') + #%% Now extract a line cut from that 2d plot - return ind_var_data, ind_vars, dep_var_data, dep_vars + #from the plot, we're going to want to be able to specify one value of one of + #the independent variables, then the line cuts across the other one at that + #value -ind_vars,ind_var_names, dep_vars, dep_var_names = get_pcolor(cwd) -# ind_vars2,ind_var_names2, dep_var2, dep_var_name2 = get_pcolor(cwd) -#%%Plot Pcolor(s) -#TODO: incorporate into get_pcolor with more color choices, etc -i = 0 -for (dep_var, dep_var_name) in zip(dep_vars, dep_var_names): - # print(dep_var) - plt.figure(i) - i+=1 - ind_avgs = [np.average(i) for i in ind_vars] - dep_avg = np.average(dep_var) - 
colors = [color.hex2color('#0000FF'), color.hex2color('#FFFFFF'), color.hex2color('#FF0000')] - _cmap = color.LinearSegmentedColormap.from_list('my_cmap', colors) - adj = 0 - graph = dep_var.T - low = np.min(dep_var) - high = np.max(dep_var) - # low = -13 - # high = 6 - _norm = color.Normalize(vmin = low, vmax = high) - # x = np.min(dep_var) - # y = np.max(dep_var) - plt.pcolormesh((ind_vars[0]),ind_vars[1],graph, cmap = _cmap, norm = _norm) - plt.colorbar(label = dep_var_name) - plt.gca().get_xaxis().get_major_formatter().set_useOffset(False) - plt.gca().get_yaxis().get_major_formatter().set_useOffset(False) - plt.xlabel( ind_var_names[0]) - plt.ylabel(ind_var_names[1]) - # plt.title('SHARC33 A_to_S_leakage') -#%% Now extract a line cut from that 2d plot - -#from the plot, we're going to want to be able to specify one value of one of -#the independent variables, then the line cuts across the other one at that -#value - -#ideally this would be a slider in the plot window, but whatever -def linecut_data(ind_vars, ind_var_names, dep_vars, dep_var_names): - dep_var_name = easygui.choicebox("Choose dependent variable of cut", choices = dep_var_names) - dep_var = dep_vars[dep_var_names.index(dep_var_name)] - cut_var_name = easygui.choicebox("Choose variable you would like to cut along:", choices = ind_var_names) - cut_name_index = list(ind_var_names).index(cut_var_name) #only lists have the .index method :( - #Sometimes the order of names does not correspond to the ordering of indices in dep_var, this takes care of that - cut_index = np.shape(dep_var).index(len(ind_vars[cut_name_index])) - cut_var_val = float(easygui.choicebox("Choose value of the cut variable:", choices = ind_vars[cut_name_index])) - cut_var_val_index = list(ind_vars[cut_name_index]).index(cut_var_val) - #we know it is 2d data, so if cut_index is 0, we want a [cut_var_index,:] cut, else [:,cut_var_index] - #TODO: find more pythonic solution - if cut_index == 0: - cut_dep_data = 
dep_var[cut_var_val_index, :] + #ideally this would be a slider in the plot window, but whatever + def linecut_data(ind_vars, ind_var_names, dep_vars, dep_var_names): + dep_var_name = easygui.choicebox("Choose dependent variable of cut", choices = dep_var_names) + dep_var = dep_vars[dep_var_names.index(dep_var_name)] + cut_var_name = easygui.choicebox("Choose variable you would like to cut along:", choices = ind_var_names) + cut_name_index = list(ind_var_names).index(cut_var_name) #only lists have the .index method :( + #Sometimes the order of names does not correspond to the ordering of indices in dep_var, this takes care of that + cut_index = np.shape(dep_var).index(len(ind_vars[cut_name_index])) + cut_var_val = float(easygui.choicebox("Choose value of the cut variable:", choices = ind_vars[cut_name_index])) + cut_var_val_index = list(ind_vars[cut_name_index]).index(cut_var_val) + #we know it is 2d data, so if cut_index is 0, we want a [cut_var_index,:] cut, else [:,cut_var_index] + #TODO: find more pythonic solution + if cut_index == 0: + cut_dep_data = dep_var[cut_var_val_index, :] + + elif cut_index == 1: + cut_dep_data = dep_var[:, cut_var_val_index] + + cut_ind_data = ind_vars[int(not cut_index)] #this logic just makes 0's into 1's and vice-versa + cut_ind_name = ind_var_names[int(not cut_index)] + cut_dep_name = "{} at {} = {:.3f}".format(dep_var_name, cut_var_name, cut_var_val) - elif cut_index == 1: - cut_dep_data = dep_var[:, cut_var_val_index] - - cut_ind_data = ind_vars[int(not cut_index)] #this logic just makes 0's into 1's and vice-versa - cut_ind_name = ind_var_names[int(not cut_index)] - cut_dep_name = "{} at {} = {:.3f}".format(dep_var_name, cut_var_name, cut_var_val) + return cut_ind_data, cut_ind_name, cut_dep_data, cut_dep_name + + cut_ind_data, cut_ind_name, cut_dep_data, cut_dep_name = linecut_data(ind_vars,ind_var_names, dep_vars, dep_var_names) + # cut_ind_data2, cut_ind_name2, cut_dep_data2, cut_dep_name2 = 
linecut_data(ind_vars,ind_var_names, dep_var, dep_var_name) + #%% Plot the linecut + + plt.plot(cut_ind_data, cut_dep_data, '-', label = 'Forward') + # plt.plot(cut_ind_data2, cut_dep_data2, '-', label = 'Backward') + plt.title(cut_dep_name) + plt.xlabel(cut_ind_name) + plt.ylabel("") - return cut_ind_data, cut_ind_name, cut_dep_data, cut_dep_name + #%% Extract info from traces + def get_min_ders(data): + ders = [] + der_max_locs = [] #should correspond to bifurcation + der_max_freqs = [] + for trace in mags: + der = np.gradient(trace) + der_max_loc = list(np.where(der == np.min(der))[0])[0] + der_max_locs.append(der_max_loc) + der_max_freq = freqs[der_max_loc] + der_max_freqs.append(der_max_freq) + ders.append(der) + return(der_max_freqs) -cut_ind_data, cut_ind_name, cut_dep_data, cut_dep_name = linecut_data(ind_vars,ind_var_names, dep_vars, dep_var_names) -# cut_ind_data2, cut_ind_name2, cut_dep_data2, cut_dep_name2 = linecut_data(ind_vars,ind_var_names, dep_var, dep_var_name) -#%% Plot the linecut - -plt.plot(cut_ind_data, cut_dep_data, '-', label = 'Forward') -# plt.plot(cut_ind_data2, cut_dep_data2, '-', label = 'Backward') -plt.title(cut_dep_name) -plt.xlabel(cut_ind_name) -plt.ylabel("") - -#%% Extract info from traces -def get_min_ders(data): - ders = [] - der_max_locs = [] #should correspond to bifurcation - der_max_freqs = [] - for trace in mags: - der = np.gradient(trace) - der_max_loc = list(np.where(der == np.min(der))[0])[0] - der_max_locs.append(der_max_loc) - der_max_freq = freqs[der_max_loc] - der_max_freqs.append(der_max_freq) - ders.append(der) - return(der_max_freqs) - -#%% -#Fit Function -Ffun = lambda w: np.log10((1+(1-3/(w**2))**(3/2)+9/(w**2))*w**3) -Ffun2 = lambda w: np.log10((1-(1-3/(w**2))**(3/2)+9/(w**2))*w**3) -start = 0 -end = 1600 -sf = freqs[800] -fitfreqs = np.flip(freqs[start:end])+(1+np.sqrt(3))*sf -plt.plot(np.flip(fitfreqs), Ffun((fitfreqs)), label = 'Rough "Fit" upper') -plt.plot(np.flip(fitfreqs), Ffun2((fitfreqs)), label = 
'Rough "Fit" lower') -#%% -plt.plot(get_min_ders(dep_var), ind_vars[0], label = 'forward') -plt.plot(get_min_ders(dep_var2),ind_vars[0], label = 'backward') -plt.legend(loc = 'right') -plt.title("Minimum derivative points") -plt.ylabel("VNA Power") -plt.xlabel("Frequency") -#%% process a pcolr (Duffing test plot) + #%% + #Fit Function + Ffun = lambda w: np.log10((1+(1-3/(w**2))**(3/2)+9/(w**2))*w**3) + Ffun2 = lambda w: np.log10((1-(1-3/(w**2))**(3/2)+9/(w**2))*w**3) + start = 0 + end = 1600 + sf = freqs[800] + fitfreqs = np.flip(freqs[start:end])+(1+np.sqrt(3))*sf + plt.plot(np.flip(fitfreqs), Ffun((fitfreqs)), label = 'Rough "Fit" upper') + plt.plot(np.flip(fitfreqs), Ffun2((fitfreqs)), label = 'Rough "Fit" lower') + #%% + plt.plot(get_min_ders(dep_var), ind_vars[0], label = 'forward') + plt.plot(get_min_ders(dep_var2),ind_vars[0], label = 'backward') + plt.legend(loc = 'right') + plt.title("Minimum derivative points") + plt.ylabel("VNA Power") + plt.xlabel("Frequency") + #%% process a pcolr (Duffing test plot) diff --git a/data_processing/ddh5_Plotting/utility_modules/FS_utility_functions.py b/data_processing/ddh5_Plotting/utility_modules/FS_utility_functions.py index 34df3a2..6f6cca8 100644 --- a/data_processing/ddh5_Plotting/utility_modules/FS_utility_functions.py +++ b/data_processing/ddh5_Plotting/utility_modules/FS_utility_functions.py @@ -48,10 +48,47 @@ def default_bounds(self, QextGuess, QintGuess, f0Guess, magBackGuess): [QextGuess * 1.5, QintGuess +200, f0Guess*1.5, magBackGuess * 5.0, 2 * np.pi]) def initial_fit(self, f0Guess, QextGuess = 50, QintGuess = 300, magBackGuess = 0.0001, bounds = None, smooth = False, smooth_win = 11, phaseOffGuess = 0, debug = False, adaptive_window = False, adapt_win_size = 300e6, start_current_index = 0): + ''' + S + + Parameters + ---------- + f0Guess : TYPE + DESCRIPTION. + QextGuess : TYPE, optional + DESCRIPTION. The default is 50. + QintGuess : TYPE, optional + DESCRIPTION. The default is 300. 
+ magBackGuess : TYPE, optional + DESCRIPTION. The default is 0.0001. + bounds : TYPE, optional + DESCRIPTION. The default is None. + smooth : TYPE, optional + DESCRIPTION. The default is False. + smooth_win : TYPE, optional + DESCRIPTION. The default is 11. + phaseOffGuess : TYPE, optional + DESCRIPTION. The default is 0. + debug : TYPE, optional + DESCRIPTION. The default is False. + adaptive_window : TYPE, optional + DESCRIPTION. The default is False. + adapt_win_size : TYPE, optional + DESCRIPTION. The default is 300e6. + start_current_index : TYPE, optional + DESCRIPTION. The default is 0. + + Returns + ------- + None. + + ''' f0Guess = f0Guess*2*np.pi if bounds == None: bounds=self.default_bounds(QextGuess, QintGuess, f0Guess, magBackGuess) + #this is how we deal with the fact that we don't know the shape of the data + #filt contains all the indices we want for a particular current filt = (self.currents == np.unique(self.currents)[start_current_index]) if adaptive_window: @@ -63,6 +100,8 @@ def initial_fit(self, f0Guess, QextGuess = 50, QintGuess = 300, magBackGuess = 0 init_phase_trace = self.undriven_vna_phase[filt] init_pow_trace = self.undriven_vna_power[filt] + print(init_vna_freqs) + if debug: plt.figure(1) plt.plot(init_vna_freqs/(2*np.pi), init_phase_trace) @@ -106,7 +145,9 @@ def save_fit(self, current, base_popt, base_pconv): base_Qint_error = np.sqrt(base_pconv[1, 1]), base_Qext_error = np.sqrt(base_pconv[0, 0]), ) - def semiauto_fit(self, bias_currents, vna_freqs, vna_mags, vna_phases, popt, debug = False, savedata = False, smooth = False, smooth_win = 11, adaptive_window = False, adapt_win_size = 300e6, fourier_filter = False, fourier_cutoff = 40, pconv_tol = 2): + + def semiauto_fit(self, bias_currents, vna_freqs, vna_mags, vna_phases, popt, pconv, debug = False, savedata = False, smooth = False, smooth_win = 11, adaptive_window = False, adapt_win_size = 300e6, fourier_filter = False, fourier_cutoff = 40, pconv_tol = 2): + res_freqs = 
np.zeros(np.size(np.unique(bias_currents))) Qints = np.zeros(np.size(np.unique(bias_currents))) Qexts = np.zeros(np.size(np.unique(bias_currents))) @@ -133,7 +174,7 @@ def semiauto_fit(self, bias_currents, vna_freqs, vna_mags, vna_phases, popt, deb plt.title('before filter') imag = idct(dct(imag)[fourier_cutoff:]) real = idct(dct(real)[fourier_cutoff:]) - if debug: + if debug: plt.figure(4) plt.plot(real) plt.plot(imag) @@ -156,8 +197,9 @@ def semiauto_fit(self, bias_currents, vna_freqs, vna_mags, vna_phases, popt, deb filt = np.ones(np.size(first_trace_freqs)).astype(bool) bounds=self.default_bounds(QextGuess, QintGuess, f0Guess, magBackGuess) + if i>2: - prev_pconv = pconv + prev_pconv = self.initial_pconv #fit(freq, real, imag, mag, phase, Qguess=(2e3, 1e3),real_only = 0, bounds = None) popt, pconv = fit(first_trace_freqs[filt], real[filt], imag[filt], first_trace_mag, first_trace_phase, Qguess = (QextGuess,QintGuess), f0Guess = f0Guess, real_only = 0, bounds = bounds, magBackGuess = magBackGuess) @@ -183,7 +225,6 @@ def semiauto_fit(self, bias_currents, vna_freqs, vna_mags, vna_phases, popt, deb pconv_diff_ratio = (np.array(pconv[0,0], pconv[1,1])-np.array(prev_pconv[0,0], prev_pconv[1,1]))/np.array(prev_pconv[0,0], prev_pconv[1,1]) j+=1 - if debug: import time plotRes(first_trace_freqs[filt], real[filt], imag[filt], first_trace_mag[filt], first_trace_phase[filt], popt) @@ -203,4 +244,229 @@ def semiauto_fit(self, bias_currents, vna_freqs, vna_mags, vna_phases, popt, deb if savedata: self.save_fit(current, popt, pconv) - return np.unique(bias_currents), res_freqs, Qints, Qexts, magBacks \ No newline at end of file + return np.unique(bias_currents), res_freqs, Qints, Qexts, magBacks + + +class fit_fluxsweep_P(): + def __init__(self, Flux_filepath, save_filepath, name): + #setup files + self.name = name + self.datadict = dd.DataDict( + current = dict(unit='A'), + + base_resonant_frequency = dict(axes = ['current']), + base_Qint = dict(axes = ['current']), + 
base_Qext = dict(axes = ['current']), + + base_resonant_frequency_error = dict(axes = ['current']), + base_Qint_error = dict(axes = ['current']), + base_Qext_error = dict(axes = ['current']), + ) + self.datadir = save_filepath + self.writer = dds.DDH5Writer(self.datadir, self.datadict, name=self.name) + self.writer.__enter__() + + #Duffing/FS Data Extraction + duff_dicts = all_datadicts_from_hdf5(Flux_filepath) + duffDict = duff_dicts['data'] + uvphDict = duffDict.extract('phase') + uvpoDict = duffDict.extract('power') + + #get the arrays back out + self.undriven_vna_phase = uvphDict.data_vals('phase') + self.undriven_vna_power = uvpoDict.data_vals('power') + + lin = 10**(self.undriven_vna_power/20) + + self.imag = lin * np.sin(self.undriven_vna_phase) + self.real = lin * np.cos(self.undriven_vna_phase) + + self.vna_freqs = uvphDict.data_vals('frequency')*2*np.pi + self.currents = uvphDict.data_vals('current') + + num_currents = len(np.unique(self.currents)) + num_freqs = len(self.currents)//num_currents + + self.shape = (num_currents, num_freqs) + + def rough_fit(self): + + ''' + this code is hacky and has lots of inconsistent notation, watch out + before it gets fixed + ''' + + current = np.reshape(self.currents,self.shape) + frequency = np.reshape(self.vna_freqs,self.shape) + phase = np.reshape(self.undriven_vna_phase,self.shape) + power = np.reshape(self.undriven_vna_power,self.shape) + + # the following basically accomplishes a centered finite difference + + diff1 = np.diff(phase,axis=1) #left to right + diff2 = np.flip(np.diff(np.flip(phase,axis=1),axis=1),axis=1) # right to left + diff = phase*0 + diff[:,:-1] = diff[:,:-1] + diff1/2 + diff[:,1:] = diff[:,1:] - diff2/2 + + # then to find 'expectation value' of frequency, weighted by difference (which helps find smooth curve) + + current = current[:,0] + + res_point = current*0 + rough_Q = current*0 + rough_magBack = current*0 + window_left = np.array(current*0,dtype=np.int32) + window_right = 
np.array(current*0,dtype=np.int32) + + for i in range(0,self.shape[0]): + + frequency_i = frequency[i,:] + diff_i = diff[i,:] + power_i = power[i,:] + + max_i = np.max(diff_i) + index = np.where(diff_i == max_i)[0][0] + res_point[i] = frequency_i[index] + + j = 0 + + power_avg = np.mean(power_i) # usually close to zero, but just in case there's a significant slope + + peak_height = np.abs(power_i[index] -power_avg) + + ### linewidth estimation + + # check to left for the index at which the peak in the diff curve falls off by 80% + while(True): + + if (index+j == 0): + break + + current_height = np.abs(power_i[index+j] -power_avg) + + if (current_height < 0.5*peak_height): + break + + j = j - 1 + + left_index = index + j # save the index + + window_left[i] = int(np.max([0, index+j*10])) # set the window to 5 x the kappa size + + j = 0 # then do the right side + + while(True): + + if (index+j > len(current)-1): + break + + current_height = np.abs(power_i[index+j] - power_avg) + + if (current_height < 0.5*peak_height): + break + + j = j + 1 + + right_index = index + j + + window_right[i] = int(np.min([len(frequency_i)-1, index+j*10])) + + # then the linewidth kappa will probabily be close to the + # frequency difference between the left and right indices + + rough_kappa = frequency_i[right_index] - frequency_i[left_index] + + rough_Q[i] = frequency_i[index]/rough_kappa + + lin = 10**(power[i,:] / 20.0) + + rough_magBack[i] = np.mean(lin) + + + self.rough_f0 = res_point + self.rough_Q = rough_Q + self.rough_magBack = rough_magBack + self.window_left = window_left + self.window_right = window_right + + def good_fit(self): + + current = np.reshape(self.currents,self.shape) + frequency = np.reshape(self.vna_freqs,self.shape) + imag = np.reshape(self.imag,self.shape) + real = np.reshape(self.real,self.shape) + phase = np.reshape(self.undriven_vna_phase,self.shape) + power = np.reshape(self.undriven_vna_power,self.shape) + + current = current[:,0] + frequency = 
frequency[0,:] + + f0_fit = current*0 + Qint_fit = current*0 + Qext_fit = current*0 + + for i in range(0,len(current)): + bounds = self.default_bounds(self.rough_Q[i], self.rough_Q[i], self.rough_f0[i], self.rough_magBack[i]) + + window_indices = range(self.window_left[i],self.window_right[i]) + popt, pconv = fit(frequency[window_indices], real[i,window_indices], imag[i,window_indices], power[i,window_indices], phase[i,window_indices], Qguess=(self.rough_Q[i], self.rough_Q[i]), real_only = 0, bounds=bounds, f0Guess = self.rough_f0[i], magBackGuess = self.rough_magBack[i], phaseGuess = 0, debug = False) + + bounds = self.default_bounds(popt[0], popt[1], popt[2], self.rough_magBack[i]) + + # fit again with the results of the first fit as guesses....this seems to improve convergence a lot + + popt, pconv = fit(frequency[window_indices], real[i,window_indices], imag[i,window_indices], power[i,window_indices], phase[i,window_indices], Qguess=(popt[0], popt[1]), real_only = 0, bounds=bounds, f0Guess = popt[2], magBackGuess = self.rough_magBack[i], phaseGuess = 0, debug = False) + + f0_fit[i] = popt[2]/(2*np.pi) + print(f0_fit[i]) + Qint_fit[i] = popt[1] + Qext_fit[i] = popt[0] + if i == 30: + plotRes(frequency[window_indices],real[i,window_indices], imag[i,window_indices], power[i,window_indices], phase[i,window_indices],popt) + + self.f0_fit = f0_fit + self.Qint_fit = Qint_fit + self.Qext_fit = Qext_fit + + return current, frequency, imag, real, phase, power, bounds, window_indices + + def save_fit(self, current, base_popt, base_pconv): + self.writer.add_data( + current = current, + + base_resonant_frequency = base_popt[2]/(2*np.pi), + base_Qint = base_popt[1], + base_Qext = base_popt[0], + + base_resonant_frequency_error = np.sqrt(base_pconv[2, 2])/(2*np.pi), + base_Qint_error = np.sqrt(base_pconv[1, 1]), + base_Qext_error = np.sqrt(base_pconv[0, 0]), + ) + + def default_bounds(self, QextGuess, QintGuess, f0Guess, magBackGuess): + return ([QextGuess / 1.5, QintGuess / 
1.5, f0Guess/2, magBackGuess / 5.0, -2 * np.pi], + [QextGuess * 1.5, QintGuess +200, f0Guess*1.5, magBackGuess * 5.0, 2 * np.pi]) + +FFS = fit_fluxsweep_P('//136.142.53.51/data001/Data/SH_5B1/fluxsweep/SNAIL/2021-10-21/2021-10-21_0002_fluxsweep_-60dBm/2021-10-21_0002_fluxsweep_-60dBm.ddh5','//136.142.53.51/data001/Data/SH_5B1/fluxsweep/SNAIL/fits','FFS_parallel') +FFS.rough_fit() +current, frequency, imag, real, phase, power, bounds, window_indices = FFS.good_fit() +# FFS = fit_fluxsweep('//136.142.53.51/data001/Data/SA_3C1_3221/2021-11-12/2021-11-12_0001_fluxsweep_-73dBm/2021-11-12_0001_fluxsweep_-73dBm.ddh5',r'\\136.142.53.51\data001\Data\SA_3C1_3221\2021-11-12','finer_FFS') +# FFS.initial_fit(8e9, QextGuess = 263, QintGuess = 321, magBackGuess = 0.1, bounds = None, smooth = False, smooth_win = 11, phaseOffGuess = 0, debug = True, adaptive_window = True, adapt_win_size = 300e6, start_current_index = 0) + +#%% + +# i = len(current)-1 + +# popt, pconv = fit(frequency[window_indices], real[i,window_indices], imag[i,window_indices], power[i,window_indices], phase[i,window_indices], Qguess=(400, 400), real_only = 0, f0Guess = 7.53e9*2*np.pi, magBackGuess = 0.1, phaseGuess = 0, debug = True) +# plotRes(frequency[window_indices]/(2*np.pi),real[i,window_indices], imag[i,window_indices], power[i,window_indices], phase[i,window_indices],popt) + + + + + + + + + + diff --git a/data_processing/fitting/4th_Order_Fitter.py b/data_processing/fitting/4th_Order_Fitter.py index c51a675..ec89a82 100644 --- a/data_processing/fitting/4th_Order_Fitter.py +++ b/data_processing/fitting/4th_Order_Fitter.py @@ -22,192 +22,194 @@ from matplotlib.lines import Line2D import easygui -cwd = easygui.diropenbox('Select where you are working') -print(cwd) -u#%% -#Single File, single traces overlaid onto one-another if you choose: -def get_plot(file = None): - if file == None: +if __name__ == '__main__': + + cwd = easygui.diropenbox('Select where you are working') + print(cwd) + #%% + #Single File, 
single traces overlaid onto one-another if you choose: + def get_plot(file = None): + if file == None: + filepath = easygui.fileopenbox("Choose File", default = cwd) + file = h5py.File(filepath) + assert file != None + datasets = file.keys() + ind_var = easygui.choicebox("Pick the independent variable: ", choices = datasets) + print(ind_var) + dep_var = easygui.choicebox("Pick the dependent variable: ", choices = datasets) + print(dep_var) + + ind_var_data = np.array(file[ind_var]) + dep_var_data = np.array(file[dep_var]) + + + return ind_var_data, dep_var_data, ind_var, dep_var + + ind_vars, dep_vars, ind_name, dep_name = get_plot() + # ind_vars2, dep_vars2, ind_name2, dep_name2 = get_plot() + + #%% + print("Max: {:.3f} \nMin: {:.3f}".format(np.max(dep_vars), np.min(dep_vars))) + plt.close("all") + plt.plot(ind_vars, dep_vars, label = "Forward") + # plt.plot(ind_vars2[0], dep_vars2[0], label = "Backward") + plt.legend() + plt.xlabel(ind_name) + plt.ylabel(dep_name) + plt.title(ind_name +' vs.'+dep_name) + + #%% + # #%% Get the data from multiple separate files + def compile_traces(): + num_traces = easygui.choicebox(choices = np.arange(1,6)) + i = 0 + trace_data = [] + freq_data = [] + while i < num_traces: + filepath = easygui.fileopenbox("Choose File #"+str(i+1), default = cwd) + file = h5py.File(filepath) + trace = np.array(file['noise']) #TODO: generalize to choice + freqs = np.array(file['Freq']) + trace_data.append(trace) + freq_data.append(freqs) + file.close() + i+=1 + return freq_data, trace_data + + #%% Extract 2d Data from h5 + def get_pcolor(cwd): filepath = easygui.fileopenbox("Choose File", default = cwd) file = h5py.File(filepath) - assert file != None - datasets = file.keys() - ind_var = easygui.choicebox("Pick the independent variable: ", choices = datasets) - print(ind_var) - dep_var = easygui.choicebox("Pick the dependent variable: ", choices = datasets) - print(dep_var) + datasets = file.keys() + ind_vars = easygui.multchoicebox("Pick the 
independent variables: ", choices = datasets) + print(ind_vars) + dep_var_name = easygui.choicebox("Pick the dependent variable: ", choices = datasets) + print(dep_var_name) + #checking for data redundancy, i.e. if every line of an independent variable is the same, reduce it to just the first line + ind_var_data = [] + for ind_var in ind_vars: + ind_var_datum = np.array(file[ind_var]) + is_repetitive = True + for i in range(np.shape(ind_var_datum)[0]): + if np.any(ind_var_datum[i] != ind_var_datum [0]): + is_repetitive = False + if is_repetitive == True: + ind_var_datum = ind_var_datum[0] + ind_var_data.append(ind_var_datum) + + dep_var_data = np.array(file[dep_var_name]) + + return ind_var_data, ind_vars, dep_var_data, dep_var_name + + ind_vars,ind_var_names, dep_var, dep_var_name = get_pcolor(cwd) + # ind_vars2,ind_var_names2, dep_var2, dep_var_name2 = get_pcolor(cwd) - ind_var_data = np.array(file[ind_var]) - dep_var_data = np.array(file[dep_var]) + #%%Plot Pcolor + #TODO: incorporate into get_pcolor with more color choices, etc + ind_avgs = [np.average(i) for i in ind_vars] + dep_avg = np.average(dep_var) + colors = [color.hex2color('#0000FF'), color.hex2color('#FFFFFF'), color.hex2color('#FF0000')] + _cmap = color.LinearSegmentedColormap.from_list('my_cmap', colors) + adj = 5 + _norm = color.Normalize(vmin = np.min(dep_var)+adj, vmax = np.max(dep_var)-adj) + # x = np.min(dep_var) + # y = np.max(dep_var) + plt.pcolormesh((ind_vars[1])/1e6,ind_vars[0], dep_var, cmap = _cmap, norm = _norm) + plt.colorbar(label = 'S21 Phase (Deg)') + plt.gca().get_xaxis().get_major_formatter().set_useOffset(False) + plt.gca().get_yaxis().get_major_formatter().set_useOffset(False) + plt.xlabel( 'VNA_Frequency (MHz)') + plt.ylabel('VNA Input Power (dBm)') + plt.title('S21 Phase vs. 
Frequency and Input Power') + #%% Now extract a line cut from that 2d plot + #from the plot, we're going to want to be able to specify one value of one of + #the independent variables, then the line cuts across the other one at that + #value - return ind_var_data, dep_var_data, ind_var, dep_var - -ind_vars, dep_vars, ind_name, dep_name = get_plot() -# ind_vars2, dep_vars2, ind_name2, dep_name2 = get_plot() - -#%% -print("Max: {:.3f} \nMin: {:.3f}".format(np.max(dep_vars), np.min(dep_vars))) -plt.close("all") -plt.plot(ind_vars, dep_vars, label = "Forward") -# plt.plot(ind_vars2[0], dep_vars2[0], label = "Backward") -plt.legend() -plt.xlabel(ind_name) -plt.ylabel(dep_name) -plt.title(ind_name +' vs.'+dep_name) - -#%% -# #%% Get the data from multiple separate files -def compile_traces(): - num_traces = easygui.choicebox(choices = np.arange(1,6)) - i = 0 - trace_data = [] - freq_data = [] - while i < num_traces: - filepath = easygui.fileopenbox("Choose File #"+str(i+1), default = cwd) - file = h5py.File(filepath) - trace = np.array(file['noise']) #TODO: generalize to choice - freqs = np.array(file['Freq']) - trace_data.append(trace) - freq_data.append(freqs) - file.close() - i+=1 - return freq_data, trace_data - -#%% Extract 2d Data from h5 -def get_pcolor(cwd): - filepath = easygui.fileopenbox("Choose File", default = cwd) - file = h5py.File(filepath) - datasets = file.keys() - ind_vars = easygui.multchoicebox("Pick the independent variables: ", choices = datasets) - print(ind_vars) - dep_var_name = easygui.choicebox("Pick the dependent variable: ", choices = datasets) - print(dep_var_name) - #checking for data redundancy, i.e. 
if every line of an independent variable is the same, reduce it to just the first line - ind_var_data = [] - for ind_var in ind_vars: - ind_var_datum = np.array(file[ind_var]) - is_repetitive = True - for i in range(np.shape(ind_var_datum)[0]): - if np.any(ind_var_datum[i] != ind_var_datum [0]): - is_repetitive = False - if is_repetitive == True: - ind_var_datum = ind_var_datum[0] - ind_var_data.append(ind_var_datum) + #ideally this would be a slider in the plot window, but whatever + def linecut_data(ind_vars, ind_var_names, dep_var, dep_var_name): + cut_var_name = easygui.choicebox("Choose variable you would like to cut along:", choices = ind_var_names) + cut_name_index = list(ind_var_names).index(cut_var_name) #only lists have the .index method :( + #Sometimes the order of names does not correspond to the ordering of indices in dep_var, this takes care of that + cut_index = np.shape(dep_var).index(len(ind_vars[cut_name_index])) + cut_var_val = float(easygui.choicebox("Choose value of the cut variable:", choices = ind_vars[cut_name_index])) + cut_var_val_index = list(ind_vars[cut_name_index]).index(cut_var_val) + #we know it is 2d data, so if cut_index is 0, we want a [cut_var_index,:] cut, else [:,cut_var_index] + print(dep_var_name) + #TODO: find more pythonic solution + if cut_index == 0: + cut_dep_data = dep_var[cut_var_val_index, :] - dep_var_data = np.array(file[dep_var_name]) + elif cut_index == 1: + cut_dep_data = dep_var[:, cut_var_val_index] - return ind_var_data, ind_vars, dep_var_data, dep_var_name - -ind_vars,ind_var_names, dep_var, dep_var_name = get_pcolor(cwd) -# ind_vars2,ind_var_names2, dep_var2, dep_var_name2 = get_pcolor(cwd) - -#%%Plot Pcolor -#TODO: incorporate into get_pcolor with more color choices, etc -ind_avgs = [np.average(i) for i in ind_vars] -dep_avg = np.average(dep_var) -colors = [color.hex2color('#0000FF'), color.hex2color('#FFFFFF'), color.hex2color('#FF0000')] -_cmap = color.LinearSegmentedColormap.from_list('my_cmap', colors) 
-adj = 5 -_norm = color.Normalize(vmin = np.min(dep_var)+adj, vmax = np.max(dep_var)-adj) -# x = np.min(dep_var) -# y = np.max(dep_var) -plt.pcolormesh((ind_vars[1])/1e6,ind_vars[0], dep_var, cmap = _cmap, norm = _norm) -plt.colorbar(label = 'S21 Phase (Deg)') -plt.gca().get_xaxis().get_major_formatter().set_useOffset(False) -plt.gca().get_yaxis().get_major_formatter().set_useOffset(False) -plt.xlabel( 'VNA_Frequency (MHz)') -plt.ylabel('VNA Input Power (dBm)') -plt.title('S21 Phase vs. Frequency and Input Power') -#%% Now extract a line cut from that 2d plot - -#from the plot, we're going to want to be able to specify one value of one of -#the independent variables, then the line cuts across the other one at that -#value - -#ideally this would be a slider in the plot window, but whatever -def linecut_data(ind_vars, ind_var_names, dep_var, dep_var_name): - cut_var_name = easygui.choicebox("Choose variable you would like to cut along:", choices = ind_var_names) - cut_name_index = list(ind_var_names).index(cut_var_name) #only lists have the .index method :( - #Sometimes the order of names does not correspond to the ordering of indices in dep_var, this takes care of that - cut_index = np.shape(dep_var).index(len(ind_vars[cut_name_index])) - cut_var_val = float(easygui.choicebox("Choose value of the cut variable:", choices = ind_vars[cut_name_index])) - cut_var_val_index = list(ind_vars[cut_name_index]).index(cut_var_val) - #we know it is 2d data, so if cut_index is 0, we want a [cut_var_index,:] cut, else [:,cut_var_index] - print(dep_var_name) - #TODO: find more pythonic solution - if cut_index == 0: - cut_dep_data = dep_var[cut_var_val_index, :] + cut_ind_data = ind_vars[int(cut_index)] #this logic just makes 0's into 1's and vice-versa + cut_ind_name = ind_var_names[int(cut_index)] + cut_dep_name = "{} at {} = {:.3f}".format(dep_var_name, cut_var_name, cut_var_val) - elif cut_index == 1: - cut_dep_data = dep_var[:, cut_var_val_index] - - cut_ind_data = 
ind_vars[int(cut_index)] #this logic just makes 0's into 1's and vice-versa - cut_ind_name = ind_var_names[int(cut_index)] - cut_dep_name = "{} at {} = {:.3f}".format(dep_var_name, cut_var_name, cut_var_val) + return cut_ind_data, cut_ind_name, cut_dep_data, cut_dep_name + + cut_ind_data, cut_ind_name, cut_dep_data, cut_dep_name = linecut_data(ind_vars,ind_var_names, dep_var, dep_var_name) + + #%% Plot the linecut + + plt.plot(cut_ind_data, cut_dep_data, '.') + plt.title(cut_dep_name) + plt.xlabel(cut_ind_name) + plt.ylabel("") + + #%% Finding derivatives + + def get_min_ders(freqs, data): + mags = np.array(data) + ders = [] + der_max_locs = [] #should correspond to bifurcation + der_max_freqs = [] + for trace in mags: + der = np.gradient(trace) + der_max_loc = list(np.where(der == np.min(der))[0])[0] + der_max_locs.append(der_max_loc) + der_max_freq = freqs[der_max_loc] + der_max_freqs.append(der_max_freq) + ders.append(der) + return(der_max_freqs) + + freqs_of_max_change = get_min_ders(ind_vars[1],dep_var) + linearized_voltages = np.power(10,ind_vars[0]/20) + offset_from_right = -2 + fitx = linearized_voltages[0:offset_from_right-1] + fity = (freqs_of_max_change[0:offset_from_right-1]-freqs_of_max_change[0])/1e6 + + #Fit Function to polynomial + #%% Numpy Fit + from numpy.polynomial.polynomial import polyfit + fit = np.flip(np.polyfit(fitx,fity,4)) + fit_func = lambda x,a,b,c,d,e: a+b*x+c*x**2+d*x**3+e*x**4 + english_sucks = ["th","st","nd","rd","th"] + for coefficient in enumerate(fit): + print("{}{} order term: {}".format(coefficient[0],english_sucks[coefficient[0]],coefficient[1])) + plt.plot(fitx,fity,'o') + plt.plot(fitx,fit_func(fitx, *fit)) + plt.title("All Order Fit") + #%% Scipy Fit + from scipy.optimize import curve_fit + def fit_func(x,a,b): + return a*x+b*x**2 + fit_end, other_stuff = curve_fit(fit_func, fitx,fity) + print("First order: {} (MHz/mV)\nSecond order: {} (MHz/mV)^2".format(fit_end[0],fit_end[1])) + plt.plot(fitx,fity,'o') + 
plt.plot(fitx,fit_func(fitx,fit_end[0],fit_end[1])) + plt.xlabel("Linearized Voltage ($\sqrt{mW}$)") + plt.ylabel("Detuning (MHz)") + plt.title("1st and 2nd order fit") + #%% + + + + - return cut_ind_data, cut_ind_name, cut_dep_data, cut_dep_name -cut_ind_data, cut_ind_name, cut_dep_data, cut_dep_name = linecut_data(ind_vars,ind_var_names, dep_var, dep_var_name) - -#%% Plot the linecut - -plt.plot(cut_ind_data, cut_dep_data, '.') -plt.title(cut_dep_name) -plt.xlabel(cut_ind_name) -plt.ylabel("") - -#%% Finding derivatives - -def get_min_ders(freqs, data): - mags = np.array(data) - ders = [] - der_max_locs = [] #should correspond to bifurcation - der_max_freqs = [] - for trace in mags: - der = np.gradient(trace) - der_max_loc = list(np.where(der == np.min(der))[0])[0] - der_max_locs.append(der_max_loc) - der_max_freq = freqs[der_max_loc] - der_max_freqs.append(der_max_freq) - ders.append(der) - return(der_max_freqs) - -freqs_of_max_change = get_min_ders(ind_vars[1],dep_var) -linearized_voltages = np.power(10,ind_vars[0]/20) -offset_from_right = -2 -fitx = linearized_voltages[0:offset_from_right-1] -fity = (freqs_of_max_change[0:offset_from_right-1]-freqs_of_max_change[0])/1e6 - -#Fit Function to polynomial -#%% Numpy Fit -from numpy.polynomial.polynomial import polyfit -fit = np.flip(np.polyfit(fitx,fity,4)) -fit_func = lambda x,a,b,c,d,e: a+b*x+c*x**2+d*x**3+e*x**4 -english_sucks = ["th","st","nd","rd","th"] -for coefficient in enumerate(fit): - print("{}{} order term: {}".format(coefficient[0],english_sucks[coefficient[0]],coefficient[1])) -plt.plot(fitx,fity,'o') -plt.plot(fitx,fit_func(fitx, *fit)) -plt.title("All Order Fit") -#%% Scipy Fit -from scipy.optimize import curve_fit -def fit_func(x,a,b): - return a*x+b*x**2 -fit_end, other_stuff = curve_fit(fit_func, fitx,fity) -print("First order: {} (MHz/mV)\nSecond order: {} (MHz/mV)^2".format(fit_end[0],fit_end[1])) -plt.plot(fitx,fity,'o') -plt.plot(fitx,fit_func(fitx,fit_end[0],fit_end[1])) 
-plt.xlabel("Linearized Voltage ($\sqrt{mW}$)") -plt.ylabel("Detuning (MHz)") -plt.title("1st and 2nd order fit") -#%% - - - - - - diff --git a/data_processing/fitting/QFit.py b/data_processing/fitting/QFit.py index a0300a4..ec4df46 100644 --- a/data_processing/fitting/QFit.py +++ b/data_processing/fitting/QFit.py @@ -1,3 +1,8 @@ +''' +Fits VNA trace. Determine over/under coupling regimes. +''' + + import numpy as np import matplotlib.pyplot as plt import csv @@ -35,6 +40,26 @@ def reflectionFunc_re(freq, Qext, Qint, f0, magBack, phaseCorrect): return reflectionFunc(freq, Qext, Qint, f0, magBack, phaseCorrect)[::2] def getData_from_datadict(filepath, plot_data = None): + ''' + Takes a VNA trace and returns the data as numpy arrays. It returns trace data + in both real-imaginary and phase-magnitude form. + + Parameters + ---------- + filepath : TYPE + DESCRIPTION. + plot_data : TYPE, optional + DESCRIPTION. The default is None. + + Returns + ------- + freqs : np.float + real : np.float + imag : np.float + powers_dB : np.float + phase_rad : np.float + + ''' datadict = all_datadicts_from_hdf5(filepath)['data'] powers_dB = datadict.extract('power')['power']['values'] freqs = datadict.extract('power')['frequency']['values']*2*np.pi diff --git a/data_processing/signal_processing/__pycache__/Pulse_Processing.cpython-39.pyc b/data_processing/signal_processing/__pycache__/Pulse_Processing.cpython-39.pyc index 24cb447..455c987 100644 Binary files a/data_processing/signal_processing/__pycache__/Pulse_Processing.cpython-39.pyc and b/data_processing/signal_processing/__pycache__/Pulse_Processing.cpython-39.pyc differ diff --git a/data_processing/signal_processing/__pycache__/__init__.cpython-39.pyc b/data_processing/signal_processing/__pycache__/__init__.cpython-39.pyc index 7c41988..b39d980 100644 Binary files a/data_processing/signal_processing/__pycache__/__init__.cpython-39.pyc and b/data_processing/signal_processing/__pycache__/__init__.cpython-39.pyc differ diff --git 
a/sdocs/Makefile b/sdocs/Makefile new file mode 100644 index 0000000..d0c3cbf --- /dev/null +++ b/sdocs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/sdocs/build/doctrees/4th_Order_Fitter.doctree b/sdocs/build/doctrees/4th_Order_Fitter.doctree new file mode 100644 index 0000000..1f0557b Binary files /dev/null and b/sdocs/build/doctrees/4th_Order_Fitter.doctree differ diff --git a/sdocs/build/doctrees/Bifurcation fitter.doctree b/sdocs/build/doctrees/Bifurcation fitter.doctree new file mode 100644 index 0000000..ea79d28 Binary files /dev/null and b/sdocs/build/doctrees/Bifurcation fitter.doctree differ diff --git a/sdocs/build/doctrees/FS_utility_functions.doctree b/sdocs/build/doctrees/FS_utility_functions.doctree new file mode 100644 index 0000000..0d50b8d Binary files /dev/null and b/sdocs/build/doctrees/FS_utility_functions.doctree differ diff --git a/sdocs/build/doctrees/Fidelity_from_filepath.doctree b/sdocs/build/doctrees/Fidelity_from_filepath.doctree new file mode 100644 index 0000000..954c9bf Binary files /dev/null and b/sdocs/build/doctrees/Fidelity_from_filepath.doctree differ diff --git a/sdocs/build/doctrees/Gain vs Pump Power fitter.doctree b/sdocs/build/doctrees/Gain vs Pump Power fitter.doctree new file mode 100644 index 0000000..996ca11 Binary files /dev/null and b/sdocs/build/doctrees/Gain vs Pump Power fitter.doctree 
differ diff --git a/sdocs/build/doctrees/General_reader.doctree b/sdocs/build/doctrees/General_reader.doctree new file mode 100644 index 0000000..3d60cff Binary files /dev/null and b/sdocs/build/doctrees/General_reader.doctree differ diff --git a/sdocs/build/doctrees/Helper_Functions.doctree b/sdocs/build/doctrees/Helper_Functions.doctree new file mode 100644 index 0000000..0dd2ef0 Binary files /dev/null and b/sdocs/build/doctrees/Helper_Functions.doctree differ diff --git a/sdocs/build/doctrees/IIP3_plotting.doctree b/sdocs/build/doctrees/IIP3_plotting.doctree new file mode 100644 index 0000000..1366b43 Binary files /dev/null and b/sdocs/build/doctrees/IIP3_plotting.doctree differ diff --git a/sdocs/build/doctrees/Mixer_calibration_plotting.doctree b/sdocs/build/doctrees/Mixer_calibration_plotting.doctree new file mode 100644 index 0000000..f17ce67 Binary files /dev/null and b/sdocs/build/doctrees/Mixer_calibration_plotting.doctree differ diff --git a/sdocs/build/doctrees/Participation_and_Alpha_Fitter.doctree b/sdocs/build/doctrees/Participation_and_Alpha_Fitter.doctree new file mode 100644 index 0000000..c0bccd8 Binary files /dev/null and b/sdocs/build/doctrees/Participation_and_Alpha_Fitter.doctree differ diff --git a/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory.doctree b/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory.doctree new file mode 100644 index 0000000..3661e79 Binary files /dev/null and b/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory.doctree differ diff --git a/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory_LO_pwr_phase.doctree b/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory_LO_pwr_phase.doctree new file mode 100644 index 0000000..e67a55c Binary files /dev/null and b/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory_LO_pwr_phase.doctree differ diff --git 
a/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory_LO_sig_phase.doctree b/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory_LO_sig_phase.doctree new file mode 100644 index 0000000..770bafb Binary files /dev/null and b/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory_LO_sig_phase.doctree differ diff --git a/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory_Sig.doctree b/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory_Sig.doctree new file mode 100644 index 0000000..88c43ea Binary files /dev/null and b/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory_Sig.doctree differ diff --git a/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory_loopbacks.doctree b/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory_loopbacks.doctree new file mode 100644 index 0000000..118280b Binary files /dev/null and b/sdocs/build/doctrees/Process_Multiple_Acquisitions_From_Directory_loopbacks.doctree differ diff --git a/sdocs/build/doctrees/Process_One_3_state_acquisition.doctree b/sdocs/build/doctrees/Process_One_3_state_acquisition.doctree new file mode 100644 index 0000000..98eb350 Binary files /dev/null and b/sdocs/build/doctrees/Process_One_3_state_acquisition.doctree differ diff --git a/sdocs/build/doctrees/Process_One_Acquisition_From_File.doctree b/sdocs/build/doctrees/Process_One_Acquisition_From_File.doctree new file mode 100644 index 0000000..8e86fec Binary files /dev/null and b/sdocs/build/doctrees/Process_One_Acquisition_From_File.doctree differ diff --git a/sdocs/build/doctrees/Process_One_Acquisition_From_File_!.doctree b/sdocs/build/doctrees/Process_One_Acquisition_From_File_!.doctree new file mode 100644 index 0000000..7a86bd6 Binary files /dev/null and b/sdocs/build/doctrees/Process_One_Acquisition_From_File_!.doctree differ diff --git a/sdocs/build/doctrees/Process_One_Acquisition_From_File_1.doctree 
b/sdocs/build/doctrees/Process_One_Acquisition_From_File_1.doctree new file mode 100644 index 0000000..e679f75 Binary files /dev/null and b/sdocs/build/doctrees/Process_One_Acquisition_From_File_1.doctree differ diff --git a/sdocs/build/doctrees/Process_One_Acquisition_From_File_with_weights.doctree b/sdocs/build/doctrees/Process_One_Acquisition_From_File_with_weights.doctree new file mode 100644 index 0000000..c546cf7 Binary files /dev/null and b/sdocs/build/doctrees/Process_One_Acquisition_From_File_with_weights.doctree differ diff --git a/sdocs/build/doctrees/Process_fitted_power_sweep.doctree b/sdocs/build/doctrees/Process_fitted_power_sweep.doctree new file mode 100644 index 0000000..cf40222 Binary files /dev/null and b/sdocs/build/doctrees/Process_fitted_power_sweep.doctree differ diff --git a/sdocs/build/doctrees/Process_fitted_sweeps.doctree b/sdocs/build/doctrees/Process_fitted_sweeps.doctree new file mode 100644 index 0000000..7ad9072 Binary files /dev/null and b/sdocs/build/doctrees/Process_fitted_sweeps.doctree differ diff --git a/sdocs/build/doctrees/Pulse_Processing.doctree b/sdocs/build/doctrees/Pulse_Processing.doctree new file mode 100644 index 0000000..189b995 Binary files /dev/null and b/sdocs/build/doctrees/Pulse_Processing.doctree differ diff --git a/sdocs/build/doctrees/Pulse_Processing_Utils.doctree b/sdocs/build/doctrees/Pulse_Processing_Utils.doctree new file mode 100644 index 0000000..57b6cfa Binary files /dev/null and b/sdocs/build/doctrees/Pulse_Processing_Utils.doctree differ diff --git a/sdocs/build/doctrees/QFit.doctree b/sdocs/build/doctrees/QFit.doctree new file mode 100644 index 0000000..9f75875 Binary files /dev/null and b/sdocs/build/doctrees/QFit.doctree differ diff --git a/sdocs/build/doctrees/Q_vs_temp_vs_power.doctree b/sdocs/build/doctrees/Q_vs_temp_vs_power.doctree new file mode 100644 index 0000000..6d5a6ef Binary files /dev/null and b/sdocs/build/doctrees/Q_vs_temp_vs_power.doctree differ diff --git 
a/sdocs/build/doctrees/Resonator_Autofit_1D.doctree b/sdocs/build/doctrees/Resonator_Autofit_1D.doctree new file mode 100644 index 0000000..8cafda1 Binary files /dev/null and b/sdocs/build/doctrees/Resonator_Autofit_1D.doctree differ diff --git a/sdocs/build/doctrees/TACO_multiplot_b1.doctree b/sdocs/build/doctrees/TACO_multiplot_b1.doctree new file mode 100644 index 0000000..43ea0a0 Binary files /dev/null and b/sdocs/build/doctrees/TACO_multiplot_b1.doctree differ diff --git a/sdocs/build/doctrees/TACO_utility_functions.doctree b/sdocs/build/doctrees/TACO_utility_functions.doctree new file mode 100644 index 0000000..5220c85 Binary files /dev/null and b/sdocs/build/doctrees/TACO_utility_functions.doctree differ diff --git a/sdocs/build/doctrees/amplifier_g3.doctree b/sdocs/build/doctrees/amplifier_g3.doctree new file mode 100644 index 0000000..3a388cb Binary files /dev/null and b/sdocs/build/doctrees/amplifier_g3.doctree differ diff --git a/sdocs/build/doctrees/coefficient_solver.doctree b/sdocs/build/doctrees/coefficient_solver.doctree new file mode 100644 index 0000000..f320b8d Binary files /dev/null and b/sdocs/build/doctrees/coefficient_solver.doctree differ diff --git a/sdocs/build/doctrees/ddH5_Duff_Plotting.doctree b/sdocs/build/doctrees/ddH5_Duff_Plotting.doctree new file mode 100644 index 0000000..f804d79 Binary files /dev/null and b/sdocs/build/doctrees/ddH5_Duff_Plotting.doctree differ diff --git a/sdocs/build/doctrees/ddH5_Fluxsweep_Plotting.doctree b/sdocs/build/doctrees/ddH5_Fluxsweep_Plotting.doctree new file mode 100644 index 0000000..1517341 Binary files /dev/null and b/sdocs/build/doctrees/ddH5_Fluxsweep_Plotting.doctree differ diff --git a/sdocs/build/doctrees/ddH5_Taco_Plotting.doctree b/sdocs/build/doctrees/ddH5_Taco_Plotting.doctree new file mode 100644 index 0000000..f761c8f Binary files /dev/null and b/sdocs/build/doctrees/ddH5_Taco_Plotting.doctree differ diff --git a/sdocs/build/doctrees/ddh5_autoplotting.doctree 
b/sdocs/build/doctrees/ddh5_autoplotting.doctree new file mode 100644 index 0000000..060c09d Binary files /dev/null and b/sdocs/build/doctrees/ddh5_autoplotting.doctree differ diff --git a/sdocs/build/doctrees/ddh5_freq_sweep_plotting.doctree b/sdocs/build/doctrees/ddh5_freq_sweep_plotting.doctree new file mode 100644 index 0000000..7e52b75 Binary files /dev/null and b/sdocs/build/doctrees/ddh5_freq_sweep_plotting.doctree differ diff --git a/sdocs/build/doctrees/ddh5_to_csv.doctree b/sdocs/build/doctrees/ddh5_to_csv.doctree new file mode 100644 index 0000000..2fdaa35 Binary files /dev/null and b/sdocs/build/doctrees/ddh5_to_csv.doctree differ diff --git a/sdocs/build/doctrees/environment.pickle b/sdocs/build/doctrees/environment.pickle new file mode 100644 index 0000000..93a7caa Binary files /dev/null and b/sdocs/build/doctrees/environment.pickle differ diff --git a/sdocs/build/doctrees/fridgePlot.doctree b/sdocs/build/doctrees/fridgePlot.doctree new file mode 100644 index 0000000..3523c61 Binary files /dev/null and b/sdocs/build/doctrees/fridgePlot.doctree differ diff --git a/sdocs/build/doctrees/gain_trace.doctree b/sdocs/build/doctrees/gain_trace.doctree new file mode 100644 index 0000000..4d75647 Binary files /dev/null and b/sdocs/build/doctrees/gain_trace.doctree differ diff --git a/sdocs/build/doctrees/index.doctree b/sdocs/build/doctrees/index.doctree new file mode 100644 index 0000000..5de7ddf Binary files /dev/null and b/sdocs/build/doctrees/index.doctree differ diff --git a/sdocs/build/doctrees/multiplot.doctree b/sdocs/build/doctrees/multiplot.doctree new file mode 100644 index 0000000..3b78f02 Binary files /dev/null and b/sdocs/build/doctrees/multiplot.doctree differ diff --git a/sdocs/build/doctrees/old_file_converter.doctree b/sdocs/build/doctrees/old_file_converter.doctree new file mode 100644 index 0000000..f4772c8 Binary files /dev/null and b/sdocs/build/doctrees/old_file_converter.doctree differ diff --git 
a/sdocs/build/doctrees/phase_correction_proof_of_concept.doctree b/sdocs/build/doctrees/phase_correction_proof_of_concept.doctree new file mode 100644 index 0000000..b66ed54 Binary files /dev/null and b/sdocs/build/doctrees/phase_correction_proof_of_concept.doctree differ diff --git a/sdocs/build/doctrees/processDosetestInfo.doctree b/sdocs/build/doctrees/processDosetestInfo.doctree new file mode 100644 index 0000000..68d6a32 Binary files /dev/null and b/sdocs/build/doctrees/processDosetestInfo.doctree differ diff --git a/sdocs/build/doctrees/process_pump_scanning.doctree b/sdocs/build/doctrees/process_pump_scanning.doctree new file mode 100644 index 0000000..c95dd6b Binary files /dev/null and b/sdocs/build/doctrees/process_pump_scanning.doctree differ diff --git a/sdocs/build/doctrees/snailamp.doctree b/sdocs/build/doctrees/snailamp.doctree new file mode 100644 index 0000000..6410c79 Binary files /dev/null and b/sdocs/build/doctrees/snailamp.doctree differ diff --git a/sdocs/build/doctrees/squidamp.doctree b/sdocs/build/doctrees/squidamp.doctree new file mode 100644 index 0000000..ab517ee Binary files /dev/null and b/sdocs/build/doctrees/squidamp.doctree differ diff --git a/sdocs/build/html/.buildinfo b/sdocs/build/html/.buildinfo new file mode 100644 index 0000000..5673627 --- /dev/null +++ b/sdocs/build/html/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 75aeb1631411ae7241f02a00d9dafb2a +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/sdocs/build/html/.nojekyll b/sdocs/build/html/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/sdocs/build/html/4th_Order_Fitter.html b/sdocs/build/html/4th_Order_Fitter.html new file mode 100644 index 0000000..653be7f --- /dev/null +++ b/sdocs/build/html/4th_Order_Fitter.html @@ -0,0 +1,112 @@ + + + + +
+ + + +Created on Thu Jun 25 09:39:22 2020
+@author: Tzu-Chiao Chien and Ryan Kaufman
+Bases: object
Created on Thu Jun 25 09:39:22 2020
+@author: Tzu-Chiao Chien and Ryan Kaufman
+Created on Thu Nov 12 10:56:06 2020
+@author: Ryan Kaufman
+purpose: Contain general helper functions that are useful for all instruments
+ + + + + + + + + + + + + + + + +Takes in the inputs to a pcolor plot and normalizes each jth column in the 2d array to (i,j) value
+Spyder Editor
+This is a temporary script file.
+ + + + + + +Created on Tue Apr 27 10:17:43 2021
+@author: Hatlab_3
+#TODO: +:param signal_data: signal datapoints +:type signal_data: np 1D array - float64 +:param reference_data: reference datapoints +:type reference_data: np 1D array - float64 +:param mod_freq: Modulation frequency in Hz. The default is 50e6. +:type mod_freq: float64, optional +:param sampling_rate: sampling rate in samples per second. The default is 1e9. +:type sampling_rate: float64, optional
+sig_I_summed (np 1D array - float64) – Signal multiplied by Sine and integrated over each period.
sig_Q_summed (np 1D array - float64) – Signal multiplied by Cosine and integrated over each period.
ref_I_summed (np 1D array - float64) – reference multiplied by sine and integrated over each period.
ref_Q_summed (np 1D array - float64) – Reference multiplied by Cosine and integrated over each period.
sigI (np 2D array - (records,samples) float64) – demodulated signal - In-phase
sigQ (np 2D array - (records,samples) float64) – demodulated signal - Quadrature phase
refI (np 2D array - (records,samples) float64) – demodulated reference - In-phase
refQ (np 2D array - (records,samples) float64) – demodulated reference - quadrature-phase
+Note: the reference and signal arrays must all be of the same length.
sigI_corrected (np 2D array - float64) – Signal I rotated by reference phase averaged over each record
sigQ_corrected (np 2D array - float64) – Signal Q rotated by reference phase averaged over each record
Created on Thu Apr 29 09:40:12 2021
+@author: Ryan Kaufman
+Set up function module that can assist in loading pulse sequences into AWG +and functionalizing Alazar acquiring
+ + + + +Created on Tue Jun 1 17:52:48 2021
+@author: Hatlab_3
+Bases: object
Created on Tue Feb 16 12:55:36 2021
+@author: Hatlab_3
+Find all TACOs in a directory, pull out the best powers, and plot them in 3D space to see whether it is actually a TACO or not.
+assemble all of the saturation sweeps, extract the best (highest) +saturation power in (gen_freqs, gen_powers) space, plot vs current
+Created on Fri Feb 19 14:27:01 2021
+@author: Hatlab_3
+
+# import easygui
+from plottr.apps.autoplot import autoplotDDH5, script, main
+from plottr.data.datadict_storage import all_datadicts_from_hdf5
+import matplotlib.pyplot as plt
+import numpy as np
+from data_processing.Helper_Functions import get_name_from_path, shift_array_relative_to_middle, log_normalize_to_row, select_closest_to_target
+from data_processing.fitting.QFit import fit, plotRes, reflectionFunc
+import inspect
+from plottr.data import datadict_storage as dds, datadict as dd
+from scipy.signal import savgol_filter
+from scipy.fftpack import dct, idct
+
+[docs]class fit_fluxsweep():
+    """Fit resonator reflection traces across a flux (current) sweep.
+
+    Loads phase/power data from a fluxsweep ddh5 file, fits each current
+    slice with the ``reflectionFunc`` model from QFit, and streams the fitted
+    resonant frequency / Qint / Qext (plus covariance-derived errors) into a
+    new ddh5 file through a ``DDH5Writer``.
+    """
+    def __init__(self, Flux_filepath, save_filepath, name):
+        #setup files
+        self.name = name
+        self.datadict = dd.DataDict(
+            current = dict(unit='A'),
+
+            base_resonant_frequency = dict(axes = ['current']),
+            base_Qint = dict(axes = ['current']),
+            base_Qext = dict(axes = ['current']),
+
+            base_resonant_frequency_error = dict(axes = ['current']),
+            base_Qint_error = dict(axes = ['current']),
+            base_Qext_error = dict(axes = ['current']),
+            )
+        self.datadir = save_filepath
+        # NOTE(review): __enter__ is called manually but __exit__ never is,
+        # so the writer/file stays open for the object's lifetime.
+        self.writer = dds.DDH5Writer(self.datadir, self.datadict, name=self.name)
+        self.writer.__enter__()
+
+        #Duffing/FS Data Extraction
+        duff_dicts = all_datadicts_from_hdf5(Flux_filepath)
+        duffDict = duff_dicts['data']
+        uvphDict = duffDict.extract('phase')
+        uvpoDict = duffDict.extract('power')
+
+
+        #get the arrays back out
+        self.undriven_vna_phase = uvphDict.data_vals('phase')
+        self.undriven_vna_power = uvpoDict.data_vals('power')
+
+        # frequencies are converted to angular units (rad/s) here and kept
+        # that way internally; results are divided by 2*pi before saving
+        self.vna_freqs = uvphDict.data_vals('frequency')*2*np.pi
+        self.currents = uvphDict.data_vals('current')
+
+[docs]    def default_bounds(self, QextGuess, QintGuess, f0Guess, magBackGuess):
+        """Return (lower, upper) fit bounds for [Qext, Qint, f0, magBack, phase],
+        derived multiplicatively from the supplied guesses."""
+        return ([QextGuess / 1.5, QintGuess / 1.5, f0Guess/2, magBackGuess / 5.0, -2 * np.pi],
+                [QextGuess * 1.5, QintGuess +200, f0Guess*1.5, magBackGuess * 5.0, 2 * np.pi])
+
+[docs]    def initial_fit(self, f0Guess, QextGuess = 50, QintGuess = 300, magBackGuess = 0.0001, bounds = None, smooth = False, smooth_win = 11, phaseOffGuess = 0, debug = False, adaptive_window = False, adapt_win_size = 300e6, start_current_index = 0):
+        """Fit a single current slice to seed the semi-automatic sweep fit.
+
+        f0Guess is given in Hz; adapt_win_size (Hz) restricts the frequency
+        window around f0Guess when adaptive_window is True. Stores the result
+        in self.initial_popt / self.initial_pconv and plots the fit.
+        """
+        f0Guess = f0Guess*2*np.pi
+        # NOTE(review): idiomatic form would be `bounds is None`
+        if bounds == None:
+            bounds=self.default_bounds(QextGuess, QintGuess, f0Guess, magBackGuess)
+
+        # select only the points taken at the chosen bias current
+        filt = (self.currents == np.unique(self.currents)[start_current_index])
+
+        if adaptive_window:
+            filt1 = self.vna_freqs < f0Guess + adapt_win_size*2*np.pi/2
+            filt2 = self.vna_freqs > f0Guess - adapt_win_size*2*np.pi/2
+            filt = filt*filt1*filt2
+
+        init_vna_freqs = np.unique(self.vna_freqs[filt])
+        init_phase_trace = self.undriven_vna_phase[filt]
+        init_pow_trace = self.undriven_vna_power[filt]
+
+        if debug:
+            plt.figure(1)
+            plt.plot(init_vna_freqs/(2*np.pi), init_phase_trace)
+            plt.title("Debug1: phase")
+            plt.figure(2)
+            plt.plot(init_vna_freqs/(2*np.pi), init_pow_trace)
+            plt.title("Debug1: power")
+
+        if smooth:
+            init_phase_trace = savgol_filter(init_phase_trace, smooth_win, 3)
+            init_pow_trace = savgol_filter(init_pow_trace, smooth_win, 3)
+
+        # dB -> linear magnitude, then split into I/Q for the complex fit
+        lin = 10**(init_pow_trace/20)
+
+        imag = lin * np.sin(init_phase_trace)
+        real = lin * np.cos(init_phase_trace)
+
+        popt, pconv = fit(init_vna_freqs, real, imag, init_pow_trace, init_phase_trace, Qguess = (QextGuess,QintGuess), f0Guess = f0Guess, real_only = 0, bounds = bounds, magBackGuess = magBackGuess, phaseGuess = phaseOffGuess)
+
+        print(f'f (Hz): {np.round(popt[2]/2/np.pi, 3)}', )
+        fitting_params = list(inspect.signature(reflectionFunc).parameters.keys())[1:]
+        for i in range(2):
+            print(f'{fitting_params[i]}: {np.round(popt[i], 2)} +- {np.round(np.sqrt(pconv[i, i]), 3)}')
+        Qtot = popt[0] * popt[1] / (popt[0] + popt[1])
+        print('Q_tot: ', round(Qtot), '\nT1 (s):', round(Qtot/popt[2]), f"Kappa: {round(popt[2]/2/np.pi/Qtot)}", )
+
+        self.initial_popt = popt
+        self.initial_pconv = pconv
+
+        plotRes(init_vna_freqs, real, imag, init_pow_trace, init_phase_trace, popt)
+
+[docs]    def save_fit(self, current, base_popt, base_pconv):
+        """Append one fitted point (f0, Qint, Qext and their 1-sigma errors
+        from the covariance diagonal) to the output ddh5 file."""
+        self.writer.add_data(
+            current = current,
+
+            base_resonant_frequency = base_popt[2]/(2*np.pi),
+            base_Qint = base_popt[1],
+            base_Qext = base_popt[0],
+
+            base_resonant_frequency_error = np.sqrt(base_pconv[2, 2])/(2*np.pi),
+            base_Qint_error = np.sqrt(base_pconv[1, 1]),
+            base_Qext_error = np.sqrt(base_pconv[0, 0]),
+            )
+[docs]    def semiauto_fit(self, bias_currents, vna_freqs, vna_mags, vna_phases, popt, debug = False, savedata = False, smooth = False, smooth_win = 11, adaptive_window = False, adapt_win_size = 300e6, fourier_filter = False, fourier_cutoff = 40, pconv_tol = 2):
+        """Fit every current slice of a flux sweep, seeding each fit with the
+        previous slice's result; retries with shifted f0 guesses when the fit
+        covariance jumps by more than pconv_tol. Returns
+        (unique currents, res_freqs [Hz], Qints, Qexts, magBacks).
+        """
+        res_freqs = np.zeros(np.size(np.unique(bias_currents)))
+        Qints = np.zeros(np.size(np.unique(bias_currents)))
+        Qexts = np.zeros(np.size(np.unique(bias_currents)))
+        magBacks = np.zeros(np.size(np.unique(bias_currents)))
+
+        init_f0 = popt[2]
+        init_Qint = popt[1]
+        init_Qext = popt[0]
+        init_magBack = popt[3]
+        for i, current in enumerate(np.unique(bias_currents)):
+            first_condn = bias_currents == current
+            [first_trace_freqs, first_trace_phase, first_trace_mag] = [vna_freqs[first_condn]*2*np.pi, vna_phases[first_condn], 10**(vna_mags[first_condn]/20)]
+            if smooth:
+                first_trace_phase = savgol_filter(first_trace_phase, smooth_win, 3)
+                first_trace_mag = savgol_filter(first_trace_mag, smooth_win, 3)
+
+            imag = first_trace_mag * np.sin(first_trace_phase)
+            real = first_trace_mag * np.cos(first_trace_phase)
+            if fourier_filter == True:
+                if debug:
+                    plt.figure(3)
+                    plt.plot(first_trace_freqs, real)
+                    plt.plot(first_trace_freqs, imag)
+                    plt.title('before filter')
+                # drop the lowest DCT components to remove slow background ripple
+                imag = idct(dct(imag)[fourier_cutoff:])
+                real = idct(dct(real)[fourier_cutoff:])
+                if debug:
+                    plt.figure(4)
+                    plt.plot(real)
+                    plt.plot(imag)
+                    plt.title('after filter')
+            if i >= 2:
+                if adaptive_window:
+                    # NOTE(review): res_freqs[i-1:i] is a single-element slice,
+                    # so np.average here is just the previous fit's value
+                    filt1 = first_trace_freqs<np.average(res_freqs[i-1:i])*2*np.pi+adapt_win_size*2*np.pi/2
+                    filt2 = first_trace_freqs>np.average(res_freqs[i-1:i])*2*np.pi-adapt_win_size*2*np.pi/2
+                    filt= filt1*filt2
+                else:
+                    filt = np.ones(np.size(first_trace_freqs)).astype(bool)
+                #start averaging the previous fits for prior information to increase robustness to bad fits
+                f0Guess = np.average(res_freqs[i-1:i])*2*np.pi
+                magBackGuess = np.average(magBacks[i-1:i])
+                (QextGuess,QintGuess) = (np.average(Qexts[i-1:i]),np.average(Qints[i-1:i]))
+            else:
+                f0Guess = init_f0
+                magBackGuess = init_magBack
+                (QextGuess, QintGuess) = (init_Qext, init_Qint)
+                filt = np.ones(np.size(first_trace_freqs)).astype(bool)
+
+            bounds=self.default_bounds(QextGuess, QintGuess, f0Guess, magBackGuess)
+
+            if i>2:
+                # NOTE(review): this always re-reads initial_pconv rather than
+                # the previous iteration's pconv - confirm that is intended
+                prev_pconv = self.initial_pconv
+            #fit(freq, real, imag, mag, phase, Qguess=(2e3, 1e3),real_only = 0, bounds = None)
+            popt, pconv = fit(first_trace_freqs[filt], real[filt], imag[filt], first_trace_mag, first_trace_phase, Qguess = (QextGuess,QintGuess), f0Guess = f0Guess, real_only = 0, bounds = bounds, magBackGuess = magBackGuess)
+
+            #catch a sudden change in convergence and try again until it's back in range:
+            if i>2:
+                # NOTE(review): np.array(a, b) treats b as the dtype argument,
+                # not a second element - likely meant np.array([a, b]); verify
+                pconv_diff_ratio = (np.array(pconv[0,0], pconv[1,1])-np.array(prev_pconv[0,0], prev_pconv[1,1]))/np.array(prev_pconv[0,0], prev_pconv[1,1])
+                if debug:
+                    print(f"Pconv ratio: {pconv_diff_ratio}")
+                j = 0
+                # alternating +/- frequency offsets to escape a bad local fit
+                alt_array = np.array([1e6,-1e6,5e6,-5e6, 10e6,-10e6,15e6,-15e6, 20e6, -20e6, 30e6, -30e6])*2*np.pi
+                while np.any(np.abs(pconv_diff_ratio)>pconv_tol):
+                    if j>11:
+                        raise Exception("No good fit at this point")
+                    print(f"sudden change in Q detected (pconv_diff_ratio: {pconv_diff_ratio}), trying resonant guess + {alt_array[j]/(2*np.pi)}")
+                    #try above
+                    if debug:
+                        if j%2 ==0:
+                            print("trying above")
+                        else:
+                            print("trying_below")
+                    popt, pconv = fit(first_trace_freqs[filt], real[filt], imag[filt], first_trace_mag, first_trace_phase, Qguess = (QextGuess,QintGuess), f0Guess = f0Guess+alt_array[j], real_only = 0, bounds = bounds, magBackGuess = magBackGuess)
+
+                    pconv_diff_ratio = (np.array(pconv[0,0], pconv[1,1])-np.array(prev_pconv[0,0], prev_pconv[1,1]))/np.array(prev_pconv[0,0], prev_pconv[1,1])
+                    j+=1
+
+
+            if debug:
+                import time
+                plotRes(first_trace_freqs[filt], real[filt], imag[filt], first_trace_mag[filt], first_trace_phase[filt], popt)
+                time.sleep(1)
+
+            res_freqs[i] = popt[2]/(2*np.pi)
+            Qints[i] = popt[1]
+            Qexts[i] = popt[0]
+            magBacks[i] = popt[3]
+
+            print(f'f (Hz): {np.round(popt[2]/2/np.pi, 3)}', )
+            fitting_params = list(inspect.signature(reflectionFunc).parameters.keys())[1:]
+            # NOTE(review): this inner loop shadows the outer index `i`;
+            # harmless here (results already stored above) but fragile
+            for i in range(2):
+                print(f'{fitting_params[i]}: {np.round(popt[i], 2)} +- {np.round(np.sqrt(pconv[i, i]), 3)}')
+            Qtot = popt[0] * popt[1] / (popt[0] + popt[1])
+            print('Q_tot: ', round(Qtot), '\nT1 (s):', round(Qtot/popt[2]), f"Kappa: {round(popt[2]/2/np.pi/Qtot)}", )
+            if savedata:
+                self.save_fit(current, popt, pconv)
+
+        return np.unique(bias_currents), res_freqs, Qints, Qexts, magBacks
+
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Nov 12 10:56:06 2020
+
+@author: Ryan Kaufman
+
+purpose: Contain general helper functions that are useful for all instruments
+"""
+# from measurement_modules import Controller_Module as CM
+import numpy as np
+import os
+
def adjust(Param1, stepsize1):
    """Interactively nudge a single parameter from the console.

    Prompts repeatedly until the user enters "q"; "a" steps the parameter
    down by stepsize1, "d" steps it up, echoing the new value each time.
    """
    response = ""
    while response.lower() != "q" :
        #have to use "q" to stop, can use left and right brackets to adjust stepsize up/down
        response = input(f"{Param1.name}: a for down, d for up (stepsize: {stepsize1})\nEnter to submit, q to quit")
        if response == "a":
            Param1(Param1()-stepsize1)
            print(f"{Param1.name}: {Param1()}")
        elif response == "d":
            Param1(Param1()+stepsize1)
            print(f"{Param1.name}: {Param1()}")
        elif response == "q":
            break
+
def adjust_2(Params, stepsizes):
    """Interactively nudge two parameters at once from the console.

    "a"/"d" step the first parameter down/up by its stepsize; "["/"]" do the
    same for the second. "q" quits. Echoes the new value after each change.
    """
    Param1, Param2 = Params
    stepsize1, stepsize2 = stepsizes
    response = ''
    while response.lower() != "q" :
        #have to use "q" to stop, can use left and right brackets to adjust stepsize up/down
        response = input(f"{Param1.name}: a for down, d for up\n{Param2.name}: [ for down, ] for up \nEnter to submit, q to quit\n")
        if response == "a":
            Param1(Param1()-stepsize1)
            print(f"{Param1.name}: {Param1()}")
        elif response == "d":
            Param1(Param1()+stepsize1)
            print(f"{Param1.name}: {Param1()}")
        elif response == "[":
            Param2(Param2()-stepsize2)
            print(f"{Param2.name}: {Param2()}")
        elif response == "]":
            Param2(Param2()+stepsize2)
            print(f"{Param2.name}: {Param2()}")
        elif response == "q":
            break
+
+
+
def shift_array_relative_to_middle(array, div_factor = 1e6):
    """Re-center *array* on its middle element and rescale by div_factor.

    Useful for turning absolute frequencies (Hz) into detunings (MHz by
    default) relative to the sweep center.
    """
    middle_value = array[len(array) // 2]
    return (array - middle_value) / div_factor
+
+
+
def log_normalize_to_row(x, y, arr, y_norm_val = None):
    """Normalize a pcolor dataset against a single reference row.

    Subtracts the reference row from every row of *arr* (the data are in dB,
    so subtraction corresponds to division in linear units).

    :param x: x-axis values (unused; kept for a uniform plotting signature)
    :param y: y-axis values, used to locate the reference row
    :param arr: 2D data array, rows indexed by y
    :param y_norm_val: y value whose row is the reference; defaults to row 0
    :return: np.ndarray with the reference row subtracted from each row
    """
    # fixed: `== None` -> `is None`; manual append loop -> broadcasting
    if y_norm_val is None:
        norm_index = 0
    else:
        # first y entry within 0.05 of the requested value
        norm_index = np.where(np.isclose(y, y_norm_val, atol = 0.05))[0][0]
    data = np.array(arr)
    return data - data[norm_index]
def log_normalize_up_to_row(x, y, arr, y_norm_val = None):
    """Normalize a pcolor dataset against the average of all rows below a y value.

    When y_norm_val is given, the reference is the column-wise average of every
    row with y below it, and that reference is subtracted from each row (dB
    data, so subtraction == division in linear units). With no y_norm_val the
    array is returned unchanged (reference of 0), matching the original code.

    :param x: x-axis values (unused; kept for a uniform plotting signature)
    :param y: y-axis values, used to locate the reference rows
    :param arr: 2D data array, rows indexed by y
    :param y_norm_val: y value below which rows are averaged into the reference
    :return: np.ndarray with the reference subtracted from each row
    """
    # fixed: removed leftover debug print of row 0; `== None` -> `is None`;
    # manual append loop replaced by broadcasting
    data = np.array(arr)
    if y_norm_val is None:
        return data
    norm_index = np.where(np.isclose(y, y_norm_val, atol = 0.05))[0][0]
    reference = np.average(data[0:norm_index], axis = 0)
    return data - reference
+
def select_closest_to_target(x_arr, arr, arr_target_val):
    """Return the x value whose paired entry in *arr* is closest to the target."""
    distances = np.abs(arr_target_val - np.array(arr))
    return x_arr[np.argmin(distances)]
+
def find_all_ddh5(cwd):
    """Collect every .ddh5 file up to two directory levels below *cwd*.

    Fixed: the bare ``except:`` is narrowed to OSError (raised when a listed
    entry is a file rather than a directory), and hard-coded ``'\\'``
    separators are replaced with os.path.join for portability.

    :param cwd: root directory to search
    :return: list of full paths to the .ddh5 files found
    """
    filepaths = []
    for path in os.listdir(cwd):
        full = os.path.join(cwd, path)
        try:
            for sub in os.listdir(full):
                print(sub)
                if sub.split('.')[-1] == 'ddh5':
                    filepaths.append(os.path.join(full, sub))
                else:
                    for subsub in os.listdir(os.path.join(full, sub)):
                        if subsub.split('.')[-1] == 'ddh5':
                            filepaths.append(os.path.join(full, sub, subsub))
        except OSError: #usually because the files are one directory higher than you'd expect
            if path.split('.')[-1] == 'ddh5':
                filepaths.append(full)
    return filepaths
+
def load_instrument(inst_class, *args, **kwargs):
    """Best-effort construction of an instrument instance.

    Returns ``inst_class(*args, **kwargs)`` on success; deliberately swallows
    AttributeError and returns None so a missing/partially-loaded driver does
    not abort a whole setup script.
    """
    try:
        return inst_class(*args, **kwargs)
    except AttributeError:
        return None
+
def freq_differences(freq_dict):
    """Print and collect all pairwise sums and absolute differences of frequencies.

    :param freq_dict: mapping of label -> frequency
    :return: (sum_dict, diff_dict), keyed "(label1+label2)" and "(label1-label2)"
    """
    sum_dict = {}
    diff_dict = {}
    entries = list(freq_dict.items())
    for idx, (k1, f1) in enumerate(entries):
        # only pairs where the second label comes after the first
        for k2, f2 in entries[idx + 1:]:
            print(f"({k1} + {k2}): {f1+f2}")
            sum_dict[f"({k1}+{k2})"] = f1+f2

            print(f"({k1} - {k2}): {np.abs(f1-f2)}")
            diff_dict[f"({k1}-{k2})"] = np.abs(f1-f2)

    return sum_dict, diff_dict
+
def check_if_close(freq_dict, threshold = 0.5): #threshold and freqs in GHz
    """Print a warning for every pair of frequencies closer than *threshold* (GHz)."""
    entries = list(freq_dict.items())
    for idx, (k1, f1) in enumerate(entries):
        # compare against every earlier entry only, so each pair prints once
        for k2, f2 in entries[:idx]:
            if np.abs(f2 - f1) < threshold:
                print(f"{k2} - {k1} is too close: {np.abs(f1-f2)}")
+
+
+
+
+
+
+
+# -*- coding: utf-8 -*-
+"""
+Spyder Editor
+
+This is a temporary script file.
+"""
+
+#bring in actual SNAIL data
+from plottr.data.datadict_storage import all_datadicts_from_hdf5
+import matplotlib.pyplot as plt
+import numpy as np
+import h5py
+import matplotlib.colors as color
+from matplotlib.widgets import Slider, TextBox
+import time
+import pickle
+
+
+#%% Use slider to select modulation curve
def get_fs_data(path):
    """Load a fluxsweep ddh5 file and return its flat data arrays.

    Fixed: ``datadict.extract('phase')`` was computed three times; it is now
    hoisted and reused.

    :param path: path to the ddh5 file
    :return: (currents, freqs, mags, phases) flat arrays
    """
    datadict = all_datadicts_from_hdf5(path)['data']
    phase_data = datadict.extract('phase')
    currents = phase_data.data_vals('current')
    freqs = phase_data.data_vals('frequency')
    phases = phase_data.data_vals('phase')
    mags = datadict.extract('power').data_vals('power')
    return currents, freqs, mags, phases
def convert_to_2D(filt_arr, to_be_2d):
    """Reshape a flat sweep array into 2D: one row per unique value of filt_arr."""
    rows = [to_be_2d[filt_arr == value] for value in np.unique(filt_arr)]
    return np.array(rows)
+[docs]def slider_fit(fs_filepath, fit_filepath, quanta_start, quanta_size, p_arr, alpha_arr, start_freq = 7e9):
+    """Interactive viewer overlaying pickled SNAIL frequency fits on fluxsweep data.
+
+    Shows one flux quantum of the phase data as an image (x rescaled to
+    0..2*pi) with a fit curve on top; sliders pick the p/alpha fit indices
+    and a start frequency, a text box re-centers the curve. Returns the
+    slider objects so they stay alive (matplotlib keeps only weak refs).
+    """
+    currents, freqs, mags, phases = get_fs_data(fs_filepath)
+
+    # NOTE(review): pickle.load of an arbitrary file - only open trusted paths
+    snail_freqs_fits = pickle.load(open(fit_filepath, "rb"))
+
+    #reformat the data into a 2d array by iterating through current values
+    phases_2D = convert_to_2D(currents, phases)
+    mags_2D = convert_to_2D(currents, mags)
+    ind_vars, dep_vars = [np.unique(currents), np.unique(freqs)], [mags_2D, phases_2D]
+
+    fig, ax = plt.subplots()
+    plt.subplots_adjust(left=0.3, bottom=0.4)
+    # trim the current axis to one flux quantum [quanta_start, quanta_start+quanta_size]
+    startpoint = quanta_start
+    stoppoint = startpoint+quanta_size
+    trimleft = np.where(np.unique(currents)>startpoint)[0][0]
+    trimright = np.where(np.unique(currents)<stoppoint)[0][-1]
+
+    dep_var_trimmed = dep_vars[1][trimleft:trimright]
+    ind_vars_trimmed = np.copy(ind_vars)
+    ind_vars_trimmed[0] = ind_vars[0][trimleft:trimright]
+    colors = [color.hex2color('#0000FF'), color.hex2color('#FFFFFF'), color.hex2color('#FF0000')]
+    _cmap = color.LinearSegmentedColormap.from_list('my_cmap', colors)
+    adj = 0
+    graph = dep_var_trimmed.T
+    low = np.min(dep_var_trimmed)
+    high = np.max(dep_var_trimmed)
+
+    _norm = color.Normalize(vmin = low, vmax = high)
+
+    # map the trimmed current span onto 0..2*pi (one quantum)
+    scale_factor = np.pi*2/(np.max(ind_vars_trimmed[0])-np.min(ind_vars_trimmed[0]))
+    graph_x = ind_vars_trimmed[0]*scale_factor
+
+    dplot = plt.imshow(graph, alpha = 0.5, extent = [0, 2*np.pi, ind_vars_trimmed[1][0], ind_vars_trimmed[1][-1]], aspect = 'auto', origin = 'lower', norm = _norm, cmap = _cmap)
+    fplot, = plt.plot(np.linspace(0,2*np.pi, 51), snail_freqs_fits[0][0])
+    ax.margins(x=0)
+
+    def update(val):
+        # slider callback: pick the (p, alpha) fit curve and rescale it so
+        # its first point sits at the chosen start frequency
+        p = int(sp.val)
+        alpha = int(salpha.val)
+        start_freq = sfreq.val
+        # x = np.where(p_arr == p)[0][0]
+        # y = np.where(alpha_arr == alpha)[0][0]
+        # print("p: "+str(p_arr[p])+" Alpha: "+str(alpha_arr[alpha]))
+        scale_factor = start_freq/snail_freqs_fits[p][alpha][0]
+        fplot.set_ydata(snail_freqs_fits[p][alpha]*scale_factor)
+
+        fig.canvas.draw_idle()
+
+    def submit(text):
+        # NOTE(review): `phi` is not defined anywhere in this function, so
+        # submitting the text box will raise NameError - needs fixing
+        center = float(text)
+        x_adj = center-np.pi
+        fplot.set_xdata(phi+x_adj)
+        fig.canvas.draw_idle()
+
+    axp = plt.axes([0.25, 0.1, 0.65, 0.03])
+    axalpha = plt.axes([0.25, 0.15, 0.65, 0.03])
+    axbox = plt.axes([0.25, 0.2, 0.8, 0.075])
+    axfreq = plt.axes([0.25, 0.05,0.65, 0.03])
+
+    text_box = TextBox(axbox, 'Center', initial="3.1415")
+    text_box.on_submit(submit)
+
+    sp = Slider(axp, 'P index',0, np.size(p_arr)-1, valinit=0, valstep=1)
+    salpha = Slider(axalpha, 'Alpha index', 0, np.size(alpha_arr)-1, valinit=0, valstep=1)
+    sfreq = Slider(axfreq, 'start frequency', start_freq-3e9, start_freq+3e9, valinit=start_freq)
+
+    sp.on_changed(update)
+    salpha.on_changed(update)
+    sfreq.on_changed(update)
+
+    return sp, salpha, sfreq
+
+if __name__ == '__main__':
+    # demo driver: launch the slider viewer on a known fluxsweep file with
+    # the pickled SNAIL fit grid (50x50 in p and alpha)
+    fs_filepath= r"\\136.142.53.51\data002\Texas\Cooldown_20210408\SA_C1_FS\2021-05-04_0005_C1_FS6_very_wide_fine.ddh5"
+    # load pickled fit data
+    fit_filepath = r"C:\Users\Hatlab_3\Desktop\RK_Scripts\NewRepos\data_processing\data_processing\models\SNAIL_supporting_modules\SNAIL_detailed.p"
+    p_arr = np.linspace(0.01, 0.3, 50)
+    alpha_arr = np.linspace(0.1, 0.32, 50)
+    # keep the returned sliders in variables so they are not garbage collected
+    p_slider, a_slider, freq_slider = slider_fit(fs_filepath, fit_filepath, -8.98e-5, 220.6e-6, p_arr, alpha_arr)
+
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Apr 29 14:18:59 2021
+
+@author: Ryan Kaufman - Hatlab
+"""
+from plottr.apps.autoplot import main
+from plottr.data import datadict_storage as dds, datadict as dd
+from data_processing.signal_processing import Pulse_Processing_utils as PU
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+import warnings
+warnings.filterwarnings("ignore")
+import os
def find_all_ddh5(cwd):
    """Walk up to two directory levels below *cwd* and collect .ddh5 file paths.

    Paths are assembled with backslashes (Windows-style), matching the rest
    of this script; entries that are files rather than directories are picked
    up in the except branch.
    """
    filepaths = []
    for entry in os.listdir(cwd):
        try:
            for sub in os.listdir(cwd + '\\' + entry):
                print(sub)
                if sub.split('.')[-1] == 'ddh5':
                    filepaths.append(cwd + '\\' + entry + '\\' + sub)
                else:
                    for subsub in os.listdir(cwd + '\\' + entry + '\\' + sub):
                        if subsub.split('.')[-1] == 'ddh5':
                            filepaths.append(cwd + '\\' + entry + '\\' + sub + '\\' + subsub)
        except: #usually because the files are one directory higher than you'd expect
            if entry.split('.')[-1] == 'ddh5':
                filepaths.append(cwd + '\\' + entry)
    return filepaths
+
+#%%sample one file to check things
+
# BUG FIX: the guard compared __name__ against 'main', which is never its
# value ('__main__' is), so this whole analysis block silently never ran.
if __name__ == '__main__':

    # One-off analysis driver: pick a 3-state acquisition ddh5 file and run
    # the 3-state fidelity extraction on it. The commented paths below are
    # the data files previously analyzed, kept as a provenance log.
    IQ_offset = np.array((0,0))
    # records_per_pulsetype = 3870
    cf = 6171427180.18
    # amp_off_filepath = r'Z:/Data/C1/C1_Hakan/Gain_pt_0.103mA/Pump_power_sweeps/1/2021-06-30/2021-06-30_0011_LO_6152798714.0_pwr_-8.69_amp_1_rotation_phase_2.094/2021-06-30_0011_LO_6152798714.0_pwr_-8.69_amp_1_rotation_phase_2.094.ddh5'
    # amp_off_filepath = r'Z:/Data/C1/C1_Hakan/Gain_pt_0.103mA/signal_power_sweeps/1_initial_guess/2021-07-06/2021-07-06_0003_Amp_0__LO_freq_6153298714.0_Hz_Sig_Volt_0.0_V_Phase_0.0_rad_/2021-07-06_0003_Amp_0__LO_freq_6153298714.0_Hz_Sig_Volt_0.0_V_Phase_0.0_rad_.ddh5'

    # filepath = r'G:/My Drive/shared/Amplifier_Response_Data/Data/Pump_pwr_detuning_sweeps/2021-07-07/2021-07-07_0409_Amp_1__pwr_-8.83_dBm_LO_freq_6172127180.18_Hz_Phase_0.0_rad_/2021-07-07_0409_Amp_1__pwr_-8.83_dBm_LO_freq_6172127180.18_Hz_Phase_0.0_rad_.ddh5'

    # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0002_3_state_40dB_att_Amp_0__pwr_-7.0_dBm_Rep_1__/2021-09-14_0002_3_state_40dB_att_Amp_0__pwr_-7.0_dBm_Rep_1__.ddh5'
    filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0027_3_state_40dB_att_Amp_1__pwr_-7.0_dBm_Rep_1__/2021-09-14_0027_3_state_40dB_att_Amp_1__pwr_-7.0_dBm_Rep_1__.ddh5'
    # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0034_3_state_40dB_att_Amp_1__pwr_-6.75_dBm_Rep_3__/2021-09-14_0034_3_state_40dB_att_Amp_1__pwr_-6.75_dBm_Rep_3__.ddh5'
    # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0038_3_state_40dB_att_Amp_1__pwr_-6.5_dBm_Rep_2__/2021-09-14_0038_3_state_40dB_att_Amp_1__pwr_-6.5_dBm_Rep_2__.ddh5'
    # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0044_3_state_40dB_att_Amp_1__pwr_-6.25_dBm_Rep_3__/2021-09-14_0044_3_state_40dB_att_Amp_1__pwr_-6.25_dBm_Rep_3__.ddh5'
    # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep/2021-09-14/2021-09-14_0046_3_state_40dB_att_Amp_1__pwr_-6.0_dBm_Rep_0__/2021-09-14_0046_3_state_40dB_att_Amp_1__pwr_-6.0_dBm_Rep_0__.ddh5'

    # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0001_3_state_40dB_att_Amp_0__pwr_-7.0_dBm_Rep_0__/2021-09-14_0001_3_state_40dB_att_Amp_0__pwr_-7.0_dBm_Rep_0__.ddh5'
    # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0028_3_state_40dB_att_Amp_1__pwr_-7.0_dBm_Rep_2__/2021-09-14_0028_3_state_40dB_att_Amp_1__pwr_-7.0_dBm_Rep_2__.ddh5'
    # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0031_3_state_40dB_att_Amp_1__pwr_-6.75_dBm_Rep_0__/2021-09-14_0031_3_state_40dB_att_Amp_1__pwr_-6.75_dBm_Rep_0__.ddh5'
    # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0036_3_state_40dB_att_Amp_1__pwr_-6.5_dBm_Rep_0__/2021-09-14_0036_3_state_40dB_att_Amp_1__pwr_-6.5_dBm_Rep_0__.ddh5'
    # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0042_3_state_40dB_att_Amp_1__pwr_-6.25_dBm_Rep_1__/2021-09-14_0042_3_state_40dB_att_Amp_1__pwr_-6.25_dBm_Rep_1__.ddh5'
    # filepath = r'Z:/Data/Hakan/SH_5B1_SS_Gain_6.064GHz/3_state/gen_pwr_sweep_+500kHz/2021-09-14/2021-09-14_0047_3_state_40dB_att_Amp_1__pwr_-6.0_dBm_Rep_1__/2021-09-14_0047_3_state_40dB_att_Amp_1__pwr_-6.0_dBm_Rep_1__.ddh5'

    #loopback for sat_discriminator
    # filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\loopbacks\2021-09-30\2021-09-30_0009_3_state_loopback_0dB_att_Rep_0__\2021-09-30_0009_3_state_loopback_0dB_att_Rep_0__.ddh5'
    # filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0002_3_state_deep_sat_40dB_att_Rep_0__\2021-09-30_0002_3_state_deep_sat_40dB_att_Rep_0__.ddh5'

    #in order of increasing power
    # filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0003_3_state_deep_sat_40dB_att_Sig_Volt_0.2_V_Rep_0__\2021-09-30_0003_3_state_deep_sat_40dB_att_Sig_Volt_0.2_V_Rep_0__.ddh5'
    # filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0004_3_state_deep_sat_40dB_att_Sig_Volt_0.25_V_Rep_0__\2021-09-30_0004_3_state_deep_sat_40dB_att_Sig_Volt_0.25_V_Rep_0__.ddh5'
    # filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0005_3_state_deep_sat_40dB_att_Sig_Volt_0.3_V_Rep_0__\2021-09-30_0005_3_state_deep_sat_40dB_att_Sig_Volt_0.3_V_Rep_0__.ddh5'
    filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0011_3_state_deep_sat_40dB_att_Sig_Volt_0.6_V_Rep_0__\2021-09-30_0011_3_state_deep_sat_40dB_att_Sig_Volt_0.6_V_Rep_0__.ddh5'

    #longer time:
    # filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0017_3_state_deep_sat_40dB_att_8us_time_Rep_0__\2021-09-30_0017_3_state_deep_sat_40dB_att_8us_time_Rep_0__.ddh5'
    filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-09-30\2021-09-30_0030_3_state_deep_sat_40dB_att_8us_time_Rep_4__\2021-09-30_0030_3_state_deep_sat_40dB_att_8us_time_Rep_4__.ddh5'

    #SWEEPING power
    #0.55V
    filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\2021-10-01_0076_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.55_V_\2021-10-01_0076_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.55_V_.ddh5'

    #0.6V
    filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\2021-10-01_0077_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.6_V_\2021-10-01_0077_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.6_V_.ddh5'

    #0.95V
    filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\2021-10-01_0084_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.95_V_\2021-10-01_0084_3_state_deep_sat_40dB_att_2V_Sig_Volt_0.95_V_.ddh5'

    #1.1V
    filepath = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\2021-10-01_0088_3_state_deep_sat_40dB_att_2V_Sig_Volt_1.15_V_\2021-10-01_0088_3_state_deep_sat_40dB_att_2V_Sig_Volt_1.15_V_.ddh5'

    #WTF Trigger?

    # easygui is third-party; imported lazily here so the module can be
    # imported without it installed
    import easygui
    filepath = easygui.fileopenbox(default = r'Z:\Data\Hakan\SH_5B1_SS_Gain_6.064GHz\3_state\saturation_discriminator\2021-10-01\*')

    # PU.get_normalizing_voltage_from_filepath(amp_off_filepath, plot = False, hist_scale = 0.01, records_per_pulsetype = 3870*2)
    # IQ_offset = PU.get_IQ_offset_from_filepath(filepath, plot = False, hist_scale = 0.002, records_per_pulsetype = 3840*2)
    # PU.get_fidelity_from_filepath_3_state(filepath, plot = True, hist_scale = 0.05, records_per_pulsetype = 2562, state_relabel = 0, bin_start = 50, bin_stop = 400)
    PU.get_fidelity_from_filepath_3_state(filepath, plot = True, hist_scale = 0.05, records_per_pulsetype = 7686//3, state_relabel = 0, bin_start = 50, bin_stop = 400)
    IQ_offset = (0,0)
+
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Apr 27 10:17:43 2021
+
+@author: Hatlab_3
+"""
+import numpy as np
+import time
+
+import matplotlib.pyplot as plt
+
def demod(signal_data, reference_data, mod_freq = 50e6, sampling_rate = 1e9):
    '''
    Digitally demodulate a signal trace and a reference trace at mod_freq.

    Both traces are multiplied by sine/cosine at the modulation frequency and
    integrated over each modulation period, yielding one (I, Q) point per
    period.  Signal and reference are assumed to be the same length.

    Parameters
    ----------
    signal_data : np 1D array - float64
        signal datapoints
    reference_data : np 1D array - float64
        reference datapoints
    mod_freq : float64, optional
        Modulation frequency in Hz. The default is 50e6.
    sampling_rate : float64, optional
        sampling rate in samples per second. The default is 1e9.

    Returns
    -------
    sig_I_summed : np 1D array - float64
        Signal multiplied by Sine and integrated over each period.
    sig_Q_summed : np 1D array - float64
        Signal multiplied by Cosine and integrated over each period.
    ref_I_summed : np 1D array - float64
        Reference multiplied by Sine and integrated over each period.
    ref_Q_summed : np 1D array - float64
        Reference multiplied by Cosine and integrated over each period.
    '''
    # samples per modulation period (assumes sampling_rate is a multiple of mod_freq)
    period = int(sampling_rate/mod_freq)

    # Zero-pad each trace up to a whole number of periods; the padded tail
    # makes the last point slightly inaccurate, but thousands of records are
    # averaged downstream.
    # BUG FIX: the old pad length was `period - size % period`, which appended
    # a FULL period of zeros (a spurious all-zero output point) whenever the
    # trace length was already an exact multiple of the period.  The extra
    # `% period` makes the pad zero in that case.
    sig_pad = int((period - np.size(signal_data) % period) % period)
    ref_pad = int((period - np.size(reference_data) % period) % period)
    signal_data = np.pad(signal_data, (0, sig_pad))
    reference_data = np.pad(reference_data, (0, ref_pad))

    # One LO sample per data sample, phase-locked across the whole trace.
    point_number = np.arange(np.size(reference_data))
    SinArray = np.sin(2*np.pi/period*point_number)
    CosArray = np.cos(2*np.pi/period*point_number)

    sig_I = signal_data*SinArray
    sig_Q = signal_data*CosArray
    ref_I = reference_data*SinArray
    ref_Q = reference_data*CosArray

    # Cut each product into periods and sum within one period.
    # sqrt(2) is the RMS correction for sin/cos squared; /period removes the
    # units of time from the sum.
    sig_I_summed = np.sum(sig_I.reshape(np.size(sig_I)//period, period), axis = 1)*(np.sqrt(2)/period)
    sig_Q_summed = np.sum(sig_Q.reshape(np.size(sig_Q)//period, period), axis = 1)*(np.sqrt(2)/period)
    ref_I_summed = np.sum(ref_I.reshape(np.size(ref_I)//period, period), axis = 1)*(np.sqrt(2)/period)
    ref_Q_summed = np.sum(ref_Q.reshape(np.size(ref_Q)//period, period), axis = 1)*(np.sqrt(2)/period)

    return (sig_I_summed, sig_Q_summed, ref_I_summed, ref_Q_summed)
+
def phase_correction(sigI, sigQ, refI, refQ):
    '''
    Rotate every signal record by its record-averaged reference phase.

    For each record the reference I/Q samples are averaged to a single phasor
    (rI_avg, rQ_avg); the signal record is then rotated by the conjugate of
    that phasor (and scaled by 1/|ref|), which removes the common reference
    phase drift from record to record.

    Parameters
    ----------
    sigI, sigQ : np 2D array - (records, samples) float64
        demodulated signal, in-phase / quadrature
    refI, refQ : np 2D array - (records, samples) float64
        demodulated reference, in-phase / quadrature

    Note: reference and signal arrays must all be of the same length.

    Returns
    -------
    sigI_corrected, sigQ_corrected : np 2D array - float64
        Signal I/Q rotated by the per-record reference phase.
    rI_trace, rQ_trace : np 1D array - float64
        The per-record averaged reference phasor components.
    '''
    n_records = np.shape(sigI)[0]
    sigI_corrected = np.zeros(np.shape(sigI))
    sigQ_corrected = np.zeros(np.shape(sigQ))
    rI_trace = np.zeros(n_records)
    rQ_trace = np.zeros(n_records)

    for rec in range(n_records):
        # average reference phasor for this record
        rI_avg = np.average(refI[rec])
        rQ_avg = np.average(refQ[rec])
        rI_trace[rec] = rI_avg
        rQ_trace[rec] = rQ_avg

        ref_mag = np.sqrt(rI_avg**2 + rQ_avg**2)

        # rotate by -angle(ref) and normalize by |ref|
        sigI_corrected[rec] = (sigI[rec]*rI_avg + sigQ[rec]*rQ_avg)/ref_mag
        sigQ_corrected[rec] = (-sigI[rec]*rQ_avg + sigQ[rec]*rI_avg)/ref_mag

    return sigI_corrected, sigQ_corrected, rI_trace, rQ_trace
+
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Apr 29 09:40:12 2021
+
+@author: Ryan Kaufman
+
+Set up function module that can assist in loading pulse sequences into AWG
+and functionalizing Alazar acquiring
+"""
+import numpy as np
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+from matplotlib.patches import Ellipse
+from scipy.optimize import curve_fit
+import matplotlib.pyplot as plt
+
+from plottr.data.datadict_storage import all_datadicts_from_hdf5
+
+
+
+
def Process_One_Acquisition(name, sI_c1, sI_c2, sQ_c1 ,sQ_c2, bin_start, bin_stop, hist_scale = 200, odd_only = False, even_only = False, plot = False):
    """
    Summarize one interleaved two-pulse-type acquisition: average traces,
    even-odd difference traces, and boxcar-windowed I/Q histograms.

    Parameters
    ----------
    name : str
        Title for the summary figure.
    sI_c1, sQ_c1 : 2D arrays (records, samples)
        I/Q traces of the "even" record set.
    sI_c2, sQ_c2 : 2D arrays (records, samples)
        I/Q traces of the "odd" record set.
    bin_start, bin_stop : int
        Sample window passed to boxcar_histogram for time-averaging each record.
    hist_scale : float, optional
        Histogram axis half-width (passed to boxcar_histogram as `scale`).
    odd_only, even_only : bool, optional
        Select which record set appears in the displayed histogram panel.
    plot : bool, optional
        If True, draw the summary figures and show them.

    Returns
    -------
    bins_even, bins_odd : 1D arrays
        Histogram bin edges for each record set.
    h_even.T, h_odd.T : 2D arrays
        Transposed 2D histogram counts for each record set.
    """
    if plot:
        # Figure 1: record-averaged I and Q traces plus the I/Q-plane trajectory.
        fig = plt.figure(1, figsize = (12,8))
        fig.suptitle(name, fontsize = 20)
        ax1 = fig.add_subplot(221)
        ax1.set_title("I")
        ax1.plot(np.average(sI_c1, axis = 0), label = 'even records')
        ax1.plot(np.average(sI_c2, axis = 0), label = 'odd_records')
        # ax1.set_aspect(1)
        ax1.legend(loc = 'upper right')
        ax2 = fig.add_subplot(222)
        ax2.set_title("Q")
        ax2.plot(np.average(sQ_c1, axis = 0), label = 'even records')
        ax2.plot(np.average(sQ_c2, axis = 0), label = 'odd records')
        # ax2.set_aspect(1)
        ax2.legend(loc = 'upper right')
        ax3 = fig.add_subplot(223)
        ax3.set_aspect(1)
        ax3.plot(np.average(sI_c1, axis = 0), np.average(sQ_c1, axis = 0))
        ax3.plot(np.average(sI_c2, axis = 0),np.average(sQ_c2, axis = 0))

        # Figure 2: even-odd difference traces and their squared magnitude.
        fig2 = plt.figure(2, figsize = (12,8))
        ax21 = fig2.add_subplot(221)
        ax21.set_title("I (even-odd records)")
        ax21.plot(np.average(sI_c1-sI_c2, axis = 0), label = 'even-odd records')

        # ax1.set_aspect(1)
        ax22 = fig2.add_subplot(222)
        ax22.set_title("Q (even-odd records)")
        ax22.plot(np.average(sQ_c1-sQ_c2, axis = 0), label = 'even-odd records')

        # ax2.set_aspect(1)
        ax23 = fig2.add_subplot(223)
        ax23.set_title("Trajectories")
        ax23.set_aspect(1)
        ax23.plot(np.average(sI_c1-sI_c2, axis = 0), np.average(sQ_c1-sQ_c2, axis = 0))


        ax24 = fig2.add_subplot(224)
        ax24.set_title("magnitudes")
        ax24.plot(np.average(sI_c1-sI_c2, axis = 0)**2+np.average(sQ_c1-sQ_c2, axis = 0)**2, label = 'magnitude')
        # fourth panel of figure 1, filled with a histogram further down
        ax4 = fig.add_subplot(224)

    # Always compute both histograms on a throwaway figure that is closed
    # immediately; only the returned bins/counts are used.
    # NOTE(review): this rebinds the name fig2, shadowing the difference-trace
    # figure created above when plot=True (that figure itself stays open).
    fig2, ax99 = plt.subplots()
    # print(np.shape(sI_c1))
    bins_even, h_even = boxcar_histogram(fig2, ax99, bin_start, bin_stop, sI_c1, sQ_c1, Ioffset = 0, Qoffset = 0, scale = hist_scale)
    bins_odd, h_odd = boxcar_histogram(fig2, ax99, bin_start, bin_stop, sI_c2, sQ_c2, Ioffset = 0, Qoffset = 0, scale = hist_scale)
    plt.close(fig2)

    if plot:
        # Draw the requested record set(s) into the fourth panel of figure 1.
        if even_only and not odd_only:
            print('displaying only even')
            boxcar_histogram(fig, ax4, bin_start, bin_stop, sI_c1, sQ_c1, Ioffset = 0, Qoffset = 0, scale = hist_scale)

        elif odd_only and not even_only:
            print('displaying only odd')
            boxcar_histogram(fig, ax4, bin_start, bin_stop, sI_c2, sQ_c2, Ioffset = 0, Qoffset = 0, scale = hist_scale)
        else:
            print('displaying both')
            boxcar_histogram(fig, ax4, bin_start, bin_stop, np.concatenate((sI_c1, sI_c2)), np.concatenate((sQ_c1, sQ_c2)), Ioffset = 0, Qoffset = 0, scale = hist_scale)
        plt.show()
    return bins_even, bins_odd, h_even.T, h_odd.T
+
def Process_One_Acquisition_3_state(name, sI_c1, sI_c2, sI_c3, sQ_c1 ,sQ_c2, sQ_c3, bin_start, bin_stop, hist_scale = 200, odd_only = False, even_only = False, plot = False):
    """
    Three-state (G/E/F) variant of Process_One_Acquisition: average traces and
    boxcar-windowed I/Q histograms for three record sets.

    Parameters
    ----------
    name : str
        Title for the summary figure.
    sI_c1, sQ_c1 : 2D arrays (records, samples)
        I/Q traces of the G record set.
    sI_c2, sQ_c2 : 2D arrays (records, samples)
        I/Q traces of the E record set.
    sI_c3, sQ_c3 : 2D arrays (records, samples)
        I/Q traces of the F record set.
    bin_start, bin_stop : int
        Sample window passed to boxcar_histogram.
    hist_scale : float, optional
        Histogram axis half-width.
    odd_only, even_only : bool, optional
        Kept for interface parity with the 2-state version; they select which
        of the first two record sets is drawn in the histogram panel.
    plot : bool, optional
        If True, draw the summary figures.

    Returns
    -------
    bins_G, bins_E, bins_F : 1D arrays
        Histogram bin edges per state.
    h_G.T, h_E.T, h_F.T : 2D arrays
        Transposed 2D histogram counts per state.
    """
    if plot:
        # Figure 1: record-averaged I and Q traces plus I/Q-plane trajectories.
        fig = plt.figure(1, figsize = (12,8))
        fig.suptitle(name, fontsize = 20)
        ax1 = fig.add_subplot(221)
        ax1.set_title("I")
        ax1.plot(np.average(sI_c1, axis = 0), label = 'G_records')
        ax1.plot(np.average(sI_c2, axis = 0), label = 'E_records')
        ax1.plot(np.average(sI_c3, axis = 0), label = 'F_records')
        # ax1.set_aspect(1)
        ax1.legend(loc = 'upper right')
        ax2 = fig.add_subplot(222)
        ax2.set_title("Q")
        ax2.plot(np.average(sQ_c1, axis = 0), label = 'G records')
        ax2.plot(np.average(sQ_c2, axis = 0), label = 'E records')
        ax2.plot(np.average(sQ_c3, axis = 0), label = 'F records')
        # ax2.set_aspect(1)
        ax2.legend(loc = 'upper right')
        ax3 = fig.add_subplot(223)
        ax3.set_aspect(1)
        ax3.plot(np.average(sI_c1, axis = 0), np.average(sQ_c1, axis = 0))
        ax3.plot(np.average(sI_c2, axis = 0),np.average(sQ_c2, axis = 0))
        ax3.plot(np.average(sI_c3, axis = 0),np.average(sQ_c3, axis = 0))

        # Difference-trace figure from the 2-state version, kept for reference.
        # #figure for difference trace
        # fig2 = plt.figure(2, figsize = (12,8))
        # ax21 = fig2.add_subplot(221)
        # ax21.set_title("I (even-odd records)")
        # ax21.plot(np.average(sI_c1-sI_c2, axis = 0), label = 'even-odd records')

        # # ax1.set_aspect(1)
        # ax22 = fig2.add_subplot(222)
        # ax22.set_title("Q (even-odd records)")
        # ax22.plot(np.average(sQ_c1-sQ_c2, axis = 0), label = 'even-odd records')

        # # ax2.set_aspect(1)
        # ax23 = fig2.add_subplot(223)
        # ax23.set_title("Trajectories")
        # ax23.set_aspect(1)
        # ax23.plot(np.average(sI_c1-sI_c2, axis = 0), np.average(sQ_c1-sQ_c2, axis = 0))


        # ax24 = fig2.add_subplot(224)
        # ax24.set_title("magnitudes")
        # ax24.plot(np.average(sI_c1-sI_c2, axis = 0)**2+np.average(sQ_c1-sQ_c2, axis = 0)**2, label = 'magnitude')
        # fourth panel of figure 1, filled with a histogram further down
        ax4 = fig.add_subplot(224)

    # Always compute the three histograms on a throwaway figure that is closed
    # immediately; only the returned bins/counts are used.
    fig2, ax99 = plt.subplots()
    # print(np.shape(sI_c1))
    bins_G, h_G = boxcar_histogram(fig2, ax99, bin_start, bin_stop, sI_c1, sQ_c1, Ioffset = 0, Qoffset = 0, scale = hist_scale)
    bins_E, h_E = boxcar_histogram(fig2, ax99, bin_start, bin_stop, sI_c2, sQ_c2, Ioffset = 0, Qoffset = 0, scale = hist_scale)
    bins_F, h_F = boxcar_histogram(fig2, ax99, bin_start, bin_stop, sI_c3, sQ_c3, Ioffset = 0, Qoffset = 0, scale = hist_scale)
    plt.close(fig2)

    if plot:
        if even_only and not odd_only:
            print('displaying only even')
            boxcar_histogram(fig, ax4, bin_start, bin_stop, sI_c1, sQ_c1, Ioffset = 0, Qoffset = 0, scale = hist_scale)

        elif odd_only and not even_only:
            print('displaying only odd')
            boxcar_histogram(fig, ax4, bin_start, bin_stop, sI_c2, sQ_c2, Ioffset = 0, Qoffset = 0, scale = hist_scale)
        else:
            print('displaying both')
            # all three states pooled into one histogram
            boxcar_histogram(fig, ax4, bin_start, bin_stop, np.concatenate((sI_c1, sI_c2, sI_c3)), np.concatenate((sQ_c1, sQ_c2, sQ_c3)), Ioffset = 0, Qoffset = 0, scale = hist_scale)
        # plt.show()
    return bins_G, bins_E, bins_F, h_G.T, h_E.T, h_F.T
+
+
def boxcar_histogram(fig, ax, start_pt, stop_pt, sI, sQ, Ioffset = 0, Qoffset = 0, scale = 1, num_bins = 100):
    """
    Boxcar-average each record over [start_pt, stop_pt) and draw a 2D I/Q
    histogram of the resulting points on `ax`, with a colorbar on `fig`.

    Parameters
    ----------
    fig, ax : matplotlib Figure and Axes to draw into.
    start_pt, stop_pt : int
        Sample window averaged within each record.
    sI, sQ : 2D arrays (records, samples)
        Demodulated I and Q traces.
    Ioffset, Qoffset : float, optional
        Background offsets subtracted before averaging.
    scale : float, optional
        Half-width of the (square, symmetric) histogram axes.
    num_bins : int, optional
        Number of bin edges per axis.

    Returns
    -------
    bins : 1D array of bin edges (same for both axes).
    h : 2D histogram counts from ax.hist2d.
    """
    # one boxcar-averaged, background-subtracted point per record
    I_pts = [np.average(rec[start_pt:stop_pt] - Ioffset) for rec in sI]
    Q_pts = [np.average(rec[start_pt:stop_pt] - Qoffset) for rec in sQ]

    divider = make_axes_locatable(ax)
    ax.set_aspect(1)
    bins = np.linspace(-1,1, num_bins)*scale
    h, xedges, yedges, im = ax.hist2d(I_pts, Q_pts, bins = [bins, bins])

    # colorbar in its own axes to the right of the histogram
    cax = divider.append_axes('right', size='5%', pad=0.05)
    fig.colorbar(im, cax = cax, orientation = 'vertical')
    ax.grid()

    return bins, h
+
def Gaussian_2D(M,amplitude, xo, yo, sigma_x, sigma_y, theta):
    """
    Rotated 2D Gaussian surface, curve_fit-compatible.

    Parameters
    ----------
    M : tuple (x, y) of arrays
        Coordinates at which to evaluate the Gaussian.
    amplitude : float
        Peak height.
    xo, yo : float
        Center coordinates.
    sigma_x, sigma_y : float
        Standard deviations along the (rotated) principal axes.
    theta : float
        Rotation angle of the principal axes, in radians.

    Returns
    -------
    Array of Gaussian values, same shape as x and y.
    """
    x, y = M
    xo, yo = float(xo), float(yo)

    # quadratic-form coefficients for the rotated covariance
    cos2, sin2 = np.cos(theta)**2, np.sin(theta)**2
    a = cos2/(2*sigma_x**2) + sin2/(2*sigma_y**2)
    b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
    c = sin2/(2*sigma_x**2) + cos2/(2*sigma_y**2)

    dx, dy = x - xo, y - yo
    return amplitude*np.exp(-(a*dx**2 + 2*b*dx*dy + c*dy**2))
+
+
class Gaussian_info:
    """
    Container for one 2D-Gaussian fit result.

    All data lives in self.info_dict; fit_2D_Gaussian fills the keys
    name, canvas, amplitude, x0, y0, sigma_x, sigma_y, theta, popt, pcov,
    contour.
    """
    def __init__(self):
        self.info_dict = {}

    def print_info(self):
        """Print every entry except the raw fit arrays popt/pcov."""
        for key, val in self.info_dict.items():
            if key == 'popt':
                pass
            elif key == 'pcov':
                pass
            else:
                print(key, ': ', val)

    def __sub__(self, other_GC):
        """
        Entry-wise difference of scalar fit values; non-scalar entries
        (arrays, strings, ...) map to None in the result.
        """
        sub_class = Gaussian_info()
        for key, val in self.info_dict.items():
            # BUG FIX: the old check `type(val) == np.float64` silently skipped
            # plain Python floats and other float dtypes (e.g. np.float32),
            # turning valid scalar differences into None.  isinstance covers
            # np.float64 plus those cases, and is the idiomatic type test.
            if isinstance(val, (float, np.floating)):
                sub_class.info_dict[key] = val - other_GC.info_dict[key]
            else:
                sub_class.info_dict[key] = None
        return sub_class

    def plot_on_ax(self, ax, displacement = np.array([0,0]), color = 'white'):
        """
        Draw an arrow from the origin to the fitted center on `ax`.

        NOTE(review): relies on self.center_vec(), which is not defined in this
        class as shown here -- presumably provided elsewhere; confirm.
        (`displacement` is currently unused.)
        """
        ax.annotate("", xy=self.center_vec(), xytext=(0, 0), arrowprops=dict(arrowstyle = '->', lw = 3, color = color))

    def sigma_contour(self):
        """Dashed red Ellipse at the fitted center with the fitted sigmas and angle."""
        x0, y0 = self.center_vec()
        sx = self.info_dict['sigma_x']
        sy = self.info_dict['sigma_y']
        angle = self.info_dict['theta']
        # theta is in radians; Ellipse wants degrees
        return Ellipse((x0, y0), sx, sy, angle = angle/(2*np.pi)*360,
                       fill = False,
                       ls = '--',
                       color = 'red',
                       lw = 2)
+
+
def fit_2D_Gaussian(name,
                    bins,
                    h_arr,
                    guessParams,
                    max_fev = 100,
                    contour_line = 3):
    """
    Fit a rotated 2D Gaussian to a 2D histogram and package the result.

    Parameters
    ----------
    name : str
        Label stored in the returned Gaussian_info.
    bins : 1D array
        Histogram bin edges (same edges for both axes); one edge is dropped
        so the grid matches h_arr's shape.
    h_arr : 2D array
        Histogram counts to fit.
    guessParams : sequence
        Initial guess [amplitude, x0, y0, sigma_x, sigma_y, theta].
    max_fev : int, optional
        Maximum function evaluations for curve_fit.
    contour_line : int, optional
        Which contour level get_contour_line extracts.

    Returns
    -------
    GC : Gaussian_info
        Fit parameters, covariance, and one extracted contour line.
    """
    X, Y = np.meshgrid(bins[0:-1], bins[0:-1])
    resh_size = np.shape(X)
    # curve_fit wants flat coordinate/value arrays
    xdata, ydata = np.vstack((X.ravel(), Y.ravel())), h_arr.ravel()
    print("running curve_fit")
    # parameter order: amplitude, xo, yo, sigma_x, sigma_y, theta
    # NOTE(review): a bounds array used to be built here but was never passed
    # to curve_fit, so it was dead code and has been removed.  Passing bounds
    # would switch curve_fit from 'lm' to 'trf' (and `maxfev` would have to
    # become `max_nfev`), changing fit behavior -- left unbounded as before.
    popt, pcov = curve_fit(Gaussian_2D, xdata, ydata, p0 = guessParams, maxfev = max_fev)

    GC = Gaussian_info()
    GC.info_dict['name'] = name
    GC.info_dict['canvas'] = xdata
    GC.info_dict['amplitude'] = popt[0]
    GC.info_dict['x0'] = popt[1]
    GC.info_dict['y0'] = popt[2]
    # sigmas can come out negative from the fit; only their magnitude matters
    GC.info_dict['sigma_x'] = np.abs(popt[3])
    GC.info_dict['sigma_y'] = np.abs(popt[4])
    GC.info_dict['theta'] = popt[5]
    GC.info_dict['popt'] = popt
    GC.info_dict['pcov'] = pcov
    # contour of the FITTED surface (not the raw data), for overlay plots
    GC.info_dict['contour'] = get_contour_line(X, Y, Gaussian_2D(xdata, *popt).reshape(resh_size), contour_line = contour_line)

    return GC
+
def get_contour_line(cont_x, cont_y, contour_arr, contour_line = 3):
    """
    Extract the (x, y) vertices of one contour level of contour_arr.

    A throwaway figure is used so the contour computation leaves no visible
    plot behind.

    Parameters
    ----------
    cont_x, cont_y : 2D arrays
        Coordinate grids (as from np.meshgrid).
    contour_arr : 2D array
        Values to contour.
    contour_line : int, optional
        Index of the contour level to extract.

    Returns
    -------
    plot_x, plot_y : 1D arrays of the contour vertices.
    """
    fig = plt.figure()
    contour_map = plt.contour(cont_x, cont_y, contour_arr)
    plt.close(fig)
    # COMPAT FIX: ContourSet.collections was deprecated in matplotlib 3.8 and
    # removed in 3.10.  allsegs[level][0] is the first segment (vertex array)
    # of that level, equivalent to collections[level].get_paths()[0].vertices.
    v = contour_map.allsegs[contour_line][0]
    plot_y, plot_x = v[:,1], v[:,0]
    return plot_x, plot_y
+
def extract_2pulse_histogram_from_filepath(datapath, plot = False, bin_start = 55, bin_stop = 150, hist_scale = None, even_only = False, odd_only = False, numRecords = 3840*2, IQ_offset = (0,0)):
    """
    Load a two-pulse-type ddh5 acquisition, histogram both record sets, and
    build 2D-Gaussian fit guesses for each.

    Parameters
    ----------
    datapath : str
        Path to the ddh5 file.
    plot : bool, optional
        Forwarded to Process_One_Acquisition.
    bin_start, bin_stop : int, optional
        Boxcar window (sample indices) for the histograms and center guesses.
    hist_scale : float or None, optional
        Histogram half-width; when None it is auto-set to twice the largest
        averaged-trace value.
    even_only, odd_only : bool, optional
        Forwarded to Process_One_Acquisition (display selection).
    numRecords : int, optional
        Total number of records in the file (both pulse types combined).
    IQ_offset : tuple (I, Q), optional
        Offsets subtracted from the raw I/Q data.

    Returns
    -------
    bins_even, bins_odd, h_even, h_odd, guessParams
        Bin edges, 2D histogram counts, and [plus, minus] fit-guess lists
        ([amplitude, x0, y0, sigma_x, sigma_y, theta] each).
    """
    I_offset, Q_offset = IQ_offset
    dd = all_datadicts_from_hdf5(datapath)['data']

    time_unit = dd['time']['unit']

    # Each pulse type has numRecords//2 records; everything in the file is
    # stored flat and reshaped to (records, samples_per_record).
    n_records = numRecords//2
    rec_shape = (n_records, np.size(dd['time']['values'])//n_records)

    time_vals = dd['time']['values'].reshape(rec_shape)

    rec_unit = dd['record_num']['unit']
    rec_num = dd['record_num']['values'].reshape(rec_shape)

    I_plus = dd['I_plus']['values'].reshape(rec_shape)-I_offset
    I_minus = dd['I_minus']['values'].reshape(rec_shape)-I_offset

    Q_plus = dd['Q_plus']['values'].reshape(rec_shape)-Q_offset
    Q_minus = dd['Q_minus']['values'].reshape(rec_shape)-Q_offset

    # record-averaged traces, used for auto-scaling and fit-center guesses
    I_plus_avg = np.average(I_plus, axis = 0)
    I_minus_avg = np.average(I_minus, axis = 0)
    Q_plus_avg = np.average(Q_plus, axis = 0)
    Q_minus_avg = np.average(Q_minus, axis = 0)

    if hist_scale is None:  # `is None`, not `== None`
        hist_scale = 2*np.max(np.array([I_plus_avg, I_minus_avg, Q_plus_avg, Q_minus_avg]))

    bins_even, bins_odd, h_even, h_odd = Process_One_Acquisition(
        datapath.split('/')[-1].split('\\')[-1],
        I_plus, I_minus, Q_plus, Q_minus,
        bin_start, bin_stop,
        hist_scale = hist_scale, even_only = even_only, odd_only = odd_only, plot = plot)

    # curve_fit initial guesses: [amplitude, x0, y0, sigma_x, sigma_y, theta]
    Plus_x0Guess = np.average(I_plus_avg[bin_start:bin_stop])
    Plus_y0Guess = np.average(Q_plus_avg[bin_start:bin_stop])
    Plus_ampGuess = np.max(h_even)
    Plus_sxGuess = np.max(bins_even)/5
    Plus_syGuess = Plus_sxGuess
    Plus_thetaGuess = 0

    Minus_x0Guess = np.average(I_minus_avg[bin_start:bin_stop])
    Minus_y0Guess = np.average(Q_minus_avg[bin_start:bin_stop])
    Minus_ampGuess = np.max(h_even)
    Minus_sxGuess = np.max(bins_even)/5
    Minus_syGuess = Minus_sxGuess
    Minus_thetaGuess = 0

    guessParams = [[Plus_ampGuess, Plus_x0Guess, Plus_y0Guess, Plus_sxGuess, Plus_syGuess, Plus_thetaGuess],
                   [Minus_ampGuess, Minus_x0Guess, Minus_y0Guess, Minus_sxGuess, Minus_syGuess, Minus_thetaGuess]]

    return bins_even, bins_odd, h_even, h_odd, guessParams
+
def extract_3pulse_histogram_from_filepath(datapath, plot = False, bin_start = 55, bin_stop = 150, hist_scale = None, even_only = False, odd_only = False, numRecords = 3840*2, IQ_offset = (0,0), state_relabel = 0):
    """
    Load a three-pulse-type (G/E/F) ddh5 acquisition, histogram the three
    record sets, and build 2D-Gaussian fit guesses for each.

    Parameters
    ----------
    datapath : str
        Path to the ddh5 file.
    plot, even_only, odd_only : bool, optional
        Forwarded to Process_One_Acquisition_3_state.
    bin_start, bin_stop : int, optional
        Boxcar window for the histograms and center guesses.
    hist_scale : float or None, optional
        Histogram half-width; auto-set from the averaged traces when None.
    numRecords : int, optional
        Total number of records (all three pulse types combined).
    IQ_offset : tuple (I, Q), optional
        Offsets subtracted from the raw I/Q data.
    state_relabel : int, optional
        Cyclic permutation (0, 1, or 2) of which stored channel is treated
        as G/E/F.

    Returns
    -------
    bins_G, bins_E, bins_F, h_G, h_E, h_F, guessParams

    Raises
    ------
    ValueError
        If state_relabel is not 0, 1 or 2.
    """
    I_offset, Q_offset = IQ_offset
    dd = all_datadicts_from_hdf5(datapath)['data']

    time_unit = dd['time']['unit']

    # Each pulse type has numRecords//3 records; flat data is reshaped to
    # (records, samples_per_record).
    n_records = numRecords//3
    rec_shape = (n_records, np.size(dd['time']['values'])//n_records)

    time_vals = dd['time']['values'].reshape(rec_shape)

    rec_unit = dd['record_num']['unit']
    rec_num = dd['record_num']['values'].reshape(rec_shape)

    print('cycling labels by ', state_relabel)
    # state_relabel cyclically permutes which stored channel is read as G/E/F
    relabel_map = {0: ('G', 'E', 'F'),
                   1: ('E', 'F', 'G'),
                   2: ('F', 'G', 'E')}
    try:
        src_G, src_E, src_F = relabel_map[state_relabel]
    except KeyError:
        # ROBUSTNESS FIX: an unsupported value previously fell through the
        # if/elif chain and crashed later with UnboundLocalError.
        raise ValueError(f"state_relabel must be 0, 1 or 2, got {state_relabel!r}")

    I_G = dd['I_' + src_G]['values'].reshape(rec_shape)-I_offset
    I_E = dd['I_' + src_E]['values'].reshape(rec_shape)-I_offset
    I_F = dd['I_' + src_F]['values'].reshape(rec_shape)-I_offset

    Q_G = dd['Q_' + src_G]['values'].reshape(rec_shape)-Q_offset
    Q_E = dd['Q_' + src_E]['values'].reshape(rec_shape)-Q_offset
    Q_F = dd['Q_' + src_F]['values'].reshape(rec_shape)-Q_offset

    # record-averaged traces, used for auto-scaling and fit-center guesses
    I_G_avg = np.average(I_G, axis = 0)
    I_E_avg = np.average(I_E, axis = 0)
    I_F_avg = np.average(I_F, axis = 0)

    Q_G_avg = np.average(Q_G, axis = 0)
    Q_E_avg = np.average(Q_E, axis = 0)
    Q_F_avg = np.average(Q_F, axis = 0)

    if hist_scale is None:  # `is None`, not `== None`
        # NOTE(review): only the G/E traces enter the auto-scale, matching the
        # original behavior -- F may fall outside the histogram; confirm intent.
        hist_scale = 2*np.max(np.array([I_G_avg, I_E_avg, Q_G_avg, Q_E_avg]))

    bins_G, bins_E, bins_F, h_G, h_E, h_F = Process_One_Acquisition_3_state(
        datapath.split('/')[-1].split('\\')[-1],
        I_G, I_E, I_F, Q_G, Q_E, Q_F,
        bin_start, bin_stop,
        hist_scale = hist_scale, even_only = even_only, odd_only = odd_only, plot = plot)

    # curve_fit initial guesses: [amplitude, x0, y0, sigma_x, sigma_y, theta]
    G_x0Guess = np.average(I_G_avg[bin_start:bin_stop])
    G_y0Guess = np.average(Q_G_avg[bin_start:bin_stop])
    G_ampGuess = np.max(h_G)
    G_sxGuess = np.max(bins_G)/5
    G_syGuess = G_sxGuess
    G_thetaGuess = 0

    E_x0Guess = np.average(I_E_avg[bin_start:bin_stop])
    E_y0Guess = np.average(Q_E_avg[bin_start:bin_stop])
    E_ampGuess = np.max(h_E)
    E_sxGuess = np.max(bins_E)/5
    E_syGuess = E_sxGuess
    E_thetaGuess = 0

    F_x0Guess = np.average(I_F_avg[bin_start:bin_stop])
    F_y0Guess = np.average(Q_F_avg[bin_start:bin_stop])
    F_ampGuess = np.max(h_F)
    F_sxGuess = np.max(bins_F)/5
    F_syGuess = F_sxGuess
    F_thetaGuess = 0

    guessParams = [[G_ampGuess, G_x0Guess, G_y0Guess, G_sxGuess, G_syGuess, G_thetaGuess],
                   [E_ampGuess, E_x0Guess, E_y0Guess, E_sxGuess, E_syGuess, E_thetaGuess],
                   [F_ampGuess, F_x0Guess, F_y0Guess, F_sxGuess, F_syGuess, F_thetaGuess]]

    return bins_G, bins_E, bins_F, h_G, h_E, h_F, guessParams
+
def get_normalizing_voltage_from_filepath(amp_off_filepath, plot = False, hist_scale = None, records_per_pulsetype = 3870*2):
    """
    Fit the amp-off even/odd histograms with 2D Gaussians and return the
    average displaced-state voltage (norm of the fitted centers * 1000).

    Parameters
    ----------
    amp_off_filepath : str
        Path to the amplifier-off ddh5 acquisition.
    plot : bool, optional
        If True, show the fitted histograms with center arrows and sigma ellipses.
    hist_scale : float or None, optional
        Histogram half-width; auto-computed downstream when None.
    records_per_pulsetype : int, optional
        Total record count passed through as numRecords.

    Returns
    -------
    amp_off_voltage : float
        1000 * average norm of the even/odd Gaussian fit centers.
    """
    # BUG FIX: numRecords was hard-coded to int(3840*2), silently ignoring the
    # records_per_pulsetype argument; the sibling get_IQ_offset_from_filepath
    # passes it through, so do the same here.
    bins_even, bins_odd, h_even, h_odd, guessParam = extract_2pulse_histogram_from_filepath(amp_off_filepath,
                                                                                           odd_only = 0,
                                                                                           numRecords = records_per_pulsetype,
                                                                                           IQ_offset = (0,0),
                                                                                           plot = plot,
                                                                                           hist_scale = hist_scale)

    amp_off_even_fit = fit_2D_Gaussian('amp_off_even', bins_even, h_even,
                                       guessParam[0],
                                       max_fev = 1000,
                                       contour_line = 2)
    amp_off_odd_fit = fit_2D_Gaussian('amp_off_odd', bins_odd, h_odd,
                                      guessParam[1],
                                      max_fev = 1000,
                                      contour_line = 2)
    even_fit = amp_off_even_fit
    odd_fit = amp_off_odd_fit

    # Bhattacharyya-style overlap of the two normalized histograms
    histogram_data_fidelity = 1-1/2*np.sum(np.sqrt((h_odd/records_per_pulsetype)*(h_even/records_per_pulsetype)))

    # fine grid for evaluating the fitted (normalized) Gaussians
    bins_fine = np.linspace(np.min([bins_even, bins_odd]), np.max([bins_even, bins_odd]), 1000)

    even_fit_h = Gaussian_2D(np.meshgrid(bins_fine, bins_fine), *even_fit.info_dict['popt'])/(2*np.pi*even_fit.info_dict['amplitude']*even_fit.info_dict['sigma_x']*even_fit.info_dict['sigma_y'])

    odd_fit_h = Gaussian_2D(np.meshgrid(bins_fine, bins_fine), *odd_fit.info_dict['popt'])/(2*np.pi*odd_fit.info_dict['amplitude']*odd_fit.info_dict['sigma_x']*odd_fit.info_dict['sigma_y'])

    fit_fidelity = 1-1/2*np.sum(np.sqrt(np.abs(even_fit_h)/np.sum(even_fit_h)*np.abs(odd_fit_h)/np.sum(odd_fit_h)))
    print(f"Histogram data fidelity: {histogram_data_fidelity}\nFit fidelity: {fit_fidelity}")

    if plot:
        fig, ax = plt.subplots()
        pc = ax.pcolormesh(bins_even, bins_even, h_even)
        amp_off_even_fit.plot_on_ax(ax)
        ax.add_patch(amp_off_even_fit.sigma_contour())
        ax.set_aspect(1)
        plt.colorbar(pc)

        fig, ax = plt.subplots()
        pc = ax.pcolormesh(bins_odd, bins_odd, h_odd)
        amp_off_odd_fit.plot_on_ax(ax)
        ax.add_patch(amp_off_odd_fit.sigma_contour())
        ax.set_aspect(1)
        plt.colorbar(pc)

    # NOTE(review): center_vec() is not defined on Gaussian_info as shown in
    # this file -- presumably attached elsewhere; confirm.
    amp_off_voltage = np.average([np.linalg.norm(amp_off_odd_fit.center_vec()), np.linalg.norm(amp_off_even_fit.center_vec())])*1000

    return amp_off_voltage
+
def get_IQ_offset_from_filepath(amp_off_filepath, plot = False, hist_scale = None, records_per_pulsetype = 3840*2):
    """
    Fit the amp-off even/odd histograms and return the average fitted center
    as an (I, Q) offset for later background subtraction.

    Parameters
    ----------
    amp_off_filepath : str
        Path to the amplifier-off ddh5 acquisition.
    plot : bool, optional
        If True, show the fitted histograms with center arrows and sigma ellipses.
    hist_scale : float or None, optional
        Histogram half-width; auto-computed downstream when None.
    records_per_pulsetype : int, optional
        Total record count passed through as numRecords.

    Returns
    -------
    offset : 1D array (2,)
        Average of the even/odd fitted centers, i.e. the (I, Q) offset.
    """
    # CONSISTENCY FIX: plot was hard-coded to True here, ignoring the `plot`
    # parameter; forward it like the sibling get_normalizing_voltage_from_filepath.
    bins_even, bins_odd, h_even, h_odd, guessParam = extract_2pulse_histogram_from_filepath(amp_off_filepath,
                                                                                           odd_only = 0,
                                                                                           numRecords = records_per_pulsetype,
                                                                                           IQ_offset = (0,0),
                                                                                           plot = plot,
                                                                                           hist_scale = hist_scale)

    amp_off_even_fit = fit_2D_Gaussian('amp_off_even', bins_even, h_even,
                                       guessParam[0],
                                       max_fev = 1000,
                                       contour_line = 2)
    amp_off_odd_fit = fit_2D_Gaussian('amp_off_odd', bins_odd, h_odd,
                                      guessParam[1],
                                      max_fev = 1000,
                                      contour_line = 2)
    even_fit = amp_off_even_fit
    odd_fit = amp_off_odd_fit

    # Bhattacharyya-style overlap of the two normalized histograms
    histogram_data_fidelity = 1-1/2*np.sum(np.sqrt((h_odd/records_per_pulsetype)*(h_even/records_per_pulsetype)))

    bins_fine = np.linspace(np.min([bins_even, bins_odd]), np.max([bins_even, bins_odd]), 1000)

    even_fit_h = Gaussian_2D(np.meshgrid(bins_fine, bins_fine), *even_fit.info_dict['popt'])/(2*np.pi*even_fit.info_dict['amplitude']*even_fit.info_dict['sigma_x']*even_fit.info_dict['sigma_y'])

    odd_fit_h = Gaussian_2D(np.meshgrid(bins_fine, bins_fine), *odd_fit.info_dict['popt'])/(2*np.pi*odd_fit.info_dict['amplitude']*odd_fit.info_dict['sigma_x']*odd_fit.info_dict['sigma_y'])

    fit_fidelity = 1-1/2*np.sum(np.sqrt(np.abs(even_fit_h)/np.sum(even_fit_h)*np.abs(odd_fit_h)/np.sum(odd_fit_h)))
    print(f"Histogram data fidelity: {histogram_data_fidelity}\nFit fidelity: {fit_fidelity}")

    if plot:
        fig, ax = plt.subplots()
        pc = ax.pcolormesh(bins_even, bins_even, h_even)
        amp_off_even_fit.plot_on_ax(ax)
        ax.add_patch(amp_off_even_fit.sigma_contour())
        ax.set_aspect(1)
        plt.colorbar(pc)

        fig, ax = plt.subplots()
        pc = ax.pcolormesh(bins_odd, bins_odd, h_odd)
        amp_off_odd_fit.plot_on_ax(ax)
        ax.add_patch(amp_off_odd_fit.sigma_contour())
        ax.set_aspect(1)
        plt.colorbar(pc)

    # NOTE(review): center_vec() is not defined on Gaussian_info as shown in
    # this file -- presumably attached elsewhere; confirm.
    offset = np.average(np.array([amp_off_odd_fit.center_vec(), amp_off_even_fit.center_vec()]), axis = 0)

    return offset
+
+
+
def get_fidelity_from_filepath(filepath, plot = False, hist_scale = None, records_per_pulsetype = 3870*2):
    """
    Compute two-state readout fidelity from a two-pulse-type acquisition,
    both directly from the histograms and from the 2D-Gaussian fits.

    Parameters
    ----------
    filepath : str
        Path to the ddh5 acquisition.
    plot : bool, optional
        If True, show the fitted histograms and the discriminant map.
    hist_scale : float or None, optional
        Histogram half-width; auto-computed downstream when None.
    records_per_pulsetype : int, optional
        Total record count passed through as numRecords.

    Returns
    -------
    data_fidelity : float
        1 - (misclassified fraction) computed from the raw histograms.
    fit_fidelity : float
        Same quantity computed from the fitted Gaussians.
    even_fit, odd_fit : Gaussian_info
        The two fit results.
    """
    # NOTE(review): plot is hard-coded to True in this call, so the extraction
    # figures always appear regardless of the `plot` argument -- confirm intent.
    bins_even, bins_odd, h_even, h_odd, guessParam = extract_2pulse_histogram_from_filepath(filepath,
                                                                                           odd_only = 0,
                                                                                           numRecords = records_per_pulsetype,
                                                                                           IQ_offset = (0,0),
                                                                                           plot = True,
                                                                                           hist_scale = hist_scale)
    # normalized histograms (probability per bin)
    h_odd_norm = np.copy(h_odd/np.sum(h_odd))
    h_even_norm = np.copy(h_even/np.sum(h_even))

    amp_off_even_fit = fit_2D_Gaussian('amp_off_even', bins_even, h_even,
                                       guessParam[0],
                                       max_fev = 1000,
                                       contour_line = 2)
    amp_off_odd_fit = fit_2D_Gaussian('amp_off_odd', bins_odd, h_odd,
                                      guessParam[1],
                                      max_fev = 1000,
                                      contour_line = 2)
    even_fit = amp_off_even_fit
    odd_fit = amp_off_odd_fit

    # fitted surfaces evaluated on the histogram grid (one edge dropped)
    even_fit_h = Gaussian_2D(np.meshgrid(bins_even[:-1], bins_even[:-1]), *even_fit.info_dict['popt'])
    even_fit_h_norm = np.copy(even_fit_h/np.sum(even_fit_h))

    odd_fit_h = Gaussian_2D(np.meshgrid(bins_odd[:-1], bins_odd[:-1]), *odd_fit.info_dict['popt'])
    odd_fit_h_norm = np.copy(odd_fit_h/np.sum(odd_fit_h))

    # Boolean mask over the I/Q grid: True where "even" is the more likely
    # state.  NOTE(review): hist_discriminant is defined elsewhere in this
    # module (not shown here) -- confirm its contract.
    is_even = hist_discriminant(even_fit_h, odd_fit_h)
    is_odd = np.logical_not(is_even)

    #debugging
    # print(np.sum(h_odd), np.sum(h_even))
    # print(np.sum(h_odd_norm), np.sum(h_even_norm))
    # print('fid sums', np.sum(h_odd_norm[is_even]), np.sum(h_even_norm[is_odd]))

    # NOTE(review): this draws on the current axes even when plot=False --
    # looks like a debugging leftover; confirm before removing.
    plt.pcolormesh(bins_odd, bins_odd, h_odd_norm)
    plt.colorbar()

    # fidelity = 1 - P(odd classified even) - P(even classified odd)
    data_fidelity = 1-np.sum(h_odd_norm[is_even], dtype = "float64")-np.sum(h_even_norm[is_odd], dtype = "float64")
    fit_fidelity = 1-np.sum(odd_fit_h_norm[is_even], dtype = "float64")-np.sum(even_fit_h_norm[is_odd], dtype = "float64")

    if plot:
        fig, ax = plt.subplots()
        pc = ax.pcolormesh(bins_even, bins_even, h_even)
        amp_off_even_fit.plot_on_ax(ax)
        ax.add_patch(amp_off_even_fit.sigma_contour())
        ax.set_aspect(1)
        plt.colorbar(pc)

        fig, ax = plt.subplots()
        pc = ax.pcolormesh(bins_odd, bins_odd, h_odd)
        amp_off_odd_fit.plot_on_ax(ax)
        ax.add_patch(amp_off_odd_fit.sigma_contour())
        ax.set_aspect(1)
        plt.colorbar(pc)

        # discriminant map with both fit centers/sigma ellipses overlaid
        fig, ax = plt.subplots()
        pc = ax.pcolormesh(bins_odd, bins_odd, is_even, cmap = 'seismic')
        amp_off_odd_fit.plot_on_ax(ax)
        amp_off_even_fit.plot_on_ax(ax)
        ax.add_patch(amp_off_odd_fit.sigma_contour())
        ax.add_patch(amp_off_even_fit.sigma_contour())
        ax.set_aspect(1)
        plt.colorbar(pc)


    return data_fidelity, fit_fidelity, even_fit, odd_fit
+
def get_fidelity_from_filepath_3_state(filepath, plot = False, hist_scale = None, records_per_pulsetype = 2562, state_relabel = 0, bin_start = 50, bin_stop = 150):
    """
    Extract and Gaussian-fit the G/E/F histograms of a three-state acquisition.

    NOTE(review): the fidelity computation itself is commented out below, so
    this currently fits and (optionally) plots the three states and returns
    None -- work in progress.

    Parameters
    ----------
    filepath : str
        Path to the ddh5 acquisition.
    plot : bool, optional
        If True, show each fitted state histogram.
    hist_scale : float or None, optional
        Histogram half-width; auto-computed downstream when None.
    records_per_pulsetype : int, optional
        Records per pulse type (numRecords is 3x this).
    state_relabel : int, optional
        Cyclic G/E/F relabeling, forwarded to the extractor.
    bin_start, bin_stop : int, optional
        Boxcar window forwarded to the extractor.

    Returns
    -------
    None
    """
    bins_G, bins_E, bins_F, h_G, h_E, h_F, guessParam = extract_3pulse_histogram_from_filepath(filepath,
                                                                                               odd_only = 0,
                                                                                               numRecords = records_per_pulsetype*3,
                                                                                               IQ_offset = (0,0),
                                                                                               plot = True,
                                                                                               hist_scale = hist_scale,
                                                                                               state_relabel = state_relabel,
                                                                                               bin_start = bin_start,
                                                                                               bin_stop = bin_stop)
    # normalized G/E histograms (variable names inherited from the 2-state code)
    h_odd_norm = np.copy(h_E/np.sum(h_E))
    h_even_norm = np.copy(h_G/np.sum(h_G))

    G_fit = fit_2D_Gaussian('amp_off_even', bins_G, h_G,
                            guessParam[0],
                            max_fev = 1000,
                            contour_line = 2)
    E_fit = fit_2D_Gaussian('amp_off_odd', bins_E, h_E,
                            guessParam[1],
                            max_fev = 1000,
                            contour_line = 2)
    F_fit = fit_2D_Gaussian('amp_off_odd', bins_F, h_F,
                            guessParam[2],
                            max_fev = 1000,
                            contour_line = 2)



    # fitted surfaces evaluated on each state's histogram grid
    G_fit_h = Gaussian_2D(np.meshgrid(bins_G[:-1], bins_G[:-1]), *G_fit.info_dict['popt'])
    G_fit_h_norm = np.copy(G_fit_h/np.sum(G_fit_h))

    E_fit_h = Gaussian_2D(np.meshgrid(bins_E[:-1], bins_E[:-1]), *E_fit.info_dict['popt'])
    E_fit_h_norm = np.copy(E_fit_h/np.sum(E_fit_h))

    F_fit_h = Gaussian_2D(np.meshgrid(bins_F[:-1], bins_F[:-1]), *F_fit.info_dict['popt'])
    F_fit_h_norm = np.copy(F_fit_h/np.sum(F_fit_h))

    # Two-state discriminant/fidelity machinery, not yet ported to 3 states:
    # is_even = hist_discriminant(even_fit_h, odd_fit_h)
    # is_odd = np.logical_not(is_even)

    # #debugging
    # # print(np.sum(h_odd), np.sum(h_even))
    # # print(np.sum(h_odd_norm), np.sum(h_even_norm))
    # # print('fid sums', np.sum(h_odd_norm[is_even]), np.sum(h_even_norm[is_odd]))

    # plt.pcolormesh(bins_odd, bins_odd, h_odd_norm)
    # plt.colorbar()

    # data_fidelity = 1-np.sum(h_odd_norm[is_even], dtype = "float64")-np.sum(h_even_norm[is_odd], dtype = "float64")
    # fit_fidelity = 1-np.sum(odd_fit_h_norm[is_even], dtype = "float64")-np.sum(even_fit_h_norm[is_odd], dtype = "float64")

    if plot:
        # one figure per state: histogram, fitted center arrow, sigma ellipse
        fig, ax = plt.subplots()
        pc = ax.pcolormesh(bins_G, bins_G, h_G)
        G_fit.plot_on_ax(ax)
        ax.add_patch(G_fit.sigma_contour())
        ax.set_aspect(1)
        plt.colorbar(pc)

        fig, ax = plt.subplots()
        pc = ax.pcolormesh(bins_E, bins_E, h_E)
        E_fit.plot_on_ax(ax)
        ax.add_patch(E_fit.sigma_contour())
        ax.set_aspect(1)
        plt.colorbar(pc)

        fig, ax = plt.subplots()
        pc = ax.pcolormesh(bins_F, bins_F, h_F)
        F_fit.plot_on_ax(ax)
        ax.add_patch(F_fit.sigma_contour())
        ax.set_aspect(1)
        plt.colorbar(pc)

        # fig, ax = plt.subplots()
        # # pc = ax.pcolormesh(bins_odd, bins_odd, is_even, cmap = 'seismic')
        # amp_off_odd_fit.plot_on_ax(ax)
        # amp_off_even_fit.plot_on_ax(ax)
        # ax.add_patch(amp_off_odd_fit.sigma_contour())
        # ax.add_patch(amp_off_even_fit.sigma_contour())
        # ax.set_aspect(1)
        # plt.colorbar(pc)


    return None #data_fidelity, fit_fidelity, even_fit, odd_fit
def get_fidelity_vs_records(datapath, plot = False, hist_scale = None, records_per_pulsetype = 3870*2, bin_start = 50, bin_stop = 150):
    """Load a two-pulse-type (plus/minus) acquisition from a ddh5 file and
    build the even/odd histograms plus 2D-Gaussian fit guesses.

    Parameters
    ----------
    datapath : str
        Path to the .ddh5 file containing I/Q records for both pulse types.
    plot : bool
        Whether to plot; NOTE(review): immediately overwritten below, so this
        argument is currently ignored — confirm and remove the override.
    hist_scale : float or None
        Histogram axis scale; if None it is derived from the averaged traces.
    records_per_pulsetype : int
        Total record count; each pulse type gets records_per_pulsetype//2 rows.
    bin_start, bin_stop : int
        Time-bin window used for the demodulation averages and fit guesses.

    NOTE(review): this function ends after computing `guessParams` with no
    return statement — it looks truncated at a file-concatenation boundary;
    the original source presumably continues with the Gaussian fits.
    """
    odd_only = 0
    numRecords = records_per_pulsetype
    IQ_offset = (0,0)
    # NOTE(review): trailing comma makes this a 1-tuple `(True,)` (always
    # truthy) and it clobbers the `plot` parameter — debug leftover to remove.
    plot = True,

    I_offset, Q_offset = IQ_offset
    dd = all_datadicts_from_hdf5(datapath)['data']

    time_unit = dd['time']['unit']

    print(np.size(np.unique(dd['time']['values'])))
    # Reshape flat arrays into (records_per_type, samples_per_record).
    time_vals = dd['time']['values'].reshape((numRecords//2, np.size(dd['time']['values'])//(numRecords//2)))

    rec_unit = dd['record_num']['unit']
    rec_num = dd['record_num']['values'].reshape((numRecords//2, np.size(dd['time']['values'])//(numRecords//2)))

    # Offset-corrected quadrature records for each pulse type.
    I_plus = dd['I_plus']['values'].reshape((numRecords//2, np.size(dd['time']['values'])//(numRecords//2)))-I_offset
    I_minus = dd['I_minus']['values'].reshape((numRecords//2, np.size(dd['time']['values'])//(numRecords//2)))-I_offset

    Q_plus = dd['Q_plus']['values'].reshape((numRecords//2, np.size(dd['time']['values'])//(numRecords//2)))-Q_offset
    Q_minus = dd['Q_minus']['values'].reshape((numRecords//2, np.size(dd['time']['values'])//(numRecords//2)))-Q_offset

    print(np.size(I_minus))

    #averages over records, leaving one averaged time trace per quadrature
    I_plus_avg = np.average(I_plus, axis = 0)
    I_minus_avg = np.average(I_minus, axis = 0)
    Q_plus_avg = np.average(Q_plus, axis = 0)
    Q_minus_avg = np.average(Q_minus, axis = 0)

    if hist_scale == None:
        hist_scale = 2*np.max(np.array([I_plus_avg, I_minus_avg, Q_plus_avg, Q_minus_avg]))

    #re-weave the data back into it's original pre-saved form
    bins_even, bins_odd, h_even, h_odd = Process_One_Acquisition(datapath.split('/')[-1].split('\\')[-1], I_plus, I_minus, Q_plus, Q_minus, bin_start, bin_stop, hist_scale = hist_scale, even_only = 0, odd_only = 0, plot = plot)

    # Fit guesses: blob centers from the windowed averages, widths from a
    # fixed fraction of the histogram extent.
    Plus_x0Guess = np.average(np.average(I_plus_avg[bin_start:bin_stop]))
    Plus_y0Guess = np.average(np.average(Q_plus_avg[bin_start:bin_stop]))
    Plus_ampGuess = np.max(h_even)
    Plus_sxGuess = np.max(bins_even)/5
    Plus_syGuess = Plus_sxGuess
    Plus_thetaGuess = 0
    Plus_offsetGuess = 0

    Minus_x0Guess = np.average(np.average(I_minus_avg[bin_start:bin_stop]))
    Minus_y0Guess = np.average(np.average(Q_minus_avg[bin_start:bin_stop]))
    Minus_ampGuess = np.max(h_even)
    Minus_sxGuess = np.max(bins_even)/5
    Minus_syGuess = Minus_sxGuess
    Minus_thetaGuess = 0
    Minus_offsetGuess = 0

    guessParams = [[Plus_ampGuess, Plus_x0Guess, Plus_y0Guess, Plus_sxGuess, Plus_syGuess, Plus_thetaGuess],
                   [Minus_ampGuess, Minus_x0Guess, Minus_y0Guess, Minus_sxGuess, Minus_syGuess, Minus_thetaGuess]]
'''
Fits a VNA reflection trace and determines the over-/under-coupling regime.
'''
+
+
+import numpy as np
+import matplotlib.pyplot as plt
+import csv
+import h5py
+import inspect
+from scipy.optimize import curve_fit
+# import easygui
+from plottr.data import datadict_storage as dds, datadict as dd
+from plottr.data.datadict_storage import all_datadicts_from_hdf5
+
+FREQ_UNIT = {'GHz' : 1e9,
+ 'MHz' : 1e6,
+ 'KHz' : 1e3,
+ 'Hz' : 1.0
+ }
+
+
+
+
def reflectionFunc(freq, Qext, Qint, f0, magBack, phaseCorrect):
    """Single-port resonator reflection (S11) model for curve_fit.

    Parameters
    ----------
    freq : np.ndarray
        Angular frequencies (rad/s) at which to evaluate the model.
    Qext, Qint : float
        External and internal quality factors.
    f0 : float
        Resonance angular frequency (rad/s).
    magBack : float
        Background magnitude scale.
    phaseCorrect : float
        Overall phase offset (rad).

    Returns
    -------
    np.ndarray
        Real/imaginary parts interleaved as float64
        ([Re0, Im0, Re1, Im1, ...]) so scipy.optimize.curve_fit can fit both
        quadratures simultaneously.
    """
    omega0 = f0
    delta = freq - omega0
    # Shared Lorentzian-like denominator of the reflection response.
    inv_resp = 1j * delta * (2 + delta / omega0) / (1 + delta / omega0) + omega0 / Qint
    S_11_up = 1.0 / inv_resp - Qext / omega0
    S_11_down = 1.0 / inv_resp + Qext / omega0
    S11 = magBack * (S_11_up / S_11_down) * np.exp(1j * phaseCorrect)
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # view the complex128 array explicitly as float64 pairs.
    return S11.view(np.float64)
+
def reflectionFunc_re(freq, Qext, Qint, f0, magBack, phaseCorrect):
    """Real part only of the interleaved reflection model (even indices)."""
    interleaved = reflectionFunc(freq, Qext, Qint, f0, magBack, phaseCorrect)
    return interleaved[0::2]
+
def getData_from_datadict(filepath, plot_data = None):
    """Load a VNA trace from a plottr ddh5 file.

    Parameters
    ----------
    filepath : str
        Path to the .ddh5 file with 'power' (dB), 'frequency' (Hz) and
        'phase' (rad) fields.
    plot_data : bool or None
        If truthy, plot magnitude and phase vs angular frequency.

    Returns
    -------
    tuple
        (freqs, real, imag, powers_dB, phase_rad); freqs are angular (rad/s).
    """
    datadict = all_datadicts_from_hdf5(filepath)['data']
    powers_dB = datadict.extract('power')['power']['values']
    freqs = datadict.extract('power')['frequency']['values']*2*np.pi  # -> rad/s
    phase_rad = datadict.extract('phase')['phase']['values']

    # dB -> linear amplitude, then split into quadratures.
    lin = np.power(10, powers_dB/20)
    real = lin*np.cos(phase_rad)
    imag = lin*np.sin(phase_rad)

    # BUG FIX: the print was accidentally duplicated; emit it once.
    print(np.size(phase_rad))

    if plot_data:
        plt.figure('mag')
        plt.plot(freqs, powers_dB)
        plt.figure('phase')
        plt.plot(freqs, phase_rad)

    return (freqs, real, imag, powers_dB, phase_rad)
+
+
def getData(filename, method='hfss', freq_unit = 'GHz', plot_data=1):
    """Load a reflection trace from an HFSS csv export or an HDF5 VNA file.

    Parameters
    ----------
    filename : str
        Path to the data file.
    method : str
        'hfss' (csv: freq, mag(dB), phase(deg)), 'vna', or 'vna_old'.
    freq_unit : str
        Frequency unit key into FREQ_UNIT for the 'hfss' csv ('GHz', ...).
    plot_data : bool/int
        If truthy, plot magnitude and phase.

    Returns
    -------
    tuple
        (freq, real, imag, mag, phase) with freq angular (rad/s).

    Raises
    ------
    NotImplementedError
        For an unknown `method`.
    """
    if method == 'hfss':
        # CSV layout: one header row, then columns freq, mag(dB), phase(deg).
        with open(filename) as csvfile:
            csvData = list(csv.reader(csvfile))
        # Vectorized parse/transpose replaces the original O(n*m) Python
        # double loop that copied cell by cell.
        data = np.array(csvData[1:], dtype=float).T

        freq = data[0] * 2 * np.pi * FREQ_UNIT[freq_unit]  # -> angular
        phase = data[2] / 180. * np.pi
        mag = data[1]
        lin = 10**(mag / 20.0)

    elif method == 'vna':
        # Context manager guarantees the file is closed even if a read fails
        # (the original leaked the handle on exception).
        with h5py.File(filename, 'r') as f:
            freq = f['VNA Frequency (Hz)'][()]*2*np.pi
            # NOTE(review): dataset is labelled degrees but is NOT converted
            # to radians here, unlike the other branches — confirm intended.
            phase = f['Phase (deg)'][()]
            mag = f['Power (dB)'][()]
        lin = 10**(mag/20.0)

    elif method == 'vna_old':
        with h5py.File(filename, 'r') as f:
            freq = f['Freq'][()]*2 * np.pi
            phase = f['S21'][()][1] / 180. * np.pi
            mag = f['S21'][()][0]
        lin = 10**(mag / 20.0)

    else:
        raise NotImplementedError('method not supported')

    imag = lin * np.sin(phase)
    real = lin * np.cos(phase)

    if plot_data:
        plt.figure('mag')
        plt.plot(freq/2/np.pi, mag)
        plt.figure('phase')
        plt.plot(freq/2/np.pi, phase)

    return (freq, real, imag, mag, phase)
+
+
+
def fit(freq, real, imag, mag, phase, Qguess=(2e4, 1e5), real_only = 0, bounds = None, f0Guess = None, magBackGuess = None, phaseGuess = np.pi, debug = False):
    """Fit a reflection trace to `reflectionFunc` with curve_fit.

    Parameters
    ----------
    freq : np.ndarray
        Angular frequencies (rad/s).
    real, imag : np.ndarray
        Quadratures of the measured S11.
    mag, phase : np.ndarray
        Magnitude (dB) and phase arrays (mag is used for the background guess).
    Qguess : tuple
        (Qext, Qint) initial guesses.
    real_only : int
        If truthy, fit only the real part (uses reflectionFunc_re).
    bounds : tuple or None
        curve_fit bounds; defaults to a wide box around the guesses.
    f0Guess, magBackGuess : float or None
        Optional initial guesses; derived from the data when None.
    phaseGuess : float
        Initial phase-offset guess (rad).
    debug : bool
        Print the frequency guess.

    Returns
    -------
    tuple
        (popt, pcov) from scipy.optimize.curve_fit, parameter order
        (Qext, Qint, f0, magBack, phaseCorrect).
    """
    if f0Guess is None:  # `is None`, not `== None`
        # Dumb guess: resonance is probably near the middle of the span.
        f0Guess = freq[int(np.floor(np.size(freq)/2))]
    if debug:
        print("Guess freq: "+str(f0Guess/(2*np.pi*1e9)))
    lin = 10**(mag / 20.0)
    if magBackGuess is None:
        # Background magnitude from the first fifth of the trace.
        magBackGuess = np.average(lin[:int(len(freq) / 5)])
    QextGuess, QintGuess = Qguess
    if bounds is None:
        bounds = ([QextGuess / 10, QintGuess / 10, f0Guess / 2, magBackGuess / 10.0, -2 * np.pi],
                  [QextGuess * 10, QintGuess * 10, f0Guess * 2, magBackGuess * 10.0, 2 * np.pi])

    target_func = reflectionFunc
    # BUG FIX: np.float alias removed in NumPy >= 1.24 — view as float64.
    data_to_fit = (real + 1j * imag).view(np.float64)
    if real_only:
        target_func = reflectionFunc_re
        data_to_fit = real
    popt, pcov = curve_fit(target_func, freq, data_to_fit,
                           p0=(QextGuess, QintGuess, f0Guess, magBackGuess, phaseGuess),
                           bounds=bounds,
                           maxfev=int(1e4), ftol=2.3e-16, xtol=2.3e-16)

    return popt, pcov
+
+
def plotRes(freq, real, imag, mag, phase, popt):
    """Overlay the measured real/imag quadratures with the fitted model."""
    xdata = freq / (2 * np.pi)
    model = reflectionFunc(freq, *popt)
    fitted_parts = (model[::2], model[1::2])
    measured_parts = (real, imag)

    plt.figure(figsize=(12, 5))
    for panel, (measured, fitted, title) in enumerate(
            zip(measured_parts, fitted_parts, ('real', 'imag')), start=1):
        plt.subplot(1, 2, panel)
        plt.title(title)
        plt.plot(xdata, measured, '.')
        plt.plot(xdata, fitted)
    plt.show()
+
+
if __name__ == '__main__':
    # filepath = easygui.fileopenbox()
    filepath = r'Z:/Data/C1/2021-06-23/2021-06-23_0001_trace_0800_43/2021-06-23_0001_trace_0800_43.ddh5'

    def rounder(value, digits=3):
        """BUG FIX: `rounder` was called below but never defined or imported
        anywhere in this module (NameError at runtime); provide a simple
        rounding shim with the apparent intent."""
        return np.round(value, digits)

    (freq, real, imag, mag, phase) = getData_from_datadict(filepath, plot_data=0)

    # Trim the edges of the trace before fitting.
    ltrim = 200
    rtrim = 200
    freq = freq[ltrim:-rtrim]
    real = real[ltrim:-rtrim]
    imag = imag[ltrim:-rtrim]
    mag = mag[ltrim:-rtrim]
    phase = phase[ltrim:-rtrim]

    popt, pcov = fit(freq, real, imag, mag, phase, Qguess=(3e2, 5e3), magBackGuess=.01, phaseGuess = 0)  # (ext, int)

    print(f'f (Hz): {rounder(popt[2]/2/np.pi)}', )
    fitting_params = list(inspect.signature(reflectionFunc).parameters.keys())[1:]
    for i in range(2):
        print(f'{fitting_params[i]}: {rounder(popt[i])} +- {rounder(np.sqrt(pcov[i, i]))}')
    Qtot = popt[0] * popt[1] / (popt[0] + popt[1])
    print('Q_tot: ', rounder(Qtot), '\nT1 (s):', rounder(Qtot/popt[2]), f"Kappa: {rounder(popt[2]/2/np.pi/Qtot)}", )

    plotRes(freq, real, imag, mag, phase, popt)
+
+
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Jun 1 17:52:48 2021
+
+@author: Hatlab_3
+"""
+
+import easygui
+from plottr.apps.autoplot import autoplotDDH5, script, main
+from plottr.data.datadict_storage import all_datadicts_from_hdf5
+import matplotlib.pyplot as plt
+import numpy as np
+from measurement_modules.Helper_Functions import get_name_from_path, shift_array_relative_to_middle, log_normalize_to_row, select_closest_to_target, find_all_ddh5
+from data_processing.fitting.QFit import fit, plotRes, reflectionFunc
+import inspect
+from plottr.data import datadict_storage as dds, datadict as dd
+from scipy.signal import savgol_filter
+from scipy.fftpack import dct, idct
+
class fit_res_sweep():
    """Fit a resonator reflection trace at every value of a swept independent
    parameter (e.g. bias current), optionally saving (Qext, Qint, f0, magBack)
    for each point via the supplied writer/save function."""

    def __init__(self, datadict, writer, save_func, raw_filepath, ind_par_name, create_file = True):
        """
        Parameters
        ----------
        datadict, writer : plottr objects used for saving fit results.
        save_func : callable
            save_func(writer, ind_par_val, popt, pconv) persists one fit.
        raw_filepath : str
            ddh5 file holding 'phase', 'power', 'VNA_frequency' and the
            independent-parameter axis.
        ind_par_name : str
            Name of the swept parameter axis in the datadict.
        create_file : bool
            If True, enter the writer context immediately.
        """
        # Setup files.
        self.datadict = datadict
        self.writer = writer
        if create_file:
            self.writer.__enter__()
        self.save_func = save_func
        # 1D data extraction.
        dicts = all_datadicts_from_hdf5(raw_filepath)['data']
        uvphDict = dicts.extract('phase')
        uvpoDict = dicts.extract('power')

        # Get the flat arrays back out.
        self.vna_phase = uvphDict.data_vals('phase')
        self.vna_power = uvpoDict.data_vals('power')
        self.vna_freqs = uvphDict.data_vals('VNA_frequency')*2*np.pi  # -> rad/s
        self.ind_par = uvphDict.data_vals(ind_par_name)

    def default_bounds(self, QextGuess, QintGuess, f0Guess, magBackGuess):
        """Loose box bounds around the current guesses for curve_fit."""
        return ([QextGuess / 1.5, QintGuess/1.5, f0Guess*0.9, magBackGuess / 5.0, -2 * np.pi],
                [QextGuess * 1.5, QintGuess*10, f0Guess*1.1, magBackGuess * 5.0, 2 * np.pi])

    def initial_fit(self, f0Guess, QextGuess = 50, QintGuess = 300, magBackGuess = 0.0001, bounds_func = None, smooth = False, smooth_win = 11, phaseOffGuess = 0, debug = False, adaptive_window = False, adapt_win_size = 300e6, start_index = 0):
        """Fit the trace at one sweep point to seed the semi-automatic sweep.

        f0Guess is given in Hz and converted to rad/s internally; results are
        stored on self.initial_popt / self.initial_pconv.
        """
        print("RUNNING INITIAL FIT")
        self.autofit_starting_index = start_index
        f0Guess = f0Guess*2*np.pi
        if bounds_func is None:  # `is None`, not `== None`
            bounds = self.default_bounds(QextGuess, QintGuess, f0Guess, magBackGuess)
        else:
            bounds = bounds_func(QextGuess, QintGuess, f0Guess, magBackGuess)
        # Select only the rows at the chosen sweep point.
        filt = (self.ind_par == np.unique(self.ind_par)[start_index])

        if adaptive_window:
            # Restrict to +- adapt_win_size/2 (Hz) around the guess.
            filt1 = self.vna_freqs < f0Guess + adapt_win_size*2*np.pi/2
            filt2 = self.vna_freqs > f0Guess - adapt_win_size*2*np.pi/2
            filt = filt*filt1*filt2

        # NOTE(review): freqs are uniqued but phase/power are not — sizes can
        # disagree if the sweep repeats frequencies; confirm upstream layout.
        init_vna_freqs = np.unique(self.vna_freqs[filt])
        init_phase_trace = self.vna_phase[filt]
        init_pow_trace = self.vna_power[filt]

        if debug:
            plt.figure(1)
            plt.plot(init_vna_freqs/(2*np.pi), init_phase_trace)
            plt.title("Debug1: phase")
            plt.figure(2)
            plt.plot(init_vna_freqs/(2*np.pi), init_pow_trace)
            plt.title("Debug1: power")

        if smooth:
            init_phase_trace = savgol_filter(init_phase_trace, smooth_win, 3)
            init_pow_trace = savgol_filter(init_pow_trace, smooth_win, 3)

        lin = 10**(init_pow_trace/20)

        imag = lin * np.sin(init_phase_trace)
        real = lin * np.cos(init_phase_trace)

        popt, pconv = fit(init_vna_freqs, real, imag, init_pow_trace, init_phase_trace, Qguess = (QextGuess,QintGuess), f0Guess = f0Guess, real_only = 0, bounds = bounds, magBackGuess = magBackGuess, phaseGuess = phaseOffGuess)

        print(f'f (Hz): {np.round(popt[2]/2/np.pi, 3)}', )
        fitting_params = list(inspect.signature(reflectionFunc).parameters.keys())[1:]
        for k in range(2):
            print(f'{fitting_params[k]}: {np.round(popt[k], 2)} +- {np.round(np.sqrt(pconv[k, k]), 3)}')
        Qtot = popt[0] * popt[1] / (popt[0] + popt[1])
        print('Q_tot: ', round(Qtot), '\nT1 (s):', round(Qtot/popt[2]), f"Kappa: {round(popt[2]/2/np.pi/Qtot)}", )

        self.initial_popt = popt
        self.initial_pconv = pconv

        plotRes(init_vna_freqs, real, imag, init_pow_trace, init_phase_trace, popt)

    def save_fit(self, ind_par_val, base_popt, base_pconv):
        """Persist one fit result through the configured save function."""
        self.save_func(self.writer, ind_par_val, base_popt, base_pconv)

    def semiauto_fit(self, ind_par, vna_freqs, vna_mags, vna_phases, init_popt, debug = False, savedata = False, smooth = False, smooth_win = 11, adaptive_window = False, adapt_win_size = 300e6, fourier_filter = False, fourier_cutoff = 40, pconv_tol = 2, bounds_func = None, alt_array_scale = 1):
        """Sweep over ind_par fitting each trace, seeding each fit with the
        average of the previous results and retrying with detuned f0 guesses
        when the fit covariance jumps by more than pconv_tol.

        Returns (unique ind_par values, res_freqs (Hz), Qints, Qexts, magBacks).
        """
        print("RUNNING SEMI-AUTO FIT")
        res_freqs = np.zeros(np.size(np.unique(ind_par)))
        Qints = np.zeros(np.size(np.unique(ind_par)))
        Qexts = np.zeros(np.size(np.unique(ind_par)))
        magBacks = np.zeros(np.size(np.unique(ind_par)))

        init_f0 = init_popt[2]
        init_Qint = init_popt[1]
        init_Qext = init_popt[0]
        init_magBack = init_popt[3]
        for i, ind_par_val in enumerate(np.unique(ind_par)):
            first_condn = ind_par == ind_par_val
            [first_trace_freqs, first_trace_phase, first_trace_mag] = [vna_freqs[first_condn]*2*np.pi, vna_phases[first_condn], 10**(vna_mags[first_condn]/20)]
            if smooth:
                first_trace_phase = savgol_filter(first_trace_phase, smooth_win, 3)
                first_trace_mag = savgol_filter(first_trace_mag, smooth_win, 3)

            imag = first_trace_mag * np.sin(first_trace_phase)
            real = first_trace_mag * np.cos(first_trace_phase)
            if fourier_filter == True:
                if debug:
                    plt.figure(3)
                    plt.plot(first_trace_freqs, real)
                    plt.plot(first_trace_freqs, imag)
                    plt.title('before filter')
                # Drop the lowest DCT components to remove slow ripple.
                imag = idct(dct(imag)[fourier_cutoff:])
                real = idct(dct(real)[fourier_cutoff:])
                if debug:
                    plt.figure(4)
                    plt.plot(real)
                    plt.plot(imag)
                    plt.title('after filter')
            if i >= 2:
                if adaptive_window:
                    filt1 = first_trace_freqs<np.average(res_freqs[i-1:i])*2*np.pi+adapt_win_size*2*np.pi/2
                    filt2 = first_trace_freqs>np.average(res_freqs[i-1:i])*2*np.pi-adapt_win_size*2*np.pi/2
                    filt = filt1*filt2
                else:
                    filt = np.ones(np.size(first_trace_freqs)).astype(bool)
                # Average the previous fits as prior information to increase
                # robustness to bad fits.
                f0Guess = np.average(res_freqs[i-1:i])*2*np.pi
                magBackGuess = np.average(magBacks[i-1:i])
                (QextGuess, QintGuess) = (np.average(Qexts[i-1:i]), np.average(Qints[i-1:i]))
            else:
                f0Guess = init_f0
                magBackGuess = init_magBack
                (QextGuess, QintGuess) = (init_Qext, init_Qint)
                filt = np.ones(np.size(first_trace_freqs)).astype(bool)
            if bounds_func is None:
                bounds = self.default_bounds(QextGuess, QintGuess, f0Guess, magBackGuess)
            else:
                bounds = bounds_func(QextGuess, QintGuess, f0Guess, magBackGuess)
            if i > 2:
                prev_pconv = pconv
            popt, pconv = fit(first_trace_freqs[filt], real[filt], imag[filt], first_trace_mag, first_trace_phase, Qguess = (QextGuess,QintGuess), f0Guess = f0Guess, real_only = 0, bounds = bounds, magBackGuess = magBackGuess)

            # Catch a sudden change in convergence and retry with detuned
            # frequency guesses until it's back in range.
            if i > 2:
                # BUG FIX: the original called np.array(pconv[0,0], pconv[1,1]),
                # which passes the second variance as the *dtype* argument and
                # raises TypeError; a 2-element array was clearly intended.
                pconv_diff_ratio = (np.array([pconv[0, 0], pconv[1, 1]]) - np.array([prev_pconv[0, 0], prev_pconv[1, 1]])) / np.array([prev_pconv[0, 0], prev_pconv[1, 1]])
                if debug:
                    print(f"Pconv ratio: {pconv_diff_ratio}")
                j = 0
                alt_array = alt_array_scale*np.array([1e6,-1e6,5e6,-5e6, 10e6,-10e6,15e6,-15e6, 20e6, -20e6, 30e6, -30e6])*2*np.pi
                while np.any(np.abs(pconv_diff_ratio) > pconv_tol):
                    if j > 11:
                        raise Exception("No good fit at this point")
                    print(f"sudden change in Q detected (pconv_diff_ratio: {pconv_diff_ratio}), trying resonant guess + {alt_array[j]/(2*np.pi)}")
                    if debug:
                        # Alternating entries probe above then below resonance.
                        if j % 2 == 0:
                            print("trying above")
                        else:
                            print("trying_below")
                    popt, pconv = fit(first_trace_freqs[filt], real[filt], imag[filt], first_trace_mag, first_trace_phase, Qguess = (QextGuess,QintGuess), f0Guess = f0Guess+alt_array[j], real_only = 0, bounds = bounds, magBackGuess = magBackGuess)

                    pconv_diff_ratio = (np.array([pconv[0, 0], pconv[1, 1]]) - np.array([prev_pconv[0, 0], prev_pconv[1, 1]])) / np.array([prev_pconv[0, 0], prev_pconv[1, 1]])
                    j += 1

            if debug:
                import time
                plotRes(first_trace_freqs[filt], real[filt], imag[filt], first_trace_mag[filt], first_trace_phase[filt], popt)
                time.sleep(1)

            res_freqs[i] = popt[2]/(2*np.pi)
            Qints[i] = popt[1]
            Qexts[i] = popt[0]
            magBacks[i] = popt[3]

            print(f'f (Hz): {np.round(popt[2]/2/np.pi, 3)}', )
            fitting_params = list(inspect.signature(reflectionFunc).parameters.keys())[1:]
            # BUG FIX: this inner loop previously reused `i`, shadowing the
            # outer sweep index.
            for k in range(2):
                print(f'{fitting_params[k]}: {np.round(popt[k], 2)} +- {np.round(np.sqrt(pconv[k, k]), 3)}')
            Qtot = popt[0] * popt[1] / (popt[0] + popt[1])
            print('Q_tot: ', round(Qtot), '\nT1 (s):', round(Qtot/popt[2]), f"Kappa: {round(popt[2]/2/np.pi/Qtot)}", )
            if savedata:
                self.save_fit(ind_par_val, popt, pconv)

        return np.unique(ind_par), res_freqs, Qints, Qexts, magBacks
+
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Feb 16 12:55:36 2021
+
+@author: Hatlab_3
+
Find all TACOs in a directory, pull out the best powers, and plot them in 3D space to see whether each is actually a TACO.
+"""
+import os
+from plottr.apps.autoplot import autoplotDDH5, script, main
+from plottr.data.datadict_storage import all_datadicts_from_hdf5
+import numpy as np
+import matplotlib.pyplot as plt
+from data_processing.Helper_Functions import get_name_from_path, shift_array_relative_to_middle, log_normalize_to_row, select_closest_to_target, log_normalize_up_to_row, find_all_ddh5
+from mpl_toolkits.mplot3d import Axes3D
+from data_processing.ddh5_Plotting.utility_modules.TACO_utility_functions import make_tacos, make_sat_img_plot
+from scipy.ndimage import gaussian_filter
+import matplotlib.colors as color
+import time
+plt.rcParams.update({'font.weight': 'bold'})
+plt.rc('axes', titlesize=15) # fontsize of the axes titles
+plt.rc('axes', labelsize=15) # fontsize of the x and y labels
+plt.rc('xtick', labelsize=12) # fontsize of the tick labels
+plt.rc('ytick', labelsize=12) # fontsize of the tick labels
+
def get_sat_info(sat_bias_current, sat_gen_freq, sat_gen_power, sat_vna_freq, sat_vna_powers, sat_gain, levels = None, norm_power = -40, x_val = None, filter_window = 0, vmin = -1, vmax = 1, plot = True, xlim = None, ylim = None):
    """Extract saturation points (+-1 dB gain-compression power) from one
    saturation sweep at a single bias current.

    Parameters
    ----------
    sat_bias_current : float
        Bias current of this sweep.
    sat_gen_freq, sat_gen_power : np.ndarray
        Generator frequency/power axes of the sweep.
    sat_vna_freq, sat_vna_powers : np.ndarray
        Signal (VNA) frequency and 2D power axis.
    sat_gain : np.ndarray
        Measured gain data; transposed and smoothed before normalization.
    levels : list or None
        Contour levels for the optional image plot.  BUG FIX: was a mutable
        default argument; now defaulted inside the function.
    norm_power : float
        Signal power (dBm) at which to normalize (clamped 2 dB above the
        lowest available VNA power).
    x_val, xlim, ylim : unused here; kept for interface compatibility.
    filter_window, vmin, vmax, plot : plotting/smoothing options.

    Returns
    -------
    tuple
        (best gen freq (MHz, bin-centered), best vna freq, best sat power,
        sat_powers, sat_gen_powers, detunings, sat_center_freqs,
        bias-current array broadcast to len(detunings)).
    """
    if levels is None:
        levels = [-2, -1.5, -1, -0.25, 0.25, 1, 1.5, 2]

    # Signal power at which to normalize the rest of the plot to.
    y_norm_val = max([norm_power, np.min(sat_vna_powers)+2])

    # Reference frequency: taken at the minimum generator power.
    zero_freq_loc = np.argmin(sat_gen_power)
    zero_freq = sat_gen_freq[zero_freq_loc]

    if plot:
        fig, ax, img = make_sat_img_plot(sat_bias_current, sat_vna_freq, sat_vna_powers, sat_gain, norm_power = y_norm_val, levels = levels, filter_window = filter_window, vmin = vmin, vmax = vmax)
        ax.set_title(sat_bias_current)
        cb = fig.colorbar(img, ax = ax)
        ax.set_xlabel('Generator Frequency(GHz)')
        ax.set_ylabel('Signal Power (dBm)')
        cb.set_label("S21 change from 20dB (dB)")

    smoothed_normed_data = log_normalize_to_row(sat_gen_freq, sat_vna_powers[0], gaussian_filter(sat_gain.T, (filter_window,0)), y_norm_val= y_norm_val)

    # Getting saturation points.
    sat_powers = []
    sat_gen_powers = []
    detunings = []
    sat_center_freqs = []

    for i, col in enumerate(smoothed_normed_data.T):
        # Number of rows at or below the normalization power; offsets the
        # indices found in the above-normalization region back into `col`.
        buffer = np.size(col[sat_vna_powers[0] <= y_norm_val])
        # Locate the first +1 dB and -1 dB crossings; fall back to the last
        # bin when no crossing exists.
        try:
            pos_loc = buffer+np.min(np.where(np.isclose(col[sat_vna_powers[0]>y_norm_val], 1, atol = 1e-2))[0])
        except ValueError:
            pos_loc = np.size(col)-1
        try:
            neg_loc = buffer+np.min(np.where(np.isclose(col[sat_vna_powers[0]>y_norm_val], -1, atol = 1e-2))[0])
        except ValueError:
            neg_loc = np.size(col)-1

        loc_arr = np.array([pos_loc, neg_loc])
        loc_arr = np.floor(loc_arr[np.logical_not(np.isnan(loc_arr))]).astype(int)

        # Whichever compression point occurs first sets the saturation power.
        loc = np.min(loc_arr)
        sat_powers.append(sat_vna_powers[0][loc])
        sat_gen_powers.append(sat_gen_power[i])

        detunings.append(sat_gen_freq[i]-zero_freq)
        sat_center_freqs.append(sat_gen_freq[i])

    # Index of the best (highest) saturation power, excluding the last point.
    max_loc = np.where(sat_powers[0:-1] == np.max(sat_powers[0:-1]))[0][0]

    return (np.array(sat_gen_freq+(sat_gen_freq[1]-sat_gen_freq[0])/2)/1e6)[max_loc], sat_vna_freq[max_loc], sat_powers[max_loc], np.array(sat_powers), np.array(sat_gen_powers), np.array(detunings), np.array(sat_center_freqs), sat_bias_current*np.ones(len(detunings))
+
def superSat(filepaths,
             y_norm_val = -70,
             filter_window = 0,
             conv = False,
             conv_func = None,
             tla_signal = 0,
             tla_pump = 0,
             plot_individual = False,
             cscale = 10e6,
             kerr = False,
             kerr_null: float = None,
             kerr_scale: float = None,
             scale_flip = False,
             device_name: str = '',
             quanta_flip = False,
             xlim = None,
             ylim = None):
    '''
    Assemble all of the saturation sweeps, extract the best (highest)
    saturation power in (gen_freqs, gen_powers) space, and plot vs current.

    NOTE(review): this block was recovered from an indentation-mangled
    extraction; the nesting of the trailing plot sections relative to the
    `if conv:` / `if kerr:` branches should be confirmed against the
    original repository before any refactor — several names (`ax2`, `fig2`,
    `colors2`, `tick_labels`) are only defined inside those branches.
    '''
    filenames = []  # NOTE(review): never used

    # Per-sweep accumulators (flattened over all files and bias currents).
    sat_powers = []
    sat_gen_powers = []
    sat_detunings = []
    sat_bias_arr = []
    sat_center_freqs_arr = []

    best_sat_powers = []
    best_sat_gen_frequencies = []
    best_sat_gen_powers = []

    best_sat_vna_frequencies = []
    plot_currents = []

    for sat_filepath in filepaths:
        # Extract the best gen powers from each ddh5 file.
        sat_dicts = all_datadicts_from_hdf5(sat_filepath)
        satDict = sat_dicts['data']
        sat_data = satDict.extract('sat_gain')
        [bias_current, sat_gen_freq, sat_gen_power, sat_vna_powers, sat_gain, sat_vna_freq] = [sat_data.data_vals('sat_bias_current'),
                                                                                              sat_data.data_vals('sat_gen_freq'),
                                                                                              sat_data.data_vals('sat_gen_power'),
                                                                                              sat_data.data_vals('sat_vna_powers'),
                                                                                              sat_data.data_vals('sat_gain'),
                                                                                              sat_data.data_vals('sat_vna_freq')
                                                                                              ]
        for current in np.unique(bias_current): #could be multiple bias currents in one single TACO datafile
            plot_currents.append(current)
            bp1 = bias_current == current
            # NOTE(review): `sgp1` is unpacked twice (both from
            # sat_gen_power[bp1]) — harmless since both values are equal,
            # but clearly a typo; one of the two should likely be removed.
            sf1, sgp1, svp1, sgp1, sg1, svf1 = sat_gen_freq[bp1], sat_gen_power[bp1], sat_vna_powers[bp1], sat_gen_power[bp1], sat_gain[bp1], sat_vna_freq[bp1]

            best_sat_gen_freq, best_sat_vna_freq, best_sat_pow, sat_power_arr, sat_gen_power_arr, sat_detuning_arr, sat_center_freqs, bias_arr = get_sat_info(
                current,
                sf1,
                sgp1,
                svf1,
                svp1,
                sg1,
                norm_power = y_norm_val,
                levels = [-20, -1,1, 20],
                x_val = None,
                filter_window = filter_window,
                vmin = -1.7, vmax = 1.7,
                plot = plot_individual)

            best_sat_gen_frequencies.append(best_sat_gen_freq)
            best_sat_vna_frequencies.append(best_sat_vna_freq)
            best_sat_powers.append(best_sat_pow)

            sat_powers.extend(sat_power_arr)
            sat_gen_powers.extend(sat_gen_power_arr)
            sat_detunings.extend(sat_detuning_arr)
            sat_bias_arr.extend(bias_arr)
            sat_center_freqs_arr.extend(sat_center_freqs)

    if conv: #requires fluxsweep_fitting file to already have been run in the same kernel
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        plt_powers = np.array(best_sat_powers)-tla_signal
        if quanta_flip:
            p1 = ax1.plot(1-conv_func(np.array(plot_currents)), plt_powers, 'b.', markersize = 15)
        else:
            p1 = ax1.plot(conv_func(np.array(plot_currents)), plt_powers, 'b.', markersize = 15)
        ax1.title.set_text(f'{device_name}Best Saturation Power vs. Flux')
        ax1.set_xlabel('Flux Quanta ($\Phi/\Phi_0$)')
        ax1.set_ylabel('Saturation Power (dBm)')
        if ylim is not None:
            ax1.set_ylim(ylim[0], ylim[1])
        plt.grid()

        fig2 = plt.figure()
        ax2 = fig2.add_subplot(111)
        colors2 = conv_func(np.array(sat_bias_arr))
        print(cscale)
        img = ax2.scatter(np.array(sat_gen_powers)-tla_pump, np.array(sat_powers)-tla_signal, c = colors2, cmap = 'viridis', vmin = np.min(colors2), vmax = np.max(colors2), zorder = 1)
        cb2 = fig2.colorbar(img, ax = ax2)
        cb2.set_label("Bias Flux ($\Phi/\Phi_0$)")
        ax2.title.set_text(f'{device_name}Saturation Power vs. Pump power')
        ax2.set_xlabel('Generator Power (dBm Cryo)')
        ax2.set_ylabel('Saturation Power (dBm Cryo)')
        if xlim is not None:
            ax2.set_xlim(xlim[0], xlim[1])
        if ylim is not None:
            # NOTE(review): BUG — guarded by `ylim` but indexes `xlim`;
            # crashes when ylim is set and xlim is None, and sets the wrong
            # axis range otherwise. Should be ax2.set_ylim(ylim[0], ylim[1]).
            ax2.set_ylim(xlim[0], xlim[1])
        plt.grid()

    if kerr:
        # NOTE(review): this branch reads `colors2`, which is only defined in
        # the `if conv:` branch above — kerr=True with conv=False raises
        # NameError; also flux colors are reused with a Kerr colormap here.
        colors_arr = [color.hex2color('#000066'),color.hex2color('#444488'),color.hex2color('#4444FF'), color.hex2color('#03fc41'),color.hex2color('#03fc41'), color.hex2color('#FF4444'), color.hex2color('#884444'), color.hex2color('#660000')]
        tick_labels = ['- 1', '0', '+ 1']
        if scale_flip:
            colors_arr.reverse()
            tick_labels.reverse()
        _cmap = color.LinearSegmentedColormap.from_list('my_cmap', colors_arr)
        fig1 = plt.figure()
        ax1 = fig1.add_subplot(111)
        # Convert the color scale to MHz for the detuning plot below.
        cscale /= 1e6
        scale = cscale  # NOTE(review): `scale` is never used
        img = ax1.scatter(np.array(sat_gen_powers)-tla_pump, np.array(sat_powers)-tla_signal, c = colors2, cmap = _cmap, vmin = kerr_null-kerr_scale, vmax = kerr_null+kerr_scale, zorder = 1)
        cb1 = fig1.colorbar(img, ax = ax1, ticks=[kerr_null-kerr_scale, kerr_null, kerr_null+kerr_scale])
        cb1.set_label("Self-Kerr (arb. units)")
        cb1.ax.set_yticklabels(tick_labels)
        ax1.title.set_text(f'{device_name}Saturation Power vs. Pump power')
        ax1.set_xlabel('Generator Power (dBm Cryo)')
        ax1.set_ylabel('Saturation Power (dBm Cryo)')
        if xlim is not None:
            ax1.set_xlim(xlim[0], xlim[1])
        if ylim is not None:
            ax1.set_ylim(ylim[0], ylim[1])
        plt.grid()

    # Plot best saturation wrt signal frequency, i.e. for all vna frequencies
    # plot the best saturation. Generally this is the generator freq/2.
    # NOTE(review): the following block references `ax2`/`fig2` (defined only
    # when conv=True) — NameError when conv=False; confirm intended nesting.
    fig3 = plt.figure()
    ax3 = fig3.add_subplot(111)
    # NOTE(review): y-label below says "Saturation Power" but this plots
    # sat_gen_powers — confirm which quantity was intended.
    img = ax3.scatter(np.array(sat_center_freqs_arr)/1e9, sat_gen_powers)
    ax3.set_xlabel("Signal Frequency (GHz)")
    ax3.set_ylabel("Saturation Power (dBm Cryo)")

    colors2 = np.array(sat_bias_arr)*1e6
    img = ax2.scatter(np.array(sat_gen_powers)-tla_pump, np.array(sat_powers)-tla_signal, c = colors2, cmap = 'magma', vmin = np.min(colors2), vmax = np.max(colors2), zorder = 1)
    cb2 = fig2.colorbar(img, ax = ax3)
    cb2.set_label("Bias Current ($\mu A$)")

    # (commented-out legacy plotting code removed for clarity)

    fig1 = plt.figure()
    ax1 = fig1.add_subplot(111)
    scale = cscale  # NOTE(review): `scale` is never used

    colors_arr = [color.hex2color('#000066'),color.hex2color('#444488'),color.hex2color('#4444FF'), color.hex2color('#03fc41'),color.hex2color('#03fc41'), color.hex2color('#FF4444'), color.hex2color('#884444'), color.hex2color('#660000')]
    if scale_flip:
        colors_arr.reverse()
        # NOTE(review): BUG — `tick_labels` is only defined inside the
        # `if kerr:` branch; scale_flip=True with kerr=False raises NameError,
        # and the reversed list is never used after this point anyway.
        tick_labels.reverse()
    _cmap = color.LinearSegmentedColormap.from_list('my_cmap', colors_arr)

    img = ax1.scatter(np.array(sat_gen_powers)-tla_pump, np.array(sat_powers)-tla_signal, c = np.array(sat_detunings)/1e6, cmap = _cmap, vmin = -cscale, vmax = cscale, zorder = 1)
    cb1 = fig1.colorbar(img, ax = ax1)
    cb1.set_label("Detuning (MHz)")
    ax1.title.set_text(f'{device_name}Saturation Power vs. Pump power')
    ax1.set_xlabel('Generator Power (dBm Cryo)')
    ax1.set_ylabel('Saturation Power (dBm Cryo)')
    if xlim is not None:
        ax1.set_xlim(xlim[0], xlim[1])
    if ylim is not None:
        ax1.set_ylim(ylim[0], ylim[1])
    plt.grid()
    # NOTE(review): unconditional save to a hard-coded personal Drive path —
    # will fail on other machines; should be parameterized or removed.
    fig1.savefig(r'G:\My Drive\old files\WRITE - Presentations\mpl_figures\test.pdf', format = 'pdf')
+
+
+
+
def superTACO_Bars(filepaths, angles = (45, 45), quanta_size = None, quanta_offset = None, bardims = (1, 1), barbase = -30, plot = False):
    """Assemble 20 dB-gain TACO sweeps into a 3D bar chart vs flux bias and
    generator detuning.

    Step 1: assemble best powers into a bias_current -> (gen_freq, best_power)
    mapping: for each bias current there is a gen_freq array, and for each
    gen_freq one generator power that best achieves 20 dB gain.

    Parameters
    ----------
    filepaths : list[str]
        ddh5 files containing 'calculated_gain' sweeps.
    angles : tuple
        (azimuth, elevation) of the 3D view.  (Changed from a mutable list
        default to a tuple; indexing behavior is identical.)
    quanta_size, quanta_offset : float or None
        Flux-quantum scale/offset used only for the bar labels.
    bardims : tuple
        (dx, dy) of each 3D bar.
    barbase : float
        z value the bars grow from (dBm).
    plot : bool
        Draw the 3D bar chart.

    Returns
    -------
    list
        [info_dict, bias_currents, best_gen_frequencies, best_gen_powers,
        gains] as numpy arrays (info_dict maps current -> (detunings, powers)).
    """
    bias_currents = []
    best_gen_frequencies = []
    best_gen_powers = []
    gains = []
    info_dict = {}

    for gain_filepath in filepaths:
        # Extract the best gen powers from each file.
        gain_dicts = all_datadicts_from_hdf5(gain_filepath)
        gainDict = gain_dicts['data']
        gain_data = gainDict.extract('calculated_gain')
        [bias_current, gen_frequency, gen_power, calc_gain] = [gain_data.data_vals('bias_current'),
                                                               gain_data.data_vals('gen_frequency'),
                                                               gain_data.data_vals('gen_power'),
                                                               gain_data.data_vals('calculated_gain')
                                                               ]

        for current in np.unique(bias_current): #could be multiple bias currents in one single TACO datafile
            bias_currents.append(current)
            print(f"CURRENT: {current*1000}mA")
            filt = bias_current == current
            cfreqs = gen_frequency[filt]
            cpowers = gen_power[filt]
            unique_freqs = np.unique(cfreqs)
            cgain = calc_gain[filt]

            # Per generator frequency: the pump power whose gain is closest
            # to 20 dB, then the overall minimum such power.
            best_powers = [select_closest_to_target(cpowers[cfreqs == f], cgain[cfreqs == f], 20) for f in unique_freqs]
            best_power = np.min(best_powers)
            best_gen_powers.append(best_power)

            best_gains = [select_closest_to_target(cgain[cfreqs == f], cgain[cfreqs == f], 20) for f in unique_freqs]
            best_gain = best_gains[np.argmin(np.abs(np.array(best_gains)-20))]
            gains.append(best_gain)

            # Convert freqs to detuning from the best-power frequency.
            best_freq = np.average(unique_freqs[np.where(best_powers == best_power)])
            best_gen_frequencies.append(best_freq)

            adjusted_freqs = unique_freqs - best_freq

            info_dict[current] = adjusted_freqs, best_powers

    if plot:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection = '3d')
        ax.invert_yaxis()
        ax.azim = angles[0]
        ax.elev = angles[1]
        # Plotting in sorted order of bias currents for rendering reasons.
        for i, current in enumerate(sorted(info_dict)):
            adjusted_freqs = info_dict[current][0]
            best_powers = info_dict[current][1]
            if quanta_size is not None:  # `is not None`, not `!= None`
                quant_frac = np.round((current-quanta_offset)/quanta_size, 3)
            else:
                # BUG FIX: quant_frac was undefined when quanta_size is None
                # yet used unconditionally in the bar label (NameError).
                quant_frac = np.nan
            base_of_bars = barbase*np.ones(np.size(best_powers))
            height_of_bars = best_powers-base_of_bars
            ax.bar3d(current*np.ones(np.size(adjusted_freqs))*1000, adjusted_freqs/1e6, base_of_bars, bardims[0], bardims[1], height_of_bars, color=None, shade=True, label = f'{quant_frac} quanta', zsort = 'average')

        ax.set_xlabel("Bias Current (mA)")
        ax.set_ylabel("Generator Detuning (MHz)")
        ax.set_zlabel("Generator Power (dBm)")
        ax.set_title("20dB Gain Power vs. Flux Bias and Generator Detuning")

    return [info_dict, np.array(bias_currents), np.array(best_gen_frequencies), np.array(best_gen_powers), np.array(gains)]
+
+#%%
if __name__ == "__main__":
    # -- earlier gain-TACO analysis, kept commented for reference --
    # total_line_attenuation_signal = 0
    # total_line_attenuation_pump = 0 #does not include VNA attenuation
    # gain_cwd = r'Z:\Data\SA_1X_C1\Best_Tacos\Gain'
    # res = find_all_ddh5(gain_cwd)
    # [info_dict, bias_currents, best_gen_freqs, best_gen_powers, gains] = superTACO_Bars(res, angles = [60,20], quanta_size = 0.35e-3, quanta_offset = -0.071e-3, bardims = [0.001, 0.7], barbase = -24)

    # best_gen_powers -= total_line_attenuation_pump

    # fig2 = plt.figure(2)
    # conv = False
    # if conv:
    #     # plt.plot(conv_func(bias_currents), np.array(best_gen_powers)-total_line_attenuation_signal, 'b.', markersize = 15)
    #     plt.title(r'Lowest 20dB Power (dBm) vs. Flux ($\Phi_0$)')
    #     plt.xlabel('Flux Quanta ($\Phi/\Phi_0)$')
    #     plt.ylabel('Generator Power @20dB Gain (dBm)')
    #     plt.grid()
    #     # plt.vlines(conv_func(-0.173e-3), np.min(best_gen_powers-total_line_attenuation_pump), np.max(best_gen_powers-total_line_attenuation_pump), linestyles = 'dashed', colors = ['red'])
    # else:
    #     plt.plot(bias_currents*1000, best_gen_powers, 'b.', markersize = 15)
    #     plt.title('Lowest 20dB Power (dBm RT) vs. Bias Current (mA)')
    #     plt.xlabel('Bias Current (mA)')
    #     plt.ylabel('Generator Power @20dB Gain (dBm)')
    #     plt.grid()

    # Live analysis: gather every saturation-sweep ddh5 under sat_cwd and build the
    # summary saturation plot. tla_signal / tla_pump are total line attenuations in
    # dB for the signal and pump lines; cscale is the detuning colorbar range (MHz).
    sat_cwd = r'Z:\Data\SA_2X_B1\best_tacos\sat'
    res = find_all_ddh5(sat_cwd)
    superSat(res, y_norm_val = -92, filter_window=9, conv = False, plot_individual = False, tla_signal = 61, tla_pump = 71.5, cscale = 100)
+
+
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Feb 19 14:27:01 2021
+
+@author: Hatlab_3
+"""
+
+# import easygui
+from plottr.apps.autoplot import autoplotDDH5, script, main
+from plottr.data.datadict_storage import all_datadicts_from_hdf5
+import matplotlib.pyplot as plt
+import numpy as np
+from data_processing.Helper_Functions import get_name_from_path, shift_array_relative_to_middle, log_normalize_to_row, select_closest_to_target, log_normalize_up_to_row
+import matplotlib.colors as color
+from scipy.ndimage import gaussian_filter
+
+#Get Taco (irregular imshow)
def make_tacos(bias_current, gen_frequency, gen_power, calculated_gain, replace_nan = False, vmin = 15, vmax = 25, fancy = False):
    """Scatter-plot gain vs. (generator frequency, generator power) and overlay the
    20 dB "taco" line (the power closest to 20 dB gain at each frequency).

    Parameters
    ----------
    bias_current : array
        Unused here; kept for signature compatibility with callers.
    gen_frequency, gen_power, calculated_gain : array
        Flattened sweep data (frequency in Hz, power in dBm, gain in dB).
    replace_nan : bool
        If True, treat NaN gains as 0 for coloring (input array is not modified).
    vmin, vmax : float
        Color scale limits (dB).
    fancy : bool
        Unused; kept for signature compatibility.

    Returns
    -------
    (fig, ax, cb) : figure, axes, and colorbar handles.
    """
    fig, ax = plt.subplots(1,1)
    if replace_nan:
        # BUG FIX: previously assigned through a boolean mask, mutating the
        # caller's array in place; operate on a copy instead.
        calculated_gain = np.nan_to_num(calculated_gain, nan=0.0)
    img = ax.scatter(gen_frequency/1e6, gen_power, c = calculated_gain, cmap = 'seismic', vmin = vmin, vmax = vmax, zorder = 1)
    cb = fig.colorbar(img, ax = ax)
    unique_freqs = np.unique(gen_frequency)
    # 20 dB "taco" outline: best power at each generator frequency
    best_powers = [select_closest_to_target(gen_power[gen_frequency == f], calculated_gain[gen_frequency == f], 20) for f in unique_freqs]
    ax.plot(unique_freqs/1e6, best_powers, 'k-', lw = 2)
    return fig, ax, cb
+
def make_gain_profiles(filepath, replace_nan = False, vmin = 15, vmax = 25, angles = [45, 45]):
    """Draw one gain trace (gain vs. VNA frequency) per generator frequency, taken at
    that frequency's best (closest-to-20 dB) pump power, stacked in a 3D plot.

    Returns (fig, ax).
    """
    data = all_datadicts_from_hdf5(filepath)['data']

    # per-point calculated gain used to pick the best pump power per frequency
    gain_dep = data.extract('calculated_gain')
    calc_gain = gain_dep.data_vals('calculated_gain')
    calc_freqs = gain_dep.data_vals('gen_frequency')
    calc_powers = gain_dep.data_vals('gen_power')

    best_powers = [
        select_closest_to_target(calc_powers[calc_freqs == f], calc_gain[calc_freqs == f], 20)
        for f in np.unique(calc_freqs)
    ]

    # raw traces to plot at those best powers
    trace_dep = data.extract('gain_trace')
    traces = trace_dep.data_vals('gain_trace')
    trace_powers = trace_dep.data_vals('gen_power')
    trace_vna_freqs = trace_dep.data_vals('vna_frequency')
    trace_gen_freqs = trace_dep.data_vals('gen_frequency')

    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    ax.azim = angles[0]
    ax.elev = angles[1]

    for bp in best_powers:
        sel = np.isclose(trace_powers, bp, atol = 0.05)
        gen_f_mhz = np.round(np.average(trace_gen_freqs[sel])/1e6, 0)
        vna_f = trace_vna_freqs[sel][0]
        ax.plot(gen_f_mhz*np.ones(np.size(vna_f)), vna_f/1e6, traces[sel][0])

    return fig, ax
+
def make_gain_surface(filepath, replace_nan = False, vmin = 15, vmax = 25, angles = [45, 45]):
    """Same data selection as make_gain_profiles, but rendered as a single 3D surface
    of gain over (generator frequency, VNA frequency).

    Returns (fig, ax).
    """
    data = all_datadicts_from_hdf5(filepath)['data']

    # per-point calculated gain used to pick the best pump power per frequency
    gain_dep = data.extract('calculated_gain')
    calc_gain = gain_dep.data_vals('calculated_gain')
    calc_freqs = gain_dep.data_vals('gen_frequency')
    calc_powers = gain_dep.data_vals('gen_power')

    best_powers = [
        select_closest_to_target(calc_powers[calc_freqs == f], calc_gain[calc_freqs == f], 20)
        for f in np.unique(calc_freqs)
    ]

    # raw traces sampled at those best powers
    trace_dep = data.extract('gain_trace')
    traces = trace_dep.data_vals('gain_trace')
    trace_powers = trace_dep.data_vals('gen_power')
    trace_vna_freqs = trace_dep.data_vals('vna_frequency')
    trace_gen_freqs = trace_dep.data_vals('gen_frequency')

    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    ax.azim = angles[0]
    ax.elev = angles[1]

    # build one surface row per generator frequency
    gen_rows, sig_rows, gain_rows = [], [], []
    for bp in best_powers:
        sel = np.isclose(trace_powers, bp, atol = 0.05)
        gen_f_mhz = np.round(np.average(trace_gen_freqs[sel])/1e6, 0)
        vna_f = trace_vna_freqs[sel][0]
        gen_rows.append(gen_f_mhz*np.ones(np.size(vna_f)))
        sig_rows.append(vna_f/1e6)
        gain_rows.append(traces[sel][0])

    ax.plot_surface(np.array(gen_rows), np.array(sig_rows), np.array(gain_rows),
                    rstride=1, cstride=1, cmap='viridis', edgecolor='none')

    return fig, ax
+
def make_sat_img_plot(sat_bias_current, sat_gen_freq, sat_vna_powers, sat_gain, levels = [-2,-1.5,-1, -0.25, 0.25,1, 1.5,2], norm_power = -40, x_val = None, filter_window = 0, vmin = -1, vmax = 1):
    """Plot a saturation colormap (normalized gain vs. generator frequency and signal
    power) and overlay the +/-1 dB saturation power extracted at each frequency.

    Each frequency column is normalized to its gain at norm_power; the saturation
    power is the lowest signal power above norm_power where the normalized gain
    deviates by +/-1 dB.  Returns (fig, ax, img).
    """
    y_norm_val = norm_power #Signal power at which to normalize the rest of the plot to
    # print(f"Normalized to VNA_Power = {y_norm_val}dB")
    fig, ax = plt.subplots()
    # blue -> white -> grey plateau -> white -> red; the grey band de-emphasizes
    # small deviations around 0 dB
    colors = [color.hex2color('#4444FF'), color.hex2color('#FFFFFF'), color.hex2color('#888888'), color.hex2color('#888888'),color.hex2color('#FFFFFF'), color.hex2color('#FF4444')]
    _cmap = color.LinearSegmentedColormap.from_list('my_cmap', colors)

    # smooth along the frequency axis only, then normalize each column to its
    # value at y_norm_val
    smoothed_normed_data = log_normalize_to_row(sat_gen_freq, sat_vna_powers[0], gaussian_filter(sat_gain.T, (filter_window,0)), y_norm_val= y_norm_val)
    img = ax.pcolormesh(sat_gen_freq/1e6, sat_vna_powers[0],
                        smoothed_normed_data,
                        # gaussian_filter(sat_gain.T, 5),
                        cmap = _cmap,
                        vmin = vmin, vmax = vmax)
    #getting saturation points
    sat_powers = []
    for col in smoothed_normed_data.T:
        # count of points at or below the normalization power; shifts indices found
        # in the above-norm slice back into whole-column coordinates
        buffer = np.size(col[sat_vna_powers[0]<= y_norm_val])
        #append locations of +1dB and -1dB points
        try:
            pos_loc = buffer+np.min(np.where(np.isclose(col[sat_vna_powers[0]>y_norm_val], 1, atol = 1e-2))[0])
        except ValueError:
            # never crosses +1 dB: fall back to the top of the column
            pos_loc = np.size(col)-1
        try:
            neg_loc = buffer+np.min(np.where(np.isclose(col[sat_vna_powers[0]>y_norm_val], -1, atol = 1e-2))[0])
        except ValueError:
            # never crosses -1 dB: fall back to the top of the column
            neg_loc = np.size(col)-1

        # print(f"Pos: {pos_loc} \nNeg: {neg_loc}")
        loc_arr = np.array([pos_loc, neg_loc])
        loc_arr = np.floor(loc_arr[np.logical_not(np.isnan(loc_arr))]).astype(int)
        # print(loc_arr)
        # whichever deviation (+1 or -1 dB) occurs at lower signal power wins
        loc = np.min(loc_arr)
        sat_powers.append(sat_vna_powers[0][loc])

    # plot saturation powers at frequency-bin centers (last point dropped)
    ax.plot((np.array(sat_gen_freq+(sat_gen_freq[1]-sat_gen_freq[0])/2)/1e6)[0:-1], sat_powers[0:-1], 'k o')
    #plot the best one as a star
    max_loc = np.where(sat_powers[0:-1] == np.max(sat_powers[0:-1]))[0][0]
    # print(max_loc)
    plt.plot((np.array(sat_gen_freq+(sat_gen_freq[1]-sat_gen_freq[0])/2)/1e6)[max_loc], sat_powers[max_loc], 'r*', markersize = 5)
    ax.hlines(y = y_norm_val, xmin = np.min(sat_gen_freq/1e6), xmax = np.max(sat_gen_freq/1e6), color = 'b', lw = 2)
    return fig, ax, img
+
def superTACO_Lines(filepaths, angles = [45,45], quanta_size = None, quant_offset = None):
    """Overlay the 20 dB "taco" line of every gain file in a single 3D line plot
    (bias current vs. generator detuning vs. generator power).

    Parameters
    ----------
    filepaths : list of str
        ddh5 gain files containing a 'calculated_gain' dependent.
    angles : [azim, elev]
        3D viewing angles.
    quanta_size, quant_offset : float or None
        Flux-quanta conversion; when given, each line is labelled in quanta.

    Returns
    -------
    [bias_currents, best_gen_frequencies, best_gen_powers] as numpy arrays.
    """
    # step 1: assemble best powers into bias_currents vs. (gen_freq vs. best_powers) array
    # ie for each n bias current there is a gen_freq array
    # and for each m(n) gen_freq there is one gen_power that is best (could be NaN if garbage)
    # feed this into n mplot3d commands each with their own color and legend label
    fig = plt.figure()
    ax = fig.add_subplot(111, projection = '3d')
    ax.azim = angles[0]
    ax.elev = angles[1]

    bias_currents = []
    best_gen_frequencies = []
    best_gen_powers = []

    for gain_filepath in filepaths:
        # extract the best gen powers
        gain_dicts = all_datadicts_from_hdf5(gain_filepath)
        gainDict = gain_dicts['data']
        gain_data = gainDict.extract('calculated_gain')
        [bias_current, gen_frequency, gen_power, calc_gain] = [gain_data.data_vals('bias_current'),
                                                               gain_data.data_vals('gen_frequency'),
                                                               gain_data.data_vals('gen_power'),
                                                               gain_data.data_vals('calculated_gain')
                                                               ]

        for current in np.unique(bias_current):  # could be multiple bias currents in one single TACO datafile
            bias_currents.append(current)
            print(f"{gain_filepath}\nCURRENT: {current*1000}mA")
            filt = bias_current == current
            cfreqs = gen_frequency[filt]
            cpowers = gen_power[filt]
            unique_freqs = np.unique(cfreqs)
            cgain = calc_gain[filt]

            best_powers = [select_closest_to_target(cpowers[cfreqs == f], cgain[cfreqs == f], 20) for f in unique_freqs]
            # convert freqs to detuning from best power
            best_power = np.min(best_powers)
            best_gen_powers.append(best_power)

            best_freq = np.average(unique_freqs[np.where(best_powers == best_power)])
            best_gen_frequencies.append(best_freq)

            adjusted_freqs = unique_freqs - best_freq
            if quanta_size is not None:
                quant_frac = np.round((current-quant_offset)/quanta_size, 3)
                ax.plot(current*np.ones(np.size(unique_freqs))*1000, adjusted_freqs/1e6, best_powers, label = f'{quant_frac} quanta')
            else:
                # BUG FIX: this branch previously scaled current by 1e6 while the
                # labelled branch and the "mA" axis label scale by 1e3; unified to mA.
                ax.plot(current*np.ones(np.size(unique_freqs))*1000, adjusted_freqs/1e6, best_powers)
    ax.set_xlabel("Bias Current (mA)")
    ax.set_ylabel("Generator Detuning (MHz)")
    ax.set_zlabel("Generator Power (dBm)")
    ax.set_title("20dB Gain Power vs. Flux Bias and Generator Detuning")

    return [np.array(bias_currents), np.array(best_gen_frequencies), np.array(best_gen_powers)]
+
+# -*- coding: utf-8 -*-
+"""
+Created on Thu May 13 09:44:27 2021
+
+@author: Hatlab_3
+purpose: calculate amplifier g3 from applied pump power and resonant frequency
+"""
+
+#import tools for processing taco data
+import easygui
+import os
+from plottr.apps.autoplot import autoplotDDH5, script, main
+from plottr.data.datadict_storage import all_datadicts_from_hdf5
+import numpy as np
+import matplotlib.pyplot as plt
+from measurement_modules.Helper_Functions import get_name_from_path, shift_array_relative_to_middle, log_normalize_to_row, select_closest_to_target, log_normalize_up_to_row, find_all_ddh5
+from mpl_toolkits.mplot3d import Axes3D
+from scipy.ndimage import gaussian_filter
+import matplotlib.colors as color
+import time
+
+from data_processing.ddh5_Plotting.TACO_multiplot_b1 import superTACO_Bars
+
+[docs]def g3_from_pump_power(gains: np.ndarray,
+ pump_powers: np.ndarray,
+ mode_kappas: np.ndarray,
+ pump_omegas: np.ndarray,
+ pump_detunings_from_res: np.ndarray
+ ):
+ '''
+ Source for calculation: https://arxiv.org/abs/1605.00539
+ "Introduction to Quantum-limited Parametric Amplification of Quantum Signals with Josephson Circuits"
+ by Michelle Devoret and Ananda Roy
+
+ Parameters
+ ----------
+ gains : np.ndarray
+ gain in dB, whose positions correspond to the powers given in the pump_powers section
+ pump_powers : np.ndarray
+ pump power in dBm that the amplifier sees. This must include all attenuation in the entire line
+ mode_kappas : np.ndarray
+ mode kappa in 2pi*Hz
+ pump_omegas : np.ndarray
+ pump frequency in 2pi*Hz
+ pump_detunings_from_res : np.ndarray
+ pump detuning in 2pi(f-f0) where f0 is the resonator frequency in hz
+
+ Returns
+ -------
+ numPumpPhotons : np.ndarray
+ The sqrt of the number of pump photons expected in the pumping resonator.
+ g3_arr : np.ndarray
+ The third order coupling in Hz for each combination of inputs
+ '''
+
+ hbar = 1.0545718e-34
+ lin_pump_powers = np.power(10,pump_powers/10)*0.001
+ #get the expected value of pump photons present in the resonator
+ numPumpPhotons = np.sqrt(8*mode_kappas*lin_pump_powers/(pump_omegas*hbar))/np.absolute(mode_kappas-2j*pump_detunings_from_res)
+ Lin_Power_gains = np.power(10,gains/20)
+ lpg = Lin_Power_gains
+ g3_arr = -0.5*(mode_kappas/numPumpPhotons)*np.sqrt((np.sqrt(lpg)-1)/(np.sqrt(lpg)+1))
+ return numPumpPhotons, g3_arr
+
+print(g3_from_pump_power(20,-80, 2*np.pi*17e6, 2*np.pi*2*6e9, 2*np.pi*6e9))
+
+#%%
if __name__ == '__main__':
    #make a function to convert bias currents into flux in radians
    quanta_offset = -8.98e-5
    quanta_size = 220.6e-6

    # bias current (A) -> flux in units of the flux quantum
    conv_func = lambda c: (c-quanta_offset)/quanta_size

    gain_cwd = r'E:\Data\Cooldown_20210408\SNAIL_Amps\C1\Best_Tacos\Gain'
    res = find_all_ddh5(gain_cwd)
    info_dict, bias_currents, best_gen_freqs, best_gen_powers, gains = superTACO_Bars(res, angles = [60,20], quanta_size = quanta_size, quanta_offset = quanta_offset, bardims = [0.001, 0.7], barbase = -24, plot = False)
    # NOTE(review): manual -10 dB correction applied to the second point only --
    # presumably compensating a known attenuation difference in that dataset; verify.
    best_gen_powers[1] = best_gen_powers[1]-10
    fig2 = plt.figure(2)
    ax = fig2.add_subplot(131)
    total_line_attenuation = 72  # dB between generator and amplifier
    # panel 1: pump power needed for 20 dB gain vs. flux
    ax.plot(conv_func(bias_currents), np.array(best_gen_powers)-total_line_attenuation, 'b.', markersize = 15)
    ax.set_title(r'Lowest 20dB Power (dBm) vs. Flux ($\Phi_0$)')
    ax.set_xlabel('Flux Quanta ($\Phi/\Phi_0)$')
    ax.set_ylabel('Generator Power @20dB Gain (dBm)')
    ax.grid()

    s = np.shape(bias_currents)
    s_arr = np.ones(s[0])
    print(s[0])
    #plotting the g3 vs flux
    ax2 = fig2.add_subplot(132)
    print(best_gen_powers-total_line_attenuation)
    # NOTE(review): mode kappa fixed at 2*pi*30 MHz for all points; pump assumed at
    # best_gen_freqs with resonator at half the pump frequency -- confirm.
    num_pump_photons, g3_arr = g3_from_pump_power(gains,best_gen_powers-total_line_attenuation, 2*np.pi*30e6*s_arr, 2*np.pi*best_gen_freqs, 2*np.pi*best_gen_freqs/2)
    ax2.plot(conv_func(bias_currents), np.abs(g3_arr)/1e6, 'b.', markersize = 15)
    ax2.set_title(r'Measured g3 (MHz) vs. Flux ($\Phi_0$)')
    ax2.set_xlabel('Flux Quanta ($\Phi/\Phi_0)$')
    ax2.set_ylabel('g3 coupling (MHz)')
    ax2.grid()

    # panel 3: pump photon number vs. flux
    ax3 = fig2.add_subplot(133)
    ax3.plot(conv_func(bias_currents), num_pump_photons**2, 'b.', markersize = 15)
    ax3.set_title(r'Number of Pump Photons in res vs. Flux ($\Phi_0$)')
    ax3.set_xlabel('Flux Quanta ($\Phi/\Phi_0)$')
    ax3.set_ylabel('Number of Pump Photons')
    ax3.grid()


    # plt.vlines(conv_func(-0.173e-3), np.min(best_gen_powers-total_line_attenuation), np.max(best_gen_powers-total_line_attenuation), linestyles = 'dashed', colors = ['red'])
+
+
+
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Jan 13 10:14:25 2021
+
+@author: Hatlab_3
+"""
+# import easygui
+from plottr.apps.autoplot import autoplotDDH5, script, main
+from plottr.data.datadict_storage import all_datadicts_from_hdf5
+import matplotlib.pyplot as plt
+import numpy as np
+from data_processing.Helper_Functions import get_name_from_path, shift_array_relative_to_middle, log_normalize_to_row, select_closest_to_target
+from data_processing.fitting.QFit import fit, plotRes, reflectionFunc
+import inspect
+from plottr.data import datadict_storage as dds, datadict as dd
+from scipy.signal import savgol_filter
+from scipy.interpolate import interp1d
+from scipy.fftpack import dct, idct
+
+# FS_filepath = r'Z:/Data/SA_2X_B1/fluxsweep/fits/2021-07-22/2021-07-22_0024_SA_2X_B1/2021-07-22_0024_SA_2X_B1.ddh5'
+# Duff_filepath = r'Z:/Data/SA_2X_B1/duffing/2021-07-23/2021-07-23_0004_SA_2X_B1_duffing/2021-07-23_0004_SA_2X_B1_duffing.ddh5'
+# save_filepath = r'Z:\Data\SA_2X_B1\duffing\fits'
+#create a custom exception that handles fit errors in a more transparent way
+
+#%%Create measurement-based saver for the fit data.
+
class fit_Duff_Measurement():
    '''
    Semi-automatic resonance fitter for Duffing-shift measurements.

    outline:
        - Take in an existing Wolfie-format duffing file and manual fit popt
        - run semiauto_fit for each generator power, return an array of popts in an N_currents x M_gen_powers array
        - use plotRes to debug fitter
        - generate duffing graph

    Typical usage: __init__ -> create_file -> load_data -> initial_fit -> fit.
    popt layout throughout: [Qext, Qint, omega0 (rad/s), magBack, phaseOff].
    '''

    def __init__(self, name):
        #setup files
        # name: identifier handed to the DDH5 writer when create_file() is called
        self.name = name

    def create_file(self, save_filepath):
        """Create the output DataDict/DDH5 file that fitted results are streamed into."""
        self.datadict = dd.DataDict(
            current = dict(unit='A'),
            gen_power = dict(unit = 'dBm'),

            base_resonant_frequency = dict(axes = ['current']),
            low_power_res_frequency = dict(axes = ['current']),

            driven_resonant_frequency= dict(axes = ['current', 'gen_power']),
            driven_Qint = dict(axes = ['current', 'gen_power']),
            driven_Qext = dict(axes = ['current', 'gen_power']),

            driven_resonant_frequency_error= dict(axes = ['current', 'gen_power']),
            driven_Qint_error = dict(axes = ['current', 'gen_power']),
            driven_Qext_error = dict(axes = ['current', 'gen_power']),

            res_shift_ref_undriven = dict(axes = ['current', 'gen_power']),
            res_shift_ref_low = dict(axes = ['current', 'gen_power'])
        )
        self.datadir = save_filepath
        self.writer = dds.DDH5Writer(self.datadir, self.datadict, name=self.name)
        # NOTE(review): the writer context is entered manually and never exited in
        # this class -- the output file stays open for the object's lifetime.
        self.writer.__enter__()
        return None

    def load_data(self, Duff_filepath, FS_filepath, current_filt = None):
        """Load driven/undriven VNA traces from the duffing file and build the
        fluxsweep interpolators.

        current_filt: optional [lower, upper] bias-current window; defaults to the
        full measured range.  Note the mask below uses strict inequalities, so the
        window endpoints themselves are excluded.
        """
        #Duffing Data Extraction
        duff_dicts = all_datadicts_from_hdf5(Duff_filepath)
        duffDict = duff_dicts['data']
        uvphDict = duffDict.extract('undriven_vna_phase')
        uvpoDict = duffDict.extract('undriven_vna_power')
        dvphDict = duffDict.extract('driven_vna_phase')
        dvpoDict = duffDict.extract('driven_vna_power')

        if current_filt == None:
            lower = np.min(uvphDict.data_vals('current'))
            upper = np.max(uvphDict.data_vals('current'))
        else:
            [lower, upper] = current_filt
        #get the arrays back out

        # boolean mask selecting points strictly inside (lower, upper)
        filt = (uvphDict.data_vals('current')<upper)*(uvphDict.data_vals('current')>lower)
        self.undriven_vna_phase = uvphDict.data_vals('undriven_vna_phase')[filt]
        self.undriven_vna_power = uvpoDict.data_vals('undriven_vna_power')[filt]
        self.driven_vna_phase = dvphDict.data_vals('driven_vna_phase')[filt]
        self.driven_vna_power= dvpoDict.data_vals('driven_vna_power')[filt]
        self.vna_freqs = uvphDict.data_vals('vna_frequency')[filt]*2*np.pi  # stored in rad/s
        self.currents = uvphDict.data_vals('current')[filt]
        self.gen_powers = dvpoDict.data_vals('gen_power')[filt]



        self.res_func, self.qint_func, self.qext_func = self.read_fs_data(FS_filepath)
        return None

    def read_fs_data(self, fs_filepath, interpolation = 'linear'):
        """Build interpolators (resonant frequency, Qint, Qext vs. bias current)
        from a fitted fluxsweep file; returns the three interp1d callables."""
        ret = all_datadicts_from_hdf5(fs_filepath)
        res_freqs = ret['data'].extract('base_resonant_frequency').data_vals('base_resonant_frequency')
        currents = ret['data'].extract('base_resonant_frequency').data_vals('current')
        Qexts = ret['data'].extract('base_Qext').data_vals('base_Qext')
        Qints = ret['data'].extract('base_Qint').data_vals('base_Qint')
        fs_res_fit_func = interp1d(currents, res_freqs, interpolation)
        fs_Qint_fit_func = interp1d(currents, Qints, interpolation)
        fs_Qext_fit_func = interp1d(currents, Qexts, interpolation)
        return fs_res_fit_func, fs_Qint_fit_func, fs_Qext_fit_func

    def save_fit(self, currents, gen_power,driven_popts, driven_pconvs, low_power_res_fit_func):
        """Write one row per unique current into the DDH5 file: driven fit results,
        their 1-sigma errors, and the shifts relative to the fluxsweep reference
        and the low-power reference."""
        for i, current in enumerate(np.unique(currents)):
            driven_popt = driven_popts[i]
            driven_pconv = driven_pconvs[i]
            res_freq_ref = float(self.res_func(current))
            res_freq_low_power = float(low_power_res_fit_func(current))
            self.writer.add_data(
                current = current,
                gen_power = gen_power,

                base_resonant_frequency = res_freq_ref,
                low_power_res_frequency = res_freq_low_power,

                # popt = [Qext, Qint, omega0, magBack, phaseOff]; omega0 -> Hz
                driven_resonant_frequency = driven_popt[2]/(2*np.pi),
                driven_Qint = driven_popt[1],
                driven_Qext = driven_popt[0],

                # sqrt of covariance diagonal = 1-sigma parameter errors
                driven_resonant_frequency_error = np.sqrt(driven_pconv[2,2])/(2*np.pi),
                driven_Qint_error = np.sqrt(driven_pconv[1,1]),
                driven_Qext_error = np.sqrt(driven_pconv[0,0]),

                res_shift_ref_undriven = (driven_popt[2]/(2*np.pi)-res_freq_ref),
                res_shift_ref_low = (driven_popt[2]/(2*np.pi)-res_freq_low_power)

            )

    def single_fit(self, vna_freqs, phase_trace, pow_trace, f0Guess, QextGuess = 50, QintGuess = 300, magBackGuess = 0.0001, bounds = None, smooth = False, smooth_win = 11, phaseOffGuess = 0, debug = False, adaptive_window = False, adapt_win_size = 300e6):
        """Fit one reflection trace.

        vna_freqs are in rad/s; f0Guess is in Hz (converted below).  Returns
        (popt, pconv) and also stores them on self.initial_popt / self.initial_pconv.
        """
        f0Guess = f0Guess*2*np.pi  # Hz -> rad/s
        if bounds == None:
            bounds=self.default_bounds(QextGuess, QintGuess, f0Guess, magBackGuess)

        if adaptive_window:
            # restrict the fit to +/- adapt_win_size (Hz) around the guess
            filt1 = vna_freqs < f0Guess + adapt_win_size*2*np.pi
            filt2 = vna_freqs > f0Guess - adapt_win_size*2*np.pi
            filt = filt1*filt2
            # NOTE(review): only the frequency array is windowed here; phase_trace
            # and pow_trace keep their full length -- confirm this is intended.
            vna_freqs = np.copy(vna_freqs[filt])

        if debug:
            plt.figure(1)
            plt.plot(vna_freqs/(2*np.pi), phase_trace)
            plt.title("Debug1: phase")
            plt.figure(2)
            plt.plot(vna_freqs/(2*np.pi), pow_trace)
            plt.title("Debug1: power")

        if smooth:
            phase_trace = savgol_filter(phase_trace, smooth_win, 3)
            pow_trace = savgol_filter(pow_trace, smooth_win, 3)

        # dB -> linear magnitude, then split into I/Q
        lin = 10**(pow_trace/20)

        imag = lin * np.sin(phase_trace)
        real = lin * np.cos(phase_trace)

        popt, pconv = fit(vna_freqs, real, imag, pow_trace, phase_trace, Qguess = (QextGuess,QintGuess), f0Guess = f0Guess, real_only = 0, bounds = bounds, magBackGuess = magBackGuess, phaseGuess = phaseOffGuess)

        print(f'f (Hz): {np.round(popt[2]/2/np.pi, 3)}', )
        fitting_params = list(inspect.signature(reflectionFunc).parameters.keys())[1:]
        for i in range(2):
            print(f'{fitting_params[i]}: {np.round(popt[i], 2)} +- {np.round(np.sqrt(pconv[i, i]), 3)}')
        Qtot = popt[0] * popt[1] / (popt[0] + popt[1])
        print('Q_tot: ', round(Qtot), '\nT1 (s):', round(Qtot/popt[2]), f"Kappa: {round(popt[2]/2/np.pi/Qtot)}", )

        self.initial_popt = popt
        self.initial_pconv = pconv

        if debug:
            plotRes(vna_freqs, real, imag, pow_trace, phase_trace, popt)

        return(popt, pconv)

    def initial_fit(self, f0Guess, QextGuess = 50, QintGuess = 300, magBackGuess = 0.0001, bounds = None, smooth = False, smooth_win = 11, phaseOffGuess = 0, debug = False, adaptive_window = False, adapt_win_size = 300e6):
        """Fit the first (lowest current, lowest power) undriven trace to seed
        self.initial_popt for the semi-automatic sweep, and plot the result."""
        if bounds == None:
            bounds=self.default_bounds(QextGuess, QintGuess, f0Guess, magBackGuess)

        # select the first bias current at the first generator power
        filt = (self.currents == np.unique(self.currents)[0])*(self.gen_powers == np.unique(self.gen_powers)[0])
        print(np.size(filt))

        init_vna_freqs = np.unique(self.vna_freqs[filt])
        print(np.size(init_vna_freqs))
        init_phase_trace = self.undriven_vna_phase[filt]
        print(np.size(init_phase_trace))
        init_pow_trace = self.undriven_vna_power[filt]
        print(np.size(init_pow_trace))

        if debug:
            plt.figure(1)
            plt.plot(init_vna_freqs/(2*np.pi), init_phase_trace)
            plt.title("Debug1: phase")
            plt.figure(2)
            plt.plot(init_vna_freqs/(2*np.pi), init_pow_trace)
            plt.title("Debug1: power")

        # I/Q used only for the plotRes call below; single_fit recomputes its own
        lin = 10**(init_pow_trace/20)

        imag = lin * np.sin(init_phase_trace)
        real = lin * np.cos(init_phase_trace)

        popt, pconv = self.single_fit(init_vna_freqs, init_phase_trace, init_pow_trace, f0Guess, QintGuess = QintGuess, QextGuess = QextGuess, magBackGuess = magBackGuess, phaseOffGuess = phaseOffGuess, adaptive_window = adaptive_window, adapt_win_size = adapt_win_size, debug = debug)
        self.initial_popt = popt
        self.initial_pconv = pconv

        plotRes(init_vna_freqs, real, imag, init_pow_trace, init_phase_trace, popt)

    def default_bounds(self, QextGuess, QintGuess, f0Guess, magBackGuess):
        """Loose default fit bounds centered on the supplied guesses
        (order: Qext, Qint, f0 (rad/s), magBack, phase offset)."""
        return ([QextGuess / 1.5, QintGuess / 1.5, f0Guess/2, magBackGuess / 2, -2*np.pi],
                [QextGuess * 1.5, QintGuess +200, f0Guess*2, magBackGuess * 2, 2*np.pi])

    def semiauto_fit(self, bias_currents, vna_freqs, vna_mags, vna_phases, popt,
                     debug = False,
                     smooth = False,
                     smooth_win = 11,
                     adaptive_window = False,
                     adapt_win_size = 300e6,
                     fourier_filter = False,
                     fourier_cutoff = 40,
                     pconv_tol = 10,
                     bounds = None):
        """Fit every bias current in sequence, seeding each fit from the previous
        result; retries with detuned f0 guesses when the covariance jumps.

        popt is the seed [Qext, Qint, omega0, magBack, phaseOff] from initial_fit.
        Returns (unique currents, res_freqs (Hz), Qints, Qexts, magBacks, popts, pconvs).
        """
        res_freqs = np.zeros(np.size(np.unique(bias_currents)))
        Qints = np.zeros(np.size(np.unique(bias_currents)))
        Qexts = np.zeros(np.size(np.unique(bias_currents)))
        magBacks = np.zeros(np.size(np.unique(bias_currents)))
        popts = []
        pconvs = []

        init_f0 = popt[2]
        init_Qint = popt[1]
        init_Qext = popt[0]
        init_magBack = popt[3]

        for i, current in enumerate(np.sort(np.unique(bias_currents))):
            first_condn = bias_currents == current
            [first_trace_freqs, first_trace_phase, first_trace_mag] = [vna_freqs[first_condn]*2*np.pi, vna_phases[first_condn], 10**(vna_mags[first_condn]/20)]
            if smooth:
                first_trace_phase = savgol_filter(first_trace_phase, smooth_win, 3)
                first_trace_mag = savgol_filter(first_trace_mag, smooth_win, 3)

            imag = first_trace_mag * np.sin(first_trace_phase)
            real = first_trace_mag * np.cos(first_trace_phase)
            if fourier_filter == True:
                if debug:
                    plt.figure(3)
                    plt.plot(first_trace_freqs, real)
                    plt.plot(first_trace_freqs, imag)
                    plt.title('before filter')
                # drop the lowest DCT components to remove slow background ripple
                imag = idct(dct(imag)[fourier_cutoff:])
                real = idct(dct(real)[fourier_cutoff:])
                if debug:
                    plt.figure(4)
                    plt.plot(real)
                    plt.plot(imag)
                    plt.title('after filter')
            # NOTE(review): both prints below are missing the f-prefix, so they
            # print the literal braces rather than the values.
            print("Bias current: {current}")
            print("Generator Power: {}")
            if i >= 2:
                if adaptive_window:
                    # window around the previous resonance (slice [i-1:i] is just
                    # the single previous element)
                    filt1 = first_trace_freqs<np.average(res_freqs[i-1:i])*2*np.pi+adapt_win_size*2*np.pi/2
                    filt2 = first_trace_freqs>np.average(res_freqs[i-1:i])*2*np.pi-adapt_win_size*2*np.pi/2
                    filt= filt1*filt2
                else:
                    filt = np.ones(np.size(first_trace_freqs)).astype(bool)
                #start averaging the previous fits for prior information to increase robustness to bad fits
                f0Guess = np.average(res_freqs[i-1:i])*2*np.pi
                magBackGuess = np.average(magBacks[i-1:i])
                (QextGuess,QintGuess) = (np.average(Qexts[i-1:i]),np.average(Qints[i-1:i]))
            else:
                # first two currents: fall back to the initial_fit seed
                f0Guess = init_f0
                magBackGuess = init_magBack
                (QextGuess, QintGuess) = (init_Qext, init_Qint)
                filt = np.ones(np.size(first_trace_freqs)).astype(bool)
            if bounds == None:
                bounds=self.default_bounds(QextGuess, QintGuess, f0Guess, magBackGuess)
            if i>2:
                prev_pconv = pconv
            #fit(freq, real, imag, mag, phase, Qguess=(2e3, 1e3),real_only = 0, bounds = None)
            popt, pconv = fit(first_trace_freqs[filt], real[filt], imag[filt], first_trace_mag, first_trace_phase, Qguess = (QextGuess,QintGuess), f0Guess = f0Guess, real_only = 0, bounds = bounds, magBackGuess = magBackGuess)
            #catch a sudden change in convergence and try again until it's back in range:
            if i>2:
                # NOTE(review): np.array(a, b) treats the second argument as dtype;
                # this was probably meant to be np.array([a, b]) -- confirm before
                # relying on this branch.
                pconv_diff_ratio = (np.array(pconv[0,0], pconv[1,1])-np.array(prev_pconv[0,0], prev_pconv[1,1]))/np.array(prev_pconv[0,0], prev_pconv[1,1])
                if debug:
                    print(f"Pconv ratio: {pconv_diff_ratio}")
                j = 0
                # detunings (rad/s) to retry the f0 guess with, alternating above/below
                alt_array = np.array([1e6,-1e6,5e6,-5e6, 10e6,-10e6,15e6,-15e6, 20e6, -20e6, 30e6, -30e6, 50e6, -50e6, 100e6, -100e6])*2*np.pi
                while np.any(np.abs(pconv_diff_ratio)>pconv_tol):
                    if j>np.size(alt_array)-1:
                        raise Exception(f"No good fit at this point: (Bias: {current}, Power: {self.latest_power})")
                    print(f"sudden change in Q detected (pconv_diff_ratio: {pconv_diff_ratio}), trying resonant guess + {alt_array[j]/(2*np.pi)}")
                    #try above
                    if debug:
                        if j%2 ==0:
                            print("trying above")
                        else:
                            print("trying_below")
                    popt, pconv = fit(first_trace_freqs[filt], real[filt], imag[filt], first_trace_mag, first_trace_phase, Qguess = (QextGuess,QintGuess), f0Guess = f0Guess+alt_array[j], real_only = 0, bounds = bounds, magBackGuess = magBackGuess)

                    pconv_diff_ratio = (np.array(pconv[0,0], pconv[1,1])-np.array(prev_pconv[0,0], prev_pconv[1,1]))/np.array(prev_pconv[0,0], prev_pconv[1,1])
                    j+=1


            if debug:
                import time
                plotRes(first_trace_freqs[filt], real[filt], imag[filt], first_trace_mag[filt], first_trace_phase[filt], popt)
                time.sleep(1)

            res_freqs[i] = popt[2]/(2*np.pi)
            Qints[i] = popt[1]
            Qexts[i] = popt[0]
            magBacks[i] = popt[3]
            popts.append(popt)
            pconvs.append(pconv)

            print(f'f (Hz): {np.round(popt[2]/2/np.pi, 3)}', )
            fitting_params = list(inspect.signature(reflectionFunc).parameters.keys())[1:]
            # NOTE(review): this inner loop reuses the outer loop variable i; it is
            # harmless only because enumerate reassigns i on the next iteration.
            for i in range(2):
                print(f'{fitting_params[i]}: {np.round(popt[i], 2)} +- {np.round(np.sqrt(pconv[i, i]), 3)}')
            Qtot = popt[0] * popt[1] / (popt[0] + popt[1])
            print('Q_tot: ', round(Qtot), '\nT1 (s):', round(Qtot/popt[2]), f"Kappa: {round(popt[2]/2/np.pi/Qtot)}", )

        return np.unique(bias_currents), res_freqs, Qints, Qexts, magBacks, popts, pconvs

    def fit(self,
            debug = False,
            save_data = False,
            max_gen_power = None,
            savedata = False,
            smooth = False,
            smooth_win = 11,
            adaptive_window = False,
            adapt_win_size = 300e6,
            bounds = None,
            fourier_filter = False,
            fourier_cutoff = 40,
            pconv_tol = 10):
        """Run semiauto_fit over every generator power (ascending), optionally
        capped at max_gen_power, and optionally stream results via save_fit.

        The first (lowest) power defines self.low_power_res_fit_func, the
        low-power resonance reference used for the Duffing shift.
        """
        # boolean mask over the sorted unique powers
        if max_gen_power != None:
            fitted_gen_powers = np.unique(self.gen_powers) <= max_gen_power
        else:
            fitted_gen_powers = np.unique(self.gen_powers) <= np.max(np.unique(self.gen_powers))


        for i, gen_power in enumerate(np.unique(self.gen_powers)[fitted_gen_powers]):
            self.latest_power = gen_power
            pow_condn = self.gen_powers == gen_power

            bias_currents = self.currents[pow_condn]
            vna_freqs = self.vna_freqs[pow_condn]
            vna_phases = self.driven_vna_phase[pow_condn]
            vna_mags = self.driven_vna_power[pow_condn]
            # NOTE(review): missing f-prefix -- prints the literal braces.
            print("Generator Power: {gen_power} dBm")
            fit_currents, fit_freqs, fit_Qints, fit_Qexts, fit_magBacks, popts, pconvs = self.semiauto_fit(bias_currents,
                                                                                                           vna_freqs/(2*np.pi),
                                                                                                           vna_mags,
                                                                                                           vna_phases,
                                                                                                           self.initial_popt,
                                                                                                           debug = debug,
                                                                                                           smooth = smooth,
                                                                                                           smooth_win = smooth_win,
                                                                                                           adaptive_window = adaptive_window,
                                                                                                           adapt_win_size = adapt_win_size,
                                                                                                           fourier_filter = fourier_filter,
                                                                                                           fourier_cutoff = fourier_cutoff,
                                                                                                           pconv_tol = pconv_tol,
                                                                                                           bounds = bounds)
            if i == 0:
                # lowest power defines the "low power" resonance reference
                self.low_power_res_fit_func = interp1d(fit_currents, fit_freqs, 'linear')
            if save_data:
                self.save_fit(bias_currents, gen_power, popts, pconvs, self.low_power_res_fit_func)
+
+
+#%%
+
+# #Duffing Autoplot
+# #main(Duff_filepath, 'data')
+
+
+# FS_filepath = r'Z:/Data/SA_2X_B1/fluxsweep/fits/2021-07-22/2021-07-22_0024_SA_2X_B1/2021-07-22_0024_SA_2X_B1.ddh5'
+# Duff_filepath = r'Z:/Data/SA_2X_B1/duffing/2021-07-23/2021-07-23_0010_SA_2X_B1_duffing_fine/2021-07-23_0010_SA_2X_B1_duffing_fine.ddh5'
+# save_filepath = r'Z:\Data\SA_2X_B1\duffing\fits'
+
+# DFit = fit_Duff_Measurement(Duff_filepath, FS_filepath, save_filepath, 'SA_B1_Duff_fine')
+# #%%
+# DFit.initial_fit(8.0e9,
+# QextGuess = 50,
+# QintGuess = 1000,
+# magBackGuess = 0.01,
+# bounds = None,
+# smooth = False,
+# smooth_win = 11,
+# phaseOffGuess = 0,
+# debug = False,
+# adaptive_window = False,
+# adapt_win_size = 300e6
+# )
+# #%%
+# print(np.min(DFit.gen_powers))
+# #%%
+# DFit.fit(
+# debug = False,
+# save_data = True,
+# max_gen_power = -20,
+# savedata = True,
+# smooth = False,
+# smooth_win = 11,
+# adaptive_window = True,
+# adapt_win_size = 400e6,
+# fourier_filter = False,
+# fourier_cutoff = 40,
+# pconv_tol = 10)
+# #%%
+
+
+
+
+
+
+
+
+
+
+
+
+
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Jan 18 16:59:08 2021
+
+@author: Hatlab_3
+"""
+from data_processing.ddh5_Plotting.utility_modules.FS_utility_functions import fit_fluxsweep
+from data_processing.Helper_Functions import find_all_ddh5
+from plottr.apps.autoplot import autoplotDDH5, script, main
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.signal import argrelextrema, savgol_filter
+from scipy.interpolate import interp1d
+
def find_quanta(currents, res_freqs, show = True, smooth_window = 11, order = 2):
    """Calibrate the flux quantum from two local maxima of a resonance-vs-current trace.

    Parameters
    ----------
    currents : ndarray
        bias currents (A)
    res_freqs : ndarray
        fitted resonant frequencies at each bias current
    show : bool
        overlay the detected extrema on a plot of the trace
    smooth_window : int
        Savitzky-Golay window used before peak detection
    order : int
        neighborhood size passed to argrelextrema

    Returns
    -------
    (quanta_size, quanta_offset, current->quanta function, quanta->current function)

    Raises an Exception unless exactly two local maxima are found.
    """
    smoothed = savgol_filter(res_freqs, smooth_window, 2)
    ext = argrelextrema(smoothed, np.greater, order = order)[0]
    if show:
        plt.plot(currents, res_freqs)
        for idx in ext:
            plt.plot(currents[idx], res_freqs[idx], 'r*')
    if np.size(ext) != 2:
        raise Exception(f'Two extrema not found: {ext}')
    quanta_size = np.abs(currents[ext[1]] - currents[ext[0]])
    quanta_offset = min(currents[ext])

    def current_to_quanta_conversion_function(c):
        return (c - quanta_offset) / quanta_size

    def quanta_to_current_function(q):
        return q * quanta_size + quanta_offset

    return quanta_size, quanta_offset, current_to_quanta_conversion_function, quanta_to_current_function
if __name__ == '__main__':
    # Adapting an old file to a new file: fit a fluxsweep, then calibrate flux quanta.
    #%%
    datadir = r'Z:/Data/SA_2X_B1/fluxsweep/2021-07-09/2021-07-09_0001_B1_FS1/2021-07-09_0001_B1_FS1.ddh5'
    savedir = r'Z:/Data/SA_2X_B1/fluxsweep/fits'
    # datadir = r'E:\Data\Cooldown_20210104\fluxsweep\2021-01-04_0003_Recentering_FS.ddh5'
    # savedir = r'E:\Data\Cooldown_20210104\fluxsweep'

    FS = fit_fluxsweep(datadir, savedir, 'SA_2X_B1')
    #%% seed the automatic fitter with one good manual fit
    FS.initial_fit(8.25e9, QextGuess = 1e2, QintGuess=20e4, magBackGuess = 0.01, phaseOffGuess = 0, debug = False, smooth = False, smooth_win = 15, adaptive_window = False, adapt_win_size = 100e6)
    #%% Automatic Fitting (be sure initial fit is good!)
    currents, res_freqs, Qints, Qexts, magBacks = FS.semiauto_fit(FS.currents, FS.vna_freqs/(2*np.pi), FS.undriven_vna_power, FS.undriven_vna_phase, FS.initial_popt, debug = False, savedata = True, smooth = False, smooth_win = 5, adaptive_window = True, adapt_win_size = 300e6, fourier_filter = False, pconv_tol = 7)
    #%%plotting the resonant frequency
    fig = plt.figure(0)
    ax = fig.add_subplot(111)
    ax.plot(currents*1000, res_freqs/1e6)
    ax.set_xlabel('Bias Currents (mA)')
    ax.set_ylabel('Resonant Frequencies (MHz)')
    ax.title.set_text('ChemPot Resonant Frequency vs. Bias Current')
    #%%Finding and plotting flux quanta and flux variables, interpolating resonance frequencies to generate resonance functions wrt bias current and flux
    quanta_size, quanta_offset, conv_func, conv_func_inverse = find_quanta(currents, res_freqs, show = False, smooth_window = 221)
    res_func = interp1d(currents, res_freqs, 'linear')
    print(f"Quanta size: {quanta_size}\nQuanta_offset: {quanta_offset}")
    filt = (conv_func(currents)<0)*(conv_func(currents)>-0.52)
    plt.plot(conv_func(currents)[filt], res_freqs[filt])
    plt.figure(2)
    plt.plot(currents, res_freqs, label = 'fitted data')
    plt.plot(currents, res_func(currents), label = 'quadratic interpolation')
    plt.legend()
    plt.figure(3)
    #%%
    # bug fix: `res_func1` was never defined in this script (NameError);
    # plot the residual of the interpolation built above instead
    plt.plot(currents, res_func(currents)-savgol_filter(res_func(currents), 21, 2))
+
+# -*- coding: utf-8 -*-
+"""
+Created on Thu May 13 13:43:45 2021
+
+@author: Hatlab_3
+"""
+import numpy as np
+import matplotlib.pyplot as plt
+import sympy as sp
+from data_processing.models.SNAIL_supporting_modules.Participation_and_Alpha_Fitter import slider_fit
+from data_processing.fitting.QFit import fit, plotRes
+from scipy.optimize import fsolve
+from scipy.interpolate import interp1d
+from timeit import default_timer as timer
+from data_processing.Helper_Functions import find_all_ddh5
+from data_processing.ddh5_Plotting.TACO_multiplot_b1 import superTACO_Bars
+import pandas as pd
+from scipy.optimize import curve_fit
+from scipy.signal import savgol_filter
+
+
def find_quanta(currents, res_freqs, smooth_window = 0):
    """Calibrate the flux quantum from the max/min of a resonance-vs-current trace.

    Assumes the maximum sits at zero flux and the minimum half a quantum away,
    so the quantum spans twice their separation in bias current.

    Parameters
    ----------
    currents : ndarray
        bias currents (A)
    res_freqs : ndarray
        resonant frequencies at each bias current
    smooth_window : int
        if nonzero, Savitzky-Golay window applied before locating extrema

    Returns
    -------
    (quanta_size, quanta_offset, current->quanta function, quanta->current function)
    """
    if smooth_window != 0:
        res_freqs = savgol_filter(res_freqs, smooth_window, 2)
    peak_current = currents[np.argmax(res_freqs)]
    dip_current = currents[np.argmin(res_freqs)]
    quanta_size = 2 * np.abs(dip_current - peak_current)
    quanta_offset = peak_current

    def current_to_quanta_conversion_function(c):
        return (c - quanta_offset) / quanta_size

    def quanta_to_current_function(q):
        return q * quanta_size + quanta_offset

    return quanta_size, quanta_offset, current_to_quanta_conversion_function, quanta_to_current_function
+
+
+
def get_phi_min_funcs(alpha, phi_ext_arr):
    """Return numeric dU/dphi functions, one per external flux, whose root is phi_min.

    The SNAIL potential (in units of the large-junction Ej) is expanded to first
    order about phi_min; the linear coefficient vanishes at the true minimum.
    """
    a, Ej, phi_s, phi_e, phi_m = sp.symbols('alpha,E_j,phi_s,phi_e, phi_min')
    U_snail_norm = -a*sp.cos(phi_s) - 3*sp.cos((phi_e-phi_s)/3)
    c1 = sp.series(U_snail_norm, phi_s, x0 = phi_m, n = 2).removeO().coeff((phi_s-phi_m))
    #generate a lambda function that outputs another lambda function for a given phi_ext
    #which then depends on phi_m only
    func_arr = []
    for phi_ext in phi_ext_arr:
        c1_num = sp.lambdify(phi_m, c1.subs(a, alpha).subs(phi_e, phi_ext), "numpy")
        func_arr.append(c1_num)
    return func_arr

def get_phi_min_fsolve(alpha, phi_ext_arr):
    """Solve for the potential minimum phi_min at each external flux in phi_ext_arr."""
    funcs = get_phi_min_funcs(alpha, phi_ext_arr)
    sol_arr = np.ones(np.size(funcs))
    for i, func in enumerate(funcs):
        # bug fix: fsolve returns a length-1 ndarray; index it so the scalar
        # assignment stays valid on NumPy >= 1.25 (array-to-scalar removed)
        sol_arr[i] = fsolve(func, phi_ext_arr[i])[0]
    return sol_arr

def get_phi_min(alpha, phi_ext):
    """Solve for the potential minimum phi_min at a single external flux."""
    func = get_phi_min_funcs(alpha, [phi_ext])[0]
    return fsolve(func, phi_ext)[0]

def c4_func_gen_vectorize(alpha_val): #can be fed an array
    """Return a vectorized c4(phi_ext): 4th-order Taylor coefficient (x24 to undo 1/4!)."""
    a, Ej, phi_s, phi_e, phi_m = sp.symbols('alpha,E_j,phi_s,phi_e, phi_min')
    U_snail = (-a*sp.cos(phi_s) - 3*sp.cos((phi_e-phi_s)/3))
    expansion = sp.series(U_snail, phi_s, x0 = phi_m, n = 5)
    coeff = expansion.removeO().coeff(sp.Pow(phi_s-phi_m, 4))*24
    c4exp = lambda phi_ext: coeff.subs([(a, alpha_val), (phi_e, phi_ext), (phi_m, get_phi_min(alpha_val, phi_ext))])
    return np.vectorize(c4exp)

def c3_func_gen_vectorize(alpha_val): #can be fed an array
    """Return a vectorized c3(phi_ext): 3rd-order Taylor coefficient (x6 to undo 1/3!)."""
    a, Ej, phi_s, phi_e, phi_m = sp.symbols('alpha,E_j,phi_s,phi_e, phi_min')
    U_snail = (-a*sp.cos(phi_s) - 3*sp.cos((phi_e-phi_s)/3))
    expansion = sp.series(U_snail, phi_s, x0 = phi_m, n = 4)
    coeff = expansion.removeO().coeff(sp.Pow(phi_s-phi_m, 3))*6
    c3exp = lambda phi_ext: coeff.subs([(a, alpha_val), (phi_e, phi_ext), (phi_m, get_phi_min(alpha_val, phi_ext))])
    return np.vectorize(c3exp)

def c2_func_gen_vectorize(alpha_val):
    """Return a vectorized c2(phi_ext): 2nd-order Taylor coefficient (x2 to undo 1/2!)."""
    a, Ej, phi_s, phi_e, phi_m = sp.symbols('alpha,E_j,phi_s,phi_e, phi_min')
    U_snail = (-a*sp.cos(phi_s) - 3*sp.cos((phi_e-phi_s)/3))
    expansion = sp.series(U_snail, phi_s, x0 = phi_m, n = 3)
    coeff = expansion.removeO().coeff(sp.Pow(phi_s-phi_m, 2))*2
    c2exp = lambda phi_ext: coeff.subs([(a, alpha_val), (phi_e, phi_ext), (phi_m, get_phi_min(alpha_val, phi_ext))])
    return np.vectorize(c2exp)
+
+
class SnailAmp():
    """Model of a SNAIL-based parametric amplifier.

    Bundles junction-parameter bookkeeping, flux-quanta calibration, potential
    expansion coefficients, and g3 estimation from measurement or HFSS data.
    """

    def __init__(self): #uA/um^2
        """Initialize physical constants (SI units); no other state is set here."""
        self.hbar = 1.0545718e-34        # reduced Planck constant (J*s)
        self.e = 1.60218e-19             # elementary charge (C)
        # flux quantum Phi_0 = h / (2e), written via hbar
        self.phi0 = 2*np.pi*self.hbar/(2*self.e)
+
+[docs] def generate_quanta_function(self, quanta_offset, quanta_size):
+ #function for converting bias currents to quanta fractions
+ self.quanta_offset = quanta_offset
+ self.quanta_size = quanta_size
+ self.conv_func = lambda c: (c-quanta_offset)/quanta_size
+
+[docs] def info_from_junction_sizes(self, junction_sizes, res = 100, Jc = 0.8, verbose = False):
+
+ self.s_size, self.l_size = junction_sizes
+
+ self.alpha_from_sizes = self.s_size/self.l_size
+ self.I0s, self.I0l = Jc*self.s_size*1e-6, Jc*self.l_size*1e-6
+
+ self.Lss, self.Lsl = self.Ic_to_Lj(self.I0s), self.Ic_to_Lj(self.I0l)
+ self.Ejs, self.Ejl = self.Ic_to_Ej(self.I0s), self.Ic_to_Ej(self.I0l)
+
+ self.Ls0 = parallel(self.Lss, self.Lsl)
+
+ self.c2_func, self.c3_func, self.c4_func = self.generate_coefficient_functions(self.alpha_from_sizes, res = res, verbose = False)
+
+ return self.c2_func, self.c3_func, self.c4_func
+
+[docs] def info_from_junction_i0(self, junction_i0_small, junction_i0_large, res = 100, Jc = 0.8, verbose = False):
+ '''
+ junction_i0_small: junction critical current in A
+ junction_i0_large: junction critical current in A
+ '''
+
+ self.I0s, self.I0l = junction_i0_small, junction_i0_large
+
+ self.Lss, self.Lsl = self.Ic_to_Lj(self.I0s), self.Ic_to_Lj(self.I0l)
+ self.Ejs, self.Ejl = self.Ic_to_Ej(self.I0s), self.Ic_to_Ej(self.I0l)
+
+ self.alpha_from_i0 = self.Ejs/self.Ejl
+
+ self.c2_func, self.c3_func, self.c4_func = self.generate_coefficient_functions(self.alpha_from_i0, res = res, verbose = False)
+
+ return self.c2_func, self.c3_func, self.c4_func
+
+[docs] def Ic_to_Ej(self, Ic: float):
+ '''
+ Parameters
+ ----------
+ Ic : float
+ critical current in amps
+ Returns
+ -------
+ Ej in Joules
+ src: https://en.wikipedia.org/wiki/Josephson_effect
+ '''
+ return Ic*self.phi0/(2*np.pi)
+
+[docs] def Ic_to_Lj(self, Ic: float):
+ '''
+ Parameters
+ ----------
+ Ic : float
+ critical current in amps
+ Returns
+ -------
+ Lj in Henries
+ src: https://en.wikipedia.org/wiki/Josephson_effect
+ '''
+ return self.phi0/(2*np.pi*Ic)
+
+[docs] def generate_coefficient_functions(self, alpha_val, res = int(100), plot = False, show_coefficients = False, verbose = False):
+ '''
+ Parameters
+ ----------
+ alpha_val : float
+ alpha value between 0 and 0.33
+ res : int, optional
+ number of points to base interpolation off of. The default is 100.
+ Returns
+ -------
+ c2_func : lambda function
+ function that will return the value of c2
+ c3_func : lambda function
+ DESCRIPTION.
+ c4_func : lambda function
+ DESCRIPTION.
+
+ '''
+ if verbose:
+ print("Calculating expansion coefficients")
+ start_time = timer()
+
+ phi_ext_arr = np.linspace(0,2*np.pi, res)
+ c4_arr = c4_func_gen_vectorize(alpha_val)(phi_ext_arr)
+ end_time = timer()
+ if verbose:
+ print(f"Elapsed time: {np.round(end_time-start_time, 2)} seconds")
+ c4_func = interp1d(phi_ext_arr, c4_arr, 'quadratic')
+
+
+ #c3:
+ start_time = timer()
+ phi_ext_arr = np.linspace(0,2*np.pi, res)
+ c3_arr = c3_func_gen_vectorize(alpha_val)(phi_ext_arr)
+ end_time = timer()
+ if verbose:
+ print(f"Elapsed time: {np.round(end_time-start_time, 2)} seconds")
+ c3_func = interp1d(phi_ext_arr, c3_arr, 'quadratic')
+
+
+ #c2:
+ start_time = timer()
+ phi_ext_arr = np.linspace(0,2*np.pi, res)
+ c2_arr = c2_func_gen_vectorize(alpha_val)(phi_ext_arr)
+ end_time = timer()
+ if verbose:
+ print(f"Elapsed time: {np.round(end_time-start_time, 2)} seconds")
+ c2_func = interp1d(phi_ext_arr, c2_arr, 'quadratic')
+
+ if plot:
+ plt.plot(phi_ext_arr, self.c2_func(phi_ext_arr), label = "c2")
+ plt.plot(phi_ext_arr, self.c3_func(phi_ext_arr), label = "c3")
+ plt.plot(phi_ext_arr, self.c4_func(phi_ext_arr), label = 'c4')
+
+ plt.legend()
+
+ return c2_func, c3_func, c4_func
+
+[docs] def gradient_descent_participation_fitter(self, fitted_res_func, initial_p_guess, initial_alpha_guess, init_f0_guess, res = 100, bounds = None):
+
+ '''
+ Parameters
+ ----------
+ fitted_res_func : function:ndarray->ndarray
+ function which takes in flux fraction in [0, 1] and produces the resonant frequency of the experimental device
+
+ initial_p_guess: float
+ guess for the participation ratio of the SNAIL at 0-flux
+
+ initial_alpha_guess: float
+ guess for the ratio of large junction inductance to small junciton inductance of the SNAIL
+
+ kwargs:
+ res - the number of points with which to do the fitting. Fewer is faster, more is better
+ Returns
+ -------
+ fitted alpha
+ fitted p
+ '''
+ fit_quanta = np.sort(np.append(np.append(np.linspace(0,1, int(res/4)), np.linspace(0.25,0.75, int(res/4))),np.linspace(0.45,0.55, int(res/2))))*2*np.pi
+
+ def fit_func(quanta_arr, alpha, p_rat, f0):
+
+ print(f"P: {p_rat}, alpha: {alpha}, f0: {f0}")
+ #make the c2 we need from the supplied alpha
+ c2_func = c2_func_gen_vectorize(alpha)
+ res_freqs = f0/(np.sqrt(1+p_rat/c2_func(quanta_arr).astype(float)))
+
+ return res_freqs
+
+ #fit the data
+ if bounds ==None:
+ bounds = [[0.1, 0.001, fitted_res_func(0)*0.7],
+ [0.33, 1, fitted_res_func(0)*1.3]]
+
+ popt, pcov = curve_fit(fit_func, fit_quanta, fitted_res_func(fit_quanta), p0 = [initial_alpha_guess, initial_p_guess, init_f0_guess],
+ bounds = bounds)
+
+ [fitted_alpha, fitted_p, fitted_f0] = popt
+ [d_alpha, d_p, d_f0] = [np.sqrt(pcov[0,0]), np.sqrt(pcov[1,1]), np.sqrt(pcov[2,2])]
+ return fit_func, [fitted_alpha, fitted_p, fitted_f0], [d_alpha, d_p, d_f0]
+
+
+
+[docs] def slider_participation_fitter(self, stored_fits_filepath: str, fluxsweep_filepath: str, ret_sliders = False, start_freq = 7e9):
+ '''
+ Parameters
+ ----------
+ stored_fits_filepath : str
+ path to a pickled fit file
+ fluxsweep_filepath : str
+ path to a fluxsweep stored in plottr's datadict format'
+
+ Returns
+ -------
+ 4x matplotlib.widgets.slider objects, call slider.val to get value
+
+ '''
+ self.p_arr = np.linspace(0.01, 0.3, 50)
+ self.alpha_arr = np.linspace(0.1, 0.32, 50)
+ #the below function returns the slider fit, which you then have to call .val on
+ self.p_slider, self.a_slider, self.f_slider = slider_fit(fluxsweep_filepath,
+ stored_fits_filepath,
+ self.quanta_offset,
+ self.quanta_size,
+ self.p_arr,
+ self.alpha_arr,
+ start_freq = start_freq)
+ if ret_sliders:
+ return self.p_arr, self.alpha_arr, self.p_slider, self.a_slider, self.f_slider
+ else:
+ pass
+
+[docs] def vals_from_sliders(self):
+ '''
+ A supporting function to slider_participation_fitter for extracting
+ the alpha and p values after the sliders have been used to fit
+
+ '''
+ self.alpha_from_FS = self.alpha_arr[self.a_slider.val]
+ self.p_from_FS = self.p_arr[self.p_slider.val]
+
+ return self.alpha_from_FS, self.p_from_FS, self.f_slider.val
+
+
+
+
+
+
+[docs] def generate_participation_function(self, L0, Lfunc):
+ return lambda phi: Lfunc(phi)/(L0+Lfunc(phi))
+
+[docs] def generate_inductance_function(self, L_large, c2_func):
+ return lambda phi: L_large/c2_func(phi)
+
+[docs] def generate_resonance_function_via_LC(self, L0, C0, Ls_func):
+ return lambda phi: 1/np.sqrt((L0+Ls_func(phi))*C0)
+
+[docs] def generate_resonance_function_via_fit(self, p, f0, c2_func):
+ return lambda phi: 2*np.pi*f0/(np.sqrt(1+(p/(1-p))/c2_func(phi)))
+
+[docs] def generate_gsss_function(self, C0, p_func, res_func, c2_func, c3_func):
+ '''
+ source: https://arxiv.org/pdf/1806.06093.pdf
+ (The frattini paper)
+ calculates the g3 wrt flux given linear capacitance, participation ratio, and alpha
+ return value is in Joules
+ '''
+ #calculate Ec
+ Ec = self.e**2/(2*C0)
+ return lambda phi: 1/6*p_func(phi)**2*c3_func(phi)/c2_func(phi)*np.sqrt(Ec*self.hbar*res_func(phi))
+
    def collect_TACO_data(self, gain_folder, plot = False, tla_pump = 0):
        """Gather best-gain TACO sweep results from every ddh5 file under gain_folder.

        tla_pump: total line attenuation on the pump (dB), subtracted from the
        generator powers so the returned powers refer to the device input.
        Requires generate_quanta_function to have been called first (reads
        self.quanta_size, self.quanta_offset, self.conv_func).
        """
        gain_cwd = gain_folder
        # collect every ddh5 measurement file below the folder, then let
        # superTACO_Bars extract best gain point data per bias current
        res = find_all_ddh5(gain_cwd)
        info_dict, bias_currents, best_gen_freqs, best_gen_powers, gains = superTACO_Bars(res, angles = [60,20], quanta_size = self.quanta_size, quanta_offset = self.quanta_offset, bardims = [0.001, 0.7], barbase = -24, plot = False)

        if plot:
            fig2 = plt.figure(2)
            ax = fig2.add_subplot(131)
            ax.plot(self.conv_func(bias_currents), np.array(best_gen_powers)-tla_pump, 'b.', markersize = 15)
            ax.set_title(r'Lowest 20dB Power (dBm) vs. Flux ($\Phi_0$)')
            ax.set_xlabel('Flux Quanta ($\Phi/\Phi_0)$')
            ax.set_ylabel('Generator Power @20dB Gain (dBm)')
            ax.grid()

        # NOTE(review): assumes superTACO_Bars returns best_gen_powers as an
        # ndarray; if it is a plain list the subtraction below fails -- confirm.
        return bias_currents, best_gen_freqs, best_gen_powers-tla_pump, gains
+
+[docs] def g3_from_pump_power(self,
+ dBgains: np.ndarray,
+ pump_powers: np.ndarray,
+ mode_kappas: np.ndarray,
+ pump_omegas: np.ndarray,
+ pump_detunings_from_res: np.ndarray
+ ):
+ '''
+ Source for calculation: https://arxiv.org/abs/1605.00539
+ "Introduction to Quantum-limited Parametric Amplification of Quantum Signals with Josephson Circuits"
+ by Michelle Devoret and Ananda Roy
+
+ Parameters
+ ----------
+ gains : np.ndarray
+ gain in dB, whose positions correspond to the powers given in the pump_powers section
+ pump_powers : np.ndarray
+ pump power in dBm that the amplifier sees. This must include all attenuation in the entire line
+ mode_kappas : np.ndarray
+ mode kappa in 2pi*Hz
+ pump_omegas : np.ndarray
+ pump frequency in 2pi*Hz
+ pump_detunings_from_res : np.ndarray
+ pump detuning in 2pi(f-f0) where f0 is the resonator frequency in hz
+
+ Returns
+ -------
+ numPumpPhotons : np.ndarray
+ The sqrt of the number of pump photons expected in the pumping resonator.
+ g3_arr : np.ndarray
+ The third order coupling in Hz for each combination of inputs
+ '''
+
+ lin_pump_powers = np.power(10,pump_powers/10)*0.001 #pump power in watts
+ #get the expected value of pump photons present in the resonator
+ npTZC_arr = []
+ numPumpPhotonsTZC = lin_pump_powers/(pump_omegas*self.hbar)*(np.sqrt(mode_kappas)/(mode_kappas/2-1j*(pump_detunings_from_res)))**2
+ for val in numPumpPhotonsTZC:
+ npTZC_arr.append(np.linalg.norm(val))
+ numPumpPhotons = np.array(npTZC_arr)
+ numPumpPhotonsDev = np.sqrt(8*mode_kappas*lin_pump_powers/(pump_omegas*self.hbar))/np.absolute(mode_kappas-2j*pump_detunings_from_res)
+ Lin_Power_gains = np.power(10,dBgains/20)
+ lpg = Lin_Power_gains
+
+ g3_arr = -0.5*(mode_kappas/numPumpPhotons)*np.sqrt((np.sqrt(lpg)-1)/(np.sqrt(lpg)+1))
+
+
+ return numPumpPhotonsDev, g3_arr, numPumpPhotons
+
+[docs] def process_ref_HFSS_sweep(self, HFSS_filepath, ref_port_name = 'B', lumped_port_name = 'sl', ind_name = 'Ls', trans_port_name = 'U'):
+ data = pd.read_csv(HFSS_filepath)
+ HFSS_dicts = []
+ for inductance in np.unique(data[f'{ind_name} [pH]'].to_numpy()):
+ filt = (data[f'{ind_name} [pH]'].to_numpy() == inductance)
+ HFSS_dicts.append(dict(
+ SNAIL_inductance = inductance,
+ freq = data['Freq [GHz]'].to_numpy()[filt]*1e9,
+ freqrad = data['Freq [GHz]'].to_numpy()[filt]*1e9*2*np.pi, #fitter takes rad*hz
+ mag = data[f'mag(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt],
+ phase = data[f'cang_deg_val(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt],
+ phaserad = data[f'cang_deg_val(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt]*2*np.pi/360,
+ dBmag = np.power(10, data[f'mag(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt]/20),
+ real = data[f'mag(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt]*np.cos(data[f'cang_deg_val(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt]*2*np.pi/360),
+ imag = data[f'mag(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt]*np.sin(data[f'cang_deg_val(S({ref_port_name},{ref_port_name})) []'].to_numpy()[filt]*2*np.pi/360),
+ imY = data[f'im(Y({lumped_port_name},{lumped_port_name})) []'].to_numpy()[filt],
+ leakage = data[f'dB(S({ref_port_name},{trans_port_name})) []'].to_numpy()[filt]
+ ))
+
+ return HFSS_dicts
+
    def fit_modes(self, *args, bounds = None, f0Guess_arr = None, Qguess = (1e2, 1e4), window_size = 600e6, plot = False):
        """Fit a reflection resonance for each HFSS mode dict passed in *args.

        Each dict (as produced by process_ref_HFSS_sweep) is fit with the
        project's QFit `fit` routine inside a window of width window_size
        around either the supplied f0Guess_arr[i] or an automatic guess taken
        from the steepest phase slope. Stores 'res_freq_rad' and 'kappa' back
        into every dict and returns the per-mode inductances, resonant
        frequencies (Hz) and kappas (Hz).

        NOTE(review): `bounds` is computed below but never passed to `fit`;
        it appears to have no effect -- confirm whether `fit` should receive it.
        """
        QextGuess, QintGuess = Qguess
        magBackGuess = 1

        HFSS_inductances, HFSS_res_freqs, HFSS_kappas = [], [], []

        for i, md in enumerate(args):
            # print(type(f0Guess_arr))
            if type(f0Guess_arr) == np.ndarray:
                # explicit per-mode guesses: window around f0Guess_arr[i]
                f0Guess_arr = np.copy(f0Guess_arr)
                filt = (md['freqrad']>f0Guess_arr[i]-window_size/2)*(md['freqrad']<f0Guess_arr[i]+window_size/2)
                f0Guess = f0Guess_arr[i]
            else:
                filt = np.ones(np.size(md['freqrad'])).astype(bool)
                # print(np.diff(md['phaserad']))
                # plt.plot(md['freq'][:-1]/1e9, np.diff(md['phaserad']))
                # automatic guess: resonance sits where the (smoothed) phase
                # slope is most negative
                f0Guess = md['freq'][np.argmin(savgol_filter(np.gradient(md['phaserad']), 15,3))]*2*np.pi
                filt = (md['freqrad']>f0Guess-window_size/2)*(md['freqrad']<f0Guess+window_size/2)

            if bounds == None:
                # NOTE(review): computed but unused (see docstring); also only
                # built once, from the first mode's f0Guess
                bounds = ([QextGuess / 10, QintGuess /10, f0Guess-500e6, magBackGuess / 2, 0],
                          [QextGuess * 10, QintGuess * 10, f0Guess+500e6, magBackGuess * 2, np.pi])

            popt, pcov = fit(md['freqrad'][filt], md['real'][filt], md['imag'][filt], md['mag'][filt], md['phaserad'][filt], Qguess = Qguess, f0Guess = f0Guess, phaseGuess = 0)
            if plot:
                # print("inductance: ", md['SNAIL_inductance'])
                plotRes(md['freqrad'][filt], md['real'][filt], md['imag'][filt], md['mag'][filt], md['phaserad'][filt], popt)

            # total (loaded) Q from the external/internal pair, then kappa = f0/Qtot
            Qtot = popt[0] * popt[1] / (popt[0] + popt[1])
            kappa = popt[2]/2/np.pi/Qtot
            f0 = popt[2]/(2*np.pi)
            inductance = md['SNAIL_inductance']

            HFSS_inductances.append(inductance)
            HFSS_res_freqs.append(f0)
            HFSS_kappas.append(kappa)

            # annotate the mode dict in place for downstream g3 calculations
            md['res_freq_rad'] = f0*2*np.pi
            md['kappa'] = kappa

        return HFSS_inductances, HFSS_res_freqs, HFSS_kappas
+
+[docs] def g3_from_admittance(self, Ej_large, c3_val, mds):
+ phi_zpf_arr = []
+ g3 = Ej_large*c3_val/6*(2*np.pi/self.phi0)**3
+ for md in mds:
+ res_omega = md['res_freq_rad']
+ # print("res_omega/2pi", res_omega/2/np.pi)
+ omegas = md['freqrad']
+ imY = md['imY']
+
+ f_res_loc = np.argmin(np.abs(omegas-res_omega))
+ slope = np.gradient(imY)[f_res_loc]/np.gradient(omegas)[f_res_loc]
+ Zpeff = 2/(res_omega*slope)
+
+ # print("omega/2pi: ", res_omega/2/np.pi)
+ # print("slope: ", slope)
+ # print("Impedance: ", Zpeff)
+ g3 *= np.sqrt(self.hbar/2*Zpeff)
+ phi_zpf_arr.append(Zpeff)
+
+ return g3
+
    def g3_from_admittance_raw(self, Ej_large, c3_val, res_omega):
        """Variant of g3_from_admittance apparently meant to work from raw arrays.

        WARNING(review): this function is broken as written -- it iterates over
        an undefined name `res_omegas` and reads from an undefined dict `md`,
        so any call raises NameError. It looks like an unfinished copy of
        g3_from_admittance; do not use until its inputs are wired up.
        """
        phi_zpf_arr = []
        g3 = Ej_large*c3_val/6*(2*np.pi/self.phi0)**3
        for res_omega in res_omegas:
            res_omega = md['res_freq_rad']
            omegas = md['freqrad']
            imY = md['imY']

            f_res_loc = np.argmin(np.abs(omegas-res_omega))
            slope = np.gradient(imY)[f_res_loc]/np.gradient(omegas)[f_res_loc]
            Zpeff = 2/(res_omega*slope)

            # print("omega/2pi: ", res_omega/2/np.pi)
            # print("slope: ", slope)
            # print("Impedance: ", Zpeff)
            g3 *= np.sqrt(self.hbar/2*Zpeff)
            phi_zpf_arr.append(Zpeff)

        return g3
+
+
if __name__ == '__main__':
    # Load an HFSS reflection sweep and fit every inductance slice.
    SA = SnailAmp()
    HFSS_filepath = r'D:\HFSS_Sims\SA_2X\mode_s.csv'
    # bug fix: the class defines process_ref_HFSS_sweep; `process_HFSS_sweep`
    # does not exist and raised AttributeError
    HFSS_dicts = SA.process_ref_HFSS_sweep(HFSS_filepath)
    #fit all of them, try to choose a guess frequency and Q's that cooperate with all of them
    HFSS_inductances, HFSS_res_freqs, HFSS_kappas = SA.fit_modes(*HFSS_dicts,
                                                                 Qguess = (5e1,1e3),
                                                                 window_size = 100e6,
                                                                 plot = True,
                                                                 f0Guess_arr = None)
    HFSS_inductances = np.array(HFSS_inductances)
    HFSS_kappas = np.array(HFSS_kappas)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# -*- coding: utf-8 -*-
+"""
+Created on Thu May 13 13:43:45 2021
+
+@author: Hatlab_3
+"""
+import numpy as np
+import matplotlib.pyplot as plt
+import sympy as sp
+from data_processing.models.SNAIL_supporting_modules.Participation_and_Alpha_Fitter import slider_fit
+from data_processing.fitting.QFit import fit, plotRes
+from scipy.optimize import fsolve
+from scipy.interpolate import interp1d
+from timeit import default_timer as timer
+from data_processing.Helper_Functions import find_all_ddh5
+from data_processing.ddh5_Plotting.TACO_multiplot_b1 import superTACO_Bars
+import pandas as pd
+from scipy.optimize import curve_fit
+
+
+
def get_phi_min_funcs(alpha, phi_ext_arr):
    """Return numeric dU/dphi functions, one per external flux, whose root is phi_min."""
    a, Ej, phi_s, phi_e, phi_m = sp.symbols('alpha,E_j,phi_s,phi_e, phi_min')
    U_snail_norm = -a*sp.cos(phi_s) - 3*sp.cos((phi_e-phi_s)/3)
    c1 = sp.series(U_snail_norm, phi_s, x0 = phi_m, n = 2).removeO().coeff((phi_s-phi_m))
    #generate a lambda function that outputs another lambda function for a given phi_ext
    #which then depends on phi_m only
    func_arr = []
    for phi_ext in phi_ext_arr:
        c1_num = sp.lambdify(phi_m, c1.subs(a, alpha).subs(phi_e, phi_ext), "numpy")
        func_arr.append(c1_num)
    return func_arr

def get_phi_min_fsolve(alpha, phi_ext_arr):
    """Solve for the potential minimum phi_min at each external flux in phi_ext_arr."""
    funcs = get_phi_min_funcs(alpha, phi_ext_arr)
    sol_arr = np.ones(np.size(funcs))
    for i, func in enumerate(funcs):
        # bug fix: fsolve returns a length-1 ndarray; index it so the scalar
        # assignment stays valid on NumPy >= 1.25 (array-to-scalar removed)
        sol_arr[i] = fsolve(func, phi_ext_arr[i])[0]
    return sol_arr

def get_phi_min(alpha, phi_ext):
    """Solve for the potential minimum phi_min at a single external flux."""
    func = get_phi_min_funcs(alpha, [phi_ext])[0]
    return fsolve(func, phi_ext)[0]

def c4_func_gen_vectorize(alpha_val): #can be fed an array
    """Return a vectorized c4(phi_ext): raw 4th-order series coefficient.

    NOTE(review): unlike other versions of this helper, the 1/4! from the
    Taylor series is NOT undone here -- confirm the intended normalization.
    """
    a, Ej, phi_s, phi_e, phi_m = sp.symbols('alpha,E_j,phi_s,phi_e, phi_min')
    U_snail = (-a*sp.cos(phi_s) - 3*sp.cos((phi_e-phi_s)/3))
    expansion = sp.series(U_snail, phi_s, x0 = phi_m, n = 5)
    coeff = expansion.removeO().coeff(sp.Pow(phi_s-phi_m, 4))
    c4exp = lambda phi_ext: coeff.subs([(a, alpha_val), (phi_e, phi_ext), (phi_m, get_phi_min(alpha_val, phi_ext))])
    return np.vectorize(c4exp)

def c3_func_gen_vectorize(alpha_val): #can be fed an array
    """Return a vectorized c3(phi_ext): raw 3rd-order series coefficient (1/3! included)."""
    a, Ej, phi_s, phi_e, phi_m = sp.symbols('alpha,E_j,phi_s,phi_e, phi_min')
    U_snail = (-a*sp.cos(phi_s) - 3*sp.cos((phi_e-phi_s)/3))
    expansion = sp.series(U_snail, phi_s, x0 = phi_m, n = 4)
    coeff = expansion.removeO().coeff(sp.Pow(phi_s-phi_m, 3))
    c3exp = lambda phi_ext: coeff.subs([(a, alpha_val), (phi_e, phi_ext), (phi_m, get_phi_min(alpha_val, phi_ext))])
    return np.vectorize(c3exp)

def c2_func_gen_vectorize(alpha_val):
    """Return a vectorized c2(phi_ext): raw 2nd-order series coefficient (1/2! included)."""
    a, Ej, phi_s, phi_e, phi_m = sp.symbols('alpha,E_j,phi_s,phi_e, phi_min')
    U_snail = (-a*sp.cos(phi_s) - 3*sp.cos((phi_e-phi_s)/3))
    expansion = sp.series(U_snail, phi_s, x0 = phi_m, n = 3)
    coeff = expansion.removeO().coeff(sp.Pow(phi_s-phi_m, 2))
    c2exp = lambda phi_ext: coeff.subs([(a, alpha_val), (phi_e, phi_ext), (phi_m, get_phi_min(alpha_val, phi_ext))])
    return np.vectorize(c2exp)
+
+
class SquidAmp():
    """Model of a SQUID-based parametric amplifier.

    Bundles junction-parameter bookkeeping, flux-quanta calibration, potential
    expansion coefficients, and g3 estimation from measurement or HFSS data.
    """

    def __init__(self): #uA/um^2
        """Initialize physical constants (SI units); no other state is set here."""
        self.hbar = 1.0545718e-34        # reduced Planck constant (J*s)
        self.e = 1.60218e-19             # elementary charge (C)
        # flux quantum Phi_0 = h / (2e), written via hbar
        self.phi0 = 2*np.pi*self.hbar/(2*self.e)
+
+[docs] def generate_quanta_function(self, quanta_offset, quanta_size):
+ #function for converting bias currents to quanta fractions
+ self.quanta_offset = quanta_offset
+ self.quanta_size = quanta_size
+ self.conv_func = lambda c: (c-quanta_offset)/quanta_size
+
+[docs] def info_from_junction_sizes(self, junction_sizes, res = 100, Jc = 0.8, verbose = False):
+ self.s_size, self.l_size = junction_sizes
+
+
+ self.alpha_from_sizes = self.s_size/self.l_size
+ self.I0s, self.I0l = Jc*self.s_size*1e-6, Jc*self.l_size*1e-6
+
+ self.Lss, self.Lsl = self.Ic_to_Lj(self.I0s), self.Ic_to_Lj(self.I0l)
+ self.Ejs, self.Ejl = self.Ic_to_Ej(self.I0s), self.Ic_to_Ej(self.I0l)
+
+ self.Ls0 = parallel(self.Lss, self.Lsl)
+
+ self.c2_func, self.c3_func, self.c4_func = self.generate_coefficient_functions(self.alpha_from_sizes, res = res, verbose = False)
+
+ return self.c2_func, self.c3_func, self.c4_func
+
+
+[docs] def Ic_to_Ej(self, Ic: float):
+ '''
+ Parameters
+ ----------
+ Ic : float
+ critical current in amps
+ Returns
+ -------
+ Ej in Joules
+ src: https://en.wikipedia.org/wiki/Josephson_effect
+ '''
+ return Ic*self.phi0/(2*np.pi)
+
+[docs] def Ic_to_Lj(self, Ic: float):
+ '''
+ Parameters
+ ----------
+ Ic : float
+ critical current in amps
+ Returns
+ -------
+ Lj in Henries
+ src: https://en.wikipedia.org/wiki/Josephson_effect
+ '''
+ return self.phi0/(2*np.pi*Ic)
+
+[docs] def gradient_descent_participation_fitter(self, fitted_res_func, initial_p_guess, initial_alpha_guess, init_f0_guess, res = 100, bounds = None):
+
+ '''
+ Parameters
+ ----------
+ fitted_res_func : function:ndarray->ndarray
+ function which takes in flux fraction in [0, 1] and produces the resonant frequency of the experimental device
+
+ initial_p_guess: float
+ guess for the participation ratio of the SNAIL at 0-flux
+
+ initial_alpha_guess: float
+ guess for the ratio of large junction inductance to small junciton inductance of the SNAIL
+
+ kwargs:
+ res - the number of points with which to do the fitting. Fewer is faster, more is better
+ Returns
+ -------
+ fitted alpha
+ fitted p
+ '''
+ fit_quanta = np.sort(np.append(np.append(np.linspace(0,1, int(res/4)), np.linspace(0.25,0.75, int(res/4))),np.linspace(0.45,0.55, int(res/2))))*2*np.pi
+
+ def fit_func(quanta_arr, alpha, p_rat, f0):
+
+ print(f"P: {p_rat}, alpha: {alpha}, f0: {f0}")
+ #make the c2 we need from the supplied alpha
+ c2_func = c2_func_gen_vectorize(alpha)
+ res_freqs = f0/(np.sqrt(1+p_rat/c2_func(quanta_arr).astype(float)))
+
+ return res_freqs
+
+ #fit the data
+ if bounds ==None:
+ bounds = [[0.1, 0.001, fitted_res_func(0)*0.7],
+ [0.33, 1, fitted_res_func(0)*1.3]]
+
+ popt, pcov = curve_fit(fit_func, fit_quanta, fitted_res_func(fit_quanta), p0 = [initial_alpha_guess, initial_p_guess, init_f0_guess],
+ bounds = bounds)
+
+ [fitted_alpha, fitted_p, fitted_f0] = popt
+ [d_alpha, d_p, d_f0] = [np.sqrt(pcov[0,0]), np.sqrt(pcov[1,1]), np.sqrt(pcov[2,2])]
+ return fit_func, [fitted_alpha, fitted_p, fitted_f0], [d_alpha, d_p, d_f0]
+
+
+
+[docs] def slider_participation_fitter(self, stored_fits_filepath: str, fluxsweep_filepath: str, ret_sliders = False, start_freq = 7e9):
+ '''
+ Parameters
+ ----------
+ stored_fits_filepath : str
+ path to a pickled fit file
+ fluxsweep_filepath : str
+ path to a fluxsweep stored in plottr's datadict format'
+
+ Returns
+ -------
+ 4x matplotlib.widgets.slider objects, call slider.val to get value
+
+ '''
+ self.p_arr = np.linspace(0.01, 0.3, 50)
+ self.alpha_arr = np.linspace(0.1, 0.32, 50)
+ #the below function returns the slider fit, which you then have to call .val on
+ self.p_slider, self.a_slider, self.f_slider = slider_fit(fluxsweep_filepath,
+ stored_fits_filepath,
+ self.quanta_offset,
+ self.quanta_size,
+ self.p_arr,
+ self.alpha_arr,
+ start_freq = start_freq)
+ if ret_sliders:
+ return self.p_arr, self.alpha_arr, self.p_slider, self.a_slider, self.f_slider
+ else:
+ pass
+
+[docs] def vals_from_sliders(self):
+ '''
+ A supporting function to slider_participation_fitter for extracting
+ the alpha and p values after the sliders have been used to fit
+
+ '''
+ self.alpha_from_FS = self.alpha_arr[self.a_slider.val]
+ self.p_from_FS = self.p_arr[self.p_slider.val]
+
+ return self.alpha_from_FS, self.p_from_FS, self.f_slider.val
+
+[docs] def generate_coefficient_functions(self, alpha_val, res = int(100), plot = False, show_coefficients = False, verbose = False):
+ '''
+ Parameters
+ ----------
+ alpha_val : float
+ alpha value between 0 and 0.33
+ res : int, optional
+ number of points to base interpolation off of. The default is 100.
+ Returns
+ -------
+ c2_func : lambda function
+ function that will return the value of c2
+ c3_func : lambda function
+ DESCRIPTION.
+ c4_func : lambda function
+ DESCRIPTION.
+
+ '''
+ if verbose:
+ print("Calculating expansion coefficients")
+ start_time = timer()
+
+ phi_ext_arr = np.linspace(0,2*np.pi, res)
+ c4_arr = c4_func_gen_vectorize(alpha_val)(phi_ext_arr)
+ end_time = timer()
+ if verbose:
+ print(f"Elapsed time: {np.round(end_time-start_time, 2)} seconds")
+ c4_func = interp1d(phi_ext_arr, c4_arr, 'quadratic')
+
+
+ #c3:
+ start_time = timer()
+ phi_ext_arr = np.linspace(0,2*np.pi, res)
+ c3_arr = c3_func_gen_vectorize(alpha_val)(phi_ext_arr)
+ end_time = timer()
+ if verbose:
+ print(f"Elapsed time: {np.round(end_time-start_time, 2)} seconds")
+ c3_func = interp1d(phi_ext_arr, c3_arr, 'quadratic')
+
+
+ #c2:
+ start_time = timer()
+ phi_ext_arr = np.linspace(0,2*np.pi, res)
+ c2_arr = c2_func_gen_vectorize(alpha_val)(phi_ext_arr)
+ end_time = timer()
+ if verbose:
+ print(f"Elapsed time: {np.round(end_time-start_time, 2)} seconds")
+ c2_func = interp1d(phi_ext_arr, c2_arr, 'quadratic')
+
+ if plot:
+ plt.plot(phi_ext_arr, self.c2_func(phi_ext_arr), label = "c2")
+ plt.plot(phi_ext_arr, self.c3_func(phi_ext_arr), label = "c3")
+ plt.plot(phi_ext_arr, self.c4_func(phi_ext_arr), label = 'c4')
+
+ plt.legend()
+
+ return c2_func, c3_func, c4_func
+
+
+
+
+[docs] def generate_participation_function(self, L0, Lfunc):
+ return lambda phi: Lfunc(phi)/(L0+Lfunc(phi))
+
+[docs] def generate_inductance_function(self, L_large, c2_func):
+ return lambda phi: L_large/c2_func(phi)
+
+[docs] def generate_resonance_function_via_LC(self, L0, C0, Ls_func):
+ return lambda phi: 1/np.sqrt((L0+Ls_func(phi))*C0)
+
+[docs] def generate_resonance_function_via_fit(self, p, f0, c2_func):
+ return lambda phi: 2*np.pi*f0/(np.sqrt(1+(p/(1-p))/c2_func(phi)))
+
+[docs] def generate_gsss_function(self, C0, p_func, res_func, c2_func, c3_func):
+ '''
+ source: https://arxiv.org/pdf/1806.06093.pdf
+ (The frattini paper)
+ calculates the g3 wrt flux given linear capacitance, participation ratio, and alpha
+ return value is in Joules
+ '''
+ #calculate Ec
+ Ec = self.e**2/(2*C0)
+ return lambda phi: 1/6*p_func(phi)**2*c3_func(phi)/c2_func(phi)*np.sqrt(Ec*self.hbar*res_func(phi))
+
+[docs] def collect_TACO_data(self, gain_folder, plot = False, tla_pump = 0):
+ gain_cwd = gain_folder
+ res = find_all_ddh5(gain_cwd)
+ info_dict, bias_currents, best_gen_freqs, best_gen_powers, gains = superTACO_Bars(res, angles = [60,20], quanta_size = self.quanta_size, quanta_offset = self.quanta_offset, bardims = [0.001, 0.7], barbase = -24, plot = False)
+
+ if plot:
+ fig2 = plt.figure(2)
+ ax = fig2.add_subplot(131)
+ ax.plot(self.conv_func(bias_currents), np.array(best_gen_powers)-tla_pump, 'b.', markersize = 15)
+ ax.set_title(r'Lowest 20dB Power (dBm) vs. Flux ($\Phi_0$)')
+ ax.set_xlabel('Flux Quanta ($\Phi/\Phi_0)$')
+ ax.set_ylabel('Generator Power @20dB Gain (dBm)')
+ ax.grid()
+
+ return bias_currents, best_gen_freqs, best_gen_powers-tla_pump, gains
+
+[docs] def g3_from_pump_power(self,
+ dBgains: np.ndarray,
+ pump_powers: np.ndarray,
+ mode_kappas: np.ndarray,
+ pump_omegas: np.ndarray,
+ pump_detunings_from_res: np.ndarray
+ ):
+ '''
+ Source for calculation: https://arxiv.org/abs/1605.00539
+ "Introduction to Quantum-limited Parametric Amplification of Quantum Signals with Josephson Circuits"
+ by Michelle Devoret and Ananda Roy
+
+ Parameters
+ ----------
+ gains : np.ndarray
+ gain in dB, whose positions correspond to the powers given in the pump_powers section
+ pump_powers : np.ndarray
+ pump power in dBm that the amplifier sees. This must include all attenuation in the entire line
+ mode_kappas : np.ndarray
+ mode kappa in 2pi*Hz
+ pump_omegas : np.ndarray
+ pump frequency in 2pi*Hz
+ pump_detunings_from_res : np.ndarray
+ pump detuning in 2pi(f-f0) where f0 is the resonator frequency in hz
+
+ Returns
+ -------
+ numPumpPhotons : np.ndarray
+ The sqrt of the number of pump photons expected in the pumping resonator.
+ g3_arr : np.ndarray
+ The third order coupling in Hz for each combination of inputs
+ '''
+
+ lin_pump_powers = np.power(10,pump_powers/10)*0.001 #pump power in watts
+ #get the expected value of pump photons present in the resonator
+ npTZC_arr = []
+ numPumpPhotonsTZC = lin_pump_powers/(pump_omegas*self.hbar)*(np.sqrt(mode_kappas)/(mode_kappas/2-1j*(pump_detunings_from_res)))**2
+ for val in numPumpPhotonsTZC:
+ npTZC_arr.append(np.linalg.norm(val))
+ numPumpPhotons = np.array(npTZC_arr)
+ numPumpPhotonsDev = np.sqrt(8*mode_kappas*lin_pump_powers/(pump_omegas*self.hbar))/np.absolute(mode_kappas-2j*pump_detunings_from_res)
+ Lin_Power_gains = np.power(10,dBgains/20)
+ lpg = Lin_Power_gains
+
+ g3_arr = -0.5*(mode_kappas/numPumpPhotons)*np.sqrt((np.sqrt(lpg)-1)/(np.sqrt(lpg)+1))
+
+
+ return numPumpPhotonsDev, g3_arr, numPumpPhotons
+
+[docs] def process_HFSS_sweep(self, HFSS_filepath):
+ data = pd.read_csv(HFSS_filepath)
+ HFSS_dicts = []
+ for inductance in np.unique(data['Ls [pH]'].to_numpy()):
+ filt = (data['Ls [pH]'].to_numpy() == inductance)
+ HFSS_dicts.append(dict(
+ SNAIL_inductance = inductance,
+ freq = data['Freq [GHz]'].to_numpy()[filt]*1e9,
+ freqrad = data['Freq [GHz]'].to_numpy()[filt]*1e9*2*np.pi, #fitter takes rad*hz
+ mag = data['mag(S(B,B)) []'].to_numpy()[filt],
+ phase = data['cang_deg_val(S(B,B)) []'].to_numpy()[filt],
+ phaserad = data['cang_deg_val(S(B,B)) []'].to_numpy()[filt]*2*np.pi/360,
+ dBmag = np.power(10, data['mag(S(B,B)) []'].to_numpy()[filt]/20),
+ real = data['mag(S(B,B)) []'].to_numpy()[filt]*np.cos(data['cang_deg_val(S(B,B)) []'].to_numpy()[filt]*2*np.pi/360),
+ imag = data['mag(S(B,B)) []'].to_numpy()[filt]*np.sin(data['cang_deg_val(S(B,B)) []'].to_numpy()[filt]*2*np.pi/360),
+ imY = data['im(Y(sl,sl)) []'].to_numpy()[filt],
+ ))
+ return HFSS_dicts
+
+[docs] def fit_modes(self, *args, bounds = None, f0Guess_arr = None, Qguess = (1e2, 1e4), window_size = 600e6, plot = False):
+ QextGuess, QintGuess = Qguess
+ magBackGuess = 1
+
+ HFSS_inductances, HFSS_res_freqs, HFSS_kappas = [], [], []
+
+ for i, md in enumerate(args):
+ # print(type(f0Guess_arr))
+ if type(f0Guess_arr) == np.ndarray:
+ f0Guess_arr = np.copy(f0Guess_arr)
+ filt = (md['freqrad']>f0Guess_arr[i]-window_size/2)*(md['freqrad']<f0Guess_arr[i]+window_size/2)
+ f0Guess = f0Guess_arr[i]
+ else:
+ filt = np.ones(np.size(md['freqrad'])).astype(bool)
+ # print(np.diff(md['phaserad']))
+ # plt.plot(md['freq'][:-1]/1e9, np.diff(md['phaserad']))
+ f0Guess = md['freq'][np.argmin(np.diff(md['phaserad']))]*2*np.pi
+
+ if bounds == None:
+ bounds = ([QextGuess / 10, QintGuess /10, f0Guess-500e6, magBackGuess / 2, 0],
+ [QextGuess * 10, QintGuess * 10, f0Guess+500e6, magBackGuess * 2, np.pi])
+
+ popt, pcov = fit(md['freqrad'][filt], md['real'][filt], md['imag'][filt], md['mag'][filt], md['phaserad'][filt], Qguess = Qguess, f0Guess = f0Guess, phaseGuess = 0)
+ if plot:
+ plotRes(md['freqrad'], md['real'], md['imag'], md['mag'], md['phaserad'], popt)
+
+ Qtot = popt[0] * popt[1] / (popt[0] + popt[1])
+ kappa = popt[2]/2/np.pi/Qtot
+ f0 = popt[2]/(2*np.pi)
+ inductance = md['SNAIL_inductance']
+
+ HFSS_inductances.append(inductance)
+ HFSS_res_freqs.append(f0)
+ HFSS_kappas.append(kappa)
+
+ return HFSS_inductances, HFSS_res_freqs, HFSS_kappas
+
+
+
+[docs] def g3_from_admittance(self, Ej_large, omegas, Imy, res_omega, res_phi, c3_func):
+ f_res_loc = np.argmin(np.abs(omegas-res_omega))
+ Zpeff = 2/(res_omega*np.gradient(Imy)[f_res_loc])
+ return Ej_large*c3_func(res_phi)*(2*np.pi/self.phi0*np.sqrt(self.hbar/2*Zpeff))**3
+
+
if __name__ == '__main__':
    # Smoke test: load one HFSS sweep CSV and fit every inductance trace.
    SA = SnailAmp()
    HFSS_filepath = r'D:\HFSS_Sims\SA_2X\mode_s.csv'
    HFSS_dicts = SA.process_HFSS_sweep(HFSS_filepath)
    # Fit all of them; the guess frequency and Q's must cooperate with
    # every trace (f0Guess_arr=None lets each trace pick its own guess).
    HFSS_inductances, HFSS_res_freqs, HFSS_kappas = SA.fit_modes(
        *HFSS_dicts,
        Qguess=(5e1, 1e3),
        window_size=100e6,
        plot=True,
        f0Guess_arr=None,
    )
    HFSS_inductances = np.array(HFSS_inductances)
    HFSS_kappas = np.array(HFSS_kappas)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+