From 6f1cb0d5cdbe7d04f55dc9a123df64451975db12 Mon Sep 17 00:00:00 2001 From: Jae Yoo Date: Sun, 7 Feb 2021 15:34:11 +0900 Subject: [PATCH 1/9] Fix warnings of c++ compilation --- .../core/ops/math_ops/tfq_inner_product.cc | 12 ++++----- .../core/ops/tfq_adj_grad_op.cc | 14 +++++----- .../core/ops/tfq_calculate_unitary_op.cc | 4 +-- .../core/ops/tfq_ps_symbol_replace_op.cc | 6 ++--- .../ops/tfq_ps_weights_from_symbols_op.cc | 2 +- .../core/ops/tfq_simulate_expectation_op.cc | 8 +++--- .../tfq_simulate_sampled_expectation_op.cc | 8 +++--- .../core/ops/tfq_simulate_samples_op.cc | 10 +++---- .../core/ops/tfq_simulate_state_op.cc | 6 ++--- tensorflow_quantum/core/src/adj_util.cc | 10 +++---- .../core/src/circuit_parser_qsim.cc | 27 ++++++++----------- .../core/src/circuit_parser_qsim_test.cc | 2 +- tensorflow_quantum/core/src/util_qsim.h | 2 +- 13 files changed, 53 insertions(+), 58 deletions(-) diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc index cf6fe9c32..620249ead 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc @@ -166,7 +166,7 @@ class TfqInnerProductOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. - for (int i = 0; i < fused_circuits.size(); i++) { + for (std::vector>>::size_type i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { // need to switch to larger statespace. @@ -178,10 +178,10 @@ class TfqInnerProductOp : public tensorflow::OpKernel { // the state if there is a possibility that circuit[i] and // circuit[i + 1] produce the same state. ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } - for (int j = 0; j < other_fused_circuits[i].size(); j++) { + for (std::vector>>::size_type j = 0; j < other_fused_circuits[i].size(); j++) { // (#679) Just ignore empty program if (fused_circuits[i].size() == 0) { (*output_tensor)(i, j) = std::complex(1, 0); @@ -189,7 +189,7 @@ class TfqInnerProductOp : public tensorflow::OpKernel { } ss.SetStateZero(scratch); - for (int k = 0; k < other_fused_circuits[i][j].size(); k++) { + for (std::vector>::size_type k = 0; k < other_fused_circuits[i][j].size(); k++) { qsim::ApplyFusedGate(sim, other_fused_circuits[i][j][k], scratch); } @@ -247,13 +247,13 @@ class TfqInnerProductOp : public tensorflow::OpKernel { // no need to update scratch_state since ComputeExpectation // will take care of things for us. 
ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) { + for (std::vector>::size_type j = 0; j < fused_circuits[cur_batch_index].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv); } } ss.SetStateZero(scratch); - for (int k = 0; + for (std::vector>::size_type k = 0; k < other_fused_circuits[cur_batch_index][cur_internal_index].size(); k++) { diff --git a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc index d2c5782f6..e9e0dd32d 100644 --- a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc +++ b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc @@ -198,7 +198,7 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { } ss.SetStateZero(sv); - for (int j = 0; j < full_fuse[i].size(); j++) { + for (std::vector>::size_type j = 0; j < full_fuse[i].size(); j++) { qsim::ApplyFusedGate(sim, full_fuse[i][j], sv); } @@ -227,13 +227,13 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { // if applicable compute control qubit mask and control value bits. uint64_t mask = 0; uint64_t cbits = 0; - for (int k = 0; k < cur_gate.controlled_by.size(); k++) { + for (std::vector::size_type k = 0; k < cur_gate.controlled_by.size(); k++) { uint64_t control_loc = cur_gate.controlled_by[k]; mask |= uint64_t{1} << control_loc; cbits |= ((cur_gate.cmask >> k) & 1) << control_loc; } - for (int k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) { + for (std::vector::size_type k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) { // Copy sv onto scratch2 in anticipation of non-unitary "gradient // gate". ss.Copy(sv, scratch2); @@ -293,7 +293,7 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { auto scratch = ss.Create(largest_nq); auto scratch2 = ss.Create(largest_nq); - for (int i = 0; i < partial_fused_circuits.size(); i++) { + for (std::vector>>>::size_type i = 0; i < partial_fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -310,7 +310,7 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { } ss.SetStateZero(sv); - for (int j = 0; j < full_fuse[i].size(); j++) { + for (std::vector>::size_type j = 0; j < full_fuse[i].size(); j++) { qsim::ApplyFusedGate(sim, full_fuse[i][j], sv); } @@ -338,13 +338,13 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { // if applicable compute control qubit mask and control value bits. uint64_t mask = 0; uint64_t cbits = 0; - for (int k = 0; k < cur_gate.controlled_by.size(); k++) { + for (std::vector::size_type k = 0; k < cur_gate.controlled_by.size(); k++) { uint64_t control_loc = cur_gate.controlled_by[k]; mask |= uint64_t{1} << control_loc; cbits |= ((cur_gate.cmask >> k) & 1) << control_loc; } - for (int k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) { + for (std::vector::size_type k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) { // Copy sv onto scratch2 in anticipation of non-unitary "gradient // gate". ss.Copy(sv, scratch2); diff --git a/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc b/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc index 6d444a829..1351af1b8 100644 --- a/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc +++ b/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc @@ -111,7 +111,7 @@ class TfqCalculateUnitaryOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. 
Each time we encounter a // a larger circuit we will grow the unitary as nescessary. - for (int i = 0; i < fused_circuits.size(); i++) { + for (std::vector>>::size_type i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; UCalculator sim = UCalculator(nq, tfq_for); UnitarySpace us = UnitarySpace(nq, tfq_for); @@ -121,7 +121,7 @@ class TfqCalculateUnitaryOp : public tensorflow::OpKernel { u = us.CreateUnitary(); } us.SetIdentity(u); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], u); } diff --git a/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc b/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc index 8117f3be1..ca6ea4b74 100644 --- a/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc +++ b/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc @@ -130,7 +130,7 @@ class TfqPsSymbolReplaceOp : public tensorflow::OpKernel { ->workers->TransformRangeConcurrently( block_size, programs.size() * n_symbols, DoWork); - size_t biggest_pad = 0; + std::vector::size_type biggest_pad = 0; Program empty = Program(); empty.mutable_language()->set_gate_set("tfq_gate_set"); empty.mutable_circuit(); // create empty circuits entry. @@ -163,11 +163,11 @@ class TfqPsSymbolReplaceOp : public tensorflow::OpKernel { for (int i = start; i < end; i++) { int sidx = i % n_symbols; int pidx = i / n_symbols; - for (int j = 0; j < output_programs.at(pidx).at(sidx).size(); j++) { + for (std::vector::size_type j = 0; j < output_programs.at(pidx).at(sidx).size(); j++) { output_tensor(pidx, sidx, j) = output_programs.at(pidx).at(sidx).at(j); } - for (int j = output_programs.at(pidx).at(sidx).size(); j < biggest_pad; + for (std::vector::size_type j = output_programs.at(pidx).at(sidx).size(); j < biggest_pad; j++) { output_tensor(pidx, sidx, j) = empty_program; } diff --git a/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc b/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc index cfbeb34ce..e5e1186a5 100644 --- a/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc +++ b/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc @@ -144,7 +144,7 @@ class TfqPsWeightsFromSymbolOp : public tensorflow::OpKernel { auto DoWork2 = [&](int start, int end) { for (int i = start; i < end; i++) { for (int j = 0; j < n_symbols; j++) { - for (int k = 0; k < output_results.at(i).at(j).size(); k++) { + for (std::vector::size_type k = 0; k < output_results.at(i).at(j).size(); k++) { output_tensor(i, j, k) = output_results.at(i).at(j).at(k); } for (int k = output_results.at(i).at(j).size(); diff --git a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc index ea7c61b7e..f967d5319 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc @@ -138,7 +138,7 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. 
- for (int i = 0; i < fused_circuits.size(); i++) { + for (std::vector>>::size_type i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -151,10 +151,10 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel { // the state if there is a possibility that circuit[i] and // circuit[i + 1] produce the same state. ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (std::vector::size_type j = 0; j < pauli_sums[i].size(); j++) { // (#679) Just ignore empty program if (fused_circuits[i].size() == 0) { (*output_tensor)(i, j) = -2.0; @@ -214,7 +214,7 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel { // no need to update scratch_state since ComputeExpectation // will take care of things for us. ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) { + for (std::vector>::size_type j = 0; j < fused_circuits[cur_batch_index].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv); } } diff --git a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc index 2e1973213..6bf5e538f 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc @@ -155,7 +155,7 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. - for (int i = 0; i < fused_circuits.size(); i++) { + for (std::vector>>::size_type i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -168,10 +168,10 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel { // the state if there is a possibility that circuit[i] and // circuit[i + 1] produce the same state. ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (std::vector::size_type j = 0; j < pauli_sums[i].size(); j++) { // (#679) Just ignore empty program if (fused_circuits[i].size() == 0) { (*output_tensor)(i, j) = -2.0; @@ -232,7 +232,7 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel { // no need to update scratch_state since ComputeExpectation // will take care of things for us. ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) { + for (std::vector>::size_type j = 0; j < fused_circuits[cur_batch_index].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv); } } diff --git a/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc index 325dda8d1..93587cd88 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc @@ -137,7 +137,7 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. 
Each time we encounter a // a larger circuit we will grow the Statevector as nescessary. - for (int i = 0; i < fused_circuits.size(); i++) { + for (std::vector>>::size_type i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -146,13 +146,13 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel { sv = ss.Create(largest_nq); } ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } auto samples = ss.Sample(sv, num_samples, rand() % 123456); for (int j = 0; j < num_samples; j++) { - uint64_t q_ind = 0; + int q_ind = 0; uint64_t mask = 1; bool val = 0; while (q_ind < nq) { @@ -195,13 +195,13 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel { sv = ss.Create(largest_nq); } ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } auto samples = ss.Sample(sv, num_samples, rand() % 123456); for (int j = 0; j < num_samples; j++) { - uint64_t q_ind = 0; + int q_ind = 0; uint64_t mask = 1; bool val = 0; while (q_ind < nq) { diff --git a/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc index da4fddb03..35e1c5df6 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc @@ -131,7 +131,7 @@ class TfqSimulateStateOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as nescessary. - for (int i = 0; i < fused_circuits.size(); i++) { + for (std::vector>>::size_type i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -140,7 +140,7 @@ class TfqSimulateStateOp : public tensorflow::OpKernel { sv = ss.Create(largest_nq); } ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } @@ -189,7 +189,7 @@ class TfqSimulateStateOp : public tensorflow::OpKernel { sv = ss.Create(largest_nq); } ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } diff --git a/tensorflow_quantum/core/src/adj_util.cc b/tensorflow_quantum/core/src/adj_util.cc index aee01a377..311864f8a 100644 --- a/tensorflow_quantum/core/src/adj_util.cc +++ b/tensorflow_quantum/core/src/adj_util.cc @@ -38,7 +38,7 @@ void CreateGradientCircuit( const QsimCircuit& circuit, const std::vector& metadata, std::vector>>* partial_fuses, std::vector* grad_gates) { - for (int i = 0; i < metadata.size(); i++) { + for (std::vector::size_type i = 0; i < metadata.size(); i++) { if (metadata[i].symbol_values.size() == 0) { continue; } @@ -78,7 +78,7 @@ void CreateGradientCircuit( // PhasedX else if (circuit.gates[i].kind == qsim::Cirq::GateKind::kPhasedXPowGate) { // Process potentially several symbols. 
- for (int j = 0; j < metadata[i].symbol_values.size(); j++) { + for (std::vector::size_type j = 0; j < metadata[i].symbol_values.size(); j++) { if (metadata[i].placeholder_names[j] == GateParamNames::kPhaseExponent) { PopulateGradientPhasedXPhasedExponent( @@ -103,7 +103,7 @@ void CreateGradientCircuit( // Process potentially several symbols. bool swapq = circuit.gates[i].swapped; - for (int j = 0; j < metadata[i].symbol_values.size(); j++) { + for (std::vector::size_type j = 0; j < metadata[i].symbol_values.size(); j++) { if (metadata[i].placeholder_names[j] == GateParamNames::kTheta) { PopulateGradientFsimTheta( metadata[i].symbol_values[j], i, @@ -128,7 +128,7 @@ void CreateGradientCircuit( qsim::Cirq::GateKind::kPhasedISwapPowGate) { // Process potentially several symbols. bool swapq = circuit.gates[i].swapped; - for (int j = 0; j < metadata[i].symbol_values.size(); j++) { + for (std::vector::size_type j = 0; j < metadata[i].symbol_values.size(); j++) { if (metadata[i].placeholder_names[j] == GateParamNames::kPhaseExponent) { PopulateGradientPhasedISwapPhasedExponent( @@ -159,7 +159,7 @@ void CreateGradientCircuit( partial_fuses->assign(grad_gates->size() + 1, std::vector>({})); - for (int i = 0; i < grad_gates->size(); i++) { + for (std::vector::size_type i = 0; i < grad_gates->size(); i++) { right = circuit.gates.begin() + (*grad_gates)[i].index; (*partial_fuses)[i] = fuser.FuseGates(qsim::BasicGateFuser::Parameter(), diff --git a/tensorflow_quantum/core/src/circuit_parser_qsim.cc b/tensorflow_quantum/core/src/circuit_parser_qsim.cc index 94799a6b2..4f3ad536e 100644 --- a/tensorflow_quantum/core/src/circuit_parser_qsim.cc +++ b/tensorflow_quantum/core/src/circuit_parser_qsim.cc @@ -152,7 +152,7 @@ inline Status SingleConstantGate( const unsigned int num_qubits, const unsigned int time, QsimCircuit* circuit, std::vector* metadata) { unsigned int q0; - bool unused = absl::SimpleAtoi(op.qubits(0).id(), &q0); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0); auto gate = create_f(time, num_qubits - q0 - 1); Status s = OptionalInsertControls(op, num_qubits, &gate); if (!s.ok()) { @@ -177,8 +177,8 @@ inline Status TwoConstantGate( const unsigned int num_qubits, const unsigned int time, QsimCircuit* circuit, std::vector* metadata) { unsigned int q0, q1; - bool unused = absl::SimpleAtoi(op.qubits(0).id(), &q0); - unused = absl::SimpleAtoi(op.qubits(1).id(), &q1); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0); + std::ignore = absl::SimpleAtoi(op.qubits(1).id(), &q1); auto gate = create_f(time, num_qubits - q0 - 1, num_qubits - q1 - 1); Status s = OptionalInsertControls(op, num_qubits, &gate); if (!s.ok()) { @@ -203,10 +203,9 @@ inline Status SingleEigenGate( const unsigned int num_qubits, const unsigned int time, QsimCircuit* circuit, std::vector* metadata) { unsigned int q0; - bool unused; float exp, exp_s, gs; Status u; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q0); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0); absl::optional exponent_symbol; u = ParseProtoArg(op, "exponent", param_map, &exp, &exponent_symbol); @@ -253,10 +252,9 @@ inline Status TwoEigenGate( QsimCircuit* circuit, std::vector* metadata) { unsigned int q0, q1; float exp, exp_s, gs; - bool unused; Status u; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q0); - unused = absl::SimpleAtoi(op.qubits(1).id(), &q1); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0); + std::ignore = absl::SimpleAtoi(op.qubits(1).id(), &q1); absl::optional exponent_symbol; u = ParseProtoArg(op, "exponent", 
param_map, &exp, &exponent_symbol); @@ -392,10 +390,9 @@ inline Status PhasedXGate(const Operation& op, const SymbolMap& param_map, const unsigned int time, QsimCircuit* circuit, std::vector* metadata) { int q0; - bool unused; float pexp, pexp_s, exp, exp_s, gs; Status u; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q0); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0); absl::optional exponent_symbol; u = ParseProtoArg(op, "exponent", param_map, &exp, &exponent_symbol); @@ -452,11 +449,10 @@ inline Status FsimGate(const Operation& op, const SymbolMap& param_map, QsimCircuit* circuit, std::vector* metadata) { int q0, q1; - bool unused; float theta, theta_s, phi, phi_s; Status u; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q0); - unused = absl::SimpleAtoi(op.qubits(1).id(), &q1); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0); + std::ignore = absl::SimpleAtoi(op.qubits(1).id(), &q1); absl::optional theta_symbol; u = ParseProtoArg(op, "theta", param_map, &theta, &theta_symbol); @@ -509,11 +505,10 @@ inline Status PhasedISwapGate(const Operation& op, const SymbolMap& param_map, const unsigned int time, QsimCircuit* circuit, std::vector* metadata) { int q0, q1; - bool unused; float pexp, pexp_s, exp, exp_s; Status u; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q0); - unused = absl::SimpleAtoi(op.qubits(1).id(), &q1); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0); + std::ignore = absl::SimpleAtoi(op.qubits(1).id(), &q1); absl::optional exponent_symbol; u = ParseProtoArg(op, "exponent", param_map, &exp, &exponent_symbol); diff --git a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc index 84cd7bf28..669ed9dc3 100644 --- a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc +++ b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc @@ -59,7 +59,7 @@ Arg MakeControlArg(const std::string& val) { } inline void AssertControlEqual(const QsimGate& a, const QsimGate& b) { - for (int i = 0; i < a.controlled_by.size(); i++) { + for (std::vector::size_type i = 0; i < a.controlled_by.size(); i++) { ASSERT_EQ(a.controlled_by[i], b.controlled_by[i]); } ASSERT_EQ(a.cmask, b.cmask); diff --git a/tensorflow_quantum/core/src/util_qsim.h b/tensorflow_quantum/core/src/util_qsim.h index 5024d47bf..a84c577ca 100644 --- a/tensorflow_quantum/core/src/util_qsim.h +++ b/tensorflow_quantum/core/src/util_qsim.h @@ -231,7 +231,7 @@ tensorflow::Status ComputeSampledExpectationQsim( unsigned int location; // GridQubit id should be parsed down to integer at this upstream // so it is safe to just use atoi. 
- bool unused = absl::SimpleAtoi(pair.qubit_id(), &location); + std::ignore = absl::SimpleAtoi(pair.qubit_id(), &location); // Parity functions use little-endian indexing parity_bits.push_back(state.num_qubits() - location - 1); } From 2637022cc106d2202c794e335c6a514455be5b21 Mon Sep 17 00:00:00 2001 From: Jae Yoo Date: Sun, 7 Feb 2021 15:40:28 +0900 Subject: [PATCH 2/9] Fix format --- tensorflow_quantum/core/ops/cirq_ops_test.py | 8 +++---- .../core/ops/math_ops/tfq_inner_product.cc | 15 ++++++++---- .../core/ops/tfq_adj_grad_op.cc | 23 +++++++++++++------ .../core/ops/tfq_calculate_unitary_op.cc | 6 +++-- .../core/ops/tfq_ps_symbol_replace_op.cc | 8 ++++--- .../ops/tfq_ps_weights_from_symbols_op.cc | 3 ++- .../core/ops/tfq_simulate_expectation_op.cc | 12 ++++++---- .../core/ops/tfq_simulate_ops_test.py | 4 ++-- .../tfq_simulate_sampled_expectation_op.cc | 12 ++++++---- .../core/ops/tfq_simulate_samples_op.cc | 9 +++++--- .../core/ops/tfq_simulate_state_op.cc | 9 +++++--- tensorflow_quantum/core/src/adj_util.cc | 15 ++++++++---- .../core/src/circuit_parser_qsim_test.cc | 3 ++- 13 files changed, 83 insertions(+), 44 deletions(-) diff --git a/tensorflow_quantum/core/ops/cirq_ops_test.py b/tensorflow_quantum/core/ops/cirq_ops_test.py index 77569bf5f..79978ec86 100644 --- a/tensorflow_quantum/core/ops/cirq_ops_test.py +++ b/tensorflow_quantum/core/ops/cirq_ops_test.py @@ -390,8 +390,8 @@ def test_sampling_output_padding(self, op, all_n_qubits, n_samples): this_expected_output[:, :max(all_n_qubits) - n_qubits] = -2 expected_outputs.append(this_expected_output) circuits.append( - cirq.Circuit( - *cirq.X.on_each(*cirq.GridQubit.rect(1, n_qubits)))) + cirq.Circuit(*cirq.X.on_each( + *cirq.GridQubit.rect(1, n_qubits)))) results = op(util.convert_to_tensor(circuits), [], [[]] * len(circuits), [n_samples]).numpy() self.assertAllClose(expected_outputs, results) @@ -430,8 +430,8 @@ def run_sweep(self, program, params, repetitions): circuits = [] for n_qubits in all_n_qubits: circuits.append( - cirq.Circuit( - *cirq.X.on_each(*cirq.GridQubit.rect(1, n_qubits)))) + cirq.Circuit(*cirq.X.on_each( + *cirq.GridQubit.rect(1, n_qubits)))) test_results = this_op(util.convert_to_tensor(circuits), [], [[]] * len(circuits), [n_samples]).numpy() diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc index 620249ead..a861b6846 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc @@ -166,7 +166,8 @@ class TfqInnerProductOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. - for (std::vector>>::size_type i = 0; i < fused_circuits.size(); i++) { + for (std::vector>>::size_type i = 0; + i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { // need to switch to larger statespace. @@ -178,10 +179,12 @@ class TfqInnerProductOp : public tensorflow::OpKernel { // the state if there is a possibility that circuit[i] and // circuit[i + 1] produce the same state. 
ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; + j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } - for (std::vector>>::size_type j = 0; j < other_fused_circuits[i].size(); j++) { + for (std::vector>>::size_type j = 0; + j < other_fused_circuits[i].size(); j++) { // (#679) Just ignore empty program if (fused_circuits[i].size() == 0) { (*output_tensor)(i, j) = std::complex(1, 0); @@ -189,7 +192,8 @@ class TfqInnerProductOp : public tensorflow::OpKernel { } ss.SetStateZero(scratch); - for (std::vector>::size_type k = 0; k < other_fused_circuits[i][j].size(); k++) { + for (std::vector>::size_type k = 0; + k < other_fused_circuits[i][j].size(); k++) { qsim::ApplyFusedGate(sim, other_fused_circuits[i][j][k], scratch); } @@ -247,7 +251,8 @@ class TfqInnerProductOp : public tensorflow::OpKernel { // no need to update scratch_state since ComputeExpectation // will take care of things for us. ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; j < fused_circuits[cur_batch_index].size(); j++) { + for (std::vector>::size_type j = 0; + j < fused_circuits[cur_batch_index].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv); } } diff --git a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc index e9e0dd32d..5ef2c0593 100644 --- a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc +++ b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc @@ -198,7 +198,8 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { } ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; j < full_fuse[i].size(); j++) { + for (std::vector>::size_type j = 0; + j < full_fuse[i].size(); j++) { qsim::ApplyFusedGate(sim, full_fuse[i][j], sv); } @@ -227,13 +228,15 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { // if applicable compute control qubit mask and control value bits. uint64_t mask = 0; uint64_t cbits = 0; - for (std::vector::size_type k = 0; k < cur_gate.controlled_by.size(); k++) { + for (std::vector::size_type k = 0; + k < cur_gate.controlled_by.size(); k++) { uint64_t control_loc = cur_gate.controlled_by[k]; mask |= uint64_t{1} << control_loc; cbits |= ((cur_gate.cmask >> k) & 1) << control_loc; } - for (std::vector::size_type k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) { + for (std::vector::size_type k = 0; + k < gradient_gates[i][j - 1].grad_gates.size(); k++) { // Copy sv onto scratch2 in anticipation of non-unitary "gradient // gate". ss.Copy(sv, scratch2); @@ -293,7 +296,10 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { auto scratch = ss.Create(largest_nq); auto scratch2 = ss.Create(largest_nq); - for (std::vector>>>::size_type i = 0; i < partial_fused_circuits.size(); i++) { + for (std::vector< + std::vector>>>::size_type i = + 0; + i < partial_fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -310,7 +316,8 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { } ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; j < full_fuse[i].size(); j++) { + for (std::vector>::size_type j = 0; + j < full_fuse[i].size(); j++) { qsim::ApplyFusedGate(sim, full_fuse[i][j], sv); } @@ -338,13 +345,15 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { // if applicable compute control qubit mask and control value bits. 
uint64_t mask = 0; uint64_t cbits = 0; - for (std::vector::size_type k = 0; k < cur_gate.controlled_by.size(); k++) { + for (std::vector::size_type k = 0; + k < cur_gate.controlled_by.size(); k++) { uint64_t control_loc = cur_gate.controlled_by[k]; mask |= uint64_t{1} << control_loc; cbits |= ((cur_gate.cmask >> k) & 1) << control_loc; } - for (std::vector::size_type k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) { + for (std::vector::size_type k = 0; + k < gradient_gates[i][j - 1].grad_gates.size(); k++) { // Copy sv onto scratch2 in anticipation of non-unitary "gradient // gate". ss.Copy(sv, scratch2); diff --git a/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc b/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc index 1351af1b8..98fdb75de 100644 --- a/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc +++ b/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc @@ -111,7 +111,8 @@ class TfqCalculateUnitaryOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the unitary as nescessary. - for (std::vector>>::size_type i = 0; i < fused_circuits.size(); i++) { + for (std::vector>>::size_type i = 0; + i < fused_circuits.size(); i++) { int nq = num_qubits[i]; UCalculator sim = UCalculator(nq, tfq_for); UnitarySpace us = UnitarySpace(nq, tfq_for); @@ -121,7 +122,8 @@ class TfqCalculateUnitaryOp : public tensorflow::OpKernel { u = us.CreateUnitary(); } us.SetIdentity(u); - for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; + j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], u); } diff --git a/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc b/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc index ca6ea4b74..bac307f6b 100644 --- a/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc +++ b/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc @@ -163,12 +163,14 @@ class TfqPsSymbolReplaceOp : public tensorflow::OpKernel { for (int i = start; i < end; i++) { int sidx = i % n_symbols; int pidx = i / n_symbols; - for (std::vector::size_type j = 0; j < output_programs.at(pidx).at(sidx).size(); j++) { + for (std::vector::size_type j = 0; + j < output_programs.at(pidx).at(sidx).size(); j++) { output_tensor(pidx, sidx, j) = output_programs.at(pidx).at(sidx).at(j); } - for (std::vector::size_type j = output_programs.at(pidx).at(sidx).size(); j < biggest_pad; - j++) { + for (std::vector::size_type j = + output_programs.at(pidx).at(sidx).size(); + j < biggest_pad; j++) { output_tensor(pidx, sidx, j) = empty_program; } } diff --git a/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc b/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc index e5e1186a5..ddbb86e93 100644 --- a/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc +++ b/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc @@ -144,7 +144,8 @@ class TfqPsWeightsFromSymbolOp : public tensorflow::OpKernel { auto DoWork2 = [&](int start, int end) { for (int i = start; i < end; i++) { for (int j = 0; j < n_symbols; j++) { - for (std::vector::size_type k = 0; k < output_results.at(i).at(j).size(); k++) { + for (std::vector::size_type k = 0; + k < output_results.at(i).at(j).size(); k++) { output_tensor(i, j, k) = output_results.at(i).at(j).at(k); } for (int k = output_results.at(i).at(j).size(); diff --git 
a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc index f967d5319..00912a780 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc @@ -138,7 +138,8 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. - for (std::vector>>::size_type i = 0; i < fused_circuits.size(); i++) { + for (std::vector>>::size_type i = 0; + i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -151,10 +152,12 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel { // the state if there is a possibility that circuit[i] and // circuit[i + 1] produce the same state. ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; + j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } - for (std::vector::size_type j = 0; j < pauli_sums[i].size(); j++) { + for (std::vector::size_type j = 0; + j < pauli_sums[i].size(); j++) { // (#679) Just ignore empty program if (fused_circuits[i].size() == 0) { (*output_tensor)(i, j) = -2.0; @@ -214,7 +217,8 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel { // no need to update scratch_state since ComputeExpectation // will take care of things for us. ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; j < fused_circuits[cur_batch_index].size(); j++) { + for (std::vector>::size_type j = 0; + j < fused_circuits[cur_batch_index].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv); } } diff --git a/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py b/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py index ae4addb58..e81604624 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py +++ b/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py @@ -453,8 +453,8 @@ def test_sampling_output_padding(self, all_n_qubits, n_samples): this_expected_output[:, :max(all_n_qubits) - n_qubits] = -2 expected_outputs.append(this_expected_output) circuits.append( - cirq.Circuit( - *cirq.X.on_each(*cirq.GridQubit.rect(1, n_qubits)))) + cirq.Circuit(*cirq.X.on_each( + *cirq.GridQubit.rect(1, n_qubits)))) results = op(util.convert_to_tensor(circuits), [], [[]] * len(circuits), [n_samples]).numpy() self.assertAllClose(expected_outputs, results) diff --git a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc index 6bf5e538f..47a8402d1 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc @@ -155,7 +155,8 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. 
- for (std::vector>>::size_type i = 0; i < fused_circuits.size(); i++) { + for (std::vector>>::size_type i = 0; + i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -168,10 +169,12 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel { // the state if there is a possibility that circuit[i] and // circuit[i + 1] produce the same state. ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; + j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } - for (std::vector::size_type j = 0; j < pauli_sums[i].size(); j++) { + for (std::vector::size_type j = 0; + j < pauli_sums[i].size(); j++) { // (#679) Just ignore empty program if (fused_circuits[i].size() == 0) { (*output_tensor)(i, j) = -2.0; @@ -232,7 +235,8 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel { // no need to update scratch_state since ComputeExpectation // will take care of things for us. ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; j < fused_circuits[cur_batch_index].size(); j++) { + for (std::vector>::size_type j = 0; + j < fused_circuits[cur_batch_index].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv); } } diff --git a/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc index 93587cd88..29883629b 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc @@ -137,7 +137,8 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as nescessary. - for (std::vector>>::size_type i = 0; i < fused_circuits.size(); i++) { + for (std::vector>>::size_type i = 0; + i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -146,7 +147,8 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel { sv = ss.Create(largest_nq); } ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; + j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } @@ -195,7 +197,8 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel { sv = ss.Create(largest_nq); } ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; + j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } diff --git a/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc index 35e1c5df6..616e04e28 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc @@ -131,7 +131,8 @@ class TfqSimulateStateOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as nescessary. 
- for (std::vector>>::size_type i = 0; i < fused_circuits.size(); i++) { + for (std::vector>>::size_type i = 0; + i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -140,7 +141,8 @@ class TfqSimulateStateOp : public tensorflow::OpKernel { sv = ss.Create(largest_nq); } ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; + j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } @@ -189,7 +191,8 @@ class TfqSimulateStateOp : public tensorflow::OpKernel { sv = ss.Create(largest_nq); } ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; + j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } diff --git a/tensorflow_quantum/core/src/adj_util.cc b/tensorflow_quantum/core/src/adj_util.cc index 311864f8a..d4a8740bf 100644 --- a/tensorflow_quantum/core/src/adj_util.cc +++ b/tensorflow_quantum/core/src/adj_util.cc @@ -38,7 +38,8 @@ void CreateGradientCircuit( const QsimCircuit& circuit, const std::vector& metadata, std::vector>>* partial_fuses, std::vector* grad_gates) { - for (std::vector::size_type i = 0; i < metadata.size(); i++) { + for (std::vector::size_type i = 0; i < metadata.size(); + i++) { if (metadata[i].symbol_values.size() == 0) { continue; } @@ -78,7 +79,8 @@ void CreateGradientCircuit( // PhasedX else if (circuit.gates[i].kind == qsim::Cirq::GateKind::kPhasedXPowGate) { // Process potentially several symbols. - for (std::vector::size_type j = 0; j < metadata[i].symbol_values.size(); j++) { + for (std::vector::size_type j = 0; + j < metadata[i].symbol_values.size(); j++) { if (metadata[i].placeholder_names[j] == GateParamNames::kPhaseExponent) { PopulateGradientPhasedXPhasedExponent( @@ -103,7 +105,8 @@ void CreateGradientCircuit( // Process potentially several symbols. bool swapq = circuit.gates[i].swapped; - for (std::vector::size_type j = 0; j < metadata[i].symbol_values.size(); j++) { + for (std::vector::size_type j = 0; + j < metadata[i].symbol_values.size(); j++) { if (metadata[i].placeholder_names[j] == GateParamNames::kTheta) { PopulateGradientFsimTheta( metadata[i].symbol_values[j], i, @@ -128,7 +131,8 @@ void CreateGradientCircuit( qsim::Cirq::GateKind::kPhasedISwapPowGate) { // Process potentially several symbols. 
bool swapq = circuit.gates[i].swapped; - for (std::vector::size_type j = 0; j < metadata[i].symbol_values.size(); j++) { + for (std::vector::size_type j = 0; + j < metadata[i].symbol_values.size(); j++) { if (metadata[i].placeholder_names[j] == GateParamNames::kPhaseExponent) { PopulateGradientPhasedISwapPhasedExponent( @@ -159,7 +163,8 @@ void CreateGradientCircuit( partial_fuses->assign(grad_gates->size() + 1, std::vector>({})); - for (std::vector::size_type i = 0; i < grad_gates->size(); i++) { + for (std::vector::size_type i = 0; + i < grad_gates->size(); i++) { right = circuit.gates.begin() + (*grad_gates)[i].index; (*partial_fuses)[i] = fuser.FuseGates(qsim::BasicGateFuser::Parameter(), diff --git a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc index 669ed9dc3..1bad7e859 100644 --- a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc +++ b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc @@ -59,7 +59,8 @@ Arg MakeControlArg(const std::string& val) { } inline void AssertControlEqual(const QsimGate& a, const QsimGate& b) { - for (std::vector::size_type i = 0; i < a.controlled_by.size(); i++) { + for (std::vector::size_type i = 0; i < a.controlled_by.size(); + i++) { ASSERT_EQ(a.controlled_by[i], b.controlled_by[i]); } ASSERT_EQ(a.cmask, b.cmask); From 6eb797d7583f4b269943e703438c71da8ac12a20 Mon Sep 17 00:00:00 2001 From: Jae Yoo Date: Sun, 7 Feb 2021 16:32:04 +0900 Subject: [PATCH 3/9] Fix format w.r.t. github format checker --- tensorflow_quantum/core/ops/cirq_ops_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow_quantum/core/ops/cirq_ops_test.py b/tensorflow_quantum/core/ops/cirq_ops_test.py index 79978ec86..77569bf5f 100644 --- a/tensorflow_quantum/core/ops/cirq_ops_test.py +++ b/tensorflow_quantum/core/ops/cirq_ops_test.py @@ -390,8 +390,8 @@ def test_sampling_output_padding(self, op, all_n_qubits, n_samples): this_expected_output[:, :max(all_n_qubits) - n_qubits] = -2 expected_outputs.append(this_expected_output) circuits.append( - cirq.Circuit(*cirq.X.on_each( - *cirq.GridQubit.rect(1, n_qubits)))) + cirq.Circuit( + *cirq.X.on_each(*cirq.GridQubit.rect(1, n_qubits)))) results = op(util.convert_to_tensor(circuits), [], [[]] * len(circuits), [n_samples]).numpy() self.assertAllClose(expected_outputs, results) @@ -430,8 +430,8 @@ def run_sweep(self, program, params, repetitions): circuits = [] for n_qubits in all_n_qubits: circuits.append( - cirq.Circuit(*cirq.X.on_each( - *cirq.GridQubit.rect(1, n_qubits)))) + cirq.Circuit( + *cirq.X.on_each(*cirq.GridQubit.rect(1, n_qubits)))) test_results = this_op(util.convert_to_tensor(circuits), [], [[]] * len(circuits), [n_samples]).numpy() From a00e68ef5796984973d4bc1a105fb8ca8563559d Mon Sep 17 00:00:00 2001 From: Jae Yoo Date: Mon, 8 Feb 2021 17:09:33 +0900 Subject: [PATCH 4/9] Fix format --- tensorflow_quantum/core/ops/tfq_simulate_ops_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py b/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py index e81604624..ae4addb58 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py +++ b/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py @@ -453,8 +453,8 @@ def test_sampling_output_padding(self, all_n_qubits, n_samples): this_expected_output[:, :max(all_n_qubits) - n_qubits] = -2 expected_outputs.append(this_expected_output) circuits.append( - cirq.Circuit(*cirq.X.on_each( - 
*cirq.GridQubit.rect(1, n_qubits)))) + cirq.Circuit( + *cirq.X.on_each(*cirq.GridQubit.rect(1, n_qubits)))) results = op(util.convert_to_tensor(circuits), [], [[]] * len(circuits), [n_samples]).numpy() self.assertAllClose(expected_outputs, results) From 3d385ad4a64e4ecd4a9caad4385fcb1d0ad49d03 Mon Sep 17 00:00:00 2001 From: Jae Yoo Date: Thu, 26 Aug 2021 00:31:09 +0000 Subject: [PATCH 5/9] Update the latest codes and fix format --- scripts/test_all.sh | 2 +- .../ops/math_ops/tfq_inner_product_grad.cc | 10 +++---- .../core/ops/noise/tfq_noisy_expectation.cc | 28 +++++++++---------- .../noise/tfq_noisy_sampled_expectation.cc | 28 +++++++++---------- .../core/ops/noise/tfq_noisy_samples.cc | 22 +++++++-------- .../core/src/circuit_parser_qsim.cc | 26 ++++++----------- .../core/src/circuit_parser_qsim_test.cc | 6 ++-- tensorflow_quantum/core/src/util_qsim.h | 8 +++--- tensorflow_quantum/core/src/util_qsim_test.cc | 17 +++++------ 9 files changed, 70 insertions(+), 77 deletions(-) diff --git a/scripts/test_all.sh b/scripts/test_all.sh index e5513bb12..a0d37dc1e 100755 --- a/scripts/test_all.sh +++ b/scripts/test_all.sh @@ -14,7 +14,7 @@ # limitations under the License. # ============================================================================== echo "Testing All Bazel py_test and cc_tests."; -test_outputs=$(bazel test -c opt --experimental_repo_remote_exec --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --notest_keep_going --test_output=errors //tensorflow_quantum/...) +test_outputs=$(bazel test -c opt --experimental_repo_remote_exec --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --cxxopt="-Wno-unused-function" --notest_keep_going --test_output=errors //tensorflow_quantum/...) exit_code=$? if [ "$exit_code" == "0" ]; then echo "Testing Complete!"; diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc index 5b29571d2..47c0b134b 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc @@ -56,9 +56,9 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel { "Expected 5 inputs, got ", num_inputs, " inputs."))); // Create the output Tensor. - const int output_dim_batch_size = context->input(0).dim_size(0); - const int output_dim_internal_size = context->input(3).dim_size(1); - const int output_dim_symbol_size = context->input(1).dim_size(0); + const unsigned int output_dim_batch_size = context->input(0).dim_size(0); + const unsigned int output_dim_internal_size = context->input(3).dim_size(1); + const unsigned int output_dim_symbol_size = context->input(1).dim_size(0); OP_REQUIRES(context, output_dim_symbol_size > 0, tensorflow::errors::InvalidArgument(absl::StrCat( "The number of symbols must be a positive integer, got ", @@ -398,13 +398,13 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel { // if applicable compute control qubit mask and control value bits. 
uint64_t mask = 0; uint64_t cbits = 0; - for (int k = 0; k < cur_gate.controlled_by.size(); k++) { + for (unsigned int k = 0; k < cur_gate.controlled_by.size(); k++) { uint64_t control_loc = cur_gate.controlled_by[k]; mask |= uint64_t{1} << control_loc; cbits |= ((cur_gate.cmask >> k) & 1) << control_loc; } - for (int k = 0; + for (unsigned int k = 0; k < gradient_gates[cur_batch_index][l - 1].grad_gates.size(); k++) { // Copy sv_adj onto scratch2 in anticipation of non-unitary diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc index 88b78166e..1d375a658 100644 --- a/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc +++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc @@ -175,8 +175,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { tensorflow::GuardedPhiloxRandom random_gen; int max_n_shots = 1; - for (int i = 0; i < num_samples.size(); i++) { - for (int j = 0; j < num_samples[i].size(); j++) { + for (unsigned int i = 0; i < num_samples.size(); i++) { + for (unsigned int j = 0; j < num_samples[i].size(); j++) { max_n_shots = std::max(max_n_shots, num_samples[i][j]); } } @@ -188,12 +188,12 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. - for (int i = 0; i < ncircuits.size(); i++) { + for (unsigned int i = 0; i < ncircuits.size(); i++) { int nq = num_qubits[i]; // (#679) Just ignore empty program if (ncircuits[i].channels.size() == 0) { - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (unsigned int j = 0; j < pauli_sums[i].size(); j++) { (*output_tensor)(i, j) = -2.0; } continue; @@ -220,7 +220,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { scratch, sv, unused_stats); // Use this trajectory as a source for all expectation calculations. 
- for (int j = 0; j < pauli_sums[i].size(); j++) { + for (unsigned int j = 0; j < pauli_sums[i].size(); j++) { if (run_samples[j] >= num_samples[i][j]) { continue; } @@ -232,14 +232,14 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { run_samples[j]++; } bool break_loop = true; - for (int j = 0; j < num_samples[i].size(); j++) { + for (unsigned int j = 0; j < num_samples[i].size(); j++) { if (run_samples[j] < num_samples[i][j]) { break_loop = false; break; } } if (break_loop) { - for (int j = 0; j < num_samples[i].size(); j++) { + for (unsigned int j = 0; j < num_samples[i].size(); j++) { rolling_sums[j] /= num_samples[i][j]; (*output_tensor)(i, j) = static_cast(rolling_sums[j]); } @@ -280,8 +280,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { tensorflow::GuardedPhiloxRandom random_gen; int max_n_shots = 1; - for (int i = 0; i < num_samples.size(); i++) { - for (int j = 0; j < num_samples[i].size(); j++) { + for (unsigned int i = 0; i < num_samples.size(); i++) { + for (unsigned int j = 0; j < num_samples[i].size(); j++) { max_n_shots = std::max(max_n_shots, num_samples[i][j]); } } @@ -304,13 +304,13 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { random_gen.ReserveSamples128(ncircuits.size() * max_n_shots + 1); tensorflow::random::SimplePhilox rand_source(&local_gen); - for (int i = 0; i < ncircuits.size(); i++) { + for (unsigned int i = 0; i < ncircuits.size(); i++) { int nq = num_qubits[i]; int rep_offset = rep_offsets[start][i]; // (#679) Just ignore empty program if (ncircuits[i].channels.size() == 0) { - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (unsigned int j = 0; j < pauli_sums[i].size(); j++) { (*output_tensor)(i, j) = -2.0; } continue; @@ -337,7 +337,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { sim, scratch, sv, unused_stats); // Compute expectations across all ops using this trajectory. - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (unsigned int j = 0; j < pauli_sums[i].size(); j++) { int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads; if (run_samples[j] >= p_reps + rep_offset) { continue; @@ -354,7 +354,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { // Check if we have run enough trajectories for all ops. bool break_loop = true; - for (int j = 0; j < num_samples[i].size(); j++) { + for (unsigned int j = 0; j < num_samples[i].size(); j++) { int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads; if (run_samples[j] < p_reps + rep_offset) { break_loop = false; @@ -364,7 +364,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { if (break_loop) { // Lock writing to this batch index in output_tensor. 
batch_locks[i].lock(); - for (int j = 0; j < num_samples[i].size(); j++) { + for (unsigned int j = 0; j < num_samples[i].size(); j++) { rolling_sums[j] /= num_samples[i][j]; (*output_tensor)(i, j) += static_cast(rolling_sums[j]); } diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc index 77d6197ae..bf46214fb 100644 --- a/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc +++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc @@ -177,8 +177,8 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { tensorflow::GuardedPhiloxRandom random_gen; int max_psum_length = 1; int max_n_shots = 1; - for (int i = 0; i < pauli_sums.size(); i++) { - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (unsigned int i = 0; i < pauli_sums.size(); i++) { + for (unsigned int j = 0; j < pauli_sums[i].size(); j++) { max_psum_length = std::max(max_psum_length, pauli_sums[i][j].terms().size()); max_n_shots = std::max(max_n_shots, num_samples[i][j]); @@ -192,12 +192,12 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. - for (int i = 0; i < ncircuits.size(); i++) { + for (unsigned int i = 0; i < ncircuits.size(); i++) { int nq = num_qubits[i]; // (#679) Just ignore empty program if (ncircuits[i].channels.empty()) { - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (unsigned int j = 0; j < pauli_sums[i].size(); j++) { (*output_tensor)(i, j) = -2.0; } continue; @@ -224,7 +224,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { scratch, sv, unused_stats); // Use this trajectory as a source for all expectation calculations. 
- for (int j = 0; j < pauli_sums[i].size(); j++) { + for (unsigned int j = 0; j < pauli_sums[i].size(); j++) { if (run_samples[j] >= num_samples[i][j]) { continue; } @@ -236,14 +236,14 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { run_samples[j]++; } bool break_loop = true; - for (int j = 0; j < num_samples[i].size(); j++) { + for (unsigned int j = 0; j < num_samples[i].size(); j++) { if (run_samples[j] < num_samples[i][j]) { break_loop = false; break; } } if (break_loop) { - for (int j = 0; j < num_samples[i].size(); j++) { + for (unsigned int j = 0; j < num_samples[i].size(); j++) { rolling_sums[j] /= num_samples[i][j]; (*output_tensor)(i, j) = static_cast(rolling_sums[j]); } @@ -285,8 +285,8 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { tensorflow::GuardedPhiloxRandom random_gen; int max_psum_length = 1; int max_n_shots = 1; - for (int i = 0; i < pauli_sums.size(); i++) { - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (unsigned int i = 0; i < pauli_sums.size(); i++) { + for (unsigned int j = 0; j < pauli_sums[i].size(); j++) { max_psum_length = std::max(max_psum_length, pauli_sums[i][j].terms().size()); max_n_shots = std::max(max_n_shots, num_samples[i][j]); @@ -310,13 +310,13 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { auto local_gen = random_gen.ReserveSamples128(num_rand); tensorflow::random::SimplePhilox rand_source(&local_gen); - for (int i = 0; i < ncircuits.size(); i++) { + for (unsigned int i = 0; i < ncircuits.size(); i++) { int nq = num_qubits[i]; int rep_offset = rep_offsets[start][i]; // (#679) Just ignore empty program if (ncircuits[i].channels.empty()) { - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (unsigned int j = 0; j < pauli_sums[i].size(); j++) { (*output_tensor)(i, j) = -2.0; } continue; @@ -343,7 +343,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { sim, scratch, sv, unused_stats); // Compute expectations across all ops using this trajectory. - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (unsigned int j = 0; j < pauli_sums[i].size(); j++) { int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads; if (run_samples[j] >= p_reps + rep_offset) { continue; @@ -360,7 +360,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { // Check if we have run enough trajectories for all ops. bool break_loop = true; - for (int j = 0; j < num_samples[i].size(); j++) { + for (unsigned int j = 0; j < num_samples[i].size(); j++) { int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads; if (run_samples[j] < p_reps + rep_offset) { break_loop = false; @@ -370,7 +370,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { if (break_loop) { // Lock writing to this batch index in output_tensor. 
batch_locks[i].lock(); - for (int j = 0; j < num_samples[i].size(); j++) { + for (unsigned int j = 0; j < num_samples[i].size(); j++) { rolling_sums[j] /= num_samples[i][j]; (*output_tensor)(i, j) += static_cast(rolling_sums[j]); } diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc index 0e8321546..f5cca76b0 100644 --- a/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc +++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc @@ -97,12 +97,12 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel { programs.size(), num_cycles, construct_f); OP_REQUIRES_OK(context, parse_status); - int max_num_qubits = 0; - for (const int num : num_qubits) { + uint64_t max_num_qubits = 0; + for (const uint64_t num : num_qubits) { max_num_qubits = std::max(max_num_qubits, num); } - const int output_dim_size = maps.size(); + const unsigned int output_dim_size = maps.size(); tensorflow::TensorShape output_shape; output_shape.AddDim(output_dim_size); output_shape.AddDim(num_samples); @@ -132,7 +132,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel { private: void ComputeLarge(const std::vector& num_qubits, - const int max_num_qubits, const int num_samples, + const uint64_t max_num_qubits, const int num_samples, const std::vector& ncircuits, tensorflow::OpKernelContext* context, tensorflow::TTypes::Tensor* output_tensor) { @@ -145,7 +145,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel { qsim::MultiQubitGateFuser, Simulator>; // Begin simulation. - int largest_nq = 1; + uint64_t largest_nq = 1; Simulator sim = Simulator(tfq_for); StateSpace ss = StateSpace(tfq_for); auto sv = ss.Create(largest_nq); @@ -160,8 +160,8 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as nescessary. - for (int i = 0; i < ncircuits.size(); i++) { - int nq = num_qubits[i]; + for (unsigned int i = 0; i < ncircuits.size(); i++) { + uint64_t nq = num_qubits[i]; if (nq > largest_nq) { // need to switch to larger statespace. @@ -203,7 +203,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel { } void ComputeSmall(const std::vector& num_qubits, - const int max_num_qubits, const int num_samples, + const uint64_t max_num_qubits, const int num_samples, const std::vector& ncircuits, tensorflow::OpKernelContext* context, tensorflow::TTypes::Tensor* output_tensor) { @@ -243,7 +243,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel { auto DoWork = [&](int start, int end) { // Begin simulation. const auto tfq_for = qsim::SequentialFor(1); - int largest_nq = 1; + uint64_t largest_nq = 1; Simulator sim = Simulator(tfq_for); StateSpace ss = StateSpace(tfq_for); auto sv = ss.Create(largest_nq); @@ -255,8 +255,8 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel { auto local_gen = random_gen.ReserveSamples32(needed_random); tensorflow::random::SimplePhilox rand_source(&local_gen); - for (int i = 0; i < ncircuits.size(); i++) { - int nq = num_qubits[i]; + for (unsigned int i = 0; i < ncircuits.size(); i++) { + uint64_t nq = num_qubits[i]; int j = start > 0 ? 
offset_prefix_sum[start - 1][i] : 0; int needed_samples = offset_prefix_sum[start][i] - j; if (needed_samples <= 0) { diff --git a/tensorflow_quantum/core/src/circuit_parser_qsim.cc b/tensorflow_quantum/core/src/circuit_parser_qsim.cc index 04bbb9616..09cfccad4 100644 --- a/tensorflow_quantum/core/src/circuit_parser_qsim.cc +++ b/tensorflow_quantum/core/src/circuit_parser_qsim.cc @@ -599,10 +599,9 @@ inline Status AsymmetricDepolarizingChannel(const Operation& op, const unsigned int time, NoisyQsimCircuit* ncircuit) { int q; - bool unused; float p_x, p_y, p_z; Status u; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q); u = ParseProtoArg(op, "p_x", {}, &p_x); u = ParseProtoArg(op, "p_y", {}, &p_y); @@ -621,10 +620,9 @@ inline Status DepolarizingChannel(const Operation& op, const unsigned int time, NoisyQsimCircuit* ncircuit) { int q; - bool unused; float p; Status u; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q); u = ParseProtoArg(op, "p", {}, &p); if (!u.ok()) { @@ -639,10 +637,9 @@ inline Status DepolarizingChannel(const Operation& op, inline Status GADChannel(const Operation& op, const unsigned int num_qubits, const unsigned int time, NoisyQsimCircuit* ncircuit) { int q; - bool unused; float p, gamma; Status u; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q); u = ParseProtoArg(op, "p", {}, &p); if (!u.ok()) { @@ -663,8 +660,7 @@ inline Status ResetChannel(const Operation& op, const unsigned int num_qubits, const unsigned int time, NoisyQsimCircuit* ncircuit) { int q; - bool unused; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q); auto chan = qsim::Cirq::ResetChannel::Create(time, num_qubits - q - 1); ncircuit->channels.push_back(chan); @@ -676,10 +672,9 @@ inline Status AmplitudeDampingChannel(const Operation& op, const unsigned int time, NoisyQsimCircuit* ncircuit) { int q; - bool unused; float gamma; Status u; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q); u = ParseProtoArg(op, "gamma", {}, &gamma); if (!u.ok()) { @@ -696,10 +691,9 @@ inline Status PhaseDampingChannel(const Operation& op, const unsigned int time, NoisyQsimCircuit* ncircuit) { int q; - bool unused; float gamma; Status u; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q); u = ParseProtoArg(op, "gamma", {}, &gamma); if (!u.ok()) { @@ -717,10 +711,9 @@ inline Status PhaseFlipChannel(const Operation& op, const unsigned int time, NoisyQsimCircuit* ncircuit) { int q; - bool unused; float p; Status u; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q); u = ParseProtoArg(op, "p", {}, &p); if (!u.ok()) { @@ -737,10 +730,9 @@ inline Status BitFlipChannel(const Operation& op, const unsigned int num_qubits, const unsigned int time, NoisyQsimCircuit* ncircuit) { int q; - bool unused; float p; Status u; - unused = absl::SimpleAtoi(op.qubits(0).id(), &q); + std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q); u = ParseProtoArg(op, "p", {}, &p); if (!u.ok()) { @@ -838,8 +830,8 @@ tensorflow::Status QsimCircuitFromProgram( std::vector* metadata /*=nullptr*/) { // Convert proto to qsim internal representation. circuit->num_qubits = num_qubits; - int time = 0; bool unused; + int time = 0; // Special case empty. 
if (num_qubits <= 0) { return Status::OK(); diff --git a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc index dd44f0af9..1bb0194cd 100644 --- a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc +++ b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc @@ -90,14 +90,14 @@ inline void AssertOneQubitEqual(const QsimGate& a, const QsimGate& b) { inline void AssertChannelEqual(const QsimChannel& a, const QsimChannel& b) { ASSERT_EQ(a.size(), b.size()); - for (int i = 0; i < a.size(); i++) { + for (long unsigned int i = 0; i < a.size(); i++) { ASSERT_EQ(a[i].kind, b[i].kind); ASSERT_EQ(a[i].unitary, b[i].unitary); ASSERT_NEAR(a[i].prob, b[i].prob, 1e-5); auto a_k_ops = a[i].ops; auto b_k_ops = b[i].ops; EXPECT_EQ(a_k_ops.size(), b_k_ops.size()); - for (int j = 0; j < a_k_ops.size(); j++) { + for (long unsigned int j = 0; j < a_k_ops.size(); j++) { AssertOneQubitEqual(a_k_ops[j], b_k_ops[j]); } } @@ -1536,7 +1536,7 @@ TEST(QsimCircuitParserTest, NoisyEmpty) { Program program_proto; Circuit* circuit_proto = program_proto.mutable_circuit(); circuit_proto->set_scheduling_strategy(circuit_proto->MOMENT_BY_MOMENT); - (void)circuit_proto->add_moments(); + std::ignore = circuit_proto->add_moments(); NoisyQsimCircuit test_circuit; ASSERT_EQ( diff --git a/tensorflow_quantum/core/src/util_qsim.h b/tensorflow_quantum/core/src/util_qsim.h index 090b3b44e..4eaea360f 100644 --- a/tensorflow_quantum/core/src/util_qsim.h +++ b/tensorflow_quantum/core/src/util_qsim.h @@ -359,13 +359,13 @@ static void BalanceTrajectory(const std::vector>& num_samples, std::vector rep_limits(num_samples.size(), -1); std::vector height(num_threads, 0); - for (int i = 0; i < num_samples.size(); i++) { - for (int j = 0; j < num_samples[i].size(); j++) { + for (unsigned int i = 0; i < num_samples.size(); i++) { + for (unsigned int j = 0; j < num_samples[i].size(); j++) { rep_limits[i] = std::max(rep_limits[i], num_samples[i][j]); } } int prev_max_height = -1; - for (int j = 0; j < num_samples.size(); j++) { + for (unsigned int j = 0; j < num_samples.size(); j++) { int run_ceiling = ((rep_limits[j] + num_threads - 1) / num_threads); int num_lo = num_threads * run_ceiling - rep_limits[j]; int num_hi = num_threads - num_lo; @@ -404,7 +404,7 @@ static void BalanceTrajectory(const int& num_samples, const int& num_threads, std::vector height(num_threads, 0); int prev_max_height = -1; - for (int j = 0; j < (*thread_offsets)[0].size(); j++) { + for (unsigned int j = 0; j < (*thread_offsets)[0].size(); j++) { int run_ceiling = ((num_samples + num_threads - 1) / num_threads); int num_lo = num_threads * run_ceiling - num_samples; int num_hi = num_threads - num_lo; diff --git a/tensorflow_quantum/core/src/util_qsim_test.cc b/tensorflow_quantum/core/src/util_qsim_test.cc index b4f630f3c..bdd54580b 100644 --- a/tensorflow_quantum/core/src/util_qsim_test.cc +++ b/tensorflow_quantum/core/src/util_qsim_test.cc @@ -493,8 +493,8 @@ TEST(UtilQsimTest, AccumulateOperatorsBasic) { p_term_scratch2->set_coefficient_real(-5.0); // 0.5 * (0.123ZX -3X + 4I) + 0.25 * (-5I) applied onto psi. - (void)AccumulateOperators({p_sum, p_sum2}, {0.5, 0.25}, sim, ss, sv, scratch, - dest); + std::ignore = AccumulateOperators({p_sum, p_sum2}, {0.5, 0.25}, sim, ss, sv, + scratch, dest); // Check that dest got accumulated onto. 
EXPECT_NEAR(ss.GetAmpl(dest, 0).real(), 0.577925, 1e-5); @@ -536,7 +536,7 @@ TEST(UtilQsimTest, AccumulateOperatorsEmpty) { auto scratch = ss.Create(2); auto dest = ss.Create(2); - (void)AccumulateOperators({}, {}, sim, ss, sv, scratch, dest); + std::ignore = AccumulateOperators({}, {}, sim, ss, sv, scratch, dest); // Check sv is still in zero state. EXPECT_NEAR(ss.GetAmpl(sv, 0).real(), 1.0, 1e-5); @@ -600,7 +600,8 @@ TEST(UtilQsimTest, AccumulateFusedCircuitsBasic) { // Initialize coeffs. std::vector coeffs = {1.23, 4.56}; - (void)AccumulateFusedCircuits(coeffs, fused_circuits, sim, ss, scratch, dest); + std::ignore = + AccumulateFusedCircuits(coeffs, fused_circuits, sim, ss, scratch, dest); // Scratch has coeffs[r][c] * fused circuits[r][c] where r, c = last indices. // Check that dest got accumulated onto. @@ -629,7 +630,7 @@ TEST(UtilQsimTest, AccumulateFusedCircuitsEmpty) { auto scratch = ss.Create(2); auto dest = ss.Create(2); - (void)AccumulateFusedCircuits({}, {}, sim, ss, scratch, dest); + std::ignore = AccumulateFusedCircuits({}, {}, sim, ss, scratch, dest); // scratch has garbage value. // Check that dest contains all zeros. @@ -646,13 +647,13 @@ static void AssertWellBalanced(const std::vector>& n_reps, const int& num_threads, const std::vector>& offsets) { auto max_work = std::vector(n_reps.size(), -1); - for (int i = 0; i < n_reps.size(); i++) { - for (int j = 0; j < n_reps[0].size(); j++) { + for (std::vector>::size_type i = 0; i < n_reps.size(); i++) { + for (std::vector::size_type j = 0; j < n_reps[0].size(); j++) { max_work[i] = std::max(max_work[i], n_reps[i][j]); } } - for (int i = 0; i < n_reps.size(); i++) { + for (std::vector>::size_type i = 0; i < n_reps.size(); i++) { int sum = 0; int prev_local_work = 0; for (int k = 0; k < num_threads; k++) { From 976fc7bbd96b934899c8d0bfb82a99752db397d1 Mon Sep 17 00:00:00 2001 From: Jae Yoo Date: Thu, 30 Sep 2021 05:55:43 +0000 Subject: [PATCH 6/9] Antonio's feedback --- scripts/test_all.sh | 2 +- .../core/ops/math_ops/tfq_inner_product.cc | 16 ++++++++-------- tensorflow_quantum/core/src/util_qsim.h | 13 +++++-------- .../sampled_expectation_test.py | 1 - 4 files changed, 14 insertions(+), 18 deletions(-) diff --git a/scripts/test_all.sh b/scripts/test_all.sh index a0d37dc1e..e5513bb12 100755 --- a/scripts/test_all.sh +++ b/scripts/test_all.sh @@ -14,7 +14,7 @@ # limitations under the License. # ============================================================================== echo "Testing All Bazel py_test and cc_tests."; -test_outputs=$(bazel test -c opt --experimental_repo_remote_exec --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --cxxopt="-Wno-unused-function" --notest_keep_going --test_output=errors //tensorflow_quantum/...) +test_outputs=$(bazel test -c opt --experimental_repo_remote_exec --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --notest_keep_going --test_output=errors //tensorflow_quantum/...) exit_code=$? if [ "$exit_code" == "0" ]; then echo "Testing Complete!"; diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc index ebceda918..0c391c47e 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc @@ -174,7 +174,7 @@ class TfqInnerProductOp : public tensorflow::OpKernel { // Simulate programs one by one. 
Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. - for (std::vector>>::size_type i = 0; + for (std::vector::size_type i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -187,11 +187,11 @@ class TfqInnerProductOp : public tensorflow::OpKernel { // the state if there is a possibility that circuit[i] and // circuit[i + 1] produce the same state. ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; - j < fused_circuits[i].size(); j++) { + for (QsimFusedCircuit::size_type j = 0; j < fused_circuits[i].size(); + j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } - for (std::vector>>::size_type j = 0; + for (std::vector::size_type j = 0; j < other_fused_circuits[i].size(); j++) { // (#679) Just ignore empty program if (fused_circuits[i].size() == 0) { @@ -200,8 +200,8 @@ class TfqInnerProductOp : public tensorflow::OpKernel { } ss.SetStateZero(scratch); - for (std::vector>::size_type k = 0; - k < other_fused_circuits[i][j].size(); k++) { + for (QsimFusedCircuit k = 0; k < other_fused_circuits[i][j].size(); + k++) { qsim::ApplyFusedGate(sim, other_fused_circuits[i][j][k], scratch); } @@ -259,14 +259,14 @@ class TfqInnerProductOp : public tensorflow::OpKernel { // no need to update scratch_state since ComputeExpectation // will take care of things for us. ss.SetStateZero(sv); - for (std::vector>::size_type j = 0; + for (QsimFusedCircuit::size_type j = 0; j < fused_circuits[cur_batch_index].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv); } } ss.SetStateZero(scratch); - for (std::vector>::size_type k = 0; + for (QsimFusedCircuit::size_type k = 0; k < other_fused_circuits[cur_batch_index][cur_internal_index].size(); k++) { diff --git a/tensorflow_quantum/core/src/util_qsim.h b/tensorflow_quantum/core/src/util_qsim.h index 4eaea360f..2fe740126 100644 --- a/tensorflow_quantum/core/src/util_qsim.h +++ b/tensorflow_quantum/core/src/util_qsim.h @@ -1,11 +1,8 @@ /* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -235,7 +232,7 @@ tensorflow::Status ComputeSampledExpectationQsim( unsigned int location; // GridQubit id should be parsed down to integer at this upstream // so it is safe to just use atoi. 
- std::ignore = absl::SimpleAtoi(pair.qubit_id(), &location); + (void)absl::SimpleAtoi(pair.qubit_id(), &location); // Parity functions use little-endian indexing parity_bits.push_back(state.num_qubits() - location - 1); } @@ -359,13 +356,13 @@ static void BalanceTrajectory(const std::vector>& num_samples, std::vector rep_limits(num_samples.size(), -1); std::vector height(num_threads, 0); - for (unsigned int i = 0; i < num_samples.size(); i++) { - for (unsigned int j = 0; j < num_samples[i].size(); j++) { + for (int i = 0; i < num_samples.size(); i++) { + for (int j = 0; j < num_samples[i].size(); j++) { rep_limits[i] = std::max(rep_limits[i], num_samples[i][j]); } } int prev_max_height = -1; - for (unsigned int j = 0; j < num_samples.size(); j++) { + for (int j = 0; j < num_samples.size(); j++) { int run_ceiling = ((rep_limits[j] + num_threads - 1) / num_threads); int num_lo = num_threads * run_ceiling - rep_limits[j]; int num_hi = num_threads - num_lo; @@ -404,7 +401,7 @@ static void BalanceTrajectory(const int& num_samples, const int& num_threads, std::vector height(num_threads, 0); int prev_max_height = -1; - for (unsigned int j = 0; j < (*thread_offsets)[0].size(); j++) { + for (int j = 0; j < (*thread_offsets)[0].size(); j++) { int run_ceiling = ((num_samples + num_threads - 1) / num_threads); int num_lo = num_threads * run_ceiling - num_samples; int num_hi = num_threads - num_lo; diff --git a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py b/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py index 60714b2e4..c3c07c74c 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py +++ b/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py @@ -68,7 +68,6 @@ def test_sampled_expectation_symbol_input(self): sampled_expectation.SampledExpectation( differentiator=linear_combination.ForwardDifference()) - def test_sampled_expectation_instantiate_error(self): """Test that SampledExpectation errors with bad inputs.""" From 71d10d55778c044e3c2464e217b93afa011a0c12 Mon Sep 17 00:00:00 2001 From: Jae Yoo Date: Thu, 30 Sep 2021 08:05:05 +0000 Subject: [PATCH 7/9] Fix util_qsim.h --- tensorflow_quantum/core/src/util_qsim.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tensorflow_quantum/core/src/util_qsim.h b/tensorflow_quantum/core/src/util_qsim.h index 2fe740126..1d4ab1890 100644 --- a/tensorflow_quantum/core/src/util_qsim.h +++ b/tensorflow_quantum/core/src/util_qsim.h @@ -2,7 +2,9 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -232,7 +234,7 @@ tensorflow::Status ComputeSampledExpectationQsim( unsigned int location; // GridQubit id should be parsed down to integer at this upstream // so it is safe to just use atoi. 
- (void)absl::SimpleAtoi(pair.qubit_id(), &location); + std::ignore = absl::SimpleAtoi(pair.qubit_id(), &location); // Parity functions use little-endian indexing parity_bits.push_back(state.num_qubits() - location - 1); } @@ -356,13 +358,13 @@ static void BalanceTrajectory(const std::vector>& num_samples, std::vector rep_limits(num_samples.size(), -1); std::vector height(num_threads, 0); - for (int i = 0; i < num_samples.size(); i++) { - for (int j = 0; j < num_samples[i].size(); j++) { + for (unsigned int i = 0; i < num_samples.size(); i++) { + for (unsigned int j = 0; j < num_samples[i].size(); j++) { rep_limits[i] = std::max(rep_limits[i], num_samples[i][j]); } } int prev_max_height = -1; - for (int j = 0; j < num_samples.size(); j++) { + for (unsigned int j = 0; j < num_samples.size(); j++) { int run_ceiling = ((rep_limits[j] + num_threads - 1) / num_threads); int num_lo = num_threads * run_ceiling - rep_limits[j]; int num_hi = num_threads - num_lo; @@ -401,7 +403,7 @@ static void BalanceTrajectory(const int& num_samples, const int& num_threads, std::vector height(num_threads, 0); int prev_max_height = -1; - for (int j = 0; j < (*thread_offsets)[0].size(); j++) { + for (unsigned int j = 0; j < (*thread_offsets)[0].size(); j++) { int run_ceiling = ((num_samples + num_threads - 1) / num_threads); int num_lo = num_threads * run_ceiling - num_samples; int num_hi = num_threads - num_lo; From 54b89a43edcb960677067720397f83e6ac310ec6 Mon Sep 17 00:00:00 2001 From: Jae Yoo Date: Thu, 30 Sep 2021 08:20:15 +0000 Subject: [PATCH 8/9] Fix typo --- tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc index 0c391c47e..b1f373ec3 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc @@ -200,7 +200,8 @@ class TfqInnerProductOp : public tensorflow::OpKernel { } ss.SetStateZero(scratch); - for (QsimFusedCircuit k = 0; k < other_fused_circuits[i][j].size(); + for (QsimFusedCircuit::size_type k = 0; + k < other_fused_circuits[i][j].size(); k++) { qsim::ApplyFusedGate(sim, other_fused_circuits[i][j][k], scratch); } From a33e2300eb530f89ac7ae5a3a3add2afa2dcf263 Mon Sep 17 00:00:00 2001 From: Jae Yoo Date: Thu, 30 Sep 2021 08:22:48 +0000 Subject: [PATCH 9/9] Fix format --- tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc index b1f373ec3..98356a0e7 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc @@ -201,8 +201,7 @@ class TfqInnerProductOp : public tensorflow::OpKernel { ss.SetStateZero(scratch); for (QsimFusedCircuit::size_type k = 0; - k < other_fused_circuits[i][j].size(); - k++) { + k < other_fused_circuits[i][j].size(); k++) { qsim::ApplyFusedGate(sim, other_fused_circuits[i][j][k], scratch); }
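The change repeated throughout these patches is replacing signed loop counters with the container's size_type so that comparisons against .size() no longer mix signed and unsigned operands; patch 8/9 exists only to correct one spot where the counter was accidentally declared as QsimFusedCircuit itself rather than QsimFusedCircuit::size_type. A minimal, self-contained sketch of the pattern follows; it assumes nothing beyond the standard library, and the num_samples name and values are invented for illustration rather than taken from the TFQ sources.

#include <cstdio>
#include <vector>

int main() {
  // Shape mirrors the num_samples[i][j] tables iterated over in the ops
  // above; the values here are made up for illustration.
  std::vector<std::vector<int>> num_samples = {{10, 20}, {30}};

  // A plain `int i` compared against .size() mixes signed and unsigned and
  // triggers -Wsign-compare. Using the container's size_type keeps both
  // sides of the comparison unsigned.
  for (std::vector<std::vector<int>>::size_type i = 0;
       i < num_samples.size(); i++) {
    for (std::vector<int>::size_type j = 0; j < num_samples[i].size(); j++) {
      std::printf("num_samples[%zu][%zu] = %d\n", i, j, num_samples[i][j]);
    }
  }
  return 0;
}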
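The other recurring cleanup is how intentionally discarded return values are written: the `bool unused = ...` assignments and `(void)` casts in the earlier patches settle on `std::ignore` by patch 7/9. A small sketch of the idiom is below, with a hypothetical ParseIndex standing in for absl::SimpleAtoi so the example compiles on its own.

#include <string>
#include <tuple>  // std::ignore

// Hypothetical stand-in for absl::SimpleAtoi: parses `s` into `*out` and
// returns a success flag that the caller chooses to discard.
bool ParseIndex(const std::string& s, int* out) {
  *out = std::stoi(s);
  return true;
}

int main() {
  int q = 0;
  // `bool unused = ParseIndex(...)` leaves an unused-variable warning and
  // `(void)ParseIndex(...)` reads as a cast; assigning to std::ignore makes
  // the intent to drop the result explicit.
  std::ignore = ParseIndex("3", &q);
  return q == 3 ? 0 : 1;
}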