diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc
index 2a66d2919..98356a0e7 100644
--- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc
+++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc
@@ -174,7 +174,8 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (std::vector::size_type i = 0;
+         i < fused_circuits.size(); i++) {
       int nq = num_qubits[i];
       if (nq > largest_nq) {
         // need to switch to larger statespace.
@@ -186,10 +187,12 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
       // the state if there is a possibility that circuit[i] and
       // circuit[i + 1] produce the same state.
       ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (QsimFusedCircuit::size_type j = 0; j < fused_circuits[i].size();
+           j++) {
         qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
       }
-      for (int j = 0; j < other_fused_circuits[i].size(); j++) {
+      for (std::vector::size_type j = 0;
+           j < other_fused_circuits[i].size(); j++) {
         // (#679) Just ignore empty program
         if (fused_circuits[i].size() == 0) {
           (*output_tensor)(i, j) = std::complex(1, 0);
@@ -197,7 +200,8 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
         }
         ss.SetStateZero(scratch);
-        for (int k = 0; k < other_fused_circuits[i][j].size(); k++) {
+        for (QsimFusedCircuit::size_type k = 0;
+             k < other_fused_circuits[i][j].size(); k++) {
           qsim::ApplyFusedGate(sim, other_fused_circuits[i][j][k], scratch);
         }
@@ -255,13 +259,14 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
         // no need to update scratch_state since ComputeExpectation
         // will take care of things for us.
         ss.SetStateZero(sv);
-        for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
+        for (QsimFusedCircuit::size_type j = 0;
+             j < fused_circuits[cur_batch_index].size(); j++) {
           qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv);
         }
       }
       ss.SetStateZero(scratch);
-      for (int k = 0;
+      for (QsimFusedCircuit::size_type k = 0;
            k < other_fused_circuits[cur_batch_index][cur_internal_index].size();
            k++) {
diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc
index 5b29571d2..47c0b134b 100644
--- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc
+++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc
@@ -56,9 +56,9 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel {
                     "Expected 5 inputs, got ", num_inputs, " inputs.")));

     // Create the output Tensor.
-    const int output_dim_batch_size = context->input(0).dim_size(0);
-    const int output_dim_internal_size = context->input(3).dim_size(1);
-    const int output_dim_symbol_size = context->input(1).dim_size(0);
+    const unsigned int output_dim_batch_size = context->input(0).dim_size(0);
+    const unsigned int output_dim_internal_size = context->input(3).dim_size(1);
+    const unsigned int output_dim_symbol_size = context->input(1).dim_size(0);
     OP_REQUIRES(context, output_dim_symbol_size > 0,
                 tensorflow::errors::InvalidArgument(absl::StrCat(
                     "The number of symbols must be a positive integer, got ",
@@ -398,13 +398,13 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel {
         // if applicable compute control qubit mask and control value bits.
         uint64_t mask = 0;
         uint64_t cbits = 0;
-        for (int k = 0; k < cur_gate.controlled_by.size(); k++) {
+        for (unsigned int k = 0; k < cur_gate.controlled_by.size(); k++) {
           uint64_t control_loc = cur_gate.controlled_by[k];
           mask |= uint64_t{1} << control_loc;
           cbits |= ((cur_gate.cmask >> k) & 1) << control_loc;
         }
-        for (int k = 0;
+        for (unsigned int k = 0;
              k < gradient_gates[cur_batch_index][l - 1].grad_gates.size();
              k++) {
           // Copy sv_adj onto scratch2 in anticipation of non-unitary
diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc
index 88b78166e..1d375a658 100644
--- a/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc
+++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc
@@ -175,8 +175,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
     tensorflow::GuardedPhiloxRandom random_gen;
     int max_n_shots = 1;
-    for (int i = 0; i < num_samples.size(); i++) {
-      for (int j = 0; j < num_samples[i].size(); j++) {
+    for (unsigned int i = 0; i < num_samples.size(); i++) {
+      for (unsigned int j = 0; j < num_samples[i].size(); j++) {
         max_n_shots = std::max(max_n_shots, num_samples[i][j]);
       }
     }
@@ -188,12 +188,12 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < ncircuits.size(); i++) {
+    for (unsigned int i = 0; i < ncircuits.size(); i++) {
       int nq = num_qubits[i];
       // (#679) Just ignore empty program
       if (ncircuits[i].channels.size() == 0) {
-        for (int j = 0; j < pauli_sums[i].size(); j++) {
+        for (unsigned int j = 0; j < pauli_sums[i].size(); j++) {
           (*output_tensor)(i, j) = -2.0;
         }
         continue;
       }
@@ -220,7 +220,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
                                        scratch, sv, unused_stats);
       // Use this trajectory as a source for all expectation calculations.
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+      for (unsigned int j = 0; j < pauli_sums[i].size(); j++) {
         if (run_samples[j] >= num_samples[i][j]) {
           continue;
         }
@@ -232,14 +232,14 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
         run_samples[j]++;
       }
       bool break_loop = true;
-      for (int j = 0; j < num_samples[i].size(); j++) {
+      for (unsigned int j = 0; j < num_samples[i].size(); j++) {
         if (run_samples[j] < num_samples[i][j]) {
           break_loop = false;
           break;
         }
       }
       if (break_loop) {
-        for (int j = 0; j < num_samples[i].size(); j++) {
+        for (unsigned int j = 0; j < num_samples[i].size(); j++) {
           rolling_sums[j] /= num_samples[i][j];
           (*output_tensor)(i, j) = static_cast(rolling_sums[j]);
         }
@@ -280,8 +280,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
     tensorflow::GuardedPhiloxRandom random_gen;
     int max_n_shots = 1;
-    for (int i = 0; i < num_samples.size(); i++) {
-      for (int j = 0; j < num_samples[i].size(); j++) {
+    for (unsigned int i = 0; i < num_samples.size(); i++) {
+      for (unsigned int j = 0; j < num_samples[i].size(); j++) {
         max_n_shots = std::max(max_n_shots, num_samples[i][j]);
       }
     }
@@ -304,13 +304,13 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
           random_gen.ReserveSamples128(ncircuits.size() * max_n_shots + 1);
       tensorflow::random::SimplePhilox rand_source(&local_gen);
-      for (int i = 0; i < ncircuits.size(); i++) {
+      for (unsigned int i = 0; i < ncircuits.size(); i++) {
         int nq = num_qubits[i];
         int rep_offset = rep_offsets[start][i];
         // (#679) Just ignore empty program
         if (ncircuits[i].channels.size() == 0) {
-          for (int j = 0; j < pauli_sums[i].size(); j++) {
+          for (unsigned int j = 0; j < pauli_sums[i].size(); j++) {
             (*output_tensor)(i, j) = -2.0;
           }
           continue;
         }
@@ -337,7 +337,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
                                          sim, scratch, sv, unused_stats);
         // Compute expectations across all ops using this trajectory.
-        for (int j = 0; j < pauli_sums[i].size(); j++) {
+        for (unsigned int j = 0; j < pauli_sums[i].size(); j++) {
           int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
           if (run_samples[j] >= p_reps + rep_offset) {
             continue;
@@ -354,7 +354,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
         // Check if we have run enough trajectories for all ops.
         bool break_loop = true;
-        for (int j = 0; j < num_samples[i].size(); j++) {
+        for (unsigned int j = 0; j < num_samples[i].size(); j++) {
           int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
           if (run_samples[j] < p_reps + rep_offset) {
             break_loop = false;
@@ -364,7 +364,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
         if (break_loop) {
           // Lock writing to this batch index in output_tensor.
           batch_locks[i].lock();
-          for (int j = 0; j < num_samples[i].size(); j++) {
+          for (unsigned int j = 0; j < num_samples[i].size(); j++) {
             rolling_sums[j] /= num_samples[i][j];
             (*output_tensor)(i, j) += static_cast(rolling_sums[j]);
           }
diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc
index 77d6197ae..bf46214fb 100644
--- a/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc
+++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc
@@ -177,8 +177,8 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
     tensorflow::GuardedPhiloxRandom random_gen;
     int max_psum_length = 1;
     int max_n_shots = 1;
-    for (int i = 0; i < pauli_sums.size(); i++) {
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+    for (unsigned int i = 0; i < pauli_sums.size(); i++) {
+      for (unsigned int j = 0; j < pauli_sums[i].size(); j++) {
         max_psum_length =
             std::max(max_psum_length, pauli_sums[i][j].terms().size());
         max_n_shots = std::max(max_n_shots, num_samples[i][j]);
@@ -192,12 +192,12 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < ncircuits.size(); i++) {
+    for (unsigned int i = 0; i < ncircuits.size(); i++) {
       int nq = num_qubits[i];
       // (#679) Just ignore empty program
       if (ncircuits[i].channels.empty()) {
-        for (int j = 0; j < pauli_sums[i].size(); j++) {
+        for (unsigned int j = 0; j < pauli_sums[i].size(); j++) {
           (*output_tensor)(i, j) = -2.0;
         }
         continue;
       }
@@ -224,7 +224,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
                                        scratch, sv, unused_stats);
       // Use this trajectory as a source for all expectation calculations.
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+      for (unsigned int j = 0; j < pauli_sums[i].size(); j++) {
         if (run_samples[j] >= num_samples[i][j]) {
           continue;
         }
@@ -236,14 +236,14 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
         run_samples[j]++;
       }
       bool break_loop = true;
-      for (int j = 0; j < num_samples[i].size(); j++) {
+      for (unsigned int j = 0; j < num_samples[i].size(); j++) {
        if (run_samples[j] < num_samples[i][j]) {
          break_loop = false;
          break;
        }
      }
      if (break_loop) {
-        for (int j = 0; j < num_samples[i].size(); j++) {
+        for (unsigned int j = 0; j < num_samples[i].size(); j++) {
          rolling_sums[j] /= num_samples[i][j];
          (*output_tensor)(i, j) = static_cast(rolling_sums[j]);
        }
@@ -285,8 +285,8 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
     tensorflow::GuardedPhiloxRandom random_gen;
     int max_psum_length = 1;
     int max_n_shots = 1;
-    for (int i = 0; i < pauli_sums.size(); i++) {
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+    for (unsigned int i = 0; i < pauli_sums.size(); i++) {
+      for (unsigned int j = 0; j < pauli_sums[i].size(); j++) {
         max_psum_length =
             std::max(max_psum_length, pauli_sums[i][j].terms().size());
         max_n_shots = std::max(max_n_shots, num_samples[i][j]);
@@ -310,13 +310,13 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
       auto local_gen = random_gen.ReserveSamples128(num_rand);
       tensorflow::random::SimplePhilox rand_source(&local_gen);
-      for (int i = 0; i < ncircuits.size(); i++) {
+      for (unsigned int i = 0; i < ncircuits.size(); i++) {
         int nq = num_qubits[i];
         int rep_offset = rep_offsets[start][i];
         // (#679) Just ignore empty program
         if (ncircuits[i].channels.empty()) {
-          for (int j = 0; j < pauli_sums[i].size(); j++) {
+          for (unsigned int j = 0; j < pauli_sums[i].size(); j++) {
             (*output_tensor)(i, j) = -2.0;
           }
           continue;
         }
@@ -343,7 +343,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
                                          sim, scratch, sv, unused_stats);
         // Compute expectations across all ops using this trajectory.
-        for (int j = 0; j < pauli_sums[i].size(); j++) {
+        for (unsigned int j = 0; j < pauli_sums[i].size(); j++) {
           int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
           if (run_samples[j] >= p_reps + rep_offset) {
             continue;
@@ -360,7 +360,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
         // Check if we have run enough trajectories for all ops.
         bool break_loop = true;
-        for (int j = 0; j < num_samples[i].size(); j++) {
+        for (unsigned int j = 0; j < num_samples[i].size(); j++) {
           int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
           if (run_samples[j] < p_reps + rep_offset) {
             break_loop = false;
@@ -370,7 +370,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
         if (break_loop) {
           // Lock writing to this batch index in output_tensor.
           batch_locks[i].lock();
-          for (int j = 0; j < num_samples[i].size(); j++) {
+          for (unsigned int j = 0; j < num_samples[i].size(); j++) {
             rolling_sums[j] /= num_samples[i][j];
             (*output_tensor)(i, j) += static_cast(rolling_sums[j]);
           }
diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc
index 0e8321546..f5cca76b0 100644
--- a/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc
+++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc
@@ -97,12 +97,12 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
                                    programs.size(), num_cycles, construct_f);
     OP_REQUIRES_OK(context, parse_status);

-    int max_num_qubits = 0;
-    for (const int num : num_qubits) {
+    uint64_t max_num_qubits = 0;
+    for (const uint64_t num : num_qubits) {
       max_num_qubits = std::max(max_num_qubits, num);
     }

-    const int output_dim_size = maps.size();
+    const unsigned int output_dim_size = maps.size();
     tensorflow::TensorShape output_shape;
     output_shape.AddDim(output_dim_size);
     output_shape.AddDim(num_samples);
@@ -132,7 +132,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
  private:
   void ComputeLarge(const std::vector& num_qubits,
-                    const int max_num_qubits, const int num_samples,
+                    const uint64_t max_num_qubits, const int num_samples,
                     const std::vector& ncircuits,
                     tensorflow::OpKernelContext* context,
                     tensorflow::TTypes::Tensor* output_tensor) {
@@ -145,7 +145,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
         qsim::MultiQubitGateFuser, Simulator>;
     // Begin simulation.
-    int largest_nq = 1;
+    uint64_t largest_nq = 1;
     Simulator sim = Simulator(tfq_for);
     StateSpace ss = StateSpace(tfq_for);
     auto sv = ss.Create(largest_nq);
@@ -160,8 +160,8 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as nescessary.
-    for (int i = 0; i < ncircuits.size(); i++) {
-      int nq = num_qubits[i];
+    for (unsigned int i = 0; i < ncircuits.size(); i++) {
+      uint64_t nq = num_qubits[i];

       if (nq > largest_nq) {
         // need to switch to larger statespace.
@@ -203,7 +203,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
   }

   void ComputeSmall(const std::vector& num_qubits,
-                    const int max_num_qubits, const int num_samples,
+                    const uint64_t max_num_qubits, const int num_samples,
                     const std::vector& ncircuits,
                     tensorflow::OpKernelContext* context,
                     tensorflow::TTypes::Tensor* output_tensor) {
@@ -243,7 +243,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
     auto DoWork = [&](int start, int end) {
       // Begin simulation.
       const auto tfq_for = qsim::SequentialFor(1);
-      int largest_nq = 1;
+      uint64_t largest_nq = 1;
       Simulator sim = Simulator(tfq_for);
       StateSpace ss = StateSpace(tfq_for);
       auto sv = ss.Create(largest_nq);
@@ -255,8 +255,8 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
       auto local_gen = random_gen.ReserveSamples32(needed_random);
       tensorflow::random::SimplePhilox rand_source(&local_gen);

-      for (int i = 0; i < ncircuits.size(); i++) {
-        int nq = num_qubits[i];
+      for (unsigned int i = 0; i < ncircuits.size(); i++) {
+        uint64_t nq = num_qubits[i];
         int j = start > 0 ?
                     offset_prefix_sum[start - 1][i] : 0;
         int needed_samples = offset_prefix_sum[start][i] - j;
         if (needed_samples <= 0) {
diff --git a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc
index 7625c7962..04fc6cb34 100644
--- a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc
@@ -202,7 +202,8 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
       }

       ss.SetStateZero(sv);
-      for (int j = 0; j < full_fuse[i].size(); j++) {
+      for (std::vector>::size_type j = 0;
+           j < full_fuse[i].size(); j++) {
         qsim::ApplyFusedGate(sim, full_fuse[i][j], sv);
       }

@@ -231,13 +232,15 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
         // if applicable compute control qubit mask and control value bits.
         uint64_t mask = 0;
         uint64_t cbits = 0;
-        for (int k = 0; k < cur_gate.controlled_by.size(); k++) {
+        for (std::vector::size_type k = 0;
+             k < cur_gate.controlled_by.size(); k++) {
           uint64_t control_loc = cur_gate.controlled_by[k];
           mask |= uint64_t{1} << control_loc;
           cbits |= ((cur_gate.cmask >> k) & 1) << control_loc;
         }
-        for (int k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) {
+        for (std::vector::size_type k = 0;
+             k < gradient_gates[i][j - 1].grad_gates.size(); k++) {
           // Copy sv onto scratch2 in anticipation of non-unitary "gradient
           // gate".
           ss.Copy(sv, scratch2);
@@ -297,7 +300,10 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
     auto scratch = ss.Create(largest_nq);
     auto scratch2 = ss.Create(largest_nq);

-    for (int i = 0; i < partial_fused_circuits.size(); i++) {
+    for (std::vector<
+             std::vector>>>::size_type i =
+             0;
+         i < partial_fused_circuits.size(); i++) {
       int nq = num_qubits[i];

       if (nq > largest_nq) {
@@ -314,7 +320,8 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
       }

       ss.SetStateZero(sv);
-      for (int j = 0; j < full_fuse[i].size(); j++) {
+      for (std::vector>::size_type j = 0;
+           j < full_fuse[i].size(); j++) {
         qsim::ApplyFusedGate(sim, full_fuse[i][j], sv);
       }

@@ -342,13 +349,15 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
         // if applicable compute control qubit mask and control value bits.
         uint64_t mask = 0;
         uint64_t cbits = 0;
-        for (int k = 0; k < cur_gate.controlled_by.size(); k++) {
+        for (std::vector::size_type k = 0;
+             k < cur_gate.controlled_by.size(); k++) {
           uint64_t control_loc = cur_gate.controlled_by[k];
           mask |= uint64_t{1} << control_loc;
           cbits |= ((cur_gate.cmask >> k) & 1) << control_loc;
         }
-        for (int k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) {
+        for (std::vector::size_type k = 0;
+             k < gradient_gates[i][j - 1].grad_gates.size(); k++) {
           // Copy sv onto scratch2 in anticipation of non-unitary "gradient
           // gate".
           ss.Copy(sv, scratch2);
diff --git a/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc b/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc
index b06a7faef..bbe525227 100644
--- a/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc
@@ -116,7 +116,8 @@ class TfqCalculateUnitaryOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the unitary as nescessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (std::vector>>::size_type i = 0;
+         i < fused_circuits.size(); i++) {
       int nq = num_qubits[i];
       UCalculator sim = UCalculator(tfq_for);
       UnitarySpace us = UnitarySpace(tfq_for);
@@ -126,7 +127,8 @@ class TfqCalculateUnitaryOp : public tensorflow::OpKernel {
         u = us.CreateUnitary(nq);
       }
       us.SetIdentity(u);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (std::vector>::size_type j = 0;
+           j < fused_circuits[i].size(); j++) {
         qsim::ApplyFusedGate(sim, fused_circuits[i][j], u);
       }

diff --git a/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc b/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc
index 3423177b0..963640902 100644
--- a/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc
@@ -130,7 +130,7 @@ class TfqPsSymbolReplaceOp : public tensorflow::OpKernel {
         ->workers->TransformRangeConcurrently(
             block_size, programs.size() * n_symbols, DoWork);

-    size_t biggest_pad = 0;
+    std::vector::size_type biggest_pad = 0;
     Program empty = Program();
     empty.mutable_language()->set_gate_set("tfq_gate_set");
     empty.mutable_circuit();  // create empty circuits entry.
@@ -163,12 +163,14 @@ class TfqPsSymbolReplaceOp : public tensorflow::OpKernel {
       for (int i = start; i < end; i++) {
         int sidx = i % n_symbols;
         int pidx = i / n_symbols;
-        for (int j = 0; j < output_programs.at(pidx).at(sidx).size(); j++) {
+        for (std::vector::size_type j = 0;
+             j < output_programs.at(pidx).at(sidx).size(); j++) {
           output_tensor(pidx, sidx, j) =
               output_programs.at(pidx).at(sidx).at(j);
         }
-        for (int j = output_programs.at(pidx).at(sidx).size(); j < biggest_pad;
-             j++) {
+        for (std::vector::size_type j =
+                 output_programs.at(pidx).at(sidx).size();
+             j < biggest_pad; j++) {
           output_tensor(pidx, sidx, j) = empty_program;
         }
       }
diff --git a/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc b/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc
index 94aaf6641..38177f78c 100644
--- a/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc
@@ -146,7 +146,8 @@ class TfqPsWeightsFromSymbolOp : public tensorflow::OpKernel {
     auto DoWork2 = [&](int start, int end) {
       for (int i = start; i < end; i++) {
         for (int j = 0; j < n_symbols; j++) {
-          for (int k = 0; k < output_results.at(i).at(j).size(); k++) {
+          for (std::vector::size_type k = 0;
+               k < output_results.at(i).at(j).size(); k++) {
             output_tensor(i, j, k) = output_results.at(i).at(j).at(k);
           }
           for (int k = output_results.at(i).at(j).size();
diff --git a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc
index e8499f482..6298d8d08 100644
--- a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc
@@ -143,7 +143,8 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (std::vector>>::size_type i = 0;
+         i < fused_circuits.size(); i++) {
       int nq = num_qubits[i];

       if (nq > largest_nq) {
@@ -156,10 +157,12 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel {
       // the state if there is a possibility that circuit[i] and
       // circuit[i + 1] produce the same state.
       ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (std::vector>::size_type j = 0;
+           j < fused_circuits[i].size(); j++) {
         qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
       }
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+      for (std::vector::size_type j = 0;
+           j < pauli_sums[i].size(); j++) {
         // (#679) Just ignore empty program
         if (fused_circuits[i].size() == 0) {
           (*output_tensor)(i, j) = -2.0;
@@ -221,7 +224,8 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel {
         // no need to update scratch_state since ComputeExpectation
         // will take care of things for us.
         ss.SetStateZero(sv);
-        for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
+        for (std::vector>::size_type j = 0;
+             j < fused_circuits[cur_batch_index].size(); j++) {
           qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv);
         }
       }
diff --git a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc
index 0452f2750..f3270460d 100644
--- a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc
@@ -175,7 +175,8 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (std::vector>>::size_type i = 0;
+         i < fused_circuits.size(); i++) {
       int nq = num_qubits[i];

       if (nq > largest_nq) {
@@ -188,10 +189,12 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel {
       // the state if there is a possibility that circuit[i] and
       // circuit[i + 1] produce the same state.
       ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (std::vector>::size_type j = 0;
+           j < fused_circuits[i].size(); j++) {
         qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
       }
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+      for (std::vector::size_type j = 0;
+           j < pauli_sums[i].size(); j++) {
         // (#679) Just ignore empty program
         if (fused_circuits[i].size() == 0) {
           (*output_tensor)(i, j) = -2.0;
@@ -273,7 +276,8 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel {
         // no need to update scratch_state since ComputeExpectation
         // will take care of things for us.
         ss.SetStateZero(sv);
-        for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
+        for (std::vector>::size_type j = 0;
+             j < fused_circuits[cur_batch_index].size(); j++) {
           qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv);
         }
       }
diff --git a/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc
index f9ea8c4a3..5286dcb97 100644
--- a/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc
@@ -154,7 +154,8 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as nescessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (std::vector>>::size_type i = 0;
+         i < fused_circuits.size(); i++) {
       int nq = num_qubits[i];

       if (nq > largest_nq) {
@@ -163,13 +164,14 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel {
         sv = ss.Create(largest_nq);
       }
       ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (std::vector>::size_type j = 0;
+           j < fused_circuits[i].size(); j++) {
         qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
       }

       auto samples = ss.Sample(sv, num_samples, rand_source.Rand32());
       for (int j = 0; j < num_samples; j++) {
-        uint64_t q_ind = 0;
+        int q_ind = 0;
         uint64_t mask = 1;
         bool val = 0;
         while (q_ind < nq) {
@@ -219,13 +221,14 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel {
         sv = ss.Create(largest_nq);
       }
       ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (std::vector>::size_type j = 0;
+           j < fused_circuits[i].size(); j++) {
         qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
       }

       auto samples = ss.Sample(sv, num_samples, rand_source.Rand32());
       for (int j = 0; j < num_samples; j++) {
-        uint64_t q_ind = 0;
+        int q_ind = 0;
         uint64_t mask = 1;
         bool val = 0;
         while (q_ind < nq) {
diff --git a/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc
index 6d74f18b0..cd4e01001 100644
--- a/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc
@@ -136,7 +136,8 @@ class TfqSimulateStateOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as nescessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (std::vector>>::size_type i = 0;
+         i < fused_circuits.size(); i++) {
       int nq = num_qubits[i];

       if (nq > largest_nq) {
@@ -145,7 +146,8 @@ class TfqSimulateStateOp : public tensorflow::OpKernel {
         sv = ss.Create(largest_nq);
       }
       ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (std::vector>::size_type j = 0;
+           j < fused_circuits[i].size(); j++) {
        qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
      }

@@ -194,7 +196,8 @@ class TfqSimulateStateOp : public tensorflow::OpKernel {
         sv = ss.Create(largest_nq);
       }
       ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (std::vector>::size_type j = 0;
+           j < fused_circuits[i].size(); j++) {
         qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
       }

diff --git a/tensorflow_quantum/core/src/adj_util.cc b/tensorflow_quantum/core/src/adj_util.cc
index ceb76b2c1..92db7100e 100644
--- a/tensorflow_quantum/core/src/adj_util.cc
+++ b/tensorflow_quantum/core/src/adj_util.cc
@@ -38,7 +38,8 @@ void CreateGradientCircuit(
     const QsimCircuit& circuit, const std::vector& metadata,
     std::vector>>* partial_fuses,
    std::vector* grad_gates) {
-  for (int i = 0; i < metadata.size(); i++) {
+  for (std::vector::size_type i = 0; i < metadata.size();
+       i++) {
     if (metadata[i].symbol_values.empty()) {
       continue;
     }
@@ -78,7 +79,8 @@ void CreateGradientCircuit(
     // PhasedX
     else if (circuit.gates[i].kind == qsim::Cirq::GateKind::kPhasedXPowGate) {
       // Process potentially several symbols.
-      for (int j = 0; j < metadata[i].symbol_values.size(); j++) {
+      for (std::vector::size_type j = 0;
+           j < metadata[i].symbol_values.size(); j++) {
         if (metadata[i].placeholder_names[j] ==
             GateParamNames::kPhaseExponent) {
           PopulateGradientPhasedXPhasedExponent(
@@ -103,7 +105,8 @@
       // Process potentially several symbols.
       bool swapq = circuit.gates[i].swapped;
-      for (int j = 0; j < metadata[i].symbol_values.size(); j++) {
+      for (std::vector::size_type j = 0;
+           j < metadata[i].symbol_values.size(); j++) {
         if (metadata[i].placeholder_names[j] == GateParamNames::kTheta) {
           PopulateGradientFsimTheta(
               metadata[i].symbol_values[j], i,
@@ -128,7 +131,8 @@
              qsim::Cirq::GateKind::kPhasedISwapPowGate) {
       // Process potentially several symbols.
       bool swapq = circuit.gates[i].swapped;
-      for (int j = 0; j < metadata[i].symbol_values.size(); j++) {
+      for (std::vector::size_type j = 0;
+           j < metadata[i].symbol_values.size(); j++) {
         if (metadata[i].placeholder_names[j] ==
             GateParamNames::kPhaseExponent) {
           PopulateGradientPhasedISwapPhasedExponent(
@@ -159,7 +163,8 @@
   partial_fuses->assign(grad_gates->size() + 1,
                         std::vector>({}));

-  for (int i = 0; i < grad_gates->size(); i++) {
+  for (std::vector::size_type i = 0;
+       i < grad_gates->size(); i++) {
     right = circuit.gates.begin() + (*grad_gates)[i].index;
     (*partial_fuses)[i] =
         fuser.FuseGates(qsim::BasicGateFuser::Parameter(),
diff --git a/tensorflow_quantum/core/src/circuit_parser_qsim.cc b/tensorflow_quantum/core/src/circuit_parser_qsim.cc
index 8a7b2d490..09cfccad4 100644
--- a/tensorflow_quantum/core/src/circuit_parser_qsim.cc
+++ b/tensorflow_quantum/core/src/circuit_parser_qsim.cc
@@ -156,7 +156,7 @@ inline Status SingleConstantGate(
     const unsigned int num_qubits, const unsigned int time,
     QsimCircuit* circuit, std::vector* metadata) {
   unsigned int q0;
-  (void)absl::SimpleAtoi(op.qubits(0).id(), &q0);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0);
   auto gate = create_f(time, num_qubits - q0 - 1);
   Status s = OptionalInsertControls(op, num_qubits, &gate);
   if (!s.ok()) {
@@ -181,8 +181,8 @@ inline Status TwoConstantGate(
     const unsigned int num_qubits, const unsigned int time,
     QsimCircuit* circuit, std::vector* metadata) {
   unsigned int q0, q1;
-  bool unused = absl::SimpleAtoi(op.qubits(0).id(), &q0);
-  unused = absl::SimpleAtoi(op.qubits(1).id(), &q1);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0);
+  std::ignore = absl::SimpleAtoi(op.qubits(1).id(), &q1);
   auto gate = create_f(time, num_qubits - q0 - 1, num_qubits - q1 - 1);
   Status s = OptionalInsertControls(op, num_qubits, &gate);
   if (!s.ok()) {
@@ -207,10 +207,9 @@ inline Status SingleEigenGate(
     const unsigned int num_qubits, const unsigned int time,
     QsimCircuit* circuit, std::vector* metadata) {
   unsigned int q0;
-  bool unused;
   float exp, exp_s, gs;
   Status u;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q0);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0);
   absl::optional exponent_symbol;
   u = ParseProtoArg(op, "exponent", param_map, &exp, &exponent_symbol);
@@ -257,10 +256,9 @@ inline Status TwoEigenGate(
     QsimCircuit* circuit, std::vector* metadata) {
   unsigned int q0, q1;
   float exp, exp_s, gs;
-  bool unused;
   Status u;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q0);
-  unused = absl::SimpleAtoi(op.qubits(1).id(), &q1);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0);
+  std::ignore = absl::SimpleAtoi(op.qubits(1).id(), &q1);
   absl::optional exponent_symbol;
   u = ParseProtoArg(op, "exponent",
                     param_map, &exp, &exponent_symbol);
@@ -396,10 +394,9 @@ inline Status PhasedXGate(const Operation& op, const SymbolMap& param_map,
                           const unsigned int time, QsimCircuit* circuit,
                           std::vector* metadata) {
   int q0;
-  bool unused;
   float pexp, pexp_s, exp, exp_s, gs;
   Status u;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q0);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0);
   absl::optional exponent_symbol;
   u = ParseProtoArg(op, "exponent", param_map, &exp, &exponent_symbol);
@@ -456,11 +453,10 @@ inline Status FsimGate(const Operation& op, const SymbolMap& param_map,
                        QsimCircuit* circuit,
                        std::vector* metadata) {
   int q0, q1;
-  bool unused;
   float theta, theta_s, phi, phi_s;
   Status u;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q0);
-  unused = absl::SimpleAtoi(op.qubits(1).id(), &q1);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0);
+  std::ignore = absl::SimpleAtoi(op.qubits(1).id(), &q1);
   absl::optional theta_symbol;
   u = ParseProtoArg(op, "theta", param_map, &theta, &theta_symbol);
@@ -513,11 +509,10 @@ inline Status PhasedISwapGate(const Operation& op, const SymbolMap& param_map,
                               const unsigned int time, QsimCircuit* circuit,
                               std::vector* metadata) {
   int q0, q1;
-  bool unused;
   float pexp, pexp_s, exp, exp_s;
   Status u;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q0);
-  unused = absl::SimpleAtoi(op.qubits(1).id(), &q1);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q0);
+  std::ignore = absl::SimpleAtoi(op.qubits(1).id(), &q1);
   absl::optional exponent_symbol;
   u = ParseProtoArg(op, "exponent", param_map, &exp, &exponent_symbol);
@@ -604,10 +599,9 @@ inline Status AsymmetricDepolarizingChannel(const Operation& op,
                                             const unsigned int time,
                                             NoisyQsimCircuit* ncircuit) {
   int q;
-  bool unused;
   float p_x, p_y, p_z;
   Status u;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q);
   u = ParseProtoArg(op, "p_x", {}, &p_x);
   u = ParseProtoArg(op, "p_y", {}, &p_y);
@@ -626,10 +620,9 @@ inline Status DepolarizingChannel(const Operation& op,
                                   const unsigned int time,
                                   NoisyQsimCircuit* ncircuit) {
   int q;
-  bool unused;
   float p;
   Status u;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q);
   u = ParseProtoArg(op, "p", {}, &p);
   if (!u.ok()) {
@@ -644,10 +637,9 @@ inline Status DepolarizingChannel(const Operation& op,
 inline Status GADChannel(const Operation& op, const unsigned int num_qubits,
                          const unsigned int time, NoisyQsimCircuit* ncircuit) {
   int q;
-  bool unused;
   float p, gamma;
   Status u;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q);
   u = ParseProtoArg(op, "p", {}, &p);
   if (!u.ok()) {
@@ -668,8 +660,7 @@ inline Status ResetChannel(const Operation& op, const unsigned int num_qubits,
                            const unsigned int time,
                            NoisyQsimCircuit* ncircuit) {
   int q;
-  bool unused;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q);
   auto chan = qsim::Cirq::ResetChannel::Create(time, num_qubits - q - 1);
   ncircuit->channels.push_back(chan);
@@ -681,10 +672,9 @@ inline Status AmplitudeDampingChannel(const Operation& op,
                                       const unsigned int time,
                                       NoisyQsimCircuit* ncircuit) {
   int q;
-  bool unused;
   float gamma;
   Status u;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q);
   u = ParseProtoArg(op, "gamma", {}, &gamma);
   if (!u.ok()) {
@@ -701,10 +691,9 @@ inline Status PhaseDampingChannel(const Operation& op,
                                   const unsigned int time,
                                   NoisyQsimCircuit* ncircuit) {
   int q;
-  bool unused;
   float gamma;
   Status u;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q);
   u = ParseProtoArg(op, "gamma", {}, &gamma);
   if (!u.ok()) {
@@ -722,10 +711,9 @@ inline Status PhaseFlipChannel(const Operation& op,
                                const unsigned int time,
                                NoisyQsimCircuit* ncircuit) {
   int q;
-  bool unused;
   float p;
   Status u;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q);
   u = ParseProtoArg(op, "p", {}, &p);
   if (!u.ok()) {
@@ -742,10 +730,9 @@ inline Status BitFlipChannel(const Operation& op, const unsigned int num_qubits,
                              const unsigned int time,
                              NoisyQsimCircuit* ncircuit) {
   int q;
-  bool unused;
   float p;
   Status u;
-  unused = absl::SimpleAtoi(op.qubits(0).id(), &q);
+  std::ignore = absl::SimpleAtoi(op.qubits(0).id(), &q);
   u = ParseProtoArg(op, "p", {}, &p);
   if (!u.ok()) {
@@ -843,8 +830,8 @@ tensorflow::Status QsimCircuitFromProgram(
     std::vector* metadata /*=nullptr*/) {
   // Convert proto to qsim internal representation.
   circuit->num_qubits = num_qubits;
-  int time = 0;
   bool unused;
+  int time = 0;
   // Special case empty.
   if (num_qubits <= 0) {
     return Status::OK();
diff --git a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc
index 4cfb40424..1bb0194cd 100644
--- a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc
+++ b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc
@@ -64,7 +64,8 @@ Arg MakeControlArg(const std::string& val) {
 }

 inline void AssertControlEqual(const QsimGate& a, const QsimGate& b) {
-  for (int i = 0; i < a.controlled_by.size(); i++) {
+  for (std::vector::size_type i = 0; i < a.controlled_by.size();
+       i++) {
     ASSERT_EQ(a.controlled_by[i], b.controlled_by[i]);
   }
   ASSERT_EQ(a.cmask, b.cmask);
@@ -89,14 +90,14 @@ inline void AssertOneQubitEqual(const QsimGate& a, const QsimGate& b) {

 inline void AssertChannelEqual(const QsimChannel& a, const QsimChannel& b) {
   ASSERT_EQ(a.size(), b.size());
-  for (int i = 0; i < a.size(); i++) {
+  for (long unsigned int i = 0; i < a.size(); i++) {
     ASSERT_EQ(a[i].kind, b[i].kind);
     ASSERT_EQ(a[i].unitary, b[i].unitary);
     ASSERT_NEAR(a[i].prob, b[i].prob, 1e-5);
     auto a_k_ops = a[i].ops;
     auto b_k_ops = b[i].ops;
     EXPECT_EQ(a_k_ops.size(), b_k_ops.size());
-    for (int j = 0; j < a_k_ops.size(); j++) {
+    for (long unsigned int j = 0; j < a_k_ops.size(); j++) {
       AssertOneQubitEqual(a_k_ops[j], b_k_ops[j]);
     }
   }
@@ -1535,7 +1536,7 @@ TEST(QsimCircuitParserTest, NoisyEmpty) {
   Program program_proto;
   Circuit* circuit_proto = program_proto.mutable_circuit();
   circuit_proto->set_scheduling_strategy(circuit_proto->MOMENT_BY_MOMENT);
-  (void)circuit_proto->add_moments();
+  std::ignore = circuit_proto->add_moments();

   NoisyQsimCircuit test_circuit;
   ASSERT_EQ(
diff --git a/tensorflow_quantum/core/src/util_qsim.h b/tensorflow_quantum/core/src/util_qsim.h
index 933f1582d..1d4ab1890 100644
--- a/tensorflow_quantum/core/src/util_qsim.h
+++ b/tensorflow_quantum/core/src/util_qsim.h
@@ -1,5 +1,4 @@
 /* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -235,7 +234,7 @@ tensorflow::Status ComputeSampledExpectationQsim(
     unsigned int location;
     // GridQubit id should be parsed down to integer at this upstream
     // so it is safe to just use atoi.
-    (void)absl::SimpleAtoi(pair.qubit_id(), &location);
+    std::ignore = absl::SimpleAtoi(pair.qubit_id(), &location);
     // Parity functions use little-endian indexing
     parity_bits.push_back(state.num_qubits() - location - 1);
   }
@@ -359,13 +358,13 @@ static void BalanceTrajectory(const std::vector>& num_samples,
   std::vector rep_limits(num_samples.size(), -1);
   std::vector height(num_threads, 0);

-  for (int i = 0; i < num_samples.size(); i++) {
-    for (int j = 0; j < num_samples[i].size(); j++) {
+  for (unsigned int i = 0; i < num_samples.size(); i++) {
+    for (unsigned int j = 0; j < num_samples[i].size(); j++) {
       rep_limits[i] = std::max(rep_limits[i], num_samples[i][j]);
     }
   }
   int prev_max_height = -1;
-  for (int j = 0; j < num_samples.size(); j++) {
+  for (unsigned int j = 0; j < num_samples.size(); j++) {
     int run_ceiling = ((rep_limits[j] + num_threads - 1) / num_threads);
     int num_lo = num_threads * run_ceiling - rep_limits[j];
     int num_hi = num_threads - num_lo;
@@ -404,7 +403,7 @@ static void BalanceTrajectory(const int& num_samples, const int& num_threads,
   std::vector height(num_threads, 0);

   int prev_max_height = -1;
-  for (int j = 0; j < (*thread_offsets)[0].size(); j++) {
+  for (unsigned int j = 0; j < (*thread_offsets)[0].size(); j++) {
     int run_ceiling = ((num_samples + num_threads - 1) / num_threads);
     int num_lo = num_threads * run_ceiling - num_samples;
     int num_hi = num_threads - num_lo;
diff --git a/tensorflow_quantum/core/src/util_qsim_test.cc b/tensorflow_quantum/core/src/util_qsim_test.cc
index b4f630f3c..bdd54580b 100644
--- a/tensorflow_quantum/core/src/util_qsim_test.cc
+++ b/tensorflow_quantum/core/src/util_qsim_test.cc
@@ -493,8 +493,8 @@ TEST(UtilQsimTest, AccumulateOperatorsBasic) {
   p_term_scratch2->set_coefficient_real(-5.0);

   // 0.5 * (0.123ZX -3X + 4I) + 0.25 * (-5I) applied onto psi.
-  (void)AccumulateOperators({p_sum, p_sum2}, {0.5, 0.25}, sim, ss, sv, scratch,
-                            dest);
+  std::ignore = AccumulateOperators({p_sum, p_sum2}, {0.5, 0.25}, sim, ss, sv,
+                                    scratch, dest);

   // Check that dest got accumulated onto.
   EXPECT_NEAR(ss.GetAmpl(dest, 0).real(), 0.577925, 1e-5);
@@ -536,7 +536,7 @@ TEST(UtilQsimTest, AccumulateOperatorsEmpty) {
   auto scratch = ss.Create(2);
   auto dest = ss.Create(2);

-  (void)AccumulateOperators({}, {}, sim, ss, sv, scratch, dest);
+  std::ignore = AccumulateOperators({}, {}, sim, ss, sv, scratch, dest);

   // Check sv is still in zero state.
   EXPECT_NEAR(ss.GetAmpl(sv, 0).real(), 1.0, 1e-5);
@@ -600,7 +600,8 @@ TEST(UtilQsimTest, AccumulateFusedCircuitsBasic) {
   // Initialize coeffs.
   std::vector coeffs = {1.23, 4.56};

-  (void)AccumulateFusedCircuits(coeffs, fused_circuits, sim, ss, scratch, dest);
+  std::ignore =
+      AccumulateFusedCircuits(coeffs, fused_circuits, sim, ss, scratch, dest);

   // Scratch has coeffs[r][c] * fused circuits[r][c] where r, c = last indices.
   // Check that dest got accumulated onto.
@@ -629,7 +630,7 @@ TEST(UtilQsimTest, AccumulateFusedCircuitsEmpty) {
   auto scratch = ss.Create(2);
   auto dest = ss.Create(2);

-  (void)AccumulateFusedCircuits({}, {}, sim, ss, scratch, dest);
+  std::ignore = AccumulateFusedCircuits({}, {}, sim, ss, scratch, dest);

   // scratch has garbage value.
   // Check that dest contains all zeros.
@@ -646,13 +647,13 @@ static void AssertWellBalanced(const std::vector>& n_reps,
                                const int& num_threads,
                                const std::vector>& offsets) {
   auto max_work = std::vector(n_reps.size(), -1);
-  for (int i = 0; i < n_reps.size(); i++) {
-    for (int j = 0; j < n_reps[0].size(); j++) {
+  for (std::vector>::size_type i = 0; i < n_reps.size(); i++) {
+    for (std::vector::size_type j = 0; j < n_reps[0].size(); j++) {
       max_work[i] = std::max(max_work[i], n_reps[i][j]);
     }
   }
-  for (int i = 0; i < n_reps.size(); i++) {
+  for (std::vector>::size_type i = 0; i < n_reps.size(); i++) {
     int sum = 0;
     int prev_local_work = 0;
     for (int k = 0; k < num_threads; k++) {
diff --git a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py b/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py
index 60714b2e4..c3c07c74c 100644
--- a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py
+++ b/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py
@@ -68,7 +68,6 @@ def test_sampled_expectation_symbol_input(self):
         sampled_expectation.SampledExpectation(
             differentiator=linear_combination.ForwardDifference())

-
     def test_sampled_expectation_instantiate_error(self):
         """Test that SampledExpectation errors with bad inputs."""
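
Note (illustrative, not part of the patch): the loop-index and std::ignore changes above are the kind of edits that silence signed/unsigned comparison warnings and unused-variable warnings. A minimal standalone sketch of the warning pattern being avoided, assuming a compiler flag such as -Wsign-compare, is shown below; the function and names here are hypothetical and exist only for illustration.

#include <cstddef>
#include <vector>

int SumAll(const std::vector<int>& xs) {
  int total = 0;
  // Comparing a signed `int i` against xs.size() (an unsigned std::size_t)
  // triggers -Wsign-compare; using the container's size_type does not.
  for (std::vector<int>::size_type i = 0; i < xs.size(); i++) {
    total += xs[i];
  }
  return total;
}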