Skip to content

Commit 8c16829

Browse files
author
Remi Taylor
committed
BigQuery sample: import data (streaming insert)
1 parent d10069f commit 8c16829

2 files changed

Lines changed: 61 additions & 10 deletions

File tree

bigquery/spec/bigquery_sample_spec.rb

Lines changed: 28 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -212,10 +212,8 @@ def capture &block
212212

213213
expect(loaded_data).not_to be_empty
214214
expect(loaded_data.count).to eq 2
215-
expect(loaded_data.first["name"]).to eq "Alice"
216-
expect(loaded_data.first["value"]).to eq 5
217-
expect(loaded_data.last["name"]).to eq "Bob"
218-
expect(loaded_data.last["value"]).to eq 10
215+
expect(loaded_data).to include({ "name" => "Alice", "value" => 5 })
216+
expect(loaded_data).to include({ "name" => "Bob", "value" => 10 })
219217
end
220218

221219
example "import data from Cloud Storage" do
@@ -250,13 +248,34 @@ def capture &block
250248

251249
expect(loaded_data).not_to be_empty
252250
expect(loaded_data.count).to eq 2
253-
expect(loaded_data.first["name"]).to eq "Alice"
254-
expect(loaded_data.first["value"]).to eq 5
255-
expect(loaded_data.last["name"]).to eq "Bob"
256-
expect(loaded_data.last["value"]).to eq 10
251+
expect(loaded_data).to include({ "name" => "Alice", "value" => 5 })
252+
expect(loaded_data).to include({ "name" => "Bob", "value" => 10 })
257253
end
258254

259-
# Verifies the streaming-insert sample: rows passed to import_table_data
# should appear in the table and the sample should report success on stdout.
example "stream data import" do
  expect(@table.data).to be_empty

  row_data_to_insert = [
    { name: "Alice", value: 5 },
    { name: "Bob",   value: 10 }
  ]

  expect {
    import_table_data project_id: @project_id,
                      dataset_id: @dataset.dataset_id,
                      table_id:   @table.table_id,
                      row_data:   row_data_to_insert
  }.to output(
    # import_table_data prints this line when InsertResponse#success? is true
    "Inserted rows successfully\n"
  ).to_stdout

  loaded_data = @table.data

  expect(loaded_data).not_to be_empty
  expect(loaded_data.count).to eq 2
  expect(loaded_data).to include({ "name" => "Alice", "value" => 5 })
  expect(loaded_data).to include({ "name" => "Bob", "value" => 10 })
end
260279
end
261280

262281
describe "Exporting data" do

bigquery/tables.rb

Lines changed: 33 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,30 @@ def list_table_data project_id:, dataset_id:, table_id:
8787
# [END list_table_data]
8888
end
8989

90+
# Streams row_data into an existing BigQuery table using the tabledata
# streaming-insert API (Table#insert) and reports the outcome on stdout.
#
# project_id - Google Cloud project ID
# dataset_id - ID of the dataset containing the table
# table_id   - ID of the table to import data into
# row_data   - Array of Hashes, one per row, keyed by column name
def import_table_data project_id:, dataset_id:, table_id:, row_data:
  # [START import_table_data]
  # project_id = "Your Google Cloud project ID"
  # dataset_id = "ID of the dataset containing table"
  # table_id = "ID of the table to import data into"
  # row_data = [{ column1: value, column2: value }, ...]

  require "google/cloud"

  # Resolve the target table: project -> BigQuery service -> dataset -> table.
  bigquery = Google::Cloud.new(project_id).bigquery
  table    = bigquery.dataset(dataset_id).table(table_id)

  # Streaming insert of the given rows.
  response = table.insert row_data

  if response.success?
    puts "Inserted rows successfully"
  else
    # error_rows lists the rows that BigQuery rejected.
    puts "Failed to insert #{response.error_rows.count} rows"
  end
  # [END import_table_data]
end
113+
90114
def import_table_data_from_file project_id:, dataset_id:, table_id:,
91115
local_file_path:
92116
# [START import_table_data_from_file]
@@ -117,7 +141,7 @@ def import_table_data_from_cloud_storage project_id:, dataset_id:, table_id:,
117141
# [START import_table_data_from_cloud_storage]
118142
# project_id = "Your Google Cloud project ID"
119143
# dataset_id = "ID of the dataset containing table"
120-
# table_id = "ID of the table to import file data into"
144+
# table_id = "ID of the table to import data into"
121145
# storage_path = "Storage path to file to import, eg. gs://bucket/file.csv"
122146

123147
require "google/cloud"
@@ -204,6 +228,8 @@ def run_query_as_job project_id:, query_string:
204228
end
205229

206230
if __FILE__ == $PROGRAM_NAME
231+
require "json"
232+
207233
project_id = ENV["GOOGLE_CLOUD_PROJECT"]
208234
command = ARGV.shift
209235

@@ -233,6 +259,11 @@ def run_query_as_job project_id:, query_string:
233259
dataset_id: ARGV.shift,
234260
table_id: ARGV.shift,
235261
storage_path: ARGV.shift
262+
when "import_data"
263+
import_table_data project_id: project_id,
264+
dataset_id: ARGV.shift,
265+
table_id: ARGV.shift,
266+
row_data: JSON.parse(ARGV.shift)
236267
when "export"
237268
export_table_data_to_cloud_storage project_id: project_id,
238269
dataset_id: ARGV.shift,
@@ -253,6 +284,7 @@ def run_query_as_job project_id:, query_string:
253284
list_data <dataset_id> <table_id> List data in table with the specified ID
254285
import_file <dataset_id> <table_id> <file_path>
255286
import_gcs <dataset_id> <table_id> <cloud_storage_path>
287+
import_data <dataset_id> <table_id> "[{ <json row data> }]"
256288
export <dataset_id> <table_id> <cloud_storage_path>
257289
query <query>
258290
query_job <query>

0 commit comments

Comments
 (0)