@@ -117,7 +117,7 @@ def delete_table project_id:, dataset_id:, table_id:
 def list_table_data project_id:, dataset_id:, table_id:
   # [START list_table_data]
   # project_id = "Your Google Cloud project ID"
-  # dataset_id = "ID of the dataset delete table from "
+  # dataset_id = "ID of the dataset containing table"
   # table_id = "ID of the table to display data for"

   require "google/cloud"
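
The rest of list_table_data falls outside this hunk; a minimal sketch of how a row-listing sample typically continues with this client (an assumption, not the commit's actual body):

  gcloud   = Google::Cloud.new project_id
  bigquery = gcloud.bigquery
  dataset  = bigquery.dataset dataset_id
  table    = dataset.table table_id

  # Each row comes back as a hash of column name => value.
  table.data.each do |row|
    puts row.inspect
  end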
@@ -139,7 +139,7 @@ def import_table_data_from_file project_id:, dataset_id:, table_id:,
                                 local_file_path:
   # [START import_table_data_from_file]
   # project_id = "Your Google Cloud project ID"
-  # dataset_id = "ID of the dataset delete table from "
+  # dataset_id = "ID of the dataset containing table"
   # table_id = "ID of the table to import file data into"
   # local_file_path = "Path to local file to import into BigQuery table"

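The method body lies outside this hunk; judging from the Cloud Storage variant below, a hedged sketch of what the local-file load looks like with this client (the File.open call is an assumption; some versions also accept a plain path):

  table = bigquery.dataset(dataset_id).table table_id

  puts "Importing data from file: #{local_file_path}"
  load_job = table.load File.open(local_file_path)

  puts "Waiting for load job to complete: #{load_job.job_id}"
  load_job.wait_until_done!

  puts "Data imported"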
@@ -162,9 +162,9 @@ def import_table_data_from_file project_id:, dataset_id:, table_id:,

 def import_table_data_from_cloud_storage project_id:, dataset_id:, table_id:,
                                          storage_path:
-  # [START import_table_data_from_file]
+  # [START import_table_data_from_cloud_storage]
   # project_id = "Your Google Cloud project ID"
-  # dataset_id = "ID of the dataset delete table from "
+  # dataset_id = "ID of the dataset containing table"
   # table_id = "ID of the table to import file data into"
   # storage_path = "Storage path to file to import, eg. gs://bucket/file.csv"

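A hypothetical invocation of this sample (project, dataset, table, and bucket names are placeholders):

  import_table_data_from_cloud_storage project_id:   "my-project",
                                       dataset_id:   "my_dataset",
                                       table_id:     "my_table",
                                       storage_path: "gs://my-bucket/data.csv"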
@@ -182,7 +182,73 @@ def import_table_data_from_cloud_storage project_id:, dataset_id:, table_id:,
   load_job.wait_until_done!

   puts "Data imported"
-  # [END import_table_data_from_file]
+  # [END import_table_data_from_cloud_storage]
+end
+
+def export_table_data_to_cloud_storage project_id:, dataset_id:, table_id:,
+                                       storage_path:
+  # [START export_table_data_to_cloud_storage]
+  # project_id = "Your Google Cloud project ID"
+  # dataset_id = "ID of the dataset containing table"
+  # table_id = "ID of the table to export file data from"
+  # storage_path = "Storage path to export to, eg. gs://bucket/file.csv"
+
+  require "google/cloud"
+
+  gcloud = Google::Cloud.new project_id
+  bigquery = gcloud.bigquery
+  dataset = bigquery.dataset dataset_id
+  table = dataset.table table_id
+
+  puts "Exporting data to Cloud Storage file: #{storage_path}"
+  extract_job = table.extract storage_path
+
+  puts "Waiting for extract job to complete: #{extract_job.job_id}"
+  extract_job.wait_until_done!
+
+  puts "Data exported"
+  # [END export_table_data_to_cloud_storage]
+end
+
+def run_query_sync project_id:, query_string:
+  # [START run_query_sync]
+  # project_id = "Your Google Cloud project ID"
+  # query_string = "Query string to execute (using BigQuery query syntax)"
+
+  require "google/cloud"
+
+  gcloud = Google::Cloud.new project_id
+  bigquery = gcloud.bigquery
+
+  data = bigquery.query query_string
+
+  data.each do |row|
+    puts row.inspect
+  end
+  # [END run_query_sync]
+end
+
+def run_query_async project_id:, query_string:
+  # [START run_query_async]
+  # project_id = "Your Google Cloud project ID"
+  # query_string = "Query string to execute (using BigQuery query syntax)"
+
+  require "google/cloud"
+
+  gcloud = Google::Cloud.new project_id
+  bigquery = gcloud.bigquery
+
+  puts "Running query"
+  query_job = bigquery.query_job query_string
+
+  puts "Waiting for query to complete"
+  query_job.wait_until_done!
+
+  puts "Query results:"
+  query_job.query_results.each do |row|
+    puts row.inspect
+  end
+  # [END run_query_async]
 end

 # TODO: separate sample into separate executable files
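
Pending that TODO, a minimal driver sketch (file name and CLI shape are assumptions, not part of this commit) showing how the query samples could be wired to the command line:

  if __FILE__ == $PROGRAM_NAME
    project_id = ENV["GOOGLE_CLOUD_PROJECT"]

    case ARGV.shift
    when "query"
      run_query_sync project_id: project_id, query_string: ARGV.shift
    when "query_async"
      run_query_async project_id: project_id, query_string: ARGV.shift
    else
      puts "Usage: ruby bigquery_samples.rb [query|query_async] <query>"
    end
  end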