/
classes.rb
2904 lines (2502 loc) · 144 KB
/
classes.rb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'date'
require 'google/apis/core/base_service'
require 'google/apis/core/json_representation'
require 'google/apis/core/hashable'
require 'google/apis/errors'
module Google
module Apis
module RemotebuildexecutionV1alpha
# An `Action` captures all the information about an execution which is required
# to reproduce it. `Action`s are the core component of the [Execution] service.
# A single `Action` represents a repeatable action that can be performed by the
# execution service. `Action`s can be succinctly identified by the digest of
# their wire format encoding and, once an `Action` has been executed, will be
# cached in the action cache. Future requests can then use the cached result
# rather than needing to run afresh. When a server completes execution of an
# Action, it MAY choose to cache the result in the ActionCache unless `
# do_not_cache` is `true`. Clients SHOULD expect the server to do so. By default,
# future calls to Execute the same `Action` will also serve their results from
# the cache. Clients must take care to understand the caching behaviour. Ideally,
# all `Action`s will be reproducible so that serving a result from cache is
# always desirable and correct.
class BuildBazelRemoteExecutionV2Action
  include Google::Apis::Core::Hashable

  # Digest of the `Command` proto to run. A digest pairs a blob's hash with
  # its size in bytes; both parts are required and a request missing either
  # MUST be rejected. When a `Digest` refers to a proto message it refers to
  # the message's canonical binary encoding (fields in tag order, no unknown
  # or duplicate fields, default serialization semantics) so that equivalent
  # messages always hash identically.
  # Corresponds to the JSON property `commandDigest`
  # @return [Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :command_digest

  # When true, this `Action`'s result cannot be cached, and in-flight
  # requests for the same `Action` may not be merged.
  # Corresponds to the JSON property `doNotCache`
  # @return [Boolean]
  attr_accessor :do_not_cache
  alias_method :do_not_cache?, :do_not_cache

  # Digest of the input root directory (same hash-plus-size contract and
  # canonical-encoding rules as `command_digest`).
  # Corresponds to the JSON property `inputRootDigest`
  # @return [Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :input_root_digest

  # Required supported NodeProperty keys. These MUST be lexicographically
  # sorted by name (by code point, i.e. UTF-8 bytes) so equivalent `Action`s
  # hash to the same value; interpretation is server-dependent and an
  # unrecognized property yields an `INVALID_ARGUMENT` error.
  # Corresponds to the JSON property `outputNodeProperties`
  # @return [Array<String>]
  attr_accessor :output_node_properties

  # Execution timeout. Absent means the execution may run as long as the
  # server allows; the server SHOULD impose a default, and MUST reject a
  # timeout longer than its maximum. The timeout is part of the `Action`
  # message, so two otherwise-identical `Action`s with different timeouts are
  # distinct — this prevents a short-timeout run from getting a stale cache
  # hit produced under a longer timeout.
  # Corresponds to the JSON property `timeout`
  # @return [String]
  attr_accessor :timeout

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[command_digest do_not_cache input_root_digest
       output_node_properties timeout].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# An ActionResult represents the result of an Action being run.
class BuildBazelRemoteExecutionV2ActionResult
  include Google::Apis::Core::Hashable

  # ExecutedActionMetadata contains details about a completed execution.
  # Corresponds to the JSON property `executionMetadata`
  # @return [Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2ExecutedActionMetadata]
  attr_accessor :execution_metadata

  # The exit code of the command.
  # Corresponds to the JSON property `exitCode`
  # @return [Fixnum]
  attr_accessor :exit_code

  # Output directories produced by the action: one entry per directory
  # requested in the Action's `output_directories` or `output_paths` that
  # existed after completion, carrying the digest of a `Tree` message for the
  # directory plus the exact requested path. If a name listed in
  # `output_files` was found here but was not a directory, the server returns
  # FAILED_PRECONDITION.
  # Corresponds to the JSON property `outputDirectories`
  # @return [Array<Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2OutputDirectory>]
  attr_accessor :output_directories

  # Requested output directories that turned out to be symbolic links — to
  # other outputs, to inputs, or (if the server supports
  # SymlinkAbsolutePathStrategy.ALLOWED) to absolute paths. A symlink to a
  # file instead of a directory yields FAILED_PRECONDITION. Outputs the
  # action did not produce are omitted, and clients MUST NOT assume the list
  # is sorted. DEPRECATED as of v2.1; servers wanting v2.0 compatibility
  # should populate this in addition to `output_symlinks`.
  # Corresponds to the JSON property `outputDirectorySymlinks`
  # @return [Array<Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2OutputSymlink>]
  attr_accessor :output_directory_symlinks

  # Requested output files that turned out to be symbolic links; a symlink
  # whose target is not a regular file yields FAILED_PRECONDITION. Outputs
  # not produced are omitted, and clients MUST NOT assume the list is sorted.
  # DEPRECATED as of v2.1; servers wanting v2.0 compatibility should populate
  # this in addition to `output_symlinks`.
  # Corresponds to the JSON property `outputFileSymlinks`
  # @return [Array<Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2OutputSymlink>]
  attr_accessor :output_file_symlinks

  # Regular output files of the action: one entry per file requested in
  # `output_files` or `output_paths` that existed after completion (symlinked
  # files appear in `output_file_symlinks`, or `output_symlinks` after v2.1,
  # instead). A requested file found to be a directory yields
  # FAILED_PRECONDITION. Outputs not produced are omitted, and clients MUST
  # NOT assume the list is sorted.
  # Corresponds to the JSON property `outputFiles`
  # @return [Array<Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2OutputFile>]
  attr_accessor :output_files

  # New in v2.1: populated only when the command used `output_paths` rather
  # than the pre-v2.1 `output_files`/`output_directories` fields. One entry
  # per requested path that existed after completion and was a symbolic link
  # (possibly to other outputs, inputs, or absolute paths under
  # SymlinkAbsolutePathStrategy.ALLOWED). Outputs not produced are omitted,
  # and clients MUST NOT assume the list is sorted.
  # Corresponds to the JSON property `outputSymlinks`
  # @return [Array<Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2OutputSymlink>]
  attr_accessor :output_symlinks

  # Digest of the action's standard error blob. A digest pairs the blob's
  # hash with its size in bytes (both required); when referring to a proto
  # message it refers to the canonical binary encoding.
  # Corresponds to the JSON property `stderrDigest`
  # @return [Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :stderr_digest

  # Inlined standard error. The server SHOULD NOT inline stderr unless the
  # client requested it in GetActionResultRequest, MAY still omit it, and
  # MUST omit it if inlining would exceed message size limits.
  # Corresponds to the JSON property `stderrRaw`
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
  # @return [String]
  attr_accessor :stderr_raw

  # Digest of the action's standard output blob (same hash-plus-size and
  # canonical-encoding contract as `stderr_digest`).
  # Corresponds to the JSON property `stdoutDigest`
  # @return [Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :stdout_digest

  # Inlined standard output; same inlining rules as `stderr_raw`.
  # Corresponds to the JSON property `stdoutRaw`
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
  # @return [String]
  attr_accessor :stdout_raw

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[execution_metadata exit_code output_directories
       output_directory_symlinks output_file_symlinks output_files
       output_symlinks stderr_digest stderr_raw stdout_digest
       stdout_raw].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# A `Command` is the actual command executed by a worker running an Action and
# specifications of its environment. Except as otherwise required, the
# environment (such as which system libraries or binaries are available, and
# what filesystems are mounted where) is defined by and specific to the
# implementation of the remote execution API.
class BuildBazelRemoteExecutionV2Command
  include Google::Apis::Core::Hashable

  # The arguments to the command. The first argument must be the path to the
  # executable — either relative (evaluated against the input root) or
  # absolute.
  # Corresponds to the JSON property `arguments`
  # @return [Array<String>]
  attr_accessor :arguments

  # Environment variables to set when running the program. These override any
  # worker-provided defaults and may add new variables. To guarantee that
  # equivalent Commands hash to the same value, entries MUST be
  # lexicographically sorted by name (by code point, i.e. UTF-8 bytes).
  # Corresponds to the JSON property `environmentVariables`
  # @return [Array<Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2CommandEnvironmentVariable>]
  attr_accessor :environment_variables

  # Output directories the client expects back (each returned as a `Tree`
  # message digest; see OutputDirectory). Anything else created during
  # execution is discarded. Paths are relative to the action's working
  # directory, use `/` as separator regardless of platform, and MUST NOT have
  # a leading or trailing slash; the empty string (allowed but discouraged)
  # captures the whole working tree including inputs. Paths MUST be sorted
  # lexicographically by code point for consistent Action hashing. A
  # directory cannot be duplicated or share a path with a listed output file,
  # though it may be a parent of another output directory. Workers create the
  # leading (but not the listed) directories before execution even when not
  # part of the input root. DEPRECATED since 2.1: use `output_paths` instead.
  # Corresponds to the JSON property `outputDirectories`
  # @return [Array<String>]
  attr_accessor :output_directories

  # Output files the client expects back (alongside directories listed in
  # `output_directories`); anything else created during execution is
  # discarded. Same path rules as `output_directories`: relative to the
  # working directory, `/`-separated, no leading/trailing slash, sorted
  # lexicographically by code point. A file cannot be duplicated, be a parent
  # of another output file, or share a path with a listed output directory.
  # Workers create the leading directories before execution. DEPRECATED since
  # v2.1: use `output_paths` instead.
  # Corresponds to the JSON property `outputFiles`
  # @return [Array<String>]
  attr_accessor :output_files

  # New in v2.1: output paths the client expects back; supersedes the
  # DEPRECATED `output_files` and `output_directories` fields (which are
  # ignored when this is used). Whether each path is a file or a directory is
  # determined by the server after execution — a file comes back in an
  # OutputFile field, a directory as a `Tree` message digest (see
  # OutputDirectory). Same path rules as above (relative, `/`-separated, no
  # leading/trailing slash), and paths MUST be deduplicated and sorted
  # lexicographically by code point. Workers create the leading directories
  # before execution.
  # Corresponds to the JSON property `outputPaths`
  # @return [Array<String>]
  attr_accessor :output_paths

  # A `Platform` is a set of requirements — hardware, operating system,
  # compiler toolchain — for an Action's execution environment, expressed as
  # a series of required key-value properties.
  # Corresponds to the JSON property `platform`
  # @return [Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2Platform]
  attr_accessor :platform

  # Working directory for the command, relative to the input root. Must be an
  # existing directory in the input tree; if empty, the action runs in the
  # input root itself.
  # Corresponds to the JSON property `workingDirectory`
  # @return [String]
  attr_accessor :working_directory

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[arguments environment_variables output_directories output_files
       output_paths platform working_directory].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# An `EnvironmentVariable` is one variable to set in the running program's
# environment.
class BuildBazelRemoteExecutionV2CommandEnvironmentVariable
  include Google::Apis::Core::Hashable

  # The variable name.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  # The variable value.
  # Corresponds to the JSON property `value`
  # @return [String]
  attr_accessor :value

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[name value].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# A content digest. A digest for a given blob consists of the size of the blob
# and its hash. The hash algorithm to use is defined by the server. The size is
# considered to be an integral part of the digest and cannot be separated. That
# is, even if the `hash` field is correctly specified but `size_bytes` is not,
# the server MUST reject the request. The reason for including the size in the
# digest is as follows: in a great many cases, the server needs to know the size
# of the blob it is about to work with prior to starting an operation with it,
# such as flattening Merkle tree structures or streaming it to a worker.
# Technically, the server could implement a separate metadata store, but this
# results in a significantly more complicated implementation as opposed to
# having the client specify the size up-front (or storing the size along with
# the digest in every message where digests are embedded). This does mean that
# the API leaks some implementation details of (what we consider to be) a
# reasonable server implementation, but we consider this to be a worthwhile
# tradeoff. When a `Digest` is used to refer to a proto message, it always
# refers to the message in binary encoded form. To ensure consistent hashing,
# clients and servers MUST ensure that they serialize messages according to the
# following rules, even if there are alternate valid encodings for the same
# message: * Fields are serialized in tag order. * There are no unknown fields. *
# There are no duplicate fields. * Fields are serialized according to the
# default semantics for their type. Most protocol buffer implementations will
# always follow these rules when serializing, but care should be taken to avoid
# shortcuts. For instance, concatenating two messages to merge them may produce
# duplicate fields.
class BuildBazelRemoteExecutionV2Digest
include Google::Apis::Core::Hashable
# The hash. In the case of SHA-256, it will always be a lowercase hex string
# exactly 64 characters long.
# NOTE(review): the Ruby accessor is `hash_prop` rather than `hash`,
# presumably so it does not shadow Ruby's built-in Object#hash — confirm
# against the client-code generator's reserved-name mapping.
# Corresponds to the JSON property `hash`
# @return [String]
attr_accessor :hash_prop
# The size of the blob, in bytes.
# Corresponds to the JSON property `sizeBytes`
# @return [Fixnum]
attr_accessor :size_bytes
# Construct a Digest, populating fields via #update!.
def initialize(**args)
update!(**args)
end
# Update properties of this object. Only keys present in `args` are
# assigned; note the expected key is :hash_prop, not :hash.
def update!(**args)
@hash_prop = args[:hash_prop] if args.key?(:hash_prop)
@size_bytes = args[:size_bytes] if args.key?(:size_bytes)
end
end
# A `Directory` represents a directory node in a file tree, containing zero or
# more children FileNodes, DirectoryNodes and SymlinkNodes. Each `Node` contains
# its name in the directory, either the digest of its content (either a file
# blob or a `Directory` proto) or a symlink target, as well as possibly some
# metadata about the file or directory. In order to ensure that two equivalent
# directory trees hash to the same value, the following restrictions MUST be
# obeyed when constructing a a `Directory`: * Every child in the directory must
# have a path of exactly one segment. Multiple levels of directory hierarchy may
# not be collapsed. * Each child in the directory must have a unique path
# segment (file name). Note that while the API itself is case-sensitive, the
# environment where the Action is executed may or may not be case-sensitive.
# That is, it is legal to call the API with a Directory that has both "Foo" and "
# foo" as children, but the Action may be rejected by the remote system upon
# execution. * The files, directories and symlinks in the directory must each be
# sorted in lexicographical order by path. The path strings must be sorted by
# code point, equivalently, by UTF-8 bytes. * The NodeProperties of files,
# directories, and symlinks must be sorted in lexicographical order by property
# name. A `Directory` that obeys the restrictions is said to be in canonical
# form. As an example, the following could be used for a file named `bar` and a
# directory named `foo` with an executable file named `baz` (hashes shortened
# for readability): ```json // (Directory proto) ` files: [ ` name: "bar",
# digest: ` hash: "4a73bc9d03...", size: 65534 `, node_properties: [ ` "name": "
# MTime", "value": "2017-01-15T01:30:15.01Z" ` ] ` ], directories: [ ` name: "
# foo", digest: ` hash: "4cf2eda940...", size: 43 ` ` ] ` // (Directory proto
# with hash "4cf2eda940..." and size 43) ` files: [ ` name: "baz", digest: `
# hash: "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ```
class BuildBazelRemoteExecutionV2Directory
  include Google::Apis::Core::Hashable

  # The subdirectories in the directory.
  # Corresponds to the JSON property `directories`
  # @return [Array<Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2DirectoryNode>]
  attr_accessor :directories

  # The files in the directory.
  # Corresponds to the JSON property `files`
  # @return [Array<Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2FileNode>]
  attr_accessor :files

  # The node properties of the Directory.
  # Corresponds to the JSON property `nodeProperties`
  # @return [Array<Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2NodeProperty>]
  attr_accessor :node_properties

  # The symlinks in the directory.
  # Corresponds to the JSON property `symlinks`
  # @return [Array<Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2SymlinkNode>]
  attr_accessor :symlinks

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object. Only keys present in `args` are
  # copied; absent properties keep their current values.
  def update!(**args)
    %i[directories files node_properties symlinks].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# A `DirectoryNode` represents a child of a Directory which is itself a `
# Directory` and its associated metadata.
class BuildBazelRemoteExecutionV2DirectoryNode
  include Google::Apis::Core::Hashable

  # A content digest: the hash of the referenced `Directory` message (in its
  # canonical binary proto encoding) together with its size in bytes. Both
  # parts are mandatory — see `BuildBazelRemoteExecutionV2Digest` for the
  # full contract.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :digest

  # The name of the directory.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object. Only keys present in `args` are
  # copied; absent properties keep their current values.
  def update!(**args)
    %i[digest name].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# Metadata about an ongoing execution, which will be contained in the metadata
# field of the Operation.
class BuildBazelRemoteExecutionV2ExecuteOperationMetadata
  include Google::Apis::Core::Hashable

  # A content digest identifying the Action being executed: its hash plus its
  # size in bytes, both mandatory — see `BuildBazelRemoteExecutionV2Digest`
  # for the full contract.
  # Corresponds to the JSON property `actionDigest`
  # @return [Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :action_digest

  # The current stage of execution.
  # Corresponds to the JSON property `stage`
  # @return [String]
  attr_accessor :stage

  # If set, the client can use this name with ByteStream.Read to stream the
  # standard error.
  # Corresponds to the JSON property `stderrStreamName`
  # @return [String]
  attr_accessor :stderr_stream_name

  # If set, the client can use this name with ByteStream.Read to stream the
  # standard output.
  # Corresponds to the JSON property `stdoutStreamName`
  # @return [String]
  attr_accessor :stdout_stream_name

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object. Only keys present in `args` are
  # copied; absent properties keep their current values.
  def update!(**args)
    %i[action_digest stage stderr_stream_name stdout_stream_name].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# The response message for Execution.Execute, which will be contained in the
# response field of the Operation.
class BuildBazelRemoteExecutionV2ExecuteResponse
  include Google::Apis::Core::Hashable

  # True if the result was served from cache, false if it was executed.
  # Corresponds to the JSON property `cachedResult`
  # @return [Boolean]
  attr_accessor :cached_result
  alias_method :cached_result?, :cached_result

  # Freeform informational message with details on the execution of the action
  # that may be displayed to the user upon failure or when requested explicitly.
  # Corresponds to the JSON property `message`
  # @return [String]
  attr_accessor :message

  # An ActionResult represents the result of an Action being run.
  # Corresponds to the JSON property `result`
  # @return [Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2ActionResult]
  attr_accessor :result

  # An optional map of additional log outputs the server wishes to provide,
  # e.g. worker setup logs useful for debugging failures that happen outside
  # the job itself. Keys SHOULD be human readable for display to a user.
  # Corresponds to the JSON property `serverLogs`
  # @return [Hash<String,Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2LogFile>]
  attr_accessor :server_logs

  # The `Status` type defines a logical error model suitable for different
  # programming environments, as used by gRPC (https://github.com/grpc):
  # error code, error message, and error details. See the API Design Guide
  # (https://cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `status`
  # @return [Google::Apis::RemotebuildexecutionV1alpha::GoogleRpcStatus]
  attr_accessor :status

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object. Only keys present in `args` are
  # copied; absent properties keep their current values.
  def update!(**args)
    %i[cached_result message result server_logs status].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# ExecutedActionMetadata contains details about a completed execution.
class BuildBazelRemoteExecutionV2ExecutedActionMetadata
  include Google::Apis::Core::Hashable

  # When the worker completed executing the action command.
  # Corresponds to the JSON property `executionCompletedTimestamp`
  # @return [String]
  attr_accessor :execution_completed_timestamp

  # When the worker started executing the action command.
  # Corresponds to the JSON property `executionStartTimestamp`
  # @return [String]
  attr_accessor :execution_start_timestamp

  # When the worker finished fetching action inputs.
  # Corresponds to the JSON property `inputFetchCompletedTimestamp`
  # @return [String]
  attr_accessor :input_fetch_completed_timestamp

  # When the worker started fetching action inputs.
  # Corresponds to the JSON property `inputFetchStartTimestamp`
  # @return [String]
  attr_accessor :input_fetch_start_timestamp

  # When the worker finished uploading action outputs.
  # Corresponds to the JSON property `outputUploadCompletedTimestamp`
  # @return [String]
  attr_accessor :output_upload_completed_timestamp

  # When the worker started uploading action outputs.
  # Corresponds to the JSON property `outputUploadStartTimestamp`
  # @return [String]
  attr_accessor :output_upload_start_timestamp

  # When was the action added to the queue.
  # Corresponds to the JSON property `queuedTimestamp`
  # @return [String]
  attr_accessor :queued_timestamp

  # The name of the worker which ran the execution.
  # Corresponds to the JSON property `worker`
  # @return [String]
  attr_accessor :worker

  # When the worker completed the action, including all stages.
  # Corresponds to the JSON property `workerCompletedTimestamp`
  # @return [String]
  attr_accessor :worker_completed_timestamp

  # When the worker received the action.
  # Corresponds to the JSON property `workerStartTimestamp`
  # @return [String]
  attr_accessor :worker_start_timestamp

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object. Only keys present in `args` are
  # copied; absent properties keep their current values.
  def update!(**args)
    %i[execution_completed_timestamp execution_start_timestamp
       input_fetch_completed_timestamp input_fetch_start_timestamp
       output_upload_completed_timestamp output_upload_start_timestamp
       queued_timestamp worker worker_completed_timestamp
       worker_start_timestamp].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# A `FileNode` represents a single file and associated metadata.
class BuildBazelRemoteExecutionV2FileNode
  include Google::Apis::Core::Hashable

  # A content digest: the hash of the file blob together with its size in
  # bytes. Both parts are mandatory — see `BuildBazelRemoteExecutionV2Digest`
  # for the full contract.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :digest

  # True if file is executable, false otherwise.
  # Corresponds to the JSON property `isExecutable`
  # @return [Boolean]
  attr_accessor :is_executable
  alias_method :is_executable?, :is_executable

  # The name of the file.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  # The node properties of the FileNode.
  # Corresponds to the JSON property `nodeProperties`
  # @return [Array<Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2NodeProperty>]
  attr_accessor :node_properties

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object. Only keys present in `args` are
  # copied; absent properties keep their current values.
  def update!(**args)
    %i[digest is_executable name node_properties].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# A `LogFile` is a log stored in the CAS.
class BuildBazelRemoteExecutionV2LogFile
  include Google::Apis::Core::Hashable

  # A content digest: the hash of the log blob in the CAS together with its
  # size in bytes. Both parts are mandatory — see
  # `BuildBazelRemoteExecutionV2Digest` for the full contract.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV1alpha::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :digest

  # This is a hint as to the purpose of the log, and is set to true if the log
  # is human-readable text that can be usefully displayed to a user, and false
  # otherwise. For instance, if a command-line client wishes to print the
  # server logs to the terminal for a failed action, this allows it to avoid
  # displaying a binary file.
  # Corresponds to the JSON property `humanReadable`
  # @return [Boolean]
  attr_accessor :human_readable
  alias_method :human_readable?, :human_readable

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object. Only keys present in `args` are
  # copied; absent properties keep their current values.
  def update!(**args)
    %i[digest human_readable].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# A single property for FileNodes, DirectoryNodes, and SymlinkNodes. The server
# is responsible for specifying the property `name`s that it accepts. If
# permitted by the server, the same `name` may occur multiple times.
class BuildBazelRemoteExecutionV2NodeProperty
  include Google::Apis::Core::Hashable

  # The property name.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  # The property value.
  # Corresponds to the JSON property `value`
  # @return [String]
  attr_accessor :value

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object. Only keys present in `args` are
  # copied; absent properties keep their current values.
  def update!(**args)
    %i[name value].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# An `OutputDirectory` is the output in an `ActionResult` corresponding to a
# directory's full contents rather than a single file.
class BuildBazelRemoteExecutionV2OutputDirectory
include Google::Apis::Core::Hashable
# The full path of the directory relative to the working directory. The path
# separator is a forward slash `/`. Since this is a relative path, it MUST NOT
# begin with a leading forward slash. The empty string value is allowed, and it
# denotes the entire working directory.
# Corresponds to the JSON property `path`
# @return [String]
attr_accessor :path
# A content digest. A digest for a given blob consists of the size of the blob
# and its hash. The hash algorithm to use is defined by the server. The size is
# considered to be an integral part of the digest and cannot be separated. That
# is, even if the `hash` field is correctly specified but `size_bytes` is not,
# the server MUST reject the request. The reason for including the size in the
# digest is as follows: in a great many cases, the server needs to know the size
# of the blob it is about to work with prior to starting an operation with it,