@@ -149,7 +149,51 @@ def get_temporary_file(self, uuid):
149
149
except HTTPException as e :
150
150
raise_for_authorization (e .response , self .username is not None )
151
151
raise
152
+
153
def import_lpk(self, lpk_file):
    """Import an ArcGIS Layer Package (.lpk) into the service in one step.

    Uploads the package as a temporary file, runs the two-stage import
    (``create_import_job`` then ``finalize_import_job``), and returns the
    resulting dataset. The package must carry all metadata needed for a
    one-step import; otherwise the pending import is cancelled and an
    error is raised.

    :param lpk_file: path to a local ``.lpk`` file.
    :return: the imported dataset, as returned by ``self.get_dataset``.
    :raises ValueError: if ``lpk_file`` does not end in ``.lpk``.
    :raises DatasetImportError: if the finalize job fails, or if the
        server requires additional metadata (multi-step import).
    """
    # Guard clause: validate the extension before touching the filesystem.
    if not lpk_file.endswith('.lpk'):
        raise ValueError('File must be an ArcGIS Layer Package with a .lpk extension')

    filename = os.path.basename(lpk_file)

    # Context manager guarantees the handle is closed even if the upload
    # raises (the previous version leaked the open file on failure).
    with open(lpk_file, 'rb') as f:
        tmp_file = self.upload_temporary_file(f, filename=filename)

    job_args = {
        'file': tmp_file.uuid,
        'url': None,
        'dataset_type': 'ArcGIS_Native'
    }

    # Stage 1: create the import; the job message is a URI whose
    # second-to-last path segment is the import identifier.
    job = self.create_job('create_import_job', job_args=job_args, block=True)
    uri = job.message.split("/")[-2]

    final_job_args = {
        'import_id': uri
    }

    # Stage 2: finalize the import (blocks until the job completes).
    final_job = self.create_job('finalize_import_job', job_args=final_job_args, block=True)

    if final_job.status != 'succeeded':
        raise DatasetImportError('Import failed: {0}'.format(final_job.message))

    data = json.loads(final_job.message)
    next_uri = data['next_uri']
    if '/import/' in next_uri:
        # A next_uri pointing back into /import/ means the server needs
        # more metadata (multi-step import). Cancel the pending import so
        # it does not dangle server-side, then report the failure.
        dataset_import_id = DATASET_IMPORT_ID_RE.search(next_uri).group(1)
        dataset_import = self.get_import(dataset_import_id)
        dataset_import.cancel()

        raise DatasetImportError(
            'Layer Package imports must have all necessary metadata information necessary for one-step import.'
        )

    # Otherwise next_uri points at the new dataset; its last path segment
    # is the dataset id.
    dataset_id = next_uri.strip('/').split('/')[-1]
    return self.get_dataset(dataset_id)
196
+
153
197
def import_netcdf_dataset (self , nc_or_zip_file , style = None ):
154
198
if nc_or_zip_file .endswith ('.zip' ):
155
199
f = open (nc_or_zip_file , 'a+b' )
0 commit comments