"""preprocessing_fcn.py

Helpers for a GCS document pipeline: OCR a PDF with the Vision API,
stitch the JSON results back into plain text, upload the text to Cloud
Storage, and batch-translate it with the Translation API.
"""

import logging

from google.cloud import storage, translate, vision
# from google.oauth2 import service_account
from google.protobuf import json_format

# DEVELOPER: set the project/bucket configuration and the path to your
# service-account key, e.g. via environment variables:
#
# import os
# project_id = os.getenv('PROJECT_ID')
# bucket_name = os.getenv('BUCKET_NAME')
# location = os.getenv('LOCATION')
# key_path = os.getenv('SA_KEY_PATH')
#
# credentials = service_account.Credentials.from_service_account_file(key_path)
#
# storage_client = storage.Client(credentials=credentials,
#                                 project=credentials.project_id)
#
# translate_client = translate.TranslationServiceClient(credentials=credentials)
#
# vision_client = vision.ImageAnnotatorClient(credentials=credentials)
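
# A minimal setup sketch using Application Default Credentials instead of a
# key file (an assumption, not part of this repo's documented setup; it works
# when GOOGLE_APPLICATION_CREDENTIALS or `gcloud auth` is configured):
#
# storage_client = storage.Client()
# translate_client = translate.TranslationServiceClient()
# vision_client = vision.ImageAnnotatorClient()
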

def async_detect_document(vision_client, gcs_source_uri, gcs_destination_uri,
                          batch_size=20):
    """
    OCR with PDF/TIFF as source files on GCS.

    Args:
        vision_client: vision.ImageAnnotatorClient instance.
        gcs_source_uri: GCS URI of the source PDF/TIFF file.
        gcs_destination_uri: GCS URI prefix for the JSON output files.
        batch_size: How many pages should be grouped into each json output file.
    Returns:
        None
    """
    doc_title = gcs_source_uri.split('/')[-1].split('.pdf')[0]

    # Supported mime_types are: 'application/pdf' and 'image/tiff'
    mime_type = 'application/pdf'

    # Feature in the Vision API
    feature = vision.types.Feature(
        type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)

    gcs_source = vision.types.GcsSource(uri=gcs_source_uri)
    input_config = vision.types.InputConfig(
        gcs_source=gcs_source, mime_type=mime_type)

    gcs_destination = vision.types.GcsDestination(uri=gcs_destination_uri)
    output_config = vision.types.OutputConfig(
        gcs_destination=gcs_destination, batch_size=batch_size)

    async_request = vision.types.AsyncAnnotateFileRequest(
        features=[feature], input_config=input_config,
        output_config=output_config)

    operation = vision_client.async_batch_annotate_files(
        requests=[async_request])

    # Block until the long-running OCR operation finishes (or times out).
    operation.result(timeout=180)
    logging.info('Text extraction from document {} is completed.'.format(doc_title))
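
# Usage sketch (bucket and file names are assumptions, not from this repo):
#
# async_detect_document(vision_client,
#                       gcs_source_uri='gs://my-bucket/raw/report.pdf',
#                       gcs_destination_uri='gs://my-bucket/json/report-')
#
# Vision appends shard names to the destination prefix, so with batch_size=20
# the results land in files like gs://my-bucket/json/report-output-1-to-20.json.
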

def read_json_result(storage_client, bucket_name, doc_title):
    """
    Parses the JSON output files and extracts their text.

    Args:
        storage_client: storage.Client instance.
        bucket_name: Name of the bucket that holds the JSON output files.
        doc_title: Document title; the blobs are expected under the
            prefix 'json/{doc_title}-'.
    Returns:
        all_text: str - Containing all text of the document.
    """
    gcs_src_prefix = 'json/' + '{}-'.format(doc_title)

    # List objects with the given prefix.
    blob_list = list(storage_client.list_blobs(bucket_or_name=bucket_name,
                                               prefix=gcs_src_prefix))
    all_text = ''
    for blob in blob_list:
        json_string = blob.download_as_string()
        file_response = json_format.Parse(
            json_string, vision.types.AnnotateFileResponse())
        # Each JSON file contains the responses for one batch of pages.
        for page_response in file_response.responses:
            all_text += page_response.full_text_annotation.text
            all_text += ' '
    logging.info("Parsing of {} json doc was successful.".format(doc_title))
    return all_text
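
# Usage sketch (continuing the assumed names above): the shards written under
# the 'json/report-' prefix are stitched back into one string.
#
# full_text = read_json_result(storage_client, 'my-bucket', 'report')
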

def upload_blob(storage_client, bucket_name, txt_content, destination_blob_name):
    """
    Uploads a string to the bucket as a text file.

    Args:
        storage_client: storage.Client instance.
        bucket_name: Name of the target bucket.
        txt_content: str - Content to upload.
        destination_blob_name: Target blob name, or a full gs:// URI
            within the same bucket.
    Returns:
        None
    """
    # Accept either a plain blob name or a full gs://<bucket>/... URI.
    destination_blob_name = destination_blob_name.split('gs://{}/'.format(bucket_name))[-1]
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(destination_blob_name)
    blob.upload_from_string(txt_content)
    logging.info("Text uploaded to {}".format(destination_blob_name))
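
# Usage sketch (assumed names): either a plain blob name or a full gs:// URI
# of the same bucket works as the destination.
#
# upload_blob(storage_client, 'my-bucket', full_text, 'txt/report.txt')
# upload_blob(storage_client, 'my-bucket', full_text,
#             'gs://my-bucket/txt/report.txt')
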

def batch_translate_text(translate_client, project_id, location,
                         input_uri="gs://YOUR_BUCKET_ID/path/to/your/file.txt",
                         output_uri="gs://YOUR_BUCKET_ID/path/to/save/results/"):
    """
    Translates a batch of texts on GCS and stores the result in a GCS location.

    Args:
        translate_client: translate.TranslationServiceClient instance.
        project_id: GCP project id.
        location: Location of the translation endpoint.
        input_uri: GCS URI of the text file to translate.
        output_uri: GCS URI prefix under which the results are stored.
    Returns:
        None
    """
    # Supported file types: https://cloud.google.com/translate/docs/supported-formats
    gcs_source = {"input_uri": input_uri}
    input_configs_element = {
        "gcs_source": gcs_source,
        "mime_type": "text/plain"  # Can be "text/plain" or "text/html".
    }
    gcs_destination = {"output_uri_prefix": output_uri}
    output_config = {"gcs_destination": gcs_destination}
    parent = translate_client.location_path(project_id, location)

    # Supported language codes: https://cloud.google.com/translate/docs/languages
    operation = translate_client.batch_translate_text(
        parent=parent,
        source_language_code="it",
        target_language_codes=["en"],  # Up to 10 language codes here.
        input_configs=[input_configs_element],
        output_config=output_config)

    # Block until the batch translation finishes (or times out).
    operation.result(timeout=180)
    logging.info("Batch translation of {} is completed.".format(input_uri))
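

# A minimal end-to-end sketch tying the helpers together. All names below
# (my-project, my-bucket, report.pdf) are assumed placeholders, not values
# taken from this repo; batch translation runs in us-central1.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    vision_client = vision.ImageAnnotatorClient()
    storage_client = storage.Client()
    translate_client = translate.TranslationServiceClient()

    # OCR the PDF, collect the text, store it, then translate it.
    async_detect_document(vision_client,
                          'gs://my-bucket/raw/report.pdf',
                          'gs://my-bucket/json/report-')
    text = read_json_result(storage_client, 'my-bucket', 'report')
    upload_blob(storage_client, 'my-bucket', text, 'txt/report.txt')
    batch_translate_text(translate_client, 'my-project', 'us-central1',
                         input_uri='gs://my-bucket/txt/report.txt',
                         output_uri='gs://my-bucket/translations/')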