# preprocessing_fcn.py

from google.cloud import storage, translate, vision
from google.oauth2 import service_account
import logging
import os
from google.protobuf import json_format

# DEVELOPER: set these environment variables (including the service-account
# key path) before running.
project_id = os.getenv('PROJECT_ID')
bucket_name = os.getenv('BUCKET_NAME')
location = os.getenv('LOCATION')
key_path = os.getenv('SA_KEY_PATH')

credentials = service_account.Credentials.from_service_account_file(key_path)
storage_client = storage.Client(credentials=credentials,
                                project=credentials.project_id)
# The v3 TranslationServiceClient is required for batch_translate_text below.
translate_client = translate.TranslationServiceClient(credentials=credentials)
vision_client = vision.ImageAnnotatorClient(credentials=credentials)
def async_detect_document(vision_client, gcs_source_uri, gcs_destination_uri,
                          batch_size=20):
    """
    OCR with PDF/TIFF as source files on GCS.

    Args:
        vision_client: vision.ImageAnnotatorClient used for the request.
        gcs_source_uri: GCS URI of the source PDF/TIFF file.
        gcs_destination_uri: GCS URI prefix where the JSON results are written.
        batch_size: How many pages should be grouped into each JSON output file.

    Returns:
        None
    """
    doc_title = gcs_source_uri.split('/')[-1].split('.pdf')[0]

    # Supported mime_types are: 'application/pdf' and 'image/tiff'.
    mime_type = 'application/pdf'

    # Dense-text OCR feature of the Vision API.
    feature = vision.types.Feature(
        type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)

    gcs_source = vision.types.GcsSource(uri=gcs_source_uri)
    input_config = vision.types.InputConfig(
        gcs_source=gcs_source, mime_type=mime_type)

    gcs_destination = vision.types.GcsDestination(uri=gcs_destination_uri)
    output_config = vision.types.OutputConfig(
        gcs_destination=gcs_destination, batch_size=batch_size)

    async_request = vision.types.AsyncAnnotateFileRequest(
        features=[feature], input_config=input_config,
        output_config=output_config)

    operation = vision_client.async_batch_annotate_files(
        requests=[async_request])

    # Block until the long-running operation finishes.
    operation.result(timeout=180)
    logging.info('Text extraction from document {} is completed.'.format(doc_title))
def read_json_result(bucket_name, doc_title):
    """
    Parses the JSON result files and extracts the text.

    Args:
        bucket_name: Name of the GCS bucket holding the OCR JSON output.
        doc_title: Title of the source document, used as the object prefix.

    Returns:
        all_text: str - Containing all text of the document.
    """
    gcs_destination_prefix = 'json/{}-'.format(doc_title)

    # List objects with the given prefix.
    blob_list = list(storage_client.list_blobs(bucket_or_name=bucket_name,
                                               prefix=gcs_destination_prefix))
    all_text = ''
    for blob in blob_list:
        json_string = blob.download_as_string()
        file_response = json_format.Parse(
            json_string, vision.types.AnnotateFileResponse())
        # Each JSON file holds a batch of per-page responses.
        for page_response in file_response.responses:
            all_text += page_response.full_text_annotation.text
            all_text += ' '

    logging.info("Parsing of {} json doc was successful.".format(doc_title))
    return all_text
def upload_blob(bucket_name, txt_content, destination_blob_name):
    """
    Uploads text content to the bucket.

    Args:
        bucket_name: Name of the target GCS bucket.
        txt_content: String content to upload.
        destination_blob_name: Target object name, or a full gs:// URI.

    Returns:
        None
    """
    # Accept either a bare object name or a full gs:// URI.
    destination_blob_name = destination_blob_name.split('gs://{}/'.format(bucket_name))[-1]
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(destination_blob_name)
    blob.upload_from_string(txt_content)
    logging.info("Text uploaded to {}".format(destination_blob_name))
def batch_translate_text(project_id, location,
                         input_uri="gs://YOUR_BUCKET_ID/path/to/your/file.txt",
                         output_uri="gs://YOUR_BUCKET_ID/path/to/save/results/"):
    """
    Translates a batch of texts on GCS and stores the result in a GCS location.

    Args:
        project_id: GCP project id.
        location: Location of the Translation API endpoint, e.g. 'us-central1'.
        input_uri: GCS URI of the text file to translate.
        output_uri: GCS URI prefix where the translations are stored.

    Returns:
        None
    """
    # Supported file types: https://cloud.google.com/translate/docs/supported-formats
    gcs_source = {"input_uri": input_uri}
    input_configs_element = {
        "gcs_source": gcs_source,
        "mime_type": "text/plain"  # Can be "text/plain" or "text/html".
    }
    gcs_destination = {"output_uri_prefix": output_uri}
    output_config = {"gcs_destination": gcs_destination}
    parent = translate_client.location_path(project_id, location)

    # Supported language codes: https://cloud.google.com/translate/docs/languages
    operation = translate_client.batch_translate_text(
        parent=parent,
        source_language_code="it",
        target_language_codes=["en"],  # Up to 10 language codes here.
        input_configs=[input_configs_element],
        output_config=output_config)

    # Block until the batch translation finishes.
    operation.result(timeout=180)
    logging.info('Batch translation of {} is completed.'.format(input_uri))
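

# Example wiring of the full pipeline: OCR a PDF, collect the extracted text,
# re-upload it as a .txt object, then batch-translate it. This is a minimal
# sketch, assuming the env vars above are set and that a PDF named
# 'sample.pdf' (a hypothetical name) exists in the bucket; adjust names and
# paths to your setup.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    doc_title = 'sample'  # hypothetical document name

    # 1. Run OCR; the Vision API writes JSON files under json/<doc_title>-*.
    async_detect_document(
        vision_client,
        gcs_source_uri='gs://{}/{}.pdf'.format(bucket_name, doc_title),
        gcs_destination_uri='gs://{}/json/{}-'.format(bucket_name, doc_title))

    # 2. Collect the OCR text from the JSON result files.
    text = read_json_result(bucket_name, doc_title)

    # 3. Store the raw text so the Translation API can read it from GCS.
    txt_path = 'txt/{}.txt'.format(doc_title)
    upload_blob(bucket_name, text, txt_path)

    # 4. Translate the text file from Italian to English.
    batch_translate_text(
        project_id, location,
        input_uri='gs://{}/{}'.format(bucket_name, txt_path),
        output_uri='gs://{}/translations/'.format(bucket_name))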